content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# Auto-extracted example script from the nlme package's VarCorr() help page.
library(nlme)
### Name: VarCorr
### Title: Extract variance and correlation components
### Aliases: VarCorr VarCorr.lme VarCorr.pdMat VarCorr.pdBlocked
### print.VarCorr.lme
### Keywords: models
### ** Examples
# Fit a linear mixed-effects model of distance on age with a random
# slope for age (Orthodont data ships with nlme), then extract the
# variance and correlation components of the random effects.
fm1 <- lme(distance ~ age, data = Orthodont, random = ~age)
VarCorr(fm1)
|
/data/genthat_extracted_code/nlme/examples/VarCorr.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 293
|
r
|
# Auto-extracted example script from the nlme package's VarCorr() help page.
library(nlme)
### Name: VarCorr
### Title: Extract variance and correlation components
### Aliases: VarCorr VarCorr.lme VarCorr.pdMat VarCorr.pdBlocked
### print.VarCorr.lme
### Keywords: models
### ** Examples
# Fit a linear mixed-effects model of distance on age with a random
# slope for age (Orthodont data ships with nlme), then extract the
# variance and correlation components of the random effects.
fm1 <- lme(distance ~ age, data = Orthodont, random = ~age)
VarCorr(fm1)
|
## makeCacheMatrix wraps a matrix together with a cache for its inverse.
## It returns a list of four closures sharing the same environment:
##   set(y)          replace the stored matrix with y and clear the cache
##   get()           return the stored matrix
##   setinverse(inv) store a precomputed inverse in the cache
##   getinverse()    return the cached inverse, or NULL if none cached yet
## Intended to be used together with cacheSolve().
makeCacheMatrix <- function(x = matrix()) {
  # m holds the cached inverse; NULL means "not computed yet"
  m <- NULL
  # Replace the stored matrix and invalidate the cache, since the old
  # inverse no longer corresponds to the new data.
  set <- function(y) {
    x <<- y
    m <<- NULL
  }
  get <- function() x
  # Bug fix: the original ignored its argument and recomputed solve(x)
  # here, which defeated the purpose of caching and silently discarded
  # any extra arguments cacheSolve() had passed to solve(). Now we
  # simply store the inverse supplied by the caller.
  setinverse <- function(inverse) m <<- inverse
  getinverse <- function() m
  # The returned list of accessors is the "cache matrix" object.
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve returns the inverse of the special "matrix" produced by
## makeCacheMatrix. If an inverse is already cached it is returned
## directly (with a console message); otherwise the inverse is computed
## with solve(), stored in the cache for next time, and returned.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse, remember it, and return it.
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
    return(inv)
  }
  message("getting cached data")
  cached
}
|
/cachematrix.R
|
no_license
|
CBillings92/ProgrammingAssignment2
|
R
| false
| false
| 2,655
|
r
|
## makeCacheMatrix wraps a matrix together with a cache for its inverse.
## It returns a list of four closures sharing the same environment:
##   set(y)          replace the stored matrix with y and clear the cache
##   get()           return the stored matrix
##   setinverse(inv) store a precomputed inverse in the cache
##   getinverse()    return the cached inverse, or NULL if none cached yet
## Intended to be used together with cacheSolve().
makeCacheMatrix <- function(x = matrix()) {
  # m holds the cached inverse; NULL means "not computed yet"
  m <- NULL
  # Replace the stored matrix and invalidate the cache, since the old
  # inverse no longer corresponds to the new data.
  set <- function(y) {
    x <<- y
    m <<- NULL
  }
  get <- function() x
  # Bug fix: the original ignored its argument and recomputed solve(x)
  # here, which defeated the purpose of caching and silently discarded
  # any extra arguments cacheSolve() had passed to solve(). Now we
  # simply store the inverse supplied by the caller.
  setinverse <- function(inverse) m <<- inverse
  getinverse <- function() m
  # The returned list of accessors is the "cache matrix" object.
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve returns the inverse of the special "matrix" produced by
## makeCacheMatrix. A previously cached inverse is reused (and announced
## with a console message); otherwise the inverse is computed via
## solve(), cached through the object's setinverse method, and returned.
## Any additional arguments are passed through to solve().
cacheSolve <- function(x, ...) {
  stored <- x$getinverse()
  if (!is.null(stored)) {
    message("getting cached data")
    return(stored)
  }
  # Nothing cached yet: solve, remember, return.
  result <- solve(x$get(), ...)
  x$setinverse(result)
  result
}
|
/text RS 1.R
|
no_license
|
asz15273x3949/R
|
R
| false
| false
| 2,970
|
r
| ||
# Previously used target selections, kept commented out for reference:
#targets <- c("hitIntensities_neg_pick7", "hitIntensities_pos_pick7")
#targets <- c("pickPeaks_SB1MP3e20")
# targets <- c("intMatrixNorm_pos_SB1MP3e20_set1",
# "spectraHitsDDA_pos_SB1MP3e20_set1",
# "pickPeaksDIA_SB1MP3e20",
# "profilesRawDIA")
# Here we specify which targets the Drake workflow should build.
# An empty vector (note: c() evaluates to NULL) means build everything.
targets <- c()
|
/targets.R
|
no_license
|
meowcat/ms2field-wwtp
|
R
| false
| false
| 403
|
r
|
# Previously used target selections, kept commented out for reference:
#targets <- c("hitIntensities_neg_pick7", "hitIntensities_pos_pick7")
#targets <- c("pickPeaks_SB1MP3e20")
# targets <- c("intMatrixNorm_pos_SB1MP3e20_set1",
# "spectraHitsDDA_pos_SB1MP3e20_set1",
# "pickPeaksDIA_SB1MP3e20",
# "profilesRawDIA")
# Here we specify which targets the Drake workflow should build.
# An empty vector (note: c() evaluates to NULL) means build everything.
targets <- c()
|
#' Create an object of class iCellR.
#'
#' This function takes a data frame of gene counts for cells and makes an
#' object of class iCellR, recording a human-readable summary (package
#' version, data dimensions, first few row/column names, and any sample
#' conditions encoded as a "condition_cellname" prefix in the column
#' names) in the object's \code{obj.info} slot.
#' @param x A data frame containing gene counts for cells.
#' @return An object of class iCellR
#' @examples
#' \dontrun{
#' my.obj <- make.obj(my.data)
#' }
#'
#' @export
make.obj <- function (x = NULL) {
  # Fail early with a clear message instead of a cryptic downstream error
  # when no data is supplied (the default x = NULL is only a sentinel).
  if (is.null(x)) {
    stop("A data frame of gene counts must be provided.", call. = FALSE)
  }
  # Summary header: package version and raw data dimensions.
  INFO <- "An object of class iCellR version:"
  INFO <- paste(INFO, packageVersion("iCellR"))
  Data.Dim <- dim(x)
  Data.Dim <- paste(Data.Dim, collapse = ",")
  # Typo fix in the displayed info text: "dimentions" -> "dimensions".
  Data.Dim <- paste("Raw/original data dimensions (rows,columns):", Data.Dim)
  # Preview the first three column and row names.
  DATA <- colnames(x)
  Col.n <- head(DATA, 3)
  Col.n <- paste(Col.n, collapse = ",")
  Col.n <- paste("Columns names:", Col.n, "...")
  Row.n <- head(row.names(x), 3)
  Row.n <- paste(Row.n, collapse = ",")
  Row.n <- paste("Row names:", Row.n, "...")
  # Detect conditions: splitting the first column name on "_" yields
  # exactly two pieces when a single "cond_cell" prefix is present.
  do <- data.frame(do.call('rbind', strsplit(as.character(head(DATA, 1)), '_', fixed = TRUE)))
  do <- dim(do)[2]
  if (do == 2) {
    # Tabulate the condition prefix of every column name.
    My.Conds <- data.frame(do.call('rbind', strsplit(as.character(DATA), '_', fixed = TRUE)))[1]
    My.Conds <- as.data.frame(table(My.Conds))
    Conds <- paste(as.character(My.Conds$My.Conds), collapse = ",")
    cond.counts <- paste(as.character(My.Conds$Freq), collapse = ",")
    My.Conds <- paste("Data conditions in raw data: ", Conds, " (", cond.counts, ")", sep = "")
  } else {
    # NOTE(review): names containing more than one "_" also land here and
    # are reported as a single sample -- confirm this is intended.
    My.Conds <- "Data conditions: no conditions/single sample"
  }
  # Assemble the multi-line info text shown when the object is printed.
  INFO.to.show <- paste(INFO, Data.Dim, My.Conds, Row.n, Col.n, sep = "\n")
  INFO.to.show <- capture.output(cat(INFO.to.show))
  # Replace dashes in dimension names with dots (presumably to keep the
  # names valid for downstream code -- TODO confirm against callers).
  row.names(x) <- gsub("-", ".", row.names(x))
  colnames(x) <- gsub("-", ".", colnames(x))
  object <- new(Class = "iCellR", obj.info = INFO.to.show, raw.data = x, data.conditions = My.Conds)
  return(object)
}
|
/R/F003.make.obj.R
|
no_license
|
weiliuyuan/iCellR
|
R
| false
| false
| 1,763
|
r
|
#' Create an object of class iCellR.
#'
#' This function takes a data frame of gene counts for cells and makes an
#' object of class iCellR, recording a human-readable summary (package
#' version, data dimensions, first few row/column names, and any sample
#' conditions encoded as a "condition_cellname" prefix in the column
#' names) in the object's \code{obj.info} slot.
#' @param x A data frame containing gene counts for cells.
#' @return An object of class iCellR
#' @examples
#' \dontrun{
#' my.obj <- make.obj(my.data)
#' }
#'
#' @export
make.obj <- function (x = NULL) {
  # Fail early with a clear message instead of a cryptic downstream error
  # when no data is supplied (the default x = NULL is only a sentinel).
  if (is.null(x)) {
    stop("A data frame of gene counts must be provided.", call. = FALSE)
  }
  # Summary header: package version and raw data dimensions.
  INFO <- "An object of class iCellR version:"
  INFO <- paste(INFO, packageVersion("iCellR"))
  Data.Dim <- dim(x)
  Data.Dim <- paste(Data.Dim, collapse = ",")
  # Typo fix in the displayed info text: "dimentions" -> "dimensions".
  Data.Dim <- paste("Raw/original data dimensions (rows,columns):", Data.Dim)
  # Preview the first three column and row names.
  DATA <- colnames(x)
  Col.n <- head(DATA, 3)
  Col.n <- paste(Col.n, collapse = ",")
  Col.n <- paste("Columns names:", Col.n, "...")
  Row.n <- head(row.names(x), 3)
  Row.n <- paste(Row.n, collapse = ",")
  Row.n <- paste("Row names:", Row.n, "...")
  # Detect conditions: splitting the first column name on "_" yields
  # exactly two pieces when a single "cond_cell" prefix is present.
  do <- data.frame(do.call('rbind', strsplit(as.character(head(DATA, 1)), '_', fixed = TRUE)))
  do <- dim(do)[2]
  if (do == 2) {
    # Tabulate the condition prefix of every column name.
    My.Conds <- data.frame(do.call('rbind', strsplit(as.character(DATA), '_', fixed = TRUE)))[1]
    My.Conds <- as.data.frame(table(My.Conds))
    Conds <- paste(as.character(My.Conds$My.Conds), collapse = ",")
    cond.counts <- paste(as.character(My.Conds$Freq), collapse = ",")
    My.Conds <- paste("Data conditions in raw data: ", Conds, " (", cond.counts, ")", sep = "")
  } else {
    # NOTE(review): names containing more than one "_" also land here and
    # are reported as a single sample -- confirm this is intended.
    My.Conds <- "Data conditions: no conditions/single sample"
  }
  # Assemble the multi-line info text shown when the object is printed.
  INFO.to.show <- paste(INFO, Data.Dim, My.Conds, Row.n, Col.n, sep = "\n")
  INFO.to.show <- capture.output(cat(INFO.to.show))
  # Replace dashes in dimension names with dots (presumably to keep the
  # names valid for downstream code -- TODO confirm against callers).
  row.names(x) <- gsub("-", ".", row.names(x))
  colnames(x) <- gsub("-", ".", colnames(x))
  object <- new(Class = "iCellR", obj.info = INFO.to.show, raw.data = x, data.conditions = My.Conds)
  return(object)
}
|
## Wrap matrix 'x' so that its inverse can be cached alongside it.
## The returned list exposes four closures over a shared environment:
## $set()/$get() replace/read the matrix, $setinverse()/$getinverse()
## write/read the cached inverse. $getinverse() yields NULL until an
## inverse has been stored, and $set() resets the cache to NULL so the
## inverse is only recomputed when the data has changed. Use together
## with cacheSolve(x, ...) in place of solve(x, ...).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # new data invalidates the old inverse
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Return the inverse of a CacheMatrix 'x', reusing the cached value
## when one is available. All other parameters are forwarded to the
## standard solve(x, ...) function.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  known <- x$getinverse()
  if (is.null(known)) {
    # Cache miss: compute, store for next time, and return.
    fresh <- solve(x$get(), ...)
    x$setinverse(fresh)
    return(fresh)
  }
  known
}
|
/cachematrix.R
|
no_license
|
RossJHagan/ProgrammingAssignment2
|
R
| false
| false
| 1,193
|
r
|
## Wrap matrix 'x' so that its inverse can be cached alongside it.
## The returned list exposes four closures over a shared environment:
## $set()/$get() replace/read the matrix, $setinverse()/$getinverse()
## write/read the cached inverse. $getinverse() yields NULL until an
## inverse has been stored, and $set() resets the cache to NULL so the
## inverse is only recomputed when the data has changed. Use together
## with cacheSolve(x, ...) in place of solve(x, ...).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # new data invalidates the old inverse
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Return the inverse of a CacheMatrix 'x', reusing the cached value
## when one is available. All other parameters are forwarded to the
## standard solve(x, ...) function.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  known <- x$getinverse()
  if (is.null(known)) {
    # Cache miss: compute, store for next time, and return.
    fresh <- solve(x$get(), ...)
    x$setinverse(fresh)
    return(fresh)
  }
  known
}
|
# Project function for the size based modelling package mizer
# Copyright 2012 Finlay Scott and Julia Blanchard.
# Copyright 2018 Gustav Delius and Richard Southwell.
# Development has received funding from the European Commission's Horizon 2020
# Research and Innovation Programme under Grant Agreement No. 634495
# for the project MINOUW (http://minouw-project.eu/).
# Distributed under the GPL 3 or later
# Maintainer: Gustav Delius, University of York, <gustav.delius@york.ac.uk>
#' @useDynLib mizer
#' @importFrom Rcpp sourceCpp
NULL
#' Project size spectrum forward in time
#'
#' Runs the size spectrum model simulation.
#' The function returns an object of type
#' \linkS4class{MizerSim} that can then be explored with a range of
#' [summary_functions], [indicator_functions] and
#' [plotting_functions].
#'
#' @param object Either a \linkS4class{MizerParams} object or a
#' \linkS4class{MizerSim} object (which contains a `MizerParams` object).
#' @param effort The effort of each fishing gear through time. See notes below.
#' @param t_max The number of years the projection runs for. The default value
#' is 100. This argument is ignored if an array is used for the `effort`
#' argument. See notes below.
#' @param dt Time step of the solver. The default value is 0.1.
#' @param t_save The frequency with which the output is stored. The default
#' value is 1. This argument is ignored if an array is used for the `effort`
#' argument. See notes below.
#' @param t_start The the year of the start of the simulation. The simulation
#' will cover the period from `t_start` to \code{t_start + t_max}.
#' Defaults to 0. Ignored if an array is used for the `effort`
#' argument or a `MizerSim` for the `object` argument.
#' @param initial_n `r lifecycle::badge("deprecated")` The initial abundances of
#' species. Instead of using this argument you should set `initialN(params)`
#' to the desired value.
#' @param initial_n_pp `r lifecycle::badge("deprecated")` The initial abundances
#' of resource. Instead of using this argument you should set
#' `initialNResource(params)` to the desired value.
#' @param append A boolean that determines whether the new simulation results
#' are appended to the previous ones. Only relevant if `object` is a
#' `MizerSim` object. Default = TRUE.
#' @param progress_bar Either a boolean value to determine whether a progress
#' bar should be shown in the console, or a shiny Progress object to implement
#' a progress bar in a shiny app.
#' @param ... Other arguments will be passed to rate functions.
#'
#' @note The `effort` argument specifies the level of fishing effort during the
#' simulation. If it is not supplied, the initial effort stored in the params
#' object is used. The effort can be specified in four different ways:
#' \itemize{
#' \item A single numeric value. This specifies the effort of all fishing gears
#' which is constant through time (i.e. all the gears have the same constant
#' effort).
#' \item A named vector whose names match with existing gear names.
#' The values in the vector specify the constant fishing effort for those
#' fishing gears, i.e. the effort is constant through time. The
#' effort for gears that are not included in the effort vector is set to 0.
#' \item A numerical vector which has the same length as the number of fishing
#' gears. The values in the vector specify the
#' constant fishing effort of each of the fishing gears, with the ordering
#' assumed to be the same as in the MizerParams object.
#' \item A numerical array with dimensions time x gear. This specifies the
#' fishing effort of each gear at each time step. The first dimension, time,
#' must be named numerically and increasing. The second dimension of the array
#' must be named and the names must correspond to the gear names in the
#' `MizerParams` object. The value for the effort for a particular time
#' is used during the interval from that time to the next time in the array.
#' }
#'
#' If effort is specified as an array then the smallest time in the array is
#' used as the initial time for the simulation. Otherwise the initial time is
#' set to the final time of the previous simulation if `object` is a
#' `MizerSim` object or to `t_start` otherwise. Also, if the effort is
#' an array then the `t_max` and `t_save` arguments are ignored and the
#' simulation times will be taken from the effort array.
#'
#' If the `object` argument is of class `MizerSim` then the initial
#' values for the simulation are taken from the final values in the
#' `MizerSim` object and the corresponding arguments to this function will
#' be ignored.
#'
#' @return An object of class \linkS4class{MizerSim}.
#'
#' @export
#' @examples
#' \donttest{
#' params <- NS_params
#' # With constant fishing effort for all gears for 20 time steps
#' sim <- project(params, t_max = 20, effort = 0.5)
#' # With constant fishing effort which is different for each gear
#' effort <- c(Industrial = 0, Pelagic = 1, Beam = 0.5, Otter = 0.5)
#' sim <- project(params, t_max = 20, effort = effort)
#' # With fishing effort that varies through time for each gear
#' gear_names <- c("Industrial","Pelagic","Beam","Otter")
#' times <- seq(from = 1, to = 10, by = 1)
#' effort_array <- array(NA, dim = c(length(times), length(gear_names)),
#' dimnames = list(time = times, gear = gear_names))
#' effort_array[,"Industrial"] <- 0.5
#' effort_array[,"Pelagic"] <- seq(from = 1, to = 2, length = length(times))
#' effort_array[,"Beam"] <- seq(from = 1, to = 0, length = length(times))
#' effort_array[,"Otter"] <- seq(from = 1, to = 0.5, length = length(times))
#' sim <- project(params, effort = effort_array)
#' }
project <- function(object, effort,
                    t_max = 100, dt = 0.1, t_save = 1, t_start = 0,
                    initial_n, initial_n_pp,
                    append = TRUE,
                    progress_bar = TRUE, ...) {
    # Set and check initial values ----
    assert_that(t_max > 0)
    if (is(object, "MizerSim")) {
        # Continuing an existing simulation: start from its final state
        # and its final time.
        validObject(object)
        params <- setInitialValues(object@params, object)
        t_start <- getTimes(object)[idxFinalT(object)]
    } else if (is(object, "MizerParams")) {
        params <- validParams(object)
        # Deprecated initial_n / initial_n_pp arguments are still honoured
        # for backwards compatibility (see roxygen above).
        if (!missing(initial_n)) params@initial_n[] <- initial_n
        if (!missing(initial_n_pp)) params@initial_n_pp[] <- initial_n_pp
    } else {
        stop("The `object` argument must be either a MizerParams or a MizerSim object.")
    }
    initial_n <- params@initial_n
    initial_n_pp <- params@initial_n_pp
    initial_n_other <- params@initial_n_other
    no_sp <- length(params@w_min_idx)
    # Sanity-check the dimensions and types of the initial state.
    assert_that(is.array(initial_n),
                is.numeric(initial_n),
                are_equal(dim(initial_n), c(no_sp, length(params@w))))
    assert_that(is.numeric(initial_n_pp),
                length(initial_n_pp) == length(params@w_full))
    assert_that(is.null(initial_n_other) || is.list(initial_n_other))
    other_names <- names(params@other_dynamics)
    if (length(other_names) > 0) {
        # Every "other" model component must have a named initial value.
        if (is.null(names(initial_n_other))) {
            stop("The initial_n_other needs to be a named list")
        }
        if (!setequal(names(initial_n_other), other_names)) {
            stop("The names of the entries in initial_n_other do not match ",
                 "the names of the other components of the model.")
        }
    }
    # Set effort array ----
    if (missing(effort)) effort <- params@initial_effort
    if (is.null(dim(effort))) { # effort is a vector or scalar
        # Set up the effort array transposed so we can use the recycling rules
        # no point running a simulation with no saved results
        if (t_max < t_save) {
            t_save <- t_max
        }
        times <- seq(t_start, t_start + t_max, by = t_save)
        effort <- validEffortVector(effort, params)
        # Replicate the constant effort vector across all saved times.
        effort <- t(array(effort,
                          dim = c(length(effort), length(times)),
                          dimnames = list(gear = names(effort),
                                          time = times)))
    } else {
        effort <- validEffortArray(effort, params)
    }
    # The saved times are taken from the (possibly user-supplied) effort
    # array's time dimension.
    times <- as.numeric(dimnames(effort)[[1]])
    # Make the MizerSim object with the right size ----
    # We only save every t_save years
    sim <- MizerSim(params, t_dimnames = times)
    # Set initial population and effort
    sim@n[1, , ] <- initial_n
    sim@n_pp[1, ] <- initial_n_pp
    sim@n_other[1, ] <- initial_n_other
    sim@effort <- effort
    ## Initialise ----
    # Look up the dynamics and rate functions by name once, outside the
    # loop, and pass them to project_simple() (see its documentation).
    resource_dynamics_fn <- get(params@resource_dynamics)
    other_dynamics_fns <- lapply(params@other_dynamics, get)
    rates_fns <- lapply(params@rates_funcs, get)
    # Set up progress bar
    if (is(progress_bar, "Progress")) {
        # We have been passed a shiny progress object
        progress_bar$set(message = "Running simulation", value = 0)
        proginc <- 1 / length(times)
    } else if (progress_bar == TRUE) {
        pb <- progress::progress_bar$new(
            format = "[:bar] :percent ETA: :eta",
            total = length(times), width = 60)
        pb$tick(0)
    }
    n_list <- list(n = initial_n, n_pp = initial_n_pp,
                   n_other = initial_n_other)
    t <- times[[1]]
    ## Loop over time ----
    for (i in 2:length(times)) {
        # number of time steps between saved times
        steps <- round((times[[i]] - t) / dt)
        # advance to next saved time; the effort used over this interval
        # is the one attached to the start of the interval.
        n_list <- project_simple(
            params, n = n_list$n, n_pp = n_list$n_pp, n_other = n_list$n_other,
            t = t, dt = dt, steps = steps,
            effort = effort[i - 1, ],
            resource_dynamics_fn = resource_dynamics_fn,
            other_dynamics_fns = other_dynamics_fns,
            rates_fns = rates_fns, ...)
        # Calculate start time for next iteration
        # The reason we don't simply use the next entry in `times` is that
        # those entries may not be separated by exact multiples of dt.
        t <- t + steps * dt
        # Advance progress bar
        if (is(progress_bar, "Progress")) {
            progress_bar$inc(amount = proginc)
        } else if (progress_bar == TRUE) {
            pb$tick()
        }
        # Store result
        sim@n[i, , ] <- n_list$n
        sim@n_pp[i, ] <- n_list$n_pp
        sim@n_other[i, ] <- n_list$n_other
    }
    # append to previous simulation ----
    # Build a longer MizerSim holding the old results followed by the new
    # ones (the new run's first time slot duplicates the old final state,
    # so only entries 2:no_t are copied from `sim`).
    if (is(object, "MizerSim") && append) {
        no_t_old <- dim(object@n)[1]
        no_t <- length(times)
        new_t_dimnames <- c(as.numeric(dimnames(object@n)[[1]]),
                            times[2:no_t])
        new_sim <- MizerSim(params, t_dimnames = new_t_dimnames)
        old_indices <- 1:no_t_old
        new_indices <- seq(from = no_t_old + 1, length.out = no_t - 1)
        new_sim@n[old_indices, , ] <- object@n
        new_sim@n[new_indices, , ] <- sim@n[2:no_t, , ]
        new_sim@n_pp[old_indices, ] <- object@n_pp
        new_sim@n_pp[new_indices, ] <- sim@n_pp[2:no_t, ]
        new_sim@n_other[old_indices, ] <- object@n_other
        new_sim@n_other[new_indices, ] <- sim@n_other[2:no_t, ]
        new_sim@effort[old_indices, ] <- object@effort
        new_sim@effort[new_indices, ] <- sim@effort[2:no_t, ]
        return(new_sim)
    }
    return(sim)
}
#' Project abundances by a given number of time steps into the future
#'
#' This is an internal function used by the user-facing `project()` function.
#' It is of potential interest only to mizer extension authors.
#'
#' The function does not check its arguments because it is meant to be as fast
#' as possible to allow it to be used in a loop. For example, it is called in
#' `project()` once for every saved value. The function also does not save its
#' intermediate results but only returns the result at time `t + dt * steps`.
#' During this time it uses the constant fishing effort `effort`.
#'
#' The functional arguments can be calculated from slots in the `params` object
#' with
#' ```
#' resource_dynamics_fn <- get(params@resource_dynamics)
#' other_dynamics_fns <- lapply(params@other_dynamics, get)
#' rates_fns <- lapply(params@rates_funcs, get)
#' ```
#' The reason the function does not do that itself is to shave 20 microseconds
#' of its running time, which pays when the function is called hundreds of
#' times in a row.
#'
#' This function is also used in `steady()`. In between calls to
#' `project_simple()` the `steady()` function checks whether the values are
#' still changing significantly, so that it can stop when a steady state has
#' been approached. Mizer extension packages might have a similar need to run
#' a simulation repeatedly for short periods to run some other code in
#' between. Because this code may want to use the values of the rates at the
#' final time step, these too are included in the returned list.
#'
#' @param params A MizerParams object.
#' @param n An array (species x size) with the number density at start of
#' simulation.
#' @param n_pp A vector (size) with the resource number density at start of
#' simulation.
#' @param n_other A named list with the abundances of other components at start
#' of simulation.
#' @param t Time at the start of the simulation.
#' @param dt Size of time step.
#' @param steps The number of time steps by which to project.
#' @param effort The fishing effort to be used throughout the simulation. This
#' must be a vector or list with one named entry per fishing gear.
#' @param resource_dynamics_fn The function for the resource
#' dynamics. See Details.
#' @param other_dynamics_fns List with the functions for the
#' dynamics of the other components. See Details.
#' @param rates_fns List with the functions for calculating
#' the rates. See Details.
#' @param ... Other arguments that are passed on to the rate functions.
#' @return List with the final values of `n`, `n_pp` and `n_other`, `rates`.
#'
#' @export
#' @concept helper
project_simple <-
    function(params,
             n = params@initial_n,
             n_pp = params@initial_n_pp,
             n_other = params@initial_n_other,
             effort = params@initial_effort,
             t = 0, dt = 0.1, steps,
             resource_dynamics_fn = get(params@resource_dynamics),
             other_dynamics_fns = lapply(params@other_dynamics, get),
             rates_fns = lapply(params@rates_funcs, get), ...) {
    # Handy things ----
    no_sp <- nrow(params@species_params) # number of species
    no_w <- length(params@w) # number of fish size bins
    idx <- 2:no_w
    # Hacky shortcut to access the correct element of a 2D array using 1D
    # notation
    # This references the egg size bracket for all species, so for example
    # n[w_min_idx_array_ref] = n[,w_min_idx]
    w_min_idx_array_ref <- (params@w_min_idx - 1) * no_sp + (1:no_sp)
    # Matrices for solver; preallocated once and overwritten each step.
    a <- matrix(0, nrow = no_sp, ncol = no_w)
    b <- matrix(0, nrow = no_sp, ncol = no_w)
    S <- matrix(0, nrow = no_sp, ncol = no_w)
    # Loop over time steps ----
    for (i_time in 1:steps) {
        # All rates are evaluated at the start of the step.
        r <- rates_fns$Rates(
            params, n = n, n_pp = n_pp, n_other = n_other,
            t = t, effort = effort, rates_fns = rates_fns, ...)
        # * Update other components ----
        n_other_current <- n_other # So that the resource dynamics can still
        # use the current value
        for (component in names(params@other_dynamics)) {
            n_other[[component]] <-
                other_dynamics_fns[[component]](
                    params,
                    n = n,
                    n_pp = n_pp,
                    n_other = n_other_current,
                    rates = r,
                    t = t,
                    dt = dt,
                    component = component,
                    ...
                )
        }
        # * Update resource ----
        n_pp <- resource_dynamics_fn(params, n = n, n_pp = n_pp,
                                     n_other = n_other_current, rates = r,
                                     t = t, dt = dt,
                                     resource_rate = params@rr_pp,
                                     resource_capacity = params@cc_pp, ...)
        # * Update species ----
        # Coefficients of the implicit finite-difference update in size
        # (the back-substitution itself is done in compiled code below).
        # a_{ij} = - g_i(w_{j-1}) / dw_j dt
        a[, idx] <- sweep(-r$e_growth[, idx - 1, drop = FALSE] * dt, 2,
                          params@dw[idx], "/")
        # b_{ij} = 1 + g_i(w_j) / dw_j dt + \mu_i(w_j) dt
        b[] <- 1 + sweep(r$e_growth * dt, 2, params@dw, "/") + r$mort * dt
        # S_{ij} <- N_i(w_j)
        S[, idx] <- n[, idx, drop = FALSE]
        # Update first size group of n: the egg bin additionally receives
        # the density-dependent reproduction r$rdd.
        n[w_min_idx_array_ref] <-
            (n[w_min_idx_array_ref] + r$rdd * dt /
                 params@dw[params@w_min_idx]) /
            b[w_min_idx_array_ref]
        # Update n
        # for (i in 1:no_sp) # number of species assumed small, so no need to
        # vectorize this loop over species
        # for (j in (params@w_min_idx[i]+1):no_w)
        # n[i,j] <- (S[i,j] - A[i,j]*n[i,j-1]) / B[i,j]
        # This is implemented via Rcpp
        n <- inner_project_loop(no_sp = no_sp, no_w = no_w, n = n,
                                A = a, B = b, S = S,
                                w_min_idx = params@w_min_idx)
        # * Update time ----
        t <- t + dt
    }
    # Rates from the final step are returned so callers (e.g. steady())
    # can inspect them without recomputing.
    return(list(n = n, n_pp = n_pp, n_other = n_other, rates = r))
}
## Validate a time-by-gear effort array against the gears defined in
## `params`. Checks the gear count and names, reorders the gear columns
## into the order used by the MizerParams object, verifies that the time
## dimension is labelled with increasing numeric values, replaces NA
## entries with the edition-dependent default effort, and names the
## dimensions "time" and "gear". Returns the cleaned-up effort array.
validEffortArray <- function(effort, params) {
    gear_names <- dimnames(params@catchability)[[1]]
    no_gears <- dim(params@catchability)[1]
    if (dim(effort)[2] != no_gears) {
        stop("The number of gears in the effort array (length of the second dimension = ",
             dim(effort)[2],
             ") does not equal the number of gears in the MizerParams object (",
             no_gears, ").")
    }
    if (!all(gear_names %in% dimnames(effort)[[2]])) {
        stop("Gear names in the MizerParams object (",
             paste(gear_names, collapse = ", "),
             ") do not match those in the effort array.")
    }
    # Put the gear columns into the order used by the MizerParams object.
    effort <- effort[, gear_names, drop = FALSE]
    time_labels <- dimnames(effort)[[1]]
    if (is.null(time_labels)) {
        stop("The time dimname of the effort argument must be numeric.")
    }
    time_values <- as.numeric(time_labels)
    if (any(is.na(time_values))) {
        stop("The time dimname of the effort argument must be numeric.")
    }
    if (is.unsorted(time_values)) {
        stop("The time dimname of the effort argument should be increasing.")
    }
    # Replace any NA's with the default effort value for this edition.
    effort_default <- ifelse(defaults_edition() < 2, 0, 1)
    effort[is.na(effort)] <- effort_default
    names(dimnames(effort)) <- c("time", "gear")
    effort
}
|
/R/project.R
|
no_license
|
sizespectrum/mizer
|
R
| false
| false
| 19,066
|
r
|
# Project function for the size based modelling package mizer
# Copyright 2012 Finlay Scott and Julia Blanchard.
# Copyright 2018 Gustav Delius and Richard Southwell.
# Development has received funding from the European Commission's Horizon 2020
# Research and Innovation Programme under Grant Agreement No. 634495
# for the project MINOUW (http://minouw-project.eu/).
# Distributed under the GPL 3 or later
# Maintainer: Gustav Delius, University of York, <gustav.delius@york.ac.uk>
#' @useDynLib mizer
#' @importFrom Rcpp sourceCpp
NULL
#' Project size spectrum forward in time
#'
#' Runs the size spectrum model simulation.
#' The function returns an object of type
#' \linkS4class{MizerSim} that can then be explored with a range of
#' [summary_functions], [indicator_functions] and
#' [plotting_functions].
#'
#' @param object Either a \linkS4class{MizerParams} object or a
#' \linkS4class{MizerSim} object (which contains a `MizerParams` object).
#' @param effort The effort of each fishing gear through time. See notes below.
#' @param t_max The number of years the projection runs for. The default value
#' is 100. This argument is ignored if an array is used for the `effort`
#' argument. See notes below.
#' @param dt Time step of the solver. The default value is 0.1.
#' @param t_save The frequency with which the output is stored. The default
#' value is 1. This argument is ignored if an array is used for the `effort`
#' argument. See notes below.
#' @param t_start The the year of the start of the simulation. The simulation
#' will cover the period from `t_start` to \code{t_start + t_max}.
#' Defaults to 0. Ignored if an array is used for the `effort`
#' argument or a `MizerSim` for the `object` argument.
#' @param initial_n `r lifecycle::badge("deprecated")` The initial abundances of
#' species. Instead of using this argument you should set `initialN(params)`
#' to the desired value.
#' @param initial_n_pp `r lifecycle::badge("deprecated")` The initial abundances
#' of resource. Instead of using this argument you should set
#' `initialNResource(params)` to the desired value.
#' @param append A boolean that determines whether the new simulation results
#' are appended to the previous ones. Only relevant if `object` is a
#' `MizerSim` object. Default = TRUE.
#' @param progress_bar Either a boolean value to determine whether a progress
#' bar should be shown in the console, or a shiny Progress object to implement
#' a progress bar in a shiny app.
#' @param ... Other arguments will be passed to rate functions.
#'
#' @note The `effort` argument specifies the level of fishing effort during the
#' simulation. If it is not supplied, the initial effort stored in the params
#' object is used. The effort can be specified in four different ways:
#' \itemize{
#' \item A single numeric value. This specifies the effort of all fishing gears
#' which is constant through time (i.e. all the gears have the same constant
#' effort).
#' \item A named vector whose names match with existing gear names.
#' The values in the vector specify the constant fishing effort for those
#' fishing gears, i.e. the effort is constant through time. The
#' effort for gears that are not included in the effort vector is set to 0.
#' \item A numerical vector which has the same length as the number of fishing
#' gears. The values in the vector specify the
#' constant fishing effort of each of the fishing gears, with the ordering
#' assumed to be the same as in the MizerParams object.
#' \item A numerical array with dimensions time x gear. This specifies the
#' fishing effort of each gear at each time step. The first dimension, time,
#' must be named numerically and increasing. The second dimension of the array
#' must be named and the names must correspond to the gear names in the
#' `MizerParams` object. The value for the effort for a particular time
#' is used during the interval from that time to the next time in the array.
#' }
#'
#' If effort is specified as an array then the smallest time in the array is
#' used as the initial time for the simulation. Otherwise the initial time is
#' set to the final time of the previous simulation if `object` is a
#' `MizerSim` object or to `t_start` otherwise. Also, if the effort is
#' an array then the `t_max` and `t_save` arguments are ignored and the
#' simulation times will be taken from the effort array.
#'
#' If the `object` argument is of class `MizerSim` then the initial
#' values for the simulation are taken from the final values in the
#' `MizerSim` object and the corresponding arguments to this function will
#' be ignored.
#'
#' @return An object of class \linkS4class{MizerSim}.
#'
#' @export
#' @examples
#' \donttest{
#' params <- NS_params
#' # With constant fishing effort for all gears for 20 time steps
#' sim <- project(params, t_max = 20, effort = 0.5)
#' # With constant fishing effort which is different for each gear
#' effort <- c(Industrial = 0, Pelagic = 1, Beam = 0.5, Otter = 0.5)
#' sim <- project(params, t_max = 20, effort = effort)
#' # With fishing effort that varies through time for each gear
#' gear_names <- c("Industrial","Pelagic","Beam","Otter")
#' times <- seq(from = 1, to = 10, by = 1)
#' effort_array <- array(NA, dim = c(length(times), length(gear_names)),
#' dimnames = list(time = times, gear = gear_names))
#' effort_array[,"Industrial"] <- 0.5
#' effort_array[,"Pelagic"] <- seq(from = 1, to = 2, length = length(times))
#' effort_array[,"Beam"] <- seq(from = 1, to = 0, length = length(times))
#' effort_array[,"Otter"] <- seq(from = 1, to = 0.5, length = length(times))
#' sim <- project(params, effort = effort_array)
#' }
project <- function(object, effort,
                    t_max = 100, dt = 0.1, t_save = 1, t_start = 0,
                    initial_n, initial_n_pp,
                    append = TRUE,
                    progress_bar = TRUE, ...) {
    # Set and check initial values ----
    assert_that(t_max > 0)
    if (is(object, "MizerSim")) {
        validObject(object)
        # Continue from the final state and final time of the previous run
        params <- setInitialValues(object@params, object)
        t_start <- getTimes(object)[idxFinalT(object)]
    } else if (is(object, "MizerParams")) {
        params <- validParams(object)
        # Deprecated initial_n / initial_n_pp arguments are still honoured
        if (!missing(initial_n)) params@initial_n[] <- initial_n
        if (!missing(initial_n_pp)) params@initial_n_pp[] <- initial_n_pp
    } else {
        stop("The `object` argument must be either a MizerParams or a MizerSim object.")
    }
    initial_n <- params@initial_n
    initial_n_pp <- params@initial_n_pp
    initial_n_other <- params@initial_n_other
    no_sp <- length(params@w_min_idx)  # number of species
    # Sanity-check dimensions of the initial abundances against the grids
    assert_that(is.array(initial_n),
                is.numeric(initial_n),
                are_equal(dim(initial_n), c(no_sp, length(params@w))))
    assert_that(is.numeric(initial_n_pp),
                length(initial_n_pp) == length(params@w_full))
    assert_that(is.null(initial_n_other) || is.list(initial_n_other))
    other_names <- names(params@other_dynamics)
    if (length(other_names) > 0) {
        if (is.null(names(initial_n_other))) {
            stop("The initial_n_other needs to be a named list")
        }
        if (!setequal(names(initial_n_other), other_names)) {
            stop("The names of the entries in initial_n_other do not match ",
                 "the names of the other components of the model.")
        }
    }
    # Set effort array ----
    if (missing(effort)) effort <- params@initial_effort
    if (is.null(dim(effort))) { # effort is a vector or scalar
        # Set up the effort array transposed so we can use the recycling rules
        # no point running a simulation with no saved results
        if (t_max < t_save) {
            t_save <- t_max
        }
        times <- seq(t_start, t_start + t_max, by = t_save)
        effort <- validEffortVector(effort, params)
        # Constant effort: replicate the vector across all saved times
        effort <- t(array(effort,
                          dim = c(length(effort), length(times)),
                          dimnames = list(gear = names(effort),
                                          time = times)))
    } else {
        effort <- validEffortArray(effort, params)
    }
    # Saved output times are taken from the effort array's time dimnames
    times <- as.numeric(dimnames(effort)[[1]])
    # Make the MizerSim object with the right size ----
    # We only save every t_save years
    sim <- MizerSim(params, t_dimnames = times)
    # Set initial population and effort
    sim@n[1, , ] <- initial_n
    sim@n_pp[1, ] <- initial_n_pp
    sim@n_other[1, ] <- initial_n_other
    sim@effort <- effort
    ## Initialise ----
    # Resolve dynamics and rate functions by name once, outside the time loop
    resource_dynamics_fn <- get(params@resource_dynamics)
    other_dynamics_fns <- lapply(params@other_dynamics, get)
    rates_fns <- lapply(params@rates_funcs, get)
    # Set up progress bar
    if (is(progress_bar, "Progress")) {
        # We have been passed a shiny progress object
        progress_bar$set(message = "Running simulation", value = 0)
        proginc <- 1 / length(times)
    } else if (progress_bar == TRUE) {
        pb <- progress::progress_bar$new(
            format = "[:bar] :percent ETA: :eta",
            total = length(times), width = 60)
        pb$tick(0)
    }
    n_list <- list(n = initial_n, n_pp = initial_n_pp,
                   n_other = initial_n_other)
    t <- times[[1]]
    ## Loop over time ----
    for (i in 2:length(times)) {
        # number of time steps between saved times
        steps <- round((times[[i]] - t) / dt)
        # advance to next saved time
        n_list <- project_simple(
            params, n = n_list$n, n_pp = n_list$n_pp, n_other = n_list$n_other,
            t = t, dt = dt, steps = steps,
            effort = effort[i - 1, ],
            resource_dynamics_fn = resource_dynamics_fn,
            other_dynamics_fns = other_dynamics_fns,
            rates_fns = rates_fns, ...)
        # Calculate start time for next iteration
        # The reason we don't simply use the next entry in `times` is that
        # those entries may not be separated by exact multiples of dt.
        t <- t + steps * dt
        # Advance progress bar
        if (is(progress_bar, "Progress")) {
            progress_bar$inc(amount = proginc)
        } else if (progress_bar == TRUE) {
            pb$tick()
        }
        # Store result
        sim@n[i, , ] <- n_list$n
        sim@n_pp[i, ] <- n_list$n_pp
        sim@n_other[i, ] <- n_list$n_other
    }
    # append to previous simulation ----
    if (is(object, "MizerSim") && append) {
        # Concatenate old and new time series into a single MizerSim,
        # dropping the duplicated initial time step of the new run.
        no_t_old <- dim(object@n)[1]
        no_t <- length(times)
        new_t_dimnames <- c(as.numeric(dimnames(object@n)[[1]]),
                            times[2:no_t])
        new_sim <- MizerSim(params, t_dimnames = new_t_dimnames)
        old_indices <- 1:no_t_old
        new_indices <- seq(from = no_t_old + 1, length.out = no_t - 1)
        new_sim@n[old_indices, , ] <- object@n
        new_sim@n[new_indices, , ] <- sim@n[2:no_t, , ]
        new_sim@n_pp[old_indices, ] <- object@n_pp
        new_sim@n_pp[new_indices, ] <- sim@n_pp[2:no_t, ]
        new_sim@n_other[old_indices, ] <- object@n_other
        new_sim@n_other[new_indices, ] <- sim@n_other[2:no_t, ]
        new_sim@effort[old_indices, ] <- object@effort
        new_sim@effort[new_indices, ] <- sim@effort[2:no_t, ]
        return(new_sim)
    }
    return(sim)
}
#' Project abundances by a given number of time steps into the future
#'
#' This is an internal function used by the user-facing `project()` function.
#' It is of potential interest only to mizer extension authors.
#'
#' The function does not check its arguments because it is meant to be as fast
#' as possible to allow it to be used in a loop. For example, it is called in
#' `project()` once for every saved value. The function also does not save its
#' intermediate results but only returns the result at time `t + dt * steps`.
#' During this time it uses the constant fishing effort `effort`.
#'
#' The functional arguments can be calculated from slots in the `params` object
#' with
#' ```
#' resource_dynamics_fn <- get(params@resource_dynamics)
#' other_dynamics_fns <- lapply(params@other_dynamics, get)
#' rates_fns <- lapply(params@rates_funcs, get)
#' ```
#' The reason the function does not do that itself is to shave 20 microseconds
#' of its running time, which pays when the function is called hundreds of
#' times in a row.
#'
#' This function is also used in `steady()`. In between calls to
#' `project_simple()` the `steady()` function checks whether the values are
#' still changing significantly, so that it can stop when a steady state has
#' been approached. Mizer extension packages might have a similar need to run
#' a simulation repeatedly for short periods to run some other code in
#' between. Because this code may want to use the values of the rates at the
#' final time step, these too are included in the returned list.
#'
#' @param params A MizerParams object.
#' @param n An array (species x size) with the number density at start of
#' simulation.
#' @param n_pp A vector (size) with the resource number density at start of
#' simulation.
#' @param n_other A named list with the abundances of other components at start
#' of simulation.
#' @param t Time at the start of the simulation.
#' @param dt Size of time step.
#' @param steps The number of time steps by which to project.
#' @param effort The fishing effort to be used throughout the simulation. This
#' must be a vector or list with one named entry per fishing gear.
#' @param resource_dynamics_fn The function for the resource
#' dynamics. See Details.
#' @param other_dynamics_fns List with the functions for the
#' dynamics of the other components. See Details.
#' @param rates_fns List with the functions for calculating
#' the rates. See Details.
#' @param ... Other arguments that are passed on to the rate functions.
#' @return List with the final values of `n`, `n_pp` and `n_other`, `rates`.
#'
#' @export
#' @concept helper
project_simple <-
    function(params,
             n = params@initial_n,
             n_pp = params@initial_n_pp,
             n_other = params@initial_n_other,
             effort = params@initial_effort,
             t = 0, dt = 0.1, steps,
             resource_dynamics_fn = get(params@resource_dynamics),
             other_dynamics_fns = lapply(params@other_dynamics, get),
             rates_fns = lapply(params@rates_funcs, get), ...) {
    # Handy things ----
    no_sp <- nrow(params@species_params) # number of species
    no_w <- length(params@w) # number of fish size bins
    idx <- 2:no_w  # all size bins except the smallest
    # Hacky shortcut to access the correct element of a 2D array using 1D
    # notation
    # This references the egg size bracket for all species, so for example
    # n[w_min_idx_array_ref] = n[,w_min_idx]
    w_min_idx_array_ref <- (params@w_min_idx - 1) * no_sp + (1:no_sp)
    # Coefficient matrices for the solver, allocated once and reused
    # every time step
    a <- matrix(0, nrow = no_sp, ncol = no_w)
    b <- matrix(0, nrow = no_sp, ncol = no_w)
    S <- matrix(0, nrow = no_sp, ncol = no_w)
    # Loop over time steps ----
    for (i_time in 1:steps) {
        # All rates are evaluated at the current state before anything
        # is updated
        r <- rates_fns$Rates(
            params, n = n, n_pp = n_pp, n_other = n_other,
            t = t, effort = effort, rates_fns = rates_fns, ...)
        # * Update other components ----
        n_other_current <- n_other # So that the resource dynamics can still
                                   # use the current value
        for (component in names(params@other_dynamics)) {
            n_other[[component]] <-
                other_dynamics_fns[[component]](
                    params,
                    n = n,
                    n_pp = n_pp,
                    n_other = n_other_current,
                    rates = r,
                    t = t,
                    dt = dt,
                    component = component,
                    ...
                )
        }
        # * Update resource ----
        n_pp <- resource_dynamics_fn(params, n = n, n_pp = n_pp,
                                     n_other = n_other_current, rates = r,
                                     t = t, dt = dt,
                                     resource_rate = params@rr_pp,
                                     resource_capacity = params@cc_pp, ...)
        # * Update species ----
        # a_{ij} = - g_i(w_{j-1}) / dw_j dt
        a[, idx] <- sweep(-r$e_growth[, idx - 1, drop = FALSE] * dt, 2,
                          params@dw[idx], "/")
        # b_{ij} = 1 + g_i(w_j) / dw_j dt + \mu_i(w_j) dt
        b[] <- 1 + sweep(r$e_growth * dt, 2, params@dw, "/") + r$mort * dt
        # S_{ij} <- N_i(w_j)
        S[, idx] <- n[, idx, drop = FALSE]
        # Update first size group of n, adding the density-dependent
        # recruitment flux r$rdd into the egg size bracket
        n[w_min_idx_array_ref] <-
            (n[w_min_idx_array_ref] + r$rdd * dt /
                 params@dw[params@w_min_idx]) /
            b[w_min_idx_array_ref]
        # Update n
        # for (i in 1:no_sp) # number of species assumed small, so no need to
        #                      vectorize this loop over species
        #     for (j in (params@w_min_idx[i]+1):no_w)
        #         n[i,j] <- (S[i,j] - A[i,j]*n[i,j-1]) / B[i,j]
        # This is implemented via Rcpp
        n <- inner_project_loop(no_sp = no_sp, no_w = no_w, n = n,
                                A = a, B = b, S = S,
                                w_min_idx = params@w_min_idx)
        # * Update time ----
        t <- t + dt
    }
    # Rates from the final step are returned so callers (e.g. steady())
    # can inspect them without recomputing
    return(list(n = n, n_pp = n_pp, n_other = n_other, rates = r))
}
validEffortArray <- function(effort, params) {
    # Validate a time x gear effort array against the gears defined in
    # `params`, reorder its columns to match the MizerParams gear order,
    # fill missing entries with the edition-dependent default and return
    # the array with named dimnames.
    expected_gears <- dimnames(params@catchability)[[1]]
    n_expected <- dim(params@catchability)[1]
    # The array must provide one column per gear in the params object
    if (dim(effort)[2] != n_expected) {
        stop("The number of gears in the effort array (length of the second dimension = ",
             dim(effort)[2],
             ") does not equal the number of gears in the MizerParams object (",
             n_expected, ").")
    }
    if (!all(expected_gears %in% dimnames(effort)[[2]])) {
        stop("Gear names in the MizerParams object (",
             paste(expected_gears, collapse = ", "),
             ") do not match those in the effort array.")
    }
    # Reorder columns so gears line up with the MizerParams object
    effort <- effort[, expected_gears, drop = FALSE]
    # The time dimension must carry sorted numeric labels
    time_labels <- dimnames(effort)[[1]]
    if (is.null(time_labels)) {
        stop("The time dimname of the effort argument must be numeric.")
    }
    time_numeric <- as.numeric(time_labels)
    if (anyNA(time_numeric)) {
        stop("The time dimname of the effort argument must be numeric.")
    }
    if (is.unsorted(time_numeric)) {
        stop("The time dimname of the effort argument should be increasing.")
    }
    # Missing entries get the edition-dependent default effort
    default_effort <- if (defaults_edition() < 2) 0 else 1
    effort[is.na(effort)] <- default_effort
    names(dimnames(effort)) <- c("time", "gear")
    effort
}
|
# Clear the workspace and close all graphics devices (original script behavior)
rm(list = ls(all = TRUE))
graphics.off()
# Install locpol only when it is not already available, instead of
# unconditionally reinstalling it (and hitting the network) on every run.
if (!requireNamespace("locpol", quietly = TRUE)) {
    install.packages("locpol")
}
library(locpol)
# Specify your working directory
# setwd("Users...")
############################ SUBROUTINE ################################
spdbl <- function(m, sigma, sigma1, sigma2, s, r, tau) {
    # Breeden and Litzenberger (1978) state price density from a
    # semiparametric Black-Scholes specification (Rookley 1997). Uses an
    # estimate of the volatility smile and its first and second derivatives
    # to recover the SPD together with the Delta and Gamma of the option.
    # This method can only be applied to European options (due to the
    # assumptions).
    #
    # Arguments:
    #   m      - moneyness S/K (vector)
    #   sigma  - implied volatility smile evaluated at m
    #   sigma1 - first derivative of the smile with respect to m
    #   sigma2 - second derivative of the smile with respect to m
    #   s      - underlying price (corrected for dividends)
    #   r      - risk-free interest rate
    #   tau    - time to maturity in years
    # Returns:
    #   list(fstar = state price density over strikes,
    #        delta = option delta, gamma = option gamma)
    #
    # Fix: removed dead locals of the original (`rm`, which shadowed
    # base::rm, `ones`, `c1` and `cdf` were computed but never used).
    st <- sqrt(tau)
    ert <- exp(r * tau)
    rt <- r * tau
    # Modified Black-Scholes terms scaled by S-div instead of F
    d1 <- (log(m) + tau * (r + 0.5 * (sigma^2))) / (sigma * st)
    d2 <- d1 - sigma * st
    # Scaled call price C/S as a function of moneyness
    f <- pnorm(d1) - pnorm(d2) / (ert * m)
    # First derivative of the d1 term w.r.t. moneyness
    d11 <- (1 / (m * sigma * st)) - (1 / (st * (sigma^2))) * ((log(m) + tau * r) * sigma1) + 0.5 * st * sigma1
    # First derivative of the d2 term
    d21 <- d11 - st * sigma1
    # Second derivative of the d1 term
    d12 <- -(1 / (st * (m^2) * sigma)) - sigma1 / (st * m * (sigma^2)) + sigma2 * (0.5 * st - (log(m) + rt) / (st * (sigma^2))) + sigma1 * (2 * sigma1 * (log(m) + rt) / (st * sigma^3) - 1 / (st * m * sigma^2))
    # Second derivative of the d2 term
    d22 <- d12 - st * sigma2
    # Derivatives of the scaled call price; see Rookley (1997)
    f1 <- dnorm(d1) * d11 + (1 / ert) * ((-dnorm(d2) * d21) / m + pnorm(d2) / (m^2))
    f2 <- dnorm(d1) * d12 - d1 * dnorm(d1) * (d11^2) - (1 / (ert * m) * dnorm(d2) * d22) + ((dnorm(d2) * d21) / (ert * m^2)) + (1 / (ert * m) * d2 * dnorm(d2) * (d21^2)) - (2 * pnorm(d2) / (ert * (m^3))) + (1 / (ert * (m^2)) * dnorm(d2) * d21)
    # Recover the strike price and the second strike derivative of the call
    x <- s / m
    c2 <- s * ((1 / x^2) * ((m^2) * f2 + 2 * m * f1))
    # Quantities of interest
    fstar <- ert * c2                     # state price density q(K)
    delta <- f + s * f1 / x               # dC/dS
    gamma <- 2 * f1 / x + s * f2 / (x^2)  # d^2C/dS^2
    return(list(fstar = fstar, delta = delta, gamma = gamma))
}
# BlackScholes price for a European Call or Put Option
BlackScholes <- function(S, K, r, sigma, tau, task) {
    # Price of a European call or put under the Black-Scholes model.
    #
    # Arguments:
    #   S     - spot price
    #   K     - exercise price
    #   r     - risk-free interest rate (between 0 and 1)
    #   sigma - volatility
    #   tau   - time to maturity in years (0 = expiry day)
    #   task  - "call"/"Call"/"CALL" or 1 for a call,
    #           "put"/"Put"/"PUT" or 0 for a put
    # Returns the option price, floored at zero.
    S <- c(S)
    K <- c(K)
    tau <- c(tau)
    sigma <- c(sigma)
    r <- c(r)
    # Map the textual task specification onto 1 (call) / 0 (put)
    if (task == "call" || task == "Call" || task == "CALL") {
        task <- 1
    } else if (task == "put" || task == "Put" || task == "PUT") {
        task <- 0
    }
    # Input validation (same checks and messages as always)
    if (min(S) <= 0) {
        stop("BlackScholes: price needs to be larger than 0")
    }
    if (min(K) <= 0) {
        stop("BlackScholes: strike price needs to be larger than 0")
    }
    if ((task != 0) & (task != 1)) {
        stop("BlackScholes: task needs to be either 1 or 0")
    }
    if ((r <= 0) | (r >= 1)) {
        stop("BlackScholes: interest rate needs to be between 0 and 1")
    }
    # NOTE(review): unreachable after the previous check (r <= 0 already
    # stops); kept to preserve the original behavior exactly.
    if (min(r) < 0) {
        stop("BlackScholes: interest rate can not be negative")
    }
    if (min(sigma) <= 0) {
        stop("BlackScholes: volatility needs to be larger than 0")
    }
    if (min(tau) < 0) {
        stop("BlackScholes: time to expiration can not be negative")
    }
    # Black-Scholes formula #
    expired <- (tau == 0) # at expiry the (!expired)+expired trick yields
                          # the intrinsic value and avoids division by zero
    y <- (log(S / K) + (r - sigma^2 / 2) * tau) / (sigma * sqrt(tau) + expired)
    if (task == 1) {
        price <- S * (pnorm(y + sigma * sqrt(tau)) * (!expired) + expired) -
            K * exp(-r * tau) * (pnorm(y) * (!expired) + expired)
    }
    if (task == 0) {
        price <- K * exp(-r * tau) * (pnorm(-y) * (!expired) + expired) -
            S * (pnorm(-y - sigma * sqrt(tau)) * (!expired) + expired)
    }
    # Floor at zero
    (price > 0) * price
}
# Function to find BS Implied Vol using Bisection Method
ImplVola <- function(S, K, Time, r, market, type) {
    # Black-Scholes implied volatility via interval bisection.
    # Brackets the volatility in [0.001, 1], starting at 0.20, and
    # returns NA if no convergence after 1000 iterations.
    lower <- 0.001
    upper <- 1
    vol <- 0.20
    iter <- 0
    pricing_error <- BlackScholes(S, K, r, vol, Time, type) - market
    # Repeat until the error is sufficiently small or the counter hits 1000
    while (abs(pricing_error) > 0.00001 && iter < 1000) {
        if (pricing_error < 0) {
            # Model price too low: move the lower bound up
            lower <- vol
            vol <- (upper + vol) / 2
        } else {
            # Model price too high: move the upper bound down
            upper <- vol
            vol <- (lower + vol) / 2
        }
        pricing_error <- BlackScholes(S, K, r, vol, Time, type) - market
        iter <- iter + 1
    }
    # NA signals failure to converge
    if (iter == 1000) NA else vol
}
############################ Main Computation ############################
# Day of January 1997 to load from the data file
date = 10
# SPD metric switch used further below: 1 = strike metric, 0 = moneyness
metric = 1
# Load the option data and keep only the rows for the chosen day
dataloaded = read.table("XFGData9701.dat")
data = dataloaded[which((dataloaded[,1]==date)),]
type = data[,4] # 1 for calls 0 for puts
maturity = data[,5] # Maturity in calendar days.
K = data[,6] # K strike price
OP = data[,7] # OP option price
S = data[,8]
# S is the underlying price corrected for future dividends
# Thus, S depends on the date and the maturity of the option.
IR = data[,9] # Interest rate for the given maturity
IVola = data[,10] # Implied volatility of the option
SpotPrice = data[,11] # It is the real spot price not corrected for future dividends!
mat = maturity/365 # mat = maturity in years
FP = S*exp(IR*mat) # Future price
##########################################################################################################
# Here, we compute the implied volatilities of the options by bisection
# (ImplVola above) and estimate the IV surface with local polynomials
# using the Epanechnikov kernel.
##########################################################################################################
data = cbind(S,K,IR,mat,OP,type)
metric2 = 0 # metric2=0 specifies the moneyness metric (if =1 then strike metric)
stepwidth = c(0.01,0.025) # grid steps: 0.01 in moneyness, 0.025 in maturity
length1 = c(41,41) # number of grid points in each dimension
bandwidth = c(0.15,0.3) # bandwidths: 0.15 for moneyness, 0.3 for maturity
firstXF = 0.8 # firstXF and
lastXF = 1.2 # lastXF define the range of the estimation for the moneyness dimension.
firstMat = 0 # firstMat and
lastMat = 1 # lastMat define the range of the estimation for the maturity dimension.
##########################################################################################################
Price = data[,1] # Spot price
Strike = data[,2] # Strike price
Rate = data[,3] # Risk-free interest rate
Time = data[,4] # Time to maturity
Value = data[,5] # Market value
Class = data[,6] # Call==1 | Put==0
mon = data[,2]/(data[,1]*exp(data[,3]*data[,4])) # moneyness K/F with F = S*exp(r*tau)
data = cbind(data,mon)
x = data
n = length(x[,1]) # number of observations
# Back out one implied volatility per observed option price
iv = rep(0,n)
for(i in 1:n){
    iv[i] = ImplVola(S=Price[i], K=Strike[i],Time=Time[i], r=Rate[i], market=Value[i],type=Class[i]);
}
imax = ceiling((lastXF-firstXF)/stepwidth[1])
jmax = ceiling((lastMat-firstMat)/stepwidth[2])
result = matrix(1,imax*jmax,1)
# Regular evaluation grid in (maturity, moneyness)
grid1 = seq(firstMat,lastMat,by=stepwidth[2]) # maturity grid
grid2 = seq(firstXF,lastXF,by=stepwidth[1]) # moneyness grid
x12 = expand.grid(grid1,grid2) # all (maturity, moneyness) combinations
x1 = x12[[1]]
x2 = x12[[2]]
MON = x2
MAT = x1
dataf = data.frame(iv=iv, mon=mon, Time=Time)
# Local linear smoothing of the implied vols over (moneyness, maturity)
lpfit = locLinSmootherC(x=cbind(mon,Time),y=cbind(iv,iv),bw=bandwidth,xeval=cbind(MON,MAT),kernel=EpaK)
IV = lpfit[1:(nrow(lpfit)/2),3]
IVSurface = cbind(c(MON),c(MAT),c(IV)) # columns: moneyness, maturity, fitted IV
###########################################################################################################
# Loop over five maturities mat = 0.025, 0.05, ..., 0.125 and compute the
# SPD, Delta and Gamma for each of them. Results are accumulated column-wise
# in `spd`, `delta` and `gamma` (two columns per maturity: x-axis, value).
i=1
k=0
while(i<6){
    mat=i*0.025
    ################################################################################################
    # Compute (S-Div) and r for the current maturity `mat` by linear
    # interpolation between the two series of options whose observed
    # maturities are closest to mat from below and from above.
    ################################################################################################
    temp1 = subset(cbind(data[,1],data[,3],data[,4]),data[,4]<=mat)
    temp2 = subset(cbind(data[,1],data[,3],data[,4]),data[,4]>mat)
    # Options at the nearest observed maturity below (inf) and above (sup) mat
    SandRMatinf = subset(cbind(temp1[,1],temp1[,2],temp1[,3]),round(temp1[,3],8)==round(mat-min(abs(mat-temp1[,3])),8))
    SandRMatsup = subset(cbind(temp2[,1],temp2[,2],temp2[,3]),round(temp2[,3],8)==round(mat+min(abs(mat-temp2[,3])),8))
    # Linear interpolation of the dividend-corrected price ...
    a = (mean(SandRMatsup[,1])-mean(SandRMatinf[,1]))/(mean(SandRMatsup[,3])-mean(SandRMatinf[,3]))
    b = mean(SandRMatinf[,1])
    x = mat-mean(SandRMatinf[,3])
    sMat = a*x+b
    # ... and of the interest rate at maturity mat
    a = (mean(SandRMatsup[,2])-mean(SandRMatinf[,2]))/(mean(SandRMatsup[,3])-mean(SandRMatinf[,3]))
    b = mean(SandRMatinf[,2])
    rMat = a*x+b
    ##################################################################################################
    # Extract the smile at maturity mat from the fitted IV surface and
    # estimate its derivatives with a local polynomial fit.
    ##################################################################################################
    dataMatbis = subset(IVSurface,IVSurface[,2]==mat)
    # First column becomes moneyness S/K (inverse of the forward moneyness grid)
    dataMat = cbind(1/(dataMatbis[,1]*exp(rMat*mat)),dataMatbis[,2:3])
    hh = 0.1 # bandwidth
    # estimation of the smile
    smileMat = cbind(dataMat[,1],dataMat[,3])
    smileMat = smileMat[order(smileMat[,1]),]
    sM = data.frame(S=smileMat[,1],M=smileMat[,2])
    # Local quadratic fit gives the first and second derivative of the smile
    # w.r. to the moneyness dimension (columns 3 and 4 of lpFit).
    dersmileMat = locpol(M~S,data=sM,bw=hh,kernel=EpaK,deg=2,xevalLen=nrow(smileMat))$lpFit
    lpspd = spdbl(m=smileMat[,1], sigma=smileMat[,2], sigma1=dersmileMat[,3], sigma2=dersmileMat[,4], s=mean(sMat), r=mean(rMat), tau=mat)
    # First pass initialises the result matrices, later passes append columns
    if (k==0){
        if (metric!=0){
            spd = cbind((1/(smileMat[,1]/mean(sMat))),(lpspd$fstar))
        }else{
            spd = cbind((1/(smileMat[,1]*exp(rMat*mat))),(lpspd$fstar*mean(sMat*exp(rMat*mat))))
        }
        delta = cbind((1/(smileMat[,1]/mean(sMat))),(lpspd$delta))
        gamma = cbind((1/(smileMat[,1]/mean(sMat))),(lpspd$gamma))
    }else{
        if (metric!=0){
            spd = cbind(spd,(1/(smileMat[,1]/mean(sMat))),(lpspd$fstar))
        }else{
            spd = cbind(spd,(1/(smileMat[,1]*exp(rMat*mat))),(lpspd$fstar*mean(sMat*exp(rMat*mat))))
        }
        delta = cbind(delta,(1/(smileMat[,1]/mean(sMat))),(lpspd$delta))
        gamma = cbind(gamma,(1/(smileMat[,1]/mean(sMat))),(lpspd$gamma))
    }
    k = k+1
    i = i+1
}
# Plot the local polynomial SPD, Delta and Gamma for the first three
# maturities of the loop above.
# NOTE(review): the variable names suggest maturities 0.125/0.25/0.375, but
# the loop above uses mat = i*0.025, so these are actually the results for
# mat = 0.025, 0.05 and 0.075 — presumably legacy names; confirm intent.
spd0125 = spd[,1:2]
delta0125 = delta[,1:2]
gamma0125 = gamma[,1:2]
spd025 = spd[,3:4]
delta025 = delta[,3:4]
gamma025 = gamma[,3:4]
spd0375 = spd[,5:6]
delta0375 = delta[,5:6]
gamma0375 = gamma[,5:6]
# `measure` is set here but not used in the plotting code below
if (metric!=0){
    measure = 0.0001
}else{
    measure = 1
}
# Plots
plot(spd0125[,1:2], col="blue3",type="l",lwd=2,xlab="Stock price at expiry",ylab="Density")
lines(spd025[,1:2],col="black",lty="dashed",lwd=2)
lines(spd0375[,1:2],col="red3",lty="dotted",lwd=2)
title(paste("Semi-parametric SPD: ",date,"-01-1997",sep=""))
dev.new()
plot(delta0125[,1:2], col="blue3",type="l",lwd=2,xlab="Strike prices",ylab="Delta")
lines(delta025[,1:2],col="black",lty="dashed",lwd=2)
lines(delta0375[,1:2],col="red3",lty="dotted",lwd=2)
title(paste("Semi-parametric Delta: ",date,"-01-1997",sep=""))
dev.new()
plot(gamma0125[,1:2], col="blue3",type="l",lwd=2,xlab="Strike prices",ylab="Gamma")
lines(gamma025[,1:2],col="black",lty="dashed",lwd=2)
lines(gamma0375[,1:2],col="red3",lty="dotted",lwd=2)
title(paste("Semi-parametric Gamma: ",date,"-01-1997",sep=""))
|
/_Done/XFGSPDoneday/XFGSPDoneday.R
|
no_license
|
QuantLet/XFG-ToDo
|
R
| false
| false
| 12,449
|
r
|
# Clear the workspace and close all graphics devices (original script behavior)
rm(list = ls(all = TRUE))
graphics.off()
# Install locpol only when it is not already available, instead of
# unconditionally reinstalling it (and hitting the network) on every run.
if (!requireNamespace("locpol", quietly = TRUE)) {
    install.packages("locpol")
}
library(locpol)
# Specify your working directory
# setwd("Users...")
############################ SUBROUTINE ################################
spdbl <- function(m, sigma, sigma1, sigma2, s, r, tau) {
    # Breeden and Litzenberger (1978) state price density from a
    # semiparametric Black-Scholes specification (Rookley 1997). Uses an
    # estimate of the volatility smile and its first and second derivatives
    # to recover the SPD together with the Delta and Gamma of the option.
    # This method can only be applied to European options (due to the
    # assumptions).
    #
    # Arguments:
    #   m      - moneyness S/K (vector)
    #   sigma  - implied volatility smile evaluated at m
    #   sigma1 - first derivative of the smile with respect to m
    #   sigma2 - second derivative of the smile with respect to m
    #   s      - underlying price (corrected for dividends)
    #   r      - risk-free interest rate
    #   tau    - time to maturity in years
    # Returns:
    #   list(fstar = state price density over strikes,
    #        delta = option delta, gamma = option gamma)
    #
    # Fix: removed dead locals of the original (`rm`, which shadowed
    # base::rm, `ones`, `c1` and `cdf` were computed but never used).
    st <- sqrt(tau)
    ert <- exp(r * tau)
    rt <- r * tau
    # Modified Black-Scholes terms scaled by S-div instead of F
    d1 <- (log(m) + tau * (r + 0.5 * (sigma^2))) / (sigma * st)
    d2 <- d1 - sigma * st
    # Scaled call price C/S as a function of moneyness
    f <- pnorm(d1) - pnorm(d2) / (ert * m)
    # First derivative of the d1 term w.r.t. moneyness
    d11 <- (1 / (m * sigma * st)) - (1 / (st * (sigma^2))) * ((log(m) + tau * r) * sigma1) + 0.5 * st * sigma1
    # First derivative of the d2 term
    d21 <- d11 - st * sigma1
    # Second derivative of the d1 term
    d12 <- -(1 / (st * (m^2) * sigma)) - sigma1 / (st * m * (sigma^2)) + sigma2 * (0.5 * st - (log(m) + rt) / (st * (sigma^2))) + sigma1 * (2 * sigma1 * (log(m) + rt) / (st * sigma^3) - 1 / (st * m * sigma^2))
    # Second derivative of the d2 term
    d22 <- d12 - st * sigma2
    # Derivatives of the scaled call price; see Rookley (1997)
    f1 <- dnorm(d1) * d11 + (1 / ert) * ((-dnorm(d2) * d21) / m + pnorm(d2) / (m^2))
    f2 <- dnorm(d1) * d12 - d1 * dnorm(d1) * (d11^2) - (1 / (ert * m) * dnorm(d2) * d22) + ((dnorm(d2) * d21) / (ert * m^2)) + (1 / (ert * m) * d2 * dnorm(d2) * (d21^2)) - (2 * pnorm(d2) / (ert * (m^3))) + (1 / (ert * (m^2)) * dnorm(d2) * d21)
    # Recover the strike price and the second strike derivative of the call
    x <- s / m
    c2 <- s * ((1 / x^2) * ((m^2) * f2 + 2 * m * f1))
    # Quantities of interest
    fstar <- ert * c2                     # state price density q(K)
    delta <- f + s * f1 / x               # dC/dS
    gamma <- 2 * f1 / x + s * f2 / (x^2)  # d^2C/dS^2
    return(list(fstar = fstar, delta = delta, gamma = gamma))
}
# BlackScholes price for a European Call or Put Option
BlackScholes <- function(S, K, r, sigma, tau, task) {
    # Price of a European call or put under the Black-Scholes model.
    #
    # Arguments:
    #   S     - spot price
    #   K     - exercise price
    #   r     - risk-free interest rate (between 0 and 1)
    #   sigma - volatility
    #   tau   - time to maturity in years (0 = expiry day)
    #   task  - "call"/"Call"/"CALL" or 1 for a call,
    #           "put"/"Put"/"PUT" or 0 for a put
    # Returns the option price, floored at zero.
    S <- c(S)
    K <- c(K)
    tau <- c(tau)
    sigma <- c(sigma)
    r <- c(r)
    # Map the textual task specification onto 1 (call) / 0 (put)
    if (task == "call" || task == "Call" || task == "CALL") {
        task <- 1
    } else if (task == "put" || task == "Put" || task == "PUT") {
        task <- 0
    }
    # Input validation (same checks and messages as always)
    if (min(S) <= 0) {
        stop("BlackScholes: price needs to be larger than 0")
    }
    if (min(K) <= 0) {
        stop("BlackScholes: strike price needs to be larger than 0")
    }
    if ((task != 0) & (task != 1)) {
        stop("BlackScholes: task needs to be either 1 or 0")
    }
    if ((r <= 0) | (r >= 1)) {
        stop("BlackScholes: interest rate needs to be between 0 and 1")
    }
    # NOTE(review): unreachable after the previous check (r <= 0 already
    # stops); kept to preserve the original behavior exactly.
    if (min(r) < 0) {
        stop("BlackScholes: interest rate can not be negative")
    }
    if (min(sigma) <= 0) {
        stop("BlackScholes: volatility needs to be larger than 0")
    }
    if (min(tau) < 0) {
        stop("BlackScholes: time to expiration can not be negative")
    }
    # Black-Scholes formula #
    expired <- (tau == 0) # at expiry the (!expired)+expired trick yields
                          # the intrinsic value and avoids division by zero
    y <- (log(S / K) + (r - sigma^2 / 2) * tau) / (sigma * sqrt(tau) + expired)
    if (task == 1) {
        price <- S * (pnorm(y + sigma * sqrt(tau)) * (!expired) + expired) -
            K * exp(-r * tau) * (pnorm(y) * (!expired) + expired)
    }
    if (task == 0) {
        price <- K * exp(-r * tau) * (pnorm(-y) * (!expired) + expired) -
            S * (pnorm(-y - sigma * sqrt(tau)) * (!expired) + expired)
    }
    # Floor at zero
    (price > 0) * price
}
# Function to find BS Implied Vol using Bisection Method
ImplVola <- function(S, K, Time, r, market, type) {
    # Black-Scholes implied volatility via interval bisection.
    # Brackets the volatility in [0.001, 1], starting at 0.20, and
    # returns NA if no convergence after 1000 iterations.
    lower <- 0.001
    upper <- 1
    vol <- 0.20
    iter <- 0
    pricing_error <- BlackScholes(S, K, r, vol, Time, type) - market
    # Repeat until the error is sufficiently small or the counter hits 1000
    while (abs(pricing_error) > 0.00001 && iter < 1000) {
        if (pricing_error < 0) {
            # Model price too low: move the lower bound up
            lower <- vol
            vol <- (upper + vol) / 2
        } else {
            # Model price too high: move the upper bound down
            upper <- vol
            vol <- (lower + vol) / 2
        }
        pricing_error <- BlackScholes(S, K, r, vol, Time, type) - market
        iter <- iter + 1
    }
    # NA signals failure to converge
    if (iter == 1000) NA else vol
}
############################ Main Computation ############################
# Day of January 1997 to load from the data file
date = 10
# SPD metric switch used further below: 1 = strike metric, 0 = moneyness
metric = 1
# Load the option data and keep only the rows for the chosen day
dataloaded = read.table("XFGData9701.dat")
data = dataloaded[which((dataloaded[,1]==date)),]
type = data[,4] # 1 for calls 0 for puts
maturity = data[,5] # Maturity in calendar days.
K = data[,6] # K strike price
OP = data[,7] # OP option price
S = data[,8]
# S is the underlying price corrected for future dividends
# Thus, S depends on the date and the maturity of the option.
IR = data[,9] # Interest rate for the given maturity
IVola = data[,10] # Implied volatility of the option
SpotPrice = data[,11] # It is the real spot price not corrected for future dividends!
mat = maturity/365 # mat = maturity in years
FP = S*exp(IR*mat) # Future price
##########################################################################################################
# Here, we compute the implied volatilities of the options by bisection
# (ImplVola above) and estimate the IV surface with local polynomials
# using the Epanechnikov kernel.
##########################################################################################################
data = cbind(S,K,IR,mat,OP,type)
metric2 = 0 # metric2=0 specifies the moneyness metric (if =1 then strike metric)
stepwidth = c(0.01,0.025) # grid steps: 0.01 in moneyness, 0.025 in maturity
length1 = c(41,41) # number of grid points in each dimension
bandwidth = c(0.15,0.3) # bandwidths: 0.15 for moneyness, 0.3 for maturity
firstXF = 0.8 # firstXF and
lastXF = 1.2 # lastXF define the range of the estimation for the moneyness dimension.
firstMat = 0 # firstMat and
lastMat = 1 # lastMat define the range of the estimation for the maturity dimension.
##########################################################################################################
Price = data[,1] # Spot price
Strike = data[,2] # Strike price
Rate = data[,3] # Risk-free interest rate
Time = data[,4] # Time to maturity
Value = data[,5] # Market value
Class = data[,6] # Call==1 | Put==0
mon = data[,2]/(data[,1]*exp(data[,3]*data[,4])) # moneyness K/F with F = S*exp(r*tau)
data = cbind(data,mon)
x = data
n = length(x[,1]) # number of observations
# Back out one implied volatility per observed option price
iv = rep(0,n)
for(i in 1:n){
    iv[i] = ImplVola(S=Price[i], K=Strike[i],Time=Time[i], r=Rate[i], market=Value[i],type=Class[i]);
}
imax = ceiling((lastXF-firstXF)/stepwidth[1])
jmax = ceiling((lastMat-firstMat)/stepwidth[2])
result = matrix(1,imax*jmax,1)
# Regular evaluation grid in (maturity, moneyness)
grid1 = seq(firstMat,lastMat,by=stepwidth[2]) # maturity grid
grid2 = seq(firstXF,lastXF,by=stepwidth[1]) # moneyness grid
x12 = expand.grid(grid1,grid2) # all (maturity, moneyness) combinations
x1 = x12[[1]]
x2 = x12[[2]]
MON = x2
MAT = x1
dataf = data.frame(iv=iv, mon=mon, Time=Time)
# Local linear smoothing of the implied vols over (moneyness, maturity)
lpfit = locLinSmootherC(x=cbind(mon,Time),y=cbind(iv,iv),bw=bandwidth,xeval=cbind(MON,MAT),kernel=EpaK)
IV = lpfit[1:(nrow(lpfit)/2),3]
IVSurface = cbind(c(MON),c(MAT),c(IV)) # columns: moneyness, maturity, fitted IV
###########################################################################################################
## Loop over five maturities (mat = i*0.025, i = 1..5, i.e. 0.025..0.125):
## for each, interpolate the spot/rate, fit the IV smile and its derivatives,
## and compute the SPD plus delta/gamma via spdbl().
## NOTE(review): the result names below (spd0125, spd025, spd0375) suggest
## maturities 0.125/0.25/0.375, but with `mat = i*0.025` the extracted columns
## correspond to 0.025/0.050/0.075 -- confirm the intended step size.
i=1
k=0
while(i<6){
mat=i*0.025
################################################################################################
#Here, we compute (S-Div) and r for tau=0.25 by linear interpolation between the two series of
#options around tau=0.25. In this example, we use the options with maturity tau=0.21096 and
#0.46027.
################################################################################################
# Split observations by maturity around `mat`, then pick the nearest maturity
# on either side and interpolate spot (sMat) and rate (rMat) linearly.
temp1 = subset(cbind(data[,1],data[,3],data[,4]),data[,4]<=mat)
temp2 = subset(cbind(data[,1],data[,3],data[,4]),data[,4]>mat)
SandRMatinf = subset(cbind(temp1[,1],temp1[,2],temp1[,3]),round(temp1[,3],8)==round(mat-min(abs(mat-temp1[,3])),8))
SandRMatsup = subset(cbind(temp2[,1],temp2[,2],temp2[,3]),round(temp2[,3],8)==round(mat+min(abs(mat-temp2[,3])),8))
a = (mean(SandRMatsup[,1])-mean(SandRMatinf[,1]))/(mean(SandRMatsup[,3])-mean(SandRMatinf[,3]))
b = mean(SandRMatinf[,1])
x = mat-mean(SandRMatinf[,3])
sMat = a*x+b
a = (mean(SandRMatsup[,2])-mean(SandRMatinf[,2]))/(mean(SandRMatsup[,3])-mean(SandRMatinf[,3]))
b = mean(SandRMatinf[,2])
rMat = a*x+b
##################################################################################################
##################################################################################################
# Slice the IV surface at this maturity and convert moneyness back to a
# forward-discounted strike axis.
dataMatbis = subset(IVSurface,IVSurface[,2]==mat)
dataMat = cbind(1/(dataMatbis[,1]*exp(rMat*mat)),dataMatbis[,2:3])
hh = 0.1 # bandwidth
# estimation of the smile
smileMat = cbind(dataMat[,1],dataMat[,3])
smileMat = smileMat[order(smileMat[,1]),]
sM = data.frame(S=smileMat[,1],M=smileMat[,2])
# estimation of the first derivative of the smile w.r. to the strike dimension.
# locpol with deg=2 also yields the second derivative, both consumed by spdbl().
dersmileMat = locpol(M~S,data=sM,bw=hh,kernel=EpaK,deg=2,xevalLen=nrow(smileMat))$lpFit
lpspd = spdbl(m=smileMat[,1], sigma=smileMat[,2], sigma1=dersmileMat[,3], sigma2=dersmileMat[,4], s=mean(sMat), r=mean(rMat), tau=mat)
# Accumulate (x, density/greek) column pairs across maturities; `metric`
# switches between the moneyness and strike scaling of the density.
# NOTE(review): `metric` is read here but only `metric2` was assigned above.
if (k==0){
if (metric!=0){
spd = cbind((1/(smileMat[,1]/mean(sMat))),(lpspd$fstar))
}else{
spd = cbind((1/(smileMat[,1]*exp(rMat*mat))),(lpspd$fstar*mean(sMat*exp(rMat*mat))))
}
delta = cbind((1/(smileMat[,1]/mean(sMat))),(lpspd$delta))
gamma = cbind((1/(smileMat[,1]/mean(sMat))),(lpspd$gamma))
}else{
if (metric!=0){
spd = cbind(spd,(1/(smileMat[,1]/mean(sMat))),(lpspd$fstar))
}else{
spd = cbind(spd,(1/(smileMat[,1]*exp(rMat*mat))),(lpspd$fstar*mean(sMat*exp(rMat*mat))))
}
delta = cbind(delta,(1/(smileMat[,1]/mean(sMat))),(lpspd$delta))
gamma = cbind(gamma,(1/(smileMat[,1]/mean(sMat))),(lpspd$gamma))
}
k = k+1
i = i+1
}
# plot local polynomial SPD
# Pull the per-maturity column pairs back out of the accumulated matrices.
spd0125 = spd[,1:2]
delta0125 = delta[,1:2]
gamma0125 = gamma[,1:2]
spd025 = spd[,3:4]
delta025 = delta[,3:4]
gamma025 = gamma[,3:4]
spd0375 = spd[,5:6]
delta0375 = delta[,5:6]
gamma0375 = gamma[,5:6]
# NOTE(review): `measure` is computed but never used in this chunk.
if (metric!=0){
measure = 0.0001
}else{
measure = 1
}
# Plots
# NOTE(review): `date` used in the titles is defined outside this chunk.
plot(spd0125[,1:2], col="blue3",type="l",lwd=2,xlab="Stock price at expiry",ylab="Density")
lines(spd025[,1:2],col="black",lty="dashed",lwd=2)
lines(spd0375[,1:2],col="red3",lty="dotted",lwd=2)
title(paste("Semi-parametric SPD: ",date,"-01-1997",sep=""))
dev.new()
plot(delta0125[,1:2], col="blue3",type="l",lwd=2,xlab="Strike prices",ylab="Delta")
lines(delta025[,1:2],col="black",lty="dashed",lwd=2)
lines(delta0375[,1:2],col="red3",lty="dotted",lwd=2)
title(paste("Semi-parametric Delta: ",date,"-01-1997",sep=""))
dev.new()
plot(gamma0125[,1:2], col="blue3",type="l",lwd=2,xlab="Strike prices",ylab="Gamma")
lines(gamma025[,1:2],col="black",lty="dashed",lwd=2)
lines(gamma0375[,1:2],col="red3",lty="dotted",lwd=2)
title(paste("Semi-parametric Gamma: ",date,"-01-1997",sep=""))
|
# Small R6 wrapper around the `git` command line, bound to one directory.
Git <- R6Class(
  "Git",
  public = list(
    # Remember the working directory used for every git invocation.
    initialize = function(path) {
      private$path <- path
    },
    # Run git for its side effects; abort via stopc() on a nonzero exit.
    cmd = function(...) {
      argv <- c(...)
      message("git ", paste(argv, collapse = " "))
      exit_code <- withr::with_dir(private$path, system2("git", argv))
      if (exit_code != 0) {
        stopc("git exited with status ", exit_code)
      }
    },
    # Run git and capture its standard output as a character vector.
    query = function(...) {
      argv <- c(...)
      message("git ", paste(argv, collapse = " "))
      withr::with_dir(private$path, system2("git", argv, stdout = TRUE))
    },
    # Create the directory if needed and open/initialize the repository.
    init_repo = function() {
      message("Initializing Git repo at ", private$path)
      dir.create(private$path, recursive = TRUE, showWarnings = FALSE)
      private$repo <- git2r::init(private$path)
    },
    # Accessor for the underlying git2r repository handle.
    get_repo = function() {
      private$repo
    }
  ),
  private = list(
    path = NULL,
    repo = NULL
  )
)
# Step (R6): prepare a repository for a later push deploy.
#
# Validates the path/branch/orphan/checkout combination, initializes the
# target repo, copies author information from the HEAD commit of the current
# CI checkout, registers a deploy remote, and (unless orphaning) fetches and
# optionally checks out the target branch. Companion class to DoPushDeploy;
# the public entry point is step_setup_push_deploy().
SetupPushDeploy <- R6Class( # nolint
  "SetupPushDeploy",
  inherit = TicStep,
  public = list(
    initialize = function(path = ".", branch = NULL, orphan = FALSE,
                          remote_url = NULL, checkout = TRUE) {
      # Reject argument combinations that cannot work before doing anything.
      if (is.null(branch) && orphan) {
        stopc("Cannot orphan the branch that has been used for the CI run.")
      }
      if (is.null(branch) && path != ".") {
        stopc("Must specify branch name if `path` is given.")
      }
      if (path != "." && !checkout && !orphan) {
        stopc(
          "If `checkout` is FALSE and `path` is set, `orphan` must be TRUE."
        )
      }
      # Defaults: current CI branch and the current GitHub repo over SSH.
      if (is.null(branch)) {
        branch <- ci_get_branch()
      }
      if (is.null(remote_url)) {
        remote_url <- paste0("git@github.com:", ci_get_slug(), ".git")
      }
      private$git <- Git$new(path)
      private$branch <- branch
      private$orphan <- orphan
      private$remote_url <- remote_url
      private$checkout <- checkout
    },
    prepare = function() {
      verify_install("git2r")
      super$prepare()
    },
    run = function() {
      private$git$init_repo()
      private$init_author()
      private$fetch()
    }
  ),
  private = list(
    git = NULL,
    branch = NULL,
    orphan = FALSE,
    remote_url = NULL,
    checkout = FALSE,
    repo = NULL,
    remote_name = "tic-remote", # HACK
    # Copy author name/email from the HEAD commit of the *current* repo
    # ("."), so deploy commits are attributed like the triggering commit.
    init_author = function() {
      latest_commit <- get_head_commit(git2r_head(git2r::repository(".")))
      print(latest_commit)
      latest_author <- git2r_attrib(latest_commit, "author")
      print(latest_author)
      git2r::config(
        private$git$get_repo(),
        user.name = git2r_attrib(latest_author, "name"),
        user.email = git2r_attrib(latest_author, "email")
      )
    },
    # Register the deploy remote (keeping any pre-existing one with the same
    # name), switch to the target branch, and -- unless orphaning -- try to
    # fetch it; fetch failures are logged and the branch is created fresh.
    fetch = function() {
      remote_name <- private$remote_name
      if (remote_name %in% git2r::remotes(private$git$get_repo())) {
        message("Not overriding existing remote ", remote_name)
      } else {
        message("Adding remote ", remote_name, " with URL ", private$remote_url)
        git2r::remote_add(
          private$git$get_repo(), remote_name, private$remote_url
        )
      }
      message("Setting branch name to ", private$branch)
      private$git$cmd("checkout", "-B", private$branch)
      if (!private$orphan) {
        message("Fetching from remote ", remote_name)
        tryCatch(
          { # nolint
            remote_branch <- private$try_fetch()
            if (!is.null(remote_branch)) {
              message("Remote branch is ", remote_branch$name)
              if (private$checkout) {
                git2r::checkout(
                  private$git$get_repo(),
                  private$branch,
                  create = TRUE,
                  force = TRUE
                )
              }
            }
          },
          error = function(e) {
            # Non-fatal: a missing remote branch means we will create it.
            message(
              conditionMessage(e),
              "\nCould not fetch branch, will attempt to create new"
            )
          }
        )
      }
    },
    # Fetch the target branch from the deploy remote and return the matching
    # remote-tracking branch object (or NULL if it does not exist).
    try_fetch = function() {
      remote_name <- private$remote_name
      private$git$cmd(
        "fetch", remote_name, paste0("refs/heads/", private$branch)
      )
      branches <- git2r::branches(private$git$get_repo(), "remote")
      branches[[paste0(remote_name, "/", private$branch)]]
    }
  )
)
#' Step: Setup push deploy
#'
#' Clones a repo, inits author information, and sets up remotes
#' for a subsequent [step_do_push_deploy()].
#'
#' @param path `[string]`\cr
#' Path to the repository, default `"."` which means setting up the current
#' repository.
#' @param branch `[string]`\cr
#' Target branch, default: current branch.
#' @param orphan `[flag]`\cr
#' Create and force-push an orphan branch consisting of only one commit?
#' This can be useful e.g. for `path = "docs", branch = "gh-pages"`,
#' but cannot be applied for pushing to the current branch.
#' @param remote_url `[string]`\cr
#' The URL of the remote Git repository to push to, defaults to the
#' current GitHub repository.
#' @param checkout `[flag]`\cr
#' Check out the current contents of the repository? Defaults to `TRUE`,
#' set to `FALSE` if the build process relies on existing contents or
#' if you deploy to a different branch.
#'
#' @family deploy steps
#' @family steps
#' @export
#' @examples
#' \dontrun{
#' dsl_init()
#'
#' get_stage("deploy") %>%
#' add_step(step_setup_push_deploy(path = "docs", branch = "gh-pages")) %>%
#' add_step(step_build_pkgdown())
#'
#' # This example needs a Git repository
#' if (rlang::is_installed("git2r") && git2r::in_repository()) {
#' # Deployment only works if a companion step_do_push_deploy() is added
#' get_stage("deploy") %>%
#' add_step(step_do_push_deploy(path = "docs"))
#' }
#'
#' dsl_get()
#' }
step_setup_push_deploy <- function(path = ".", branch = NULL, orphan = FALSE,
                                   remote_url = NULL, checkout = TRUE) {
  # Thin factory around the R6 step class; all argument validation happens
  # in SetupPushDeploy$initialize().
  step_args <- list(
    path = path, branch = branch, orphan = orphan,
    remote_url = remote_url, checkout = checkout
  )
  do.call(SetupPushDeploy$new, step_args)
}
# Step (R6): commit and push from a repository prepared by SetupPushDeploy.
#
# Stages `commit_paths`, commits with `commit_message` (default: a message
# derived from CI build metadata that suppresses recursive CI runs), then
# rebases the change onto any commits that arrived upstream in the meantime
# via fetch + cherry-pick before pushing. Skipped on tag builds.
# The public entry point is step_do_push_deploy().
DoPushDeploy <- R6Class(
  "DoPushDeploy",
  inherit = TicStep,
  public = list(
    initialize = function(path = ".", commit_message = NULL,
                          commit_paths = ".") {
      private$git <- Git$new(path)
      if (is.null(commit_message)) {
        commit_message <- private$format_commit_message()
      }
      private$commit_message <- commit_message
      private$commit_paths <- commit_paths
    },
    # Never deploy from a tag build.
    check = function() {
      !ci_is_tag()
    },
    prepare = function() {
      verify_install("git2r")
      super$prepare()
    },
    run = function() {
      private$git$init_repo()
      # No HEAD yet means a fresh/orphan branch: the first push must be forced.
      maybe_orphan <- is.null(git2r_head(private$git$get_repo()))
      if (private$commit()) {
        private$push(force = maybe_orphan)
      }
    }
  ),
  private = list(
    git = NULL,
    commit_message = NULL,
    commit_paths = NULL,
    repo = NULL,
    remote_name = "tic-remote", # HACK
    # Stage and commit; then, if an upstream exists, replay the commit on top
    # of the fetched upstream tip. Returns TRUE when there is something to
    # push (i.e. we are ahead of upstream, or there is no upstream at all).
    commit = function() {
      message("Staging: ", paste(private$commit_paths, collapse = ", "))
      git2r::add(private$git$get_repo(), private$commit_paths)
      message("Checking changed files")
      status <- git2r::status(
        private$git$get_repo(),
        staged = TRUE,
        unstaged = FALSE, untracked = FALSE, ignored = FALSE
      )
      # Nothing staged: bail out early so run() skips the push.
      if (length(status$staged) == 0) {
        message("Nothing to commit!")
        return(FALSE)
      }
      message("Committing to ", git2r_attrib(private$git$get_repo(), "path"))
      new_commit <-
        git2r::commit(private$git$get_repo(), private$commit_message)$sha
      local <- git2r_head(private$git$get_repo())
      upstream <- git2r::branch_get_upstream(local)
      if (is.null(upstream)) {
        message("No upstream branch found")
        return(TRUE)
      }
      # Re-apply the new commit on top of the freshest upstream tip:
      # discard local noise, fetch, hard-reset to upstream, cherry-pick.
      message("Wiping repository")
      private$git$cmd("checkout .")
      private$git$cmd("clean -fdx")
      message("Pulling new changes")
      private$git$cmd("fetch")
      ## Needed to handle empty commits, pull, rebase or default cherry-pick
      ## have bad default behavior here (#160)
      private$git$cmd("reset", "--hard", git2r::branch_target(upstream))
      private$git$cmd("cherry-pick", "--no-commit", new_commit)
      private$git$cmd("commit", "--no-edit", "--allow-empty")
      # Push only when we are actually ahead of upstream.
      c_local <-
        git2r::lookup(private$git$get_repo(), git2r::branch_target(local))
      c_upstream <-
        git2r::lookup(private$git$get_repo(), git2r::branch_target(upstream))
      ab <- git2r::ahead_behind(c_local, c_upstream)
      message("Ahead: ", ab[[1]], ", behind: ", ab[[2]])
      ab[[1]] > 0
    },
    # Push HEAD to the deploy remote; `force` only for fresh/orphan branches.
    push = function(force) {
      message("Pushing to remote")
      private$git$cmd(
        "push",
        if (force) "--force",
        private$remote_name,
        "HEAD"
      )
    },
    # Default commit message: links the CI build and carries "[ci skip]" so
    # the deploy commit does not trigger another CI run.
    format_commit_message = function() {
      paste0(
        "Deploy from ", ci_get_build_number(), " [ci skip]\n\n",
        if (!is.null(ci_get_build_url())) {
          paste0("Build URL: ", ci_get_build_url(), "\n")
        },
        "Commit: ", ci_get_commit()
      )
    }
  )
)
#' Step: Perform push deploy
#'
#' @description
#' Commits and pushes to a repo prepared by [step_setup_push_deploy()].
#'
#' Deployment usually requires setting up SSH keys with
#' [use_tic()] or [travis::use_travis_deploy()].
#'
#'
#' @details
#' It is highly recommended to restrict the set of files
#' touched by the deployment with the `commit_paths` argument:
#' this step assumes that it can freely overwrite all changes to all files
#' below `commit_paths`, and will not warn in case of conflicts.
#'
#' To mitigate conflicts and race conditions to the greatest extent possible,
#' the following strategy is used:
#'
#' - The changes are committed to the branch
#' - Before pushing, new commits are fetched, and the changes are cherry-picked
#' on top of the new commits
#'
#' If no new commits were pushed after the CI run has started,
#' this strategy is equivalent to committing and pushing.
#' In the opposite case, if the remote repo has new commits,
#' the deployment is safely applied to the current tip.
#'
#' @inheritParams step_setup_push_deploy
#' @param commit_message `[string]`\cr
#' Commit message to use, defaults to a useful message linking to the CI build
#' and avoiding recursive CI runs.
#' @param commit_paths `[character]`\cr
#' Restrict the set of directories and/or files added to Git before deploying.
#' Default: deploy all files.
#'
#' @family deploy steps
#' @family steps
#'
#' @export
#' @examples
#' \dontrun{
#' dsl_init()
#'
#' # Deployment only works if a companion step_setup_push_deploy() is added
#' get_stage("deploy") %>%
#' add_step(step_setup_push_deploy(path = "docs", branch = "gh-pages")) %>%
#' add_step(step_build_pkgdown())
#'
#' if (rlang::is_installed("git2r") && git2r::in_repository()) {
#' get_stage("deploy") %>%
#' add_step(step_do_push_deploy(path = "docs"))
#' }
#'
#' dsl_get()
#' }
step_do_push_deploy <- function(path = ".", commit_message = NULL,
                                commit_paths = ".") {
  # Thin factory around the R6 step class.
  do.call(
    DoPushDeploy$new,
    list(
      path = path,
      commit_message = commit_message,
      commit_paths = commit_paths
    )
  )
}
# Step (R6): combined setup + push deploy.
#
# Wires a SetupPushDeploy step (always checkout = FALSE; orphan whenever a
# non-default `path` is deployed) to a DoPushDeploy step and delegates
# check/prepare/run to both in order.
PushDeploy <- R6Class(
  "PushDeploy",
  inherit = TicStep,
  public = list(
    initialize = function(path = ".", branch = ci_get_branch(),
                          remote_url =
                            paste0("git@github.com:", ci_get_slug(), ".git"),
                          commit_message = NULL, commit_paths = ".") {
      # A nonstandard deploy path can only be supported as an orphan branch.
      use_orphan <- !identical(path, ".")
      private$setup <- step_setup_push_deploy(
        path = path,
        branch = branch,
        orphan = use_orphan,
        remote_url = remote_url,
        checkout = FALSE
      )
      private$do <- step_do_push_deploy(
        path = path,
        commit_message = commit_message,
        commit_paths = commit_paths
      )
    },
    # Run only when both sub-steps agree.
    check = function() {
      private$setup$check() && private$do$check()
    },
    prepare = function() {
      private$setup$prepare()
      private$do$prepare()
    },
    run = function() {
      private$setup$run()
      private$do$run()
    }
  ),
  private = list(
    setup = NULL,
    do = NULL
  )
)
#' Step: Setup and perform push deploy
#'
#' @description
#' Clones a repo, inits author information, sets up remotes,
#' commits, and pushes.
#' Combines [step_setup_push_deploy()] with `checkout = FALSE` and
#' a suitable `orphan` argument,
#' and [step_do_push_deploy()].
#'
#' Deployment usually requires setting up SSH keys with
#' [use_tic()] or [travis::use_travis_deploy()].
#'
#' @details
#' Setup and deployment are combined in one step,
#' the files to be deployed must be prepared in a previous step.
#' This poses some restrictions on how the repository can be initialized,
#' in particular for a nonstandard `path` argument only `orphan = TRUE`
#' can be supported (and will be used).
#'
#' For more control, create two separate steps with
#' `step_setup_push_deploy()` and `step_do_push_deploy()`,
#' and create the files to be deployed in between these steps.
#'
#' @inheritParams step_setup_push_deploy
#' @inheritParams step_do_push_deploy
#'
#' @family deploy steps
#' @family steps
#'
#' @export
#' @examples
#' \dontrun{
#' dsl_init()
#'
#' get_stage("script") %>%
#' add_step(step_push_deploy(commit_paths = c("NAMESPACE", "man")))
#'
#' dsl_get()
#' }
step_push_deploy <- function(path = ".", branch = NULL,
                             remote_url = NULL,
                             commit_message = NULL, commit_paths = ".") {
  # Thin factory around the combined setup-and-push R6 step class.
  do.call(
    PushDeploy$new,
    list(
      path = path, branch = branch,
      remote_url = remote_url,
      commit_message = commit_message,
      commit_paths = commit_paths
    )
  )
}
|
/R/steps-git.R
|
no_license
|
krlmlr/tic
|
R
| false
| false
| 13,675
|
r
|
Git <- R6Class(
"Git",
public = list(
initialize = function(path) {
private$path <- path
},
cmd = function(...) {
args <- c(...)
message(paste("git", paste(args, collapse = " ")))
status <- withr::with_dir(private$path, system2("git", args))
if (status != 0) {
stopc("git exited with status ", status)
}
},
query = function(...) {
args <- c(...)
message(paste("git", paste(args, collapse = " ")))
withr::with_dir(private$path, system2("git", args, stdout = TRUE))
},
init_repo = function() {
message("Initializing Git repo at ", private$path)
dir.create(private$path, recursive = TRUE, showWarnings = FALSE)
private$repo <- git2r::init(private$path)
},
get_repo = function() {
private$repo
}
),
private = list(
path = NULL,
repo = NULL
)
)
SetupPushDeploy <- R6Class( # nolint
"SetupPushDeploy",
inherit = TicStep,
public = list(
initialize = function(path = ".", branch = NULL, orphan = FALSE,
remote_url = NULL, checkout = TRUE) {
if (is.null(branch) && orphan) {
stopc("Cannot orphan the branch that has been used for the CI run.")
}
if (is.null(branch) && path != ".") {
stopc("Must specify branch name if `path` is given.")
}
if (path != "." && !checkout && !orphan) {
stopc(
"If `checkout` is FALSE and `path` is set, `orphan` must be TRUE."
)
}
if (is.null(branch)) {
branch <- ci_get_branch()
}
if (is.null(remote_url)) {
remote_url <- paste0("git@github.com:", ci_get_slug(), ".git")
}
private$git <- Git$new(path)
private$branch <- branch
private$orphan <- orphan
private$remote_url <- remote_url
private$checkout <- checkout
},
prepare = function() {
verify_install("git2r")
super$prepare()
},
run = function() {
private$git$init_repo()
private$init_author()
private$fetch()
}
),
private = list(
git = NULL,
branch = NULL,
orphan = FALSE,
remote_url = NULL,
checkout = FALSE,
repo = NULL,
remote_name = "tic-remote", # HACK
init_author = function() {
latest_commit <- get_head_commit(git2r_head(git2r::repository(".")))
print(latest_commit)
latest_author <- git2r_attrib(latest_commit, "author")
print(latest_author)
git2r::config(
private$git$get_repo(),
user.name = git2r_attrib(latest_author, "name"),
user.email = git2r_attrib(latest_author, "email")
)
},
fetch = function() {
remote_name <- private$remote_name
if (remote_name %in% git2r::remotes(private$git$get_repo())) {
message("Not overriding existing remote ", remote_name)
} else {
message("Adding remote ", remote_name, " with URL ", private$remote_url)
git2r::remote_add(
private$git$get_repo(), remote_name, private$remote_url
)
}
message("Setting branch name to ", private$branch)
private$git$cmd("checkout", "-B", private$branch)
if (!private$orphan) {
message("Fetching from remote ", remote_name)
tryCatch(
{ # nolint
remote_branch <- private$try_fetch()
if (!is.null(remote_branch)) {
message("Remote branch is ", remote_branch$name)
if (private$checkout) {
git2r::checkout(
private$git$get_repo(),
private$branch,
create = TRUE,
force = TRUE
)
}
}
},
error = function(e) {
message(
conditionMessage(e),
"\nCould not fetch branch, will attempt to create new"
)
}
)
}
},
try_fetch = function() {
remote_name <- private$remote_name
private$git$cmd(
"fetch", remote_name, paste0("refs/heads/", private$branch)
)
branches <- git2r::branches(private$git$get_repo(), "remote")
branches[[paste0(remote_name, "/", private$branch)]]
}
)
)
#' Step: Setup push deploy
#'
#' Clones a repo, inits author information, and sets up remotes
#' for a subsequent [step_do_push_deploy()].
#'
#' @param path `[string]`\cr
#' Path to the repository, default `"."` which means setting up the current
#' repository.
#' @param branch `[string]`\cr
#' Target branch, default: current branch.
#' @param orphan `[flag]`\cr
#' Create and force-push an orphan branch consisting of only one commit?
#' This can be useful e.g. for `path = "docs", branch = "gh-pages"`,
#' but cannot be applied for pushing to the current branch.
#' @param remote_url `[string]`\cr
#' The URL of the remote Git repository to push to, defaults to the
#' current GitHub repository.
#' @param checkout `[flag]`\cr
#' Check out the current contents of the repository? Defaults to `TRUE`,
#' set to `FALSE` if the build process relies on existing contents or
#' if you deploy to a different branch.
#'
#' @family deploy steps
#' @family steps
#' @export
#' @examples
#' \dontrun{
#' dsl_init()
#'
#' get_stage("deploy") %>%
#' add_step(step_setup_push_deploy(path = "docs", branch = "gh-pages")) %>%
#' add_step(step_build_pkgdown())
#'
#' # This example needs a Git repository
#' if (rlang::is_installed("git2r") && git2r::in_repository()) {
#' # Deployment only works if a companion step_do_push_deploy() is added
#' get_stage("deploy") %>%
#' add_step(step_do_push_deploy(path = "docs"))
#' }
#'
#' dsl_get()
#' }
step_setup_push_deploy <- function(path = ".", branch = NULL, orphan = FALSE,
remote_url = NULL, checkout = TRUE) {
SetupPushDeploy$new(
path = path, branch = branch, orphan = orphan,
remote_url = remote_url, checkout = checkout
)
}
DoPushDeploy <- R6Class(
"DoPushDeploy",
inherit = TicStep,
public = list(
initialize = function(path = ".", commit_message = NULL,
commit_paths = ".") {
private$git <- Git$new(path)
if (is.null(commit_message)) {
commit_message <- private$format_commit_message()
}
private$commit_message <- commit_message
private$commit_paths <- commit_paths
},
check = function() {
!ci_is_tag()
},
prepare = function() {
verify_install("git2r")
super$prepare()
},
run = function() {
private$git$init_repo()
maybe_orphan <- is.null(git2r_head(private$git$get_repo()))
if (private$commit()) {
private$push(force = maybe_orphan)
}
}
),
private = list(
git = NULL,
commit_message = NULL,
commit_paths = NULL,
repo = NULL,
remote_name = "tic-remote", # HACK
commit = function() {
message("Staging: ", paste(private$commit_paths, collapse = ", "))
git2r::add(private$git$get_repo(), private$commit_paths)
message("Checking changed files")
status <- git2r::status(
private$git$get_repo(),
staged = TRUE,
unstaged = FALSE, untracked = FALSE, ignored = FALSE
)
if (length(status$staged) == 0) {
message("Nothing to commit!")
return(FALSE)
}
message("Committing to ", git2r_attrib(private$git$get_repo(), "path"))
new_commit <-
git2r::commit(private$git$get_repo(), private$commit_message)$sha
local <- git2r_head(private$git$get_repo())
upstream <- git2r::branch_get_upstream(local)
if (is.null(upstream)) {
message("No upstream branch found")
return(TRUE)
}
message("Wiping repository")
private$git$cmd("checkout .")
private$git$cmd("clean -fdx")
message("Pulling new changes")
private$git$cmd("fetch")
## Needed to handle empty commits, pull, rebase or default cherry-pick
## have bad default behavior here (#160)
private$git$cmd("reset", "--hard", git2r::branch_target(upstream))
private$git$cmd("cherry-pick", "--no-commit", new_commit)
private$git$cmd("commit", "--no-edit", "--allow-empty")
c_local <-
git2r::lookup(private$git$get_repo(), git2r::branch_target(local))
c_upstream <-
git2r::lookup(private$git$get_repo(), git2r::branch_target(upstream))
ab <- git2r::ahead_behind(c_local, c_upstream)
message("Ahead: ", ab[[1]], ", behind: ", ab[[2]])
ab[[1]] > 0
},
push = function(force) {
message("Pushing to remote")
private$git$cmd(
"push",
if (force) "--force",
private$remote_name,
"HEAD"
)
},
format_commit_message = function() {
paste0(
"Deploy from ", ci_get_build_number(), " [ci skip]\n\n",
if (!is.null(ci_get_build_url())) {
paste0("Build URL: ", ci_get_build_url(), "\n")
},
"Commit: ", ci_get_commit()
)
}
)
)
#' Step: Perform push deploy
#'
#' @description
#' Commits and pushes to a repo prepared by [step_setup_push_deploy()].
#'
#' Deployment usually requires setting up SSH keys with
#' [use_tic()] or [travis::use_travis_deploy()].
#'
#'
#' @details
#' It is highly recommended to restrict the set of files
#' touched by the deployment with the `commit_paths` argument:
#' this step assumes that it can freely overwrite all changes to all files
#' below `commit_paths`, and will not warn in case of conflicts.
#'
#' To mitigate conflicts race conditions to the greatest extent possible,
#' the following strategy is used:
#'
#' - The changes are committed to the branch
#' - Before pushing, new commits are fetched, and the changes are cherry-picked
#' on top of the new commits
#'
#' If no new commits were pushed after the CI run has started,
#' this strategy is equivalent to committing and pushing.
#' In the opposite case, if the remote repo has new commits,
#' the deployment is safely applied to the current tip.
#'
#' @inheritParams step_setup_push_deploy
#' @param commit_message `[string]`\cr
#' Commit message to use, defaults to a useful message linking to the CI build
#' and avoiding recursive CI runs.
#' @param commit_paths `[character]`\cr
#' Restrict the set of directories and/or files added to Git before deploying.
#' Default: deploy all files.
#'
#' @family deploy steps
#' @family steps
#'
#' @export
#' @examples
#' \dontrun{
#' dsl_init()
#'
#' # Deployment only works if a companion step_setup_push_deploy() is added
#' get_stage("deploy") %>%
#' add_step(step_setup_push_deploy(path = "docs", branch = "gh-pages")) %>%
#' add_step(step_build_pkgdown())
#'
#' if (rlang::is_installed("git2r") && git2r::in_repository()) {
#' get_stage("deploy") %>%
#' add_step(step_do_push_deploy(path = "docs"))
#' }
#'
#' dsl_get()
#' }
step_do_push_deploy <- function(path = ".", commit_message = NULL,
commit_paths = ".") {
DoPushDeploy$new(
path = path, commit_message = commit_message, commit_paths = commit_paths
)
}
PushDeploy <- R6Class(
"PushDeploy",
inherit = TicStep,
public = list(
initialize = function(path = ".", branch = ci_get_branch(),
remote_url =
paste0("git@github.com:", ci_get_slug(), ".git"),
commit_message = NULL, commit_paths = ".") {
orphan <- (path != ".")
private$setup <- step_setup_push_deploy(
path = path, branch = branch, orphan = orphan, remote_url = remote_url,
checkout = FALSE
)
private$do <- step_do_push_deploy(
path = path,
commit_message = commit_message, commit_paths = commit_paths
)
},
check = function() {
private$setup$check() && private$do$check()
},
prepare = function() {
private$setup$prepare()
private$do$prepare()
},
run = function() {
private$setup$run()
private$do$run()
}
),
private = list(
setup = NULL,
do = NULL
)
)
#' Step: Setup and perform push deploy
#'
#' @description
#' Clones a repo, inits author information, sets up remotes,
#' commits, and pushes.
#' Combines [step_setup_push_deploy()] with `checkout = FALSE` and
#' a suitable `orphan` argument,
#' and [step_do_push_deploy()].
#'
#' Deployment usually requires setting up SSH keys with
#' [use_tic()] or [travis::use_travis_deploy()].
#'
#' @details
#' Setup and deployment are combined in one step,
#' the files to be deployed must be prepared in a previous step.
#' This poses some restrictions on how the repository can be initialized,
#' in particular for a nonstandard `path` argument only `orphan = TRUE`
#' can be supported (and will be used).
#'
#' For more control, create two separate steps with
#' `step_setup_push_deploy()` and `step_do_push_deploy()`,
#' and create the files to be deployed inbetween these steps.
#'
#' @inheritParams step_setup_push_deploy
#' @inheritParams step_do_push_deploy
#'
#' @family deploy steps
#' @family steps
#'
#' @export
#' @examples
#' \dontrun{
#' dsl_init()
#'
#' get_stage("script") %>%
#' add_step(step_push_deploy(commit_paths = c("NAMESPACE", "man")))
#'
#' dsl_get()
#' }
step_push_deploy <- function(path = ".", branch = NULL,
remote_url = NULL,
commit_message = NULL, commit_paths = ".") {
PushDeploy$new(
path = path, branch = branch,
remote_url = remote_url,
commit_message = commit_message,
commit_paths = commit_paths
)
}
|
#' @export
#' @import ggmulti
#' @rdname loon2ggplot
# Convert a loon scatterplot layer into ggplot layers, either via aesthetic
# mappings (asAes = TRUE) or fixed parameters (asAes = FALSE).
# `...` carries ggObj/facets/facetsLabels/levels from the loon2ggplot
# dispatcher; when `facets` is supplied, per-facet states are collected,
# tagged with the facet variables, and bound into one data frame.
loon2ggplot.l_layer_scatterplot <- function(target, asAes = TRUE, selectedOnTop = TRUE,
                                            showNearestColor = FALSE, ...) {
  widget <- loon::l_create_handle(attr(target, "widget"))
  args <- list(...)
  ggObj <- args$ggObj
  facets <- args$facets
  facetsLabels <- args$facetsLabels
  levels <- args$levels
  if(is.null(facets)) {
    # Single (unfacetted) widget: read its states directly.
    n <- widget['n']
    if(n == 0) return(ggObj)
    states <- ggStates(widget = widget, ggObj = ggObj,
                       showNearestColor = showNearestColor,
                       selectedOnTop = selectedOnTop)
  } else {
    # Facetted plot: no points across all facets means nothing to add.
    n <- sum(vapply(facets, function(facet) facet['n'], numeric(1L)))
    if(n == 0) return(ggObj)
    facetsVar <- rownames(facetsLabels)
    # One states data frame per facet, with the facet label columns and a
    # facetGroup index appended, then stacked row-wise.
    states <- do.call(rbind,
                      lapply(seq_along(facets),
                             function(i) {
                               facet <- facets[[i]]
                               states <- ggStates(widget = facet, ggObj = ggObj,
                                                  showNearestColor = showNearestColor,
                                                  selectedOnTop = selectedOnTop)
                               do.call(cbind,
                                       c(list(states),
                                         stats::setNames(as.list(facetsLabels[, i]),
                                                         facetsVar),
                                         facetGroup = i))
                             })
    )
    # Fix facet-variable level order so panels appear in the original order.
    for (i in seq_along(facetsVar)) {
      states[[facetsVar[i]]] <- factor(states[[facetsVar[i]]],
                                       levels = levels[[i]])
    }
  }
  ggObj <- if(asAes) {
    scatterplotAsAesTRUE(ggObj = ggObj, widget = widget,
                         states = states,
                         selectedOnTop = selectedOnTop, facets = facets)
  } else {
    scatterplotAsAesFALSE(ggObj = ggObj, widget = widget,
                          states = states,
                          selectedOnTop = selectedOnTop, facets = facets)
  }
  return(ggObj)
}
# Collect the per-point display states of a loon scatterplot widget into a
# data frame with columns x, y, glyph, color, size, index.
#
# widget            loon plot handle to read states from.
# ggObj             accepted for a uniform call signature; not used in this
#                   function's body.
# showNearestColor  if TRUE, map colors to their nearest named R color
#                   (passed as `precise = !showNearestColor` to l_colorName).
# selectedOnTop     if TRUE, order points so selected ones are drawn last.
#
# Returns a single all-NA row when the widget is empty or has no active
# points, so callers can rbind results without special-casing.
ggStates <- function(widget, ggObj, showNearestColor = FALSE,
                     selectedOnTop = TRUE) {
  n <- widget['n']
  if (n == 0 || !any(widget['active'])) {
    return(
      data.frame(
        x = NA,
        y = NA,
        glyph = NA,
        color = NA,
        size = NA,
        index = NA
      )
    )
  }
  states <- get_layer_states(widget, native_unit = FALSE)
  states$color <- l_colorName(states$color, error = FALSE,
                              precise = !showNearestColor)
  # Drawing order: selected points last when selectedOnTop, otherwise the
  # widget's natural point order.
  displayOrder <- if(selectedOnTop) {
    get_model_display_order(widget)
  } else {
    seq(widget['n'])
  }
  active <- states$active[displayOrder]
  selected <- states$selected[displayOrder][active]
  # Swap coordinates when the widget displays axes swapped.
  if (widget['swapAxes']) {
    x <- as.numeric(states$y[displayOrder][active])
    y <- as.numeric(states$x[displayOrder][active])
  } else {
    x <- as.numeric(states$x[displayOrder][active])
    y <- as.numeric(states$y[displayOrder][active])
  }
  data.frame(
    x = x,
    y = y,
    glyph = states$glyph[displayOrder][active],
    color = get_display_color(states$color[displayOrder][active], selected),
    size = states$size[displayOrder][active],
    index = displayOrder[active]
  )
}
|
/R/loon2ggplot-l_layer_scatterplot.R
|
no_license
|
great-northern-diver/loon.ggplot
|
R
| false
| false
| 3,396
|
r
|
#' @export
#' @import ggmulti
#' @rdname loon2ggplot
loon2ggplot.l_layer_scatterplot <- function(target, asAes = TRUE, selectedOnTop = TRUE,
showNearestColor = FALSE, ...) {
widget <- loon::l_create_handle(attr(target, "widget"))
args <- list(...)
ggObj <- args$ggObj
facets <- args$facets
facetsLabels <- args$facetsLabels
levels <- args$levels
if(is.null(facets)) {
n <- widget['n']
if(n == 0) return(ggObj)
states <- ggStates(widget = widget, ggObj = ggObj,
showNearestColor = showNearestColor,
selectedOnTop = selectedOnTop)
} else {
n <- sum(vapply(facets, function(facet) facet['n'], numeric(1L)))
if(n == 0) return(ggObj)
facetsVar <- rownames(facetsLabels)
states <- do.call(rbind,
lapply(seq_along(facets),
function(i) {
facet <- facets[[i]]
states <- ggStates(widget = facet, ggObj = ggObj,
showNearestColor = showNearestColor,
selectedOnTop = selectedOnTop)
do.call(cbind,
c(list(states),
stats::setNames(as.list(facetsLabels[, i]),
facetsVar),
facetGroup = i))
})
)
for (i in seq_along(facetsVar)) {
states[[facetsVar[i]]] <- factor(states[[facetsVar[i]]],
levels = levels[[i]])
}
}
ggObj <- if(asAes) {
scatterplotAsAesTRUE(ggObj = ggObj, widget = widget,
states = states,
selectedOnTop = selectedOnTop, facets = facets)
} else {
scatterplotAsAesFALSE(ggObj = ggObj, widget = widget,
states = states,
selectedOnTop = selectedOnTop, facets = facets)
}
return(ggObj)
}
ggStates <- function(widget, ggObj, showNearestColor = FALSE,
selectedOnTop = TRUE) {
n <- widget['n']
if (n == 0 || !any(widget['active'])) {
return(
data.frame(
x = NA,
y = NA,
glyph = NA,
color = NA,
size = NA,
index = NA
)
)
}
states <- get_layer_states(widget, native_unit = FALSE)
states$color <- l_colorName(states$color, error = FALSE,
precise = !showNearestColor)
# No active points in scatterplot
displayOrder <- if(selectedOnTop) {
get_model_display_order(widget)
} else {
seq(widget['n'])
}
active <- states$active[displayOrder]
selected <- states$selected[displayOrder][active]
if (widget['swapAxes']) {
x <- as.numeric(states$y[displayOrder][active])
y <- as.numeric(states$x[displayOrder][active])
} else {
x <- as.numeric(states$x[displayOrder][active])
y <- as.numeric(states$y[displayOrder][active])
}
data.frame(
x = x,
y = y,
glyph = states$glyph[displayOrder][active],
color = get_display_color(states$color[displayOrder][active], selected),
size = states$size[displayOrder][active],
index = displayOrder[active]
)
}
|
## Purled (knitr::purl) R code of a LandGenCourse vignette: builds a Delaunay
## graph over wetland sites and attaches node-level graph metrics.
## The "## ----" lines are chunk delimiters produced by purl; keep them.
## -----------------------------------------------------------------------------
if(!requireNamespace("GeNetIt", quietly = TRUE)) remotes::install_github("jeffreyevans/GeNetIt")
if(!requireNamespace("spatialEco", quietly = TRUE)) remotes::install_github("jeffreyevans/spatialEco")
## ----packages global_options, include=TRUE, results="hide", message=FALSE, warning=FALSE----
library(LandGenCourse)
library(sp)
#library(landscapemetrics)
#library(raster)
#library(rgdal)
#library(GeNetIt)
#library(spatialEco)
#library(GeNetIt)
#library(igraph)
#library(deldir)
## -----------------------------------------------------------------------------
# Read the wetland site table shipped with the package.
wetlands <- read.csv(system.file("extdata", "Wetlands.csv",
                            package = "LandGenCourse"), header = TRUE)
str(wetlands)
## -----------------------------------------------------------------------------
# Promote the data frame to a SpatialPointsDataFrame using the X/Y columns.
sp::coordinates(wetlands) <- ~X+Y
class(wetlands)
str(wetlands)
## -----------------------------------------------------------------------------
plot(wetlands, asp=1, bty="n", xlab="", ylab="", main = "All Wetlands")
points(wetlands, pch=19, cex=0.75, col="blue")
## -----------------------------------------------------------------------------
# Delaunay triangulation of the site coordinates.
# NOTE(review): warnings are globally disabled and re-enabled around this
# call; suppressWarnings() would be safer if deldir() errored out.
options(warn=-1)
wetlandgraph <- deldir::deldir(coordinates(wetlands)[,1],
                               coordinates(wetlands)[,2],
                               z = wetlands$SiteName)
options(warn=0)
## -----------------------------------------------------------------------------
plot(wetlands, asp=1, bty="n", xlab="", ylab="", main = "All Wetlands")
points(wetlands, pch=19, cex=0.75, col="blue")
plot(wetlandgraph, wlines = "triang", wpoints="none",
     number=FALSE, add=TRUE, lty=1)
## -----------------------------------------------------------------------------
# Build a symmetric adjacency matrix from the triangulation edge list.
# NOTE(review): after coordinates()<- the X/Y columns move into the
# coordinate slot, so `wetlands$X`/`$Y` here rely on sp still resolving
# them -- confirm the matrix dimensions match the number of sites.
ind <- wetlandgraph$delsgs[,5:6] #pull out individual nodes
adj <- matrix(0, length(wetlands$X), length(wetlands$Y))
for (i in 1:nrow(ind)){
  adj[ind[i,1], ind[i,2]] <- 1
  adj[ind[i,2], ind[i,1]] <- 1
}
## -----------------------------------------------------------------------------
# Turn the adjacency matrix into an undirected igraph graph.
wetnet <- igraph::graph_from_adjacency_matrix(adj, weighted = NULL, mode="undirected")
plot(wetnet)
## -----------------------------------------------------------------------------
# Attach node degree to the site attribute table.
wetlands@data$degree <- igraph::degree(wetnet)
head(wetlands@data)
## -----------------------------------------------------------------------------
# Attach node betweenness centrality to the site attribute table.
wetlands@data$betweenness <- igraph::betweenness(wetnet)
head(wetlands@data)
## -----------------------------------------------------------------------------
sites <- read.csv(system.file("extdata", "RALU_Site.csv",
package = "LandGenCourse"), header = TRUE)
head(sites)
## -----------------------------------------------------------------------------
nodestats <- as.data.frame(wetlands@data[,3:5])
degree.betweenness <- nodestats[which(nodestats$SiteName %in% sites$SiteName),]
head(degree.betweenness)
## -----------------------------------------------------------------------------
sites <- merge(degree.betweenness, sites, by= "SiteName" )
head(sites)
## -----------------------------------------------------------------------------
coordinates(sites) <- ~X+Y
str(sites)
## -----------------------------------------------------------------------------
summary(sites@data)
sites@data$SiteName <- as.character(sites@data$SiteName)
class(sites@data$SiteName)
## -----------------------------------------------------------------------------
sites@data$SiteID <- as.factor(sites@data$SiteID)
class(sites@data$SiteID)
## -----------------------------------------------------------------------------
dist.graph <- GeNetIt::knn.graph(sites, row.names = sites@data[,"SiteID"])
#dist.graph@proj4string@projargs <- "+proj=utm +zone=11 +ellps=GRS80 +towgs84=0,0,0,-0,-0,-0,0 +units=m +no_defs "
#dist.graph <- GeNetIt::knn.graph(sites, row.names = sites@data[,"SiteName"], max.dist=5000)
## -----------------------------------------------------------------------------
gdist <- read.csv(system.file("extdata", "RALU_Dps.csv",
package = "LandGenCourse"), header=TRUE)
rownames(gdist) <- t(names(gdist))
gdist <- as.matrix (gdist)
head(gdist)
## -----------------------------------------------------------------------------
gdist <- GeNetIt::flow(gdist)
head(gdist)
## -----------------------------------------------------------------------------
gdist <- GeNetIt::dmatrix.df(gdist)
head(gdist)
## -----------------------------------------------------------------------------
names(gdist)[3] <- "GDIST"
names(gdist)
## -----------------------------------------------------------------------------
names(gdist)[1] <- "FROM"
names(gdist)[2] <- "TO"
gdist[,1] <-sub("X", "", gdist[,1])
gdist[,2] <-sub("X", "", gdist[,2])
names(gdist)
## -----------------------------------------------------------------------------
gdist <- cbind(from.to=paste(gdist[,1], gdist[,2], sep="."), gdist)
dist.graph@data$from.to <- paste(dist.graph$from_ID, dist.graph$to_ID, sep=".")
dist.graph <- merge(dist.graph, gdist, by = "from.to")
head(dist.graph@data)
## -----------------------------------------------------------------------------
if(!dir.exists(paste0(here::here(),"/output")))
dir.create(paste0(here::here(),"/output"))
write.csv(gdist, file= paste0(here::here(),"/output/gdist.csv"))
## -----------------------------------------------------------------------------
#rgdal::writeOGR(dist.graph, paste0(here::here(),"/output"), "DistGraph",
# driver="ESRI Shapefile", check_exists=TRUE, overwrite_layer=TRUE)
## -----------------------------------------------------------------------------
xvars <- rio::import("https://www.dropbox.com/s/xjl9zpgqplwg1us/ralu.rasters.rds?dl=1")
xvars
names(xvars)
## -----------------------------------------------------------------------------
# Reclassification matrix rows are (from, to, becomes): value ranges covering
# NLCD codes 11-12 and 90-95 map to 1 (water/wetland); the ranges 0-10.8 and
# 12.9-89.1 map to 0 (non-wetland).
m <- c(0,10.8, 0,10.9,12.1,1,12.9,89.1,0, 89.5,95.1,1)
reclass <- matrix(m, ncol=3, byrow=TRUE)
## ----warning=FALSE------------------------------------------------------------
# Collapse the NLCD land-cover raster into a binary wetland layer
wetlnd <- raster::reclassify(xvars$nlcd, reclass)
## ----warning=FALSE------------------------------------------------------------
wetlnd@data@names <- "wetlnd" # name the layer so it can be referenced in the stack
## -----------------------------------------------------------------------------
plot(wetlnd)
## -----------------------------------------------------------------------------
# Append the binary wetland layer to the predictor raster stack
xvars <- raster::stack(xvars, wetlnd)
names(xvars)
## -----------------------------------------------------------------------------
nlcd_sampled <- landscapemetrics::sample_lsm(landscape = xvars[["wetlnd"]],
what = "lsm_c_pland",
shape = "circle",
y = sites,
size = 300,
return_raster = FALSE,
plot_id=sites@data$SiteID)
pwetland <- dplyr::select(dplyr::filter(nlcd_sampled, class == 1,
metric == "pland"), plot_id, value)
names(pwetland) <- c("SiteID", "pwetland")
pwetland$pwetland <- pwetland$pwetland/100
head(pwetland)
## -----------------------------------------------------------------------------
sites@data <- dplyr::left_join(sites@data, pwetland)
sites@data$pwetland[is.na(sites@data$pwetland)] <- 0
head(sites@data)
## -----------------------------------------------------------------------------
sites@data <- data.frame(sites@data, raster::extract(xvars, sites))
## -----------------------------------------------------------------------------
names(sites@data)
## -----------------------------------------------------------------------------
idx <- which(names(xvars) %in% c("nlcd","wetlnd"))
## -----------------------------------------------------------------------------
dist.graph@proj4string@projargs <- "+proj=utm +zone=11 +ellps=GRS80 +towgs84=0,0,0,-0,-0,-0,0 +units=m +no_defs "
stats <- GeNetIt::graph.statistics(dist.graph, r = xvars[[-idx]], buffer= NULL,
stats = c("min", "mean", "max", "var", "median"))
dist.graph@data <- data.frame(dist.graph@data, stats)
names(dist.graph@data)
## -----------------------------------------------------------------------------
# Proportion of raster cells that are wetland-associated
# (NLCD codes 11 = open water, 90 = woody wetlands, 95 = herbaceous wetlands).
wet.pct <- function(x) {
  # The mean of a logical vector is the proportion of TRUE values.
  # Unlike the previous prop.table(table(x))[2] approach, this returns 0 when
  # no cells are wetland and 1 when all cells are wetland; table()[2] was NA
  # in both of those single-level edge cases, and the downstream NA -> 0
  # patch silently turned an all-wetland edge into 0. NA cells are ignored.
  mean(x == 11 | x == 90 | x == 95, na.rm = TRUE)
}
## -----------------------------------------------------------------------------
wetstats <- GeNetIt::graph.statistics(dist.graph, r=xvars$nlcd, buffer= NULL,
stats = c("wet.pct"))
wetstats[is.na(wetstats)] <- 0
dist.graph@data <- data.frame(dist.graph@data, wetstats)
names(dist.graph@data)
## -----------------------------------------------------------------------------
node.var <- c("degree", "betweenness", "Elev", "Length", "Area", "Perim",
"Depth", "pH","Dforest","Drock", "Dshrub", "pwetland", "cti",
"dd5", "ffp","gsp","pratio","hli","rough27","srr")
## -----------------------------------------------------------------------------
node <- GeNetIt::build.node.data(sites@data, group.ids = "SiteID", from.parms = node.var)
head(node)
## -----------------------------------------------------------------------------
gdata <- merge(dist.graph, node, by.x="from_ID", by.y="SiteID")
gdata <- gdata@data
names(gdata)
## -----------------------------------------------------------------------------
nodeln <- node[,c(2:21)]
for(i in 1:ncol(nodeln)) {
nodeln[,i] <- log(nodeln[,i] - (min(nodeln[,i]) - 1))
}
nodecor.ln <- cor(nodeln, y = NULL,
use = "complete.obs",
method = "pearson")
round(nodecor.ln, 3)
#pairs(nodecor.ln, pch=19, cex=0.50)
## ----fig.height=5, fig.width=8------------------------------------------------
edge.ln <- dist.graph@data[,10:length(dist.graph@data)]
for(i in 1:ncol(edge.ln)) {
edge.ln[,i] <- log(edge.ln[,i] - (min(edge.ln[,i]) - 1))
}
edgecor.ln <- cor(edge.ln, y = NULL,
use = "complete.obs",
method = "pearson")
round(edgecor.ln, 3)
## -----------------------------------------------------------------------------
pdf(file=paste0(here::here(),"/output/node.cor.pdf"), width=20, height=20)
pairs(nodecor.ln, pch=19, cex=0.50)
dev.off()
## -----------------------------------------------------------------------------
write.csv(round(edgecor.ln, 4),
file = paste0(here::here(),"/output/EdgeCorrelationsLn.csv"))
write.csv(round(nodecor.ln, 4),
file = paste0(here::here(),"/output/NodeCorrelationsLn.csv"))
## -----------------------------------------------------------------------------
( null <- GeNetIt::gravity(y = "GDIST", x = c("length"), d = "length", group = "from_ID",
data = gdata, method = "ML") )
## -----------------------------------------------------------------------------
( global <- GeNetIt::gravity(y = "GDIST", x = c("length", "wet.pct.nlcd",
"median.gsp", "from.Depth",
"from.ffp", "from.hli", "from.pratio",
"from.degree", "from.betweenness",
"from.pwetland", "median.srr",
"median.rough27"), d = "length",
group = "from_ID", data = gdata, method = "ML") )
## -----------------------------------------------------------------------------
( published <- GeNetIt::gravity(y = "GDIST", x = c("length", "median.gsp", "from.Depth",
"from.hli", "median.cti", "median.srr"), d = "length",
group = "from_ID", data = gdata, method = "ML"))
## -----------------------------------------------------------------------------
( habitat <- GeNetIt::gravity(y = "GDIST", x = c("length", "wet.pct.nlcd", "median.gsp"), d = "length",
group = "from_ID", data = gdata, method = "ML") )
## -----------------------------------------------------------------------------
#compare.models(null, depth, product, climate, wetlands, topo, habitat, global)
#compare.models(depth, product, climate, wetlands, topo, habitat, published, global, null)
GeNetIt::compare.models(null, habitat, global, published) #NOTE - global will need to be edited to match your paramters
## -----------------------------------------------------------------------------
par(mfrow=c(2,3))
for (i in 1:6) { plot(global, type=i) }
## -----------------------------------------------------------------------------
habitat_fit <- GeNetIt::gravity(y = "GDIST", x = c("length", "wet.pct.nlcd", "median.gsp"),
d = "length", group = "from_ID", data = gdata, method = "REML")
## -----------------------------------------------------------------------------
global_fit <- GeNetIt::gravity(y = "GDIST", x = c("length", "wet.pct.nlcd", "median.gsp",
"from.Depth", "from.ffp", "from.hli",
"from.pratio", "from.degree",
"from.betweenness", "from.pwetland", "median.srr",
"median.rough27"),
d = "length", group = "from_ID", data = gdata, method = "REML")
## -----------------------------------------------------------------------------
published_fit <- GeNetIt::gravity(y = "GDIST", x = c("length", "median.gsp", "from.Depth",
"from.hli", "median.cti", "median.srr"), d = "length",
group = "from_ID", data = gdata, method = "REML")
## -----------------------------------------------------------------------------
GeNetIt::compare.models(global_fit, habitat_fit, published_fit)
## -----------------------------------------------------------------------------
GeNetIt::gravity.es(habitat_fit)
GeNetIt::gravity.es(global_fit)
GeNetIt::gravity.es(published_fit)
|
/inst/doc/Week13_vignette.R
|
no_license
|
hhwagner1/LandGenCourse
|
R
| false
| false
| 14,050
|
r
|
## -----------------------------------------------------------------------------
if(!requireNamespace("GeNetIt", quietly = TRUE)) remotes::install_github("jeffreyevans/GeNetIt")
if(!requireNamespace("spatialEco", quietly = TRUE)) remotes::install_github("jeffreyevans/spatialEco")
## ----packages global_options, include=TRUE, results="hide", message=FALSE, warning=FALSE----
library(LandGenCourse)
library(sp)
#library(landscapemetrics)
#library(raster)
#library(rgdal)
#library(GeNetIt)
#library(spatialEco)
#library(GeNetIt)
#library(igraph)
#library(deldir)
## -----------------------------------------------------------------------------
wetlands <- read.csv(system.file("extdata", "Wetlands.csv",
package = "LandGenCourse"), header = TRUE)
str(wetlands)
## -----------------------------------------------------------------------------
sp::coordinates(wetlands) <- ~X+Y
class(wetlands)
str(wetlands)
## -----------------------------------------------------------------------------
plot(wetlands, asp=1, bty="n", xlab="", ylab="", main = "All Wetlands")
points(wetlands, pch=19, cex=0.75, col="blue")
## -----------------------------------------------------------------------------
options(warn=-1)
wetlandgraph <- deldir::deldir(coordinates(wetlands)[,1],
coordinates(wetlands)[,2],
z = wetlands$SiteName)
options(warn=0)
## -----------------------------------------------------------------------------
plot(wetlands, asp=1, bty="n", xlab="", ylab="", main = "All Wetlands")
points(wetlands, pch=19, cex=0.75, col="blue")
plot(wetlandgraph, wlines = "triang", wpoints="none",
number=FALSE, add=TRUE, lty=1)
## -----------------------------------------------------------------------------
ind <- wetlandgraph$delsgs[,5:6] #pull out individual nodes
adj <- matrix(0, length(wetlands$X), length(wetlands$Y))
for (i in 1:nrow(ind)){
adj[ind[i,1], ind[i,2]] <- 1
adj[ind[i,2], ind[i,1]] <- 1
}
## -----------------------------------------------------------------------------
wetnet <- igraph::graph_from_adjacency_matrix(adj, weighted = NULL, mode="undirected")
plot(wetnet)
## -----------------------------------------------------------------------------
wetlands@data$degree <- igraph::degree(wetnet)
head(wetlands@data)
## -----------------------------------------------------------------------------
wetlands@data$betweenness <- igraph::betweenness(wetnet)
head(wetlands@data)
## -----------------------------------------------------------------------------
sites <- read.csv(system.file("extdata", "RALU_Site.csv",
package = "LandGenCourse"), header = TRUE)
head(sites)
## -----------------------------------------------------------------------------
nodestats <- as.data.frame(wetlands@data[,3:5])
degree.betweenness <- nodestats[which(nodestats$SiteName %in% sites$SiteName),]
head(degree.betweenness)
## -----------------------------------------------------------------------------
sites <- merge(degree.betweenness, sites, by= "SiteName" )
head(sites)
## -----------------------------------------------------------------------------
coordinates(sites) <- ~X+Y
str(sites)
## -----------------------------------------------------------------------------
summary(sites@data)
sites@data$SiteName <- as.character(sites@data$SiteName)
class(sites@data$SiteName)
## -----------------------------------------------------------------------------
sites@data$SiteID <- as.factor(sites@data$SiteID)
class(sites@data$SiteID)
## -----------------------------------------------------------------------------
dist.graph <- GeNetIt::knn.graph(sites, row.names = sites@data[,"SiteID"])
#dist.graph@proj4string@projargs <- "+proj=utm +zone=11 +ellps=GRS80 +towgs84=0,0,0,-0,-0,-0,0 +units=m +no_defs "
#dist.graph <- GeNetIt::knn.graph(sites, row.names = sites@data[,"SiteName"], max.dist=5000)
## -----------------------------------------------------------------------------
gdist <- read.csv(system.file("extdata", "RALU_Dps.csv",
package = "LandGenCourse"), header=TRUE)
rownames(gdist) <- t(names(gdist))
gdist <- as.matrix (gdist)
head(gdist)
## -----------------------------------------------------------------------------
gdist <- GeNetIt::flow(gdist)
head(gdist)
## -----------------------------------------------------------------------------
gdist <- GeNetIt::dmatrix.df(gdist)
head(gdist)
## -----------------------------------------------------------------------------
names(gdist)[3] <- "GDIST"
names(gdist)
## -----------------------------------------------------------------------------
names(gdist)[1] <- "FROM"
names(gdist)[2] <- "TO"
gdist[,1] <-sub("X", "", gdist[,1])
gdist[,2] <-sub("X", "", gdist[,2])
names(gdist)
## -----------------------------------------------------------------------------
gdist <- cbind(from.to=paste(gdist[,1], gdist[,2], sep="."), gdist)
dist.graph@data$from.to <- paste(dist.graph$from_ID, dist.graph$to_ID, sep=".")
dist.graph <- merge(dist.graph, gdist, by = "from.to")
head(dist.graph@data)
## -----------------------------------------------------------------------------
if(!dir.exists(paste0(here::here(),"/output")))
dir.create(paste0(here::here(),"/output"))
write.csv(gdist, file= paste0(here::here(),"/output/gdist.csv"))
## -----------------------------------------------------------------------------
#rgdal::writeOGR(dist.graph, paste0(here::here(),"/output"), "DistGraph",
# driver="ESRI Shapefile", check_exists=TRUE, overwrite_layer=TRUE)
## -----------------------------------------------------------------------------
xvars <- rio::import("https://www.dropbox.com/s/xjl9zpgqplwg1us/ralu.rasters.rds?dl=1")
xvars
names(xvars)
## -----------------------------------------------------------------------------
m <- c(0,10.8, 0,10.9,12.1,1,12.9,89.1,0, 89.5,95.1,1)
reclass <- matrix(m, ncol=3, byrow=TRUE)
## ----warning=FALSE------------------------------------------------------------
wetlnd <- raster::reclassify(xvars$nlcd, reclass)
## ----warning=FALSE------------------------------------------------------------
wetlnd@data@names <- "wetlnd"
## -----------------------------------------------------------------------------
plot(wetlnd)
## -----------------------------------------------------------------------------
xvars <- raster::stack(xvars, wetlnd)
names(xvars)
## -----------------------------------------------------------------------------
nlcd_sampled <- landscapemetrics::sample_lsm(landscape = xvars[["wetlnd"]],
what = "lsm_c_pland",
shape = "circle",
y = sites,
size = 300,
return_raster = FALSE,
plot_id=sites@data$SiteID)
pwetland <- dplyr::select(dplyr::filter(nlcd_sampled, class == 1,
metric == "pland"), plot_id, value)
names(pwetland) <- c("SiteID", "pwetland")
pwetland$pwetland <- pwetland$pwetland/100
head(pwetland)
## -----------------------------------------------------------------------------
sites@data <- dplyr::left_join(sites@data, pwetland)
sites@data$pwetland[is.na(sites@data$pwetland)] <- 0
head(sites@data)
## -----------------------------------------------------------------------------
sites@data <- data.frame(sites@data, raster::extract(xvars, sites))
## -----------------------------------------------------------------------------
names(sites@data)
## -----------------------------------------------------------------------------
idx <- which(names(xvars) %in% c("nlcd","wetlnd"))
## -----------------------------------------------------------------------------
dist.graph@proj4string@projargs <- "+proj=utm +zone=11 +ellps=GRS80 +towgs84=0,0,0,-0,-0,-0,0 +units=m +no_defs "
stats <- GeNetIt::graph.statistics(dist.graph, r = xvars[[-idx]], buffer= NULL,
stats = c("min", "mean", "max", "var", "median"))
dist.graph@data <- data.frame(dist.graph@data, stats)
names(dist.graph@data)
## -----------------------------------------------------------------------------
# Proportion of raster cells that are wetland-associated
# (NLCD codes 11 = open water, 90 = woody wetlands, 95 = herbaceous wetlands).
wet.pct <- function(x) {
  # The mean of a logical vector is the proportion of TRUE values.
  # Unlike the previous prop.table(table(x))[2] approach, this returns 0 when
  # no cells are wetland and 1 when all cells are wetland; table()[2] was NA
  # in both of those single-level edge cases, and the downstream NA -> 0
  # patch silently turned an all-wetland edge into 0. NA cells are ignored.
  mean(x == 11 | x == 90 | x == 95, na.rm = TRUE)
}
## -----------------------------------------------------------------------------
wetstats <- GeNetIt::graph.statistics(dist.graph, r=xvars$nlcd, buffer= NULL,
stats = c("wet.pct"))
wetstats[is.na(wetstats)] <- 0
dist.graph@data <- data.frame(dist.graph@data, wetstats)
names(dist.graph@data)
## -----------------------------------------------------------------------------
node.var <- c("degree", "betweenness", "Elev", "Length", "Area", "Perim",
"Depth", "pH","Dforest","Drock", "Dshrub", "pwetland", "cti",
"dd5", "ffp","gsp","pratio","hli","rough27","srr")
## -----------------------------------------------------------------------------
node <- GeNetIt::build.node.data(sites@data, group.ids = "SiteID", from.parms = node.var)
head(node)
## -----------------------------------------------------------------------------
gdata <- merge(dist.graph, node, by.x="from_ID", by.y="SiteID")
gdata <- gdata@data
names(gdata)
## -----------------------------------------------------------------------------
nodeln <- node[,c(2:21)]
for(i in 1:ncol(nodeln)) {
nodeln[,i] <- log(nodeln[,i] - (min(nodeln[,i]) - 1))
}
nodecor.ln <- cor(nodeln, y = NULL,
use = "complete.obs",
method = "pearson")
round(nodecor.ln, 3)
#pairs(nodecor.ln, pch=19, cex=0.50)
## ----fig.height=5, fig.width=8------------------------------------------------
edge.ln <- dist.graph@data[,10:length(dist.graph@data)]
for(i in 1:ncol(edge.ln)) {
edge.ln[,i] <- log(edge.ln[,i] - (min(edge.ln[,i]) - 1))
}
edgecor.ln <- cor(edge.ln, y = NULL,
use = "complete.obs",
method = "pearson")
round(edgecor.ln, 3)
## -----------------------------------------------------------------------------
pdf(file=paste0(here::here(),"/output/node.cor.pdf"), width=20, height=20)
pairs(nodecor.ln, pch=19, cex=0.50)
dev.off()
## -----------------------------------------------------------------------------
write.csv(round(edgecor.ln, 4),
file = paste0(here::here(),"/output/EdgeCorrelationsLn.csv"))
write.csv(round(nodecor.ln, 4),
file = paste0(here::here(),"/output/NodeCorrelationsLn.csv"))
## -----------------------------------------------------------------------------
( null <- GeNetIt::gravity(y = "GDIST", x = c("length"), d = "length", group = "from_ID",
data = gdata, method = "ML") )
## -----------------------------------------------------------------------------
( global <- GeNetIt::gravity(y = "GDIST", x = c("length", "wet.pct.nlcd",
"median.gsp", "from.Depth",
"from.ffp", "from.hli", "from.pratio",
"from.degree", "from.betweenness",
"from.pwetland", "median.srr",
"median.rough27"), d = "length",
group = "from_ID", data = gdata, method = "ML") )
## -----------------------------------------------------------------------------
( published <- GeNetIt::gravity(y = "GDIST", x = c("length", "median.gsp", "from.Depth",
"from.hli", "median.cti", "median.srr"), d = "length",
group = "from_ID", data = gdata, method = "ML"))
## -----------------------------------------------------------------------------
( habitat <- GeNetIt::gravity(y = "GDIST", x = c("length", "wet.pct.nlcd", "median.gsp"), d = "length",
group = "from_ID", data = gdata, method = "ML") )
## -----------------------------------------------------------------------------
#compare.models(null, depth, product, climate, wetlands, topo, habitat, global)
#compare.models(depth, product, climate, wetlands, topo, habitat, published, global, null)
GeNetIt::compare.models(null, habitat, global, published) #NOTE - global will need to be edited to match your paramters
## -----------------------------------------------------------------------------
par(mfrow=c(2,3))
for (i in 1:6) { plot(global, type=i) }
## -----------------------------------------------------------------------------
habitat_fit <- GeNetIt::gravity(y = "GDIST", x = c("length", "wet.pct.nlcd", "median.gsp"),
d = "length", group = "from_ID", data = gdata, method = "REML")
## -----------------------------------------------------------------------------
global_fit <- GeNetIt::gravity(y = "GDIST", x = c("length", "wet.pct.nlcd", "median.gsp",
"from.Depth", "from.ffp", "from.hli",
"from.pratio", "from.degree",
"from.betweenness", "from.pwetland", "median.srr",
"median.rough27"),
d = "length", group = "from_ID", data = gdata, method = "REML")
## -----------------------------------------------------------------------------
published_fit <- GeNetIt::gravity(y = "GDIST", x = c("length", "median.gsp", "from.Depth",
"from.hli", "median.cti", "median.srr"), d = "length",
group = "from_ID", data = gdata, method = "REML")
## -----------------------------------------------------------------------------
GeNetIt::compare.models(global_fit, habitat_fit, published_fit)
## -----------------------------------------------------------------------------
GeNetIt::gravity.es(habitat_fit)
GeNetIt::gravity.es(global_fit)
GeNetIt::gravity.es(published_fit)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_travel_time.R
\name{get_travel_time}
\alias{get_travel_time}
\title{Helper function: get traveltime}
\usage{
get_travel_time(sinusfit_sw, sinusfit_gw, retardation_factor = 1.8)
}
\arguments{
\item{sinusfit_sw}{as retrieved by \code{\link{optimise_sinus_variablePeriod}} with surface water temperature data}
\item{sinusfit_gw}{as retrieved by \code{\link{optimise_sinus_variablePeriod}} with groundwater temperature data}
\item{retardation_factor}{hydraulic retardation factor (default: 1.8)}
}
\value{
data frame with travel times for min/max and turning points
}
\description{
Helper function: get traveltime
}
|
/man/get_travel_time.Rd
|
permissive
|
KWB-R/kwb.heatsine
|
R
| false
| true
| 694
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_travel_time.R
\name{get_travel_time}
\alias{get_travel_time}
\title{Helper function: get traveltime}
\usage{
get_travel_time(sinusfit_sw, sinusfit_gw, retardation_factor = 1.8)
}
\arguments{
\item{sinusfit_sw}{as retrieved by \code{\link{optimise_sinus_variablePeriod}} with surface water temperature data}
\item{sinusfit_gw}{as retrieved by \code{\link{optimise_sinus_variablePeriod}} with groundwater temperature data}
\item{retardation_factor}{hydraulic retardation factor (default: 1.8)}
}
\value{
data frame with travel times for min/max and turning points
}
\description{
Helper function: get traveltime
}
|
## plot4.R: Exploratory Data Analysis, plot 4 -- a 2x2 panel of household
## power-consumption time series for 2007-02-01 and 2007-02-02.
## Reading full dataset (semicolon-separated; "?" marks missing values)
datafile <- "./data/household_power_consumption.txt"
data_full <- read.csv(datafile, sep=";", header=TRUE, na.strings="?", stringsAsFactors=FALSE)
## Converting Date variable from "d/m/Y" text to Date class
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Subsetting the dataset to the two days of interest, then freeing the full table
data_sub <- subset(data_full, Date=="2007-02-01" | Date=="2007-02-02")
rm(data_full)
## Combining Date and Time into a single POSIXct timestamp for the x-axis
data_sub$DT <- paste(data_sub$Date, data_sub$Time)
data_sub$DateTime <- as.POSIXct(data_sub$DT)
## Plot 4: four panels -- active power, voltage, sub-metering, reactive power
png("plot4.png", height=480, width=480)
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(data_sub, {
plot(Global_active_power ~ DateTime, type="l", xlab="", ylab="Global Active Power")
plot(Voltage ~ DateTime, type="l", xlab="datetime", ylab="Voltage")
plot(Sub_metering_1 ~ DateTime, type="l", xlab="", ylab="Energy sub metering")
lines(Sub_metering_2 ~ DateTime, col="red")
lines(Sub_metering_3 ~ DateTime, col="blue")
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(Global_reactive_power ~ DateTime, type="l", xlab="datetime")
})
dev.off()
|
/plot4.R
|
no_license
|
sharmanas/ExData_Plotting1
|
R
| false
| false
| 1,193
|
r
|
## plot4.R: Exploratory Data Analysis, plot 4 -- a 2x2 panel of household
## power-consumption time series for 2007-02-01 and 2007-02-02.
## Reading full dataset (semicolon-separated; "?" marks missing values)
datafile <- "./data/household_power_consumption.txt"
data_full <- read.csv(datafile, sep=";", header=TRUE, na.strings="?", stringsAsFactors=FALSE)
## Converting Date variable from "d/m/Y" text to Date class
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Subsetting the dataset to the two days of interest, then freeing the full table
data_sub <- subset(data_full, Date=="2007-02-01" | Date=="2007-02-02")
rm(data_full)
## Combining Date and Time into a single POSIXct timestamp for the x-axis
data_sub$DT <- paste(data_sub$Date, data_sub$Time)
data_sub$DateTime <- as.POSIXct(data_sub$DT)
## Plot 4: four panels -- active power, voltage, sub-metering, reactive power
png("plot4.png", height=480, width=480)
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(data_sub, {
plot(Global_active_power ~ DateTime, type="l", xlab="", ylab="Global Active Power")
plot(Voltage ~ DateTime, type="l", xlab="datetime", ylab="Voltage")
plot(Sub_metering_1 ~ DateTime, type="l", xlab="", ylab="Energy sub metering")
lines(Sub_metering_2 ~ DateTime, col="red")
lines(Sub_metering_3 ~ DateTime, col="blue")
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(Global_reactive_power ~ DateTime, type="l", xlab="datetime")
})
dev.off()
|
# server.R script for MelphalanApp
# Reactive objects (i.e., those dependent on widget input) are written here
# ------------------------------------------------------------------------------
# Define the "server" part of the Shiny application
shinyServer(function(input,output,session) {
###########
##_INPUT_##
###########
# Create an input data frame that stores input patient characteristics
# Will be used for the three different simulation scenarios
Rinput.data <- reactive({
# Build the mrgsolve event/input data frame shared by all three dosing
# scenarios, from the patient characteristics entered in the ui widgets.
# Call in user-defined widget values
AGE <- input$AGE # Numeric input for patient's age (years)
TBW <- input$TBW # Numeric input for patient's total body weight (kg)
HT <- input$HT # Numeric input for patient's height (cm)
SECR <- input$SECR # Numeric input for patient's serum creatinine (presumably mg/dL given the Cockcroft-Gault constant 72 below -- confirm against ui units)
HCT <- input$HCT # Numeric input for patient's haematocrit
ANCBASE <- input$ANCBASE # Numeric input for patient's baseline absolute neutrophil count
if (input$SEX == 1) SEX <- 0 # Select input for patient's gender, female = 0
if (input$SEX == 2) SEX <- 1 # Select input for patient's gender, male = 1
if (input$RACE == 1 | input$RACE == 3) RACE <- 0 # Select input for patient's race, Caucasian and Unknown = 0
if (input$RACE == 2) RACE <- 1 # Select input for patient's race, African-American = 1
if (input$SLC7A5 == 1) SLC7A5 <- 0 # Select input for patient's SLC7A5 genotype, AA or AG = 0
if (input$SLC7A5 == 2) SLC7A5 <- 1 # Select input for patient's SLC7A5 genotype, GG = 1
# Calculate secondary parameters based on input
# Body mass index (BMI)
BMI <- TBW/(HT/100)^2 # Used to calculate fat free mass
# Body surface area (BSA)
BSA <- 0.007184*(TBW^0.425)*(HT^0.725) # Based on the Du Bois formula
# Creatinine clearance (CRCL; Cockcroft-Gault form with 0.85 female factor)
# and fat free mass (FFM) by gender; FFM coefficients look like the
# Janmahasatian equations -- NOTE(review): confirm against the model reference
if (SEX == 0) { # Females
CRCL <- (((140-AGE)*TBW)/(SECR*72))*0.85
FFM <- 9270*TBW/(8780+(244*BMI))
} else { # Males
CRCL <- ((140-AGE)*TBW)/(SECR*72)
FFM <- 9270*TBW/(6680+(216*BMI))
}
# Set up input.data
# "amt" and "GCSF" are left as placeholders here: each scenario-specific
# "sim.data" reactive fills in its own dose amount and G-CSF timing
input.data <- expand.ev(
ID = 1:(n+1), # n individuals (plus an additional because the first ID is PRED)
time = 0, # time that melphalan dose will be administered
amt = NA, # amt in mg/m^2, currently amount per m^2 is unknown
evid = 1, # dosing event
cmt = 1, # dose into compartment 1, i.e., CENT
rate = -2, # infusion duration is specified in the model file
BSA = BSA, # Required to be stored to calculate "amt"
BMI = BMI, # Required to be stored for ui
FFM = FFM, # Fat free mass
CRCL = CRCL, # Creatinine clearance
HCT = HCT, # Haematocrit
ANCBASE = ANCBASE, # Baseline absolute neutrophil count
SEX = SEX, # Gender
RACE = RACE, # Race
SLC7A5 = SLC7A5, # Genotype
GCSF = 0 # Time of administration of G-CSF (scenario-specific), default is on Day 1 (0)
)
}) # Brackets closing "Rinput.data"
###########
##_DOSE1_##
###########
# Simulate a population based on input characteristics
# Will have it's own specific dose and time of G-CSF administration
Rsim.data1 <- reactive({
withProgress(
message = "Simulating profiles...",
value = 0,
{
# Read in reactive input.data
input.data <- Rinput.data()
# Read in simulation specific value for G-CSF
if (input$GCSF1 == 2) input.data$GCSF <- 1 # Select input for when to administer G-CSF (Neupogen), Day 7 = 1
# Calculate amt to be administered based on patient's BSA and DOSE1
input.data$amt <- input.data$BSA*input$DOSE1
# Simulate
sim.data1 <- mod %>% data_set(input.data) %>% mrgsim(add = time)
sim.data1 <- as.data.frame(sim.data1) #Convert to a data frame so that it is more useful for me!
}
) # Brackets closing "withProgress"
}) # Brackets closing "Rsim.data1"
# Create a data frame that only contains the "PRED" data
Rpred.data1 <- reactive({
  # Retrieve the complete simulated population for the first regimen
  sim.out <- Rsim.data1()
  # The first simulated individual (ID == 1) carries the population
  # prediction (PRED); return only that individual's rows
  sim.out[sim.out$ID == 1, ]
}) # Brackets closing "Rpred.data1"
# Summarise simulated data as prediction intervals when option is selected
Rsummary.data1 <- reactive({
  # Fetch the simulated population for the first regimen
  sim.out <- Rsim.data1()
  # Drop the population-prediction individual (ID == 1) so the summary
  # reflects only the simulated population
  individuals <- sim.out[sim.out$ID != 1, ]
  # Summarise at each time point using the shared summary.function helper
  ddply(individuals, .(time), summary.function)
}) # Brackets closing "Rsummary.data1"
###########
##_DOSE2_##
###########
# Simulate a population based on input characteristics
# Will have it's own specific dose and time of G-CSF administration
Rsim.data2 <- reactive({
withProgress(
message = "Simulating profiles...",
value = 0,
{
if (input$NREG > 1) {
# Read in reactive input.data
input.data <- Rinput.data()
# Read in simulation specific value for G-CSF
if (input$GCSF2 == 2) input.data$GCSF <- 1 # Select input for when to administer G-CSF (Neupogen), Day 7 = 1
# Calculate amt to be administered based on patient's BSA and DOSE1
input.data$amt <- input.data$BSA*input$DOSE2
# Simulate
sim.data2 <- mod %>% data_set(input.data) %>% mrgsim(add = time)
sim.data2 <- as.data.frame(sim.data2) #Convert to a data frame so that it is more useful for me!
}
}
) # Brackets closing "withProgress"
}) # Brackets closing "Rsim.data2"
# Create a data frame that only contains the "PRED" data
Rpred.data2 <- reactive({
# Read in reactive expressions
sim.data2 <- Rsim.data2()
# Subset out only ID == 1 (PRED individual)
pred.data2 <- sim.data2[sim.data2$ID == 1,]
}) # Brackets closing "Rpred.data2"
# Summarise simulated data as prediction intervals when option is selected
Rsummary.data2 <- reactive({
# Read in reactive expressions
sim.data2 <- Rsim.data2()
# Summarise data
sim.data2 <- sim.data2[sim.data2$ID != 1,] # Do not include ID == 1 - they are PRED
summary.data2 <- ddply(sim.data2, .(time), summary.function)
}) # Brackets closing "Rsummary.data2"
###########
##_DOSE3_##
###########
# Simulate a population based on input characteristics
# Will have it's own specific dose and time of G-CSF administration
# Simulate a population for the THIRD dosing regimen.
# Only runs when a third regimen has been selected (input$NREG > 2);
# returns the mrgsim output coerced to a data frame.
Rsim.data3 <- reactive({
  withProgress(
    message = "Simulating profiles...",
    value = 0,
    {
      if (input$NREG > 2) {
        # Read in reactive input.data
        input.data <- Rinput.data()
        # Read in simulation specific value for G-CSF
        # Fix: the "Day 7" choice is coded as 2 for the matching widgets of
        # regimens 1 and 2 (input$GCSF1 == 2, input$GCSF2 == 2); the original
        # tested input$GCSF3 == 3, so Day-7 G-CSF could never be applied here.
        if (input$GCSF3 == 2) input.data$GCSF <- 1 # Day 7 = 1
        # Calculate amt to be administered based on patient's BSA and DOSE3
        input.data$amt <- input.data$BSA*input$DOSE3
        # Simulate and convert to a data frame for downstream subsetting
        sim.data3 <- mod %>% data_set(input.data) %>% mrgsim(add = time)
        sim.data3 <- as.data.frame(sim.data3)
      }
    }
  ) # Brackets closing "withProgress"
}) # Brackets closing "Rsim.data3"
# Create a data frame that only contains the "PRED" data
Rpred.data3 <- reactive({
# Read in reactive expressions
sim.data3 <- Rsim.data3()
# Subset out only ID == 1 (PRED individual)
pred.data3 <- sim.data3[sim.data3$ID == 1,]
}) # Brackets closing "Rpred.data3"
# Summarise simulated data as prediction intervals when option is selected
Rsummary.data3 <- reactive({
# Read in reactive expressions
sim.data3 <- Rsim.data3()
# Summarise data
sim.data3 <- sim.data3[sim.data3$ID != 1,] # Do not include ID == 1 - they are PRED
summary.data3 <- ddply(sim.data3, .(time), summary.function)
}) # Brackets closing "Rsummary.data3"
############
##_OUTPUT_##
############
output$BMI.text <- renderUI({
input.data <- Rinput.data()
withMathJax(paste0("Body mass index = ",round(input.data$BMI[1],digits = 1)," \\(kg/m^2\\)"))
}) # Brackets closing "renderUI" expression
output$BSA.text <- renderUI({
input.data <- Rinput.data()
withMathJax(paste0("Body surface area = ",round(input.data$BSA[1],digits = 1)," \\(m^2\\)"))
}) # Brackets closing "renderUI" expression
output$FFM.text <- renderUI({
input.data <- Rinput.data()
withMathJax(paste0("Fat free mass = ",round(input.data$FFM[1],digits = 1)," \\(kg\\)"))
}) # Brackets closing "renderUI" expression
output$CRCL.text <- renderUI({
input.data <- Rinput.data()
withMathJax(paste0("Creatinine clearance = ",round(input.data$CRCL[1],digits = 1)," \\(mL/min\\)"))
}) # Brackets closing "renderUI" expression
# Simulation results for ANC
output$anc.plot <- renderPlot({
# Read in reactive data
pred.data1 <- Rpred.data1()
summary.data1 <- Rsummary.data1()
# Only read in reactive data if regimen has been selected
if (input$NREG > 1) {
pred.data2 <- Rpred.data2()
summary.data2 <- Rsummary.data2()
}
if (input$NREG > 2) {
pred.data3 <- Rpred.data3()
summary.data3 <- Rsummary.data3()
}
# Plot ANC over time
plotobj1 <- NULL
plotobj1 <- ggplot()
# Population predicted
plotobj1 <- plotobj1 + geom_line(aes(x = time,y = ANC),data = pred.data1,colour = "#F8766D",size = 1) # DOSE1
if (input$NREG > 1) plotobj1 <- plotobj1 + geom_line(aes(x = time,y = ANC),data = pred.data2,colour = "#619CFF",size = 1) # DOSE2
if (input$NREG > 2) plotobj1 <- plotobj1 + geom_line(aes(x = time,y = ANC),data = pred.data3,colour = "#00BA38",size = 1) # DOSE3
# 95% prediction intervals
if (input$PI == TRUE) {
plotobj1 <- plotobj1 + geom_ribbon(aes(x = time,ymin = CIlo_ANC,ymax = CIhi_ANC),data = summary.data1,fill = "#F8766D",alpha = 0.3) # DOSE1
if (input$NREG > 1) plotobj1 <- plotobj1 + geom_ribbon(aes(x = time,ymin = CIlo_ANC,ymax = CIhi_ANC),data = summary.data2,fill = "#619CFF",alpha = 0.3) # DOSE2
if (input$NREG > 2) plotobj1 <- plotobj1 + geom_ribbon(aes(x = time,ymin = CIlo_ANC,ymax = CIhi_ANC),data = summary.data3,fill = "#00BA38",alpha = 0.3) # DOSE3
}
# Grade 4 neutropenia
plotobj1 <- plotobj1 + geom_hline(aes(yintercept = 0.5),linetype = "dashed")
plotobj1 <- plotobj1 + annotate("text",x = 648,y = 0.6,label = "Grade 4 Neutropenia",size = 5)
# Axes
plotobj1 <- plotobj1 + scale_x_continuous("\nTime since Melphalan Dose (days)",breaks = seq(from = 0,to = max(time.PD),by = 100))
plotobj1 <- plotobj1 + scale_y_log10("Absolute Neutrophil Count (K/µL)\n",breaks = log.plot.breaks,labels = log.plot.breaks)
# Return plot
print(plotobj1)
}) # Brackets closing "renderPlot"
# Simulate results of time spent in Grade 4 neutropenia
# Plot the predicted time spent in Grade 4 neutropenia versus dose,
# one point (plus optional 95% interval error bar) per selected regimen.
output$g4n.plot <- renderPlot({
# Read in reactive data
pred.data1 <- Rpred.data1()
summary.data1 <- Rsummary.data1()
# Only read in reactive data if regimen has been selected
if (input$NREG > 1) {
pred.data2 <- Rpred.data2()
summary.data2 <- Rsummary.data2()
}
if (input$NREG > 2) {
pred.data3 <- Rpred.data3()
summary.data3 <- Rsummary.data3()
}
# Plot PRED time spent in G4N and error bars
plotobj2 <- NULL
plotobj2 <- ggplot()
# Population predicted; tail(..., 1) takes the cumulative G4N1 value at the
# final simulated time point
plotobj2 <- plotobj2 + geom_point(aes(x = input$DOSE1,y = tail(pred.data1$G4N1,1)),size = 3,colour = "#F8766D") # DOSE1
if (input$NREG > 1) plotobj2 <- plotobj2 + geom_point(aes(x = input$DOSE2,y = tail(pred.data2$G4N1,1)),size = 3,colour = "#619CFF") # DOSE2
if (input$NREG > 2) plotobj2 <- plotobj2 + geom_point(aes(x = input$DOSE3,y = tail(pred.data3$G4N1,1)),size = 3,colour = "#00BA38") # DOSE3
# 95% prediction intervals (error bars)
if (input$PI == TRUE) {
plotobj2 <- plotobj2 + geom_errorbar(aes(x = input$DOSE1,ymin = tail(summary.data1$CIlo_G4N1,1),ymax = tail(summary.data1$CIhi_G4N1,1)),width = 5,colour = "#F8766D") # DOSE1
if (input$NREG > 1) plotobj2 <- plotobj2 + geom_errorbar(aes(x = input$DOSE2,ymin = tail(summary.data2$CIlo_G4N1,1),ymax = tail(summary.data2$CIhi_G4N1,1)),width = 5,colour = "#619CFF") # DOSE2
if (input$NREG > 2) plotobj2 <- plotobj2 + geom_errorbar(aes(x = input$DOSE3,ymin = tail(summary.data3$CIlo_G4N1,1),ymax = tail(summary.data3$CIhi_G4N1,1)),width = 5,colour = "#00BA38") # DOSE3
}
# Axes: x-axis breaks track whichever doses have been selected
plotobj2 <- plotobj2 + xlab(expression(paste("Melphalan Dose (",mg/m^2,")")))
if (input$NREG == 1) plotobj2 <- plotobj2 + scale_x_continuous(breaks = c(input$DOSE1),labels = c(input$DOSE1))
if (input$NREG > 1) plotobj2 <- plotobj2 + scale_x_continuous(breaks = c(input$DOSE1,input$DOSE2),labels = c(input$DOSE1,input$DOSE2))
if (input$NREG > 2) plotobj2 <- plotobj2 + scale_x_continuous(breaks = c(input$DOSE1,input$DOSE2,input$DOSE3),labels = c(input$DOSE1,input$DOSE2,input$DOSE3))
plotobj2 <- plotobj2 + scale_y_continuous("Time Spent in Grade 4 Neutropenia (hours)\n")
# Return plot
print(plotobj2)
}) # Brackets closing "renderPlot"
# Simulation results for melphalan concentrations
output$melph.plot <- renderPlot({
# Read in reactive data
pred.data1 <- Rpred.data1()
summary.data1 <- Rsummary.data1()
# Only read in reactive data if regimen has been selected
if (input$NREG > 1) {
pred.data2 <- Rpred.data2()
summary.data2 <- Rsummary.data2()
}
if (input$NREG > 2) {
pred.data3 <- Rpred.data3()
summary.data3 <- Rsummary.data3()
}
# Plot ANC over time
plotobj3 <- NULL
plotobj3 <- ggplot()
# Population predicted
plotobj3 <- plotobj3 + geom_line(aes(x = time,y = IPRE),data = pred.data1,colour = "#F8766D",size = 1) # DOSE1
if (input$NREG > 1) plotobj3 <- plotobj3 + geom_line(aes(x = time,y = IPRE),data = pred.data2,colour = "#619CFF",size = 1) # DOSE2
if (input$NREG > 2) plotobj3 <- plotobj3 + geom_line(aes(x = time,y = IPRE),data = pred.data3,colour = "#00BA38",size = 1) # DOSE3
# 95% prediction intervals
if (input$PI == TRUE) {
plotobj3 <- plotobj3 + geom_ribbon(aes(x = time,ymin = CIlo_IPRE,ymax = CIhi_IPRE),data = summary.data1,fill = "#F8766D",alpha = 0.3) # DOSE1
if (input$NREG > 1) plotobj3 <- plotobj3 + geom_ribbon(aes(x = time,ymin = CIlo_IPRE,ymax = CIhi_IPRE),data = summary.data2,fill = "#619CFF",alpha = 0.3) # DOSE2
if (input$NREG > 2) plotobj3 <- plotobj3 + geom_ribbon(aes(x = time,ymin = CIlo_IPRE,ymax = CIhi_IPRE),data = summary.data3,fill = "#00BA38",alpha = 0.3) # DOSE3
}
# Axes
plotobj3 <- plotobj3 + scale_x_continuous("\nTime since Melphalan Dose (hours)",lim = c(0,13))
plotobj3 <- plotobj3 + scale_y_log10("Melphalan Concentration (mg/L)\n",lim = c(0.001,NA),breaks = log.plot.breaks,labels = log.plot.breaks)
# Return plot
print(plotobj3)
}) # Brackets closing "renderPlot"
# Summary of Time spent in Grade 4 Neutropenia for DOSE1
output$G4N1.text.DOSE1 <- renderText({
# Read in reactive data
pred.data1 <- Rpred.data1()
summary.data1 <- Rsummary.data1()
# Create a text object
pred.G4N1 <- round(tail(pred.data1$G4N1,1)) # PRED
G4N1.text.DOSE1 <- paste0("Duration in Grade 4 Neutropenia = ",pred.G4N1," hours")
if (input$PI == TRUE) {
CIlo.G4N1 <- round(tail(summary.data1$CIlo_G4N1,1)) # 2.5th percentile
CIhi.G4N1 <- round(tail(summary.data1$CIhi_G4N1,1)) # 97.5th percentile
G4N1.text.DOSE1 <- paste0("Duration in Grade 4 Neutropenia = ",pred.G4N1," hours (",CIlo.G4N1," - ",CIhi.G4N1,")")
}
G4N1.text.DOSE1
}) # Brackets closing "renderText"
# Summary of Time spent in Grade 4 Neutropenia for DOSE2
output$G4N1.text.DOSE2 <- renderText({
if (input$NREG > 1) {
# Read in reactive data
pred.data2 <- Rpred.data2()
summary.data2 <- Rsummary.data2()
# Create a text object
pred.G4N1 <- round(tail(pred.data2$G4N1,1)) # PRED
G4N1.text.DOSE2 <- paste0("Duration in Grade 4 Neutropenia = ",pred.G4N1," hours")
if (input$PI == TRUE) {
CIlo.G4N1 <- round(tail(summary.data2$CIlo_G4N1,1)) # 2.5th percentile
CIhi.G4N1 <- round(tail(summary.data2$CIhi_G4N1,1)) # 97.5th percentile
G4N1.text.DOSE2 <- paste0("Duration in Grade 4 Neutropenia = ",pred.G4N1," hours (",CIlo.G4N1," - ",CIhi.G4N1,")")
}
G4N1.text.DOSE2
}
}) # Brackets closing "renderText"
# Summary of Time spent in Grade 4 Neutropenia for DOSE3
# Render a one-line summary of the predicted duration of Grade 4
# neutropenia for the third regimen, with an optional 95% interval.
output$G4N1.text.DOSE3 <- renderText({
if (input$NREG > 2) {
# Read in reactive data
pred.data3 <- Rpred.data3()
summary.data3 <- Rsummary.data3()
# Create a text object
pred.G4N1 <- round(tail(pred.data3$G4N1,1)) # PRED
G4N1.text.DOSE3 <- paste0("Duration in Grade 4 Neutropenia = ",pred.G4N1," hours")
if (input$PI == TRUE) {
CIlo.G4N1 <- round(tail(summary.data3$CIlo_G4N1,1)) # 2.5th percentile
CIhi.G4N1 <- round(tail(summary.data3$CIhi_G4N1,1)) # 97.5th percentile
G4N1.text.DOSE3 <- paste0("Duration in Grade 4 Neutropenia = ",pred.G4N1," hours (",CIlo.G4N1," - ",CIhi.G4N1,")")
}
G4N1.text.DOSE3
}
}) # Brackets closing "renderText"
#############
##_SESSION_##
#############
# Close the R session when Chrome closes
session$onSessionEnded(function() {
stopApp()
})
output$session.info <- renderPrint({
# Load session information
session.info <- sessionInfo()
print(session.info)
}) # Brackets closing "renderText"
}) # Brackets closing "shinyServer" function
|
/MelphalanApp/server.R
|
no_license
|
wojjy001/melphalan-app
|
R
| false
| false
| 17,577
|
r
|
# server.R script for MelphalanApp
# Reactive objects (i.e., those dependent on widget input) are written here
# ------------------------------------------------------------------------------
# Define the "server" part of the Shiny application
shinyServer(function(input,output,session) {
###########
##_INPUT_##
###########
# Create an input data frame that stores input patient characteristics
# Will be used for the three different simulation scenarios
# Build the simulation input data frame from the widget values:
# recodes categorical covariates, derives secondary parameters
# (BMI, BSA, CRCL, FFM) and lays out one dosing event per individual
# via expand.ev(). "amt" and "GCSF" are filled in later, per regimen.
Rinput.data <- reactive({
# Call in user-defined widget values
AGE <- input$AGE # Numeric input for patient's age
TBW <- input$TBW # Numeric input for patient's total body weight
HT <- input$HT # Numeric input for patient's height
SECR <- input$SECR # Numeric input for patient's serum creatinine
HCT <- input$HCT # Numeric input for patient's haematocrit
ANCBASE <- input$ANCBASE # Numeric input for patient's baseline absolute neutrophil count
# NOTE(review): if a select input returns a code outside the values tested
# below (e.g. input$SEX not in 1:2), the corresponding variable is never
# assigned and expand.ev() will error — verify the ui restricts the choices.
if (input$SEX == 1) SEX <- 0 # Select input for patient's gender, female = 0
if (input$SEX == 2) SEX <- 1 # Select input for patient's gender, male = 1
if (input$RACE == 1 | input$RACE == 3) RACE <- 0 # Select input for patient's race, Caucasian and Unknown = 0
if (input$RACE == 2) RACE <- 1 # Select input for patient's race, African-American = 1
if (input$SLC7A5 == 1) SLC7A5 <- 0 # Select input for patient's SLC7A5 genotype, AA or AG = 0
if (input$SLC7A5 == 2) SLC7A5 <- 1 # Select input for patient's SLC7A5 genotype, GG = 1
# Calculate secondary parameters based on input
# Body mass index (BMI)
BMI <- TBW/(HT/100)^2 # Used to calculate fat free mass
# Body surface area (BSA)
BSA <- 0.007184*(TBW^0.425)*(HT^0.725) # Based on the Du Bois formula
# Creatinine clearance (CRCL) and fat free mass (FFM) based on gender
# CRCL follows the Cockcroft-Gault pattern (0.85 female correction);
# assumes SECR is in mg/dL — TODO confirm units against the ui.
if (SEX == 0) { # Females
CRCL <- (((140-AGE)*TBW)/(SECR*72))*0.85
FFM <- 9270*TBW/(8780+(244*BMI))
} else { # Males
CRCL <- ((140-AGE)*TBW)/(SECR*72)
FFM <- 9270*TBW/(6680+(216*BMI))
}
# Set up input.data
# Only columns missing values will be the amount to be administered and when G-CSF rescue was administered as each "sim.data" data frame will have a different value for "amt" and "G-CSF"
input.data <- expand.ev(
ID = 1:(n+1), # n individuals (plus an additional because the first ID is PRED)
time = 0, # time that melphalan dose will be administered
amt = NA, # amt in mg/m^2, currently amount per m^2 is unknown
evid = 1, # dosing event
cmt = 1, # dose into compartment 1, i.e., CENT
rate = -2, # infusion duration is specified in the model file
BSA = BSA, # Required to be stored to calculate "amt"
BMI = BMI, # Required to be stored for ui
FFM = FFM, # Fat free mass
CRCL = CRCL, # Creatinine clearance
HCT = HCT, # Haematocrit
ANCBASE = ANCBASE, # Baseline absolute neutrophil count
SEX = SEX, # Gender
RACE = RACE, # Race
SLC7A5 = SLC7A5, # Genotype
GCSF = 0 # Time of administration of G-CSF (will be different for different sim.data), default is on Day 1 (0)
)
}) # Brackets closing "Rinput.data"
###########
##_DOSE1_##
###########
# Simulate a population based on input characteristics
# Will have it's own specific dose and time of G-CSF administration
Rsim.data1 <- reactive({
withProgress(
message = "Simulating profiles...",
value = 0,
{
# Read in reactive input.data
input.data <- Rinput.data()
# Read in simulation specific value for G-CSF
if (input$GCSF1 == 2) input.data$GCSF <- 1 # Select input for when to administer G-CSF (Neupogen), Day 7 = 1
# Calculate amt to be administered based on patient's BSA and DOSE1
input.data$amt <- input.data$BSA*input$DOSE1
# Simulate
sim.data1 <- mod %>% data_set(input.data) %>% mrgsim(add = time)
sim.data1 <- as.data.frame(sim.data1) #Convert to a data frame so that it is more useful for me!
}
) # Brackets closing "withProgress"
}) # Brackets closing "Rsim.data1"
# Create a data frame that only contains the "PRED" data
Rpred.data1 <- reactive({
  # Retrieve the complete simulated population for the first regimen
  sim.out <- Rsim.data1()
  # The first simulated individual (ID == 1) carries the population
  # prediction (PRED); return only that individual's rows
  sim.out[sim.out$ID == 1, ]
}) # Brackets closing "Rpred.data1"
# Summarise simulated data as prediction intervals when option is selected
Rsummary.data1 <- reactive({
# Read in reactive expressions
sim.data1 <- Rsim.data1()
# Summarise data
sim.data1 <- sim.data1[sim.data1$ID != 1,] # Do not include ID == 1 - they are PRED
summary.data1 <- ddply(sim.data1, .(time), summary.function)
}) # Brackets closing "Rsummary.data1"
###########
##_DOSE2_##
###########
# Simulate a population based on input characteristics
# Will have it's own specific dose and time of G-CSF administration
Rsim.data2 <- reactive({
withProgress(
message = "Simulating profiles...",
value = 0,
{
if (input$NREG > 1) {
# Read in reactive input.data
input.data <- Rinput.data()
# Read in simulation specific value for G-CSF
if (input$GCSF2 == 2) input.data$GCSF <- 1 # Select input for when to administer G-CSF (Neupogen), Day 7 = 1
# Calculate amt to be administered based on patient's BSA and DOSE1
input.data$amt <- input.data$BSA*input$DOSE2
# Simulate
sim.data2 <- mod %>% data_set(input.data) %>% mrgsim(add = time)
sim.data2 <- as.data.frame(sim.data2) #Convert to a data frame so that it is more useful for me!
}
}
) # Brackets closing "withProgress"
}) # Brackets closing "Rsim.data2"
# Create a data frame that only contains the "PRED" data
Rpred.data2 <- reactive({
# Read in reactive expressions
sim.data2 <- Rsim.data2()
# Subset out only ID == 1 (PRED individual)
pred.data2 <- sim.data2[sim.data2$ID == 1,]
}) # Brackets closing "Rpred.data2"
# Summarise simulated data as prediction intervals when option is selected
Rsummary.data2 <- reactive({
# Read in reactive expressions
sim.data2 <- Rsim.data2()
# Summarise data
sim.data2 <- sim.data2[sim.data2$ID != 1,] # Do not include ID == 1 - they are PRED
summary.data2 <- ddply(sim.data2, .(time), summary.function)
}) # Brackets closing "Rsummary.data2"
###########
##_DOSE3_##
###########
# Simulate a population based on input characteristics
# Will have it's own specific dose and time of G-CSF administration
# Simulate a population for the THIRD dosing regimen.
# Only runs when a third regimen has been selected (input$NREG > 2);
# returns the mrgsim output coerced to a data frame.
Rsim.data3 <- reactive({
  withProgress(
    message = "Simulating profiles...",
    value = 0,
    {
      if (input$NREG > 2) {
        # Read in reactive input.data
        input.data <- Rinput.data()
        # Read in simulation specific value for G-CSF
        # Fix: the "Day 7" choice is coded as 2 for the matching widgets of
        # regimens 1 and 2 (input$GCSF1 == 2, input$GCSF2 == 2); the original
        # tested input$GCSF3 == 3, so Day-7 G-CSF could never be applied here.
        if (input$GCSF3 == 2) input.data$GCSF <- 1 # Day 7 = 1
        # Calculate amt to be administered based on patient's BSA and DOSE3
        input.data$amt <- input.data$BSA*input$DOSE3
        # Simulate and convert to a data frame for downstream subsetting
        sim.data3 <- mod %>% data_set(input.data) %>% mrgsim(add = time)
        sim.data3 <- as.data.frame(sim.data3)
      }
    }
  ) # Brackets closing "withProgress"
}) # Brackets closing "Rsim.data3"
# Create a data frame that only contains the "PRED" data
Rpred.data3 <- reactive({
# Read in reactive expressions
sim.data3 <- Rsim.data3()
# Subset out only ID == 1 (PRED individual)
pred.data3 <- sim.data3[sim.data3$ID == 1,]
}) # Brackets closing "Rpred.data3"
# Summarise simulated data as prediction intervals when option is selected
Rsummary.data3 <- reactive({
# Read in reactive expressions
sim.data3 <- Rsim.data3()
# Summarise data
sim.data3 <- sim.data3[sim.data3$ID != 1,] # Do not include ID == 1 - they are PRED
summary.data3 <- ddply(sim.data3, .(time), summary.function)
}) # Brackets closing "Rsummary.data3"
############
##_OUTPUT_##
############
output$BMI.text <- renderUI({
input.data <- Rinput.data()
withMathJax(paste0("Body mass index = ",round(input.data$BMI[1],digits = 1)," \\(kg/m^2\\)"))
}) # Brackets closing "renderUI" expression
output$BSA.text <- renderUI({
input.data <- Rinput.data()
withMathJax(paste0("Body surface area = ",round(input.data$BSA[1],digits = 1)," \\(m^2\\)"))
}) # Brackets closing "renderUI" expression
output$FFM.text <- renderUI({
input.data <- Rinput.data()
withMathJax(paste0("Fat free mass = ",round(input.data$FFM[1],digits = 1)," \\(kg\\)"))
}) # Brackets closing "renderUI" expression
output$CRCL.text <- renderUI({
input.data <- Rinput.data()
withMathJax(paste0("Creatinine clearance = ",round(input.data$CRCL[1],digits = 1)," \\(mL/min\\)"))
}) # Brackets closing "renderUI" expression
# Simulation results for ANC
output$anc.plot <- renderPlot({
# Read in reactive data
pred.data1 <- Rpred.data1()
summary.data1 <- Rsummary.data1()
# Only read in reactive data if regimen has been selected
if (input$NREG > 1) {
pred.data2 <- Rpred.data2()
summary.data2 <- Rsummary.data2()
}
if (input$NREG > 2) {
pred.data3 <- Rpred.data3()
summary.data3 <- Rsummary.data3()
}
# Plot ANC over time
plotobj1 <- NULL
plotobj1 <- ggplot()
# Population predicted
plotobj1 <- plotobj1 + geom_line(aes(x = time,y = ANC),data = pred.data1,colour = "#F8766D",size = 1) # DOSE1
if (input$NREG > 1) plotobj1 <- plotobj1 + geom_line(aes(x = time,y = ANC),data = pred.data2,colour = "#619CFF",size = 1) # DOSE2
if (input$NREG > 2) plotobj1 <- plotobj1 + geom_line(aes(x = time,y = ANC),data = pred.data3,colour = "#00BA38",size = 1) # DOSE3
# 95% prediction intervals
if (input$PI == TRUE) {
plotobj1 <- plotobj1 + geom_ribbon(aes(x = time,ymin = CIlo_ANC,ymax = CIhi_ANC),data = summary.data1,fill = "#F8766D",alpha = 0.3) # DOSE1
if (input$NREG > 1) plotobj1 <- plotobj1 + geom_ribbon(aes(x = time,ymin = CIlo_ANC,ymax = CIhi_ANC),data = summary.data2,fill = "#619CFF",alpha = 0.3) # DOSE2
if (input$NREG > 2) plotobj1 <- plotobj1 + geom_ribbon(aes(x = time,ymin = CIlo_ANC,ymax = CIhi_ANC),data = summary.data3,fill = "#00BA38",alpha = 0.3) # DOSE3
}
# Grade 4 neutropenia
plotobj1 <- plotobj1 + geom_hline(aes(yintercept = 0.5),linetype = "dashed")
plotobj1 <- plotobj1 + annotate("text",x = 648,y = 0.6,label = "Grade 4 Neutropenia",size = 5)
# Axes
plotobj1 <- plotobj1 + scale_x_continuous("\nTime since Melphalan Dose (days)",breaks = seq(from = 0,to = max(time.PD),by = 100))
plotobj1 <- plotobj1 + scale_y_log10("Absolute Neutrophil Count (K/µL)\n",breaks = log.plot.breaks,labels = log.plot.breaks)
# Return plot
print(plotobj1)
}) # Brackets closing "renderPlot"
# Simulate results of time spent in Grade 4 neutropenia
# Plot the predicted time spent in Grade 4 neutropenia versus dose,
# one point (plus optional 95% interval error bar) per selected regimen.
output$g4n.plot <- renderPlot({
# Read in reactive data
pred.data1 <- Rpred.data1()
summary.data1 <- Rsummary.data1()
# Only read in reactive data if regimen has been selected
if (input$NREG > 1) {
pred.data2 <- Rpred.data2()
summary.data2 <- Rsummary.data2()
}
if (input$NREG > 2) {
pred.data3 <- Rpred.data3()
summary.data3 <- Rsummary.data3()
}
# Plot PRED time spent in G4N and error bars
plotobj2 <- NULL
plotobj2 <- ggplot()
# Population predicted; tail(..., 1) takes the cumulative G4N1 value at the
# final simulated time point
plotobj2 <- plotobj2 + geom_point(aes(x = input$DOSE1,y = tail(pred.data1$G4N1,1)),size = 3,colour = "#F8766D") # DOSE1
if (input$NREG > 1) plotobj2 <- plotobj2 + geom_point(aes(x = input$DOSE2,y = tail(pred.data2$G4N1,1)),size = 3,colour = "#619CFF") # DOSE2
if (input$NREG > 2) plotobj2 <- plotobj2 + geom_point(aes(x = input$DOSE3,y = tail(pred.data3$G4N1,1)),size = 3,colour = "#00BA38") # DOSE3
# 95% prediction intervals (error bars)
if (input$PI == TRUE) {
plotobj2 <- plotobj2 + geom_errorbar(aes(x = input$DOSE1,ymin = tail(summary.data1$CIlo_G4N1,1),ymax = tail(summary.data1$CIhi_G4N1,1)),width = 5,colour = "#F8766D") # DOSE1
if (input$NREG > 1) plotobj2 <- plotobj2 + geom_errorbar(aes(x = input$DOSE2,ymin = tail(summary.data2$CIlo_G4N1,1),ymax = tail(summary.data2$CIhi_G4N1,1)),width = 5,colour = "#619CFF") # DOSE2
if (input$NREG > 2) plotobj2 <- plotobj2 + geom_errorbar(aes(x = input$DOSE3,ymin = tail(summary.data3$CIlo_G4N1,1),ymax = tail(summary.data3$CIhi_G4N1,1)),width = 5,colour = "#00BA38") # DOSE3
}
# Axes: x-axis breaks track whichever doses have been selected
plotobj2 <- plotobj2 + xlab(expression(paste("Melphalan Dose (",mg/m^2,")")))
if (input$NREG == 1) plotobj2 <- plotobj2 + scale_x_continuous(breaks = c(input$DOSE1),labels = c(input$DOSE1))
if (input$NREG > 1) plotobj2 <- plotobj2 + scale_x_continuous(breaks = c(input$DOSE1,input$DOSE2),labels = c(input$DOSE1,input$DOSE2))
if (input$NREG > 2) plotobj2 <- plotobj2 + scale_x_continuous(breaks = c(input$DOSE1,input$DOSE2,input$DOSE3),labels = c(input$DOSE1,input$DOSE2,input$DOSE3))
plotobj2 <- plotobj2 + scale_y_continuous("Time Spent in Grade 4 Neutropenia (hours)\n")
# Return plot
print(plotobj2)
}) # Brackets closing "renderPlot"
# Simulation results for melphalan concentrations
output$melph.plot <- renderPlot({
# Read in reactive data
pred.data1 <- Rpred.data1()
summary.data1 <- Rsummary.data1()
# Only read in reactive data if regimen has been selected
if (input$NREG > 1) {
pred.data2 <- Rpred.data2()
summary.data2 <- Rsummary.data2()
}
if (input$NREG > 2) {
pred.data3 <- Rpred.data3()
summary.data3 <- Rsummary.data3()
}
# Plot ANC over time
plotobj3 <- NULL
plotobj3 <- ggplot()
# Population predicted
plotobj3 <- plotobj3 + geom_line(aes(x = time,y = IPRE),data = pred.data1,colour = "#F8766D",size = 1) # DOSE1
if (input$NREG > 1) plotobj3 <- plotobj3 + geom_line(aes(x = time,y = IPRE),data = pred.data2,colour = "#619CFF",size = 1) # DOSE2
if (input$NREG > 2) plotobj3 <- plotobj3 + geom_line(aes(x = time,y = IPRE),data = pred.data3,colour = "#00BA38",size = 1) # DOSE3
# 95% prediction intervals
if (input$PI == TRUE) {
plotobj3 <- plotobj3 + geom_ribbon(aes(x = time,ymin = CIlo_IPRE,ymax = CIhi_IPRE),data = summary.data1,fill = "#F8766D",alpha = 0.3) # DOSE1
if (input$NREG > 1) plotobj3 <- plotobj3 + geom_ribbon(aes(x = time,ymin = CIlo_IPRE,ymax = CIhi_IPRE),data = summary.data2,fill = "#619CFF",alpha = 0.3) # DOSE2
if (input$NREG > 2) plotobj3 <- plotobj3 + geom_ribbon(aes(x = time,ymin = CIlo_IPRE,ymax = CIhi_IPRE),data = summary.data3,fill = "#00BA38",alpha = 0.3) # DOSE3
}
# Axes
plotobj3 <- plotobj3 + scale_x_continuous("\nTime since Melphalan Dose (hours)",lim = c(0,13))
plotobj3 <- plotobj3 + scale_y_log10("Melphalan Concentration (mg/L)\n",lim = c(0.001,NA),breaks = log.plot.breaks,labels = log.plot.breaks)
# Return plot
print(plotobj3)
}) # Brackets closing "renderPlot"
# Summary of Time spent in Grade 4 Neutropenia for DOSE1
output$G4N1.text.DOSE1 <- renderText({
# Read in reactive data
pred.data1 <- Rpred.data1()
summary.data1 <- Rsummary.data1()
# Create a text object
pred.G4N1 <- round(tail(pred.data1$G4N1,1)) # PRED
G4N1.text.DOSE1 <- paste0("Duration in Grade 4 Neutropenia = ",pred.G4N1," hours")
if (input$PI == TRUE) {
CIlo.G4N1 <- round(tail(summary.data1$CIlo_G4N1,1)) # 2.5th percentile
CIhi.G4N1 <- round(tail(summary.data1$CIhi_G4N1,1)) # 97.5th percentile
G4N1.text.DOSE1 <- paste0("Duration in Grade 4 Neutropenia = ",pred.G4N1," hours (",CIlo.G4N1," - ",CIhi.G4N1,")")
}
G4N1.text.DOSE1
}) # Brackets closing "renderText"
# Summary of Time spent in Grade 4 Neutropenia for DOSE2
output$G4N1.text.DOSE2 <- renderText({
if (input$NREG > 1) {
# Read in reactive data
pred.data2 <- Rpred.data2()
summary.data2 <- Rsummary.data2()
# Create a text object
pred.G4N1 <- round(tail(pred.data2$G4N1,1)) # PRED
G4N1.text.DOSE2 <- paste0("Duration in Grade 4 Neutropenia = ",pred.G4N1," hours")
if (input$PI == TRUE) {
CIlo.G4N1 <- round(tail(summary.data2$CIlo_G4N1,1)) # 2.5th percentile
CIhi.G4N1 <- round(tail(summary.data2$CIhi_G4N1,1)) # 97.5th percentile
G4N1.text.DOSE2 <- paste0("Duration in Grade 4 Neutropenia = ",pred.G4N1," hours (",CIlo.G4N1," - ",CIhi.G4N1,")")
}
G4N1.text.DOSE2
}
}) # Brackets closing "renderText"
# Summary of Time spent in Grade 4 Neutropenia for DOSE3
# Render a one-line summary of the predicted duration of Grade 4
# neutropenia for the third regimen, with an optional 95% interval.
output$G4N1.text.DOSE3 <- renderText({
if (input$NREG > 2) {
# Read in reactive data
pred.data3 <- Rpred.data3()
summary.data3 <- Rsummary.data3()
# Create a text object
pred.G4N1 <- round(tail(pred.data3$G4N1,1)) # PRED
G4N1.text.DOSE3 <- paste0("Duration in Grade 4 Neutropenia = ",pred.G4N1," hours")
if (input$PI == TRUE) {
CIlo.G4N1 <- round(tail(summary.data3$CIlo_G4N1,1)) # 2.5th percentile
CIhi.G4N1 <- round(tail(summary.data3$CIhi_G4N1,1)) # 97.5th percentile
G4N1.text.DOSE3 <- paste0("Duration in Grade 4 Neutropenia = ",pred.G4N1," hours (",CIlo.G4N1," - ",CIhi.G4N1,")")
}
G4N1.text.DOSE3
}
}) # Brackets closing "renderText"
#############
##_SESSION_##
#############
# Close the R session when Chrome closes
session$onSessionEnded(function() {
stopApp()
})
output$session.info <- renderPrint({
# Load session information
session.info <- sessionInfo()
print(session.info)
}) # Brackets closing "renderText"
}) # Brackets closing "shinyServer" function
|
# Data Analysis with R
# Rick Scavetta
# 03.09.2018
# QBM R workshop for MSc
# Clear workspace (environment)
rm(list = ls())
# Load packages
library(tidyverse)
# Basic R syntax:
n <- log2(8) # 2 to the power of what = 8
n
log2(8)
# A Simple Case Study
# Access a built-in dataset:
PlantGrowth
# Explore our data:
# What are the group?
levels(PlantGrowth$group)
# How many groups?
nlevels(PlantGrowth$group)
# Two broad types of variables:
# Continuous (aka quantitative) i.e. weight
# Categorical (aka qualitative, discrete, factor) i.e. group
# Categorical has "groups" or "levels"
# Descriptive statistics:
# Mean of ALL values
mean(PlantGrowth$weight)
# group-wise descriptive stats:
# use ctrl + shift + m to get %>%
# %>% is the "pipe operator" (say "and then...")
PlantGrowth %>%
group_by(group) %>% # comment
summarise(avg = mean(weight),
stdev = sd(weight),
n = n()) -> PGSummary
# only use -> in the tidyverse context
# Making plots: using ggplot2 functions
# 3 parts - Data, Aesthetics, Geometries
# Aesthetics: aes(), MAPPING data onto a visual scale (axis)
# e.g. x, y, ymin, ymax, col, shape, size, ...
# Geometries: How will the data look?
# 1 - All data points
ggplot(PlantGrowth, aes(x = group, y = weight)) +
geom_jitter(width = 0.2, alpha = 0.65)
# alpha controls transparency
# 2 - summary statistics:
# 2a - mean and sd:
ggplot(PGSummary, aes(x = group,
y = avg,
ymin = avg - stdev,
ymax = avg + stdev)) +
geom_pointrange()
# geom_col() +
# geom_errorbar(width = 0.2)
# 2b - box plots:
ggplot(PlantGrowth, aes(x = group, y = weight)) +
geom_boxplot()
# Do Stats:
# Group differences:
# First, build a linear model
# y ~ x means "y as described by x"
# response ~ predictor
plant.lm <- lm(weight ~ group, data = PlantGrowth)
plant.lm
# shortcut to t-tests: use summary
summary(plant.lm) # of a linear model
# summary() is just a generic function
summary(PlantGrowth) # of a dataset (i.e. dataframe)
# So how to do a direct t-test?
sleep
# The data is "paired" i.e. same individual on each treatment:
t.test(extra ~ group, data = sleep, paired = TRUE)
# One-way ANOVA:
anova(plant.lm) # gives ANOVA table
# One last way: Compare all pair-wise t-test
# Tukey Post-hoc test:
# to do this, set up ANOVA in a different way:
# use aov() instead of lm()
plant.aov <- aov(weight ~ group, data = PlantGrowth)
summary(plant.aov) # of an aov object give ANOVA table
TukeyHSD(plant.aov) # All pair-wise t-tests:
# Element 2: Functions
# Everything that happens, is because of a function
# Arithmetic operators
# +, -, *, /, ^
34 + 6
# this is actually a function
`+`(34, 6)
# Order or operations
# BEDMAS - brackets, exp, div, mult, add, sub
2 - 3/4 # 1.25
(2 - 3)/4 # -0.25
# Make some objects
n <- 34
p <- 6
# use them like numbers
n + p
# Form of functions:
# fun_name(fun_args)
# fun_args can be:
# named or unnamed (positional matching)
log2(8)
log2(x = 8)
log(x = 8, base = 2)
log(8, 2) # positional matching
log(8, base = 2) # combination, typical
# Some basic and common functions:
# Combine/concatenate: unnamed arguments
xx <- c(3, 8, 9 , 23)
xx
myNames <- c("healthy", "tissue", "quantity")
myNames
# Sequential numbers: seq()
seq(from = 1, to = 100, by = 7)
foo1 <- seq(1, 100, 7)
# of course, we can use objects in functions:
foo2 <- seq(1, n, p)
# trick: a regular interval of 1
# the colon operator
1:10
seq(1, 10, 1)
# Two major types of math functions:
# 1 - Transformation functions (e.g. log)
# output length == input length
# EVERY value is treated the same way
log(foo1)
# 2 - Aggregration functions (e.g. sum, mean)
# output lenght typically 1 (or a few) number(s)
# e.g.
mean(foo1)
median(foo1)
sum(foo1)
# Exercise 6.1
foo2
foo2 + 100 # trans
foo2 + foo2 # trans
sum(foo2) + foo2 # agg followed by trans
1:3 + foo2 # trans
1:4 + foo2
############### Key Concept in R
############### Vector Recycling!
# it's why this works:
# z-scores:
(foo1 - mean(foo1))/sd(foo1)
# Short cut:
scale(foo1)
# Exercise 6.2: linear model transformation
# y = mx + b
m <- 1.12
b <- -0.4
xx
m * xx + b
# What if... I had two m values
m2 <- c(0, 1.12)
# I want to get 8 numbers as output
0 * xx + b
1.12 * xx + b
# but... I get only one series
m2 * xx + b
# How can I reiterate a function over all values
# (or subsets) of a data set?
# 1 - Make your own function
# Fixed linear transformation from the tutorial: y = 1.12 * x - 0.4,
# applied element-wise to a numeric vector.
equation <- function(x) {
  slope <- 1.12
  intercept <- -0.4
  slope * x + intercept
}
equation(xx)
# Exercise 6.4 - defining a function
# With default values
# Evaluate the straight line m * x + b element-wise over x.
# Defaults (m = 1.12, b = -0.4) reproduce the tutorial's fixed model.
lin <- function(x, m = 1.12, b = -0.4) {
  scaled <- x * m
  scaled + b
}
# m & b in our function are NOT from the environment
rm(m) # delete m
rm(b) # delete b
lin(xx) # Only one arg, m & b use defaults
lin(xx, 5, 60) # All args
lin(xx, b = 10) # Only two args, m uses default
# Examples of functions with no arguments
ls()
list.files()
lin(xx, m = c(0, 1.12)) # still not working,
# We need to reiterate over the m values
# 2 - Using map() from the purrr package (part of tidyverse)
# "map" each "element" of m2 onto a function:
# use . for a place-holder
# use ~ to define which function
map(m2, ~ lin(xx, .))
# Element 3: Objects
# Anything that exists, is an object
# Common data storage:
# Vectors - 1 Dimensional, Homogenous data types
# e.g.
foo1 # 15 elements
foo2 # 6 elements
myNames # 3 elements
# 4 most common user-defined Atomic Vector Types
# Logical - binary, boolean (TRUE/FALSE or T/F)
# Integer - whole numbers
# Double - numbers with decimals
# Character (aka strings)
# "Numeric" refers to both interger and double
# not really common: raw, complex
test <- c(1:10, "bob")
test
# to find out the type use typof()
typeof(test)
typeof(foo1)
# make a couple more objects
foo3 <- c("Liver", "Brain", "Testes", "Muscle",
"Intestine", "Heart")
typeof(foo3)
foo4 <- c(T, F, F, T, T, F)
typeof(foo4)
# Lists: 1 Dimensional, Heterogenous data types
# e.g.
typeof(plant.lm)
# Take a look inside and getting values:
# 1 - Use attributes (like metadata):
attributes(plant.lm)
# 2 - Use accessor functions:
names(plant.lm)
class(plant.lm)
# 3 - Use the $ for names elements:
plant.lm$coefficients # A named numeric vector of 3 elements
plant.lm$residuals # A numeric vector, 30 elements
# Data Frame - 2 Dimensional, Heterogenous
# A special class of type list
# A collection of vertical vectors of the same lengths
# e.g.
PlantGrowth
typeof(PlantGrowth)
class(PlantGrowth)
# Columns == variables
# Rows == observations
# Make one from scratch:
foo.df <- data.frame(foo4, foo3, foo2)
foo.df
attributes(foo.df) # 3-element long char vector
myNames
names(foo.df) <- myNames
attributes(foo.df)
# Access names elements using $ notation:
foo.df$quantity
# Some typical functions:
# Examine data:
summary(foo.df)
str(foo.df) # structure
glimpse(foo.df) # from tidyverse, dplyr package
dim(foo.df) # (row, col)
nrow(foo.df)
ncol(foo.df)
# don't use length()
length(foo.df) # gives the number of elements in the list
# Two most common problems in R:
# 1 - Wrong data type (in vectors)
# solution - examine is.*() and coerce as.*()
# replace * with type
test
mean(test)
is.numeric(test)
is.na(test)
test <- as.numeric(test)
mean(test, na.rm = T)
# 2 - Wrong structure or format
# solution - rearrange data
# Element 4: Logical Expressions
# Asking and combining (Yes/No) questions
# Relational operators
# == test for equivalence
# != test for non-equivalence
# >, <, >=, <=
# !x, negation of x, where x is a logical vector
# ALWAYS results in a logical vector
n
p
n > p
n < p
!foo4
# Logical operators: Combine Yes/No questions
# & AND - a TRUE in EVERY question
# | OR - a TRUE in at least ONE question
# %in% WITHIN
# Examples with different data types:
# old school: subset()
# new way: dplyr::filter()
# Logical variables
# All healthy
foo.df %>%
filter(healthy)
# All unhealthy
foo.df %>%
filter(!healthy)
# Numeric variables (Int of Dbl)
# Below 10
foo.df %>%
filter(quantity < 10)
# Exactly 31
foo.df %>%
filter(quantity == 31)
# Range between 10 - 20
foo.df %>%
filter(quantity > 10 & quantity < 20)
# Meaningless
foo.df %>%
filter(quantity > 10 | quantity < 20)
# Tail ends (beyond [10,20])
foo.df %>%
filter(quantity < 10 | quantity > 20)
# Impossible
foo.df %>%
filter(quantity < 10 & quantity > 20)
# Character variables
# NO pattern matching
# Heart Samples:
foo.df %>%
filter(tissue == "Heart")
# Liver and Heart Samples:
# Cheap and easy way :)
foo.df %>%
filter(tissue == "Heart" | tissue == "Liver")
# More efficient: vector recycling
# This doesn't work:
foo.df %>%
filter(tissue == c("Heart", "Liver"))
# But...
foo.df %>%
filter(tissue == c("Liver", "Heart"))
# So the real way...
# These are equivalent:
foo.df %>%
filter(tissue %in% c("Heart", "Liver"))
foo.df %>%
filter(tissue %in% c("Liver", "Heart"))
# Element 5: Indexing
# Finding information according to position using []
# Vectors:
foo1
foo1[6] # The sixth value
foo1[p] # The pth value, p == 6
foo1[3:p] # 3rd to pth values
foo1[p:length(foo1)] # pth to last value
# use combinations of
# integers, objects, functions, etc...
# But, the exciting part is ... logical vectors!
# i.e. the result of logical expressions
# all values less than 50
foo1[foo1 < 50]
# Data frames: 2 dimensions so use [rows, cols]
foo.df[3, ] # 3rd row, ALL cols
foo.df[ ,3] # ALL rows, 3rd col by number
foo.df[ ,"quantity"] # ALL rows, 3rd col by name
# no comma is a short cut to access columns
foo.df[3]
foo.df["quantity"]
# But compare this to:
foo.df[ ,3]
# R switched to a vector! To prevent this
# use a tibble
foo.df <- as_tibble(foo.df)
# The data frame always remains a data frame:
foo.df[ ,3]
# Can I have a comma with a 1D vector
foo1[,6]
foo1[6,]
# Error!
# Exercises:
# use [] or filter()
# or... even subset() (common but old)
# 1 - 3rd to the 6th rows, only quantity
foo.df[3:6,3]
foo.df[3:6,"quantity"] # a nicer way
foo.df$quantity[3:6] # as a vector
# 2 - Everything except the healthy column
foo.df[,2:3]
foo.df[,-1]
foo.df[,names(foo.df) != "healthy"]
foo.df[,c("tissue", "quantity")]
# also...
foo.df[,-(c(1,3))] # exclude more than one column
# 3 - Tissues that have a quantity less than 10
foo.df[foo.df$quantity < 10, "tissue"]
foo.df$tissue[foo.df$quantity < 10]
# 4 - Which tissue has the highest quantity?
max(foo.df$quantity) # gives actual value
which.max(foo.df$quantity) # Where is it?
foo.df$tissue[which.max(foo.df$quantity)] # index it
# Element 8: Factor Variables (with levels)
# aka categorical, discrete, qualitative (with groups)
# Factor is a special class of type integer
# with labels:
# e.g.
PlantGrowth$group
typeof(PlantGrowth$group) # "integer"
class(PlantGrowth$group) # "factor"
# you can see it here:
str(PlantGrowth)
# some problems:
foo3 # character
foo.df$tissue # factor
str(foo.df)
# convert to a character:
as.character(PlantGrowth$group) # The labels for each level
as.integer(PlantGrowth$group) # The actual value of the level
# Element 9: Tidy Data
source("PlayData.R")
# Make the data tidy using the tidyr package
# Part of the tidyverse
# gather() with 4 arguments:
# 1 - data
# 2&3 - key, value (the names of the OUTPUT columns)
# 4 - either the ID or the MEASURE variables
gather(PlayData, key, value, -c(type, time)) # give ID vars
gather(PlayData, key, value, c(height, width)) # give MEASURE vars
# Assign to new Data Frame
PlayData.t <- gather(PlayData, key, value, -c(type, time))
# To do transformations, it's easiest to have two columns:
# Scenario 1: According to measure and type
PlayData.t %>%
spread(type, value) -> scenario1
scenario1$A/scenario1$B
# Scenario 2: According to measure and time
PlayData.t %>%
spread(time, value) -> scenario2
scenario2$`1`/scenario2$`2`
# Scenario 3: According to type and time
# Already possible with the raw data
PlayData$height/PlayData$width
# Element 10: The dplyr functions
# Go to the SILAC protein project for examples
# dplyr functions:
# 2e - summarise(), for Aggregration functions
# 2 - the group_by() adverb
# Apply aggregration functions:
# Scenario 1: According to measure and type
PlayData.t %>%
group_by(key, type) %>%
summarise(avg = mean(value))
# Scenario 2: According to measure and time
PlayData.t %>%
group_by(key, time) %>%
summarise(avg = mean(value))
# Scenario 3: According to type and time
PlayData.t %>%
group_by(time, type) %>%
summarise(avg = mean(value))
#########
# Regular Expressions
# Find patterns in strings or numbers
source("genes.R")
# This makes:
genes
# Find motif: "GGGCCC"
genes == "GGGCCC"
# wrong - This is not pattern matching
# grep - global regular expression, print
grep("GGGCCC", genes) # which gene contains the sequence
# Make use of Regular Expressions:
# Exactly as above, but using special characters
# "{3}" means exactly 3
grep("G{3}C{3}", genes)
grepl("G{3}C{3}", genes)
library(stringr)
str_detect(genes, "G{3}C{3}") # logical vector, like grepl
str_locate(genes, "G{3}C{3}") # logical vector, like grepl
# Less strict: "." means "anything"
grep("G.{4}C", genes)
# Some examples:
genes <- c("alpha4", "p53", "CDC53", "Agft-4", "cepb2")
genes
# Which genes that begin with c (either C, c)
str_extract(genes, regex("^c.*", ignore_case = TRUE))
# Some last examples:
# Indexing and data types review and expansion
# Vectors
foo1[5] # 5th value
# Data frames
foo.df[3,] # 3rd row
# Make a list and index it:
myList <- list(A = 1:3,
B = 56:85,
C = foo.df,
D = plant.lm)
# typeof(myList)
# typeof(plant.lm)
myList$C # view a part using $
# What about []? this always results in a list!
myList[3] # using numbers
myList["C"] # using names
# If you want the actual object as itself, use [[]]
myList[[3]] # using numbers
myList[["C"]] # using names
# Access columns directly:
myList[[3]]$tissue # typical
myList[3]$C$tissue # :/ kinda confusing
# Matrices
# 2D vector - only one data type!
myMatrix <- matrix(1:12, ncol = 3)
myMatrix
myMatrix[,3]
colSums(myMatrix)
rowSums(myMatrix)
# how to find peaks in an ordered vector:
# e.g.
yy <- c(1:5,0:-4,0:7,3:1)
myDF <- data.frame(xx = seq_along(yy),
yy = yy)
plot(myDF)
# find peaks in
diff(sign(diff(myDF$yy)))
# Peaks produce -2
# Minima produce 2
# So where are they?
which(diff(sign(diff(myDF$yy))) == -2) + 1 # peaks
which(diff(sign(diff(myDF$yy))) == 2) + 1 # minima
|
/Main R Tutorial.R
|
no_license
|
Scavetta/QBM_MSc
|
R
| false
| false
| 14,381
|
r
|
# Data Analysis with R
# Rick Scavetta
# 03.09.2018
# QBM R workshop for MSc
# Clear workspace (environment)
rm(list = ls())
# Load packages
library(tidyverse)
# Basic R syntax:
n <- log2(8) # 2 to the power of what = 8
n
log2(8)
# A Simple Case Study
# Access a built-in dataset:
PlantGrowth
# Explore our data:
# What are the group?
levels(PlantGrowth$group)
# How many groups?
nlevels(PlantGrowth$group)
# Two broad types of variables:
# Continuous (aka quantitative) i.e. weight
# Categorical (aka qualitative, discrete, factor) i.e. group
# Categorical has "groups" or "levels"
# Descriptive statistics:
# Mean of ALL values
mean(PlantGrowth$weight)
# group-wise descriptive stats:
# use ctrl + shift + m to get %>%
# %>% is the "pipe operator" (say "and then...")
PlantGrowth %>%
group_by(group) %>% # comment
summarise(avg = mean(weight),
stdev = sd(weight),
n = n()) -> PGSummary
# only use -> in the tidyverse context
# Making plots: using ggplot2 functions
# 3 parts - Data, Aesthetics, Geometries
# Aesthetics: aes(), MAPPING data onto a visual scale (axis)
# e.g. x, y, ymin, ymax, col, shape, size, ...
# Geometries: How will the data look?
# 1 - All data points
ggplot(PlantGrowth, aes(x = group, y = weight)) +
geom_jitter(width = 0.2, alpha = 0.65)
# alpha controls transparency
# 2 - summary statistics:
# 2a - mean and sd:
ggplot(PGSummary, aes(x = group,
y = avg,
ymin = avg - stdev,
ymax = avg + stdev)) +
geom_pointrange()
# geom_col() +
# geom_errorbar(width = 0.2)
# 2b - box plots:
ggplot(PlantGrowth, aes(x = group, y = weight)) +
geom_boxplot()
# Do Stats:
# Group differences:
# First, build a linear model
# y ~ x means "y as described by x"
# response ~ predictor
plant.lm <- lm(weight ~ group, data = PlantGrowth)
plant.lm
# shortcut to t-tests: use summary
summary(plant.lm) # of a linear model
# summary() is just a generic function
summary(PlantGrowth) # of a dataset (i.e. dataframe)
# So how to do a direct t-test?
sleep
# The data is "paired" i.e. same individual on each treatment:
t.test(extra ~ group, data = sleep, paired = TRUE)
# One-way ANOVA:
anova(plant.lm) # gives ANOVA table
# One last way: Compare all pair-wise t-test
# Tukey Post-hoc test:
# to do this, set up ANOVA in a different way:
# use aov() instead of lm()
plant.aov <- aov(weight ~ group, data = PlantGrowth)
summary(plant.aov) # of an aov object give ANOVA table
TukeyHSD(plant.aov) # All pair-wise t-tests:
# Element 2: Functions
# Everything that happens, is because of a function
# Arithmetic operators
# +, -, *, /, ^
34 + 6
# this is actually a function
`+`(34, 6)
# Order or operations
# BEDMAS - brackets, exp, div, mult, add, sub
2 - 3/4 # 1.25
(2 - 3)/4 # -0.25
# Make some objects
n <- 34
p <- 6
# use them like numbers
n + p
# Form of functions:
# fun_name(fun_args)
# fun_args can be:
# named or unnamed (positional matching)
log2(8)
log2(x = 8)
log(x = 8, base = 2)
log(8, 2) # positional matching
log(8, base = 2) # combination, typical
# Some basic and common functions:
# Combine/concatenate: unnamed arguments
xx <- c(3, 8, 9 , 23)
xx
myNames <- c("healthy", "tissue", "quantity")
myNames
# Sequential numbers: seq()
seq(from = 1, to = 100, by = 7)
foo1 <- seq(1, 100, 7)
# of course, we can use objects in functions:
foo2 <- seq(1, n, p)
# trick: a regular interval of 1
# the colon operator
1:10
seq(1, 10, 1)
# Two major types of math functions:
# 1 - Transformation functions (e.g. log)
# output length == input length
# EVERY value is treated the same way
log(foo1)
# 2 - Aggregration functions (e.g. sum, mean)
# output lenght typically 1 (or a few) number(s)
# e.g.
mean(foo1)
median(foo1)
sum(foo1)
# Exercise 6.1
foo2
foo2 + 100 # trans
foo2 + foo2 # trans
sum(foo2) + foo2 # agg followed by trans
1:3 + foo2 # trans
1:4 + foo2
############### Key Concept in R
############### Vector Recycling!
# it's why this works:
# z-scores:
(foo1 - mean(foo1))/sd(foo1)
# Short cut:
scale(foo1)
# Exercise 6.2: linear model transformation
# y = mx + b
m <- 1.12
b <- -0.4
xx
m * xx + b
# What if... I had two m values
m2 <- c(0, 1.12)
# I want to get 8 numbers as output
0 * xx + b
1.12 * xx + b
# but... I get only one series
m2 * xx + b
# How can I reiterate a function over all values
# (or subsets) of a data set?
# 1 - Make your own function
# Fixed linear transformation from the tutorial: y = 1.12 * x - 0.4,
# applied element-wise to a numeric vector.
equation <- function(x) {
  slope <- 1.12
  intercept <- -0.4
  slope * x + intercept
}
equation(xx)
# Exercise 6.4 - defining a function
# With default values
# Evaluate the straight line m * x + b element-wise over x.
# Defaults (m = 1.12, b = -0.4) reproduce the tutorial's fixed model.
lin <- function(x, m = 1.12, b = -0.4) {
  scaled <- x * m
  scaled + b
}
# m & b in our function are NOT from the environment
rm(m) # delete m
rm(b) # delete b
lin(xx) # Only one arg, m & b use defaults
lin(xx, 5, 60) # All args
lin(xx, b = 10) # Only two args, m uses default
# Examples of functions with no arguments
ls()
list.files()
lin(xx, m = c(0, 1.12)) # still not working,
# We need to reiterate over the m values
# 2 - Using map() from the purrr package (part of tidyverse)
# "map" each "element" of m2 onto a function:
# use . for a place-holder
# use ~ to define which function
map(m2, ~ lin(xx, .))
# Element 3: Objects
# Anything that exists, is an object
# Common data storage:
# Vectors - 1 Dimensional, Homogenous data types
# e.g.
foo1 # 15 elements
foo2 # 6 elements
myNames # 3 elements
# 4 most common user-defined Atomic Vector Types
# Logical - binary, boolean (TRUE/FALSE or T/F)
# Integer - whole numbers
# Double - numbers with decimals
# Character (aka strings)
# "Numeric" refers to both interger and double
# not really common: raw, complex
test <- c(1:10, "bob")
test
# to find out the type use typof()
typeof(test)
typeof(foo1)
# make a couple more objects
foo3 <- c("Liver", "Brain", "Testes", "Muscle",
"Intestine", "Heart")
typeof(foo3)
foo4 <- c(T, F, F, T, T, F)
typeof(foo4)
# Lists: 1 Dimensional, Heterogenous data types
# e.g.
typeof(plant.lm)
# Take a look inside and getting values:
# 1 - Use attributes (like metadata):
attributes(plant.lm)
# 2 - Use accessor functions:
names(plant.lm)
class(plant.lm)
# 3 - Use the $ for names elements:
plant.lm$coefficients # A named numeric vector of 3 elements
plant.lm$residuals # A numeric vector, 30 elements
# Data Frame - 2 Dimensional, Heterogenous
# A special class of type list
# A collection of vertical vectors of the same lengths
# e.g.
PlantGrowth
typeof(PlantGrowth)
class(PlantGrowth)
# Columns == variables
# Rows == observations
# Make one from scratch:
foo.df <- data.frame(foo4, foo3, foo2)
foo.df
attributes(foo.df) # 3-element long char vector
myNames
names(foo.df) <- myNames
attributes(foo.df)
# Access names elements using $ notation:
foo.df$quantity
# Some typical functions:
# Examine data:
summary(foo.df)
str(foo.df) # structure
glimpse(foo.df) # from tidyverse, dplyr package
dim(foo.df) # (row, col)
nrow(foo.df)
ncol(foo.df)
# don't use length()
length(foo.df) # gives the number of elements in the list
# Two most common problems in R:
# 1 - Wrong data type (in vectors)
# solution - examine is.*() and coerce as.*()
# replace * with type
test
mean(test)
is.numeric(test)
is.na(test)
test <- as.numeric(test)
mean(test, na.rm = T)
# 2 - Wrong structure or format
# solution - rearrange data
# Element 4: Logical Expressions
# Asking and combining (Yes/No) questions
# Relational operators
# == test for equivalence
# != test for non-equivalence
# >, <, >=, <=
# !x, negation of x, where x is a logical vector
# ALWAYS results in a logical vector
n
p
n > p
n < p
!foo4
# Logical operators: Combine Yes/No questions
# & AND - a TRUE in EVERY question
# | OR - a TRUE in at least ONE question
# %in% WITHIN
# Examples with different data types:
# old school: subset()
# new way: dplyr::filter()
# Logical variables
# All healthy
foo.df %>%
filter(healthy)
# All unhealthy
foo.df %>%
filter(!healthy)
# Numeric variables (Int of Dbl)
# Below 10
foo.df %>%
filter(quantity < 10)
# Exactly 31
foo.df %>%
filter(quantity == 31)
# Range between 10 - 20
foo.df %>%
filter(quantity > 10 & quantity < 20)
# Meaningless
foo.df %>%
filter(quantity > 10 | quantity < 20)
# Tail ends (beyond [10,20])
foo.df %>%
filter(quantity < 10 | quantity > 20)
# Impossible
foo.df %>%
filter(quantity < 10 & quantity > 20)
# Character variables
# NO pattern matching
# Heart Samples:
foo.df %>%
filter(tissue == "Heart")
# Liver and Heart Samples:
# Cheap and easy way :)
foo.df %>%
filter(tissue == "Heart" | tissue == "Liver")
# More efficient: vector recycling
# This doesn't work:
foo.df %>%
filter(tissue == c("Heart", "Liver"))
# But...
foo.df %>%
filter(tissue == c("Liver", "Heart"))
# So the real way...
# These are equivalent:
foo.df %>%
filter(tissue %in% c("Heart", "Liver"))
foo.df %>%
filter(tissue %in% c("Liver", "Heart"))
# Element 5: Indexing
# Finding information according to position using []
# Vectors:
foo1
foo1[6] # The sixth value
foo1[p] # The pth value, p == 6
foo1[3:p] # 3rd to pth values
foo1[p:length(foo1)] # pth to last value
# use combinations of
# integers, objects, functions, etc...
# But, the exciting part is ... logical vectors!
# i.e. the result of logical expressions
# all values less than 50
foo1[foo1 < 50]
# Data frames: 2 dimensions so use [rows, cols]
foo.df[3, ] # 3rd row, ALL cols
foo.df[ ,3] # ALL rows, 3rd col by number
foo.df[ ,"quantity"] # ALL rows, 3rd col by name
# no comma is a short cut to access columns
foo.df[3]
foo.df["quantity"]
# But compare this to:
foo.df[ ,3]
# R switched to a vector! To prevent this
# use a tibble
foo.df <- as_tibble(foo.df)
# The data frame always remains a data frame:
foo.df[ ,3]
# Can I have a comma with a 1D vector
foo1[,6]
foo1[6,]
# Error!
# Exercises:
# use [] or filter()
# or... even subset() (common but old)
# 1 - 3rd to the 6th rows, only quantity
foo.df[3:6,3]
foo.df[3:6,"quantity"] # a nicer way
foo.df$quantity[3:6] # as a vector
# 2 - Everything except the healthy column
foo.df[,2:3]
foo.df[,-1]
foo.df[,names(foo.df) != "healthy"]
foo.df[,c("tissue", "quantity")]
# also...
foo.df[,-(c(1,3))] # exclude more than one column
# 3 - Tissues that have a quantity less than 10
foo.df[foo.df$quantity < 10, "tissue"]
foo.df$tissue[foo.df$quantity < 10]
# 4 - Which tissue has the highest quantity?
max(foo.df$quantity) # gives actual value
which.max(foo.df$quantity) # Where is it?
foo.df$tissue[which.max(foo.df$quantity)] # index it
# Element 8: Factor Variables (with levels)
# aka categorical, discrete, qualitative (with groups)
# Factor is a special class of type integer
# with labels:
# e.g.
PlantGrowth$group
typeof(PlantGrowth$group) # "integer"
class(PlantGrowth$group) # "factor"
# you can see it here:
str(PlantGrowth)
# some problems:
foo3 # character
foo.df$tissue # factor
str(foo.df)
# convert to a character:
as.character(PlantGrowth$group) # The labels for each level
as.integer(PlantGrowth$group) # The actual value of the level
# Element 9: Tidy Data
source("PlayData.R")
# Make the data tidy using the tidyr package
# Part of the tidyverse
# gather() with 4 arguments:
# 1 - data
# 2&3 - key, value (the names of the OUTPUT columns)
# 4 - either the ID or the MEASURE variables
gather(PlayData, key, value, -c(type, time)) # give ID vars
gather(PlayData, key, value, c(height, width)) # give MEASURE vars
# Assign to new Data Frame
PlayData.t <- gather(PlayData, key, value, -c(type, time))
# To do transformations, it's easiest to have two columns:
# Scenario 1: According to measure and type
PlayData.t %>%
spread(type, value) -> scenario1
scenario1$A/scenario1$B
# Scenario 2: According to measure and time
PlayData.t %>%
spread(time, value) -> scenario2
scenario2$`1`/scenario2$`2`
# Scenario 3: According to type and time
# Already possible with the raw data
PlayData$height/PlayData$width
# Element 10: The dplyr functions
# Go to the SILAC protein project for examples
# dplyr functions:
# 2e - summarise(), for Aggregration functions
# 2 - the group_by() adverb
# Apply aggregration functions:
# Scenario 1: According to measure and type
PlayData.t %>%
group_by(key, type) %>%
summarise(avg = mean(value))
# Scenario 2: According to measure and time
PlayData.t %>%
group_by(key, time) %>%
summarise(avg = mean(value))
# Scenario 3: According to type and time
PlayData.t %>%
group_by(time, type) %>%
summarise(avg = mean(value))
#########
# Regular Expressions
# Find patterns in strings or numbers
source("genes.R")
# This makes:
genes
# Find motif: "GGGCCC"
genes == "GGGCCC"
# wrong - This is not pattern matching
# grep - global regular expression, print
grep("GGGCCC", genes) # which gene contains the sequence
# Make use of Regular Expressions:
# Exactly as above, but using special characters
# "{3}" means exactly 3
grep("G{3}C{3}", genes)
grepl("G{3}C{3}", genes)
library(stringr)
str_detect(genes, "G{3}C{3}") # logical vector, like grepl
str_locate(genes, "G{3}C{3}") # logical vector, like grepl
# Less strict: "." means "anything"
grep("G.{4}C", genes)
# Some examples:
genes <- c("alpha4", "p53", "CDC53", "Agft-4", "cepb2")
genes
# Which genes that begin with c (either C, c)
str_extract(genes, regex("^c.*", ignore_case = TRUE))
# Some last examples:
# Indexing and data types review and expansion
# Vectors
foo1[5] # 5th value
# Data frames
foo.df[3,] # 3rd row
# Make a list and index it:
myList <- list(A = 1:3,
B = 56:85,
C = foo.df,
D = plant.lm)
# typeof(myList)
# typeof(plant.lm)
myList$C # view a part using $
# What about []? this always results in a list!
myList[3] # using numbers
myList["C"] # using names
# If you want the actual object as itself, use [[]]
myList[[3]] # using numbers
myList[["C"]] # using names
# Access columns directly:
myList[[3]]$tissue # typical
myList[3]$C$tissue # :/ kinda confusing
# Matrices
# 2D vector - only one data type!
myMatrix <- matrix(1:12, ncol = 3)
myMatrix
myMatrix[,3]
colSums(myMatrix)
rowSums(myMatrix)
# how to find peaks in an ordered vector:
# e.g.
yy <- c(1:5,0:-4,0:7,3:1)
myDF <- data.frame(xx = seq_along(yy),
yy = yy)
plot(myDF)
# find peaks in
diff(sign(diff(myDF$yy)))
# Peaks produce -2
# Minima produce 2
# So where are they?
which(diff(sign(diff(myDF$yy))) == -2) + 1 # peaks
which(diff(sign(diff(myDF$yy))) == 2) + 1 # minima
|
#' Update derived model parameters from the packaged parameter file
#'
#' Loads the raw parameter values shipped with the \pkg{covid19icu} package
#' and derives the compartment transition rates used by the queueing model:
#' floor step-up and death rates, mild-symptom return-to-ED and death rates,
#' step-up rates from the floor queue, and ICU departure rates. Each
#' (rate, death-rate) pair is obtained by solving a small 2x2 linear system.
#'
#' @return A named list of model parameters with the derived rates added.
#' @export
update_inputs <- function() {
  params <- yaml::yaml.load_file(
    system.file("content/parameter_values.yaml", package = "covid19icu")
  )

  # Solve the 2x2 linear system shared by the floor and mild-symptom rate
  # derivations:
  #   (1 - p1) * r1 -      p1  * r2 = scale * p1
  #       -p2  * r1 + (1 - p2) * r2 = scale * p2
  # and return c(r1, r2). p1/p2 are probabilities and scale is the overall
  # throughput rate (chi_L* for the floor, phi* for mild symptoms).
  solve_rate_pair <- function(p1, p2, scale) {
    M <- matrix(c(1 - p1, -p2, -p1, 1 - p2), nrow = 2, ncol = 2)
    as.vector(solve(M) %*% matrix(scale * c(p1, p2), nrow = 2, ncol = 1))
  }

  # Floor: rate of stepping up to the ICU (theta_F*) and death rate on the
  # floor (mu_WF*), one pair per acuity level.
  floor1 <- solve_rate_pair(params$ptheta_F1, params$pmu_F1, params$chi_L1)
  floor2 <- solve_rate_pair(params$ptheta_F2, params$pmu_F2, params$chi_L2)
  floor3 <- solve_rate_pair(params$ptheta_F3, params$pmu_F3, params$chi_L3)
  params$theta_F1 <- floor1[1]; params$mu_WF1 <- floor1[2]
  params$theta_F2 <- floor2[1]; params$mu_WF2 <- floor2[2]
  params$theta_F3 <- floor3[1]; params$mu_WF3 <- floor3[2]

  # Step-up rate is undifferentiated between floor and floor queue.
  params$theta_WF1 <- params$ptheta_WF1 * params$mu_WF1 / (1 - params$ptheta_WF1)
  params$theta_WF2 <- params$ptheta_WF2 * params$mu_WF2 / (1 - params$ptheta_WF2)
  params$theta_WF3 <- params$ptheta_WF3 * params$mu_WF3 / (1 - params$ptheta_WF3)

  # Death rate is also undifferentiated between floor and floor queue.
  params$mu_F1 <- params$mu_WF1
  params$mu_F2 <- params$mu_WF2
  params$mu_F3 <- params$mu_WF3

  # Mild symptoms: rate of returning to the ED (xi_MS*) and death rate
  # (mu_MS*) for patients triaged as having mild symptoms.
  ms1 <- solve_rate_pair(params$pxi_MS1, params$pmu_MS1, params$phi1)
  ms2 <- solve_rate_pair(params$pxi_MS2, params$pmu_MS2, params$phi2)
  ms3 <- solve_rate_pair(params$pxi_MS3, params$pmu_MS3, params$phi3)
  params$xi_MS1 <- ms1[1]; params$mu_MS1 <- ms1[2]
  params$xi_MS2 <- ms2[1]; params$mu_MS2 <- ms2[2]
  params$xi_MS3 <- ms3[1]; params$mu_MS3 <- ms3[2]

  # ICU departure rate: identical for all three acuity levels because the
  # parameter file provides a single (p_phi, d_phi, d_mu) triple.
  # NOTE(review): per-level readmission rates (phi_I*/lambda*) were sketched
  # but commented out in an earlier version; a shared rate is used instead.
  phi_I <- 1 / (params$p_phi * params$d_phi + (1 - params$p_phi) * params$d_mu)
  params$phi_I1 <- phi_I
  params$phi_I2 <- phi_I
  params$phi_I3 <- phi_I

  params
}
|
/R/queueinputs.R
|
permissive
|
nswartwo/covid19_icu
|
R
| false
| false
| 3,683
|
r
|
#' Update derived model parameters from the packaged parameter file
#'
#' Loads the raw parameter values shipped with the \pkg{covid19icu} package
#' and derives the compartment transition rates used by the queueing model:
#' floor step-up and death rates, mild-symptom return-to-ED and death rates,
#' step-up rates from the floor queue, and ICU departure rates. Each
#' (rate, death-rate) pair is obtained by solving a small 2x2 linear system.
#'
#' @return A named list of model parameters with the derived rates added.
#' @export
update_inputs <- function() {
  params <- yaml::yaml.load_file(
    system.file("content/parameter_values.yaml", package = "covid19icu")
  )

  # Solve the 2x2 linear system shared by the floor and mild-symptom rate
  # derivations:
  #   (1 - p1) * r1 -      p1  * r2 = scale * p1
  #       -p2  * r1 + (1 - p2) * r2 = scale * p2
  # and return c(r1, r2). p1/p2 are probabilities and scale is the overall
  # throughput rate (chi_L* for the floor, phi* for mild symptoms).
  solve_rate_pair <- function(p1, p2, scale) {
    M <- matrix(c(1 - p1, -p2, -p1, 1 - p2), nrow = 2, ncol = 2)
    as.vector(solve(M) %*% matrix(scale * c(p1, p2), nrow = 2, ncol = 1))
  }

  # Floor: rate of stepping up to the ICU (theta_F*) and death rate on the
  # floor (mu_WF*), one pair per acuity level.
  floor1 <- solve_rate_pair(params$ptheta_F1, params$pmu_F1, params$chi_L1)
  floor2 <- solve_rate_pair(params$ptheta_F2, params$pmu_F2, params$chi_L2)
  floor3 <- solve_rate_pair(params$ptheta_F3, params$pmu_F3, params$chi_L3)
  params$theta_F1 <- floor1[1]; params$mu_WF1 <- floor1[2]
  params$theta_F2 <- floor2[1]; params$mu_WF2 <- floor2[2]
  params$theta_F3 <- floor3[1]; params$mu_WF3 <- floor3[2]

  # Step-up rate is undifferentiated between floor and floor queue.
  params$theta_WF1 <- params$ptheta_WF1 * params$mu_WF1 / (1 - params$ptheta_WF1)
  params$theta_WF2 <- params$ptheta_WF2 * params$mu_WF2 / (1 - params$ptheta_WF2)
  params$theta_WF3 <- params$ptheta_WF3 * params$mu_WF3 / (1 - params$ptheta_WF3)

  # Death rate is also undifferentiated between floor and floor queue.
  params$mu_F1 <- params$mu_WF1
  params$mu_F2 <- params$mu_WF2
  params$mu_F3 <- params$mu_WF3

  # Mild symptoms: rate of returning to the ED (xi_MS*) and death rate
  # (mu_MS*) for patients triaged as having mild symptoms.
  ms1 <- solve_rate_pair(params$pxi_MS1, params$pmu_MS1, params$phi1)
  ms2 <- solve_rate_pair(params$pxi_MS2, params$pmu_MS2, params$phi2)
  ms3 <- solve_rate_pair(params$pxi_MS3, params$pmu_MS3, params$phi3)
  params$xi_MS1 <- ms1[1]; params$mu_MS1 <- ms1[2]
  params$xi_MS2 <- ms2[1]; params$mu_MS2 <- ms2[2]
  params$xi_MS3 <- ms3[1]; params$mu_MS3 <- ms3[2]

  # ICU departure rate: identical for all three acuity levels because the
  # parameter file provides a single (p_phi, d_phi, d_mu) triple.
  # NOTE(review): per-level readmission rates (phi_I*/lambda*) were sketched
  # but commented out in an earlier version; a shared rate is used instead.
  phi_I <- 1 / (params$p_phi * params$d_phi + (1 - params$p_phi) * params$d_mu)
  params$phi_I1 <- phi_I
  params$phi_I2 <- phi_I
  params$phi_I3 <- phi_I

  params
}
|
require(reshape2)
# ---------------------------------------------------------------------------
# Prepare isoform-level expression data (RSEM-STAR isoform counts) for the
# Hook et al. dataset (GEO series GSE108020): load counts, build per-cell
# metadata from the GEO series matrix, filter cells, attach cell-type
# annotations, build a Seurat object, and subset the percent-isoform table.
# NOTE(review): this script also uses str_replace (stringr), GEOquery, Seurat
# slots (@data, @meta.data) and a custom SeuratWrapper() helper that are not
# loaded/defined here -- presumably attached earlier in the session; confirm.
# ---------------------------------------------------------------------------
#Prepare isoform expression data. from Rsem-Star
iso.exp <- read.delim("~/data/Hook_rsem_isoforms_Count_matrix.txt",row.names = 1,header = T)
# Strip the RSEM per-cell output-file suffix so columns are bare cell IDs.
colnames(iso.exp) <- str_replace(colnames(iso.exp), ".isoforms.results", "")
iso.exp[1:10,1:3]
dim(iso.exp)
# Parse the GEO series matrix to build per-cell metadata, keyed by the
# BioSample accession embedded in the "relation" URL field.
gds <- GEOquery::getGEO(filename = '~/data/GSE108020_series_matrix.txt.gz')
meta <- data.frame(row.names = str_replace(gds@phenoData@data[["relation"]],"BioSample: https://www.ncbi.nlm.nih.gov/biosample/",""),
                   SAMid = str_replace(gds@phenoData@data[["relation"]],"BioSample: https://www.ncbi.nlm.nih.gov/biosample/",""),
                   sampnames = paste(gds@phenoData@data[["project:ch1"]],"_0",gds@phenoData@data[["correct_source_plate:ch1"]],"_",gds@phenoData@data[["well:ch1"]], sep = ""),
                   Sample_Name=gds@phenoData@data[["geo_accession"]],
                   qc=gds@phenoData@data[["passed_qc:ch1"]],
                   age=gds@phenoData@data[["age:ch1"]],
                   region=gds@phenoData@data[["region:ch1"]],
                   subset=gds@phenoData@data[["subset.cluster:ch1"]])
#Filter cells:
head(meta)
#Filter out low quality cell data as explained in the Hook et al.
meta <- meta[meta$qc == TRUE,]
#Exclude E15.5 cells as requested by CNSDR
meta <- meta[meta$age == "P7",]
# Keep only the expression columns for cells that passed both filters.
cells.pass <- rownames(meta)
iso.exp <- iso.exp[,which(colnames(iso.exp) %in% cells.pass)]
#Add cell type annotation to the meta data
# load() returns the name of the loaded object; get() fetches its value.
celltypes <- get(load("~/data/PriorPosttable.Hook2scemap.Rdata"))
head(celltypes)
# NOTE(review): cbind assumes celltypes rows are aligned 1:1 with meta rows --
# verify the row order before trusting the merged annotation columns.
meta <- cbind(meta, celltypes)
dim(meta)
dim(iso.exp)
# Build the Seurat object via the custom wrapper (perp/PCs are presumably the
# t-SNE perplexity and number of PCs -- confirm against SeuratWrapper).
Hook2018iso <- SeuratWrapper(ExpData = iso.exp, perp = 10, ProjectLabel = "Hook2018iso", NewMeta = meta, Normalize = T, scale.only.var = F, PCs = 5, dump.files = F, min.cells = 0)
head(Hook2018iso@meta.data)
save(Hook2018iso, file="~/data/Hook2018iso.seurat.Robj")
#Percent isoform usage table for all genes and cells
pct.iso <- read.delim("~/data/Hook_rsem_isoforms_Percent_matrix.txt",row.names = 1,header = T)
colnames(pct.iso) <- str_replace(colnames(pct.iso), ".isoforms.results", "")
pct.iso[1:10,1:3]
# Subset the percent-usage table to the same QC-passing cells.
pct.iso <- pct.iso[,which(colnames(pct.iso) %in% cells.pass)]
dim(pct.iso)
save(pct.iso, file="~/data/Hook_rsem_isoforms_Percent_matrix-Sub.Rdata")
#Flip - Flop isoforms
# Ensembl transcript IDs of the flip/flop splice isoforms of the AMPA
# receptor subunit genes Gria1-Gria4 (two transcripts per gene).
ensids <- c("ENSMUST00000094179.10",
            "ENSMUST00000036315.15",
            "ENSMUST00000075316.9",
            "ENSMUST00000107745.7",
            "ENSMUST00000165288.1",
            "ENSMUST00000076349.11",
            "ENSMUST00000027020.12",
            "ENSMUST00000063508.14")
transcripts <- NULL
# Map each Ensembl ID to its full rowname ("<ID>_<symbol>") in the matrix.
for (id in ensids){
  print(id)
  transcripts <- c(transcripts,grep(pattern = id, x = rownames(Hook2018iso@data), value = TRUE))
  print(transcripts)
}
# Combine cell metadata with normalized isoform-level and gene-level
# expression for plotting.
# NOTE(review): Hook2018gene (a gene-level Seurat object) is not created in
# this script -- it is assumed to already exist in the session; confirm.
plotdata <- cbind(Hook2018iso@meta.data,
                  t(as.matrix(Hook2018iso@data)[transcripts,]),
                  t(as.matrix(Hook2018gene@data)[c("Gria1","Gria2","Gria3","Gria4"),]))
head(plotdata)
# Boxplots grouped by the published "subset" cluster label: one page of
# gene-level expression, then one page per gene with its two isoforms.
pdf("output/FlipFlop-gene-isoforms.pdf",width = 10,height = 5)
#Plot Gene expressions
plotdata[,c("subset","Gria1","Gria2","Gria3","Gria4")] %>%
  melt() %>%
  ggplot(aes(x=subset, y=value, fill=subset ))+
  geom_boxplot(aes(fill=variable),notch=FALSE,outlier.colour="red")+
  labs(y="Normalized Expression")+
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))+
  scale_fill_discrete(name = "Genes")+
  theme(legend.position="top")
# Gria1 flip/flop isoforms
plotdata[,c("subset","ENSMUST00000094179.10_Gria1-202","ENSMUST00000036315.15_Gria1-201")] %>%
  melt() %>%
  ggplot(aes(x=subset, y=value, fill=subset ))+
  geom_boxplot(aes(fill=variable),notch=FALSE,outlier.colour="red")+
  labs(y="Normalized Expression")+
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))+
  scale_fill_discrete(name = "Isoforms")+
  theme(legend.position="top")
# Gria2 flip/flop isoforms
plotdata[,c("subset","ENSMUST00000075316.9_Gria2-201","ENSMUST00000107745.7_Gria2-202")] %>%
  melt() %>%
  ggplot(aes(x=subset, y=value, fill=subset ))+
  geom_boxplot(aes(fill=variable),notch=FALSE,outlier.colour="red")+
  labs(y="Normalized Expression")+
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))+
  scale_fill_discrete(name = "Isoforms")+
  theme(legend.position="top")
# Gria3 flip/flop isoforms
plotdata[,c("subset","ENSMUST00000165288.1_Gria3-209","ENSMUST00000076349.11_Gria3-201")] %>%
  melt() %>%
  ggplot(aes(x=subset, y=value, fill=subset ))+
  geom_boxplot(aes(fill=variable),notch=FALSE,outlier.colour="red")+
  labs(y="Normalized Expression")+
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))+
  scale_fill_discrete(name = "Isoforms")+
  theme(legend.position="top")
# Gria4 flip/flop isoforms
plotdata[,c("subset","ENSMUST00000027020.12_Gria4-201","ENSMUST00000063508.14_Gria4-202")] %>%
  melt() %>%
  ggplot(aes(x=subset, y=value, fill=subset ))+
  geom_boxplot(aes(fill=variable),notch=FALSE,outlier.colour="red")+
  labs(y="Normalized Expression")+
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))+
  scale_fill_discrete(name = "Isoforms")+
  theme(legend.position="top")
dev.off()
#plots
# Same gene/isoform boxplots as above, but with cells grouped by the
# "Prediction" label (added with the celltypes annotation table; presumably
# the scmap-predicted cell type -- confirm) instead of the "subset" label.
pdf("output/FlipFlop-gene-isoforms-PredictionGrouped.pdf",width = 10,height = 5)
#Plot Gene expressions
plotdata[,c("Prediction","Gria1","Gria2","Gria3","Gria4")] %>%
  melt() %>%
  ggplot(aes(x=Prediction, y=value, fill=Prediction ))+
  geom_boxplot(aes(fill=variable),notch=FALSE,outlier.colour="red")+
  labs(y="Normalized Expression")+
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))+
  scale_fill_discrete(name = "Genes")+
  theme(legend.position="top")
# Gria1 flip/flop isoforms
plotdata[,c("Prediction","ENSMUST00000094179.10_Gria1-202","ENSMUST00000036315.15_Gria1-201")] %>%
  melt() %>%
  ggplot(aes(x=Prediction, y=value, fill=Prediction ))+
  geom_boxplot(aes(fill=variable),notch=FALSE,outlier.colour="red")+
  labs(y="Normalized Expression")+
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))+
  scale_fill_discrete(name = "Isoforms")+
  theme(legend.position="top")
# Gria2 flip/flop isoforms
plotdata[,c("Prediction","ENSMUST00000075316.9_Gria2-201","ENSMUST00000107745.7_Gria2-202")] %>%
  melt() %>%
  ggplot(aes(x=Prediction, y=value, fill=Prediction ))+
  geom_boxplot(aes(fill=variable),notch=FALSE,outlier.colour="red")+
  labs(y="Normalized Expression")+
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))+
  scale_fill_discrete(name = "Isoforms")+
  theme(legend.position="top")
# Gria3 flip/flop isoforms
plotdata[,c("Prediction","ENSMUST00000165288.1_Gria3-209","ENSMUST00000076349.11_Gria3-201")] %>%
  melt() %>%
  ggplot(aes(x=Prediction, y=value, fill=Prediction ))+
  geom_boxplot(aes(fill=variable),notch=FALSE,outlier.colour="red")+
  labs(y="Normalized Expression")+
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))+
  scale_fill_discrete(name = "Isoforms")+
  theme(legend.position="top")
# Gria4 flip/flop isoforms
plotdata[,c("Prediction","ENSMUST00000027020.12_Gria4-201","ENSMUST00000063508.14_Gria4-202")] %>%
  melt() %>%
  ggplot(aes(x=Prediction, y=value, fill=Prediction ))+
  geom_boxplot(aes(fill=variable),notch=FALSE,outlier.colour="red")+
  labs(y="Normalized Expression")+
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))+
  scale_fill_discrete(name = "Isoforms")+
  theme(legend.position="top")
dev.off()
# Heatmaps of percent isoform usage for every detected transcript of each
# Gria gene, with columns (cells) annotated by predicted cell type, brain
# region and that gene's expression level.
pdf("output/Heatmap-Percent-isoforms.pdf",width = 12,height = 8)
pheatmap::pheatmap(pct.iso[c(grep(pattern = "Gria1", x = rownames(Hook2018iso@data), value = TRUE)),],
                   cluster_rows = F,show_colnames = F,cellheight = 10,
                   annotation_col = plotdata[,c("Prediction", "region", "Gria1")]
)
pheatmap::pheatmap(pct.iso[c(grep(pattern = "Gria2", x = rownames(Hook2018iso@data), value = TRUE)),],
                   cluster_rows = F,show_colnames = F,cellheight = 10,
                   annotation_col = plotdata[,c("Prediction", "region", "Gria2")]
)
pheatmap::pheatmap(pct.iso[c(grep(pattern = "Gria3", x = rownames(Hook2018iso@data), value = TRUE)),],
                   cluster_rows = F,show_colnames = F,cellheight = 10,
                   annotation_col = plotdata[,c("Prediction", "region", "Gria3")]
)
pheatmap::pheatmap(pct.iso[c(grep(pattern = "Gria4", x = rownames(Hook2018iso@data), value = TRUE)),],
                   cluster_rows = F,show_colnames = F,cellheight = 10,
                   annotation_col = plotdata[,c("Prediction", "region", "Gria4")]
)
dev.off()
# Violin plots of raw (use.raw = TRUE) expression on a log scale for all
# transcripts of selected genes, grouping cells by the "subset" identity.
Hook2018iso <- SetAllIdent(Hook2018iso, id = "subset")
transcripts <- grep(pattern = "Gria1", x = rownames(Hook2018iso@data), value = TRUE)
pdf("output/Gria1.Isoplots.pdf",width = 20,height = 15)
VlnPlot(object = Hook2018iso, features.plot = transcripts, use.raw = TRUE, y.log = TRUE,nCol = 1,x.lab.rot = T)
dev.off()
transcripts <- grep(pattern = "Gria4", x = rownames(Hook2018iso@data), value = TRUE)
pdf("output/Gria4.Isoplots.pdf",width = 20,height = 20)
VlnPlot(object = Hook2018iso, features.plot = transcripts, use.raw = TRUE, y.log = TRUE,nCol = 1,x.lab.rot = T)
dev.off()
transcripts <- grep(pattern = "Grm4", x = rownames(Hook2018iso@data), value = TRUE)
pdf("output/Grm4.Isoplots.pdf",width = 20,height = 30)
VlnPlot(object = Hook2018iso, features.plot = transcripts, use.raw = TRUE, y.log = TRUE,nCol = 1,x.lab.rot = T)
dev.off()
transcripts <- grep(pattern = "Gpr83", x = rownames(Hook2018iso@data), value = TRUE)
pdf("output/Gpr83.Isoplots.pdf",width = 20,height = 10)
VlnPlot(object = Hook2018iso, features.plot = transcripts, use.raw = TRUE, y.log = TRUE,nCol = 1,x.lab.rot = T)
dev.off()
|
/code/Rsem-Isoform-counts.R
|
no_license
|
yasinkaymaz/Harvard-RosenbrockLab
|
R
| false
| false
| 9,431
|
r
|
require(reshape2)
# ---------------------------------------------------------------------------
# Prepare isoform-level expression data (RSEM-STAR isoform counts) for the
# Hook et al. dataset (GEO series GSE108020): load counts, build per-cell
# metadata from the GEO series matrix, filter cells, attach cell-type
# annotations, build a Seurat object, and subset the percent-isoform table.
# NOTE(review): this script also uses str_replace (stringr), GEOquery, Seurat
# slots (@data, @meta.data) and a custom SeuratWrapper() helper that are not
# loaded/defined here -- presumably attached earlier in the session; confirm.
# ---------------------------------------------------------------------------
#Prepare isoform expression data. from Rsem-Star
iso.exp <- read.delim("~/data/Hook_rsem_isoforms_Count_matrix.txt",row.names = 1,header = T)
# Strip the RSEM per-cell output-file suffix so columns are bare cell IDs.
colnames(iso.exp) <- str_replace(colnames(iso.exp), ".isoforms.results", "")
iso.exp[1:10,1:3]
dim(iso.exp)
# Parse the GEO series matrix to build per-cell metadata, keyed by the
# BioSample accession embedded in the "relation" URL field.
gds <- GEOquery::getGEO(filename = '~/data/GSE108020_series_matrix.txt.gz')
meta <- data.frame(row.names = str_replace(gds@phenoData@data[["relation"]],"BioSample: https://www.ncbi.nlm.nih.gov/biosample/",""),
                   SAMid = str_replace(gds@phenoData@data[["relation"]],"BioSample: https://www.ncbi.nlm.nih.gov/biosample/",""),
                   sampnames = paste(gds@phenoData@data[["project:ch1"]],"_0",gds@phenoData@data[["correct_source_plate:ch1"]],"_",gds@phenoData@data[["well:ch1"]], sep = ""),
                   Sample_Name=gds@phenoData@data[["geo_accession"]],
                   qc=gds@phenoData@data[["passed_qc:ch1"]],
                   age=gds@phenoData@data[["age:ch1"]],
                   region=gds@phenoData@data[["region:ch1"]],
                   subset=gds@phenoData@data[["subset.cluster:ch1"]])
#Filter cells:
head(meta)
#Filter out low quality cell data as explained in the Hook et al.
meta <- meta[meta$qc == TRUE,]
#Exclude E15.5 cells as requested by CNSDR
meta <- meta[meta$age == "P7",]
# Keep only the expression columns for cells that passed both filters.
cells.pass <- rownames(meta)
iso.exp <- iso.exp[,which(colnames(iso.exp) %in% cells.pass)]
#Add cell type annotation to the meta data
# load() returns the name of the loaded object; get() fetches its value.
celltypes <- get(load("~/data/PriorPosttable.Hook2scemap.Rdata"))
head(celltypes)
# NOTE(review): cbind assumes celltypes rows are aligned 1:1 with meta rows --
# verify the row order before trusting the merged annotation columns.
meta <- cbind(meta, celltypes)
dim(meta)
dim(iso.exp)
# Build the Seurat object via the custom wrapper (perp/PCs are presumably the
# t-SNE perplexity and number of PCs -- confirm against SeuratWrapper).
Hook2018iso <- SeuratWrapper(ExpData = iso.exp, perp = 10, ProjectLabel = "Hook2018iso", NewMeta = meta, Normalize = T, scale.only.var = F, PCs = 5, dump.files = F, min.cells = 0)
head(Hook2018iso@meta.data)
save(Hook2018iso, file="~/data/Hook2018iso.seurat.Robj")
#Percent isoform usage table for all genes and cells
pct.iso <- read.delim("~/data/Hook_rsem_isoforms_Percent_matrix.txt",row.names = 1,header = T)
colnames(pct.iso) <- str_replace(colnames(pct.iso), ".isoforms.results", "")
pct.iso[1:10,1:3]
# Subset the percent-usage table to the same QC-passing cells.
pct.iso <- pct.iso[,which(colnames(pct.iso) %in% cells.pass)]
dim(pct.iso)
save(pct.iso, file="~/data/Hook_rsem_isoforms_Percent_matrix-Sub.Rdata")
#Flip - Flop isoforms
# Ensembl transcript IDs of the flip/flop splice isoforms of the AMPA
# receptor subunit genes Gria1-Gria4 (two transcripts per gene).
ensids <- c("ENSMUST00000094179.10",
            "ENSMUST00000036315.15",
            "ENSMUST00000075316.9",
            "ENSMUST00000107745.7",
            "ENSMUST00000165288.1",
            "ENSMUST00000076349.11",
            "ENSMUST00000027020.12",
            "ENSMUST00000063508.14")
transcripts <- NULL
# Map each Ensembl ID to its full rowname ("<ID>_<symbol>") in the matrix.
for (id in ensids){
  print(id)
  transcripts <- c(transcripts,grep(pattern = id, x = rownames(Hook2018iso@data), value = TRUE))
  print(transcripts)
}
# Combine cell metadata with normalized isoform-level and gene-level
# expression for plotting.
# NOTE(review): Hook2018gene (a gene-level Seurat object) is not created in
# this script -- it is assumed to already exist in the session; confirm.
plotdata <- cbind(Hook2018iso@meta.data,
                  t(as.matrix(Hook2018iso@data)[transcripts,]),
                  t(as.matrix(Hook2018gene@data)[c("Gria1","Gria2","Gria3","Gria4"),]))
head(plotdata)
# Boxplots grouped by the published "subset" cluster label: one page of
# gene-level expression, then one page per gene with its two isoforms.
pdf("output/FlipFlop-gene-isoforms.pdf",width = 10,height = 5)
#Plot Gene expressions
plotdata[,c("subset","Gria1","Gria2","Gria3","Gria4")] %>%
  melt() %>%
  ggplot(aes(x=subset, y=value, fill=subset ))+
  geom_boxplot(aes(fill=variable),notch=FALSE,outlier.colour="red")+
  labs(y="Normalized Expression")+
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))+
  scale_fill_discrete(name = "Genes")+
  theme(legend.position="top")
# Gria1 flip/flop isoforms
plotdata[,c("subset","ENSMUST00000094179.10_Gria1-202","ENSMUST00000036315.15_Gria1-201")] %>%
  melt() %>%
  ggplot(aes(x=subset, y=value, fill=subset ))+
  geom_boxplot(aes(fill=variable),notch=FALSE,outlier.colour="red")+
  labs(y="Normalized Expression")+
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))+
  scale_fill_discrete(name = "Isoforms")+
  theme(legend.position="top")
# Gria2 flip/flop isoforms
plotdata[,c("subset","ENSMUST00000075316.9_Gria2-201","ENSMUST00000107745.7_Gria2-202")] %>%
  melt() %>%
  ggplot(aes(x=subset, y=value, fill=subset ))+
  geom_boxplot(aes(fill=variable),notch=FALSE,outlier.colour="red")+
  labs(y="Normalized Expression")+
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))+
  scale_fill_discrete(name = "Isoforms")+
  theme(legend.position="top")
# Gria3 flip/flop isoforms
plotdata[,c("subset","ENSMUST00000165288.1_Gria3-209","ENSMUST00000076349.11_Gria3-201")] %>%
  melt() %>%
  ggplot(aes(x=subset, y=value, fill=subset ))+
  geom_boxplot(aes(fill=variable),notch=FALSE,outlier.colour="red")+
  labs(y="Normalized Expression")+
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))+
  scale_fill_discrete(name = "Isoforms")+
  theme(legend.position="top")
# Gria4 flip/flop isoforms
plotdata[,c("subset","ENSMUST00000027020.12_Gria4-201","ENSMUST00000063508.14_Gria4-202")] %>%
  melt() %>%
  ggplot(aes(x=subset, y=value, fill=subset ))+
  geom_boxplot(aes(fill=variable),notch=FALSE,outlier.colour="red")+
  labs(y="Normalized Expression")+
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))+
  scale_fill_discrete(name = "Isoforms")+
  theme(legend.position="top")
dev.off()
#plots
# Same gene/isoform boxplots as above, but with cells grouped by the
# "Prediction" label (added with the celltypes annotation table; presumably
# the scmap-predicted cell type -- confirm) instead of the "subset" label.
pdf("output/FlipFlop-gene-isoforms-PredictionGrouped.pdf",width = 10,height = 5)
#Plot Gene expressions
plotdata[,c("Prediction","Gria1","Gria2","Gria3","Gria4")] %>%
  melt() %>%
  ggplot(aes(x=Prediction, y=value, fill=Prediction ))+
  geom_boxplot(aes(fill=variable),notch=FALSE,outlier.colour="red")+
  labs(y="Normalized Expression")+
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))+
  scale_fill_discrete(name = "Genes")+
  theme(legend.position="top")
# Gria1 flip/flop isoforms
plotdata[,c("Prediction","ENSMUST00000094179.10_Gria1-202","ENSMUST00000036315.15_Gria1-201")] %>%
  melt() %>%
  ggplot(aes(x=Prediction, y=value, fill=Prediction ))+
  geom_boxplot(aes(fill=variable),notch=FALSE,outlier.colour="red")+
  labs(y="Normalized Expression")+
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))+
  scale_fill_discrete(name = "Isoforms")+
  theme(legend.position="top")
# Gria2 flip/flop isoforms
plotdata[,c("Prediction","ENSMUST00000075316.9_Gria2-201","ENSMUST00000107745.7_Gria2-202")] %>%
  melt() %>%
  ggplot(aes(x=Prediction, y=value, fill=Prediction ))+
  geom_boxplot(aes(fill=variable),notch=FALSE,outlier.colour="red")+
  labs(y="Normalized Expression")+
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))+
  scale_fill_discrete(name = "Isoforms")+
  theme(legend.position="top")
# Gria3 flip/flop isoforms
plotdata[,c("Prediction","ENSMUST00000165288.1_Gria3-209","ENSMUST00000076349.11_Gria3-201")] %>%
  melt() %>%
  ggplot(aes(x=Prediction, y=value, fill=Prediction ))+
  geom_boxplot(aes(fill=variable),notch=FALSE,outlier.colour="red")+
  labs(y="Normalized Expression")+
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))+
  scale_fill_discrete(name = "Isoforms")+
  theme(legend.position="top")
# Gria4 flip/flop isoforms
plotdata[,c("Prediction","ENSMUST00000027020.12_Gria4-201","ENSMUST00000063508.14_Gria4-202")] %>%
  melt() %>%
  ggplot(aes(x=Prediction, y=value, fill=Prediction ))+
  geom_boxplot(aes(fill=variable),notch=FALSE,outlier.colour="red")+
  labs(y="Normalized Expression")+
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))+
  scale_fill_discrete(name = "Isoforms")+
  theme(legend.position="top")
dev.off()
# Heatmaps of percent isoform usage for every detected transcript of each
# Gria gene, with columns (cells) annotated by predicted cell type, brain
# region and that gene's expression level.
pdf("output/Heatmap-Percent-isoforms.pdf",width = 12,height = 8)
pheatmap::pheatmap(pct.iso[c(grep(pattern = "Gria1", x = rownames(Hook2018iso@data), value = TRUE)),],
                   cluster_rows = F,show_colnames = F,cellheight = 10,
                   annotation_col = plotdata[,c("Prediction", "region", "Gria1")]
)
pheatmap::pheatmap(pct.iso[c(grep(pattern = "Gria2", x = rownames(Hook2018iso@data), value = TRUE)),],
                   cluster_rows = F,show_colnames = F,cellheight = 10,
                   annotation_col = plotdata[,c("Prediction", "region", "Gria2")]
)
pheatmap::pheatmap(pct.iso[c(grep(pattern = "Gria3", x = rownames(Hook2018iso@data), value = TRUE)),],
                   cluster_rows = F,show_colnames = F,cellheight = 10,
                   annotation_col = plotdata[,c("Prediction", "region", "Gria3")]
)
pheatmap::pheatmap(pct.iso[c(grep(pattern = "Gria4", x = rownames(Hook2018iso@data), value = TRUE)),],
                   cluster_rows = F,show_colnames = F,cellheight = 10,
                   annotation_col = plotdata[,c("Prediction", "region", "Gria4")]
)
dev.off()
# Violin plots of raw (use.raw = TRUE) expression on a log scale for all
# transcripts of selected genes, grouping cells by the "subset" identity.
Hook2018iso <- SetAllIdent(Hook2018iso, id = "subset")
transcripts <- grep(pattern = "Gria1", x = rownames(Hook2018iso@data), value = TRUE)
pdf("output/Gria1.Isoplots.pdf",width = 20,height = 15)
VlnPlot(object = Hook2018iso, features.plot = transcripts, use.raw = TRUE, y.log = TRUE,nCol = 1,x.lab.rot = T)
dev.off()
transcripts <- grep(pattern = "Gria4", x = rownames(Hook2018iso@data), value = TRUE)
pdf("output/Gria4.Isoplots.pdf",width = 20,height = 20)
VlnPlot(object = Hook2018iso, features.plot = transcripts, use.raw = TRUE, y.log = TRUE,nCol = 1,x.lab.rot = T)
dev.off()
transcripts <- grep(pattern = "Grm4", x = rownames(Hook2018iso@data), value = TRUE)
pdf("output/Grm4.Isoplots.pdf",width = 20,height = 30)
VlnPlot(object = Hook2018iso, features.plot = transcripts, use.raw = TRUE, y.log = TRUE,nCol = 1,x.lab.rot = T)
dev.off()
transcripts <- grep(pattern = "Gpr83", x = rownames(Hook2018iso@data), value = TRUE)
pdf("output/Gpr83.Isoplots.pdf",width = 20,height = 10)
VlnPlot(object = Hook2018iso, features.plot = transcripts, use.raw = TRUE, y.log = TRUE,nCol = 1,x.lab.rot = T)
dev.off()
|
#' Original authors code to compute relevance. 'Forked' from https://github.com/cpsievert/LDAvis/blob/6f93aa85499b705c9ae6c56e5985df637f9f5132/R/createJSON.R
#'
#' For each topic of a fitted topic model, collects the R most "relevant"
#' terms at one or more values of the relevance weight lambda, where
#' relevance = lambda * log(phi) + (1 - lambda) * log(lift)
#' (Sievert & Shirley 2014, LDAvis).
#'
#' @param stmbow2lda list holding the model pieces: top.word.phi.beta (K x W
#'   topic-term matrix, rows sum to 1), doc.top.theta (D x K document-topic
#'   matrix, rows sum to 1), vocab (length-W character vector), doc.length
#'   (length-D token counts per document) and term.frequency (length-W).
#' @param R number of top terms to report per topic/lambda combination.
#' @param lambda.step a single value builds the grid seq(0, 1, by = lambda.step);
#'   a vector of length > 1 is used directly as the set of lambda values.
#' @param reorder.topics if TRUE, topics are re-ordered by decreasing corpus
#'   proportion before labelling.
#' @param save.to.disk if TRUE, save the result as 'lda2rel.RData' inside the
#'   first directory whose name matches out.dir.
#' @param check.for.saved.output if TRUE and a saved 'lda2rel.RData' exists
#'   anywhere below the working directory, load and return it instead of
#'   recomputing.
#' @param out.dir output directory name, used only when save.to.disk is TRUE.
#' @param ... unused; retained for call compatibility.
#' @return a data.table with one row per (topic, lambda, rank) holding Term,
#'   Category (topic label "T<k>"), logprob, loglift, lambda, ord (rank 1..R),
#'   Total and Freq.
lda2rel.f<- function(stmbow2lda,R = 10,lambda.step = 0.5,reorder.topics = FALSE,save.to.disk=FALSE,check.for.saved.output=FALSE,out.dir,...) {
	sfn<-'lda2rel.RData'
	# Optional cache: if a previously saved result is found anywhere below the
	# working directory, load it and return it immediately.
	if(check.for.saved.output) if(any(grepl(sfn,dir(recursive=TRUE,full.names=TRUE,ignore.case=TRUE)))) {
		warning(paste('Loading and returning first saved',sfn),call.=FALSE)
		l<-ls()
		load(dir(pattern=sfn,full.names=TRUE,recursive=TRUE,ignore.case=FALSE)[1])
		# load() introduced exactly one new object into this frame; return its value.
		return(get(setdiff(ls(),c(l,'l'))))
	}
	library(data.table,quietly = TRUE)
	phi <- stmbow2lda$top.word.phi.beta
	theta <- stmbow2lda$doc.top.theta
	vocab <- stmbow2lda$vocab
	doc.length <- stmbow2lda$doc.length
	term.frequency <- stmbow2lda$term.frequency
	# Set the values of a few summary statistics of the corpus and model:
	dp <- dim(phi)               # should be K x W
	dt <- dim(theta)             # should be D x K
	N <- sum(doc.length)         # number of tokens in the data (informational)
	W <- length(vocab)           # number of terms in the vocab
	D <- length(doc.length)      # number of documents in the data
	K <- dt[2]                   # number of topics in the model
	# check that certain input dimensions match
	if (dp[1] != K) stop("Number of rows of phi does not match
      number of columns of theta; both should be equal to the number of topics
      in the model.")
	if (D != dt[1]) stop("Length of doc.length not equal
      to the number of rows in theta; both should be equal to the number of
      documents in the data.")
	if (dp[2] != W) stop("Number of terms in vocabulary does
      not match the number of columns of phi (where each row of phi is a
      probability distribution of terms for a given topic).")
	if (length(term.frequency) != W) stop("Length of term.frequency
      not equal to the number of terms in the vocabulary.")
	if (any(nchar(vocab) == 0)) stop("One or more terms in the vocabulary
      has zero characters -- all terms must have at least one character.")
	# check that conditional distributions are normalized:
	phi.test <- all.equal(rowSums(phi), rep(1, K), check.attributes = FALSE)
	theta.test <- all.equal(rowSums(theta), rep(1, dt[1]), check.attributes = FALSE)
	if (!isTRUE(phi.test)) stop("Rows of phi don't all sum to 1.")
	if (!isTRUE(theta.test)) stop("Rows of theta don't all sum to 1.")
	# compute counts of tokens across K topics (length-K vector); these set the
	# marginal topic weights used below.
	topic.frequency <- colSums(theta * doc.length)
	topic.proportion <- topic.frequency/sum(topic.frequency)
	# optionally re-order the K topics in order of decreasing proportion:
	if(reorder.topics) {o <- order(topic.proportion, decreasing = TRUE)} else {o <- seq_along(topic.proportion)}
	phi <- phi[o, ]
	theta <- theta[, o]
	topic.frequency <- topic.frequency[o]
	topic.proportion <- topic.proportion[o]
	# (upstream LDAvis computes an MDS inter-topic layout here; dropped in this fork)
	# token counts for each term-topic combination
	term.topic.frequency <- phi * topic.frequency
	# Recompute term frequencies as column sums of term.topic.frequency rather
	# than trusting the user-supplied vector; work-around for LDAvis issue #32:
	# https://github.com/cpsievert/LDAvis/issues/32
	term.frequency <- colSums(term.topic.frequency)
	stopifnot(all(term.frequency > 0))
	# marginal distribution over terms
	term.proportion <- term.frequency/sum(term.frequency)
	# Most operations on phi after this point are across topics; transpose so
	# column-wise facilities apply (phi becomes W x K).
	phi <- t(phi)
	# compute the distinctiveness and saliency of the terms:
	topic.given.term <- phi/rowSums(phi) # (W x K)
	kernel <- topic.given.term * log(sweep(topic.given.term, MARGIN=2, topic.proportion, `/`))
	distinctiveness <- rowSums(kernel)
	saliency <- term.proportion * distinctiveness
	# Order the terms for the "default" (no topic selected) view by decreasing
	# saliency. NOTE: 'default' is currently not appended to the output (the
	# rbind below is commented out); kept for parity with upstream LDAvis.
	default.terms <- vocab[order(saliency, decreasing = TRUE)][seq_len(R)]
	counts <- as.integer(term.frequency[match(default.terms, vocab)])
	Rs <- rev(seq_len(R))
	default <- data.frame(Term = default.terms, logprob = Rs, loglift = Rs,
												Freq = counts, Total = counts, Category = "Default",
												stringsAsFactors = FALSE,lambda=NA)
	topic_seq <- rep(seq_len(K), each = R)
	category <- paste0("Topic", topic_seq)
	lift <- phi/term.proportion
	# Collect the R most relevant terms for one value of lambda; relevance is a
	# lambda-weighted blend of log probability and log lift.
	find_relevance <- function(i) {
		relevance <- i*log(phi) + (1 - i)*log(lift)
		idx <- apply(relevance, 2,
								 function(x) order(x, decreasing = TRUE)[seq_len(R)])
		# for matrices, we pick out elements by their row/column index
		indices <- cbind(c(idx), topic_seq)
		data.frame(Term = vocab[idx], Category = category,
							 logprob = round(log(phi[indices]), 4),
							 loglift = round(log(lift[indices]), 4),
							 stringsAsFactors = FALSE)
	}
	# A scalar lambda.step defines a grid over [0, 1]; a vector is used as-is.
	lambda.seq <- if(!(length(lambda.step)-1)) seq(0, 1, by=lambda.step) else lambda.step
	# One data frame per lambda, tagged with the lambda value and rank (ord).
	tinfo <- lapply(as.list(lambda.seq), function(x) {x<-data.frame(find_relevance(x),lambda=as.character(x));data.frame(x,ord=seq_len(R))})
	tinfo <- unique(do.call("rbind", tinfo))
	tinfo$Total <- term.frequency[match(tinfo$Term, vocab)]
	rownames(term.topic.frequency) <- paste0("Topic", seq_len(K))
	colnames(term.topic.frequency) <- vocab
	# Topic-specific token count for each listed term (matrix indexing by name).
	tinfo$Freq <- term.topic.frequency[as.matrix(tinfo[c("Category", "Term")])]
	#tinfo <- rbind(default, tinfo)
	# Shorten topic labels from "Topic<k>" to "T<k>".
	tinfo$Category<-sub('opic','',tinfo$Category)
	lda2rel<-data.table(tinfo)
	# Best-effort save into the first directory matching out.dir; failures are
	# non-fatal by design (try()).
	if(save.to.disk) try(save(lda2rel,file=paste(grep(paste(out.dir,'$',sep=''),dir(include.dirs = TRUE,full.names=TRUE,recursive=FALSE,ignore.case=FALSE),value = TRUE)[1],sfn,sep=.Platform$file.sep)))
	lda2rel
}
|
/plagiat/R/lda2rel.f.R
|
no_license
|
brooksambrose/pack-dev
|
R
| false
| false
| 7,126
|
r
|
#' Original authors code to compute relevance. 'Forked' from https://github.com/cpsievert/LDAvis/blob/6f93aa85499b705c9ae6c56e5985df637f9f5132/R/createJSON.R
#'
#' For each topic of a fitted topic model, collects the R most "relevant"
#' terms at one or more values of the relevance weight lambda, where
#' relevance = lambda * log(phi) + (1 - lambda) * log(lift)
#' (Sievert & Shirley 2014, LDAvis).
#'
#' @param stmbow2lda list holding the model pieces: top.word.phi.beta (K x W
#'   topic-term matrix, rows sum to 1), doc.top.theta (D x K document-topic
#'   matrix, rows sum to 1), vocab (length-W character vector), doc.length
#'   (length-D token counts per document) and term.frequency (length-W).
#' @param R number of top terms to report per topic/lambda combination.
#' @param lambda.step a single value builds the grid seq(0, 1, by = lambda.step);
#'   a vector of length > 1 is used directly as the set of lambda values.
#' @param reorder.topics if TRUE, topics are re-ordered by decreasing corpus
#'   proportion before labelling.
#' @param save.to.disk if TRUE, save the result as 'lda2rel.RData' inside the
#'   first directory whose name matches out.dir.
#' @param check.for.saved.output if TRUE and a saved 'lda2rel.RData' exists
#'   anywhere below the working directory, load and return it instead of
#'   recomputing.
#' @param out.dir output directory name, used only when save.to.disk is TRUE.
#' @param ... unused; retained for call compatibility.
#' @return a data.table with one row per (topic, lambda, rank) holding Term,
#'   Category (topic label "T<k>"), logprob, loglift, lambda, ord (rank 1..R),
#'   Total and Freq.
lda2rel.f<- function(stmbow2lda,R = 10,lambda.step = 0.5,reorder.topics = FALSE,save.to.disk=FALSE,check.for.saved.output=FALSE,out.dir,...) {
	sfn<-'lda2rel.RData'
	# Optional cache: if a previously saved result is found anywhere below the
	# working directory, load it and return it immediately.
	if(check.for.saved.output) if(any(grepl(sfn,dir(recursive=TRUE,full.names=TRUE,ignore.case=TRUE)))) {
		warning(paste('Loading and returning first saved',sfn),call.=FALSE)
		l<-ls()
		load(dir(pattern=sfn,full.names=TRUE,recursive=TRUE,ignore.case=FALSE)[1])
		# load() introduced exactly one new object into this frame; return its value.
		return(get(setdiff(ls(),c(l,'l'))))
	}
	library(data.table,quietly = TRUE)
	phi <- stmbow2lda$top.word.phi.beta
	theta <- stmbow2lda$doc.top.theta
	vocab <- stmbow2lda$vocab
	doc.length <- stmbow2lda$doc.length
	term.frequency <- stmbow2lda$term.frequency
	# Set the values of a few summary statistics of the corpus and model:
	dp <- dim(phi)               # should be K x W
	dt <- dim(theta)             # should be D x K
	N <- sum(doc.length)         # number of tokens in the data (informational)
	W <- length(vocab)           # number of terms in the vocab
	D <- length(doc.length)      # number of documents in the data
	K <- dt[2]                   # number of topics in the model
	# check that certain input dimensions match
	if (dp[1] != K) stop("Number of rows of phi does not match
      number of columns of theta; both should be equal to the number of topics
      in the model.")
	if (D != dt[1]) stop("Length of doc.length not equal
      to the number of rows in theta; both should be equal to the number of
      documents in the data.")
	if (dp[2] != W) stop("Number of terms in vocabulary does
      not match the number of columns of phi (where each row of phi is a
      probability distribution of terms for a given topic).")
	if (length(term.frequency) != W) stop("Length of term.frequency
      not equal to the number of terms in the vocabulary.")
	if (any(nchar(vocab) == 0)) stop("One or more terms in the vocabulary
      has zero characters -- all terms must have at least one character.")
	# check that conditional distributions are normalized:
	phi.test <- all.equal(rowSums(phi), rep(1, K), check.attributes = FALSE)
	theta.test <- all.equal(rowSums(theta), rep(1, dt[1]), check.attributes = FALSE)
	if (!isTRUE(phi.test)) stop("Rows of phi don't all sum to 1.")
	if (!isTRUE(theta.test)) stop("Rows of theta don't all sum to 1.")
	# compute counts of tokens across K topics (length-K vector); these set the
	# marginal topic weights used below.
	topic.frequency <- colSums(theta * doc.length)
	topic.proportion <- topic.frequency/sum(topic.frequency)
	# optionally re-order the K topics in order of decreasing proportion:
	if(reorder.topics) {o <- order(topic.proportion, decreasing = TRUE)} else {o <- seq_along(topic.proportion)}
	phi <- phi[o, ]
	theta <- theta[, o]
	topic.frequency <- topic.frequency[o]
	topic.proportion <- topic.proportion[o]
	# (upstream LDAvis computes an MDS inter-topic layout here; dropped in this fork)
	# token counts for each term-topic combination
	term.topic.frequency <- phi * topic.frequency
	# Recompute term frequencies as column sums of term.topic.frequency rather
	# than trusting the user-supplied vector; work-around for LDAvis issue #32:
	# https://github.com/cpsievert/LDAvis/issues/32
	term.frequency <- colSums(term.topic.frequency)
	stopifnot(all(term.frequency > 0))
	# marginal distribution over terms
	term.proportion <- term.frequency/sum(term.frequency)
	# Most operations on phi after this point are across topics; transpose so
	# column-wise facilities apply (phi becomes W x K).
	phi <- t(phi)
	# compute the distinctiveness and saliency of the terms:
	topic.given.term <- phi/rowSums(phi) # (W x K)
	kernel <- topic.given.term * log(sweep(topic.given.term, MARGIN=2, topic.proportion, `/`))
	distinctiveness <- rowSums(kernel)
	saliency <- term.proportion * distinctiveness
	# Order the terms for the "default" (no topic selected) view by decreasing
	# saliency. NOTE: 'default' is currently not appended to the output (the
	# rbind below is commented out); kept for parity with upstream LDAvis.
	default.terms <- vocab[order(saliency, decreasing = TRUE)][seq_len(R)]
	counts <- as.integer(term.frequency[match(default.terms, vocab)])
	Rs <- rev(seq_len(R))
	default <- data.frame(Term = default.terms, logprob = Rs, loglift = Rs,
												Freq = counts, Total = counts, Category = "Default",
												stringsAsFactors = FALSE,lambda=NA)
	topic_seq <- rep(seq_len(K), each = R)
	category <- paste0("Topic", topic_seq)
	lift <- phi/term.proportion
	# Collect the R most relevant terms for one value of lambda; relevance is a
	# lambda-weighted blend of log probability and log lift.
	find_relevance <- function(i) {
		relevance <- i*log(phi) + (1 - i)*log(lift)
		idx <- apply(relevance, 2,
								 function(x) order(x, decreasing = TRUE)[seq_len(R)])
		# for matrices, we pick out elements by their row/column index
		indices <- cbind(c(idx), topic_seq)
		data.frame(Term = vocab[idx], Category = category,
							 logprob = round(log(phi[indices]), 4),
							 loglift = round(log(lift[indices]), 4),
							 stringsAsFactors = FALSE)
	}
	# A scalar lambda.step defines a grid over [0, 1]; a vector is used as-is.
	lambda.seq <- if(!(length(lambda.step)-1)) seq(0, 1, by=lambda.step) else lambda.step
	# One data frame per lambda, tagged with the lambda value and rank (ord).
	tinfo <- lapply(as.list(lambda.seq), function(x) {x<-data.frame(find_relevance(x),lambda=as.character(x));data.frame(x,ord=seq_len(R))})
	tinfo <- unique(do.call("rbind", tinfo))
	tinfo$Total <- term.frequency[match(tinfo$Term, vocab)]
	rownames(term.topic.frequency) <- paste0("Topic", seq_len(K))
	colnames(term.topic.frequency) <- vocab
	# Topic-specific token count for each listed term (matrix indexing by name).
	tinfo$Freq <- term.topic.frequency[as.matrix(tinfo[c("Category", "Term")])]
	#tinfo <- rbind(default, tinfo)
	# Shorten topic labels from "Topic<k>" to "T<k>".
	tinfo$Category<-sub('opic','',tinfo$Category)
	lda2rel<-data.table(tinfo)
	# Best-effort save into the first directory matching out.dir; failures are
	# non-fatal by design (try()).
	if(save.to.disk) try(save(lda2rel,file=paste(grep(paste(out.dir,'$',sep=''),dir(include.dirs = TRUE,full.names=TRUE,recursive=FALSE,ignore.case=FALSE),value = TRUE)[1],sfn,sep=.Platform$file.sep)))
	lda2rel
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GCA.R
\docType{package}
\name{GCA-package}
\alias{GCA}
\alias{GCA-package}
\title{\strong{GCA}: Genetic connectedness analysis}
\description{
An R package for genetic connectedness analysis across units using pedigree and genomic data.
}
\details{
The GCA package encompasses numerous connectedness statistics, which can be grouped into two categories:
those derived from prediction error variance (PEV) and those derived from the variance of unit effect estimates (VE).
The PEV-derived metrics include prediction error variance of differences (PEVD), coefficient of determination (CD),
and prediction error correlation (r). These PEV-derived metrics can be summarized at the unit level as the average PEV within
and across units (GrpAve), average PEV of all pairwise differences between individuals across units (IdAve),
or using a contrast vector (Contrast). VE-derived metrics comprise variance of differences in management unit effects (VED),
coefficient of determination of VED (CDVED), and connectedness rating (CR). Three correction factors accounting for the number
of fixed effects can be applied for each VE-derived metric. These include non-correction (0), correction of unit effect (1),
and correction of two or more fixed effects (2). The core function of GCA is integrated with C++ to improve computational
efficiency using the Rcpp package (Eddelbuettel and François 2011). The details of these connectedness statistics can be found
in Yu and Morota 2019.
}
\section{Available functions in GCA package}{
\itemize{
\item computeA(): Computation of numerator relationship matrix.
\item computeG(): Computation of genomic relationship matrix.
\item gca(): Measures of genetic connectedness.
\item varcomp(): Estimates of variance components using eigenvalues and eigenvectors.
}
}
\references{
\emph{Eddelbuettel D, François R (2011). Rcpp: Seamless R and C++ Integration. Journal of Statistical Software, 40(8), 1–18.}
\emph{Yu H and Morota G. 2019. GCA: An R Package for Genetic Connectedness Analysis Using Pedigree and Genomic Data.}
}
\author{
Haipeng Yu and Gota Morota
Maintainer: Haipeng Yu \email{haipengyu@vt.edu}
}
\keyword{internal}
|
/man/GCA-package.Rd
|
no_license
|
QGresources/GCA
|
R
| false
| true
| 2,254
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GCA.R
\docType{package}
\name{GCA-package}
\alias{GCA}
\alias{GCA-package}
\title{\strong{GCA}: Genetic connectedness analysis}
\description{
An R package for genetic connectedness analysis across units using pedigree and genomic data.
}
\details{
The GCA package encompasses numerous connectedness statistics, which could be labeled as two groups,
by reference to connectedness based on prediction error variance (PEV) and variance of unit effect estimates (VE).
The PEV-derived metrics include prediction error variance of differences (PEVD), coefficient of determination (CD),
and prediction error correlation (r). These PEV-derived metrics can be summarized at the unit level as the average PEV within
and across units (GrpAve), average PEV of all pairwise differences between individuals across units (IdAve),
or using a contrast vector (Contrast). VE-derived metrics comprise variance of differences in management unit effects (VED),
coefficient of determination of VED (CDVED), and connectedness rating (CR). Three correction factors accounting for the number
of fixed effects can be applied for each VE-derived metric. These include non-correction (0), correction of unit effect (1),
and correction of two or more fixed effects (2). The core function of GCA is integrated with C++ to improve computational
efficiency using the Rcpp package (Eddelbuettel and François 2011). The details of these connectedness statistics can be found
in Yu and Morota 2019.
}
\section{Available functions in GCA package}{
\itemize{
\item computeA(): Computation of numerator relationship matrix.
\item computeG(): Computation of genomic relationship matrix.
\item gca(): Measures of genetic connectedness.
\item varcomp(): Estimates of variance components using eigenvalues and eigenvectors.
}
}
\references{
\emph{Eddelbuettel D, François R (2011). Rcpp: Seamless R and C++ Integration. Journal of Statistical Software, 40(8), 1–18.}
\emph{Yu H and Morota G. 2019. GCA: An R Package for Genetic Connectedness Analysis Using Pedigree and Genomic Data.}
}
\author{
Haipeng Yu and Gota Morota
Maintainer: Haipeng Yu \email{haipengyu@vt.edu}
}
\keyword{internal}
|
#' Clustering coefficient
#'
#' Computes the global clustering coefficient (transitivity) of a network:
#' three times the number of triangles divided by the number of two-paths.
#' A directed network is first symmetrized to an undirected one (with a
#' warning), since the statistic is defined for undirected graphs.
#'
#' @param net network object
#'
#' @return clustering coefficient, \deqn{\frac{3 \times triangles}{two-paths}}
#' @export
#' @importFrom ergm summary.formula
#' @importFrom network network is.directed
#' @importFrom sna symmetrize
#'
#' @examples
#' n <- makeNetwork(10, .3)
#' clusteringCoef(n)
clusteringCoef <- function(net) {
  if (is.directed(net)) {
    warning("Converting the network to undirected")
    # symmetrize() returns an adjacency matrix; rebuild an undirected network
    net <- network(symmetrize(net[, ]), directed = FALSE)
  }
  # 3 * triangles / two-paths; unname() strips the statistic's name attribute
  unname(3 * summary(net ~ triangles) / summary(net ~ twopath))
}
|
/R/clusteringCoef.R
|
no_license
|
michaellevy/netUtils
|
R
| false
| false
| 522
|
r
|
#' Clustering coefficient
#'
#' @param net network object
#'
#' @return clustering coefficient, \deqn{\frac{3 \times triangles}{two-paths}}
#' @export
#' @importFrom ergm summary.formula
#' @importFrom network network
#'
#' @examples
#' n <- makeNetwork(10, .3)
#' clusteringCoef(n)
clusteringCoef = function(net) {
if (is.directed(net)) {
warning("Converting the network to undirected")
net = network(symmetrize(net[,]), directed = FALSE)
}
unname(3 * summary(net ~ triangles) / summary(net ~ twopath))
}
|
##======================================##
## Coursera - Getting and Cleaning Data
## "Tidy Data" course project
##======================================##
## Requirements:
## "Human Activity Recognition Using Smartphones Data Set"
## UCI Machine Learning Repository
## https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
##
## Download data and extract content as a subfolder to your R working directory
## "./UCI HAR Dataset/"
##======================================##
## This function will read select data from the set
## Apply a format and summarise process
## write a "tidy data" csv file to your R working directory, "tidydata.txt"
##======================================##
## to run this code:
## save "run_analysis.R" to your R working directory
## in R:
## > source("run_analysis.R")
## > run_analysis()
##======================================##
## Jack Cheney. 24 October 2015
run_analysis <- function() {
  ## Reads the UCI HAR train/test data from "./UCI HAR Dataset/", merges the
  ## two sets, keeps only mean()/std() measurements, labels activities with
  ## descriptive names, averages each variable by (Activity, Subject), and
  ## writes the tidy result to "tidydata.txt" in the working directory.
  library(dplyr)

  #---- Step 0. Read the data ----#
  ## train data set
  x_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
  y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
  subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
  ## test data set
  x_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
  y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
  subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
  ## labels
  features <- read.table("./UCI HAR Dataset/features.txt")                # variable names
  activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")  # six activities

  #---- Objective 1. Merge the data sets ----#
  # and
  #---- Objective 4. Add variable names ----#
  x_data <- rbind(x_train, x_test)                    # the measurements
  y_data <- rbind(y_train, y_test)                    # activity id
  subject_data <- rbind(subject_train, subject_test)  # subject id
  colnames(x_data) <- features$V2                     # add variable names
  merge_data <- cbind(Activity = y_data[, 1], Subject = subject_data[, 1], x_data)

  #---- Objective 2. Extract measurements for mean and std ----#
  ## Keep "std" columns plus "mean" columns, but exclude "meanFreq" variants.
  keep <- grepl("std", colnames(merge_data)) |
    (grepl("mean", colnames(merge_data)) & !grepl("meanFreq", colnames(merge_data)))
  sub_data <- data.frame(c(merge_data[, 1:2]), merge_data[, keep])

  #---- Objective 3. Use descriptive activity names ----#
  sub_data$Activity <- activity_labels[match(sub_data$Activity, activity_labels$V1), 2]

  #---- Objective 5. Group by Activity, Subject and average of variables ----#
  ## summarise_each()/funs() are deprecated in dplyr; across() is the
  ## supported replacement and produces the same per-group column means.
  clean_data <- sub_data %>%
    group_by(Activity, Subject) %>%
    summarise(across(everything(), mean), .groups = "drop")

  write.table(clean_data, "tidydata.txt", sep = "\t", row.names = FALSE)
}
|
/GettingCleaningData_Project/run_analysis.R
|
no_license
|
Jackhawk/datasciencecoursera
|
R
| false
| false
| 2,720
|
r
|
##======================================##
## Coursera - Getting and Cleaning Data
## "Tidy Data" course project
##======================================##
## Requirements:
## "Human Activity Recognition Using Smartphones Data Set"
## UCI Machine Learning Repository
## https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
##
## Download data and extract content as a subfolder to your R working directory
## "./UCI HAR Dataset/"
##======================================##
## This function will read select data from the set
## Apply a format and summarise process
## write a "tidy data" csv file to your R working directory, "tidydata.txt"
##======================================##
## to run this code:
## save "run_analysis.R" to your R working directory
## in R:
## > source("run_analysis.R")
## > run_analysis()
##======================================##
## Jack Cheney. 24 October 2015
run_analysis <- function() {
library(dplyr)
#---- Step 0. Read the data ----#
## train data set
x_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
## test data set
x_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
## labels
features <- read.table("./UCI HAR Dataset/features.txt") # colnames
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt") # six activities
#---- Objective 1. Merge the data sets ----#
# and
#---- Objective 4. Add variable names ----#
x_data <- rbind(x_train, x_test) # the measurements
y_data <- rbind(y_train, y_test) # activity id
subject_data <- rbind(subject_train, subject_test) # subject id
colnames(x_data) <- features$V2 # add variable names
merge_data <- cbind(Activity = y_data[,1], Subject = subject_data[,1], x_data) # mash it
#---- Objective 2. Extract measurements for mean and std ----#
sub_data <- data.frame(c(merge_data[,1:2]),
merge_data[,grepl("std",colnames(merge_data))
|(grepl("mean",colnames(merge_data))
& !grepl("meanFreq",colnames(merge_data)))])
#---- Objective 3. Use descriptive activity names ----#
sub_data$Activity <- activity_labels[match(sub_data$Activity, activity_labels$V1),2]
#---- Objective 5. Group by Activity, Subject and average of variables ----#
clean_data <- sub_data %>% group_by(Activity, Subject) %>% summarise_each(funs(mean))
write.table(clean_data, "tidydata.txt", sep="\t", row.names=FALSE)
}
|
/*
<codex>
<abstract>AUPinkNoise.r</abstract>
<\codex>
*/
// Rez resource description for the AUPinkNoise Audio Unit (a generator
// component that produces pink noise).  The #defines below parameterize
// the generic component-resource template included at the bottom
// (AUResources.r), which expands them into the actual 'thng' resources.
#include <AudioUnit/AudioUnit.r>
#include "AUPinkNoiseVersion.h"
// Note that resource IDs must be spaced 2 apart for the 'STR ' name and description
#define kAudioUnitResID_AUPinkNoise 1000
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AUPinkNoise~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Component type/subtype/manufacturer identify this unit to the host;
// ENTRY_POINT names the exported factory function in the binary.
#define RES_ID kAudioUnitResID_AUPinkNoise
#define COMP_TYPE kAudioUnitType_Generator
#define COMP_SUBTYPE 'pink'
#define COMP_MANUF kAudioUnitManufacturer_Apple
#define VERSION 0x00010000
#define NAME "Apple: AUPinkNoise"
#define DESCRIPTION "Audio Unit Pink Noise Generator"
#define ENTRY_POINT "AUPinkNoiseEntry"
#include "AUResources.r"
|
/AUPinkNoise/AUPinkNoise.r
|
no_license
|
ocrickard/CocoaSampleCode
|
R
| false
| false
| 708
|
r
|
/*
<codex>
<abstract>AUPinkNoise.r</abstract>
<\codex>
*/
#include <AudioUnit/AudioUnit.r>
#include "AUPinkNoiseVersion.h"
// Note that resource IDs must be spaced 2 apart for the 'STR ' name and description
#define kAudioUnitResID_AUPinkNoise 1000
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AUPinkNoise~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#define RES_ID kAudioUnitResID_AUPinkNoise
#define COMP_TYPE kAudioUnitType_Generator
#define COMP_SUBTYPE 'pink'
#define COMP_MANUF kAudioUnitManufacturer_Apple
#define VERSION 0x00010000
#define NAME "Apple: AUPinkNoise"
#define DESCRIPTION "Audio Unit Pink Noise Generator"
#define ENTRY_POINT "AUPinkNoiseEntry"
#include "AUResources.r"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/worker.R
\name{worker}
\alias{worker}
\title{Initialize jiebaR worker}
\usage{
worker(type = "mix", dict = DICTPATH, hmm = HMMPATH, user = USERPATH,
idf = IDFPATH, stop_word = STOPPATH, write = T, qmax = 20, topn = 5,
encoding = "UTF-8", detect = T, symbol = F, lines = 1e+05,
output = NULL, bylines = F, user_weight = "max")
}
\arguments{
\item{type}{The type of jiebaR workers including \code{mix}, \code{mp}, \code{hmm},
\code{query}, \code{tag}, \code{simhash}, and \code{keywords}.}
\item{dict}{A path to main dictionary, default value is \code{DICTPATH},
and the value is used for \code{mix}, \code{mp}, \code{query},
\code{tag}, \code{simhash} and \code{keywords} workers.}
\item{hmm}{A path to Hidden Markov Model, default value is \code{HMMPATH},
and the value is used for \code{mix}, \code{hmm}, \code{query},
\code{tag}, \code{simhash} and \code{keywords} workers.}
\item{user}{A path to user dictionary, default value is \code{USERPATH},
and the value is used for \code{mix}, \code{tag} and \code{mp} workers.}
\item{idf}{A path to inverse document frequency, default value is \code{IDFPATH},
and the value is used for \code{simhash} and \code{keywords} workers.}
\item{stop_word}{A path to stop word dictionary, default value is \code{STOPPATH},
and the value is used for \code{simhash}, \code{keywords}, \code{tagger} and \code{segment} workers. Encoding of this file is checked by \code{filecoding}, and it should be UTF-8 encoding. For \code{segment} workers, the default \code{STOPPATH} will not be used, so you should provide another file path.}
\item{write}{Whether to write the output to a file, or return
the result in an object. This value will only be used when
the input is a file path. The default value is TRUE. The value
is used for segment and speech tagging workers.}
\item{qmax}{Max query length of words, and the value
is used for \code{query} workers.}
\item{topn}{The number of keywords, and the value is used for
\code{simhash} and \code{keywords} workers.}
\item{encoding}{The encoding of the input file. If encoding
detection is enabled, the value of \code{encoding} will be
ignored.}
\item{detect}{Whether to detect the encoding of the input file
using the \code{filecoding} function. If encoding
detection is enabled, the value of \code{encoding} will be
ignored.}
\item{symbol}{Whether to keep symbols in the sentence.}
\item{lines}{The maximal number of lines to read at one
time when input is a file. The value
is used for segmentation and speech tagging workers.}
\item{output}{A path to the output file, and default worker will
generate file name by system time stamp, the value
is used for segmentation and speech tagging workers.}
\item{bylines}{return the result by the lines of input files}
\item{user_weight}{the weight of the user dict words. "min" "max" or "median".}
}
\value{
This function returns an environment containing segmentation
settings and worker. Public settings can be modified and got
using \code{$}.
}
\description{
This function can initialize jiebaR workers. You can initialize different
kinds of workers including \code{mix}, \code{mp}, \code{hmm},
\code{query}, \code{tag}, \code{simhash}, and \code{keywords}.
see Detail for more information.
}
\details{
The package uses initialized engines for word segmentation, and you
can initialize multiple engines simultaneously. You can also reset the model
public settings using \code{$} such as
\code{ WorkerName$symbol = T }. Some private settings are fixed
when an engine is initialized, and you can get them by
\code{WorkerName$PrivateVarible}.
Maximum probability segmentation model uses Trie tree to construct
a directed acyclic graph and uses dynamic programming algorithm. It
is the core segmentation algorithm. \code{dict} and \code{user}
should be provided when initializing jiebaR worker.
Hidden Markov Model uses HMM model to determine status set and
observed set of words. The default HMM model is based on People's Daily
language library. \code{hmm} should be provided when initializing
jiebaR worker.
MixSegment model uses both Maximum probability segmentation model
and Hidden Markov Model to construct segmentation. \code{dict}
\code{hmm} and \code{user} should be provided when initializing
jiebaR worker.
QuerySegment model uses MixSegment to construct segmentation and then
enumerates all the possible long words in the dictionary. \code{dict},
\code{hmm} and \code{qmax} should be provided when initializing
jiebaR worker.
Speech Tagging worker uses MixSegment model to cut word and
tag each word after segmentation using labels compatible with
ictclas. \code{dict},
\code{hmm} and \code{user} should be provided when initializing
jiebaR worker.
Keyword Extraction worker uses MixSegment model to cut word and use
TF-IDF algorithm to find the keywords. \code{dict} ,\code{hmm},
\code{idf}, \code{stop_word} and \code{topn} should be provided when initializing
jiebaR worker.
Simhash worker uses the keyword extraction worker to find the keywords
and uses simhash algorithm to compute simhash. \code{dict}
\code{hmm}, \code{idf} and \code{stop_word} should be provided when initializing
jiebaR worker.
}
\examples{
### Note: Can not display Chinese character on Windows here.
\dontrun{
words = "hello world"
test1 = worker()
test1
test1 <= words}
\dontrun{
test <= "./temp.txt"
engine2 = worker("mix",symbol = T)
engine2 <= "./temp.txt"
engine2
engine2$symbol = T
engine2
engine2 <= words
engine3 = worker(type = "mix", dict = "dict_path",symbol = T)
engine3 <= "./temp.txt"
}
\dontrun{
keys = worker("keywords", topn = 1)
keys <= words
tagger = worker("tag")
tagger <= words}
}
\author{
Qin Wenfeng
}
|
/man/worker.Rd
|
permissive
|
Veterun/jiebaR
|
R
| false
| true
| 5,941
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/worker.R
\name{worker}
\alias{worker}
\title{Initialize jiebaR worker}
\usage{
worker(type = "mix", dict = DICTPATH, hmm = HMMPATH, user = USERPATH,
idf = IDFPATH, stop_word = STOPPATH, write = T, qmax = 20, topn = 5,
encoding = "UTF-8", detect = T, symbol = F, lines = 1e+05,
output = NULL, bylines = F, user_weight = "max")
}
\arguments{
\item{type}{The type of jiebaR workers including \code{mix}, \code{mp}, \code{hmm},
\code{query}, \code{tag}, \code{simhash}, and \code{keywords}.}
\item{dict}{A path to main dictionary, default value is \code{DICTPATH},
and the value is used for \code{mix}, \code{mp}, \code{query},
\code{tag}, \code{simhash} and \code{keywords} workers.}
\item{hmm}{A path to Hidden Markov Model, default value is \code{HMMPATH},
and the value is used for \code{mix}, \code{hmm}, \code{query},
\code{tag}, \code{simhash} and \code{keywords} workers.}
\item{user}{A path to user dictionary, default value is \code{USERPATH},
and the value is used for \code{mix}, \code{tag} and \code{mp} workers.}
\item{idf}{A path to inverse document frequency, default value is \code{IDFPATH},
and the value is used for \code{simhash} and \code{keywords} workers.}
\item{stop_word}{A path to stop word dictionary, default value is \code{STOPPATH},
and the value is used for \code{simhash}, \code{keywords}, \code{tagger} and \code{segment} workers. Encoding of this file is checked by \code{filecoding}, and it should be UTF-8 encoding. For \code{segment} workers, the default \code{STOPPATH} will not be used, so you should provide another file path.}
\item{write}{Whether to write the output to a file, or return
a the result in a object. This value will only be used when
the input is a file path. The default value is TRUE. The value
is used for segment and speech tagging workers.}
\item{qmax}{Max query length of words, and the value
is used for \code{query} workers.}
\item{topn}{The number of keywords, and the value is used for
\code{simhash} and \code{keywords} workers.}
\item{encoding}{The encoding of the input file. If encoding
detection is enable, the value of \code{encoding} will be
ignore.}
\item{detect}{Whether to detect the encoding of input file
using \code{filecoding} function. If encoding
detection is enable, the value of \code{encoding} will be
ignore.}
\item{symbol}{Whether to keep symbols in the sentence.}
\item{lines}{The maximal number of lines to read at one
time when input is a file. The value
is used for segmentation and speech tagging workers.}
\item{output}{A path to the output file, and default worker will
generate file name by system time stamp, the value
is used for segmentation and speech tagging workers.}
\item{bylines}{return the result by the lines of input files}
\item{user_weight}{the weight of the user dict words. "min" "max" or "median".}
}
\value{
This function returns an environment containing segmentation
settings and worker. Public settings can be modified and got
using \code{$}.
}
\description{
This function can initialize jiebaR workers. You can initialize different
kinds of workers including \code{mix}, \code{mp}, \code{hmm},
\code{query}, \code{tag}, \code{simhash}, and \code{keywords}.
see Detail for more information.
}
\details{
The package uses initialized engines for word segmentation, and you
can initialize multiple engines simultaneously. You can also reset the model
public settings using \code{$} such as
\code{ WorkerName$symbol = T }. Some private settings are fixed
when a engine is initialized, and you can get then by
\code{WorkerName$PrivateVarible}.
Maximum probability segmentation model uses Trie tree to construct
a directed acyclic graph and uses dynamic programming algorithm. It
is the core segmentation algorithm. \code{dict} and \code{user}
should be provided when initializing jiebaR worker.
Hidden Markov Model uses HMM model to determine status set and
observed set of words. The default HMM model is based on People's Daily
language library. \code{hmm} should be provided when initializing
jiebaR worker.
MixSegment model uses both Maximum probability segmentation model
and Hidden Markov Model to construct segmentation. \code{dict}
\code{hmm} and \code{user} should be provided when initializing
jiebaR worker.
QuerySegment model uses MixSegment to construct segmentation and then
enumerates all the possible long words in the dictionary. \code{dict},
\code{hmm} and \code{qmax} should be provided when initializing
jiebaR worker.
Speech Tagging worker uses MixSegment model to cut word and
tag each word after segmentation using labels compatible with
ictclas. \code{dict},
\code{hmm} and \code{user} should be provided when initializing
jiebaR worker.
Keyword Extraction worker uses MixSegment model to cut word and use
TF-IDF algorithm to find the keywords. \code{dict} ,\code{hmm},
\code{idf}, \code{stop_word} and \code{topn} should be provided when initializing
jiebaR worker.
Simhash worker uses the keyword extraction worker to find the keywords
and uses simhash algorithm to compute simhash. \code{dict}
\code{hmm}, \code{idf} and \code{stop_word} should be provided when initializing
jiebaR worker.
}
\examples{
### Note: Can not display Chinese character on Windows here.
\dontrun{
words = "hello world"
test1 = worker()
test1
test1 <= words}
\dontrun{
test <= "./temp.txt"
engine2 = worker("mix",symbol = T)
engine2 <= "./temp.txt"
engine2
engine2$symbol = T
engine2
engine2 <= words
engine3 = worker(type = "mix", dict = "dict_path",symbol = T)
engine3 <= "./temp.txt"
}
\dontrun{
keys = worker("keywords", topn = 1)
keys <= words
tagger = worker("tag")
tagger <= words}
}
\author{
Qin Wenfeng
}
|
## Produces plot2.png: a time series of household global active power.
## Assumes load_data.R sits in the current R working directory and exposes a
## `data` data frame with `datetime` and `global_active_power` columns; see
## load_data.R for instructions on downloading the dataset if it is missing.

## load data using load_data.R
source("load_data.R")

## (Optionally set LC_TIME to an English/US locale so weekday tick labels
## render in English.)
## Sys.setlocale(category="LC_TIME", locale="US")

## Render directly to a 480x480 PNG device.
png(filename = "plot2.png", width = 480, height = 480)

## Line plot of global active power over time, with no x-axis label.
plot(data$datetime, data$global_active_power,
     type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")

## Close the device so the file is flushed to disk.
dev.off()
|
/plot2.R
|
no_license
|
jayantsahewal/ExData_Plotting1
|
R
| false
| false
| 688
|
r
|
## The script assumes that the load_data.R is contained in the current R
## working directory which in turn assumes that unzipped data has already been
## downloaded. If not, then please follow the instructions in load_data.R file
## to download the dataset into the current R working directory.
## load data using load_data.R
source("load_data.R")
## set locale to US/English (for days of week in plot ticks)
## Sys.setlocale(category="LC_TIME", locale="US")
## open device
png(filename="plot2.png", width=480, height=480)
## plot time series
plot(data$global_active_power ~ data$datetime, ylab="Global Active Power (kilowatts)",
xlab="", type="l")
## close device:
dev.off()
|
# classifyBurnability: currently a pass-through stub.  All of the
# classification logic below is commented out pending a rewrite based on
# runMe.R (see note inside); until then the function returns `cohortData`
# unchanged.  The commented draft is kept as a reference for the intended
# behaviour: subset cohortData to burned pixel groups, map species codes to
# burn classes, and compute proportional biomass per class.
classifyBurnability <- function(cohortData, pixelGroupMap, pixelsToSubset = NULL){
  #
  # NEEDS TO BE REDONE BASED ON !runMe.R!
  # # Function to reclassify
  # reclassCohortData <- function(cohortData, reclassTable){
  #   newCohortData <- cohortData[reclassTable, on = "speciesCode"]
  #   newCohortData[age < 15, burnClass := "class1"]
  #
  #   # Assertion
  #   testthat::expect_true(all(newCohortData$pixelGroup %in% firePixelGroup))
  #
  #   return(newCohortData)
  # }
  #
  # if (!is.null(pixelsToSubset)){
  #   # Get the pixel groups for the fire polygons for the given years
  #   # As fires happened in non-forest places as well, we have NA's here
  #   # (i.e. a lot of NA's @ 40-50%)
  #   firePixelGroup <- pixelGroupMap[pixelsToSubset]
  #   # Reduce cohortData to these pixels
  #   # any(is.na(cohortData$pixelGroup)) == FALSE : No NA's in pixelGroups
  #   cohortData <- cohortData[pixelGroup %in% firePixelGroup, ]
  #   # All cohortData's pixelGroups are in the firePixelGroup:
  #   # We just subsetted the cohort data to the firePixelGroups
  #   # Not all firePixelGroup are in the cohortData's pixelGroups:
  #   # We have several fires that did NOT happen in forests, so do not have a
  #   # correspondent pixelGroup
  #   # Browse[1]> all(cohortData$pixelGroup %in% firePixelGroup)
  #   # [1] TRUE
  #   # Browse[1]> all(firePixelGroup %in% cohortData$pixelGroup)
  #   # [1] FALSE
  #
  # }
  #
  # spCode <- c('Pice_Mar', 'Pice_Gla', 'Lari_Lar', 'Betu_Pap', 'Popu_Tre', 'Pinu_Ban') # TODO Make it flexible!
  # reclassTable <- data.table(speciesCode = spCode, burnClass = c("class3", "class3", "class3", "class2", "class2", "class4"))
  #
  # # Classify the burnClasses
  # cohortData <- reclassCohortData(cohortData = cohortData, reclassTable = reclassTable)
  #
  # # Calculate proportional biomass (it might or might now be present already in cohortData)
  # cohortData[, totalBiomass := sum(B, na.rm = TRUE), by = "pixelGroup"]
  # cohortData[, propBiomass := B/totalBiomass, by = "pixelGroup"]
  # # Calculate proportional biomass per class
  # cohortData[, propBurnClass := sum(propBiomass, na.rm = TRUE), by = c("pixelGroup", "burnClass")]
  # cohortData[totalBiomass == 0 & age == 0, c("propBiomass", "propBurnClass") := 1]
  #
  # # Assertion: we don't have any more NA's
  # testthat::expect_true(NROW(cohortData) == NROW(na.omit(cohortData)))
  #
  # NOTE(review): returns the input unchanged while the logic above is disabled.
  return(cohortData)
}
|
/functions/not_included/classifyBurnability.R
|
no_license
|
tati-micheletti/NWT
|
R
| false
| false
| 2,457
|
r
|
classifyBurnability <- function(cohortData, pixelGroupMap, pixelsToSubset = NULL){
#
# NEEDS TO BE REDONE BASED ON !runMe.R!
# # Function to reclassify
# reclassCohortData <- function(cohortData, reclassTable){
# newCohortData <- cohortData[reclassTable, on = "speciesCode"]
# newCohortData[age < 15, burnClass := "class1"]
#
# # Assertion
# testthat::expect_true(all(newCohortData$pixelGroup %in% firePixelGroup))
#
# return(newCohortData)
# }
#
# if (!is.null(pixelsToSubset)){
# # Get the pixel groups for the fire polygons for the given years
# # As fires happened in non-forest places as well, we have NA's here
# # (i.e. a lot of NA's @ 40-50%)
# firePixelGroup <- pixelGroupMap[pixelsToSubset]
# # Reduce cohortData to these pixels
# # any(is.na(cohortData$pixelGroup)) == FALSE : No NA's in pixelGroups
# cohortData <- cohortData[pixelGroup %in% firePixelGroup, ]
# # All cohortData's pixelGroups are in the firePixelGroup:
# # We just subsetted the cohort data to the firePixelGroups
# # Not all firePixelGroup are in the cohortData's pixelGroups:
# # We have several fires that did NOT happen in forests, so do not have a
# # correspondent pixelGroup
# # Browse[1]> all(cohortData$pixelGroup %in% firePixelGroup)
# # [1] TRUE
# # Browse[1]> all(firePixelGroup %in% cohortData$pixelGroup)
# # [1] FALSE
#
# }
#
# spCode <- c('Pice_Mar', 'Pice_Gla', 'Lari_Lar', 'Betu_Pap', 'Popu_Tre', 'Pinu_Ban') # TODO Make it flexible!
# reclassTable <- data.table(speciesCode = spCode, burnClass = c("class3", "class3", "class3", "class2", "class2", "class4"))
#
# # Classify the burnClasses
# cohortData <- reclassCohortData(cohortData = cohortData, reclassTable = reclassTable)
#
# # Calculate proportional biomass (it might or might now be present already in cohortData)
# cohortData[, totalBiomass := sum(B, na.rm = TRUE), by = "pixelGroup"]
# cohortData[, propBiomass := B/totalBiomass, by = "pixelGroup"]
# # Calculate proportional biomass per class
# cohortData[, propBurnClass := sum(propBiomass, na.rm = TRUE), by = c("pixelGroup", "burnClass")]
# cohortData[totalBiomass == 0 & age == 0, c("propBiomass", "propBurnClass") := 1]
#
# # Assertion: we don't have any more NA's
# testthat::expect_true(NROW(cohortData) == NROW(na.omit(cohortData)))
#
return(cohortData)
}
|
# Auto-extracted example script for the QRM package's `dji` dataset
# (Dow Jones Index): loads the dataset and previews its first rows.
library(QRM)
### Name: dji
### Title: Dow Jones Index
### Aliases: dji dji.df
### Keywords: datasets
### ** Examples
# Attach the dji dataset to the workspace and show its first entries.
data(dji)
head(dji)
|
/data/genthat_extracted_code/QRM/examples/dji.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 146
|
r
|
library(QRM)
### Name: dji
### Title: Dow Jones Index
### Aliases: dji dji.df
### Keywords: datasets
### ** Examples
data(dji)
head(dji)
|
# Auto-extracted example script for the SenSrivastava package's E2.2 dataset
# (house-price data): loads the dataset and summarises each variable.
library(SenSrivastava)
### Name: E2.2
### Title: Data on House Prices
### Aliases: E2.2
### Keywords: datasets
### ** Examples
# Attach the E2.2 data frame and print per-column summary statistics.
data(E2.2)
summary(E2.2)
|
/data/genthat_extracted_code/SenSrivastava/examples/E2.2.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 159
|
r
|
library(SenSrivastava)
### Name: E2.2
### Title: Data on House Prices
### Aliases: E2.2
### Keywords: datasets
### ** Examples
data(E2.2)
summary(E2.2)
|
# PUMP EO (standing, eyes-open) summary extraction for follow-up 1 (F1).
# PUMP06/PUMP07/PUMP11 are excluded because they have no F1 visit.
setwd("/Users/iris/Desktop/NRL/PUMP/PUMP/PUMPAPDM/StandingEO")

PUMPpath <- "/Users/iris/Desktop/NRL/PUMP/PUMP"
# Drop directory entry 12, then the subjects without an F1 visit
# (positions 6, 7 and 11 of the remaining listing).
PUMPfiles <- list.files(PUMPpath)[-12]
PUMPfiles <- PUMPfiles[-c(6, 7, 11)]

for (x in PUMPfiles) {
  id <- x
  path <- paste0(PUMPpath, "/", x, "/APDMFolUp1")
  files <- list.files(path)

  # PUMP09's folder has a different layout, so its summary file sits at a
  # different position in the directory listing; both branches previously
  # duplicated the same read.csv call, now consolidated.
  file_pos <- if (x == "PUMP09") 2 else 3
  filepath <- paste0(path, "/", files[file_pos])
  # The first 9 lines are header metadata; the measures table starts after them.
  # Use the full word FALSE rather than F, which can be reassigned.
  f <- read.csv(filepath, stringsAsFactors = FALSE, skip = 9)

  # Re-read the raw file and print the recorded condition (row 3, column 2
  # of the header) as a sanity check that the right file was picked up.
  condition <- read.csv(filepath, stringsAsFactors = FALSE)
  print(condition[3, 2])

  if (x == "PUMP01") {
    # First subject processed: initialise the result table with one column
    # per measure, and this subject's means as the first row.
    df <- data.frame(matrix(ncol = 33, nrow = 0))
    colnames(df) <- f$Measure
    df[1, ] <- f$Mean
    rownames(df) <- id
  } else {
    # Subsequent subjects: append their mean values as a new named row.
    df <- rbind(df, f$Mean)
    rownames(df)[nrow(df)] <- id
  }
}

write.csv(df, "PUMP_StandingEO_F1.csv")
|
/PUMP/PUMPAPDM/StandingEO/PUMP_StandingEO_F1.R
|
no_license
|
irislxxy/NRL
|
R
| false
| false
| 966
|
r
|
# PUMP EO - F1
# PUMP06/PUMP07/PUMP11 - No F1
setwd("/Users/iris/Desktop/NRL/PUMP/PUMP/PUMPAPDM/StandingEO")
PUMPpath <- "/Users/iris/Desktop/NRL/PUMP/PUMP"
PUMPfiles <- list.files(PUMPpath)[-12]
PUMPfiles <- PUMPfiles[-c(6,7,11)]
for (x in PUMPfiles){
id <- x
path <- paste0(PUMPpath,"/",x,"/APDMFolUp1")
files <- list.files(path)
# filepath
if (x == "PUMP09"){
filepath <- paste0(path,"/",files[2])
f <- read.csv(filepath, stringsAsFactors = F, skip = 9)
}
else {
filepath <- paste0(path,"/",files[3])
f <- read.csv(filepath, stringsAsFactors = F, skip = 9)
}
# check condition
condition <- read.csv(filepath, stringsAsFactors = F)
print(condition[3,2])
if (x == "PUMP01"){
df <- data.frame(matrix(ncol = 33, nrow = 0))
colnames(df) <- f$Measure
df[1,] <- f$Mean
rownames(df) <- id
}
else{
df <- rbind(df, f$Mean)
rownames(df)[nrow(df)] <- id
}
}
write.csv(df, "PUMP_StandingEO_F1.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/optimal_transport.R
\name{iterative_optimal_transport}
\alias{iterative_optimal_transport}
\title{Iterate the optimal transport problem with penalization for different lambda.}
\usage{
iterative_optimal_transport(X, Y, Q = NULL, lambda = 0.01,
eps = 0.01, numReps = 1000)
}
\arguments{
\item{X}{the n x d matrix of vectors}
\item{Y}{the m x d matrix of vectors}
\item{Q}{optional, an initialization point}
\item{lambda}{the penalization parameter}
\item{eps}{tolerance for computing sinkhorn divergence}
\item{numReps}{the maximum number of iterations to perform before stopping}
}
\value{
a list of the final Pi and Q
}
\description{
Function to iterate optimal transport based on sinkhorn divergence, for a fixed
penalization parameter
}
\examples{
library(rstiefel)
set.seed(2019)
X <- matrix(rnorm(1000,1,.2),ncol= 4)
Y <- rbind(X,X)
W <- rustiefel(4,4)
Y <- Y \%*\% W
test <- iterative_optimal_transport(X,Y,numReps = 1000,lambda = .0001)
norm(test$`Orthogonal Matrix` - W,"2")
X <- matrix(rnorm(5000,.2,.02),ncol= 5)
Y <- rbind(X,X)
W <- rustiefel(5,5)
Y <- Y \%*\% W
Y <- matrix(rnorm(200,.7),ncol =5)
test2 <- iterative_optimal_transport(X,Y,numReps = 1000,lambda = .0001)
norm(test2$`Orthogonal Matrix` - W,"2")
}
|
/man/iterative_optimal_transport.Rd
|
no_license
|
youngser/nonparGraphTesting
|
R
| false
| true
| 1,261
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/optimal_transport.R
\name{iterative_optimal_transport}
\alias{iterative_optimal_transport}
\title{Iterate the optimal transport problem with penalization for different lambda.}
\usage{
iterative_optimal_transport(X, Y, Q = NULL, lambda = 0.01,
eps = 0.01, numReps = 1000)
}
\arguments{
\item{X}{the n x d matrix of vectors}
\item{Y}{the m x d matrix of vectors}
\item{Q}{optional, an initialization point}
\item{lambda}{the penalization parameter}
\item{eps}{tolerance for computing sinkhorn divergence}
\item{numReps}{when to stop}
}
\value{
a list of the final Pi and Q
}
\description{
Function to iterate optimal transport based on sinkhorn divergence, for a fixed
penalization parameter
}
\examples{
library(rstiefel)
set.seed(2019)
X <- matrix(rnorm(1000,1,.2),ncol= 4)
Y <- rbind(X,X)
W <- rustiefel(4,4)
Y <- Y \%*\% W
test <- iterative_optimal_transport(X,Y,numReps = 1000,lambda = .0001)
norm(test$`Orthogonal Matrix` - W,"2")
X <- matrix(rnorm(5000,.2,.02),ncol= 5)
Y <- rbind(X,X)
W <- rustiefel(5,5)
Y <- Y \%*\% W
Y <- matrix(rnorm(200,.7),ncol =5)
test2 <- iterative_optimal_transport(X,Y,numReps = 1000,lambda = .0001)
norm(test2$`Orthogonal Matrix` - W,"2")
}
|
# Render the R Markdown source for the flurochloridone entry to HTML
# via knitr's one-step knit2html().
library("knitr")
library("rgl")
# Alternative manual pipeline (knit -> markdownToHTML -> pandoc to PDF),
# kept commented out for reference:
#knit("flurochloridone.Rmd")
#markdownToHTML('flurochloridone.md', 'flurochloridone.html', options=c("use_xhml"))
#system("pandoc -s flurochloridone.html -o flurochloridone.pdf")
knit2html('flurochloridone.Rmd')
|
/FDA_Pesticide_Glossary/flurochloridone.R
|
permissive
|
andrewdefries/andrewdefries.github.io
|
R
| false
| false
| 246
|
r
|
# Render the flurochloridone report: knit the R Markdown source directly to
# HTML with knitr. The commented-out lines below are an alternative pipeline
# (knit -> markdownToHTML -> pandoc PDF) kept for reference.
# NOTE(review): "use_xhml" in the disabled markdownToHTML() call looks like a
# typo for "use_xhtml" — confirm before re-enabling that step.
library("knitr")
# rgl is presumably required by plotting chunks inside the Rmd — TODO confirm
library("rgl")
#knit("flurochloridone.Rmd")
#markdownToHTML('flurochloridone.md', 'flurochloridone.html', options=c("use_xhml"))
#system("pandoc -s flurochloridone.html -o flurochloridone.pdf")
knit2html('flurochloridone.Rmd')
|
# -----------------------------------------------------------------------------#
# --- Funciones para la identificación de eventos ----
# -----------------------------------------------------------------------------#
# Expand each run of identical logical values into that run's length.
# Example: c(TRUE, TRUE, FALSE) -> c(2, 2, 1). Used downstream to tag every
# pentad of a candidate event with the event's total duration.
transformarALongitudesCadenas = function(x) {
  # Input must be a non-NULL logical vector with no missing values
  base::stopifnot(! is.null(x) && ! any(is.na(x)) && is.logical(x))
  # Run-length encode, then repeat each run's length over its own extent;
  # rep(lengths, times = lengths) is equivalent to replacing the rle values
  # with the lengths and inverting the encoding.
  codificacion <- base::rle(x)
  resultado <- base::rep(codificacion$lengths, times = codificacion$lengths)
  # Sanity check: the expansion must cover exactly the input length
  stopifnot(length(x) == length(resultado))
  return (resultado)
}
# Number each run of identical values sequentially (1, 2, 3, ...), expanding
# the run id over every element of the run. Used to assign an event number to
# every pentad of each detected event.
# Example: c(3L, 3L, 3L, 5L) -> c(1L, 1L, 1L, 2L).
# Caveat (inherited from the rle-based design): two back-to-back runs that
# share the same value are merged into one run id; upstream filtering makes
# this unlikely but it is worth knowing.
identificarLongitudesCadenas = function(x) {
  # Input must be a non-NULL integer vector with no missing values
  base::stopifnot(! is.null(x) && ! any(is.na(x)) && is.integer(x))
  # 1. Run-length encode the duration vector
  x.rle <- base::rle(x)
  # 2. Replace each run's value with its ordinal position.
  #    FIX: use seq_along() instead of seq(from = 1, to = length(...)).
  #    With an empty input the original evaluated seq(1, 0) == c(1, 0),
  #    which made inverse.rle() fail; seq_along() yields integer(0).
  x.rle$values <- seq_along(x.rle$lengths)
  # 3. Invert the encoding to expand run ids back to the original length
  y <- base::inverse.rle(x.rle)
  stopifnot(length(x) == length(y))
  return (y)
}
# Identify dry ("seco") or wet ("humedo") events for one location, one index
# configuration and one realization. An event is a run of consecutive pentads
# whose (optionally interpolated) index value stays at or below (dry) /
# at or above (wet) `umbral_indice` for at least `duracion_minima` pentads.
#
# Arguments:
#   realizacion                 - realization id, copied into every output row.
#   conf_indice                 - one-row index configuration (id, indice, ...).
#   ubicacion                   - one-row location tibble; its id column is
#                                 detected with IdentificarIdColumn()
#                                 (usually station_id or point_id).
#   tipo_evento                 - "seco" or "humedo".
#   umbral_indice               - index threshold defining an event.
#   duracion_minima             - minimum event length in pentads.
#   valores.indices.realizacion - index values for this realization.
#   interpolar_aislados         - interpolate isolated NAs linearly (default TRUE).
#   metodo_imputacion_id        - imputation-method id to filter on (default 0).
#
# Returns: a tibble with one row per event (dates, intensity, magnitude,
# duration, extremes, configuration metadata); an empty, correctly typed
# tibble when no event qualifies.
identificarEventosConfigUbicacionR = function(realizacion, conf_indice, ubicacion,
                                              tipo_evento = c("seco", "humedo"),
                                              umbral_indice, duracion_minima,
                                              valores.indices.realizacion,
                                              interpolar_aislados = TRUE,
                                              metodo_imputacion_id = 0) {
  # 0. Validate the event type.
  #    FIX: keep match.arg()'s return value. The original discarded it, so
  #    with the default argument tipo_evento remained a length-2 vector and
  #    the scalar comparison below (tipo_evento == "seco") was ill-formed.
  tipo_evento <- base::match.arg(tipo_evento)
  # 0. Identify the location id column (usually station_id or point_id)
  id_column <- IdentificarIdColumn(ubicacion)
  # 1. Fetch index values for this location, imputation method and configuration.
  eventos <- NULL
  valores.indices <- valores.indices.realizacion %>%
    dplyr::filter(!!rlang::sym(id_column) == dplyr::pull(ubicacion, !!id_column) &
                    metodo_imputacion_id == !!metodo_imputacion_id &
                    conf_id == conf_indice$id)
  if (nrow(valores.indices) > 0) {
    valores.indices <- valores.indices %>%
      dplyr::arrange(!!rlang::sym(id_column), ano, pentada_fin)
  }
  # 2. If there are isolated missing values (an NA whose predecessor and
  #    successor are both non-NA), interpolate linearly so events are not
  #    "lost" because of a single gap.
  if (nrow(valores.indices) > 0) {
    if (interpolar_aislados) {
      eventos <- valores.indices %>%
        dplyr::mutate(valor_indice_anterior = dplyr::lag(valor_indice),
                      valor_indice_siguiente = dplyr::lead(valor_indice)) %>%
        # FIX: use the vectorized `&` rather than the scalar `&&`. Inside
        # mutate() these operands are whole columns; `&&` only inspected the
        # first element (and errors outright on length > 1 in R >= 4.3), so
        # the per-row interpolation flag was never computed correctly.
        dplyr::mutate(interpolar = (is.na(valor_indice) & ! is.na(valor_indice_anterior) & ! is.na(valor_indice_siguiente))) %>%
        dplyr::mutate(valor_indice_ajustado = dplyr::if_else(interpolar, (valor_indice_anterior + valor_indice_siguiente) / 2, valor_indice)) %>%
        dplyr::select(!!id_column, ano, pentada_fin, valor_indice_ajustado)
    } else {
      eventos <- valores.indices %>%
        dplyr::mutate(valor_indice_ajustado = valor_indice) %>%
        dplyr::select(!!id_column, ano, pentada_fin, valor_indice_ajustado)
    }
  }
  # 3. Flag pentads where the index is below (dry) / above (wet) the threshold.
  if (! is.null(eventos)) {
    if (tipo_evento == "seco") {
      eventos <- eventos %>%
        dplyr::mutate(cumple_condicion = ! is.na(valor_indice_ajustado) &
                        ! is.nan(valor_indice_ajustado) &
                        valor_indice_ajustado <= umbral_indice)
    } else {
      eventos <- eventos %>%
        dplyr::mutate(cumple_condicion = ! is.na(valor_indice_ajustado) &
                        ! is.nan(valor_indice_ajustado) &
                        valor_indice_ajustado >= umbral_indice)
    }
  }
  # 4. Compute run lengths and keep only rows that
  #    a. satisfy the event condition, and
  #    b. belong to a run at least duracion_minima pentads long.
  if (! is.null(eventos)) {
    if (all(! eventos$cumple_condicion)) {
      # No pentad satisfies the condition; nothing else to compute.
      eventos <- NULL
    } else {
      eventos <- eventos %>%
        dplyr::mutate(duracion_evento = transformarALongitudesCadenas(cumple_condicion)) %>%
        dplyr::filter(cumple_condicion & (duracion_evento >= duracion_minima))
      if (nrow(eventos) > 0) {
        eventos <- eventos %>%
          dplyr::mutate(numero_evento = identificarLongitudesCadenas(duracion_evento)) %>%
          dplyr::select(-cumple_condicion)
      } else {
        # No run reaches the minimum duration
        eventos <- NULL
      }
    }
  }
  # 5. Aggregate per event:
  #    i. start date, ii. end date, iii. intensity (mean of the index),
  #    iv. magnitude (sum of the index), v. extreme values (min/max).
  if (! is.null(eventos)) {
    eventos <- eventos %>%
      dplyr::mutate(fecha_inicio_pentada = pentada.ano.a.fecha.inicio(pentada_fin, ano),
                    fecha_fin_pentada = fecha.fin.pentada(pentada.ano.a.fecha.inicio(pentada_fin, ano))) %>%
      dplyr::group_by(!!rlang::sym(id_column), numero_evento) %>%
      dplyr::summarise(fecha_inicio = min(fecha_inicio_pentada), fecha_fin = max(fecha_fin_pentada),
                       intensidad = mean(valor_indice_ajustado), magnitud = sum(valor_indice_ajustado),
                       duracion = min(duracion_evento), minimo = min(valor_indice_ajustado),
                       maximo = max(valor_indice_ajustado))
  }
  if (! is.null(eventos)) {
    # Attach event type, configuration metadata and realization id.
    eventos <- eventos %>%
      dplyr::mutate(tipo_evento = !!tipo_evento,
                    conf_id = conf_indice$id, indice = conf_indice$indice, escala = conf_indice$escala,
                    distribucion = conf_indice$distribucion, metodo_ajuste = conf_indice$metodo_ajuste,
                    referencia_comienzo = conf_indice$referencia_comienzo,
                    referencia_fin = conf_indice$referencia_fin,
                    realizacion = !!realizacion) %>%
      dplyr::select(realizacion, dplyr::everything())
    return (eventos)
  } else {
    # Empty result: build a zero-row tibble whose id column matches the type
    # of the id column in `ubicacion`.
    type_of_id_col <- typeof(dplyr::pull(ubicacion,!!id_column))
    # FIX: typeof() reports "double", never "numeric"; the original "numeric"
    # branch could never match, so numeric ids degraded to character().
    return (tibble::tibble(realizacion = double(),
                           !!id_column := if(type_of_id_col == "integer") integer() else
                             if(type_of_id_col == "double") double() else
                               if(type_of_id_col == "logical") logical() else
                                 if(type_of_id_col == "character") character() else
                                   character(),
                           numero_evento = integer(),
                           fecha_inicio = as.Date(character()), fecha_fin = as.Date(character()),
                           intensidad = double(), magnitud = double(), duracion = integer(),
                           minimo = double(), maximo = double(), tipo_evento = character(),
                           conf_id = integer(), indice = character(), escala = integer(),
                           distribucion = character(), metodo_ajuste = character(),
                           referencia_comienzo = character(), referencia_fin = character()))
  }
}
# --- Entry point: identify dry and wet events for a single location ----------
# For every index configuration and every realization, delegates to
# identificarEventosConfigUbicacionR() twice (once per event type) and
# row-binds the results. `script` is a logger-like object (only $info is
# used here); thresholds and minimum durations come from
# config$params$eventos$secos / $humedos.
# NOTE(review): the dry-event pass sorts configuraciones.indices with
# dplyr::arrange(id) but the wet-event pass does not — likely an oversight;
# confirm whether output row order matters downstream.
IdentificarEventos <- function(input.value, script, config, configuraciones.indices,
                               resultados.indices.sequia, interpolar_aislados) {
  # The location whose indices will be processed
  ubicacion <- input.value
  # Identify the location id column (usually station_id or point_id)
  id_column <- IdentificarIdColumn(ubicacion)
  # Log progress (log message intentionally kept in Spanish)
  script$info(glue::glue("Identificando eventos secos para la ubicación con ",
                         "{id_column} = {ubicacion %>% dplyr::pull(!!id_column)} ",
                         "(lon: {ubicacion$longitude}, lat: {ubicacion$latitude})"))
  # Dry events: one pass per configuration row, one inner pass per realization
  eventos_secos <- purrr::pmap_dfr(
    .l = configuraciones.indices %>% dplyr::arrange(id),
    .f = function(...) {
      # Re-assemble the current configuration row as a one-row tibble
      conf_indice <- tibble::tibble(...)
      eventos <- purrr::map_dfr(
        .x = unique(resultados.indices.sequia$realizacion),
        .f = function(r) {
          eventos_x_realizacion <-
            identificarEventosConfigUbicacionR(conf_indice = conf_indice, ubicacion = ubicacion,
                                               tipo_evento = "seco",
                                               umbral_indice = config$params$eventos$secos$umbral,
                                               duracion_minima = config$params$eventos$secos$duracion_minima,
                                               valores.indices.realizacion = resultados.indices.sequia %>%
                                                 dplyr::filter(realizacion == r),
                                               interpolar_aislados = interpolar_aislados,
                                               realizacion = r)
          return (eventos_x_realizacion)
        })
      return (eventos)
    })
  # Log progress
  script$info(glue::glue("Identificando eventos humedos para la ubicación con ",
                         "{id_column} = {ubicacion %>% dplyr::pull(!!id_column)} ",
                         "(lon: {ubicacion$longitude}, lat: {ubicacion$latitude})"))
  # Wet events: same structure with the "humedo" thresholds
  eventos_humedos <- purrr::pmap_dfr(
    .l = configuraciones.indices,
    .f = function(...) {
      conf_indice <- tibble::tibble(...)
      eventos <- purrr::map_dfr(
        .x = unique(resultados.indices.sequia$realizacion),
        .f = function(r) {
          eventos_x_realizacion <-
            identificarEventosConfigUbicacionR(conf_indice = conf_indice, ubicacion = ubicacion,
                                               tipo_evento = "humedo",
                                               umbral_indice = config$params$eventos$humedos$umbral,
                                               duracion_minima = config$params$eventos$humedos$duracion_minima,
                                               valores.indices.realizacion = resultados.indices.sequia %>%
                                                 dplyr::filter(realizacion == r),
                                               interpolar_aislados = interpolar_aislados,
                                               realizacion = r)
          return (eventos_x_realizacion)
        })
      return (eventos)
    })
  # Dry and wet events combined into a single tibble
  return (dplyr::bind_rows(eventos_secos, eventos_humedos))
}
|
/IdentificarEventos/lib/funciones_eventos.R
|
no_license
|
CRC-SAS/indices-eventos
|
R
| false
| false
| 10,962
|
r
|
# -----------------------------------------------------------------------------#
# --- Funciones para la identificación de eventos ----
# -----------------------------------------------------------------------------#
# Expand each run of identical logical values into that run's length.
# Example: c(TRUE, TRUE, FALSE) -> c(2, 2, 1). Used downstream to tag every
# pentad of a candidate event with the event's total duration.
transformarALongitudesCadenas = function(x) {
  # Input must be a non-NULL logical vector with no missing values
  base::stopifnot(! is.null(x) && ! any(is.na(x)) && is.logical(x))
  # Run-length encode, then repeat each run's length over its own extent;
  # rep(lengths, times = lengths) is equivalent to replacing the rle values
  # with the lengths and inverting the encoding.
  codificacion <- base::rle(x)
  resultado <- base::rep(codificacion$lengths, times = codificacion$lengths)
  # Sanity check: the expansion must cover exactly the input length
  stopifnot(length(x) == length(resultado))
  return (resultado)
}
# Number each run of identical values sequentially (1, 2, 3, ...), expanding
# the run id over every element of the run. Used to assign an event number to
# every pentad of each detected event.
# Example: c(3L, 3L, 3L, 5L) -> c(1L, 1L, 1L, 2L).
# Caveat (inherited from the rle-based design): two back-to-back runs that
# share the same value are merged into one run id; upstream filtering makes
# this unlikely but it is worth knowing.
identificarLongitudesCadenas = function(x) {
  # Input must be a non-NULL integer vector with no missing values
  base::stopifnot(! is.null(x) && ! any(is.na(x)) && is.integer(x))
  # 1. Run-length encode the duration vector
  x.rle <- base::rle(x)
  # 2. Replace each run's value with its ordinal position.
  #    FIX: use seq_along() instead of seq(from = 1, to = length(...)).
  #    With an empty input the original evaluated seq(1, 0) == c(1, 0),
  #    which made inverse.rle() fail; seq_along() yields integer(0).
  x.rle$values <- seq_along(x.rle$lengths)
  # 3. Invert the encoding to expand run ids back to the original length
  y <- base::inverse.rle(x.rle)
  stopifnot(length(x) == length(y))
  return (y)
}
# Identify dry ("seco") or wet ("humedo") events for one location, one index
# configuration and one realization. An event is a run of consecutive pentads
# whose (optionally interpolated) index value stays at or below (dry) /
# at or above (wet) `umbral_indice` for at least `duracion_minima` pentads.
#
# Arguments:
#   realizacion                 - realization id, copied into every output row.
#   conf_indice                 - one-row index configuration (id, indice, ...).
#   ubicacion                   - one-row location tibble; its id column is
#                                 detected with IdentificarIdColumn()
#                                 (usually station_id or point_id).
#   tipo_evento                 - "seco" or "humedo".
#   umbral_indice               - index threshold defining an event.
#   duracion_minima             - minimum event length in pentads.
#   valores.indices.realizacion - index values for this realization.
#   interpolar_aislados         - interpolate isolated NAs linearly (default TRUE).
#   metodo_imputacion_id        - imputation-method id to filter on (default 0).
#
# Returns: a tibble with one row per event (dates, intensity, magnitude,
# duration, extremes, configuration metadata); an empty, correctly typed
# tibble when no event qualifies.
identificarEventosConfigUbicacionR = function(realizacion, conf_indice, ubicacion,
                                              tipo_evento = c("seco", "humedo"),
                                              umbral_indice, duracion_minima,
                                              valores.indices.realizacion,
                                              interpolar_aislados = TRUE,
                                              metodo_imputacion_id = 0) {
  # 0. Validate the event type.
  #    FIX: keep match.arg()'s return value. The original discarded it, so
  #    with the default argument tipo_evento remained a length-2 vector and
  #    the scalar comparison below (tipo_evento == "seco") was ill-formed.
  tipo_evento <- base::match.arg(tipo_evento)
  # 0. Identify the location id column (usually station_id or point_id)
  id_column <- IdentificarIdColumn(ubicacion)
  # 1. Fetch index values for this location, imputation method and configuration.
  eventos <- NULL
  valores.indices <- valores.indices.realizacion %>%
    dplyr::filter(!!rlang::sym(id_column) == dplyr::pull(ubicacion, !!id_column) &
                    metodo_imputacion_id == !!metodo_imputacion_id &
                    conf_id == conf_indice$id)
  if (nrow(valores.indices) > 0) {
    valores.indices <- valores.indices %>%
      dplyr::arrange(!!rlang::sym(id_column), ano, pentada_fin)
  }
  # 2. If there are isolated missing values (an NA whose predecessor and
  #    successor are both non-NA), interpolate linearly so events are not
  #    "lost" because of a single gap.
  if (nrow(valores.indices) > 0) {
    if (interpolar_aislados) {
      eventos <- valores.indices %>%
        dplyr::mutate(valor_indice_anterior = dplyr::lag(valor_indice),
                      valor_indice_siguiente = dplyr::lead(valor_indice)) %>%
        # FIX: use the vectorized `&` rather than the scalar `&&`. Inside
        # mutate() these operands are whole columns; `&&` only inspected the
        # first element (and errors outright on length > 1 in R >= 4.3), so
        # the per-row interpolation flag was never computed correctly.
        dplyr::mutate(interpolar = (is.na(valor_indice) & ! is.na(valor_indice_anterior) & ! is.na(valor_indice_siguiente))) %>%
        dplyr::mutate(valor_indice_ajustado = dplyr::if_else(interpolar, (valor_indice_anterior + valor_indice_siguiente) / 2, valor_indice)) %>%
        dplyr::select(!!id_column, ano, pentada_fin, valor_indice_ajustado)
    } else {
      eventos <- valores.indices %>%
        dplyr::mutate(valor_indice_ajustado = valor_indice) %>%
        dplyr::select(!!id_column, ano, pentada_fin, valor_indice_ajustado)
    }
  }
  # 3. Flag pentads where the index is below (dry) / above (wet) the threshold.
  if (! is.null(eventos)) {
    if (tipo_evento == "seco") {
      eventos <- eventos %>%
        dplyr::mutate(cumple_condicion = ! is.na(valor_indice_ajustado) &
                        ! is.nan(valor_indice_ajustado) &
                        valor_indice_ajustado <= umbral_indice)
    } else {
      eventos <- eventos %>%
        dplyr::mutate(cumple_condicion = ! is.na(valor_indice_ajustado) &
                        ! is.nan(valor_indice_ajustado) &
                        valor_indice_ajustado >= umbral_indice)
    }
  }
  # 4. Compute run lengths and keep only rows that
  #    a. satisfy the event condition, and
  #    b. belong to a run at least duracion_minima pentads long.
  if (! is.null(eventos)) {
    if (all(! eventos$cumple_condicion)) {
      # No pentad satisfies the condition; nothing else to compute.
      eventos <- NULL
    } else {
      eventos <- eventos %>%
        dplyr::mutate(duracion_evento = transformarALongitudesCadenas(cumple_condicion)) %>%
        dplyr::filter(cumple_condicion & (duracion_evento >= duracion_minima))
      if (nrow(eventos) > 0) {
        eventos <- eventos %>%
          dplyr::mutate(numero_evento = identificarLongitudesCadenas(duracion_evento)) %>%
          dplyr::select(-cumple_condicion)
      } else {
        # No run reaches the minimum duration
        eventos <- NULL
      }
    }
  }
  # 5. Aggregate per event:
  #    i. start date, ii. end date, iii. intensity (mean of the index),
  #    iv. magnitude (sum of the index), v. extreme values (min/max).
  if (! is.null(eventos)) {
    eventos <- eventos %>%
      dplyr::mutate(fecha_inicio_pentada = pentada.ano.a.fecha.inicio(pentada_fin, ano),
                    fecha_fin_pentada = fecha.fin.pentada(pentada.ano.a.fecha.inicio(pentada_fin, ano))) %>%
      dplyr::group_by(!!rlang::sym(id_column), numero_evento) %>%
      dplyr::summarise(fecha_inicio = min(fecha_inicio_pentada), fecha_fin = max(fecha_fin_pentada),
                       intensidad = mean(valor_indice_ajustado), magnitud = sum(valor_indice_ajustado),
                       duracion = min(duracion_evento), minimo = min(valor_indice_ajustado),
                       maximo = max(valor_indice_ajustado))
  }
  if (! is.null(eventos)) {
    # Attach event type, configuration metadata and realization id.
    eventos <- eventos %>%
      dplyr::mutate(tipo_evento = !!tipo_evento,
                    conf_id = conf_indice$id, indice = conf_indice$indice, escala = conf_indice$escala,
                    distribucion = conf_indice$distribucion, metodo_ajuste = conf_indice$metodo_ajuste,
                    referencia_comienzo = conf_indice$referencia_comienzo,
                    referencia_fin = conf_indice$referencia_fin,
                    realizacion = !!realizacion) %>%
      dplyr::select(realizacion, dplyr::everything())
    return (eventos)
  } else {
    # Empty result: build a zero-row tibble whose id column matches the type
    # of the id column in `ubicacion`.
    type_of_id_col <- typeof(dplyr::pull(ubicacion,!!id_column))
    # FIX: typeof() reports "double", never "numeric"; the original "numeric"
    # branch could never match, so numeric ids degraded to character().
    return (tibble::tibble(realizacion = double(),
                           !!id_column := if(type_of_id_col == "integer") integer() else
                             if(type_of_id_col == "double") double() else
                               if(type_of_id_col == "logical") logical() else
                                 if(type_of_id_col == "character") character() else
                                   character(),
                           numero_evento = integer(),
                           fecha_inicio = as.Date(character()), fecha_fin = as.Date(character()),
                           intensidad = double(), magnitud = double(), duracion = integer(),
                           minimo = double(), maximo = double(), tipo_evento = character(),
                           conf_id = integer(), indice = character(), escala = integer(),
                           distribucion = character(), metodo_ajuste = character(),
                           referencia_comienzo = character(), referencia_fin = character()))
  }
}
# --- Entry point: identify dry and wet events for a single location ----------
# For every index configuration and every realization, delegates to
# identificarEventosConfigUbicacionR() twice (once per event type) and
# row-binds the results. `script` is a logger-like object (only $info is
# used here); thresholds and minimum durations come from
# config$params$eventos$secos / $humedos.
# NOTE(review): the dry-event pass sorts configuraciones.indices with
# dplyr::arrange(id) but the wet-event pass does not — likely an oversight;
# confirm whether output row order matters downstream.
IdentificarEventos <- function(input.value, script, config, configuraciones.indices,
                               resultados.indices.sequia, interpolar_aislados) {
  # The location whose indices will be processed
  ubicacion <- input.value
  # Identify the location id column (usually station_id or point_id)
  id_column <- IdentificarIdColumn(ubicacion)
  # Log progress (log message intentionally kept in Spanish)
  script$info(glue::glue("Identificando eventos secos para la ubicación con ",
                         "{id_column} = {ubicacion %>% dplyr::pull(!!id_column)} ",
                         "(lon: {ubicacion$longitude}, lat: {ubicacion$latitude})"))
  # Dry events: one pass per configuration row, one inner pass per realization
  eventos_secos <- purrr::pmap_dfr(
    .l = configuraciones.indices %>% dplyr::arrange(id),
    .f = function(...) {
      # Re-assemble the current configuration row as a one-row tibble
      conf_indice <- tibble::tibble(...)
      eventos <- purrr::map_dfr(
        .x = unique(resultados.indices.sequia$realizacion),
        .f = function(r) {
          eventos_x_realizacion <-
            identificarEventosConfigUbicacionR(conf_indice = conf_indice, ubicacion = ubicacion,
                                               tipo_evento = "seco",
                                               umbral_indice = config$params$eventos$secos$umbral,
                                               duracion_minima = config$params$eventos$secos$duracion_minima,
                                               valores.indices.realizacion = resultados.indices.sequia %>%
                                                 dplyr::filter(realizacion == r),
                                               interpolar_aislados = interpolar_aislados,
                                               realizacion = r)
          return (eventos_x_realizacion)
        })
      return (eventos)
    })
  # Log progress
  script$info(glue::glue("Identificando eventos humedos para la ubicación con ",
                         "{id_column} = {ubicacion %>% dplyr::pull(!!id_column)} ",
                         "(lon: {ubicacion$longitude}, lat: {ubicacion$latitude})"))
  # Wet events: same structure with the "humedo" thresholds
  eventos_humedos <- purrr::pmap_dfr(
    .l = configuraciones.indices,
    .f = function(...) {
      conf_indice <- tibble::tibble(...)
      eventos <- purrr::map_dfr(
        .x = unique(resultados.indices.sequia$realizacion),
        .f = function(r) {
          eventos_x_realizacion <-
            identificarEventosConfigUbicacionR(conf_indice = conf_indice, ubicacion = ubicacion,
                                               tipo_evento = "humedo",
                                               umbral_indice = config$params$eventos$humedos$umbral,
                                               duracion_minima = config$params$eventos$humedos$duracion_minima,
                                               valores.indices.realizacion = resultados.indices.sequia %>%
                                                 dplyr::filter(realizacion == r),
                                               interpolar_aislados = interpolar_aislados,
                                               realizacion = r)
          return (eventos_x_realizacion)
        })
      return (eventos)
    })
  # Dry and wet events combined into a single tibble
  return (dplyr::bind_rows(eventos_secos, eventos_humedos))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create.report.R
\name{create.report}
\alias{create.report}
\title{Create a PDF report for a vertical profile performed using an optical package}
\usage{
create.report(dirdat)
}
\arguments{
\item{dirdat}{is the current directory path containing the RData file to plot.}
}
\description{
A PDF report is generated using knitr package facilities and a *.Rnw
template. It provides an efficient means to visualise the data.
It can be called from the higher-level function \code{\link{IOPs.go}}, or
run from the command line as long as IOPs.RData and IOPs.fitted.down.RData have been
generated by \code{\link{correct.merge.IOP.profile}}.
}
\details{
The program edits a template named IOP_Station_TEMPLATE.Rnw located in the package data folder.
It will extract a number of pieces of information from the current
data directory and RData files, cast.info.dat and instruments.dat files. For example,
the Latitude/Longitude are extracted from the cast.info.dat file to
create a map (Figure 1 of the report).
Next it uses the brew package to update the Rnw files.
The Rnw files are converted into PDF using knitr package. Both files will
be saved in dirdat and a "figures" folder is created with
a PDF file for each figures found in the report.
}
\seealso{
\code{\link{correct.merge.IOP.profile}}, \code{\link{IOPs.go}}
}
\author{
Simon Bélanger
}
|
/man/create.report.Rd
|
no_license
|
arboit/Riops
|
R
| false
| true
| 1,404
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create.report.R
\name{create.report}
\alias{create.report}
\title{Create a PDF report for a vertical profile performed using an optical package}
\usage{
create.report(dirdat)
}
\arguments{
\item{dirdat}{is the current directory path containing the RData file to plot.}
}
\description{
A PDF report is generated using knitr package facilities and a *.Rnw
template. It provides an efficient means to visualise the data.
It can be called from the higher-level function \code{\link{IOPs.go}}, or
run from the command line as long as IOPs.RData and IOPs.fitted.down.RData have been
generated by \code{\link{correct.merge.IOP.profile}}.
}
\details{
The program edits a template named IOP_Station_TEMPLATE.Rnw located in the package data folder.
It will extract a number of pieces of information from the current
data directory and RData files, cast.info.dat and instruments.dat files. For example,
the Latitude/Longitude are extracted from the cast.info.dat file to
create a map (Figure 1 of the report).
Next it uses the brew package to update the Rnw files.
The Rnw files are converted into PDF using knitr package. Both files will
be saved in dirdat and a "figures" folder is created with
a PDF file for each figures found in the report.
}
\seealso{
\code{\link{correct.merge.IOP.profile}}, \code{\link{IOPs.go}}
}
\author{
Simon Bélanger
}
|
###
# Identify "isoform switches" across brain development: genes where, within
# the same gene, at least one significantly age-associated expression feature
# has a negative age correlation and another has a positive one. Produces a
# Venn diagram, supplementary CSVs, and KEGG/GO enrichment of switching genes.
# NOTE(review): `ss()` is a project helper defined in ../eqtl_functions.R
# (string splitter) — not a base R function.
source("../eqtl_functions.R")
library(GenomicRanges)
library(clusterProfiler)
library(limma)
##### summary statistics ####
# Loads `statList`: per-feature-type developmental statistics; element 1 is
# presumably the gene-level set (dropped below via statList[-1]) — TODO confirm.
load("rdas/devStats_controlSamples.rda")
#################################
###### Create isoform switches ##########
# drop genes
# For each non-gene feature type, compute per-gene min/max age correlation of
# Bonferroni-significant features plus the features attaining those extremes.
# NOTE(review): statList elements appear to be GRanges-like (single-bracket
# subsetting combined with names()) — confirm before refactoring.
rangeList = lapply(statList[-1], function(x) {
	cat(".")
	xSig = x[which(x$p_bonf < 0.05),]
	xList = split(xSig, factor(xSig$EnsemblGeneID,
		levels = unique(x$EnsemblGeneID)))
	xList = xList[lengths(xList) > 0]
	theRange = t(sapply(split(xSig$ageCorr,factor(xSig$EnsemblGeneID,
		levels = unique(xSig$EnsemblGeneID))), range))
	theRange = as.data.frame(theRange)
	colnames(theRange) = c("negCorr","posCorr")
	## get min and max feature
	mins = xSig[order(xSig$ageCorr)]
	mins = mins[!duplicated(mins$EnsemblGeneID) &
		!is.na(mins$EnsemblGeneID)]
	theRange$minFeature = names(mins)[match(rownames(theRange),
		mins$EnsemblGeneID)]
	maxs = xSig[order(xSig$ageCorr,decreasing=TRUE)]
	maxs = maxs[!duplicated(maxs$EnsemblGeneID) &
		!is.na(maxs$EnsemblGeneID)]
	theRange$maxFeature = names(maxs)[match(rownames(theRange),
		maxs$EnsemblGeneID)]
	## other metrics
	theRange$numFeatures = table(x$EnsemblGeneID)[rownames(theRange)]
	theRange$numSigFeatures = lengths(xList)
	theRange$Symbol = x$Symbol[match(rownames(theRange), x$EnsemblGeneID)]
	theRange$EntrezID = x$EntrezID[match(rownames(theRange), x$EnsemblGeneID)]
	return(theRange)
})
### significant switches
# A "switch" requires one significant feature with negative and one with
# positive age correlation within the same gene.
switchList = lapply(rangeList, function(x) {
	x$corDiff = x$posCorr - x$negCorr
	x[which(x$negCorr < 0 & x$posCorr > 0 ),]
})
# Prints number of switching genes per feature type (interactive inspection)
sapply(switchList, nrow)
# save
save(switchList, file="rdas/isoform_switch_devel_byFeature.rda")
############################################
#### gene ontology on genes that switch ####
load("rdas/isoform_switch_devel_byFeature.rda")
geneSwitchList = lapply(switchList, rownames)
geneSwitchList = lapply(geneSwitchList, function(x)
	x[!grepl("-",x) & !is.na(x)]) # non fusion
## venn diagram of the IDs by the 5 features
allGenesSwitch = unique(unlist(geneSwitchList))
geneMatSwitch = sapply(geneSwitchList, function(x) allGenesSwitch %in% x)
rownames(geneMatSwitch) = allGenesSwitch
dim(geneMatSwitch)
pdf("plots/venn_geneIDs_devChanges_withSwitch.pdf",h=4.5,w=4.5)
vennDiagram(vennCounts(geneMatSwitch))
dev.off()
### write CSV
geneSwitchAll = do.call("rbind", switchList)
geneSwitchAll$Type = ss(rownames(geneSwitchAll), "\\.")
geneSwitchAll$EnsemblID = ss(rownames(geneSwitchAll), "\\.",2)
rownames(geneSwitchAll)= NULL
# Reorder so Type and EnsemblID come first (positions 11 and 10 after the
# two mutations above)
geneSwitchAll = geneSwitchAll[,c(11,10,1:9)]
write.csv(geneSwitchAll, file="tables/suppTable5_isoformSwitches.csv",
	row.names=FALSE)
########################################
## get entrez id
# Background universe: all tested (non-NA p_bonf) genes per feature type
entrezBgList = lapply(statList[-1], function(x) {
	o = x$EntrezID[!is.na(x$p_bonf)]
	unique(o[!is.na(o)])
})
lengths(entrezBgList)
entrezGeneSwitchList = lapply(switchList, function(x) {
	unique(x$EntrezID[!is.na(x$EntrezID)])
})
lengths(entrezGeneSwitchList)
## also just regulated genes
# Alternative (narrower) universe: only developmentally regulated genes
entrezBgList_dev = lapply(statList[-1], function(x) {
	o = x$EntrezID[which(x$p_bonf < 0.05)]
	unique(o[!is.na(o)])
})
lengths(entrezBgList_dev)
############################
### kegg on switches #######
## kegg on dev reg
keggListSwitch_dev = mapply(function(g, bg) {
	ht=enrichKEGG(as.character(g),
		organism="human", pvalueCutoff=1,
		universe= as.character(bg),minGSSize=5,
		pAdjustMethod="none", qvalueCutoff=1)
	as.data.frame(ht)
}, entrezGeneSwitchList, entrezBgList_dev, SIMPLIFY=FALSE)
# Unique KEGG terms across feature types, with set sizes parsed from BgRatio
keggSwitchMat = do.call("rbind", lapply(keggListSwitch_dev,
	function(x) {
		x$SetSize = as.integer(ss(as.character(x$BgRatio), "/", 1))
		x[,c("ID", "Description","SetSize")]}))
keggSwitchMat = keggSwitchMat[!duplicated(keggSwitchMat$ID),]
rownames(keggSwitchMat) = keggSwitchMat$ID
# Per-feature-type p/q values aligned on the unique term IDs
keggSwitchMat2 = do.call("cbind", lapply(keggListSwitch_dev, function(x)
	x[match(keggSwitchMat$ID,x$ID),c("pvalue", "qvalue")]))
rownames(keggSwitchMat2) = keggSwitchMat$ID
keggSwitchMat = cbind(keggSwitchMat, keggSwitchMat2)
keggSwitchMat$Type = "KEGG"
## numbers
colSums(keggSwitchMat[,grep("qvalue", colnames(keggSwitchMat))] < 0.05,
	na.rm=TRUE)
table(rowSums(keggSwitchMat[,grep("qvalue", colnames(keggSwitchMat))] < 0.05,
	na.rm=TRUE))
###################################
#### gene ontology on switches ####
###################################
## development background
goListSwitch_MF_dev = mapply(function(g, bg) {
	ht=enrichGO(as.character(g),
		OrgDb = "org.Hs.eg.db", pvalueCutoff=1,
		universe= as.character(bg),minGSSize=5,
		pAdjustMethod="none", qvalueCutoff=1, readable=TRUE)
	as.data.frame(ht)
}, entrezGeneSwitchList, entrezBgList_dev, SIMPLIFY=FALSE)
goListSwitch_BP_dev = mapply(function(g, bg) {
	ht=enrichGO(as.character(g), ont = "BP",
		OrgDb = "org.Hs.eg.db", pvalueCutoff=1,
		universe= as.character(bg),minGSSize=5,
		pAdjustMethod="none", qvalueCutoff=1, readable=TRUE)
	as.data.frame(ht)
}, entrezGeneSwitchList, entrezBgList_dev, SIMPLIFY=FALSE)
################
### combine ####
## make matrix
# Same unify-then-align pattern as for KEGG, once for MF and once for BP
goSwitchMat_MF = do.call("rbind", lapply(goListSwitch_MF_dev,
	function(x) {
		x$SetSize = as.integer(ss(as.character(x$BgRatio), "/", 1))
		x[,c("ID", "Description","SetSize")]}))
goSwitchMat_MF = goSwitchMat_MF[!duplicated(goSwitchMat_MF$ID),]
rownames(goSwitchMat_MF) = goSwitchMat_MF$ID
goSwitchMat_BP = do.call("rbind", lapply(goListSwitch_BP_dev,
	function(x) {
		x$SetSize = as.integer(ss(as.character(x$BgRatio), "/", 1))
		x[,c("ID", "Description","SetSize")]}))
goSwitchMat_BP = goSwitchMat_BP[!duplicated(goSwitchMat_BP$ID),]
rownames(goSwitchMat_BP) = goSwitchMat_BP$ID
goSwitchMat_MF2 = do.call("cbind", lapply(goListSwitch_MF_dev, function(x)
	x[match(goSwitchMat_MF$ID,x$ID),c("pvalue", "qvalue")]))
rownames(goSwitchMat_MF2) = goSwitchMat_MF$ID
goSwitchMat_MF = cbind(goSwitchMat_MF, goSwitchMat_MF2)
goSwitchMat_BP2 = do.call("cbind", lapply(goListSwitch_BP_dev, function(x)
	x[match(goSwitchMat_BP$ID,x$ID),c("pvalue", "qvalue")]))
rownames(goSwitchMat_BP2) = goSwitchMat_BP$ID
goSwitchMat_BP = cbind(goSwitchMat_BP, goSwitchMat_BP2)
## merge again
goSwitchMat = rbind(goSwitchMat_BP, goSwitchMat_MF)
goSwitchMat$Type = rep(c("BP", "MF"),
	times = c(nrow(goSwitchMat_BP), nrow(goSwitchMat_MF)))
# NOTE(review): goSwitchMat is indexed here with colnames(keggSwitchMat)
# (and keggSwitchMat below with names(goSwitchMat)). This only works because
# both frames share the same column layout — fragile; consider using each
# frame's own column names.
colSums(goSwitchMat[,grep("qvalue", colnames(keggSwitchMat))] < 0.05,
	na.rm=TRUE)
table(rowSums(goSwitchMat[,grep("qvalue", colnames(keggSwitchMat))] < 0.05,
	na.rm=TRUE))
##################################
### filter to dev and q-value
geneSetSwitch_dev = rbind(goSwitchMat[,c(1:3,12,
	grep("qvalue",names(goSwitchMat)))],
	keggSwitchMat[,c(1:3,12,
	grep("qvalue",names(goSwitchMat)))])
# Sort by mean q-value across the four feature types (columns 5:8) and drop
# very large gene sets
geneSetSwitch_dev = geneSetSwitch_dev[order(rowMeans(geneSetSwitch_dev[,5:8])),]
geneSetSwitch_dev = geneSetSwitch_dev[geneSetSwitch_dev$SetSize < 5000,]
colSums(geneSetSwitch_dev[,grep("qvalue",
	names(geneSetSwitch_dev))] < 0.05,na.rm=TRUE)
write.csv(geneSetSwitch_dev, file="tables/geneSets_isoformSwitches.csv",
	row.names=FALSE)
## filter to only significant
numSig = rowSums(geneSetSwitch_dev[,grep("qvalue",
	names(geneSetSwitch_dev))] < 0.05,na.rm=TRUE)
sigGeneSetSwitch_dev = geneSetSwitch_dev[numSig > 0,]
write.csv(sigGeneSetSwitch_dev, row.names=FALSE,
	file="tables/geneSets_isoformSwitches_onlySigInOne.csv")
### make figure for paper
# Heatmap of -log10(q) for significant KEGG terms across feature types
keggMatSig = sigGeneSetSwitch_dev[sigGeneSetSwitch_dev$Type == "KEGG",]
library(lattice)
qMat = -log10(keggMatSig[,5:8])
rownames(qMat) = keggMatSig$Description
colnames(qMat) = ss(colnames(qMat),"\\.")
pdf("plots/geneSet_isoSwitch_heatmap_KEGG.pdf", useDingbats=FALSE,w=10)
theSeq = seq(0,7,by=0.1)
my.col <- colorRampPalette(c("white","darkblue"))(length(theSeq))
print(levelplot(t(as.matrix(qMat)), at = theSeq,pretty=TRUE,
	col.regions = my.col, scales=list(y=list(cex=1.5),
	x=list(rot=90, cex=1.5)),aspect="fill",
	ylab = "", xlab = ""))
dev.off()
## summary statistics
colSums(sigGeneSetSwitch_dev[,grep("qvalue",
	names(sigGeneSetSwitch_dev))] < 0.05, na.rm=TRUE)
table(rowSums(sigGeneSetSwitch_dev[,grep("qvalue",
	names(sigGeneSetSwitch_dev))] < 0.05, na.rm=TRUE))
/devel/isoform_switches.R
|
permissive
|
LieberInstitute/BrainSeq_Phase1
|
R
| false
| false
| 8,070
|
r
|
###
source("../eqtl_functions.R")
library(GenomicRanges)
library(clusterProfiler)
library(limma)
##### summary statistics ####
load("rdas/devStats_controlSamples.rda")
#################################
###### Create isoform switches ##########
# drop genes
rangeList = lapply(statList[-1], function(x) {
cat(".")
xSig = x[which(x$p_bonf < 0.05),]
xList = split(xSig, factor(xSig$EnsemblGeneID,
levels = unique(x$EnsemblGeneID)))
xList = xList[lengths(xList) > 0]
theRange = t(sapply(split(xSig$ageCorr,factor(xSig$EnsemblGeneID,
levels = unique(xSig$EnsemblGeneID))), range))
theRange = as.data.frame(theRange)
colnames(theRange) = c("negCorr","posCorr")
## get min and max feature
mins = xSig[order(xSig$ageCorr)]
mins = mins[!duplicated(mins$EnsemblGeneID) &
!is.na(mins$EnsemblGeneID)]
theRange$minFeature = names(mins)[match(rownames(theRange),
mins$EnsemblGeneID)]
maxs = xSig[order(xSig$ageCorr,decreasing=TRUE)]
maxs = maxs[!duplicated(maxs$EnsemblGeneID) &
!is.na(maxs$EnsemblGeneID)]
theRange$maxFeature = names(maxs)[match(rownames(theRange),
maxs$EnsemblGeneID)]
## other metrics
theRange$numFeatures = table(x$EnsemblGeneID)[rownames(theRange)]
theRange$numSigFeatures = lengths(xList)
theRange$Symbol = x$Symbol[match(rownames(theRange), x$EnsemblGeneID)]
theRange$EntrezID = x$EntrezID[match(rownames(theRange), x$EnsemblGeneID)]
return(theRange)
})
### significant switches
switchList = lapply(rangeList, function(x) {
x$corDiff = x$posCorr - x$negCorr
x[which(x$negCorr < 0 & x$posCorr > 0 ),]
})
sapply(switchList, nrow)
# save
save(switchList, file="rdas/isoform_switch_devel_byFeature.rda")
############################################
#### gene ontology on genes that switch ####
load("rdas/isoform_switch_devel_byFeature.rda")
geneSwitchList = lapply(switchList, rownames)
geneSwitchList = lapply(geneSwitchList, function(x)
x[!grepl("-",x) & !is.na(x)]) # non fusion
## venn diagram of the IDs by the 5 features
allGenesSwitch = unique(unlist(geneSwitchList))
geneMatSwitch = sapply(geneSwitchList, function(x) allGenesSwitch %in% x)
rownames(geneMatSwitch) = allGenesSwitch
dim(geneMatSwitch)
pdf("plots/venn_geneIDs_devChanges_withSwitch.pdf",h=4.5,w=4.5)
vennDiagram(vennCounts(geneMatSwitch))
dev.off()
### write CSV
geneSwitchAll = do.call("rbind", switchList)
geneSwitchAll$Type = ss(rownames(geneSwitchAll), "\\.")
geneSwitchAll$EnsemblID = ss(rownames(geneSwitchAll), "\\.",2)
rownames(geneSwitchAll)= NULL
geneSwitchAll = geneSwitchAll[,c(11,10,1:9)]
write.csv(geneSwitchAll, file="tables/suppTable5_isoformSwitches.csv",
row.names=FALSE)
########################################
## get entrez id
entrezBgList = lapply(statList[-1], function(x) {
o = x$EntrezID[!is.na(x$p_bonf)]
unique(o[!is.na(o)])
})
lengths(entrezBgList)
entrezGeneSwitchList = lapply(switchList, function(x) {
unique(x$EntrezID[!is.na(x$EntrezID)])
})
lengths(entrezGeneSwitchList)
## also just regulated genes
entrezBgList_dev = lapply(statList[-1], function(x) {
o = x$EntrezID[which(x$p_bonf < 0.05)]
unique(o[!is.na(o)])
})
lengths(entrezBgList_dev)
############################
### kegg on switches #######
## kegg on dev reg
keggListSwitch_dev = mapply(function(g, bg) {
ht=enrichKEGG(as.character(g),
organism="human", pvalueCutoff=1,
universe= as.character(bg),minGSSize=5,
pAdjustMethod="none", qvalueCutoff=1)
as.data.frame(ht)
}, entrezGeneSwitchList, entrezBgList_dev, SIMPLIFY=FALSE)
keggSwitchMat = do.call("rbind", lapply(keggListSwitch_dev,
function(x) {
x$SetSize = as.integer(ss(as.character(x$BgRatio), "/", 1))
x[,c("ID", "Description","SetSize")]}))
keggSwitchMat = keggSwitchMat[!duplicated(keggSwitchMat$ID),]
rownames(keggSwitchMat) = keggSwitchMat$ID
keggSwitchMat2 = do.call("cbind", lapply(keggListSwitch_dev, function(x)
x[match(keggSwitchMat$ID,x$ID),c("pvalue", "qvalue")]))
rownames(keggSwitchMat2) = keggSwitchMat$ID
keggSwitchMat = cbind(keggSwitchMat, keggSwitchMat2)
keggSwitchMat$Type = "KEGG"
## numbers
colSums(keggSwitchMat[,grep("qvalue", colnames(keggSwitchMat))] < 0.05,
na.rm=TRUE)
table(rowSums(keggSwitchMat[,grep("qvalue", colnames(keggSwitchMat))] < 0.05,
na.rm=TRUE))
###################################
#### gene ontology on switches ####
###################################
## development background
goListSwitch_MF_dev = mapply(function(g, bg) {
ht=enrichGO(as.character(g),
OrgDb = "org.Hs.eg.db", pvalueCutoff=1,
universe= as.character(bg),minGSSize=5,
pAdjustMethod="none", qvalueCutoff=1, readable=TRUE)
as.data.frame(ht)
}, entrezGeneSwitchList, entrezBgList_dev, SIMPLIFY=FALSE)
goListSwitch_BP_dev = mapply(function(g, bg) {
ht=enrichGO(as.character(g), ont = "BP",
OrgDb = "org.Hs.eg.db", pvalueCutoff=1,
universe= as.character(bg),minGSSize=5,
pAdjustMethod="none", qvalueCutoff=1, readable=TRUE)
as.data.frame(ht)
}, entrezGeneSwitchList, entrezBgList_dev, SIMPLIFY=FALSE)
################
### combine ####
## make matrix
goSwitchMat_MF = do.call("rbind", lapply(goListSwitch_MF_dev,
function(x) {
x$SetSize = as.integer(ss(as.character(x$BgRatio), "/", 1))
x[,c("ID", "Description","SetSize")]}))
goSwitchMat_MF = goSwitchMat_MF[!duplicated(goSwitchMat_MF$ID),]
rownames(goSwitchMat_MF) = goSwitchMat_MF$ID
goSwitchMat_BP = do.call("rbind", lapply(goListSwitch_BP_dev,
function(x) {
x$SetSize = as.integer(ss(as.character(x$BgRatio), "/", 1))
x[,c("ID", "Description","SetSize")]}))
goSwitchMat_BP = goSwitchMat_BP[!duplicated(goSwitchMat_BP$ID),]
rownames(goSwitchMat_BP) = goSwitchMat_BP$ID
goSwitchMat_MF2 = do.call("cbind", lapply(goListSwitch_MF_dev, function(x)
x[match(goSwitchMat_MF$ID,x$ID),c("pvalue", "qvalue")]))
rownames(goSwitchMat_MF2) = goSwitchMat_MF$ID
goSwitchMat_MF = cbind(goSwitchMat_MF, goSwitchMat_MF2)
goSwitchMat_BP2 = do.call("cbind", lapply(goListSwitch_BP_dev, function(x)
x[match(goSwitchMat_BP$ID,x$ID),c("pvalue", "qvalue")]))
rownames(goSwitchMat_BP2) = goSwitchMat_BP$ID
goSwitchMat_BP = cbind(goSwitchMat_BP, goSwitchMat_BP2)
## merge again
goSwitchMat = rbind(goSwitchMat_BP, goSwitchMat_MF)
goSwitchMat$Type = rep(c("BP", "MF"),
times = c(nrow(goSwitchMat_BP), nrow(goSwitchMat_MF)))
colSums(goSwitchMat[,grep("qvalue", colnames(keggSwitchMat))] < 0.05,
na.rm=TRUE)
table(rowSums(goSwitchMat[,grep("qvalue", colnames(keggSwitchMat))] < 0.05,
na.rm=TRUE))
##################################
### filter to dev and q-value
geneSetSwitch_dev = rbind(goSwitchMat[,c(1:3,12,
grep("qvalue",names(goSwitchMat)))],
keggSwitchMat[,c(1:3,12,
grep("qvalue",names(goSwitchMat)))])
geneSetSwitch_dev = geneSetSwitch_dev[order(rowMeans(geneSetSwitch_dev[,5:8])),]
geneSetSwitch_dev = geneSetSwitch_dev[geneSetSwitch_dev$SetSize < 5000,]
colSums(geneSetSwitch_dev[,grep("qvalue",
names(geneSetSwitch_dev))] < 0.05,na.rm=TRUE)
write.csv(geneSetSwitch_dev, file="tables/geneSets_isoformSwitches.csv",
row.names=FALSE)
## filter to only significant
numSig = rowSums(geneSetSwitch_dev[,grep("qvalue",
names(geneSetSwitch_dev))] < 0.05,na.rm=TRUE)
sigGeneSetSwitch_dev = geneSetSwitch_dev[numSig > 0,]
write.csv(sigGeneSetSwitch_dev, row.names=FALSE,
file="tables/geneSets_isoformSwitches_onlySigInOne.csv")
### make figure for paper
keggMatSig = sigGeneSetSwitch_dev[sigGeneSetSwitch_dev$Type == "KEGG",]
library(lattice)
qMat = -log10(keggMatSig[,5:8])
rownames(qMat) = keggMatSig$Description
colnames(qMat) = ss(colnames(qMat),"\\.")
pdf("plots/geneSet_isoSwitch_heatmap_KEGG.pdf", useDingbats=FALSE,w=10)
theSeq = seq(0,7,by=0.1)
my.col <- colorRampPalette(c("white","darkblue"))(length(theSeq))
print(levelplot(t(as.matrix(qMat)), at = theSeq,pretty=TRUE,
col.regions = my.col, scales=list(y=list(cex=1.5),
x=list(rot=90, cex=1.5)),aspect="fill",
ylab = "", xlab = ""))
dev.off()
## summary statistics
colSums(sigGeneSetSwitch_dev[,grep("qvalue",
names(sigGeneSetSwitch_dev))] < 0.05, na.rm=TRUE)
table(rowSums(sigGeneSetSwitch_dev[,grep("qvalue",
names(sigGeneSetSwitch_dev))] < 0.05, na.rm=TRUE))
|
library(lme4)
library(lmerTest)
library(ggplot2)

# All per-duration CSVs and output PDFs live in the same results directory;
# set it once instead of before every block.
setwd('/Users/Bubba/Desktop/COGS195/Github/RattleNN/RattleResults')

# Run the full analysis pipeline for one session length (in seconds):
# load "<duration>data.csv", recode the yoked flag to descriptive labels,
# fit a mixed-effects model of RMS on time and condition, print its summary,
# and save the two diagnostic plots with the same filenames as before.
#
# duration - session length in seconds; selects the input CSV and output names.
# Returns the loaded data frame, invisibly (the function is run for its
# side effects: printed model summary and saved PDFs).
analyze_rattle_run <- function(duration) {
  rattledata <- read.csv(paste0(duration, "data.csv"))
  colnames(rattledata) <- c("run", "yoked", "sec", "RMS", "f")
  # 0/1 condition codes -> descriptive labels used in the plot legend
  rattledata$yoked[rattledata$yoked == 0] <- "RMS-reinforced"
  rattledata$yoked[rattledata$yoked == 1] <- "yoked control"

  # Mixed model: random intercept per run; fixed effects for condition, time,
  # and their interaction (both continuous variables standardized via scale()).
  rattlelm <- lmer(scale(RMS) ~ (1 | run) + yoked + scale(sec) + yoked * scale(sec),
                   data = rattledata)
  # Explicit print: top-level auto-printing does not happen inside a function.
  print(summary(rattlelm))

  plot_title <- paste0(duration, "s, 5 runs, Rattle A fsine 3-5-16")
  print(qplot(sec, RMS, data = rattledata, main = plot_title,
              geom = c("smooth"), method = "lm", formula = y ~ x, color = yoked))
  quartz.save(paste0("3-5_", duration, "s_RAS3_rattleresults.pdf"), type = "pdf")

  plot(rattledata$f, rattledata$RMS, main = plot_title, xlab = "f", ylab = "RMS")
  quartz.save(paste0("3-5_", duration, "s_RAS3_RMSvsf.pdf"), type = "pdf")

  invisible(rattledata)
}

# Same pipeline for every recorded session length.
for (duration in c(50, 100, 200, 300, 600)) {
  analyze_rattle_run(duration)
}
|
/RattleResults/3-5 Rattle A Servo 3/DataAnalysis.R
|
no_license
|
yehsk8rz/RattleNN
|
R
| false
| false
| 3,711
|
r
|
library(lme4)
library(lmerTest)
library(ggplot2)

# All per-duration CSVs and output PDFs live in the same results directory;
# set it once instead of before every block.
setwd('/Users/Bubba/Desktop/COGS195/Github/RattleNN/RattleResults')

# Run the full analysis pipeline for one session length (in seconds):
# load "<duration>data.csv", recode the yoked flag to descriptive labels,
# fit a mixed-effects model of RMS on time and condition, print its summary,
# and save the two diagnostic plots with the same filenames as before.
#
# duration - session length in seconds; selects the input CSV and output names.
# Returns the loaded data frame, invisibly (the function is run for its
# side effects: printed model summary and saved PDFs).
analyze_rattle_run <- function(duration) {
  rattledata <- read.csv(paste0(duration, "data.csv"))
  colnames(rattledata) <- c("run", "yoked", "sec", "RMS", "f")
  # 0/1 condition codes -> descriptive labels used in the plot legend
  rattledata$yoked[rattledata$yoked == 0] <- "RMS-reinforced"
  rattledata$yoked[rattledata$yoked == 1] <- "yoked control"

  # Mixed model: random intercept per run; fixed effects for condition, time,
  # and their interaction (both continuous variables standardized via scale()).
  rattlelm <- lmer(scale(RMS) ~ (1 | run) + yoked + scale(sec) + yoked * scale(sec),
                   data = rattledata)
  # Explicit print: top-level auto-printing does not happen inside a function.
  print(summary(rattlelm))

  plot_title <- paste0(duration, "s, 5 runs, Rattle A fsine 3-5-16")
  print(qplot(sec, RMS, data = rattledata, main = plot_title,
              geom = c("smooth"), method = "lm", formula = y ~ x, color = yoked))
  quartz.save(paste0("3-5_", duration, "s_RAS3_rattleresults.pdf"), type = "pdf")

  plot(rattledata$f, rattledata$RMS, main = plot_title, xlab = "f", ylab = "RMS")
  quartz.save(paste0("3-5_", duration, "s_RAS3_RMSvsf.pdf"), type = "pdf")

  invisible(rattledata)
}

# Same pipeline for every recorded session length.
for (duration in c(50, 100, 200, 300, 600)) {
  analyze_rattle_run(duration)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Tick2Sec.R
\name{to_secBATV}
\alias{to_secBATV}
\alias{alltick2sec}
\title{Convert tick data to one-second data}
\usage{
to_secBATV(x)
alltick2sec(getdir = "~/TRTH/tick/", savedir = "~/TRTH/sec/",
Symbols = list.files(getdir), overwrite = FALSE)
}
\arguments{
\item{x}{the xts series to convert to one-second BATV}
\item{getdir}{Directory that contains tick data}
\item{savedir}{Directory in which to save converted data}
\item{Symbols}{String names of instruments to convert}
\item{overwrite}{TRUE/FALSE. If file already exists in savedir, should it be
overwritten?}
}
\value{
\code{to_secBATV} returns an xts object of one second frequency.
\code{alltick2sec} returns a list of files that were converted.
}
\description{
This is like taking a snapshot of the market at the end of every second,
except the volume over the second is summed.
}
\details{
From tick data with columns: \dQuote{Price}, \dQuote{Volume},
\dQuote{Bid.Price}, \dQuote{Bid.Size}, \dQuote{Ask.Price}, \dQuote{Ask.Size},
to data of one second frequency with columns \dQuote{Bid.Price},
\dQuote{Bid.Size}, \dQuote{Ask.Price}, \dQuote{Ask.Size},
\dQuote{Trade.Price}, and \dQuote{Volume}
The primary purpose of these functions is to reduce the amount of data on
disk so that it will take less time to load the data into memory.
If there are no trades or bid/ask price updates in a given second, we will
not make a row for that timestamp. If there were no trades, but the bid or
ask price changed, then we _will_ have a row but the Volume and Trade.Price
will be NA.
If there are multiple trades in the same second, Volume will be the sum of
the volume, but only the last trade price in that second will be printed.
Similarly, if there is a trade, and then later in the same second, there is
a bid/ask update, the last Bid/Ask Price/Size will be used.
\code{alltick2sec} is used to convert the data of several files from tick to
one second frequency data.
}
\note{
\code{to_secBATV} is used by the TRTH_BackFill.R script in the
inst/parser directory of the FinancialInstrument package. These functions
are specific to data created by that script and are not intended for
more general use.
}
\examples{
\dontrun{
getSymbols("CLU1")
system.time(xsec <- to_secBATV(CLU1))
convert.log <- alltick2sec()
}
}
\author{
gsee
}
|
/man/Tick2Sec.Rd
|
no_license
|
cran/FinancialInstrument
|
R
| false
| true
| 2,401
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Tick2Sec.R
\name{to_secBATV}
\alias{to_secBATV}
\alias{alltick2sec}
\title{Convert tick data to one-second data}
\usage{
to_secBATV(x)
alltick2sec(getdir = "~/TRTH/tick/", savedir = "~/TRTH/sec/",
Symbols = list.files(getdir), overwrite = FALSE)
}
\arguments{
\item{x}{the xts series to convert to one-second BATV}
\item{getdir}{Directory that contains tick data}
\item{savedir}{Directory in which to save converted data}
\item{Symbols}{String names of instruments to convert}
\item{overwrite}{TRUE/FALSE. If file already exists in savedir, should it be
overwritten?}
}
\value{
\code{to_secBATV} returns an xts object of one second frequency.
\code{alltick2sec} returns a list of files that were converted.
}
\description{
This is like taking a snapshot of the market at the end of every second,
except the volume over the second is summed.
}
\details{
From tick data with columns: \dQuote{Price}, \dQuote{Volume},
\dQuote{Bid.Price}, \dQuote{Bid.Size}, \dQuote{Ask.Price}, \dQuote{Ask.Size},
to data of one second frequency with columns \dQuote{Bid.Price},
\dQuote{Bid.Size}, \dQuote{Ask.Price}, \dQuote{Ask.Size},
\dQuote{Trade.Price}, and \dQuote{Volume}
The primary purpose of these functions is to reduce the amount of data on
disk so that it will take less time to load the data into memory.
If there are no trades or bid/ask price updates in a given second, we will
not make a row for that timestamp. If there were no trades, but the bid or
ask price changed, then we _will_ have a row but the Volume and Trade.Price
will be NA.
If there are multiple trades in the same second, Volume will be the sum of
the volume, but only the last trade price in that second will be printed.
Similarly, if there is a trade, and then later in the same second, there is
a bid/ask update, the last Bid/Ask Price/Size will be used.
\code{alltick2sec} is used to convert the data of several files from tick to
one second frequency data.
}
\note{
\code{to_secBATV} is used by the TRTH_BackFill.R script in the
inst/parser directory of the FinancialInstrument package. These functions
are specific to data created by that script and are not intended for
more general use.
}
\examples{
\dontrun{
getSymbols("CLU1")
system.time(xsec <- to_secBATV(CLU1))
convert.log <- alltick2sec()
}
}
\author{
gsee
}
|
#!/usr/bin/env Rscript
# Demo: element names on an atomic vector -- what they cost in memory
# and how to search them.
nv <- 11:15
object.size(nv)                # footprint before any names are attached
names(nv) <- c("Num1", "Sum1", "Lum1", "Dum1", "Rum1")
# A named vector is bigger; unname() (note the two n's) views it name-free.
object.size(nv)
object.size(names(nv))         # the character vector of names takes space too
# Pattern-matching against element names always goes through names().
which(grepl("[NL].+", names(nv)))
nv[sort(names(nv))]            # subset by name, in alphabetical name order
|
/nv0.R
|
no_license
|
rafalcode/r-fresh
|
R
| false
| false
| 371
|
r
|
#!/usr/bin/env Rscript
# about named vectors: attaching names to an atomic vector, the memory they
# cost, and how to search them
nv <- 11:15
object.size(nv)   # size before any names are attached
names(nv) <- c("Num1", "Sum1", "Lum1", "Dum1", "Rum1")
# to see named vectors without the names, use unname() ... note the two n's.
object.size(nv)   # size after naming -- the names add to the footprint
object.size(names(nv))   # the names themselves are a character vector with their own size
# You're able to search on the names, but always using the names() function
grep("[NL].+", names(nv))   # indices of elements whose name starts with N or L
nv[sort(names(nv))]   # subset by name, in alphabetical name order
|
# First pass PM detection. Takes in grayscale image and the reference
# GFP image. Removes detected membranes that intersect with image edges.
# Returns a list of removed membrane objects, detected membrane objects,
# and computed features.
#
# img      - full multi-channel image, passed through for the edge bounds
# channels - list of per-channel images; channels$ref_gfp is used downstream
#            as the feature reference
# factor   - intensity scaling applied to `chan` before blurring
# chan     - single-channel grayscale image to segment
# cutoff   - minimum object area in pixels; smaller blobs are treated as noise
# cnum     - list of channel indices (cnum$gfp_channel used downstream)
detect_membranes <-function(img, channels, factor, chan, cutoff, cnum)
{
message("\n########################CELLS########################")
# chan <- normalize(chan)
# Smooth the scaled channel, adaptive-threshold it, then label connected blobs.
g <- gblur(chan*factor, sigma = 2)
ct = thresh(g)
cm = bwlabel(ct)
# Shape features drive the area-based noise filter below.
fm <- computeFeatures.shape(cm)
noise <- which(fm[,"s.area"]< cutoff) # noise removal
# NOTE(review): length(table(cm)) also counts the background label 0, so this
# reported count is presumably one higher than the object count -- confirm.
message(paste0("Number of cells detected on first pass: ", length(table(cm))))
membranes <- rmObjects(cm, noise)
res <- remove_edge_membranes(membranes, img, channels, cnum)
# NOTE(review): this message reports `membranes` (after noise removal but
# before edge filtering), not the edge-filtered result held in `res`.
message(paste0("Number of cells after noise removal: ", length(table(membranes))))
list(removed = res$removed, membranes = res$membranes, FM = res$FM)
}
# Removes detected membranes that intersect with image edges. Saves the
# removed membranes separately and computes complete features on all kept
# membranes in reference to the unaltered GFP channel.
#
# membranes - labeled object mask (bwlabel image) of candidate membranes
# img       - full multi-channel image; supplies the frame dimensions
# channels  - list of per-channel images; channels$ref_gfp is the feature reference
# cnum      - list of channel indices; cnum$gfp_channel selects the GFP plane
#
# Returns list(removed = <edge-touching objects>, membranes = <relabeled kept
# objects>, FM = <feature matrix of the kept objects>).
remove_edge_membranes <- function(membranes, img, channels, cnum)
{
contours <- ocontour(membranes)
# Hoist the (identical) dimension lookup used for both the right and bottom bounds.
gfp_dim <- dim(img[, , cnum$gfp_channel])
bound <- list(l = 3, # 3 pixel buffer to account for haze
              r = gfp_dim[1] - 3,
              t = 3,
              b = gfp_dim[2] - 3)
# Bounding-box extremes per object. vapply (instead of lapply) yields named
# numeric vectors directly -- the names are the object labels -- rather than
# lists that the comparisons below would have to coerce implicitly.
left   <- vapply(contours, function(x) min(x[, 1]), numeric(1))
right  <- vapply(contours, function(x) max(x[, 1]), numeric(1))
top    <- vapply(contours, function(x) min(x[, 2]), numeric(1))
bottom <- vapply(contours, function(x) max(x[, 2]), numeric(1))
# Any object whose bounding box crosses the buffered frame border is an edge cell.
edge_cells <- c(which(left < bound$l), which(right > bound$r),
                which(top < bound$t), which(bottom > bound$b))
edge_cells <- as.numeric(names(edge_cells))
edge_cells <- unique(edge_cells)
# Keep a copy of the discarded (edge-touching) objects for later inspection.
# seq_len() is safe when the mask is empty, unlike seq(1, 0) == c(1, 0).
removed_ind <- which(!(seq_len(length(table(membranes))) %in% edge_cells))
removed <- rmObjects(membranes, removed_ind)
membranes <- rmObjects(membranes, edge_cells)
membranes <- bwlabel(membranes) # relabel so object IDs are contiguous again
FM <- computeFeatures(membranes, ref = channels$ref_gfp, xname = "membrane")
FM <- FM[, c("membrane.a.b.mean", "membrane.0.m.cx", "membrane.0.m.cy",
             "membrane.0.s.area", "membrane.0.s.radius.min")]
list(removed = removed, membranes = membranes, FM = FM)
}
|
/src/cells.R
|
no_license
|
sah129/CellQuant
|
R
| false
| false
| 2,283
|
r
|
# First pass PM detection. Takes in grayscale image and the reference
# GFP image. Removes detected membranes that intersect with image edges.
# Returns a list of removed membrane objects, detected membrane objects,
# and computed features.
#
# img      - full multi-channel image, passed through for the edge bounds
# channels - list of per-channel images; channels$ref_gfp is used downstream
#            as the feature reference
# factor   - intensity scaling applied to `chan` before blurring
# chan     - single-channel grayscale image to segment
# cutoff   - minimum object area in pixels; smaller blobs are treated as noise
# cnum     - list of channel indices (cnum$gfp_channel used downstream)
detect_membranes <-function(img, channels, factor, chan, cutoff, cnum)
{
message("\n########################CELLS########################")
# chan <- normalize(chan)
# Smooth the scaled channel, adaptive-threshold it, then label connected blobs.
g <- gblur(chan*factor, sigma = 2)
ct = thresh(g)
cm = bwlabel(ct)
# Shape features drive the area-based noise filter below.
fm <- computeFeatures.shape(cm)
noise <- which(fm[,"s.area"]< cutoff) # noise removal
# NOTE(review): length(table(cm)) also counts the background label 0, so this
# reported count is presumably one higher than the object count -- confirm.
message(paste0("Number of cells detected on first pass: ", length(table(cm))))
membranes <- rmObjects(cm, noise)
res <- remove_edge_membranes(membranes, img, channels, cnum)
# NOTE(review): this message reports `membranes` (after noise removal but
# before edge filtering), not the edge-filtered result held in `res`.
message(paste0("Number of cells after noise removal: ", length(table(membranes))))
list(removed = res$removed, membranes = res$membranes, FM = res$FM)
}
# Removes detected membranes that intersect with image edges. Saves removed
# membranes and computes complete features on all membranes in reference to
# unaltered GFP channel.
#
# membranes - labeled object mask (bwlabel image) of candidate membranes
# img       - full multi-channel image; supplies the frame dimensions
# channels  - list of per-channel images; channels$ref_gfp is the feature reference
# cnum      - list of channel indices; cnum$gfp_channel selects the GFP plane
#
# Returns list(removed = <edge-touching objects>, membranes = <relabeled kept
# objects>, FM = <feature matrix of the kept objects>).
remove_edge_membranes <-function(membranes,img, channels, cnum)
{
contours <- ocontour(membranes)
bound <- list(l = 3, # 3 pixel buffer to account for haze
r = dim(img[,,cnum$gfp_channel])[1]-3,
t = 3,
b = dim(img[,,cnum$gfp_channel])[2]-3)
# Bounding-box extremes of each object's contour (lists of length-1 numerics;
# the comparisons below rely on R coercing them element-wise).
left <- lapply(contours, function(x){min(x[,1])})
right <- lapply(contours, function(x){max(x[,1])})
top <- lapply(contours, function(x){min(x[,2])})
bottom <- lapply(contours, function(x){max(x[,2])})
# Objects whose bounding box crosses the buffered frame border are edge cells;
# the names carried by which() are the object labels.
edge_cells <- c(which(left < bound$l),which(right > bound$r), which(top < bound$t),which(bottom > bound$b))
edge_cells <- as.numeric(names(edge_cells))
edge_cells <- unique(edge_cells)
# Keep a copy of the discarded (edge-touching) objects for later inspection.
# NOTE(review): seq(1, n) yields c(1, 0) when n == 0 -- seq_len would be safer.
removed_ind = which(!(seq(1, length(table(membranes))) %in% edge_cells))
removed = rmObjects(membranes, removed_ind)
membranes <- rmObjects(membranes, edge_cells)
membranes <- bwlabel(membranes) # relabel so object IDs are contiguous again
FM <- computeFeatures(membranes, ref = channels$ref_gfp, xname = "membrane")
FM<- FM[,c("membrane.a.b.mean", "membrane.0.m.cx", "membrane.0.m.cy", "membrane.0.s.area","membrane.0.s.radius.min")]
list(removed = removed, membranes = membranes, FM = FM)
}
|
# Code extracted (purl) from the RLumModel vignette on using custom parameter
# sets; the "## ----" lines are the original knitr chunk headers.
## ---- echo=FALSE, message = FALSE----------------------------------------
library(RLumModel)
## ----global_options, include=FALSE---------------------------------------
knitr::opts_chunk$set(fig.pos = 'H', fig.align = 'center')
## ------------------------------------------------------------------------
# Custom energy-level parameter set (four levels): trap depths E, frequency
# factors s, transition probabilities A/B, plus pair-production rate R.
own_parameters <- list(
N = c(2e15, 2e15, 2.4e16, 1e17),
E = c(0, 0, 0, 0),
s = c(0, 0, 0, 0),
A = c(2e-8, 2e-9, 4e-9, 1e-8),
B = c(0, 0, 5e-11, 4e-8),
K = 0,
model = "customized",
R = 1.7e15)
## ------------------------------------------------------------------------
# Initial concentrations per level; only level 4 starts populated.
own_state_parameters <- c(0, 0, 0, 9.4e15)
## ----set sequence Pagonis 2009-------------------------------------------
# RF sequence: c(duration [s], dose rate [Gy/s], dose-rate step) -- see the
# vignette text for the exact argument semantics.
sequence <- list(RF = c(20, 0.1, 0.1))
## ---- fig.cap = "RF signal for 0.1 Gy/s"---------------------------------
RF_Pagonis2009 <- model_LuminescenceSignals(
model = "customized",
sequence = sequence,
own_parameters = own_parameters,
own_state_parameters = own_state_parameters,
verbose = FALSE)
## ---- fig.cap = "Concentration of m1 during RF"--------------------------
# Extract and plot the concentration of energy level 4 (m1) over the RF run.
concentration_m1 <- Luminescence::get_RLum(
RF_Pagonis2009,
recordType = c("conc. level 4"))
Luminescence::plot_RLum(
concentration_m1,
ylim = c(9.2e15, 9.6e15))
## ----Different Dose-rates Pagonis 2009, fig.cap = "RF signals for different dose rates"----
# Repeat the simulation across a grid of dose rates and overlay the RF curves.
dose.rate <- seq(from = 0.1, to = 0.5, by = 0.1)
model.output <- lapply(dose.rate, function(x) {
sequence <- list(RF = c(20, x, x))
RF_data <- model_LuminescenceSignals(
model = "customized",
sequence = sequence,
own_parameters = own_parameters,
own_state_parameters = own_state_parameters,
verbose = FALSE,
plot = FALSE
)
## "RF$" for exact matching RF and not (RF)
return(get_RLum(RF_data, recordType = "RF$", drop = FALSE))
})
model.output.merged <- merge_RLum(model.output)
plot_RLum(
object = model.output.merged,
xlab = "Stimulation time [s]",
ylab = "RF signal [a.u.]",
legend.text = paste(dose.rate, "Gy/s"),
legend.pos = "outside",
combine = TRUE)
## ----Initial signal------------------------------------------------------
# For each dose rate keep only a scalar: the initial/maximum RF intensity.
dose.rate <- seq(from = 0.1, to = 0.5, by = 0.1)
model.output <- vapply(X = dose.rate, FUN = function(x) {
sequence <- list(RF = c(20, x, x))
temp <- model_LuminescenceSignals(
model = "customized",
sequence = sequence,
own_parameters = own_parameters,
own_state_parameters = own_state_parameters,
verbose = FALSE,
plot = FALSE
)
## "RF$" for exact matching RF and not (RF)
RF_curve <- get_RLum(temp, recordType = "RF$")
return(max(get_RLum(RF_curve)[2,2]))
}, FUN.VALUE = 1)
## ---- echo=FALSE, fig.cap = "Initial RF signal for different dose rates with parameters of Lawless 2009"----
plot(
dose.rate,
model.output,
type = "b",
xlab = "Stimulation Time [s]",
ylab = "Initial RF intensitiy [a.u.]"
)
## ----Lawless 2009 set parameters-----------------------------------------
# Second parameter set (two levels), used to compare the simulated level-1
# concentration against the analytical solutions (Eq. 16 and Eq. 18).
own_parameters <- list(
N = c(1e14, 1e15),
E = c(0, 0),
s = c(0, 0),
A = c(1e-13, 1e-14),
B = c(0, 1e-7),
K = 0,
model = "customized",
R = 1e8)
sequence <- list(RF = c(20, 100, 1))
RF_Lawless_2009 <- model_LuminescenceSignals(
model = "customized",
sequence = sequence,
own_parameters = own_parameters,
verbose = FALSE,
plot = FALSE)
concentration_n <- Luminescence::get_RLum(
RF_Lawless_2009,
recordType = c("conc. level 1"))
## ---- echo=FALSE, fig.cap = "Concentration of Level 1 with numerical and analytical solutions"----
# Overlay the two closed-form approximations on the simulated concentration.
Luminescence::plot_RLum(
concentration_n,
ylim = c(0, 15e8), lwd = 3)
t <- seq(0, 100, 2)
numerical_eq16 <- 1e-13*1e14/1e-7 *((1 + 2*1e-7*1e8*t/(1e-13*1e14))^(0.5)-1)
numerical_eq18 <- (2*1e-13*1e14*1e8*t/(1e-7))^(0.5)
lines(t, numerical_eq16, pch = 3, col = "red", type = "b")
lines(t, numerical_eq18, pch = 4, col = "green", type = "b")
legend("bottomright", legend = c("Simulated", "Eq. 16","Eq. 18"), col = c("black", "red", "green"), lwd = 1)
## ----Chen 2013 set parameters--------------------------------------------
# Third parameter set: a TL (thermoluminescence) simulation starting at -220 C
# and heating to 130 C at 1 C/s.
own_parameters <- list(
N = c(1e9, 0),
E = c(0.4, 0),
s = c(1e11, 0),
A = c(1e-9,0),
B = c(0, 1e-10),
K = 0,
model = "customized")
own_state_parameters <- c(1e8, 1e8)
own_start_temperature <- -220
sequence <- list(TL = c(-220, 130, 1))
## ----solve Chen 2013, fig.cap = "TL with parameter sets of Chen 2013"----
TL_Chen2013 <- model_LuminescenceSignals(
model = "customized",
sequence = sequence,
own_parameters = own_parameters,
own_state_parameters = own_state_parameters,
own_start_temperature = own_start_temperature,
verbose = FALSE)
## ---- echo=FALSE, fig.cap = "Concentrations of different energy levels"----
concentration <- Luminescence::get_RLum(
TL_Chen2013,
recordType = c("conc. level 1", "conc. level 2", "conc. n_c"),
drop = FALSE)
# Relabel the record types to "TL" so plot_RLum will combine all three
# concentration curves (n, m, nc) in a single panel.
concentration@records[[1]]@recordType <- "TL"
concentration@records[[2]]@recordType <- "TL"
concentration@records[[3]]@recordType <- "TL"
Luminescence::plot_RLum(
concentration,
combine = TRUE,
ylab = "concentrations",
main = "",
legend.text = c("n", "m","nc")
)
|
/data/genthat_extracted_code/RLumModel/vignettes/RLumModel_-_Using_own_parameter_sets.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 5,245
|
r
|
## ---- echo=FALSE, message = FALSE----------------------------------------
library(RLumModel)
## ----global_options, include=FALSE---------------------------------------
knitr::opts_chunk$set(fig.pos = 'H', fig.align = 'center')
## ------------------------------------------------------------------------
own_parameters <- list(
N = c(2e15, 2e15, 2.4e16, 1e17),
E = c(0, 0, 0, 0),
s = c(0, 0, 0, 0),
A = c(2e-8, 2e-9, 4e-9, 1e-8),
B = c(0, 0, 5e-11, 4e-8),
K = 0,
model = "customized",
R = 1.7e15)
## ------------------------------------------------------------------------
own_state_parameters <- c(0, 0, 0, 9.4e15)
## ----set sequence Pagonis 2009-------------------------------------------
sequence <- list(RF = c(20, 0.1, 0.1))
## ---- fig.cap = "RF signal for 0.1 Gy/s"---------------------------------
RF_Pagonis2009 <- model_LuminescenceSignals(
model = "customized",
sequence = sequence,
own_parameters = own_parameters,
own_state_parameters = own_state_parameters,
verbose = FALSE)
## ---- fig.cap = "Concentration of m1 during RF"--------------------------
concentration_m1 <- Luminescence::get_RLum(
RF_Pagonis2009,
recordType = c("conc. level 4"))
Luminescence::plot_RLum(
concentration_m1,
ylim = c(9.2e15, 9.6e15))
## ----Different Dose-rates Pagonis 2009, fig.cap = "RF signals for different dose rates"----
dose.rate <- seq(from = 0.1, to = 0.5, by = 0.1)
model.output <- lapply(dose.rate, function(x) {
sequence <- list(RF = c(20, x, x))
RF_data <- model_LuminescenceSignals(
model = "customized",
sequence = sequence,
own_parameters = own_parameters,
own_state_parameters = own_state_parameters,
verbose = FALSE,
plot = FALSE
)
## "RF$" for exact matching RF and not (RF)
return(get_RLum(RF_data, recordType = "RF$", drop = FALSE))
})
model.output.merged <- merge_RLum(model.output)
plot_RLum(
object = model.output.merged,
xlab = "Stimulation time [s]",
ylab = "RF signal [a.u.]",
legend.text = paste(dose.rate, "Gy/s"),
legend.pos = "outside",
combine = TRUE)
## ----Initial signal------------------------------------------------------
# For each dose rate, rerun the RF simulation and keep a single scalar from
# the RF curve (the value at row 2, column 2 of the curve matrix; the max()
# wrapper is a no-op on a single element -- NOTE(review): confirm intended).
dose.rate <- seq(from = 0.1, to = 0.5, by = 0.1)
model.output <- vapply(X = dose.rate, FUN = function(x) {
sequence <- list(RF = c(20, x, x))
temp <- model_LuminescenceSignals(
model = "customized",
sequence = sequence,
own_parameters = own_parameters,
own_state_parameters = own_state_parameters,
verbose = FALSE,
plot = FALSE
)
## "RF$" for exact matching RF and not (RF)
RF_curve <- get_RLum(temp, recordType = "RF$")
return(max(get_RLum(RF_curve)[2,2]))
}, FUN.VALUE = 1)
## ---- echo=FALSE, fig.cap = "Initial RF signal for different dose rates with parameters of Lawless 2009"----
# Plot initial RF intensity vs dose rate.
# NOTE(review): "intensitiy" in ylab is a typo, and xlab says "Stimulation
# Time" although the x axis is the dose rate -- confirm before changing.
plot(
dose.rate,
model.output,
type = "b",
xlab = "Stimulation Time [s]",
ylab = "Initial RF intensitiy [a.u.]"
)
## ----Lawless 2009 set parameters-----------------------------------------
# Two-level model: N = trap capacities, E/s = thermal parameters (zero here,
# i.e. no thermal release), A/B = transition probabilities, R = pair
# creation rate. Units follow the Luminescence/RLumModel conventions.
own_parameters <- list(
N = c(1e14, 1e15),
E = c(0, 0),
s = c(0, 0),
A = c(1e-13, 1e-14),
B = c(0, 1e-7),
K = 0,
model = "customized",
R = 1e8)
# 100 s of RF at 1 Gy/s.
sequence <- list(RF = c(20, 100, 1))
RF_Lawless_2009 <- model_LuminescenceSignals(
model = "customized",
sequence = sequence,
own_parameters = own_parameters,
verbose = FALSE,
plot = FALSE)
# Concentration of the first energy level (trapped electrons n).
concentration_n <- Luminescence::get_RLum(
RF_Lawless_2009,
recordType = c("conc. level 1"))
## ---- echo=FALSE, fig.cap = "Concentration of Level 1 with numerical and analytical solutions"----
# Overlay the simulated n(t) with the analytical approximations
# (Eq. 16 and Eq. 18 of the referenced publication), evaluated with the
# same parameter values (A*N/B etc.) as above.
Luminescence::plot_RLum(
concentration_n,
ylim = c(0, 15e8), lwd = 3)
t <- seq(0, 100, 2)
numerical_eq16 <- 1e-13*1e14/1e-7 *((1 + 2*1e-7*1e8*t/(1e-13*1e14))^(0.5)-1)
numerical_eq18 <- (2*1e-13*1e14*1e8*t/(1e-7))^(0.5)
lines(t, numerical_eq16, pch = 3, col = "red", type = "b")
lines(t, numerical_eq18, pch = 4, col = "green", type = "b")
legend("bottomright", legend = c("Simulated", "Eq. 16","Eq. 18"), col = c("black", "red", "green"), lwd = 1)
## ----Chen 2013 set parameters--------------------------------------------
# TL simulation: one active trap (E = 0.4 eV, s = 1e11 1/s) plus a
# recombination centre; initial concentrations 1e8 each, heating starts
# at -220 degC.
own_parameters <- list(
N = c(1e9, 0),
E = c(0.4, 0),
s = c(1e11, 0),
A = c(1e-9,0),
B = c(0, 1e-10),
K = 0,
model = "customized")
own_state_parameters <- c(1e8, 1e8)
own_start_temperature <- -220
# TL sequence: heat from -220 to 130 degC at 1 K/s.
sequence <- list(TL = c(-220, 130, 1))
## ----solve Chen 2013, fig.cap = "TL with parameter sets of Chen 2013"----
TL_Chen2013 <- model_LuminescenceSignals(
model = "customized",
sequence = sequence,
own_parameters = own_parameters,
own_state_parameters = own_state_parameters,
own_start_temperature = own_start_temperature,
verbose = FALSE)
## ---- echo=FALSE, fig.cap = "Concentrations of different energy levels"----
concentration <- Luminescence::get_RLum(
TL_Chen2013,
recordType = c("conc. level 1", "conc. level 2", "conc. n_c"),
drop = FALSE)
# Relabel the concentration records as "TL" so plot_RLum(combine = TRUE)
# puts all three curves in one panel against temperature.
concentration@records[[1]]@recordType <- "TL"
concentration@records[[2]]@recordType <- "TL"
concentration@records[[3]]@recordType <- "TL"
Luminescence::plot_RLum(
concentration,
combine = TRUE,
ylab = "concentrations",
main = "",
legend.text = c("n", "m","nc")
)
|
# Compute weekly and monthly log returns for DJ30 price data and write the
# returns plus their covariance and correlation matrices to an Excel file.
library(rJava)
library(xlsxjars)
library(xlsx)

in_file <- "C:/Users/Marc Pfeiffer/Desktop/Baruch Pre-MFE/NLA/Homework 6/data-DJ30-july2011-june2013.xlsx"
out_file <- "C:/Users/Marc Pfeiffer/Desktop/Baruch Pre-MFE/NLA/Homework 6/Question16Output.xlsx"

# Drop the Date column and compute log returns log(P_t / P_{t-1}) from a
# sheet of prices (rows = dates, columns = tickers).
log_returns <- function(prices) {
  mat <- data.matrix(prices)
  mat <- subset(mat, select = -c(Date))
  log(mat[-1, ] / mat[-nrow(mat), ])
}

weeklyLog <- log_returns(read.xlsx(in_file, 1, header = TRUE))
monthlyLog <- log_returns(read.xlsx(in_file, 2, header = TRUE))

write.xlsx(weeklyLog, out_file, sheetName = "WeeklyLog", append = TRUE)
write.xlsx(monthlyLog, out_file, sheetName = "MonthlyLog", append = TRUE)

# Sample covariance / correlation of the return series.
# NOTE(review): "Corrilation" is a typo for "Correlation" in the variable
# and sheet names; the sheet names are kept as-is for output compatibility.
weeklyCovariance <- cov(weeklyLog)
monthlyCovariance <- cov(monthlyLog)
weeklyCorrilation <- cor(weeklyLog)
monthlyCorrilation <- cor(monthlyLog)
write.xlsx(weeklyCovariance, out_file, sheetName = "Weekly Covariance", append = TRUE)
write.xlsx(weeklyCorrilation, out_file, sheetName = "Weekly Corrilation", append = TRUE)
write.xlsx(monthlyCovariance, out_file, sheetName = "Monthly Covariance", append = TRUE)
write.xlsx(monthlyCorrilation, out_file, sheetName = "Monthly Corrilation", append = TRUE)
|
/Cov and Cor matrix.R
|
no_license
|
marc54/Finance
|
R
| false
| false
| 1,937
|
r
|
# Compute weekly and monthly log returns for DJ30 price data and write the
# returns plus covariance/correlation matrices to an Excel workbook.
library(rJava)
library(xlsxjars)
library(xlsx)
# Sheet 1 = weekly prices, sheet 2 = monthly prices.
weeklyData = read.xlsx("C:/Users/Marc Pfeiffer/Desktop/Baruch Pre-MFE/NLA/Homework 6/data-DJ30-july2011-june2013.xlsx", 1, header=TRUE)
monthlyData = read.xlsx("C:/Users/Marc Pfeiffer/Desktop/Baruch Pre-MFE/NLA/Homework 6/data-DJ30-july2011-june2013.xlsx", 2, header=TRUE)
weeklyData = data.matrix(weeklyData)
monthlyData = data.matrix(monthlyData)
# Drop the Date column so only prices remain.
weeklyData = subset(weeklyData, select = -c(Date) )
monthlyData = subset(monthlyData, select = -c(Date) )
# "Top" drops the first row, "Bottom" drops the last, so Top/Bottom aligns
# P_t with P_{t-1} for the log-return computation below.
weeklyDataBottom = weeklyData[-(nrow(weeklyData)),]
weeklyDataTop = weeklyData[-1,]
monthlyDataBottom = monthlyData[-(nrow(monthlyData)),]
monthlyDataTop = monthlyData[-1,]
weeklyLog = log(weeklyDataTop/weeklyDataBottom)
monthlyLog = log(monthlyDataTop/monthlyDataBottom)
write.xlsx(weeklyLog ,"C:/Users/Marc Pfeiffer/Desktop/Baruch Pre-MFE/NLA/Homework 6/Question16Output.xlsx", sheetName = "WeeklyLog", append = TRUE)
write.xlsx(monthlyLog ,"C:/Users/Marc Pfeiffer/Desktop/Baruch Pre-MFE/NLA/Homework 6/Question16Output.xlsx", sheetName = "MonthlyLog", append = TRUE)
# Sample covariance and correlation of the return series.
# NOTE(review): "Corrilation" is a typo for "Correlation" (variable and
# sheet names) -- confirm downstream consumers before renaming.
weeklyCovariance = cov(weeklyLog)
monthlyCovariance = cov(monthlyLog)
weeklyCorrilation = cor(weeklyLog)
monthlyCorrilation = cor(monthlyLog)
write.xlsx(weeklyCovariance ,"C:/Users/Marc Pfeiffer/Desktop/Baruch Pre-MFE/NLA/Homework 6/Question16Output.xlsx", sheetName = "Weekly Covariance", append = TRUE)
write.xlsx(weeklyCorrilation ,"C:/Users/Marc Pfeiffer/Desktop/Baruch Pre-MFE/NLA/Homework 6/Question16Output.xlsx", sheetName = "Weekly Corrilation", append = TRUE)
write.xlsx(monthlyCovariance ,"C:/Users/Marc Pfeiffer/Desktop/Baruch Pre-MFE/NLA/Homework 6/Question16Output.xlsx", sheetName = "Monthly Covariance", append = TRUE)
write.xlsx(monthlyCorrilation ,"C:/Users/Marc Pfeiffer/Desktop/Baruch Pre-MFE/NLA/Homework 6/Question16Output.xlsx", sheetName ="Monthly Corrilation", append = TRUE)
|
# server.R -- Shiny backend for a RefDB chemical-shift explorer.
# Serves (1) a histogram of the shift distribution selected by amino acid,
# secondary structure and nucleus, and (2) a summary-statistics table.
library(shiny)
library(nortest)
library(ggplot2)
#########################
# Lookup vectors shared with the UI; keep names and values unchanged.
fileList = list.files(path = "data/Generated-NA/", pattern = "txt")
fileList.noExt = gsub(".txt", "", fileList)
# NOTE(review): "Fhenylalanine" looks like a typo for "Phenylalanine";
# confirm against ui.R choices before changing the string.
AaCode.wGly = c("Alanine", "Arginine", "Asparagine", "Aspartate", "Cystine",
"Glutamine", "Glutamate", "Glycine", "Histidine", "Isoleucine",
"Leucine", "Lysine", "Methionine", "Fhenylalanine", "Proline",
"Serine", "Threonine", "Tyrosine", "Tryptophan", "Valine")
aaCode.wGly = c("A", "R", "N", "D", "C", "Q", "E", "G", "H", "I",
"L", "K", "M", "F", "P", "S", "T", "Y", "W", "V")
Ss = c("Beta Strand", "Helix", "Coil")
ss = c("B", "H", "C")
re = c("CA", "CB")
# Column names of data/RefDB.csv: every <residue>.<structure>.<nucleus>
# combination (20 residues x B/C/H x CA/CB/CO), generated in the same order
# as the original hand-written literal vector.
col.names = as.vector(sapply(
c("A", "C", "D", "E", "F", "G", "H", "I", "K", "L",
"M", "N", "P", "Q", "R", "S", "T", "V", "W", "Y"),
function(aa) sapply(c("B", "C", "H"),
function(sec) paste(aa, sec, c("CA", "CB", "CO"), sep = "."))))
#########################
# Pre-computed summary statistics and alternative residue encodings.
RefDB.stat.ca = read.csv("data/RefDB.caStat.csv")
RefDB.stat.cb = read.csv("data/RefDB.cbStat.csv")
aaCode = c("A", "R", "N", "D", "C", "Q", "E", "H", "I", "L", "K", "M", "F", "P", "S", "T", "Y", "W", "V")
aaCode.L = c("ALA", "ARG", "ASN", "ASP", "CYS", "GLN", "GLU", "HIS", "ILE", "LEU", "LYS", "MET", "PHE", "PRO", "SER", "THR", "TYR", "TRP", "VAL")
aaCode.l = c("Ala", "Arg", "Asn", "Asp", "Cys", "Gln", "Glu", "His", "Ile", "Leu", "Lys", "Met", "Phe", "Pro", "Ser", "Thr", "Tyr", "Trp", "Val")
#########################
shinyServer(function(input, output) {
  # Full shift table; one column per entry of col.names.
  RefDB.data = read.csv("data/RefDB.csv", header = FALSE)
  #############################
  # Histogram of the column selected via the Aa/Ss/re inputs.
  output$plot1 <- renderPlot({
    selected = paste(input$Aa, input$Ss, input$re, sep = ".")
    col_idx <- which(col.names == selected)
    shifts <- RefDB.data[, col_idx]
    hist(shifts, freq = FALSE, main = paste("Histogram of", selected),
         breaks = input$breaks)
  })
  #############################
  # Summary-statistics table chosen via input$stat.Input (a file name
  # under data/).
  output$stat.table <- renderDataTable({
    stat_path = paste("data/", input$stat.Input, sep = "")
    read.csv(stat_path, header = TRUE)
  })
})
|
/server.R
|
no_license
|
billchenxi/Shiny-RefDB-Data-Analysis-App
|
R
| false
| false
| 3,993
|
r
|
# server.R -- Shiny backend for the RefDB chemical-shift explorer (second
# copy of the script in this file). Behavior is identical to the original.
library(shiny)
library(nortest)
library(ggplot2)
#########################
fileList = list.files(path = "data/Generated-NA/", pattern = "txt")
fileList.noExt = gsub(".txt", "", fileList)
# NOTE(review): "Fhenylalanine" appears to be a typo for "Phenylalanine";
# verify against ui.R before correcting.
AaCode.wGly = c("Alanine", "Arginine", "Asparagine", "Aspartate", "Cystine",
"Glutamine", "Glutamate", "Glycine", "Histidine", "Isoleucine",
"Leucine", "Lysine", "Methionine", "Fhenylalanine", "Proline",
"Serine", "Threonine", "Tyrosine", "Tryptophan", "Valine")
aaCode.wGly = c("A", "R", "N", "D", "C", "Q", "E", "G", "H", "I",
"L", "K", "M", "F", "P", "S", "T", "Y", "W", "V")
Ss = c("Beta Strand", "Helix", "Coil")
ss = c("B", "H", "C")
re = c("CA", "CB")
# All <residue>.<structure>.<nucleus> column names of data/RefDB.csv,
# built programmatically in the exact order of the original literal:
# residues alphabetically, then B/C/H, then CA/CB/CO.
col.names = as.vector(sapply(
c("A", "C", "D", "E", "F", "G", "H", "I", "K", "L",
"M", "N", "P", "Q", "R", "S", "T", "V", "W", "Y"),
function(residue) sapply(c("B", "C", "H"),
function(struct) paste(residue, struct, c("CA", "CB", "CO"), sep = "."))))
#########################
RefDB.stat.ca = read.csv("data/RefDB.caStat.csv")
RefDB.stat.cb = read.csv("data/RefDB.cbStat.csv")
aaCode = c("A", "R", "N", "D", "C", "Q", "E", "H", "I", "L", "K", "M", "F", "P", "S", "T", "Y", "W", "V")
aaCode.L = c("ALA", "ARG", "ASN", "ASP", "CYS", "GLN", "GLU", "HIS", "ILE", "LEU", "LYS", "MET", "PHE", "PRO", "SER", "THR", "TYR", "TRP", "VAL")
aaCode.l = c("Ala", "Arg", "Asn", "Asp", "Cys", "Gln", "Glu", "His", "Ile", "Leu", "Lys", "Met", "Phe", "Pro", "Ser", "Thr", "Tyr", "Trp", "Val")
#########################
shinyServer(function(input, output) {
  RefDB.data = read.csv("data/RefDB.csv", header = FALSE)
  #############################
  # Density histogram of the selected residue/structure/nucleus column.
  output$plot1 <- renderPlot({
    name = paste(input$Aa, input$Ss, input$re, sep = ".")
    idx <- which(col.names == name)
    hist(RefDB.data[, idx], freq = FALSE,
         main = paste("Histogram of", name), breaks = input$breaks)
  })
  #############################
  # Render the summary-statistics CSV selected in the UI.
  output$stat.table <- renderDataTable({
    read.csv(paste("data/", input$stat.Input, sep = ""), header = TRUE)
  })
})
|
# used by woebin, woebin_plot, woebin_ply, woebin_adj
# Turn a vector of breaks / special_values into a long data.table.
# Each element may bundle several values joined by "%,%"; those are split
# onto separate rows sharing the same rowid/bin_chr, and the literal string
# "missing" is mapped to NA. Returns NULL for NULL input.
split_vec_todf = function(vec) {
  # dummies so R CMD check does not flag data.table NSE symbols
  value = . = bin_chr = V1 = NULL
  if (is.null(vec)) return(invisible(NULL))
  dt = data.table(value = vec, bin_chr = vec)
  dt = dt[, rowid := .I]
  dt = dt[, strsplit(as.character(value), "%,%", fixed = TRUE), by = .(rowid, bin_chr)]
  dt[, .(rowid, bin_chr, value = ifelse(V1 == "missing", NA, as.character(V1)))]
}
# Prepend "missing" to spl_val when dtm$value contains NA but neither
# breaks nor spl_val already mentions "missing", so that NA rows get their
# own special-value bin. Returns the (possibly extended) spl_val vector.
add_missing_spl_val = function(dtm, breaks, spl_val) {
  value = NULL  # data.table NSE dummy
  has_na = dtm[, any(is.na(value))]
  missing_declared = any(grepl('missing', c(breaks, spl_val)))
  if (has_na && !missing_declared) {
    spl_val = c('missing', spl_val)
  }
  return(spl_val)
}
# split dtm into binning_sv (special values) and dtm (without special_values)
# dtm: data.table with columns y (0/1 label), variable (name), value.
# Rows whose value matches a special value are aggregated into good/bad
# counts per special-value group; the rest are returned for normal binning.
# Returns list(binning_sv = data.table(variable, bin, good, bad) or NULL,
#              dtm = remaining rows).
dtm_binning_sv = function(dtm, breaks, spl_val) {
binning_sv = value = . = y = variable = good = bad = bin = NULL
# spl_val (automatically extended with "missing" when dtm has NAs)
spl_val = add_missing_spl_val(dtm, breaks, spl_val)
if (!is.null(spl_val)) {
# special_values from vector to data frame (one row per "%,%"-split value)
sv_df = split_vec_todf(spl_val)
# dtm_sv & dtm: partition rows by membership in the special values
dtm_sv = setDT(dtm)[value %in% sv_df$value]
dtm = setDT(dtm)[!(value %in% sv_df$value)]
# if (nrow(dtm_sv) == 0) return(list(binning_sv=NULL, dtm=dtm))
# binning_sv: per-value good/bad counts, re-joined with sv_df so values
# that were bundled via "%,%" collapse back into one bin per rowid.
binning_sv = merge(
dtm_sv[, .(good = sum(y==0), bad = sum(y==1), variable=unique(variable)) , by = value][,value:=as.character(value)],
sv_df[,value:=as.character(value)],
all.x = TRUE, by='value'
)[, value:=ifelse(is.na(value), "missing", as.character(value))
][, .(bin=paste0(value,collapse="%,%"), good=sum(good), bad=sum(bad), variable=unique(variable)), by=rowid
][, .(variable, bin, good, bad)]
}
return(list(binning_sv=binning_sv, dtm=dtm))
}
# check empty bins for numeric variable
# A bin "[a,b)" is non-empty if every right edge also appears as some left
# edge; if the two edge sets differ, some intervals received no rows and the
# binning is recomputed using only the surviving right edges.
check_empty_bins = function(dtm, binning) {
. = bin = value = variable = y = NULL
# check empty bins
## break points from bin: extract left ("\\1") and right ("\\2") interval
## edges from labels of the form "[left, right)", dropping +/-Inf
breaks_list = lapply(
list(left="\\1", right="\\2"),
function(x) setdiff(sub("^\\[(.*), *(.*)\\)", x, unique(binning$bin)), c("Inf","-Inf")) )
## if there are empty bins
if (!setequal(breaks_list$left, breaks_list$right)) {
bstbrks = unique(c(-Inf, unique(breaks_list$right), Inf))
# re-cut dtm$value at the reduced break set and recount good/bad per bin
binning = dtm[
, bin := cut(value, bstbrks, right = FALSE, dig.lab = 10, ordered_result = FALSE)
][, .(good = sum(y==0), bad = sum(y==1), variable=unique(variable)) , by = .(bin)
][order(bin)]
# warning( paste0("The break points are modified into \'", paste0(breaks_list$right, collapse = ", "), "\'. There are empty bins based on the provided break points." ) )
}
return(binning)
}
# check zero in good bad, remove bins that have zeros in good or bad column
# Iteratively merges any bin with good==0 or bad==0 into its smaller
# neighbor (the side with the lower count) until all bins contain both
# classes; needed so WoE = log(good/bad) stays finite.
# NOTE(review): the count_distr_limit parameter is accepted but never used
# in this function -- confirm whether it was meant to gate the merging.
check_zero_goodbad = function(dtm, binning, count_distr_limit = NULL) {
brkp = good = bad = count = merge_tolead = count_lag = count_lead = brkp2 = . = variable = bin = badprob = value = NULL
while (binning[!is.na(brkp)][good==0 | bad==0,.N] > 0) {
# brkp needs to be removed if good==0 or bad==0; pick the smallest such
# bin and merge it toward whichever neighbor is smaller
# (fill = nrow(dtm)+1 acts as a "larger than any bin" sentinel at the edges)
rm_brkp = binning[!is.na(brkp)][
,count := good+bad
][,`:=`(
count_lag=shift(count,type="lag", fill=nrow(dtm)+1),
count_lead=shift(count,type="lead", fill=nrow(dtm)+1)
)][, merge_tolead := count_lag > count_lead
][good == 0 | bad == 0][count == min(count)]
# set brkp to lead's or lag's, i.e. adopt the neighbor's break point
shift_type = ifelse(rm_brkp[1,merge_tolead], 'lead', 'lag')
binning = binning[
,brkp2 := shift(brkp,type=shift_type)
][brkp == rm_brkp[1,brkp], brkp := brkp2]
# groupby brkp: rows that now share a brkp collapse into one merged bin
binning = binning[
,.(variable=unique(variable), bin=paste0(bin, collapse = "%,%"), good=sum(good), bad=sum(bad)), by=brkp
][, badprob:=bad/(good+bad)]
}
# format bin: for numeric variables, collapse "[a,b)%,%[b,c)" to "[a,c)"
# and recompute brkp from the left edge ("missing" keeps brkp = NA)
if (is.numeric(dtm[,value])) {
binning = binning[
grepl("%,%",bin), bin := sub("^(\\[.+?,).+,(.+?\\))$", "\\1\\2", bin)
][bin == 'missing', brkp := NA
][bin != 'missing', brkp := as.numeric(sub("^\\[(.*),.+", "\\1", bin))]
}
return(binning)
}
# check count distri, remove bins that count_distribution rate less than count_distr_limit
# Iteratively merges any bin whose share of total rows falls below
# count_distr_limit into a neighbor, choosing the direction of the
# neighbor with the smaller count share.
check_count_distri = function(dtm, binning, count_distr_limit) {
count_distr = count = good = bad = brkp = merge_tolead = count_lag = count_lead = brkp2 = . = variable = bin = value = NULL
if (!('count' %in% names(binning))) binning[, count := good + bad]
binning[, count_distr := (count)/sum(count)]
while (binning[!is.na(brkp)][count_distr<count_distr_limit,.N] > 0) {
# pick the smallest under-populated bin and the merge direction
# (fill = nrow(dtm)+1 is a "larger than any share" sentinel at the edges;
# NOTE(review): it is compared against a proportion, which works only
# because it is guaranteed to exceed any count_distr -- confirm intent)
rm_brkp = binning[!is.na(brkp)][
,count_distr := (count)/sum(count)
][,`:=`(
count_lag=shift(count_distr,type="lag", fill=nrow(dtm)+1),
count_lead=shift(count_distr,type="lead", fill=nrow(dtm)+1)
)][, merge_tolead := count_lag > count_lead
][count_distr<count_distr_limit][count_distr == min(count_distr)]
# set brkp to lead's or lag's so the bin is absorbed into that neighbor
shift_type = ifelse(rm_brkp[1,merge_tolead], 'lead', 'lag')
binning = binning[
,brkp2 := shift(brkp,type=shift_type)
][brkp == rm_brkp[1,brkp], brkp := brkp2]
# groupby brkp: collapse merged rows into a single bin
binning = binning[
,.(variable=unique(variable), bin=paste0(bin, collapse = "%,%"), count=sum(count), good=sum(good), bad=sum(bad)), by=brkp
][, count_distr := (count)/sum(count)]
}
# format bin: collapse "[a,b)%,%[b,c)" labels and refresh brkp for numerics
if (is.numeric(dtm[,value])) {
binning = binning[
grepl("%,%",bin), bin := sub("^(\\[.+?,).+,(.+?\\))$", "\\1\\2", bin)
][bin == 'missing', brkp := NA
][bin != 'missing', brkp := as.numeric(sub("^\\[(.*),.+", "\\1", bin))]
}
return(binning)
}
# required in woebin2 # return binning if breaks provided
#' @import data.table
# Bin dtm (columns y, variable, value) at user-supplied break points.
# Special values are split off first via dtm_binning_sv(); remaining rows
# are cut at `breaks` (numeric variable) or grouped by the categories
# listed in `breaks` (factor/character variable).
# Returns list(binning_sv, binning), each a data.table with columns
# variable/bin/good/bad, or NULL when there is nothing to bin.
woebin2_breaks = function(dtm, breaks, spl_val) {
  # dummies to silence R CMD check notes on data.table NSE symbols
  value = bin = . = y = variable = bad = good = V1 = badprob = bksv_list = bin_chr = NULL

  # breaks from vector to data frame (one row per "%,%"-separated value)
  bk_df = split_vec_todf(breaks)

  # split special-value rows away from the rows to be binned
  dtm_binsv_list = dtm_binning_sv(dtm, breaks, spl_val)
  dtm = dtm_binsv_list$dtm
  binning_sv = dtm_binsv_list$binning_sv
  # BUGFIX: check is.null() BEFORE evaluating dtm[,.N]; the original order
  # (`dtm[,.N] == 0 || is.null(dtm)`) errors when dtm is NULL. This matches
  # the guard used in woebin2_init_bin / woebin2_equal.
  if (is.null(dtm) || dtm[,.N] == 0) return(list(binning_sv=binning_sv, binning=NULL))

  # binning
  if (is.numeric(dtm[,value])) {
    # cut at the provided numeric breaks (left-closed intervals)
    bstbrks = c(-Inf, setdiff(unique(bk_df$value), c(NA, Inf, -Inf)), Inf)
    binning = dtm[
      , bin := cut(value, bstbrks, right = FALSE, dig.lab = 10, ordered_result = FALSE)
      ][, .(good = sum(y==0), bad = sum(y==1), variable=unique(variable)) , by = .(bin)
      ][order(bin)]
    # drop/repair intervals that received no rows
    binning = check_empty_bins(dtm, binning)
    # if "missing" was listed among the breaks, merge the missing bin with
    # the break row it was bundled with (same rowid in bk_df)
    if (bk_df[is.na(value),.N] == 1) {
      binning = merge(
        binning[, value:=sub("^\\[(.*), *(.*)\\)","\\2",bin)],
        bk_df,
        all.x = TRUE, by="value"
      )[order(rowid,value)][, bin:=ifelse(is.na(bin), "missing", as.character(bin))
      ][, .(bin=paste0(bin,collapse="%,%"), good=sum(good), bad=sum(bad), variable=unique(variable)), by=rowid
      ][order(rowid)]
    }
  } else if (is.factor(dtm[,value]) || is.character(dtm[,value])) {
    dtm = dtm[,value := as.character(value)]
    # categorical values present in the data but not covered by breaks_list
    diff_dt_brk = setdiff(dtm[,unique(value)], bk_df[,value])
    if (length(diff_dt_brk) > 0) {
      # NOTE(review): warning() immediately followed by a message-less
      # stop() is awkward; kept as-is to preserve the error contract.
      warning(sprintf('The categorical values (`%s`) are not specified in `breaks_list` for the column `%s`.', paste0(diff_dt_brk, collapse = ', '), dtm[1,variable]) )
      stop()
    }
    # group rows by the break group (rowid) their category belongs to
    binning = merge(
      dtm, bk_df[,bin:=bin_chr], all.x = TRUE
    )[order(rowid, bin)][, .(good = sum(y==0), bad = sum(y==1), variable=unique(variable)) , by = .(rowid, bin)]
  }
  # remove rowid column in binning data frame
  binning = binning[,rowid:=1][,rowid:=NULL]
  # # bind binning_sv and binning
  # if (setDT(binning_sv)[,.N] > 0) binning = rbind(binning_sv, binning)
  return(list(binning_sv=binning_sv, binning=binning))
}
# required in woebin2 # return initial binning
# Builds the fine-grained starting bins that the tree/chimerge optimizers
# later merge. Numeric values are cut at pretty() breaks (outliers beyond
# 3*IQR excluded when choosing breaks); logical/factor/character values get
# one bin per level, ordered by level (factor/logical) or by bad rate
# (character). init_count_distr bounds the number of initial bins (~1/rate).
# Returns list(binning_sv, initial_binning).
woebin2_init_bin = function(dtm, init_count_distr, breaks, spl_val) {
# global variables or functions
. = bad = badprob = bin = brkp = good = value = variable = y = NULL
# split special-value rows off first
dtm_binsv_list = dtm_binning_sv(dtm, breaks, spl_val)
dtm = dtm_binsv_list$dtm
binning_sv = dtm_binsv_list$binning_sv
if (is.null(dtm) || dtm[,.N]==0) return(list(binning_sv=binning_sv, initial_binning=NULL))
# binning
if (is.numeric(dtm[,value])) {
# numeric variable ------
xvalue = dtm[, value]
# breaks vector & outlier: ignore values beyond quartile +/- 3*IQR when
# selecting break points (the values themselves are still binned later)
iq = quantile(xvalue, na.rm = TRUE)
iqr = IQR(xvalue, na.rm = TRUE)
if (iqr == 0) {
xvalue_rm_outlier = xvalue
} else {
xvalue_rm_outlier = xvalue[which(xvalue >= iq[2]-3*iqr & xvalue <= iq[4]+3*iqr)]
}
# number of initial binning, capped by the number of distinct values
n = trunc(1/init_count_distr)
len_uniq_x = length(setdiff(unique(xvalue_rm_outlier), c(NA,Inf,-Inf)))
if (len_uniq_x < n) n = len_uniq_x
# initial breaks: one break per distinct value when few values exist,
# otherwise "pretty" rounded breaks
if (len_uniq_x < 10) {
brk = setdiff(unique(xvalue_rm_outlier), c(NA, Inf, -Inf))
} else {
brk = pretty(xvalue_rm_outlier, n)
}
brk = sort(brk[(brk < max(xvalue, na.rm =TRUE)) & (brk > min(xvalue, na.rm =TRUE))])
brk = unique(c(-Inf, brk, Inf))
if (anyNA(xvalue)) brk = c(brk, NA)
# initial binning datatable: good/bad counts per left-closed interval
init_bin = dtm[
, bin := cut(value, brk, right = FALSE, dig.lab = 10, ordered_result = FALSE)
][, .(good = sum(y==0), bad = sum(y==1), variable=unique(variable)) , by = bin
][order(bin)]
# check empty bins
init_bin = check_empty_bins(dtm, init_bin)
# brkp = left interval edge; badprob = bad rate of the bin
init_bin = init_bin[
, `:=`(brkp = as.numeric( sub("^\\[(.*),.+", "\\1", bin)), badprob = bad/(good+bad))
][, .(variable, bin, brkp, good, bad, badprob)]
} else if ( is.logical(dtm[,value]) || is.factor(dtm[,value]) || is.character(dtm[,value]) ) {
# other variable ------
# initial binning datatable: one bin per distinct level
init_bin = dtm[
, .(variable = unique(variable), good = sum(y==0), bad = sum(y==1)), by=value
][, badprob := bad/(good+bad)]
# order by bin if is.factor/logical, or by badprob if is.character;
# brkp is the resulting row index (NA level keeps brkp = NA)
if (is.logical(dtm[,value]) || is.factor(dtm[,value])) {
init_bin = init_bin[
order(value)
][, brkp := ifelse(is.na(value), NA, .I)
][, .(variable, bin=value, brkp, good, bad, badprob)]
} else {
init_bin = init_bin[
order(badprob)
# next 3 lines make NA located at the last rows
][, brkp := ifelse(is.na(value), NA, .I)
][order(brkp)
][, brkp := ifelse(is.na(value), NA, .I)
][, .(variable, bin=value, brkp, good, bad, badprob)]
}
}
# remove brkp that good == 0 or bad == 0 ------
init_bin = check_zero_goodbad(dtm, init_bin)
return(list(binning_sv=binning_sv, initial_binning=init_bin))
}
# required in woebin2_tree # add 1 best break for tree-like binning
# Given the fine-grained initial_binning and the break points chosen so far
# (bestbreaks), evaluates every remaining candidate break, keeps the one
# that maximizes total information value while every resulting bin holds at
# least count_distr_limit of the rows, and returns the binning that
# includes this additional break.
woebin2_tree_add_1brkp = function(dtm, initial_binning, count_distr_limit, bestbreaks=NULL) {
# global variables or functions
brkp = patterns = . = good = bad = variable = count_distr = value = min_count_distr = bstbin = min_count_distr = total_iv = bstbin = brkp = bin = NULL
# total_iv for all best breaks: for every candidate break i, compute the
# total IV and the minimum bin share of the binning {bestbreaks + i}
total_iv_all_breaks = function(initial_binning, bestbreaks, dtm_rows) {
# best breaks set: candidates not yet chosen (excluding -Inf/Inf/NA)
breaks_set = setdiff( initial_binning[,brkp], c(bestbreaks, -Inf, Inf, NA) )
init_bin_all_breaks = copy(initial_binning)
# loop on breaks_set: one "bstbin<i>" column per candidate break
for (i in breaks_set) {
# best break + i
bestbreaks_i = sort(c(bestbreaks, i))
# best break datatable
init_bin_all_breaks = init_bin_all_breaks[
, paste0("bstbin",i) := cut(brkp, c(-Inf, bestbreaks_i, Inf), right = FALSE, dig.lab = 10, ordered_result = FALSE) ]
}
# best break dt: melt candidate columns, aggregate good/bad per bin, then
# compute per-candidate total IV (iv_01 is defined elsewhere in the pkg)
total_iv_all_brks = melt(
init_bin_all_breaks, id = c("variable", "good", "bad"), variable.name = "bstbin", measure = patterns("bstbin.+")
)[, .(good = sum(good), bad = sum(bad), variable = unique(variable))
, by=.(bstbin, value)
][, count_distr := (good+bad)/dtm_rows, by=bstbin
][!is.na(value), min_count_distr := min(count_distr), by=bstbin
][, .(total_iv = iv_01(good, bad), variable = unique(variable), min_count_distr = min(min_count_distr,na.rm=TRUE)), by=bstbin
][, bstbin := as.numeric(sub("bstbin(.+)", "\\1", bstbin))][]
return(total_iv_all_brks)
}
# binning add 1best break: materialize the binning for a chosen break set
binning_add_1bst = function(initial_binning, bestbreaks) {
value = bstbin = . = good = bad = variable = woe = bin_iv = total_iv = bstbrkp = badprob = NULL # no visible binding for global variable
if ( is.numeric(dtm[,value]) ) {
binning_1bst_brk = initial_binning[
, bstbin := cut(brkp, c(-Inf, bestbreaks, Inf), right = FALSE, dig.lab = 10, ordered_result = FALSE)
][, .(variable=unique(variable), bin=unique(bstbin), good = sum(good), bad = sum(bad)) , by = bstbin
]
} else if (is.logical(dtm[,value]) || is.factor(dtm[,value]) || is.character(dtm[,value]) ) {
# categorical: merge bin labels with "%,%" instead of interval labels
bestbreaks = setdiff(bestbreaks, min(initial_binning[,brkp]))
binning_1bst_brk = initial_binning[
, bstbin := cut(brkp, c(-Inf, bestbreaks, Inf), right = FALSE,dig.lab = 10, ordered_result = FALSE)
][, .(variable=unique(variable), bin = paste0(bin, collapse = "%,%"), good = sum(good), bad = sum(bad)), by = bstbin ]
}
binning_1bst_brk = binning_1bst_brk[
order(bstbin)
][, total_iv := iv_01(good, bad)
][, bstbrkp := as.numeric( sub("^\\[(.*),.+", "\\1", bstbin) )
][, .(variable, bin, bstbin, bstbrkp, good, bad, total_iv)]
return(binning_1bst_brk)
}
# adding 1 best breakpoint
dtm_rows = nrow(dtm)
total_iv_all_brks = total_iv_all_breaks(initial_binning, bestbreaks, dtm_rows)
# bestbreaks: total_iv == max(total_iv) & min(count_distr) >= count_distr_limit
bstbrk_max_iv = total_iv_all_brks[min_count_distr >= count_distr_limit][total_iv==max(total_iv)][, bstbin]
# add 1best break to bestbreaks
bestbreaks = unique(c(bestbreaks, bstbrk_max_iv[1]))
bin_add_1bst = binning_add_1bst(initial_binning, bestbreaks)
return(bin_add_1bst)
}
# required in woebin2 # return tree-like binning
# Greedy tree-like binning: starting from woebin2_init_bin(), repeatedly
# adds the break point that maximizes total information value (via
# woebin2_tree_add_1brkp) until the relative IV gain drops below
# stop_limit or bin_num_limit bins are reached.
# Returns list(binning_sv, binning).
woebin2_tree = function(dtm, init_count_distr=0.02, count_distr_limit=0.05, stop_limit=0.1, bin_num_limit=8, breaks=NULL, spl_val=NULL) {
  # data.table NSE dummies
  brkp = bstbrkp = total_iv = NULL
  # initial binning
  bin_list = woebin2_init_bin(dtm, init_count_distr=init_count_distr, breaks=breaks, spl_val=spl_val)
  initial_binning = bin_list$initial_binning
  binning_sv = bin_list$binning_sv
  # BUGFIX: check is.null() BEFORE nrow(); the original order
  # (`nrow(initial_binning)<=1 || is.null(initial_binning)`) produces
  # logical(0) inside `||` and errors when initial_binning is NULL.
  if (is.null(initial_binning) || nrow(initial_binning) <= 1) {
    return(list(binning_sv=binning_sv, binning=initial_binning))
  }

  # initialize parameters
  ## number of candidate break points
  len_brks = initial_binning[!is.na(brkp), .N]
  ## param
  bestbreaks = NULL      ## best breaks chosen so far
  IVt1 = IVt2 = 1e-10    ## previous / current total IV (tiny seed avoids /0)
  IVchg = 1              ## relative IV gain of the last added break
  step_num = 1
  # best breaks from three to n+1 bins
  binning_tree = NULL
  # scalar condition -> use short-circuit && (was elementwise & in original)
  while ( (IVchg >= stop_limit) && (step_num+1 <= min(bin_num_limit, len_brks)) ) {
    binning_tree = woebin2_tree_add_1brkp(dtm, initial_binning, count_distr_limit, bestbreaks)
    # update parameters
    ## best breaks
    bestbreaks = binning_tree[bstbrkp != -Inf & !is.na(bstbrkp), bstbrkp]
    ## information value gain ratio
    IVt2 = binning_tree[1, total_iv]
    IVchg = IVt2/IVt1-1
    IVt1 = IVt2
    step_num = step_num + 1
  }
  # the loop may not run at all (e.g. len_brks very small)
  if (is.null(binning_tree)) binning_tree = initial_binning
  return(list(binning_sv=binning_sv, binning=binning_tree))
}
# examples
# system.time( binning_list <- woebin2_init_bin(dtm, init_count_distr=0.02, breaks =NULL, spl_val=NULL) )
# initial_binning=binning_list$initial_binning
# binning_sv = binning_list$binning_sv
# system.time( woebin2_tree_add_1brkp(dtm, initial_binning, count_distr_limit=0.05) )
# system.time( woebin2_tree(dtm, initial_binning, count_distr_limit=0.05) )
# required in woebin2 # return chimerge binning
#' @importFrom stats qchisq
# ChiMerge binning: starting from woebin2_init_bin(), repeatedly merges the
# pair of adjacent bins with the lowest chi-square statistic until
# (a) every adjacent pair exceeds the chi-square threshold derived from
# stop_limit, (b) every bin holds at least count_distr_limit of the rows,
# and (c) at most bin_num_limit bins remain.
# References:
# [chimerge](http://blog.csdn.net/qunxingvip/article/details/50449376)
# [ChiMerge:Discretization of numeric attributs](http://www.aaai.org/Papers/AAAI/1992/AAAI92-019.pdf)
woebin2_chimerge = function(dtm, init_count_distr=0.02, count_distr_limit=0.05, stop_limit=0.1, bin_num_limit=8, breaks=NULL, spl_val=NULL) {
  # data.table NSE dummies
  .= a= a_colsum= a_lag= a_lag_rowsum= a_rowsum= a_sum= bad= bin= brkp= brkp2= chisq= count= count_distr= e= e_lag= chisq_lead= good= goodbad= merge_tolead =value= variable= NULL

  # initial binning
  bin_list = woebin2_init_bin(dtm, init_count_distr=init_count_distr, breaks=breaks, spl_val=spl_val)
  initial_binning = bin_list$initial_binning
  binning_sv = bin_list$binning_sv
  # BUGFIX: check is.null() BEFORE nrow(); the original order
  # (`nrow(initial_binning)<=1 || is.null(initial_binning)`) produces
  # logical(0) inside `||` and errors when initial_binning is NULL.
  if (is.null(initial_binning) || nrow(initial_binning) <= 1) {
    return(list(binning_sv=binning_sv, binning=initial_binning))
  }

  # function to create a chisq column in initial_binning; each row's chisq
  # compares that bin's good/bad counts (a) with its predecessor's (a_lag)
  # against the expected counts (e, e_lag) under independence
  add_chisq = function(initial_binning) {
    chisq_df = melt(initial_binning[!is.na(brkp)], id.vars = c("brkp", "variable", "bin"), measure.vars = c("good", "bad"), variable.name = "goodbad", value.name = "a"
    )[order(brkp)
    ][, a_lag := shift(a, type="lag"), by=.(goodbad)
    ][, `:=`(
      a_rowsum = sum(a),
      a_lag_rowsum = sum(a_lag),
      a_colsum = a+a_lag,
      a_sum = sum(a+a_lag)), by=brkp
    ][, `:=`(
      e = a_rowsum/a_sum*a_colsum,
      e_lag = a_lag_rowsum/a_sum*a_colsum
    )][, .(chisq=sum((a-e)^2/e + (a_lag-e_lag)^2/e_lag)), by=brkp]
    return(merge(initial_binning[,count:=good+bad], chisq_df, all.x = TRUE))
  }

  # dtm_rows
  dtm_rows = nrow(dtm)
  # chisq threshold: (1 - stop_limit) quantile of chi-square with 1 df
  chisq_limit = qchisq(1-stop_limit,1)
  # binning with chisq column
  binning_chisq = add_chisq(initial_binning)

  # current state of the three stopping criteria
  bin_chisq_min = binning_chisq[, min(chisq, na.rm = TRUE)]
  bin_count_distr_min = binning_chisq[!is.na(brkp), min((good+bad)/dtm_rows)]
  bin_nrow = binning_chisq[,.N]

  # merge bins until all three criteria are satisfied
  while (
    bin_chisq_min < chisq_limit ||
    bin_count_distr_min < count_distr_limit ||
    bin_nrow > bin_num_limit) {
    # choose which break to remove and in which direction to merge
    if (bin_chisq_min < chisq_limit) {
      rm_brkp = binning_chisq[, merge_tolead := FALSE][order(chisq, count)][1,]
    } else if (bin_count_distr_min < count_distr_limit) {
      rm_brkp = binning_chisq[,`:=`(
        count_distr = count/sum(count),
        chisq_lead = shift(chisq, type = "lead", fill = Inf)
      )][,merge_tolead := ifelse(is.na(chisq), TRUE, chisq > chisq_lead)
      ][!is.na(brkp)][order(count_distr)][1,]
    } else if (bin_nrow > bin_num_limit) {
      rm_brkp = binning_chisq[, merge_tolead := FALSE][order(chisq, count)][1,]
    }
    # groupby brkp: absorb the chosen bin into its lead/lag neighbor
    shift_type = ifelse(rm_brkp[1,merge_tolead], 'lead', 'lag')
    binning_chisq = binning_chisq[
      ,brkp2 := shift(brkp,type=shift_type)
      ][brkp == rm_brkp[1,brkp], brkp := brkp2
      ][,.(variable=unique(variable), bin=paste0(bin, collapse = "%,%"), good=sum(good), bad=sum(bad)), by=brkp
      ]#[, badprob:=bad/(good+bad)]
    # update
    ## add chisq to new binning data frame
    binning_chisq = add_chisq(binning_chisq)
    ## param
    bin_chisq_min = binning_chisq[, min(chisq, na.rm = TRUE)]
    bin_count_distr_min = binning_chisq[!is.na(brkp), min((good+bad)/dtm_rows)]
    bin_nrow = binning_chisq[,.N]
  }
  # format bin # collapse "[a,b)%,%[b,c)" labels into "[a,c)" for numerics
  if (is.numeric(dtm[,value])) {
    binning_chisq = binning_chisq[grepl("%,%",bin), bin := sub("^(\\[.+?,).+,(.+?\\))$", "\\1\\2", bin)]
  }
  return(list(binning_sv=binning_sv, binning=binning_chisq))
}
# required in woebin2 # return equal binning, supports numerical variables only
# Equal-frequency / equal-width binning for one numeric x variable.
#
# dtm: data.table with columns y, variable, value (a single x variable).
# method: 'freq' cuts at sample quantile positions; 'width' cuts at
# bin_num_limit equally spaced points between min(value) and max(value).
# breaks / spl_val: user breaks and special values; rows matching special
# values are split off into binning_sv before binning.
# Returns list(binning_sv=..., binning=...); binning is NULL when every
# row fell into special values.
woebin2_equal = function(dtm, init_count_distr=0.02, count_distr_limit=0.05, stop_limit=0.1, bin_num_limit=8, breaks=NULL, spl_val=NULL, method='freq') {
# silence R CMD check notes for data.table NSE column names
count = value = group = . = minv = maxv = bin = y = variable = bad = good = badprob = NULL
# dtm $ binning_sv: split special-value rows into a separate binning table
dtm_binsv_list = dtm_binning_sv(dtm, breaks, spl_val)
dtm = dtm_binsv_list$dtm
binning_sv = dtm_binsv_list$binning_sv
# nothing left to bin once special values are removed
if (is.null(dtm) || dtm[,.N]==0) return(list(binning_sv=binning_sv, binning=NULL))
# dt_sl = dtm[,.(label=y, datset=variable, score=value)]
# dtm = dt_sl[,.(y=label, variable=datset, value=score)]
# breaks
if (bin_num_limit >= dtm[, length(unique(value))] ) {
# fewer distinct values than requested bins: one break per distinct value
brkp = dtm[order(value)][, unique(value)]
brkp = c(-Inf, brkp[-1], Inf)
} else {
if (method == 'freq') {
# equal frequency: split sorted rows into bin_num_limit groups of
# (approximately) equal size and take each group's first value as a break
brkp = copy(dtm)[order(value)
][, group := ceiling(.I/(.N/bin_num_limit))
][, .(value=value[1]), by = group
][, c(-Inf, value[-1], Inf)]
} else if (method == 'width') {
# equal width: evenly spaced breaks over the observed value range
minmax = dtm[, .(maxv = max(value), minv = min(value))]
brkp = seq(minmax[,minv], minmax[,maxv], length.out = bin_num_limit+1)
brkp = c(-Inf, brkp[-c(1, length(brkp))], Inf)
}
}
# cut values at the breaks (left-closed intervals) and aggregate good/bad
# counts per bin; brkp column holds each bin's numeric left endpoint
binning_equal = dtm[, bin := cut(value, unique(brkp), right = FALSE, dig.lab = 10, ordered_result = F)
][, .(good = sum(y==0), bad = sum(y==1), count = .N), keyby = .(variable, bin)
][, `:=`(brkp = as.numeric( sub("^\\[(.*),.+", "\\1", bin)), badprob = bad/(good+bad))
][, .(variable, bin, brkp, count, good, bad, badprob)]
# post-process: merge empty bins, bins with zero good/bad, and bins whose
# count distribution is below count_distr_limit
binning_equal = check_empty_bins(dtm, binning_equal)
binning_equal = check_zero_goodbad(dtm, binning_equal)
binning_equal = check_count_distri(dtm, binning_equal, count_distr_limit)
return(list(binning_sv=binning_sv, binning=binning_equal))
}
# required in woebin2 # # format binning output
# Format a raw binning table into the standard woebin output columns.
#
# binning: data.table with at least variable, bin, good, bad (count is
# derived when absent). Adds badprob, woe, bin_iv, total_iv, count_distr,
# a breaks column (right endpoint extracted from the bin label), and the
# is_special_values flag; NA bin labels are renamed to "missing" and the
# missing bin, if present, is moved to the first row.
binning_format = function(binning) {
# silence R CMD check notes for data.table NSE column names
. = bad = badprob = bin = bin_iv = good = total_iv = variable = woe = is_sv = count = NULL
# required columns in input binning: variable, bin, good, bad
if (!('count' %in% names(binning))) binning[, count := good+bad]
binning = binning[
, badprob:=bad/(good+bad)
][, woe := lapply(.SD, woe_01, bad), .SDcols = "good"   # per-bin weight of evidence
][, bin_iv := lapply(.SD, miv_01, bad), .SDcols = "good"  # per-bin information value
][, total_iv := sum(bin_iv)
][, bin := ifelse(is.na(bin) | bin=="NA", "missing", as.character(bin)) # replace NA by missing
][, .(variable, bin, count, count_distr=(good+bad)/sum(good+bad), good, bad, badprob, woe, bin_iv, total_iv, breaks = sub("^\\[(.*), *(.*)\\)((%,%missing)*)", "\\2\\3", bin), is_special_values=is_sv)]
# move missing from last row to first
if ( "missing" %in% binning$bin ) {
binning = rbind(binning[bin=="missing"], binning[bin != "missing"])
}
return(binning)
}
# woebin2
# This function provides woe binning for only two columns (one x and one y) data frame.
# woebin2
# This function provides woe binning for only two columns (one x and one y) data frame.
#
# Dispatches to one of the concrete binning strategies:
#  - woebin2_breaks   when user breaks are supplied,
#  - woebin2_init_bin when stop_limit == "N" (initial binning only),
#  - woebin2_tree / woebin2_chimerge / woebin2_equal otherwise, per `method`.
# Each strategy returns list(binning_sv, binning); the two pieces are bound
# together, tagged with is_sv (TRUE for special-value bins), and formatted
# via binning_format().
woebin2 = function(dtm, breaks=NULL, spl_val=NULL, init_count_distr=0.02, count_distr_limit=0.05, stop_limit=0.1, bin_num_limit=8, method="tree") {
# silence R CMD check notes for data.table NSE column names
. = bad = badprob = bin = bin_iv = good = total_iv = variable = woe = is_sv = NULL
# binning
# NOTE(review): scalar `&` here works because both operands are length-1,
# but `&&` would be the idiomatic choice — confirm before changing.
if (!anyNA(breaks) & !is.null(breaks)) {
# 1.return binning if breaks provided
bin_list = woebin2_breaks(dtm=dtm, breaks=breaks, spl_val=spl_val)
} else {
if (stop_limit == "N") {
# binning of initial & specialvalues
bin_list = woebin2_init_bin(dtm, init_count_distr=init_count_distr, breaks=breaks, spl_val=spl_val)
} else {
if (method == "tree") {
# 2.tree-like optimal binning
bin_list = woebin2_tree(dtm, init_count_distr, count_distr_limit, stop_limit, bin_num_limit, breaks=breaks, spl_val=spl_val)
} else if (method == "chimerge") {
# 2.chimerge optimal binning
bin_list = woebin2_chimerge(dtm, init_count_distr, count_distr_limit, stop_limit, bin_num_limit, breaks=breaks, spl_val=spl_val)
} else if (method %in% c('freq','width')) {
# 3. in equal freq or width
bin_list = woebin2_equal(dtm, init_count_distr, count_distr_limit, stop_limit, bin_num_limit, breaks=breaks, spl_val=spl_val, method = method)
}
}
}
# # binding binning_sv and binning
# NOTE(review): when one element of bin_list is NULL, rbindlist drops it and
# is_sv is filled with the surviving element's name; presumably bin_list
# never has both elements NULL here — verify against the strategy functions.
if (any(sapply(bin_list, is.null))) {
binning = rbindlist(bin_list)[, is_sv := names(bin_list)[!sapply(bin_list, is.null)]]
} else {
binning = rbindlist(bin_list, use.names = TRUE, fill = TRUE, idcol = 'is_sv')
}
# is_sv becomes logical: TRUE for rows that came from binning_sv
binning = binning[, is_sv := is_sv == 'binning_sv']
return(binning_format(binning))
}
# convert bins to breaks_list
# Convert a binning result back into a breaks_list.
#
# bins: binning result from woebin() (list of data.frames, data.frame, or
#       data.table); dt is the original data, used to look up each
#       variable's class (numeric breaks are emitted unquoted, character
#       breaks quoted).
# to_string: when TRUE, return (or save) the breaks_list as runnable R code
#       instead of a data.table of per-variable break strings.
# save_name: file-name stem; when given (with to_string=TRUE), the code is
#       written to "<save_name>_<timestamp>.R" and NULL is returned.
bins_to_breaks = function(bins, dt, to_string=FALSE, save_name=NULL) {
# silence R CMD check notes for data.table NSE column names
.= bin= bin2= is_special_values= variable= x_breaks= x_class = NULL
# bins # if (is.list(bins)) rbindlist(bins)
if (!is.data.table(bins)) {
if (is.data.frame(bins)) {
bins = setDT(bins)
} else {
bins = rbindlist(bins)
}
}
# x variables
xs_all = bins[,unique(variable)]
# class of variables
# NOTE(review): class() can return length > 1 (e.g. ordered factors);
# presumably such columns never reach this point — verify upstream checks.
vars_class = data.table(
variable = xs_all,
x_class = dt[,sapply(.SD, class), .SDcols = xs_all]
)
# breaks: the right endpoint of each bin label (keeping a trailing
# "%,%missing" suffix), excluding -Inf/Inf/missing pseudo-breaks
bins_breakslist = bins[
, bin2 := sub("^\\[(.*), *(.*)\\)((%,%missing)*)", "\\2\\3", bin)
][!(bin2 %in% c("-Inf","Inf","missing") & !is_special_values)
][vars_class, on="variable"
][, .(
x_breaks = paste(ifelse(x_class=="numeric", bin2, paste0("\"",bin2,"\"")), collapse=", "),
x_class=unique(x_class)
), by=variable]
if (to_string) {
# render as a ready-to-source breaks_list assignment
bins_breakslist = paste0(bins_breakslist[, paste0(variable, "=c(", x_breaks, ")")], collapse = ", \n ")
bins_breakslist = paste0(c("breaks_list=list(", bins_breakslist, ")"), collapse = "\n ")
if (!is.null(save_name)) {
save_name = sprintf('%s_%s.R', save_name, format(Sys.time(),"%Y%m%d_%H%M%S"))
writeLines(bins_breakslist, save_name)
cat(sprintf('[INFO] The breaks_list is saved as %s\n', save_name))
return()
}
}
return(bins_breakslist)
}
# @param init_count_distr The minimum percentage of initial binning class number over total. Accepted range: 0.01-0.2; default is 0.02, which means initial cut into 50 fine bins for continuous variables.
#' WOE Binning
#'
#' \code{woebin} generates optimal binning for numerical, factor and categorical variables using methods including tree-like segmentation or chi-square merge. \code{woebin} can also customize breakpoints if `breaks_list` is provided. The default `woe` is defined as ln(Bad_i/Good_i). If you prefer ln(Good_i/Bad_i), please set the argument `positive` as negative value, such as '0' or 'good'. If there is a zero frequency class when calculating woe, the zero will be replaced by 0.99 to make the woe calculable.
#'
#' @param dt A data frame with both x (predictor/feature) and y (response/label) variables.
#' @param y Name of y variable.
#' @param x Name of x variables. Default is NULL. If x is NULL, then all columns except y and var_skip are counted as x variables.
#' @param var_skip Name of variables that will skip for binning. Default is NULL.
#' @param breaks_list List of break points, default is NULL. If it is not NULL, variable binning will based on the provided breaks.
#' @param special_values the values specified in special_values will be in separate bins. Default is NULL.
#' @param stop_limit Stop binning segmentation when information value gain ratio less than the stop_limit if using tree method; or stop binning merge when the chi-square of each neighbor bins are larger than 'qchisq(1-stoplimit, 1)' if using chimerge method. Accepted range: 0-0.5; default is 0.1.
#' @param count_distr_limit The minimum count distribution percentage. Accepted range: 0.01-0.2; default is 0.05.
#' @param bin_num_limit Integer. The maximum number of binning. Default is 8.
#' @param positive Value of positive class, default "bad|1".
#' @param no_cores Number of CPU cores for parallel computation. Defaults NULL. If no_cores is NULL, the no_cores will set as 1 if length of x variables less than 10, and will set as the number of all CPU cores if the length of x variables greater than or equal to 10.
#' @param print_step A non-negative integer. Default is 1. If print_step>0, print variable names by each print_step-th iteration. If print_step=0 or no_cores>1, no message is print.
#' @param method Optimal binning method, it should be "tree" or "chimerge". Default is "tree".
#' @param save_breaks_list A string. The file name to save breaks_list. Default is None.
#' @param ignore_const_cols Logical. Ignore constant columns. Default is TRUE.
#' @param ignore_datetime_cols Logical. Ignore datetime columns. Default is TRUE.
#' @param check_cate_num Logical. Check whether the number of unique values in categorical columns larger than 50. It might make the binning process slow if there are too many unique categories. Default is TRUE.
#' @param replace_blank_na Logical. Replace blank values with NA. Default is TRUE.
#' @param ... Additional parameters.
#'
#' @return A list of data frames include binning information for each x variables.
#'
#' @seealso \code{\link{woebin_ply}}, \code{\link{woebin_plot}}, \code{\link{woebin_adj}}
#'
#' @examples
#' # load germancredit data
#' data(germancredit)
#'
#' # Example I
#' # binning of two variables in germancredit dataset
#' # using tree method
#' bins2_tree = woebin(germancredit, y="creditability",
#' x=c("credit.amount","housing"), method="tree")
#' bins2_tree
#'
#' \donttest{
#' # using chimerge method
#' bins2_chi = woebin(germancredit, y="creditability",
#' x=c("credit.amount","housing"), method="chimerge")
#'
#' # binning in equal freq/width # only supports numerical variables
#' numeric_cols = c("duration.in.month", "credit.amount",
#' "installment.rate.in.percentage.of.disposable.income", "present.residence.since",
#' "age.in.years", "number.of.existing.credits.at.this.bank",
#' "number.of.people.being.liable.to.provide.maintenance.for")
#' bins_freq = woebin(germancredit, y="creditability", x=numeric_cols, method="freq")
#' bins_width = woebin(germancredit, y="creditability", x=numeric_cols, method="width")
#'
#' # y can be NULL if no label column in dataset
#' bins_freq_noy = woebin(germancredit, y=NULL, x=numeric_cols)
#'
#' # Example II
#' # binning of the germancredit dataset
#' bins_germ = woebin(germancredit, y = "creditability")
#' # converting bins_germ into a data frame
#' # bins_germ_df = data.table::rbindlist(bins_germ)
#'
#' # Example III
#' # customizing the breakpoints of binning
#' library(data.table)
#' dat = rbind(
#' germancredit,
#' data.table(creditability=sample(c("good","bad"),10,replace=TRUE)),
#' fill=TRUE)
#'
#' breaks_list = list(
#' age.in.years = c(26, 35, 37, "Inf%,%missing"),
#' housing = c("own", "for free%,%rent")
#' )
#'
#' special_values = list(
#' credit.amount = c(2600, 9960, "6850%,%missing"),
#' purpose = c("education", "others%,%missing")
#' )
#'
#' bins_cus_brk = woebin(dat, y="creditability",
#' x=c("age.in.years","credit.amount","housing","purpose"),
#' breaks_list=breaks_list, special_values=special_values)
#'
#' # Example IV
#' # save breaks_list as a R file
#' bins2 = woebin(germancredit, y="creditability",
#' x=c("credit.amount","housing"), save_breaks_list='breaks_list')
#'
#' }
#'
#' @import data.table foreach
#' @importFrom stats IQR quantile setNames
#' @importFrom doParallel registerDoParallel stopImplicitCluster
#' @importFrom parallel detectCores
#' @export
# WOE Binning (user-facing documentation in the roxygen block above).
# Orchestrates binning for all x variables: resolves deprecated `...`
# aliases, validates arguments, cleans the input data, then runs woebin2()
# per variable either serially or in parallel (foreach/doParallel).
# Returns a named list of binning data.tables; variables that error out
# are dropped with a warning.
woebin = function(dt, y, x=NULL, var_skip=NULL, breaks_list=NULL, special_values=NULL, stop_limit=0.1, count_distr_limit=0.05, bin_num_limit=8, positive="bad|1", no_cores=NULL, print_step=0L, method="tree", save_breaks_list=NULL, ignore_const_cols=TRUE, ignore_datetime_cols=TRUE, check_cate_num=TRUE, replace_blank_na=TRUE, ...) {
# start time
start_time = proc.time()
# global variable (foreach loop index)
i = NULL
# arguments ------
# print_info
print_info = list(...)[['print_info']]
if (is.null(print_info)) print_info = TRUE
# init_count_distr; min_perc_fine_bin is the deprecated alias.
# fix: the alias was previously read from the 'init_count_distr' key,
# so a user-supplied min_perc_fine_bin was silently ignored.
min_perc_fine_bin = list(...)[['min_perc_fine_bin']]
init_count_distr = list(...)[['init_count_distr']]
if (is.null(init_count_distr)) {
init_count_distr <- ifelse(!is.null(min_perc_fine_bin), min_perc_fine_bin, 0.02)
}
# count_distr_limit; min_perc_coarse_bin is the deprecated alias
min_perc_coarse_bin = list(...)[['min_perc_coarse_bin']]
if (!is.null(min_perc_coarse_bin)) count_distr_limit = min_perc_coarse_bin
# bin_num_limit; max_num_bin is the deprecated alias
max_num_bin = list(...)[['max_num_bin']]
if (!is.null(max_num_bin)) bin_num_limit = max_num_bin
# print info
if (print_info) cat('[INFO] creating woe binning ... \n')
# set dt as data.table; copy so the caller's object is not modified by reference
dt = setDT(copy(dt)) #copy(setDT(dt))
if (!is.null(x)) dt = dt[, c(y,x), with=FALSE]
# check y
if (!is.null(y)) dt = check_y(dt, y, positive)
# remove constant columns
if (ignore_const_cols) dt = check_const_cols(dt)
# remove date/time columns
if (ignore_datetime_cols) dt = check_datetime_cols(dt)
# check categorical columns' unique values
if (check_cate_num) check_cateCols_uniqueValues(dt, var_skip)
# replace blank with na
if (replace_blank_na) dt = rep_blank_na(dt)
# x variable names
xs = x_variable(dt, y, x, var_skip)
xs_len = length(xs)
# print_step
print_step = check_print_step(print_step)
# breaks_list
breaks_list = check_breaks_list(breaks_list, xs)
# special_values
special_values = check_special_values(special_values, xs)
# stop_limit range; the special value "N" skips binning optimization
# (see woebin2), so it bypasses the numeric range check. The is.numeric
# test runs first so non-numeric inputs never hit the comparisons.
if (!identical(stop_limit, "N") &&
    (!is.numeric(stop_limit) || stop_limit < 0 || stop_limit > 0.5)) {
warning("Incorrect parameter specification; accepted stop_limit parameter range is 0-0.5. Parameter was set to default (0.1).")
stop_limit = 0.1
}
# init_count_distr range
if (!is.numeric(init_count_distr) || init_count_distr < 0.01 || init_count_distr > 0.2) {
warning("Incorrect parameter specification; accepted init_count_distr parameter range is 0.01-0.2. Parameter was set to default (0.02).")
init_count_distr = 0.02
}
# count_distr_limit range
if (!is.numeric(count_distr_limit) || count_distr_limit < 0.01 || count_distr_limit > 0.2) {
warning("Incorrect parameter specification; accepted count_distr_limit parameter range is 0.01-0.2. Parameter was set to default (0.05).")
count_distr_limit = 0.05
}
# bin_num_limit
if (!is.numeric(bin_num_limit)) {
warning("Incorrect inputs; bin_num_limit should be numeric variable. Parameter was set to default (8).")
bin_num_limit = 8
}
# method
if (!(method %in% c("tree", "chimerge", 'freq', 'width'))) {
warning("Incorrect inputs; method should be tree or chimerge. Parameter was set to default (tree).")
method = "tree"
}
# without a label column only unsupervised (freq/width) binning is possible
if (is.null(y) && !(method %in% c('freq', 'width'))) method = 'freq'
# binning ------
# parallelize only when there are enough variables to amortize the overhead
# https://www.r-bloggers.com/how-to-go-parallel-in-r-basics-tips/
if (is.null(no_cores) || no_cores < 1) {
no_cores = ifelse(xs_len < 10, 1, detectCores(logical=F))
}
bins = list()
# replace y by its label vector (or NA when unsupervised)
if (!is.null(y)) {
y = dt[[y]]
} else y = NA
if (no_cores == 1) {
for (i in seq_len(xs_len)) {
x_i = xs[i]
# print xs
if (print_step > 0 && i %% print_step == 0) cat(paste0(format(c(i,xs_len)),collapse = "/"), x_i,"\n")
# woebining on one variable; try() so one bad column cannot abort the run
bins[[x_i]] <-
try(do.call(woebin2, args = list(
dtm = data.table(y=y, variable=x_i, value=dt[[x_i]]),
breaks = breaks_list[[x_i]],
spl_val = special_values[[x_i]],
init_count_distr = init_count_distr,
count_distr_limit= count_distr_limit,
stop_limit = stop_limit,
bin_num_limit = bin_num_limit,
method = method
)), silent = TRUE)
}
} else {
registerDoParallel(no_cores)
# run
bins <-
foreach(
i = seq_len(xs_len),
.combine = list,
.multicombine = TRUE,
.maxcombine = xs_len+1,
.inorder = FALSE,
.errorhandling = "pass",
.final = function(bs) {
if (xs_len==1) bs = list(bs)
setNames(bs, xs)
},
.export = c('dt', 'xs', 'y', 'breaks_list', 'special_values', 'init_count_distr', 'count_distr_limit', 'stop_limit', 'bin_num_limit', 'method')
) %dopar% {
x_i = xs[i]
# woebining on one variable
try(do.call(woebin2, args = list(
dtm = data.table(y=y, variable=x_i, value=dt[[x_i]]),
breaks = breaks_list[[x_i]],
spl_val = special_values[[x_i]],
init_count_distr = init_count_distr,
count_distr_limit= count_distr_limit,
stop_limit = stop_limit,
bin_num_limit = bin_num_limit,
method = method
)), silent = TRUE)
}
# finish
stopImplicitCluster()
}
# check errors in binning and drop failed variables with a warning
error_variables = names(bins)[which(sapply(bins, function(x) inherits(x, 'try-error')))]
if (length(error_variables) > 0) {
warning(sprintf('The following columns are removed from binning results due to errors:\n%s', paste0(error_variables, collapse=', ')))
bins = bins[setdiff(names(bins), error_variables)]
}
# running time
rs = proc.time() - start_time
# hms
if (rs[3] > 10 && print_info) cat(sprintf("[INFO] Binning on %s rows and %s columns in %s",nrow(dt),ncol(dt),sec_to_hms(rs[3])),"\n")
# save breaks_list
if (!is.null(save_breaks_list)) bins_to_breaks(bins, dt, to_string=TRUE, save_name=save_breaks_list)
return(bins)
}
#' @import data.table
# Replace one x column with its woe (or points/bin) values.
#
# dtx: single-column data frame holding variable x_i.
# binx: binning rows for x_i; merged bin labels ("a%,%b") are exploded so
#       every raw value maps to its bin's woe/points.
# woe_points: name of the binx column to map onto ("woe" or "points"),
#       evaluated via parse() inside the data.table expression.
# Returns dtx with x_i replaced by a column named "<x_i>_<woe_points>",
# original row order preserved.
woepoints_ply1 = function(dtx, binx, x_i, woe_points) {
# woe_points: "woe" "points"
. = V1 = bin = woe = NULL
# binx: one row per raw value (V1), keeping the originating bin label
binx = binx[
, bin:=as.character(bin)
][,.(unlist(strsplit(bin, "%,%", fixed=TRUE)),
eval(parse(text = woe_points)) ), by=bin]
# dtx
## cut numeric variable; special values (no "[" in label) pass through as-is
if ( is.numeric(dtx[[x_i]]) ) {
binx_sv = binx[!grepl("\\[",V1)]
binx_other = binx[grepl("\\[",V1)]
dtx[[x_i]] = ifelse(
dtx[[x_i]] %in% binx_sv$V1,
dtx[[x_i]],
as.character(cut(dtx[[x_i]], unique(c(-Inf, binx_other[bin != "missing", as.numeric(sub("^\\[(.*),.+", "\\1", V1))], Inf)), right = FALSE, dig.lab = 10, ordered_result = FALSE))
)
}
## to character, na to missing
dtx[[x_i]] = as.character(dtx[[x_i]])
dtx[[x_i]] = ifelse(is.na(dtx[[x_i]]), "missing", dtx[[x_i]])
## add rowid column so the original order can be restored after the merge
dtx = setDT(dtx)[, rowid := .I]
# rename binx
setnames(binx, c("bin", x_i, paste(x_i, woe_points, sep="_")))
# merge values onto their woe/points, then restore row order and drop helpers
dtx_suffix = merge(setDF(dtx), setDF(binx), by=x_i, all.x = TRUE)
dtx_suffix = setDT(dtx_suffix)[order(rowid)][, (c("rowid", "bin", x_i)) := NULL]
return(dtx_suffix)
}
#' WOE Transformation
#'
#' \code{woebin_ply} converts original input data into woe values based on the binning information generated from \code{woebin}.
#'
#' @param dt A data frame.
#' @param bins Binning information generated from \code{woebin}.
#' @param no_cores Number of CPU cores for parallel computation. Defaults NULL. If no_cores is NULL, the no_cores will set as 1 if length of x variables less than 10, and will set as the number of all CPU cores if the length of x variables greater than or equal to 10.
#' @param print_step A non-negative integer. Default is 1. If print_step>0, print variable names by each print_step-th iteration. If print_step=0 or no_cores>1, no message is print.
#' @param replace_blank_na Logical. Replace blank values with NA. Default is TRUE. This argument should be the same with \code{woebin}'s.
#' @param ... Additional parameters.
#'
#' @return A data frame with columns for variables converted into woe values.
#'
#' @seealso \code{\link{woebin}}, \code{\link{woebin_plot}}, \code{\link{woebin_adj}}
#'
#' @examples
#' # load germancredit data
#' data(germancredit)
#'
#' # Example I
#' dt = germancredit[, c("creditability", "credit.amount", "purpose")]
#'
#' # binning for dt
#' bins = woebin(dt, y = "creditability")
#'
#' # converting original value to woe
#' dt_woe = woebin_ply(dt, bins=bins)
#' str(dt_woe)
#'
#' \donttest{
#' # Example II
#' # binning for germancredit dataset
#' bins_germancredit = woebin(germancredit, y="creditability")
#'
#' # converting the values in germancredit to woe
#' # bins is a list which generated from woebin()
#' germancredit_woe = woebin_ply(germancredit, bins_germancredit)
#'
#' # bins is a data frame
#' bins_df = data.table::rbindlist(bins_germancredit)
#' germancredit_woe = woebin_ply(germancredit, bins_df)
#'
#' # return value is bin but not woe
#' germancredit_bin = woebin_ply(germancredit, bins_germancredit, value = 'bin')
#' }
#'
#' @import data.table
#' @export
#'
woebin_ply = function(dt, bins, no_cores=NULL, print_step=0L, replace_blank_na=TRUE, ...) {
# start time
start_time = proc.time()
# print info
print_info = list(...)[['print_info']]
if (is.null(print_info)) print_info = TRUE
if (print_info) cat('[INFO] converting into woe values ... \n')
# value: what to substitute for each raw value — 'woe' (default) or 'bin'
value = list(...)[['value']]
if (is.null(value) || !(value %in% c('woe', 'bin'))) value = 'woe'
# global variables or functions
. = V1 = bin = variable = woe = i = databc_colomun_placeholder = NULL
# set dt as data.table; copy so the caller's object is not modified by reference
dt = setDT(copy(dt))
# # remove date/time col
# dt = rmcol_datetime_unique1(dt)
# replace "" by NA
if (replace_blank_na) dt = rep_blank_na(dt)
# print_step
print_step = check_print_step(print_step)
# bins # if (is.list(bins)) rbindlist(bins)
if (inherits(bins, 'list') && all(sapply(bins, is.data.frame))) {bins = rbindlist(bins)}
bins = setDT(bins)
# x variables: only those present in both bins and dt are transformed
xs_bin = bins[,unique(variable)]
xs_dt = names(dt)
xs = intersect(xs_bin, xs_dt)
# loop on x variables
xs_len = length(xs)
# initial data set: all non-binned columns plus a placeholder column whose
# name is guaranteed not to collide with any x variable
n = 0
while (paste0('dat_col_placeholder',n) %in% xs) n = n+1
dt_init = copy(dt)[, (paste0('dat_col_placeholder',n)) := 1][,(xs) := NULL]
# the placeholder column will be removed in the result; it exists in case
# dt_init would otherwise be an empty data frame (cbind needs >= 1 column)
# loop on xs # https://www.r-bloggers.com/how-to-go-parallel-in-r-basics-tips/
# NOTE(review): unlike woebin(), no_cores < 1 is not normalized here — verify
if (is.null(no_cores)) {
no_cores = ifelse(xs_len < 10, 1, detectCores(logical=F))
}
if (no_cores == 1) {
dat = dt_init
for (i in 1:xs_len) {
x_i = xs[i]
# print x
if (print_step > 0 & i %% print_step == 0) cat(paste0(format(c(i,xs_len)),collapse = "/"), x_i,"\n")
binx = bins[variable==x_i]
dtx = dt[, x_i, with=FALSE]
# append the converted column to the accumulating result
dat = cbind(dat, woepoints_ply1(dtx, binx, x_i, woe_points=value))
}
} else {
registerDoParallel(no_cores)
# run: each worker converts one column; results are cbind-ed onto dt_init
dat <-
foreach(
i = 1:xs_len,
.combine=cbind,
.init = dt_init,
.inorder = FALSE,
.errorhandling = "pass",
.export = c('dt', 'bins', 'xs')
) %dopar% {
x_i = xs[i]
binx = bins[variable==x_i]
dtx = dt[, x_i, with=FALSE]
woepoints_ply1(dtx, binx, x_i, woe_points=value)
}
# finish
stopImplicitCluster()
}
# running time
rs = proc.time() - start_time
# hms
if (rs[3] > 10 & print_info) cat(sprintf("[INFO] Woe transformating on %s rows and %s columns in %s",nrow(dt),xs_len,sec_to_hms(rs[3])),"\n")
# drop the placeholder column before returning
return(dat[, (paste0('dat_col_placeholder',n)) := NULL])
}
# required in woebin_plot
#' @import data.table ggplot2
# Build the binning plot for a single variable: stacked good/bad count
# bars on the left axis and the bad-probability line on a scaled
# secondary (right) axis.
#
# bin: one variable's binning data.table (woebin output rows).
# title: optional prefix for the plot title.
# show_iv: when TRUE, append the variable's total IV to the title.
# Returns a ggplot object.
plot_bin = function(bin, title, show_iv) {
# global variables or functions
. = bad = badprob = badprob2 = count = count_distr = count_distr2 = count_num = good = goodbad = total_iv = value = variable = woe = NULL
# data
## y_right_max: headroom for the bad-probability axis, rounded up to an
## even tenth so the secondary axis gets tidy labels
y_right_max = ceiling(max(bin$badprob, na.rm=T)*10)
if (y_right_max %% 2 ==1) y_right_max=y_right_max+1
if (y_right_max - max(bin$badprob, na.rm=T)*10 <= 0.3) y_right_max = y_right_max+2
y_right_max = y_right_max/10
if (y_right_max>1 || y_right_max<=0 || is.na(y_right_max) || is.null(y_right_max)) y_right_max=1
## y_left_max: headroom for the count-distribution axis
y_left_max = ceiling(max(bin$count_distr, na.rm=T)*10)/10
if (y_left_max>1 || y_left_max<=0 || is.na(y_left_max) || is.null(y_left_max)) y_left_max=1
## data set; badprob2 rescales badprob onto the left axis so the line and
## bars can share one y scale (undone by sec_axis below)
bin = setDT(bin)
dat = bin[,.(
variable, bin, count_num=count, count_distr2=count_distr, count_distr, good=good/sum(count), bad=bad/sum(count), badprob, woe
)][, `:=`(
bin = ifelse(is.na(bin), "NA", bin),
badprob2 = badprob*(y_left_max/y_right_max),
badprob = round(badprob,4),
rowid = .I
)][, bin := factor(bin, levels = bin)]
# long format for the stacked bars; bad drawn below good
dat_melt = melt(dat, id.vars = c("variable", "bin","rowid"), measure.vars =c("good", "bad"), variable.name = "goodbad")[
,goodbad:=factor(goodbad, levels=c( "bad", "good"))
]
# title
if (!is.null(title)) title = paste0(title, "-")
if (show_iv) {
title_string = paste0(title, dat[1, variable]," (iv:",bin[1,round(total_iv,4)],")")
} else {
title_string = paste0(title, dat[1, variable])
}
# plot
ggplot() +
# geom_text(aes(label="@shichen.name/getpedr", x=dat[, x[.N], by=symbol][,V1[1]], y=Inf), vjust = -0.5, hjust = 1, color = "#F0F0F0") +
# coord_cartesian(clip = 'off') +
geom_bar(data=dat_melt, aes(x=bin, y=value, fill=goodbad), stat="identity") +
geom_text(data=dat, aes(x = bin, y = count_distr2, label = paste0(round(count_distr2*100, 1), "%, ", count_num) ), vjust = 0.5) +
geom_line(data=dat, aes(x = rowid, y = badprob2), colour = "blue") +
geom_point(data=dat, aes(x = rowid, y=badprob2), colour = "blue", shape=21, fill="white") +
geom_text(data=dat, aes(x = rowid, y = badprob2, label = paste0(round(badprob*100, 1), "%")), colour="blue", vjust = -0.5) +
scale_y_continuous(limits = c(0,y_left_max), sec.axis = sec_axis(~./(y_left_max/y_right_max), name = "Bad probability")) +
labs(title = title_string, x=NULL, y="Bin count distribution", fill=NULL) +
theme_bw() +
theme(
legend.position="bottom", legend.direction="horizontal",
axis.title.y.right = element_text(colour = "blue"),
axis.text.y.right = element_text(colour = "blue",angle=90, hjust = 0.5),
axis.text.y = element_text(angle=90, hjust = 0.5) )
}
#' WOE Binning Visualization
#'
#' \code{woebin_plot} creates plots of count distribution and bad probability for each bin. The binning information is generated by \code{woebin}.
#'
#' @name woebin_plot
#' @param bins A list of data frames. Binning information generated by \code{woebin}.
#' @param x Name of x variables. Default is NULL. If x is NULL, then all columns except y are counted as x variables.
#' @param title String added to the plot title. Default is NULL.
#' @param show_iv Logical. Default is TRUE, which means show information value in the plot title.
#'
#' @return A list of binning graphics.
#'
#' @seealso \code{\link{woebin}}, \code{\link{woebin_ply}}, \code{\link{woebin_adj}}
#'
#' @examples
#' # Load German credit data
#' data(germancredit)
#'
#' # Example I
#' bins1 = woebin(germancredit, y="creditability", x="credit.amount")
#'
#' p1 = woebin_plot(bins1)
#' print(p1)
#'
#' \donttest{
#' # Example II
#' bins = woebin(germancredit, y="creditability")
#' plotlist = woebin_plot(bins)
#' print(plotlist$credit.amount)
#'
#' # # save binning plot
#' # for (i in 1:length(plotlist)) {
#' # ggplot2::ggsave(
#' # paste0(names(plotlist[i]), ".png"), plotlist[[i]],
#' # width = 15, height = 9, units="cm" )
#' # }
#' }
#'
#' @import data.table ggplot2
#' @export
#'
woebin_plot = function(bins, x=NULL, title=NULL, show_iv = TRUE) {
# silence R CMD check note for the data.table NSE column name
variable = NULL
# accept a list of per-variable binning data.frames, a plain data.frame,
# or a data.table, and normalize to a single data.table
if (!is.data.table(bins)) {
if (is.data.frame(bins)) bins = setDT(bins) else bins = rbindlist(bins)
}
# default to plotting every variable present in the binning result
xs = if (is.null(x)) bins[, unique(variable)] else x
# build one ggplot per variable, keyed by variable name
plotlist = list()
for (xvar in xs) {
plotlist[[xvar]] = plot_bin(bins[variable == xvar], title, show_iv)
}
return(plotlist)
}
# print basic information in woebin_adj
# Print the interactive-adjustment summary for variable i of xs_adj:
# class, summary(), a value table (or histogram for numeric columns with
# many distinct values), the current breaks, and the binning plot.
# Used only by woebin_adj(); output is console/graphics side effects.
woebin_adj_print_basic_info = function(i, xs_adj, bins, dt, bins_breakslist) {
x_i = xs_adj[i]
xs_len = length(xs_adj)
# silence R CMD check notes for data.table NSE column names
variable = x_breaks = NULL
bin = bins[variable==x_i]
cat("--------", paste0(i, "/", xs_len), x_i, "--------\n")
## class
cat(paste0("> class(",x_i,"): "),"\n",class(dt[[x_i]]),"\n","\n")
## summary
cat(paste0("> summary(",x_i,"): "),"\n")
print(summary(dt[[x_i]]))
cat("\n")
## table for low-cardinality or non-numeric columns; histogram otherwise
if (length(table(dt[[x_i]])) < 10 || !is.numeric(dt[[x_i]])) {
cat(paste0("> table(",x_i,"): "))
print(table(dt[[x_i]]))
cat("\n")
} else {
if ( is.numeric(dt[[x_i]])) {
ht = hist(dt[[x_i]], plot = FALSE)
plot(ht, main = x_i, xlab = NULL)
}
}
## current breaks
breaks_bin = bins_breakslist[variable == x_i, x_breaks]
cat("> Current breaks: \n", breaks_bin,"\n \n")
## woebin plotting
plist = woebin_plot(bin)
print(plist[[1]])
}
# plot adjusted binning in woebin_adj
# Re-bin one variable with user-entered breaks and plot the result.
#
# Builds a woebin() call as text (breaks and special values are spliced in
# as literal code) and evaluates it; prints the resulting breaks and plot.
# Returns the breaks string, falling back to the recomputed breaks when
# the user entered none.
# NOTE(review): eval(parse(...)) is fragile — breaks/sv_i are interpolated
# unescaped into R code; presumably inputs are trusted console entries.
woebin_adj_break_plot = function(dt, y, x_i, breaks, stop_limit, sv_i, method) {
bin_adj = NULL
# when stop_limit is NULL, the ifelse() yields a zero-length vector which
# paste0 treats as "" — i.e. the stop_limit argument is simply omitted
text_woebin = paste0("bin_adj=woebin(dt[,c(\"",x_i,"\",\"",y,"\"),with=F], \"",y,"\", breaks_list=list(",x_i,"=c(",breaks,")), special_values =list(",x_i,"=c(", sv_i, ")), ", ifelse(stop_limit=="N","stop_limit = \"N\", ",NULL), "print_step=0L, print_info=FALSE, method=\"",method,"\")")
eval(parse(text = text_woebin))
## print adjusted breaks (right endpoints of the new bins, excluding
## the -Inf/Inf/missing pseudo-breaks)
breaks_bin = setdiff(sub("^\\[(.*), *(.*)\\)((%,%missing)*)", "\\2\\3", bin_adj[[1]]$bin), c("-Inf","Inf","missing"))
breaks_bin = ifelse(
is.numeric(dt[[x_i]]),
paste0(breaks_bin, collapse=", "),
paste0(paste0("\"",breaks_bin,"\""), collapse=", "))
cat("> Current breaks: ","\n",breaks_bin,"\n","\n")
# print bin_adj
print(woebin_plot(bin_adj)[[1]])
# # breaks: echo the recomputed breaks when the user supplied none
if (breaks == "" || is.null(breaks)) breaks = breaks_bin
return(breaks)
}
#' WOE Binning Adjustment
#'
#' \code{woebin_adj} interactively adjust the binning breaks.
#'
#' @param dt A data frame.
#' @param y Name of y variable.
#' @param bins A list of data frames. Binning information generated from \code{woebin}.
#' @param adj_all_var Logical, whether to also adjust variables that already have monotonic woe trends. Default is TRUE.
#' @param special_values The values specified in special_values will be in separate bins. Default is NULL.
#' @param method Optimal binning method, it should be "tree" or "chimerge". Default is "tree".
#' @param save_breaks_list A string. The file name to save breaks_list. Default is None.
#' @param count_distr_limit The minimum count distribution percentage. Accepted range: 0.01-0.2; default is 0.05. This argument should be the same with woebin's.
#'
#' @return A list of modified break points of each x variables.
#'
#' @seealso \code{\link{woebin}}, \code{\link{woebin_ply}}, \code{\link{woebin_plot}}
#'
#' @examples
#' \donttest{
#' # Load German credit data
#' data(germancredit)
#'
#' # Example I
#' dt = germancredit[, c("creditability", "age.in.years", "credit.amount")]
#' bins = woebin(dt, y="creditability")
#' breaks_adj = woebin_adj(dt, y="creditability", bins)
#' bins_final = woebin(dt, y="creditability",
#' breaks_list=breaks_adj)
#'
#' # Example II
#' binsII = woebin(germancredit, y="creditability")
#' breaks_adjII = woebin_adj(germancredit, "creditability", binsII)
#' bins_finalII = woebin(germancredit, y="creditability",
#' breaks_list=breaks_adjII)
#' }
#'
#' @import data.table
#' @importFrom utils menu
#' @importFrom graphics hist plot
#' @export
woebin_adj = function(dt, y, bins, adj_all_var=TRUE, special_values=NULL, method="tree", save_breaks_list=NULL, count_distr_limit = 0.05) {
# silence R CMD check notes for data.table NSE column names
. = V1 = badprob = badprob2 = bin2 = bin = bin_adj = count_distr = variable = x_breaks = x_class = NULL
# copy so the caller's data is never modified by reference
dt = setDT(copy(dt))
# bins # if (is.list(bins)) rbindlist(bins)
if (!is.data.table(bins)) {
if (is.data.frame(bins)) {
bins = setDT(bins)
} else {
bins = rbindlist(bins)
}
}
# x variables to adjust: all of them, or (adj_all_var=FALSE) only those
# whose bad-probability trend is not monotonic across bins
xs_all = bins[,unique(variable)]
if (adj_all_var == FALSE) {
xs_adj = bins[
!(bin == "missing" & count_distr >= count_distr_limit)
][, badprob2 := badprob >= shift(badprob, type = "lag"), by=variable
][!is.na(badprob2), length(unique(badprob2)), by=variable
][V1 > 1, variable]
} else {
xs_adj = xs_all
}
# length of adjusting variables
xs_len = length(xs_adj)
# special_values
special_values = check_special_values(special_values, xs_adj)
# breakslist of bins (the working copy updated as the user adjusts)
bins_breakslist = bins_to_breaks(bins, dt)
# nothing to adjust: return the current breaks_list code as-is
if (xs_len == 0) {
warning("The binning breaks of all variables are perfect according to default settings.")
breaks_list = paste0(bins_breakslist[, paste0(variable, "=c(", x_breaks, ")")], collapse = ", \n ")
breaks_list = paste0(c("list(", breaks_list, ")"), collapse = "\n ")
return(breaks_list)
}
# interactive loop over variables; menu choices: 1=next, 2=adjust, 3=back
i = 1
breaks_list = NULL
while (i <= xs_len) {
# x variable
breaks = stop_limit = NULL
x_i = xs_adj[i]
sv_i = paste(paste0("\'",special_values[[x_i]],"\'"), collapse = ",")
# basic information of x_i variable ------
woebin_adj_print_basic_info(i, xs_adj, bins, dt, bins_breakslist)
# adjusting breaks ------
adj_brk = menu(c("next", "yes", "back"), title=paste0("> Adjust breaks for (", i, "/", xs_len, ") ", x_i, "?"))
while (adj_brk == 2) {
# modify breaks adj_brk == 2; "N" means re-run without optimal binning
breaks = readline("> Enter modified breaks: ")
breaks = gsub("^[,\\.]+|[,\\.]+$", "", breaks)
if (breaks == "N") {
stop_limit = "N"
breaks = NULL
} else {
stop_limit = NULL
}
# re-bin and re-plot; errors (e.g. malformed breaks) are swallowed so
# the user can simply retry
tryCatch(breaks <- woebin_adj_break_plot(dt, y, x_i, breaks, stop_limit, sv_i, method=method), error = function(e) e)
adj_brk = menu(c("next", "yes", "back"), title=paste0("> Adjust breaks for (", i, "/", xs_len, ") ", x_i, "?"))
}
if (adj_brk == 3) {
# go back adj_brk == 3
i = ifelse(i > 1, i-1, i)
} else {
# go next adj_brk == 1; keep the user's breaks for this variable
if (!(is.null(breaks) || breaks == "")) bins_breakslist[variable == x_i][["x_breaks"]] <- breaks
i = i + 1
}
}
# render the final breaks_list as runnable R code and echo it
breaks_list = paste0(bins_breakslist[, paste0(variable, "=c(", x_breaks, ")")], collapse = ", \n ")
breaks_list = paste0(c("list(", breaks_list, ")"), collapse = "\n ")
cat(breaks_list,"\n")
if (!is.null(save_breaks_list)) {
# re-run woebin with the adjusted breaks so the saved file reflects them
bins_adj = woebin(dt, y, x=bins_breakslist[,variable], breaks_list=breaks_list, print_info=FALSE)
bins_to_breaks(bins_adj, dt, to_string=TRUE, save_name=save_breaks_list)
}
return(breaks_list)
}
|
/R/woebin.R
|
permissive
|
SimaShanhe/scorecard
|
R
| false
| false
| 55,011
|
r
|
# woebin woebin_plot woebin_ply woebin_adj
# converting vector (breaks & special_values) to data frame
split_vec_todf = function(vec) {
  # Expand a breaks/special-values vector into a long data.table with one row
  # per "%,%"-separated element. Columns: rowid (index of the original element,
  # so grouped elements can be re-joined later), bin_chr (the original string),
  # value (each split element; the literal "missing" becomes NA).
  # Returns NULL (invisibly) when `vec` is NULL.
  value = . = bin_chr = V1 = NULL
  if (is.null(vec)) return(invisible(NULL))
  vec_dt = data.table(value = vec, bin_chr = vec)
  # remember which original element each row came from
  vec_dt = vec_dt[, rowid := .I]
  # one row per "%,%"-separated piece
  vec_dt = vec_dt[, strsplit(as.character(value), "%,%", fixed = TRUE), by = .(rowid, bin_chr)]
  vec_dt[, .(rowid, bin_chr, value = ifelse(V1 == "missing", NA, as.character(V1)))]
}
# add missing to spl_val if there is na in dtm$value and
# missing is not specified in breaks and spl_val
add_missing_spl_val = function(dtm, breaks, spl_val) {
  # Prepend 'missing' to the special-values vector when dtm$value contains NA
  # but 'missing' is not already mentioned in either `breaks` or `spl_val`,
  # so NA rows always end up in an explicit bin. Returns the (possibly
  # extended) spl_val vector.
  value = NULL
  has_na = dtm[, any(is.na(value))]
  missing_declared = any(grepl('missing', c(breaks, spl_val)))
  if (has_na && !missing_declared) {
    spl_val = c('missing', spl_val)
  }
  return(spl_val)
}
# split dtm into bin_sv and dtm (without speical_values)
dtm_binning_sv = function(dtm, breaks, spl_val) {
  # Split dtm (columns y/variable/value) into two parts:
  #   binning_sv - pre-aggregated good/bad counts for rows whose value is in
  #                `spl_val` (one bin per special-value group, groups joined
  #                with "%,%"),
  #   dtm        - the remaining rows, to be binned by the normal algorithms.
  # Returns list(binning_sv=..., dtm=...); binning_sv is NULL when there are
  # no special values.
  binning_sv = value = . = y = variable = good = bad = bin = NULL
  # add 'missing' to spl_val if dtm has NAs not covered by breaks/spl_val
  spl_val = add_missing_spl_val(dtm, breaks, spl_val)
  if (!is.null(spl_val)) {
    # special_values from vector to data frame (rowid groups "%,%" elements)
    sv_df = split_vec_todf(spl_val)
    # dtm_sv & dtm: partition rows by membership in the special values
    # (NA %in% ... is FALSE, but NAs were mapped to NA in sv_df$value,
    # so NA rows match when 'missing' was declared)
    dtm_sv = setDT(dtm)[value %in% sv_df$value]
    dtm = setDT(dtm)[!(value %in% sv_df$value)]
    # if (nrow(dtm_sv) == 0) return(list(binning_sv=NULL, dtm=dtm))
    # binning_sv: count good/bad per value, attach the rowid from sv_df, then
    # collapse values sharing a rowid into one bin label joined by "%,%"
    binning_sv = merge(
      dtm_sv[, .(good = sum(y==0), bad = sum(y==1), variable=unique(variable)) , by = value][,value:=as.character(value)],
      sv_df[,value:=as.character(value)],
      all.x = TRUE, by='value'
    )[, value:=ifelse(is.na(value), "missing", as.character(value))
    ][, .(bin=paste0(value,collapse="%,%"), good=sum(good), bad=sum(bad), variable=unique(variable)), by=rowid
    ][, .(variable, bin, good, bad)]
  }
  return(list(binning_sv=binning_sv, dtm=dtm))
}
# check empty bins for numeric variable
check_empty_bins = function(dtm, binning) {
  # Detect empty bins for a numeric variable: if some break point appears only
  # as a left or only as a right interval edge in the bin labels, one of the
  # cut() intervals was empty. In that case re-bin dtm using only the right
  # edges actually observed. Returns the (possibly rebuilt) binning table.
  . = bin = value = variable = y = NULL
  # pull the left ("\\1") and right ("\\2") edges out of labels like "[a, b)"
  bin_labels = unique(binning$bin)
  edge_points = function(repl) {
    setdiff(sub("^\\[(.*), *(.*)\\)", repl, bin_labels), c("Inf", "-Inf"))
  }
  left_edges = edge_points("\\1")
  right_edges = edge_points("\\2")
  # every interior break should appear on both sides; a mismatch means a gap
  if (!setequal(left_edges, right_edges)) {
    new_brks = unique(c(-Inf, unique(right_edges), Inf))
    binning = dtm[, bin := cut(value, new_brks, right = FALSE, dig.lab = 10, ordered_result = FALSE)
      ][, .(good = sum(y==0), bad = sum(y==1), variable=unique(variable)) , by = .(bin)
      ][order(bin)]
    # warning( paste0("The break points are modified into \'", paste0(right_edges, collapse = ", "), "\'. There are empty bins based on the provided break points." ) )
  }
  return(binning)
}
# check zero in good bad, remove bins that have zeros in good or bad column
check_zero_goodbad = function(dtm, binning, count_distr_limit = NULL) {
  # Iteratively merge any bin with zero good or zero bad counts into its
  # smaller neighbour, so that every remaining bin contains both classes
  # (needed for finite woe/iv). Bins with brkp == NA (the missing bin) are
  # never merged. `count_distr_limit` is accepted but unused here —
  # presumably kept for signature symmetry with check_count_distri.
  # Returns the merged binning table.
  brkp = good = bad = count = merge_tolead = count_lag = count_lead = brkp2 = . = variable = bin = badprob = value = NULL
  while (binning[!is.na(brkp)][good==0 | bad==0,.N] > 0) {
    # pick the smallest offending bin; brkp needs to be removed if good==0 or bad==0
    rm_brkp = binning[!is.na(brkp)][
      ,count := good+bad
    ][,`:=`(
      # fill = nrow(dtm)+1 exceeds any real bin count, forcing edge bins to
      # merge toward their only existing neighbour
      count_lag=shift(count,type="lag", fill=nrow(dtm)+1),
      count_lead=shift(count,type="lead", fill=nrow(dtm)+1)
    )][, merge_tolead := count_lag > count_lead
    ][good == 0 | bad == 0][count == min(count)]
    # reassign the offending bin's brkp to its lead's or lag's brkp
    shift_type = ifelse(rm_brkp[1,merge_tolead], 'lead', 'lag')
    binning = binning[
      ,brkp2 := shift(brkp,type=shift_type)
    ][brkp == rm_brkp[1,brkp], brkp := brkp2]
    # groupby brkp: collapse merged bins, joining labels with "%,%"
    binning = binning[
      ,.(variable=unique(variable), bin=paste0(bin, collapse = "%,%"), good=sum(good), bad=sum(bad)), by=brkp
    ][, badprob:=bad/(good+bad)]
  }
  # format bin for numeric variables: collapse "[a,b)%,%[b,c)" into "[a,c)"
  # and recompute brkp from the (new) left edge of the label
  if (is.numeric(dtm[,value])) {
    binning = binning[
      grepl("%,%",bin), bin := sub("^(\\[.+?,).+,(.+?\\))$", "\\1\\2", bin)
    ][bin == 'missing', brkp := NA
    ][bin != 'missing', brkp := as.numeric(sub("^\\[(.*),.+", "\\1", bin))]
  }
  return(binning)
}
# check count distri, remove bins that count_distribution rate less than count_distr_limit
check_count_distri = function(dtm, binning, count_distr_limit) {
  # Iteratively merge any bin whose count share (count/sum(count)) falls below
  # `count_distr_limit` into its larger-share neighbour, mirroring the merge
  # loop in check_zero_goodbad but keyed on distribution instead of zero
  # counts. Bins with brkp == NA (missing bin) are never merged.
  # Returns the merged binning table.
  count_distr = count = good = bad = brkp = merge_tolead = count_lag = count_lead = brkp2 = . = variable = bin = value = NULL
  if (!('count' %in% names(binning))) binning[, count := good + bad]
  binning[, count_distr := (count)/sum(count)]
  while (binning[!is.na(brkp)][count_distr<count_distr_limit,.N] > 0) {
    # pick the smallest-share offending bin
    rm_brkp = binning[!is.na(brkp)][
      ,count_distr := (count)/sum(count)
    ][,`:=`(
      # NOTE(review): fill = nrow(dtm)+1 looks copied from the count-based
      # variant; count_distr is a share in [0,1], so any fill > 1 works —
      # behaviour is the same, but the value is misleading. TODO confirm.
      count_lag=shift(count_distr,type="lag", fill=nrow(dtm)+1),
      count_lead=shift(count_distr,type="lead", fill=nrow(dtm)+1)
    )][, merge_tolead := count_lag > count_lead
    ][count_distr<count_distr_limit][count_distr == min(count_distr)]
    # reassign the offending bin's brkp to its lead's or lag's brkp
    shift_type = ifelse(rm_brkp[1,merge_tolead], 'lead', 'lag')
    binning = binning[
      ,brkp2 := shift(brkp,type=shift_type)
    ][brkp == rm_brkp[1,brkp], brkp := brkp2]
    # groupby brkp: collapse merged bins, joining labels with "%,%"
    binning = binning[
      ,.(variable=unique(variable), bin=paste0(bin, collapse = "%,%"), count=sum(count), good=sum(good), bad=sum(bad)), by=brkp
    ][, count_distr := (count)/sum(count)]
  }
  # format bin for numeric variables: collapse "[a,b)%,%[b,c)" into "[a,c)"
  # and recompute brkp from the new left edge
  if (is.numeric(dtm[,value])) {
    binning = binning[
      grepl("%,%",bin), bin := sub("^(\\[.+?,).+,(.+?\\))$", "\\1\\2", bin)
    ][bin == 'missing', brkp := NA
    ][bin != 'missing', brkp := as.numeric(sub("^\\[(.*),.+", "\\1", bin))]
  }
  return(binning)
}
# required in woebin2 # return binning if breaks provided
#' @import data.table
woebin2_breaks = function(dtm, breaks, spl_val) {
  # Bin one variable using user-provided break points.
  # dtm: data.table with columns y / variable / value for a single x variable.
  # breaks: character/numeric vector of break points ("%,%" groups values,
  #   a "...%,%missing" element folds NA into that bin).
  # spl_val: special values routed to their own bins via dtm_binning_sv.
  # Returns list(binning_sv=..., binning=...) of good/bad counts per bin.
  # global variables or functions
  value = bin = . = y = variable = bad = good = V1 = badprob = bksv_list = bin_chr = NULL
  # breaks from vector to data frame (one row per "%,%"-separated element)
  bk_df = split_vec_todf(breaks)
  # split off special-value rows; they are pre-binned in binning_sv
  dtm_binsv_list = dtm_binning_sv(dtm, breaks, spl_val)
  dtm = dtm_binsv_list$dtm
  binning_sv = dtm_binsv_list$binning_sv
  if (dtm[,.N] == 0 || is.null(dtm)) return(list(binning_sv=binning_sv, binning=NULL))
  # binning
  if (is.numeric(dtm[,value])) {
    # numeric: cut at the provided breaks, left-closed intervals [a, b)
    bstbrks = c(-Inf, setdiff(unique(bk_df$value), c(NA, Inf, -Inf)), Inf)
    binning = dtm[
      , bin := cut(value, bstbrks, right = FALSE, dig.lab = 10, ordered_result = FALSE)
    ][, .(good = sum(y==0), bad = sum(y==1), variable=unique(variable)) , by = .(bin)
    ][order(bin)]
    # rebuild binning if the provided breaks produced empty bins
    binning = check_empty_bins(dtm, binning)
    # if a break was grouped with missing (e.g. "Inf%,%missing"), fold the NA
    # bin into the bin that shares its rowid in bk_df
    if (bk_df[is.na(value),.N] == 1) {
      binning = merge(
        binning[, value:=sub("^\\[(.*), *(.*)\\)","\\2",bin)],
        bk_df,
        all.x = TRUE, by="value"
      )[order(rowid,value)][, bin:=ifelse(is.na(bin), "missing", as.character(bin))
      ][, .(bin=paste0(bin,collapse="%,%"), good=sum(good), bad=sum(bad), variable=unique(variable)), by=rowid
      ][order(rowid)]
    }
  } else if (is.factor(dtm[,value]) || is.character(dtm[,value])) {
    dtm = dtm[,value := as.character(value)]
    # categorical levels present in the data but absent from breaks_list
    diff_dt_brk = setdiff(dtm[,unique(value)], bk_df[,value])
    if (length(diff_dt_brk) > 0) {
      # fail with an informative error message (previously this issued a
      # warning and then called a bare stop(), raising an empty-message error)
      stop(sprintf('The categorical values (`%s`) are not specified in `breaks_list` for the column `%s`.', paste0(diff_dt_brk, collapse = ', '), dtm[1,variable]))
    }
    # merge binning with bk_df: each row gets the grouped bin label
    binning = merge(
      dtm, bk_df[,bin:=bin_chr], all.x = TRUE
    )[order(rowid, bin)][, .(good = sum(y==0), bad = sum(y==1), variable=unique(variable)) , by = .(rowid, bin)]
  }
  # # remove rowid column in binning data frame
  binning = binning[,rowid:=1][,rowid:=NULL]
  # # bind binning_sv and binning
  # if (setDT(binning_sv)[,.N] > 0) binning = rbind(binning_sv, binning)
  return(list(binning_sv=binning_sv, binning=binning))
}
# required in woebin2 # return initial binning
woebin2_init_bin = function(dtm, init_count_distr, breaks, spl_val) {
  # Produce the fine (initial) binning that tree/chimerge later coarsens.
  # dtm: data.table with y / variable / value for one x variable.
  # init_count_distr: target minimum share per initial bin; 1/init_count_distr
  #   gives the target number of fine bins for numeric variables.
  # Returns list(binning_sv = special-value bins, initial_binning = table with
  # variable / bin / brkp / good / bad / badprob), or initial_binning = NULL
  # when all rows were special values.
  # global variables or functions
  . = bad = badprob = bin = brkp = good = value = variable = y = NULL
  # split off special-value rows first
  dtm_binsv_list = dtm_binning_sv(dtm, breaks, spl_val)
  dtm = dtm_binsv_list$dtm
  binning_sv = dtm_binsv_list$binning_sv
  if (is.null(dtm) || dtm[,.N]==0) return(list(binning_sv=binning_sv, initial_binning=NULL))
  # binning
  if (is.numeric(dtm[,value])) {
    # numeric variable ------
    xvalue = dtm[, value]
    # drop outliers beyond 3*IQR from the quartiles before choosing breaks,
    # so pretty() break placement is not dominated by extremes
    iq = quantile(xvalue, na.rm = TRUE)
    iqr = IQR(xvalue, na.rm = TRUE)
    if (iqr == 0) {
      xvalue_rm_outlier = xvalue
    } else {
      xvalue_rm_outlier = xvalue[which(xvalue >= iq[2]-3*iqr & xvalue <= iq[4]+3*iqr)]
    }
    # number of initial binning (capped by the number of distinct values)
    n = trunc(1/init_count_distr)
    len_uniq_x = length(setdiff(unique(xvalue_rm_outlier), c(NA,Inf,-Inf)))
    if (len_uniq_x < n) n = len_uniq_x
    # initial breaks: exact values when few distinct; pretty() grid otherwise
    if (len_uniq_x < 10) {
      brk = setdiff(unique(xvalue_rm_outlier), c(NA, Inf, -Inf))
    } else {
      brk = pretty(xvalue_rm_outlier, n)
    }
    # keep only interior breaks, then pad with +/-Inf (and NA for missing bin)
    brk = sort(brk[(brk < max(xvalue, na.rm =TRUE)) & (brk > min(xvalue, na.rm =TRUE))])
    brk = unique(c(-Inf, brk, Inf))
    if (anyNA(xvalue)) brk = c(brk, NA)
    # initial binning datatable; left-closed intervals [a, b)
    init_bin = dtm[
      , bin := cut(value, brk, right = FALSE, dig.lab = 10, ordered_result = FALSE)
    ][, .(good = sum(y==0), bad = sum(y==1), variable=unique(variable)) , by = bin
    ][order(bin)]
    # rebuild binning if any cut() interval came out empty
    init_bin = check_empty_bins(dtm, init_bin)
    # brkp = numeric left edge parsed from the "[a, b)" label
    init_bin = init_bin[
      , `:=`(brkp = as.numeric( sub("^\\[(.*),.+", "\\1", bin)), badprob = bad/(good+bad))
    ][, .(variable, bin, brkp, good, bad, badprob)]
  } else if ( is.logical(dtm[,value]) || is.factor(dtm[,value]) || is.character(dtm[,value]) ) {
    # other variable ------
    # initial binning datatable: one bin per distinct value
    init_bin = dtm[
      , .(variable = unique(variable), good = sum(y==0), bad = sum(y==1)), by=value
    ][, badprob := bad/(good+bad)]
    # order by bin if is.factor, or by badprob if is.character;
    # brkp is the row index (NA for the missing bin) used by merge steps later
    if (is.logical(dtm[,value]) || is.factor(dtm[,value])) {
      init_bin = init_bin[
        order(value)
      ][, brkp := ifelse(is.na(value), NA, .I)
      ][, .(variable, bin=value, brkp, good, bad, badprob)]
    } else {
      init_bin = init_bin[
        order(badprob)
        # next 3 lines make NA located at the last rows
      ][, brkp := ifelse(is.na(value), NA, .I)
      ][order(brkp)
      ][, brkp := ifelse(is.na(value), NA, .I)
      ][, .(variable, bin=value, brkp, good, bad, badprob)]
    }
  }
  # remove brkp that good == 0 or bad == 0 ------
  init_bin = check_zero_goodbad(dtm, init_bin)
  return(list(binning_sv=binning_sv, initial_binning=init_bin))
}
# required in woebin2_tree # add 1 best break for tree-like binning
woebin2_tree_add_1brkp = function(dtm, initial_binning, count_distr_limit, bestbreaks=NULL) {
  # One step of the tree-like binning: evaluate every remaining candidate
  # break point, pick the one that maximises total IV while keeping every
  # bin's count share >= count_distr_limit, and return the coarse binning
  # built from bestbreaks plus that new break.
  # global variables or functions
  brkp = patterns = . = good = bad = variable = count_distr = value = min_count_distr = bstbin = min_count_distr = total_iv = bstbin = brkp = bin = NULL
  # total_iv for all best breaks: for each candidate break i, add a column
  # "bstbin<i>" holding the coarse bin of each fine bin, then melt and compute
  # per-candidate total IV and the minimum bin share
  total_iv_all_breaks = function(initial_binning, bestbreaks, dtm_rows) {
    # candidate break set: fine breaks not yet chosen (NA = missing bin excluded)
    breaks_set = setdiff( initial_binning[,brkp], c(bestbreaks, -Inf, Inf, NA) )
    init_bin_all_breaks = copy(initial_binning)
    # loop on breaks_set
    for (i in breaks_set) {
      # best break + i
      bestbreaks_i = sort(c(bestbreaks, i))
      # best break datatable
      init_bin_all_breaks = init_bin_all_breaks[
        , paste0("bstbin",i) := cut(brkp, c(-Inf, bestbreaks_i, Inf), right = FALSE, dig.lab = 10, ordered_result = FALSE) ]
    }
    # best break dt: one row per (candidate, coarse bin); min_count_distr
    # excludes the missing bin (value NA)
    total_iv_all_brks = melt(
      init_bin_all_breaks, id = c("variable", "good", "bad"), variable.name = "bstbin", measure = patterns("bstbin.+")
    )[, .(good = sum(good), bad = sum(bad), variable = unique(variable))
      , by=.(bstbin, value)
    ][, count_distr := (good+bad)/dtm_rows, by=bstbin
    ][!is.na(value), min_count_distr := min(count_distr), by=bstbin
    ][, .(total_iv = iv_01(good, bad), variable = unique(variable), min_count_distr = min(min_count_distr,na.rm=TRUE)), by=bstbin
    ][, bstbin := as.numeric(sub("bstbin(.+)", "\\1", bstbin))][]
    return(total_iv_all_brks)
  }
  # binning add 1best break: regroup the fine bins by the chosen break set
  binning_add_1bst = function(initial_binning, bestbreaks) {
    value = bstbin = . = good = bad = variable = woe = bin_iv = total_iv = bstbrkp = badprob = NULL # no visible binding for global variable
    if ( is.numeric(dtm[,value]) ) {
      binning_1bst_brk = initial_binning[
        , bstbin := cut(brkp, c(-Inf, bestbreaks, Inf), right = FALSE, dig.lab = 10, ordered_result = FALSE)
      ][, .(variable=unique(variable), bin=unique(bstbin), good = sum(good), bad = sum(bad)) , by = bstbin
      ]
    } else if (is.logical(dtm[,value]) || is.factor(dtm[,value]) || is.character(dtm[,value]) ) {
      # categorical: bin labels are the fine-bin values joined with "%,%";
      # drop the minimum brkp since it is always an implicit left edge
      bestbreaks = setdiff(bestbreaks, min(initial_binning[,brkp]))
      binning_1bst_brk = initial_binning[
        , bstbin := cut(brkp, c(-Inf, bestbreaks, Inf), right = FALSE,dig.lab = 10, ordered_result = FALSE)
      ][, .(variable=unique(variable), bin = paste0(bin, collapse = "%,%"), good = sum(good), bad = sum(bad)), by = bstbin ]
    }
    binning_1bst_brk = binning_1bst_brk[
      order(bstbin)
    ][, total_iv := iv_01(good, bad)
    ][, bstbrkp := as.numeric( sub("^\\[(.*),.+", "\\1", bstbin) )
    ][, .(variable, bin, bstbin, bstbrkp, good, bad, total_iv)]
    return(binning_1bst_brk)
  }
  # adding 1 best breakpoint
  dtm_rows = nrow(dtm)
  total_iv_all_brks = total_iv_all_breaks(initial_binning, bestbreaks, dtm_rows)
  # bestbreaks: total_iv == max(total_iv) & min(count_distr) >= count_distr_limit
  bstbrk_max_iv = total_iv_all_brks[min_count_distr >= count_distr_limit][total_iv==max(total_iv)][, bstbin]
  # add 1best break to bestbreaks
  bestbreaks = unique(c(bestbreaks, bstbrk_max_iv[1]))
  bin_add_1bst = binning_add_1bst(initial_binning, bestbreaks)
  return(bin_add_1bst)
}
# required in woebin2 # return tree-like binning
woebin2_tree = function(dtm, init_count_distr=0.02, count_distr_limit=0.05, stop_limit=0.1, bin_num_limit=8, breaks=NULL, spl_val=NULL) {
  # Tree-like optimal binning: starting from the fine initial binning, greedily
  # add one break per iteration (woebin2_tree_add_1brkp) while the relative IV
  # gain is >= stop_limit and the bin count stays within bin_num_limit.
  # Returns list(binning_sv=..., binning=...).
  # global variables or functions
  brkp = bstbrkp = total_iv = NULL
  # initial binning
  bin_list = woebin2_init_bin(dtm, init_count_distr=init_count_distr, breaks=breaks, spl_val=spl_val)
  initial_binning = bin_list$initial_binning
  binning_sv = bin_list$binning_sv
  # nothing to split when <= 1 fine bin (e.g. all rows were special values)
  if (nrow(initial_binning)<=1 || is.null(initial_binning)) {
    return(list(binning_sv=binning_sv, binning=initial_binning))
  }
  # initialize parameters
  ## length all breaks
  len_brks = initial_binning[!is.na(brkp), .N]
  ## param
  bestbreaks = NULL ## best breaks
  IVt1 = IVt2 = 1e-10
  IVchg = 1 ## IV gain ratio
  step_num = 1
  # best breaks from three to n+1 bins
  binning_tree = NULL
  while ( (IVchg >= stop_limit) & (step_num+1 <= min(bin_num_limit, len_brks)) ) {
    binning_tree = woebin2_tree_add_1brkp(dtm, initial_binning, count_distr_limit, bestbreaks)
    # print(binning_tree)
    # update parameters
    ## best breaks chosen so far (drop -Inf / NA pseudo-breaks)
    bestbreaks = binning_tree[bstbrkp != -Inf & !is.na(bstbrkp), bstbrkp]
    ## information value; IVchg is the relative gain of this step
    IVt2 = binning_tree[1, total_iv]
    IVchg = IVt2/IVt1-1 ## ratio gain
    IVt1 = IVt2
    # print(IVchg)
    step_num = step_num + 1
  }
  # loop may not run at all (e.g. only 2 fine bins); fall back to initial
  if (is.null(binning_tree)) binning_tree = initial_binning
  return(list(binning_sv=binning_sv, binning=binning_tree))
  # return(binning_tree)
}
# examples
# system.time( binning_list <- woebin2_init_bin(dtm, init_count_distr=0.02, breaks =NULL, spl_val=NULL) )
# initial_binning=binning_list$initial_binning
# binning_sv = binning_list$binning_sv
# system.time( woebin2_tree_add_1brkp(dtm, initial_binning, count_distr_limit=0.05) )
# system.time( woebin2_tree(dtm, initial_binning, count_distr_limit=0.05) )
# required in woebin2 # return chimerge binning
#' @importFrom stats qchisq
woebin2_chimerge = function(dtm, init_count_distr=0.02, count_distr_limit=0.05, stop_limit=0.1, bin_num_limit=8, breaks=NULL, spl_val=NULL) {
  # ChiMerge optimal binning: starting from the fine initial binning, repeatedly
  # merge the pair of adjacent bins with the smallest chi-square statistic until
  # every chi-square >= qchisq(1-stop_limit, 1), every bin's count share >=
  # count_distr_limit, and the number of bins <= bin_num_limit.
  # Returns list(binning_sv=..., binning=...).
  .= a= a_colsum= a_lag= a_lag_rowsum= a_rowsum= a_sum= bad= bin= brkp= brkp2= chisq= count= count_distr= e= e_lag= chisq_lead= good= goodbad= merge_tolead =value= variable= NULL
  # [chimerge](http://blog.csdn.net/qunxingvip/article/details/50449376)
  # [ChiMerge:Discretization of numeric attributs](http://www.aaai.org/Papers/AAAI/1992/AAAI92-019.pdf)
  # chisq = function(a11, a12, a21, a22) {
  #   A = list(a1 = c(a11, a12), a2 = c(a21, a22))
  #   Adf = do.call(rbind, A)
  #
  #   Edf =
  #     matrix(rowSums(Adf), ncol = 1) %*%
  #     matrix(colSums(Adf), nrow = 1) /
  #     sum(Adf)
  #
  #   sum((Adf-Edf)^2/Edf)
  # }
  # initial binning
  bin_list = woebin2_init_bin(dtm, init_count_distr=init_count_distr, breaks=breaks, spl_val=spl_val)
  initial_binning = bin_list$initial_binning
  binning_sv = bin_list$binning_sv
  if (nrow(initial_binning)<=1 || is.null(initial_binning)) {
    return(list(binning_sv=binning_sv, binning=initial_binning))
  }
  # function to create a chisq column in initial_binning: for each bin, the
  # chi-square of the 2x2 table formed with its lag neighbour (observed a vs
  # expected e from row/column margins); the first bin's chisq is NA
  add_chisq = function(initial_binning) {
    chisq_df = melt(initial_binning[!is.na(brkp)], id.vars = c("brkp", "variable", "bin"), measure.vars = c("good", "bad"), variable.name = "goodbad", value.name = "a"
    )[order(brkp)
    ][, a_lag := shift(a, type="lag"), by=.(goodbad)
    ][, `:=`(
      a_rowsum = sum(a),
      a_lag_rowsum = sum(a_lag),
      a_colsum = a+a_lag,
      a_sum = sum(a+a_lag)), by=brkp
    ][, `:=`(
      e = a_rowsum/a_sum*a_colsum,
      e_lag = a_lag_rowsum/a_sum*a_colsum
    )][, .(chisq=sum((a-e)^2/e + (a_lag-e_lag)^2/e_lag)), by=brkp]
    return(merge(initial_binning[,count:=good+bad], chisq_df, all.x = TRUE))
  }
  # dtm_rows
  dtm_rows = nrow(dtm)
  # chisq threshold implied by the stop_limit significance level (1 df)
  chisq_limit = qchisq(1-stop_limit,1)
  # binning with chisq column
  binning_chisq = add_chisq(initial_binning)
  # param
  bin_chisq_min = binning_chisq[, min(chisq, na.rm = TRUE)]
  bin_count_distr_min = binning_chisq[!is.na(brkp), min((good+bad)/dtm_rows)]
  bin_nrow = binning_chisq[,.N]
  # merge loop; conditions checked in priority order below
  while (
    bin_chisq_min < chisq_limit ||
    bin_count_distr_min < count_distr_limit ||
    bin_nrow > bin_num_limit) {
    # choose the bin whose brkp will be removed (merged into a neighbour)
    if (bin_chisq_min < chisq_limit) {
      # smallest chisq first; chisq measures similarity to the lag neighbour,
      # so merge toward lag (merge_tolead = FALSE)
      rm_brkp = binning_chisq[, merge_tolead := FALSE][order(chisq, count)][1,]
    } else if (bin_count_distr_min < count_distr_limit) {
      # smallest count share first; merge toward the more-similar neighbour
      rm_brkp = binning_chisq[,`:=`(
        count_distr = count/sum(count),
        chisq_lead = shift(chisq, type = "lead", fill = Inf)
      )][,merge_tolead := ifelse(is.na(chisq), TRUE, chisq > chisq_lead)
      ][!is.na(brkp)][order(count_distr)][1,]
    } else if (bin_nrow > bin_num_limit) {
      rm_brkp = binning_chisq[, merge_tolead := FALSE][order(chisq, count)][1,]
    }
    # groupby brkp: reassign the removed brkp and collapse the merged bins
    shift_type = ifelse(rm_brkp[1,merge_tolead], 'lead', 'lag')
    binning_chisq = binning_chisq[
      ,brkp2 := shift(brkp,type=shift_type)
    ][brkp == rm_brkp[1,brkp], brkp := brkp2
    ][,.(variable=unique(variable), bin=paste0(bin, collapse = "%,%"), good=sum(good), bad=sum(bad)), by=brkp
    ]#[, badprob:=bad/(good+bad)]
    # update
    ## add chisq to new binning data frame
    binning_chisq = add_chisq(binning_chisq)
    ## param
    bin_chisq_min = binning_chisq[, min(chisq, na.rm = TRUE)]
    bin_count_distr_min = binning_chisq[!is.na(brkp), min((good+bad)/dtm_rows)]
    bin_nrow = binning_chisq[,.N]
  }
  # format bin # remove (.+\\)%,%\\[.+,) i.e. collapse "[a,b)%,%[b,c)" -> "[a,c)"
  if (is.numeric(dtm[,value])) {
    binning_chisq = binning_chisq[grepl("%,%",bin), bin := sub("^(\\[.+?,).+,(.+?\\))$", "\\1\\2", bin)]
  }
  return(list(binning_sv=binning_sv, binning=binning_chisq))
  # return(binning_chisq)
}
# required in woebin2 # return equal binning, supports numerical variables only
woebin2_equal = function(dtm, init_count_distr=0.02, count_distr_limit=0.05, stop_limit=0.1, bin_num_limit=8, breaks=NULL, spl_val=NULL, method='freq') {
  # Equal-frequency ('freq') or equal-width ('width') binning; numeric
  # variables only. bin_num_limit sets the target number of bins; bins are
  # then cleaned via check_empty_bins / check_zero_goodbad /
  # check_count_distri. Returns list(binning_sv=..., binning=...).
  count = value = group = . = minv = maxv = bin = y = variable = bad = good = badprob = NULL
  # split off special-value rows first
  dtm_binsv_list = dtm_binning_sv(dtm, breaks, spl_val)
  dtm = dtm_binsv_list$dtm
  binning_sv = dtm_binsv_list$binning_sv
  if (is.null(dtm) || dtm[,.N]==0) return(list(binning_sv=binning_sv, binning=NULL))
  # dt_sl = dtm[,.(label=y, datset=variable, score=value)]
  # dtm = dt_sl[,.(y=label, variable=datset, value=score)]
  # breaks
  if (bin_num_limit >= dtm[, length(unique(value))] ) {
    # fewer distinct values than requested bins: one bin per distinct value
    brkp = dtm[order(value)][, unique(value)]
    brkp = c(-Inf, brkp[-1], Inf)
  } else {
    if (method == 'freq') {
      # equal frequency: split sorted rows into bin_num_limit equal-size
      # groups and take each group's first value as a break
      brkp = copy(dtm)[order(value)
      ][, group := ceiling(.I/(.N/bin_num_limit))
      ][, .(value=value[1]), by = group
      ][, c(-Inf, value[-1], Inf)]
    } else if (method == 'width') {
      # equal width: evenly spaced breaks between min and max
      minmax = dtm[, .(maxv = max(value), minv = min(value))]
      brkp = seq(minmax[,minv], minmax[,maxv], length.out = bin_num_limit+1)
      brkp = c(-Inf, brkp[-c(1, length(brkp))], Inf)
    }
  }
  # bin and count; brkp column is the numeric left edge of the "[a, b)" label
  binning_equal = dtm[, bin := cut(value, unique(brkp), right = FALSE, dig.lab = 10, ordered_result = F)
  ][, .(good = sum(y==0), bad = sum(y==1), count = .N), keyby = .(variable, bin)
  ][, `:=`(brkp = as.numeric( sub("^\\[(.*),.+", "\\1", bin)), badprob = bad/(good+bad))
  ][, .(variable, bin, brkp, count, good, bad, badprob)]
  # clean up: rebuild on empty bins, merge zero-good/bad bins, enforce the
  # minimum count distribution
  binning_equal = check_empty_bins(dtm, binning_equal)
  binning_equal = check_zero_goodbad(dtm, binning_equal)
  binning_equal = check_count_distri(dtm, binning_equal, count_distr_limit)
  return(list(binning_sv=binning_sv, binning=binning_equal))
}
# required in woebin2 # # format binning output
binning_format = function(binning) {
  # Finalize a binning table: compute badprob, woe, per-bin IV and total IV,
  # rename the NA bin to "missing" and move it to the first row, and derive a
  # `breaks` column from the bin label. Input must have columns variable, bin,
  # good, bad (and is_sv marking special-value bins).
  # global variables or functions
  . = bad = badprob = bin = bin_iv = good = total_iv = variable = woe = is_sv = count = NULL
  # required columns in input binning: variable, bin, good, bad
  if (!('count' %in% names(binning))) binning[, count := good+bad]
  binning = binning[
    , badprob:=bad/(good+bad)
    # woe_01 / miv_01 are package helpers (defined elsewhere) computing
    # weight-of-evidence and the per-bin IV contribution from good/bad counts
  ][, woe := lapply(.SD, woe_01, bad), .SDcols = "good"
  ][, bin_iv := lapply(.SD, miv_01, bad), .SDcols = "good"
  ][, total_iv := sum(bin_iv)
  ][, bin := ifelse(is.na(bin) | bin=="NA", "missing", as.character(bin)) # replace NA by missing
    # breaks: the right edge of "[a, b)" (keeping any trailing "%,%missing")
  ][, .(variable, bin, count, count_distr=(good+bad)/sum(good+bad), good, bad, badprob, woe, bin_iv, total_iv, breaks = sub("^\\[(.*), *(.*)\\)((%,%missing)*)", "\\2\\3", bin), is_special_values=is_sv)]
  # move missing from last row to first
  if ( "missing" %in% binning$bin ) {
    binning = rbind(binning[bin=="missing"], binning[bin != "missing"])
  }
  return(binning)
}
# woebin2
# This function provides woe binning for only two columns (one x and one y) data frame.
# woebin2
# This function provides woe binning for only two columns (one x and one y) data frame.
woebin2 = function(dtm, breaks=NULL, spl_val=NULL, init_count_distr=0.02, count_distr_limit=0.05, stop_limit=0.1, bin_num_limit=8, method="tree") {
  # Dispatcher for a single variable: choose the binning strategy (provided
  # breaks > initial-only via stop_limit=="N" > tree / chimerge / freq /
  # width), then bind the special-value bins with the regular bins and format
  # the result via binning_format.
  # global variables or functions
  . = bad = badprob = bin = bin_iv = good = total_iv = variable = woe = is_sv = NULL
  # binning
  if (!anyNA(breaks) & !is.null(breaks)) {
    # 1.return binning if breaks provided
    bin_list = woebin2_breaks(dtm=dtm, breaks=breaks, spl_val=spl_val)
  } else {
    if (stop_limit == "N") {
      # binning of initial & specialvalues (no coarsening)
      bin_list = woebin2_init_bin(dtm, init_count_distr=init_count_distr, breaks=breaks, spl_val=spl_val)
    } else {
      if (method == "tree") {
        # 2.tree-like optimal binning
        bin_list = woebin2_tree(dtm, init_count_distr, count_distr_limit, stop_limit, bin_num_limit, breaks=breaks, spl_val=spl_val)
      } else if (method == "chimerge") {
        # 2.chimerge optimal binning
        bin_list = woebin2_chimerge(dtm, init_count_distr, count_distr_limit, stop_limit, bin_num_limit, breaks=breaks, spl_val=spl_val)
      } else if (method %in% c('freq','width')) {
        # 3. in equal freq or width
        bin_list = woebin2_equal(dtm, init_count_distr, count_distr_limit, stop_limit, bin_num_limit, breaks=breaks, spl_val=spl_val, method = method)
      }
    }
  }
  # bind binning_sv and binning; is_sv flags special-value bins.
  # When one element is NULL, rbindlist drops it and the surviving element's
  # name is assigned to every row; otherwise idcol carries the element name.
  if (any(sapply(bin_list, is.null))) {
    binning = rbindlist(bin_list)[, is_sv := names(bin_list)[!sapply(bin_list, is.null)]]
  } else {
    binning = rbindlist(bin_list, use.names = TRUE, fill = TRUE, idcol = 'is_sv')
  }
  binning = binning[, is_sv := is_sv == 'binning_sv']
  return(binning_format(binning))
}
# convert bins to breaks_list
# convert bins to breaks_list
bins_to_breaks = function(bins, dt, to_string=FALSE, save_name=NULL) {
  # Convert a bins table (or list of tables) back into a breaks_list: one row
  # per variable with its break points rendered as R code (quoted for
  # non-numeric columns). With to_string=TRUE the result is a single
  # "breaks_list=list(...)" string; with save_name set it is written to a
  # timestamped .R file instead (returning NULL).
  .= bin= bin2= is_special_values= variable= x_breaks= x_class = NULL
  # bins # if (is.list(bins)) rbindlist(bins)
  if (!is.data.table(bins)) {
    if (is.data.frame(bins)) {
      bins = setDT(bins)
    } else {
      bins = rbindlist(bins)
    }
  }
  # x variables
  xs_all = bins[,unique(variable)]
  # class of variables (numeric columns get unquoted break values below)
  vars_class = data.table(
    variable = xs_all,
    x_class = dt[,sapply(.SD, class), .SDcols = xs_all]
  )
  # breaks: bin2 is the right edge of "[a, b)" (keeping trailing
  # "%,%missing"); drop Inf/-Inf/missing rows unless they are special values
  bins_breakslist = bins[
    , bin2 := sub("^\\[(.*), *(.*)\\)((%,%missing)*)", "\\2\\3", bin)
  ][!(bin2 %in% c("-Inf","Inf","missing") & !is_special_values)
  ][vars_class, on="variable"
  ][, .(
    x_breaks = paste(ifelse(x_class=="numeric", bin2, paste0("\"",bin2,"\"")), collapse=", "),
    x_class=unique(x_class)
  ), by=variable]
  if (to_string) {
    bins_breakslist = paste0(bins_breakslist[, paste0(variable, "=c(", x_breaks, ")")], collapse = ", \n ")
    bins_breakslist = paste0(c("breaks_list=list(", bins_breakslist, ")"), collapse = "\n ")
    if (!is.null(save_name)) {
      save_name = sprintf('%s_%s.R', save_name, format(Sys.time(),"%Y%m%d_%H%M%S"))
      writeLines(bins_breakslist, save_name)
      cat(sprintf('[INFO] The breaks_list is saved as %s\n', save_name))
      return()
    }
  }
  return(bins_breakslist)
}
# @param init_count_distr The minimum percentage of initial binning class number over total. Accepted range: 0.01-0.2; default is 0.02, which means initial cut into 50 fine bins for continuous variables.
#' WOE Binning
#'
#' \code{woebin} generates optimal binning for numerical, factor and categorical variables using methods including tree-like segmentation or chi-square merge. \code{woebin} can also customizing breakpoints if the `breaks_list` was provided. The default `woe` is defined as ln(Bad_i/Good_i). If you prefer ln(Good_i/Bad_i), please set the argument `positive` as negative value, such as '0' or 'good'. If there is a zero frequency class when calculating woe, the zero will replaced by 0.99 to make the woe calculable.
#'
#' @param dt A data frame with both x (predictor/feature) and y (response/label) variables.
#' @param y Name of y variable.
#' @param x Name of x variables. Default is NULL. If x is NULL, then all columns except y and var_skip are counted as x variables.
#' @param var_skip Name of variables that will skip for binning. Default is NULL.
#' @param breaks_list List of break points, default is NULL. If it is not NULL, variable binning will based on the provided breaks.
#' @param special_values the values specified in special_values will be in separate bins. Default is NULL.
#' @param stop_limit Stop binning segmentation when information value gain ratio less than the stop_limit if using tree method; or stop binning merge when the chi-square of each neighbor bins are larger than 'qchisq(1-stop_limit, 1)' if using chimerge method. Accepted range: 0-0.5; default is 0.1.
#' @param count_distr_limit The minimum count distribution percentage. Accepted range: 0.01-0.2; default is 0.05.
#' @param bin_num_limit Integer. The maximum number of binning. Default is 8.
#' @param positive Value of positive class, default "bad|1".
#' @param no_cores Number of CPU cores for parallel computation. Defaults NULL. If no_cores is NULL, the no_cores will set as 1 if length of x variables less than 10, and will set as the number of all CPU cores if the length of x variables greater than or equal to 10.
#' @param print_step A non-negative integer. Default is 1. If print_step>0, print variable names by each print_step-th iteration. If print_step=0 or no_cores>1, no message is print.
#' @param method Optimal binning method, it should be "tree" or "chimerge". Default is "tree".
#' @param save_breaks_list A string. The file name to save breaks_list. Default is NULL.
#' @param ignore_const_cols Logical. Ignore constant columns. Default is TRUE.
#' @param ignore_datetime_cols Logical. Ignore datetime columns. Default is TRUE.
#' @param check_cate_num Logical. Check whether the number of unique values in categorical columns larger than 50. It might make the binning process slow if there are too many unique categories. Default is TRUE.
#' @param replace_blank_na Logical. Replace blank values with NA. Default is TRUE.
#' @param ... Additional parameters.
#'
#' @return A list of data frames include binning information for each x variables.
#'
#' @seealso \code{\link{woebin_ply}}, \code{\link{woebin_plot}}, \code{\link{woebin_adj}}
#'
#' @examples
#' # load germancredit data
#' data(germancredit)
#'
#' # Example I
#' # binning of two variables in germancredit dataset
#' # using tree method
#' bins2_tree = woebin(germancredit, y="creditability",
#' x=c("credit.amount","housing"), method="tree")
#' bins2_tree
#'
#' \donttest{
#' # using chimerge method
#' bins2_chi = woebin(germancredit, y="creditability",
#' x=c("credit.amount","housing"), method="chimerge")
#'
#' # binning in equal freq/width # only supports numerical variables
#' numeric_cols = c("duration.in.month", "credit.amount",
#' "installment.rate.in.percentage.of.disposable.income", "present.residence.since",
#' "age.in.years", "number.of.existing.credits.at.this.bank",
#' "number.of.people.being.liable.to.provide.maintenance.for")
#' bins_freq = woebin(germancredit, y="creditability", x=numeric_cols, method="freq")
#' bins_width = woebin(germancredit, y="creditability", x=numeric_cols, method="width")
#'
#' # y can be NULL if no label column in dataset
#' bins_freq_noy = woebin(germancredit, y=NULL, x=numeric_cols)
#'
#' # Example II
#' # binning of the germancredit dataset
#' bins_germ = woebin(germancredit, y = "creditability")
#' # converting bins_germ into a data frame
#' # bins_germ_df = data.table::rbindlist(bins_germ)
#'
#' # Example III
#' # customizing the breakpoints of binning
#' library(data.table)
#' dat = rbind(
#' germancredit,
#' data.table(creditability=sample(c("good","bad"),10,replace=TRUE)),
#' fill=TRUE)
#'
#' breaks_list = list(
#' age.in.years = c(26, 35, 37, "Inf%,%missing"),
#' housing = c("own", "for free%,%rent")
#' )
#'
#' special_values = list(
#' credit.amount = c(2600, 9960, "6850%,%missing"),
#' purpose = c("education", "others%,%missing")
#' )
#'
#' bins_cus_brk = woebin(dat, y="creditability",
#' x=c("age.in.years","credit.amount","housing","purpose"),
#' breaks_list=breaks_list, special_values=special_values)
#'
#' # Example IV
#' # save breaks_list as a R file
#' bins2 = woebin(germancredit, y="creditability",
#' x=c("credit.amount","housing"), save_breaks_list='breaks_list')
#'
#' }
#'
#' @import data.table foreach
#' @importFrom stats IQR quantile setNames
#' @importFrom doParallel registerDoParallel stopImplicitCluster
#' @importFrom parallel detectCores
#' @export
woebin = function(dt, y, x=NULL, var_skip=NULL, breaks_list=NULL, special_values=NULL, stop_limit=0.1, count_distr_limit=0.05, bin_num_limit=8, positive="bad|1", no_cores=NULL, print_step=0L, method="tree", save_breaks_list=NULL, ignore_const_cols=TRUE, ignore_datetime_cols=TRUE, check_cate_num=TRUE, replace_blank_na=TRUE, ...) {
  # WOE binning of all (or selected) x variables of dt against the binary
  # target y; returns a named list with one binning data.frame per variable.
  # Deprecated dot-arguments (min_perc_fine_bin, min_perc_coarse_bin,
  # max_num_bin) are mapped onto their current equivalents below.
  # start time
  start_time = proc.time()
  # global variable (silences R CMD check note for foreach's NSE variable)
  i = NULL
  # arguments ------
  # print_info
  print_info = list(...)[['print_info']]
  if (is.null(print_info)) print_info = TRUE
  # init_count_distr (deprecated alias: min_perc_fine_bin)
  # FIX: read the deprecated argument from its own name. Previously this
  # read 'init_count_distr', so min_perc_fine_bin was always identical to
  # init_count_distr and the deprecated alias was silently ignored.
  min_perc_fine_bin = list(...)[['min_perc_fine_bin']]
  init_count_distr = list(...)[['init_count_distr']]
  if (is.null(init_count_distr)) {
    init_count_distr <- ifelse(!is.null(min_perc_fine_bin), min_perc_fine_bin, 0.02)
  }
  # count_distr_limit (deprecated alias: min_perc_coarse_bin)
  min_perc_coarse_bin = list(...)[['min_perc_coarse_bin']]
  if (!is.null(min_perc_coarse_bin)) count_distr_limit = min_perc_coarse_bin
  # bin_num_limit (deprecated alias: max_num_bin)
  max_num_bin = list(...)[['max_num_bin']]
  if (!is.null(max_num_bin)) bin_num_limit = max_num_bin
  # print info
  if (print_info) cat('[INFO] creating woe binning ... \n')
  # set dt as data.table (copy so the caller's object is never modified by reference)
  dt = setDT(copy(dt)) #copy(setDT(dt))
  if (!is.null(x)) dt = dt[, c(y,x), with=FALSE]
  # check y
  if (!is.null(y)) dt = check_y(dt, y, positive)
  # remove constant columns
  if (ignore_const_cols) dt = check_const_cols(dt)
  # remove date/time columns
  if (ignore_datetime_cols) dt = check_datetime_cols(dt)
  # check categorical columns' unique values
  if (check_cate_num) check_cateCols_uniqueValues(dt, var_skip)
  # replace blank strings with NA
  if (replace_blank_na) dt = rep_blank_na(dt)
  # x variable names
  xs = x_variable(dt, y, x, var_skip)
  xs_len = length(xs)
  # print_step
  print_step = check_print_step(print_step)
  # breaks_list
  breaks_list = check_breaks_list(breaks_list, xs)
  # special_values
  special_values = check_special_values(special_values, xs)
  # # stop_limit vector
  # if (length(stop_limit) == 1) {
  #   stop_limit = rep(stop_limit, length(xs))
  # } else if (length(stop_limit) != length(xs)) {
  #   stop("Incorrect inputs; the length of stop_limit should be 1 or the same as x variables.")
  # }
  # stop_limit range
  # FIX: the string "N" (used by woebin_adj to disable the IV early-stop)
  # must pass through untouched; the old numeric-range test compared it
  # lexicographically and reset it to 0.1 with a warning. Also test
  # is.numeric() first so non-numeric input cannot break the comparisons.
  if ( !identical(stop_limit, "N") &&
       (!is.numeric(stop_limit) || stop_limit<0 || stop_limit>0.5) ) {
    warning("Incorrect parameter specification; accepted stop_limit parameter range is 0-0.5. Parameter was set to default (0.1).")
    stop_limit = 0.1
  }
  # init_count_distr range
  if ( init_count_distr<0.01 || init_count_distr>0.2 || !is.numeric(init_count_distr) ) {
    warning("Incorrect parameter specification; accepted init_count_distr parameter range is 0.01-0.2. Parameter was set to default (0.02).")
    init_count_distr = 0.02
  }
  # count_distr_limit range
  if ( count_distr_limit<0.01 || count_distr_limit>0.2 || !is.numeric(count_distr_limit) ) {
    warning("Incorrect parameter specification; accepted count_distr_limit parameter range is 0.01-0.2. Parameter was set to default (0.05).")
    count_distr_limit = 0.05
  }
  # bin_num_limit
  if (!is.numeric(bin_num_limit)) {
    warning("Incorrect inputs; bin_num_limit should be numeric variable. Parameter was set to default (8).")
    bin_num_limit = 8
  }
  # method
  if (!(method %in% c("tree", "chimerge", 'freq', 'width'))) {
    warning("Incorrect inputs; method should be tree or chimerge. Parameter was set to default (tree).")
    method = "tree"
  }
  # without a target, only unsupervised binning is possible
  if (is.null(y) && !(method %in% c('freq', 'width'))) method = 'freq'
  # binning ------
  # loop on xs # https://www.r-bloggers.com/how-to-go-parallel-in-r-basics-tips/
  if (is.null(no_cores) || no_cores<1) {
    no_cores = ifelse(xs_len < 10, 1, detectCores(logical=F))
  }
  bins = list()
  if (!is.null(y)) {
    y = dt[[y]]
  } else y = NA
  if (no_cores == 1) {
    # sequential path; seq_len() is safe when xs_len == 0 (1:xs_len was not)
    for (i in seq_len(xs_len)) {
      x_i = xs[i]
      # print xs
      if (print_step>0 && i %% print_step == 0) cat(paste0(format(c(i,xs_len)),collapse = "/"), x_i,"\n")
      # woe binning on one variable; errors are captured and filtered below
      bins[[x_i]] <-
        try(do.call(woebin2, args = list(
          dtm = data.table(y=y, variable=x_i, value=dt[[x_i]]),
          breaks = breaks_list[[x_i]],
          spl_val = special_values[[x_i]],
          init_count_distr = init_count_distr,
          count_distr_limit= count_distr_limit,
          stop_limit = stop_limit,
          bin_num_limit = bin_num_limit,
          method = method
        )), silent = TRUE)
    }
  } else {
    registerDoParallel(no_cores)
    # run one variable per worker; .final names the results by variable
    bins <-
      foreach(
        i = seq_len(xs_len),
        .combine = list,
        .multicombine = TRUE,
        .maxcombine = xs_len+1,
        .inorder = FALSE,
        .errorhandling = "pass",
        .final = function(bs) {
          if (xs_len==1) bs = list(bs)
          setNames(bs, xs)
        },
        .export = c('dt', 'xs', 'y', 'breaks_list', 'special_values', 'init_count_distr', 'count_distr_limit', 'stop_limit', 'bin_num_limit', 'method')
      ) %dopar% {
        x_i = xs[i]
        # woe binning on one variable
        try(do.call(woebin2, args = list(
          dtm = data.table(y=y, variable=x_i, value=dt[[x_i]]),
          breaks = breaks_list[[x_i]],
          spl_val = special_values[[x_i]],
          init_count_distr = init_count_distr,
          count_distr_limit= count_distr_limit,
          stop_limit = stop_limit,
          bin_num_limit = bin_num_limit,
          method = method
        )), silent = TRUE)
      }
    # finish
    stopImplicitCluster()
  }
  # drop variables whose binning failed, with a warning naming them
  error_variables = names(bins)[which(sapply(bins, function(x) inherits(x, 'try-error')))]
  if (length(error_variables) > 0) {
    warning(sprintf('The following columns are removed from binning results due to errors:\n%s', paste0(error_variables, collapse=', ')))
    bins = bins[setdiff(names(bins), error_variables)]
  }
  # running time
  rs = proc.time() - start_time
  # report elapsed time only for long runs
  if (rs[3] > 10 && print_info) cat(sprintf("[INFO] Binning on %s rows and %s columns in %s",nrow(dt),ncol(dt),sec_to_hms(rs[3])),"\n")
  # optionally save breaks_list to file
  if (!is.null(save_breaks_list)) bins_to_breaks(bins, dt, to_string=TRUE, save_name=save_breaks_list)
  return(bins)
}
#' @import data.table
woepoints_ply1 = function(dtx, binx, x_i, woe_points) {
  # Map one column of raw values onto its binned woe/points values.
  #   dtx        : single-column data.frame holding the raw values of x_i
  #   binx       : binning rows (output of woebin) for variable x_i
  #   x_i        : variable name (string)
  #   woe_points : name of the binx column to output, "woe" or "points"
  # Returns a one-column data.frame named <x_i>_<woe_points>, preserving
  # the original row order of dtx.
  . = V1 = bin = woe = NULL
  # explode merged bin labels ("a%,%b") into one row per raw level (V1),
  # each carrying the woe/points value of its parent bin
  binx = binx[
    , bin:=as.character(bin)
  ][,.(unlist(strsplit(bin, "%,%", fixed=TRUE)),
       eval(parse(text = woe_points)) ), by=bin]
  # dtx
  ## cut numeric variable into the bin intervals; special values (levels not
  ## shaped like "[a,b)") are kept as-is so they can match their own bin
  if ( is.numeric(dtx[[x_i]]) ) {
    binx_sv = binx[!grepl("\\[",V1)]
    binx_other = binx[grepl("\\[",V1)]
    dtx[[x_i]] = ifelse(
      dtx[[x_i]] %in% binx_sv$V1,
      dtx[[x_i]],
      as.character(cut(dtx[[x_i]], unique(c(-Inf, binx_other[bin != "missing", as.numeric(sub("^\\[(.*),.+", "\\1", V1))], Inf)), right = FALSE, dig.lab = 10, ordered_result = FALSE))
    )
  }
  ## to character; NA becomes the literal bin level "missing"
  dtx[[x_i]] = as.character(dtx[[x_i]])
  dtx[[x_i]] = ifelse(is.na(dtx[[x_i]]), "missing", dtx[[x_i]])
  ## add rowid column so the original row order can be restored after merge
  dtx = setDT(dtx)[, rowid := .I]
  # rename binx columns to (bin, <x_i>, <x_i>_<woe_points>)
  setnames(binx, c("bin", x_i, paste(x_i, woe_points, sep="_")))
  # left-join values onto their bins, restore row order, drop helper columns
  dtx_suffix = merge(setDF(dtx), setDF(binx), by=x_i, all.x = TRUE)
  dtx_suffix = setDT(dtx_suffix)[order(rowid)][, (c("rowid", "bin", x_i)) := NULL]
  return(dtx_suffix)
}
#' WOE Transformation
#'
#' \code{woebin_ply} converts original input data into woe values based on the binning information generated from \code{woebin}.
#'
#' @param dt A data frame.
#' @param bins Binning information generated from \code{woebin}.
#' @param no_cores Number of CPU cores for parallel computation. Defaults NULL. If no_cores is NULL, the no_cores will set as 1 if length of x variables less than 10, and will set as the number of all CPU cores if the length of x variables greater than or equal to 10.
#' @param print_step A non-negative integer. Default is 0. If print_step>0, variable names are printed at each print_step-th iteration. If print_step=0 or no_cores>1, no message is printed.
#' @param replace_blank_na Logical. Replace blank values with NA. Default is TRUE. This argument should be the same with \code{woebin}'s.
#' @param ... Additional parameters.
#'
#' @return A data frame with columns for variables converted into woe values.
#'
#' @seealso \code{\link{woebin}}, \code{\link{woebin_plot}}, \code{\link{woebin_adj}}
#'
#' @examples
#' # load germancredit data
#' data(germancredit)
#'
#' # Example I
#' dt = germancredit[, c("creditability", "credit.amount", "purpose")]
#'
#' # binning for dt
#' bins = woebin(dt, y = "creditability")
#'
#' # converting original value to woe
#' dt_woe = woebin_ply(dt, bins=bins)
#' str(dt_woe)
#'
#' \donttest{
#' # Example II
#' # binning for germancredit dataset
#' bins_germancredit = woebin(germancredit, y="creditability")
#'
#' # converting the values in germancredit to woe
#' # bins is a list which generated from woebin()
#' germancredit_woe = woebin_ply(germancredit, bins_germancredit)
#'
#' # bins is a data frame
#' bins_df = data.table::rbindlist(bins_germancredit)
#' germancredit_woe = woebin_ply(germancredit, bins_df)
#'
#' # return value is bin but not woe
#' germancredit_bin = woebin_ply(germancredit, bins_germancredit, value = 'bin')
#' }
#'
#' @import data.table
#' @export
#'
woebin_ply = function(dt, bins, no_cores=NULL, print_step=0L, replace_blank_na=TRUE, ...) {
  # Replace every binned x column of dt with its woe values (or bin labels,
  # via the hidden dot-arg value='bin'), using binning info from woebin().
  # Columns of dt that do not appear in bins pass through unchanged.
  # start time
  start_time = proc.time()
  # print info
  print_info = list(...)[['print_info']]
  if (is.null(print_info)) print_info = TRUE
  if (print_info) cat('[INFO] converting into woe values ... \n')
  # value: which column of the binning table to emit ('woe' or 'bin')
  value = list(...)[['value']]
  if (is.null(value) || !(value %in% c('woe', 'bin'))) value = 'woe'
  # global variables or functions (silence R CMD check NSE notes)
  . = V1 = bin = variable = woe = i = databc_colomun_placeholder = NULL
  # set dt as data.table (copy so the caller's object is not modified by reference)
  dt = setDT(copy(dt))
  # # remove date/time col
  # dt = rmcol_datetime_unique1(dt)
  # replace "" by NA
  if (replace_blank_na) dt = rep_blank_na(dt)
  # print_step
  print_step = check_print_step(print_step)
  # bins # if (is.list(bins)) rbindlist(bins)
  if (inherits(bins, 'list') && all(sapply(bins, is.data.frame))) {bins = rbindlist(bins)}
  bins = setDT(bins)
  # x variables: only those present in both the binning table and dt
  xs_bin = bins[,unique(variable)]
  xs_dt = names(dt)
  xs = intersect(xs_bin, xs_dt)
  # loop on x variables
  xs_len = length(xs)
  # initial data set: the non-binned columns plus a placeholder column whose
  # name is guaranteed (by the while loop) not to clash with any x variable;
  # it keeps cbind/.init working even when no other column remains
  n = 0
  while (paste0('dat_col_placeholder',n) %in% xs) n = n+1
  dt_init = copy(dt)[, (paste0('dat_col_placeholder',n)) := 1][,(xs) := NULL]
  # the placeholder column is removed from the result (see the last line),
  # in case dt_init would otherwise be an empty data.frame
  # loop on xs # https://www.r-bloggers.com/how-to-go-parallel-in-r-basics-tips/
  if (is.null(no_cores)) {
    no_cores = ifelse(xs_len < 10, 1, detectCores(logical=F))
  }
  if (no_cores == 1) {
    # sequential path: convert one column at a time and bind it on
    dat = dt_init
    for (i in 1:xs_len) {
      x_i = xs[i]
      # print x
      if (print_step > 0 & i %% print_step == 0) cat(paste0(format(c(i,xs_len)),collapse = "/"), x_i,"\n")
      binx = bins[variable==x_i]
      dtx = dt[, x_i, with=FALSE]
      dat = cbind(dat, woepoints_ply1(dtx, binx, x_i, woe_points=value))
    }
  } else {
    registerDoParallel(no_cores)
    # parallel path: each worker converts one column; results cbind onto dt_init
    dat <-
      foreach(
        i = 1:xs_len,
        .combine=cbind,
        .init = dt_init,
        .inorder = FALSE,
        .errorhandling = "pass",
        .export = c('dt', 'bins', 'xs')
      ) %dopar% {
        x_i = xs[i]
        binx = bins[variable==x_i]
        dtx = dt[, x_i, with=FALSE]
        woepoints_ply1(dtx, binx, x_i, woe_points=value)
      }
    # finish
    stopImplicitCluster()
  }
  # running time
  rs = proc.time() - start_time
  # report elapsed time only for long runs
  if (rs[3] > 10 & print_info) cat(sprintf("[INFO] Woe transformating on %s rows and %s columns in %s",nrow(dt),xs_len,sec_to_hms(rs[3])),"\n")
  return(dat[, (paste0('dat_col_placeholder',n)) := NULL])
}
# required in woebin_plot: builds the single-variable binning plot
#' @import data.table ggplot2
plot_bin = function(bin, title, show_iv) {
  # One ggplot for one variable's binning: stacked good/bad count bars on the
  # primary axis (count distribution) overlaid with the bad-probability line,
  # which is rescaled onto the primary axis and labelled via a secondary axis.
  # global variables or functions (silence R CMD check NSE notes)
  . = bad = badprob = badprob2 = count = count_distr = count_distr2 = count_num = good = goodbad = total_iv = value = variable = woe = NULL
  # data
  ## y_right_max: headroom for the secondary (bad probability) axis,
  ## rounded up to an even tenth and clamped to (0, 1]
  y_right_max = ceiling(max(bin$badprob, na.rm=T)*10)
  if (y_right_max %% 2 ==1) y_right_max=y_right_max+1
  if (y_right_max - max(bin$badprob, na.rm=T)*10 <= 0.3) y_right_max = y_right_max+2
  y_right_max = y_right_max/10
  if (y_right_max>1 || y_right_max<=0 || is.na(y_right_max) || is.null(y_right_max)) y_right_max=1
  ## y_left_max: headroom for the primary (count distribution) axis
  y_left_max = ceiling(max(bin$count_distr, na.rm=T)*10)/10
  if (y_left_max>1 || y_left_max<=0 || is.na(y_left_max) || is.null(y_left_max)) y_left_max=1
  ## data set: per-bin shares of good/bad, plus badprob2 = bad probability
  ## rescaled into the primary axis range
  bin = setDT(bin)
  dat = bin[,.(
    variable, bin, count_num=count, count_distr2=count_distr, count_distr, good=good/sum(count), bad=bad/sum(count), badprob, woe
  )][, `:=`(
    bin = ifelse(is.na(bin), "NA", bin),
    badprob2 = badprob*(y_left_max/y_right_max),
    badprob = round(badprob,4),
    rowid = .I
  )][, bin := factor(bin, levels = bin)]
  # long format feeding the stacked good/bad bars
  dat_melt = melt(dat, id.vars = c("variable", "bin","rowid"), measure.vars =c("good", "bad"), variable.name = "goodbad")[
    ,goodbad:=factor(goodbad, levels=c( "bad", "good"))
  ]
  # title, optionally suffixed with the variable's total information value
  if (!is.null(title)) title = paste0(title, "-")
  if (show_iv) {
    title_string = paste0(title, dat[1, variable]," (iv:",bin[1,round(total_iv,4)],")")
  } else {
    title_string = paste0(title, dat[1, variable])
  }
  # plot
  ggplot() +
    # geom_text(aes(label="@shichen.name/getpedr", x=dat[, x[.N], by=symbol][,V1[1]], y=Inf), vjust = -0.5, hjust = 1, color = "#F0F0F0") +
    # coord_cartesian(clip = 'off') +
    geom_bar(data=dat_melt, aes(x=bin, y=value, fill=goodbad), stat="identity") +
    geom_text(data=dat, aes(x = bin, y = count_distr2, label = paste0(round(count_distr2*100, 1), "%, ", count_num) ), vjust = 0.5) +
    geom_line(data=dat, aes(x = rowid, y = badprob2), colour = "blue") +
    geom_point(data=dat, aes(x = rowid, y=badprob2), colour = "blue", shape=21, fill="white") +
    geom_text(data=dat, aes(x = rowid, y = badprob2, label = paste0(round(badprob*100, 1), "%")), colour="blue", vjust = -0.5) +
    scale_y_continuous(limits = c(0,y_left_max), sec.axis = sec_axis(~./(y_left_max/y_right_max), name = "Bad probability")) +
    labs(title = title_string, x=NULL, y="Bin count distribution", fill=NULL) +
    theme_bw() +
    theme(
      legend.position="bottom", legend.direction="horizontal",
      axis.title.y.right = element_text(colour = "blue"),
      axis.text.y.right = element_text(colour = "blue",angle=90, hjust = 0.5),
      axis.text.y = element_text(angle=90, hjust = 0.5) )
}
#' WOE Binning Visualization
#'
#' \code{woebin_plot} creates plots of count distribution and bad probability for each bin. The binning information is generated by \code{woebin}.
#'
#' @name woebin_plot
#' @param bins A list of data frames. Binning information generated by \code{woebin}.
#' @param x Name of x variables. Default is NULL. If x is NULL, then all columns except y are counted as x variables.
#' @param title String added to the plot title. Default is NULL.
#' @param show_iv Logical. Default is TRUE, which means show information value in the plot title.
#'
#' @return A list of binning graphics.
#'
#' @seealso \code{\link{woebin}}, \code{\link{woebin_ply}}, \code{\link{woebin_adj}}
#'
#' @examples
#' # Load German credit data
#' data(germancredit)
#'
#' # Example I
#' bins1 = woebin(germancredit, y="creditability", x="credit.amount")
#'
#' p1 = woebin_plot(bins1)
#' print(p1)
#'
#' \donttest{
#' # Example II
#' bins = woebin(germancredit, y="creditability")
#' plotlist = woebin_plot(bins)
#' print(plotlist$credit.amount)
#'
#' # # save binning plot
#' # for (i in 1:length(plotlist)) {
#' # ggplot2::ggsave(
#' # paste0(names(plotlist[i]), ".png"), plotlist[[i]],
#' # width = 15, height = 9, units="cm" )
#' # }
#' }
#'
#' @import data.table ggplot2
#' @export
#'
woebin_plot = function(bins, x=NULL, title=NULL, show_iv = TRUE) {
  # Build one binning plot per variable and return them as a named list.
  #   bins    : woebin() output (list of data.frames, data.frame, or data.table)
  #   x       : variables to plot; NULL means every variable in bins
  #   title   : optional string prepended to each plot title
  #   show_iv : include the total information value in the title?
  # silence R CMD check note for the data.table NSE column
  variable = NULL
  # normalise bins into a single data.table regardless of input shape
  if (!is.data.table(bins)) {
    bins = if (is.data.frame(bins)) setDT(bins) else rbindlist(bins)
  }
  # default to plotting every binned variable
  xs = if (is.null(x)) bins[, unique(variable)] else x
  # one plot per variable, result list named by variable
  plotlist = lapply(xs, function(xi) plot_bin(bins[variable == xi], title, show_iv))
  names(plotlist) = xs
  return(plotlist)
}
# print basic information in woebin_adj for the i-th adjustable variable:
# its class, summary, a frequency table (or histogram for high-cardinality
# numerics), the current breaks, and the binning plot
woebin_adj_print_basic_info = function(i, xs_adj, bins, dt, bins_breakslist) {
  x_i = xs_adj[i]
  xs_len = length(xs_adj)
  variable = x_breaks = NULL
  bin = bins[variable==x_i]
  # header: progress position and variable name
  cat("--------", paste0(i, "/", xs_len), x_i, "--------\n")
  ## class
  cat(paste0("> class(",x_i,"): "),"\n",class(dt[[x_i]]),"\n","\n")
  ## summary
  cat(paste0("> summary(",x_i,"): "),"\n")
  print(summary(dt[[x_i]]))
  cat("\n")
  ## frequency table for categorical / low-cardinality x; histogram otherwise
  if (length(table(dt[[x_i]])) < 10 || !is.numeric(dt[[x_i]])) {
    cat(paste0("> table(",x_i,"): "))
    print(table(dt[[x_i]]))
    cat("\n")
  } else {
    if ( is.numeric(dt[[x_i]])) {
      ht = hist(dt[[x_i]], plot = FALSE)
      plot(ht, main = x_i, xlab = NULL)
    }
  }
  ## current breaks
  breaks_bin = bins_breakslist[variable == x_i, x_breaks]
  cat("> Current breaks: \n", breaks_bin,"\n \n")
  ## woebin plotting
  plist = woebin_plot(bin)
  print(plist[[1]])
}
# plot adjusted binning in woebin_adj: re-bin one variable with the
# user-supplied breaks (or stop_limit="N") and print the resulting plot;
# returns the breaks string actually in use
woebin_adj_break_plot = function(dt, y, x_i, breaks, stop_limit, sv_i, method) {
  bin_adj = NULL
  # Assemble and run the woebin call as text. NOTE(review): eval(parse())
  # interpolates breaks/sv_i verbatim, so malformed input can execute
  # arbitrary code; acceptable for this interactive tool, but never feed
  # it untrusted strings.
  text_woebin = paste0("bin_adj=woebin(dt[,c(\"",x_i,"\",\"",y,"\"),with=F], \"",y,"\", breaks_list=list(",x_i,"=c(",breaks,")), special_values =list(",x_i,"=c(", sv_i, ")), ", ifelse(stop_limit=="N","stop_limit = \"N\", ",NULL), "print_step=0L, print_info=FALSE, method=\"",method,"\")")
  eval(parse(text = text_woebin))
  ## print adjusted breaks: the upper bound of each interval (plus any
  ## %,%missing suffix), excluding -Inf/Inf/missing
  breaks_bin = setdiff(sub("^\\[(.*), *(.*)\\)((%,%missing)*)", "\\2\\3", bin_adj[[1]]$bin), c("-Inf","Inf","missing"))
  breaks_bin = ifelse(
    is.numeric(dt[[x_i]]),
    paste0(breaks_bin, collapse=", "),
    paste0(paste0("\"",breaks_bin,"\""), collapse=", "))
  cat("> Current breaks: ","\n",breaks_bin,"\n","\n")
  # print bin_adj
  print(woebin_plot(bin_adj)[[1]])
  # FIX: test is.null() first. On the stop_limit="N" path breaks is NULL,
  # and the old order evaluated `NULL == ""` (logical(0)) as the first
  # operand of `||`, which errors on R >= 4.3.
  if (is.null(breaks) || breaks == "") breaks = breaks_bin
  return(breaks)
}
#' WOE Binning Adjustment
#'
#' \code{woebin_adj} interactively adjusts the binning breaks.
#'
#' @param dt A data frame.
#' @param y Name of y variable.
#' @param bins A list of data frames. Binning information generated from \code{woebin}.
#' @param adj_all_var Logical, whether to show variables have monotonic woe trends. Default is TRUE
#' @param special_values The values specified in special_values will in separate bins. Default is NULL.
#' @param method Optimal binning method, it should be "tree" or "chimerge". Default is "tree".
#' @param save_breaks_list A string. The file name to save breaks_list. Default is NULL.
#' @param count_distr_limit The minimum count distribution percentage. Accepted range: 0.01-0.2; default is 0.05. This argument should be the same with woebin's.
#'
#' @return A list of modified break points of each x variables.
#'
#' @seealso \code{\link{woebin}}, \code{\link{woebin_ply}}, \code{\link{woebin_plot}}
#'
#' @examples
#' \donttest{
#' # Load German credit data
#' data(germancredit)
#'
#' # Example I
#' dt = germancredit[, c("creditability", "age.in.years", "credit.amount")]
#' bins = woebin(dt, y="creditability")
#' breaks_adj = woebin_adj(dt, y="creditability", bins)
#' bins_final = woebin(dt, y="creditability",
#' breaks_list=breaks_adj)
#'
#' # Example II
#' binsII = woebin(germancredit, y="creditability")
#' breaks_adjII = woebin_adj(germancredit, "creditability", binsII)
#' bins_finalII = woebin(germancredit, y="creditability",
#' breaks_list=breaks_adjII)
#' }
#'
#' @import data.table
#' @importFrom utils menu
#' @importFrom graphics hist plot
#' @export
woebin_adj = function(dt, y, bins, adj_all_var=TRUE, special_values=NULL, method="tree", save_breaks_list=NULL, count_distr_limit = 0.05) {
  # Interactive console loop over the adjustable variables: show each one's
  # current binning, let the user enter new breaks ("N" re-bins with the
  # early stop disabled), move to the next variable, or step back; returns
  # the final breaks_list as a printable string.
  # global variables or functions (silence R CMD check NSE notes)
  . = V1 = badprob = badprob2 = bin2 = bin = bin_adj = count_distr = variable = x_breaks = x_class = NULL
  dt = setDT(copy(dt))
  # bins # if (is.list(bins)) rbindlist(bins)
  if (!is.data.table(bins)) {
    if (is.data.frame(bins)) {
      bins = setDT(bins)
    } else {
      bins = rbindlist(bins)
    }
  }
  # x variables to adjust: all of them, or (adj_all_var=FALSE) only those
  # whose bad-probability trend is not monotone after dropping small
  # missing bins
  xs_all = bins[,unique(variable)]
  if (adj_all_var == FALSE) {
    xs_adj = bins[
      !(bin == "missing" & count_distr >= count_distr_limit)
    ][, badprob2 := badprob >= shift(badprob, type = "lag"), by=variable
    ][!is.na(badprob2), length(unique(badprob2)), by=variable
    ][V1 > 1, variable]
  } else {
    xs_adj = xs_all
  }
  # length of adjusting variables
  xs_len = length(xs_adj)
  # special_values
  special_values = check_special_values(special_values, xs_adj)
  # breakslist of bins
  bins_breakslist = bins_to_breaks(bins, dt)
  # nothing to adjust: return the current breaks_list string as-is
  if (xs_len == 0) {
    warning("The binning breaks of all variables are perfect according to default settings.")
    breaks_list = paste0(bins_breakslist[, paste0(variable, "=c(", x_breaks, ")")], collapse = ", \n ")
    breaks_list = paste0(c("list(", breaks_list, ")"), collapse = "\n ")
    return(breaks_list)
  }
  i = 1
  breaks_list = NULL
  while (i <= xs_len) {
    # x variable
    breaks = stop_limit = NULL
    x_i = xs_adj[i]
    sv_i = paste(paste0("\'",special_values[[x_i]],"\'"), collapse = ",")
    # basic information of x_i variable ------
    woebin_adj_print_basic_info(i, xs_adj, bins, dt, bins_breakslist)
    # adjusting breaks ------
    adj_brk = menu(c("next", "yes", "back"), title=paste0("> Adjust breaks for (", i, "/", xs_len, ") ", x_i, "?"))
    while (adj_brk == 2) {
      # modify breaks adj_brk == 2
      breaks = readline("> Enter modified breaks: ")
      breaks = gsub("^[,\\.]+|[,\\.]+$", "", breaks)
      # "N" means: re-bin with the IV early stop disabled instead of
      # providing explicit breaks
      if (breaks == "N") {
        stop_limit = "N"
        breaks = NULL
      } else {
        stop_limit = NULL
      }
      # plotting errors (e.g. unparsable breaks) are swallowed so the
      # interactive session keeps running
      tryCatch(breaks <- woebin_adj_break_plot(dt, y, x_i, breaks, stop_limit, sv_i, method=method), error = function(e) e)
      adj_brk = menu(c("next", "yes", "back"), title=paste0("> Adjust breaks for (", i, "/", xs_len, ") ", x_i, "?"))
    }
    if (adj_brk == 3) {
      # go back adj_brk == 3
      i = ifelse(i > 1, i-1, i)
    } else {
      # go next adj_brk == 1
      # NOTE(review): the user's breaks are persisted via the chained
      # replacement DT[cond][["col"]] <- value; verify this assignment
      # actually reaches bins_breakslist (chained replacement on a
      # data.table subset may operate on a copy).
      if (!(is.null(breaks) || breaks == "")) bins_breakslist[variable == x_i][["x_breaks"]] <- breaks
      i = i + 1
    }
  }
  # render the accumulated breaks as an R list literal, echo it, and
  # optionally save it to file via a fresh woebin run
  breaks_list = paste0(bins_breakslist[, paste0(variable, "=c(", x_breaks, ")")], collapse = ", \n ")
  breaks_list = paste0(c("list(", breaks_list, ")"), collapse = "\n ")
  cat(breaks_list,"\n")
  if (!is.null(save_breaks_list)) {
    bins_adj = woebin(dt, y, x=bins_breakslist[,variable], breaks_list=breaks_list, print_info=FALSE)
    bins_to_breaks(bins_adj, dt, to_string=TRUE, save_name=save_breaks_list)
  }
  return(breaks_list)
}
|
## RUnit fixture, run before each test: create a fresh Synapse project and
## cache it under "testProject" so the tests and .tearDown can retrieve it.
.setUp <-
function()
{
## create a project to fill with entities
project <- createEntity(Project())
synapseClient:::.setCache("testProject", project)
}
## RUnit fixture, run after each test: delete the cached test project so
## every test starts from a clean Synapse state.
.tearDown <-
function()
{
## delete the test project
deleteEntity(synapseClient:::.getCache("testProject"))
}
#
# This code exercises the file services underlying upload/download to/from an entity
#
integrationTestEntityFileAccess <-
function()
{
  ## Exercises the file services underlying entity upload/download:
  ## upload a file handle, attach it to a FileEntity, download it back,
  ## verify the bytes survived the round trip, then clean up.
  # the project created by .setUp
  project <- synapseClient:::.getCache("testProject")
  checkTrue(!is.null(project))
  # create a file attachment which will be used in the wiki page:
  # upload a file shipped with the package and receive its file handle
  fileName <- "NAMESPACE"
  filePath <- system.file(fileName, package = "synapseClient")
  fileHandle <- synapseClient:::synapseUploadToFileHandle(filePath)
  # create an entity wrapping the uploaded file
  entity <- list(
    entityType = "org.sagebionetworks.repo.model.FileEntity", # doesn't work for 'Data'
    name = "foo",
    parentId = propertyValue(project, "id"),
    dataFileHandleId = fileHandle$id)
  entity <- synapseClient:::synapsePost("/entity", entity)
  # download the file via /entity/{entityId}/file into a temp destination
  downloadUri <- sprintf("/entity/%s/file", entity$id)
  downloadedFile <- synapseClient:::synapseDownloadFromRepoServiceToDestination(downloadUri)
  # the downloaded bytes must match the original exactly
  origChecksum <- as.character(tools::md5sum(filePath))
  downloadedChecksum <- as.character(tools::md5sum(downloadedFile))
  checkEquals(origChecksum, downloadedChecksum)
  # clean up: delete the entity, then its file handle
  deleteEntity(entity$id)
  # FIX: the handle DELETE was previously issued twice; the second call
  # targeted an already-removed handle and would fail.
  handleUri <- sprintf("/fileHandle/%s", fileHandle$id)
  synapseClient:::synapseDelete(handleUri, service="FILE")
}
|
/inst/integrationTests/test_entityFileAccess.R
|
no_license
|
cbare/rSynapseClient
|
R
| false
| false
| 1,880
|
r
|
## RUnit fixture, run before each test: create a fresh Synapse project and
## cache it under "testProject" so the tests and .tearDown can retrieve it.
.setUp <-
function()
{
## create a project to fill with entities
project <- createEntity(Project())
synapseClient:::.setCache("testProject", project)
}
## RUnit fixture, run after each test: delete the cached test project so
## every test starts from a clean Synapse state.
.tearDown <-
function()
{
## delete the test project
deleteEntity(synapseClient:::.getCache("testProject"))
}
#
# This code exercises the file services underlying upload/download to/from an entity
#
integrationTestEntityFileAccess <-
function()
{
  ## Exercises the file services underlying entity upload/download:
  ## upload a file handle, attach it to a FileEntity, download it back,
  ## verify the bytes survived the round trip, then clean up.
  # the project created by .setUp
  project <- synapseClient:::.getCache("testProject")
  checkTrue(!is.null(project))
  # create a file attachment which will be used in the wiki page:
  # upload a file shipped with the package and receive its file handle
  fileName <- "NAMESPACE"
  filePath <- system.file(fileName, package = "synapseClient")
  fileHandle <- synapseClient:::synapseUploadToFileHandle(filePath)
  # create an entity wrapping the uploaded file
  entity <- list(
    entityType = "org.sagebionetworks.repo.model.FileEntity", # doesn't work for 'Data'
    name = "foo",
    parentId = propertyValue(project, "id"),
    dataFileHandleId = fileHandle$id)
  entity <- synapseClient:::synapsePost("/entity", entity)
  # download the file via /entity/{entityId}/file into a temp destination
  downloadUri <- sprintf("/entity/%s/file", entity$id)
  downloadedFile <- synapseClient:::synapseDownloadFromRepoServiceToDestination(downloadUri)
  # the downloaded bytes must match the original exactly
  origChecksum <- as.character(tools::md5sum(filePath))
  downloadedChecksum <- as.character(tools::md5sum(downloadedFile))
  checkEquals(origChecksum, downloadedChecksum)
  # clean up: delete the entity, then its file handle
  deleteEntity(entity$id)
  # FIX: the handle DELETE was previously issued twice; the second call
  # targeted an already-removed handle and would fail.
  handleUri <- sprintf("/fileHandle/%s", fileHandle$id)
  synapseClient:::synapseDelete(handleUri, service="FILE")
}
|
# Worked PCA example on a 2-D point cloud (Lindsay Smith tutorial data):
# centre the data, build the covariance matrix by hand, eigendecompose it.

# raw observations
x = c(2.5, 0.5, 2.2, 1.9, 3.1, 2.3, 2, 1, 1.5, 1.1)
y = c(2.4, 0.7, 2.9, 2.2, 3.0, 2.7, 1.6, 1.1, 1.6, 0.9)

# centre each dimension on its mean
mean_x = mean(x)
mean_y = mean(y)
x_minus_mean = x - mean_x
y_minus_mean = y - mean_y

# 2x2 covariance matrix assembled column-wise; equivalent to cov(cbind(x, y))
cov_matrix = matrix(c(cov(x, x), cov(y, x),
                      cov(x, y), cov(y, y)),
                    nrow = 2, ncol = 2)

# principal components: eigenvalues are the variances along each component,
# eigenvectors are the component directions (sorted by decreasing eigenvalue)
eigen_decomp = eigen(cov_matrix)
eigen_values = eigen_decomp$values
eigen_vector = eigen_decomp$vector
|
/trials/R/pca_understanding.R
|
no_license
|
sahilmakkar1983/PocAndTrials
|
R
| false
| false
| 502
|
r
|
# Worked PCA example on a 2-D point cloud (Lindsay Smith tutorial data):
# centre the data, build the covariance matrix by hand, eigendecompose it.

# raw observations
x = c(2.5, 0.5, 2.2, 1.9, 3.1, 2.3, 2, 1, 1.5, 1.1)
y = c(2.4, 0.7, 2.9, 2.2, 3.0, 2.7, 1.6, 1.1, 1.6, 0.9)

# centre each dimension on its mean
mean_x = mean(x)
mean_y = mean(y)
x_minus_mean = x - mean_x
y_minus_mean = y - mean_y

# 2x2 covariance matrix assembled column-wise; equivalent to cov(cbind(x, y))
cov_matrix = matrix(c(cov(x, x), cov(y, x),
                      cov(x, y), cov(y, y)),
                    nrow = 2, ncol = 2)

# principal components: eigenvalues are the variances along each component,
# eigenvectors are the component directions (sorted by decreasing eigenvalue)
eigen_decomp = eigen(cov_matrix)
eigen_values = eigen_decomp$values
eigen_vector = eigen_decomp$vector
|
dvStudyAtom <-
function( objectid, dv=getOption('dvn'), user=getOption('dvn.user'),
          pwd=getOption('dvn.pwd'), browser=FALSE, ...){
    # Fetch the SWORD Atom entry for a Dataverse study and parse it.
    #   objectid : study identifier appended to the edit/study/ query
    #   dv, user, pwd : Dataverse host and credentials (defaults from options)
    #   browser  : if TRUE, the query is opened in a browser and not parsed
    # Returns a 'dvStudyAtom' list, or invisible(NULL) when the query fails.
    # FIX: use the scalar short-circuit || (not the vectorized |) for a
    # single-condition if() check
    if(is.null(user) || is.null(pwd))
        stop('Must specify username (`user`) and password (`pwd`)')
    xml <- dvDepositQuery(query=paste('edit/study/',objectid,sep=''), user=user, pwd=pwd, dv=dv, browser=browser, ...)
    if(is.null(xml))
        invisible(NULL)
    else if(browser==FALSE)
        .dvParseAtom(xml)
}
.dvParseAtom <- function(xml){
    # Convert a study Atom XML document into a 'dvStudyAtom' list, keeping
    # the citation, generator info, study URI, and the raw XML itself.
    parsed <- xmlToList(xml)
    out <- list(bibliographicCitation = parsed$bibliographicCitation,
                generator = parsed$generator,
                id = parsed$id)
    # the study handle is everything after 'study/' in the study URI
    out$objectId <- strsplit(out$id, 'study/')[[1]][2]
    out$xml <- xml
    class(out) <- c(class(out), 'dvStudyAtom')
    out
}
print.dvStudyAtom <- function(x, ...){
    # Pretty-print a dvStudyAtom: one labelled line per field, then return
    # the object invisibly (as S3 print methods should).
    lines <- c(
        paste('Citation: ', x$bibliographicCitation, '\n'),
        paste('ObjectId: ', x$objectId, '\n'),
        paste('Study URI: ', x$id, '\n'),
        paste('Generated by: ', x$generator['uri'], x$generator['version'], '\n'))
    cat(lines, sep = '')
    invisible(x)
}
|
/R/dvStudyAtom.r
|
no_license
|
imclab/dvn
|
R
| false
| false
| 1,114
|
r
|
dvStudyAtom <-
function( objectid, dv=getOption('dvn'), user=getOption('dvn.user'),
          pwd=getOption('dvn.pwd'), browser=FALSE, ...){
    # Fetch the SWORD Atom entry for a Dataverse study and parse it.
    #   objectid : study identifier appended to the edit/study/ query
    #   dv, user, pwd : Dataverse host and credentials (defaults from options)
    #   browser  : if TRUE, the query is opened in a browser and not parsed
    # Returns a 'dvStudyAtom' list, or invisible(NULL) when the query fails.
    # FIX: use the scalar short-circuit || (not the vectorized |) for a
    # single-condition if() check
    if(is.null(user) || is.null(pwd))
        stop('Must specify username (`user`) and password (`pwd`)')
    xml <- dvDepositQuery(query=paste('edit/study/',objectid,sep=''), user=user, pwd=pwd, dv=dv, browser=browser, ...)
    if(is.null(xml))
        invisible(NULL)
    else if(browser==FALSE)
        .dvParseAtom(xml)
}
.dvParseAtom <- function(xml){
    # Convert a study Atom XML document into a 'dvStudyAtom' list, keeping
    # the citation, generator info, study URI, and the raw XML itself.
    parsed <- xmlToList(xml)
    out <- list(bibliographicCitation = parsed$bibliographicCitation,
                generator = parsed$generator,
                id = parsed$id)
    # the study handle is everything after 'study/' in the study URI
    out$objectId <- strsplit(out$id, 'study/')[[1]][2]
    out$xml <- xml
    class(out) <- c(class(out), 'dvStudyAtom')
    out
}
print.dvStudyAtom <- function(x, ...){
    # Pretty-print a dvStudyAtom: one labelled line per field, then return
    # the object invisibly (as S3 print methods should).
    lines <- c(
        paste('Citation: ', x$bibliographicCitation, '\n'),
        paste('ObjectId: ', x$objectId, '\n'),
        paste('Study URI: ', x$id, '\n'),
        paste('Generated by: ', x$generator['uri'], x$generator['version'], '\n'))
    cat(lines, sep = '')
    invisible(x)
}
|
# Fuzz/regression fixture (AFL + valgrind generated): run myTAI's internal
# C++ routine cpp_geom_mean on a fixed input -- a 21-element vector whose
# first entry is a subnormal double and the rest zeros -- and print the
# structure of whatever it returns.
testlist <- list(x = c(2.08654997741738e-308, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(myTAI:::cpp_geom_mean,testlist)
str(result)
|
/myTAI/inst/testfiles/cpp_geom_mean/AFL_cpp_geom_mean/cpp_geom_mean_valgrind_files/1615843232-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 169
|
r
|
# Fuzz/regression fixture (AFL + valgrind generated): run myTAI's internal
# C++ routine cpp_geom_mean on a fixed input -- a 21-element vector whose
# first entry is a subnormal double and the rest zeros -- and print the
# structure of whatever it returns.
testlist <- list(x = c(2.08654997741738e-308, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(myTAI:::cpp_geom_mean,testlist)
str(result)
|
# Data-cleaning course exercises: woodpecker counts, snail feeding data,
# cats, and sparrows datasets.
# count green ("verde") woodpeckers
V1 <- read.table('Pica-pau.txt', header = TRUE, dec='.')
sum(V1$especie=="verde")
# snail feeding data: load from Dropbox and keep the first 7 columns
Caracol_data <- as.data.frame(read.csv(file="https://www.dropbox.com/s/9wnr69i6bjhqyct/Snail_feeding.csv?dl=1",
                               header = T,
                               strip.white = T,
                               na.strings = ""))
str(Caracol_data)
Caracol_data <- Caracol_data[,1:7]
# normalise the Sex labels to "male"/"female"
Caracol_data$Sex <- replace(as.character(Caracol_data$Sex), Caracol_data$Sex == "males", "male")
Caracol_data$Sex <- replace(as.character(Caracol_data$Sex), Caracol_data$Sex == "Male", "male")
Caracol_data$Sex <- replace(as.character(Caracol_data$Sex), Caracol_data$Sex == "female s", "female")
unique(Caracol_data$Sex)
# coerce Distance to numeric and patch the two values that failed
Caracol_data$Distance <- as.numeric(Caracol_data$Distance)
which(is.na(Caracol_data$Distance))
Caracol_data[682, "Distance"] <- 0.58
Caracol_data[755, "Distance"] <- 0.356452
# drop duplicated rows
which(duplicated(Caracol_data))
index <- which(duplicated(Caracol_data))
Caracol_data <- Caracol_data[-index, ]
summary(Caracol_data)
# correct an implausible Depth value (> 2) at row 8, column 6, then summarise
Caracol_data[which(Caracol_data$Depth > 2),]
Caracol_data[8, 6] <- 1.62
mean(Caracol_data$Depth, na.rm = T)
# maximum distance travelled by females
sb <- subset(x=Caracol_data, subset = Sex=="female")
max(sb$Distance)
# -------------------------------------
# Question 1: mean body weight of male cats
catsM <- as.data.frame(read.csv('https://www.dropbox.com/s/w4xv9urbowbig3s/catsM.csv?dl=1'))
str(catsM)
mean(catsM$Bwt, na.rm=TRUE)
#--------------------------------------------
# sparrows data: note the read.csv immediately overwrites the read.table result
Sparrows <- read.table('https://www.dropbox.com/s/jci311cfsj6uva7/Sparrows.csv?dl=1', header = T, sep=",")
Sparrows <- as.data.frame(read.csv('https://www.dropbox.com/s/jci311cfsj6uva7/Sparrows.csv?dl=1'),
                          header = T,
                          strip.white = T,
                          na.strings = "")
# drop duplicated rows
indexes <- which(duplicated(Sparrows))
Sparrows <- Sparrows[-indexes, ]
str(Sparrows)
# NOTE(review): Sparrows_tab is never defined anywhere in this script, so
# this View() call errors -- confirm which object was meant
View(Sparrows_tab)
View(Sparrows)
# head-size range for the SSTS species (the second max handles NAs)
head_SSTS <- Sparrows[Sparrows$Species == "SSTS",]$Head
min(head_SSTS)
max(head_SSTS, na.rm = T)
# normalise the Sex labels to "Male"/"Female"
unique(Sparrows$Sex)
Sparrows$Sex <- replace(as.character(Sparrows$Sex), Sparrows$Sex == "Males", "Male")
Sparrows$Sex <- replace(as.character(Sparrows$Sex), Sparrows$Sex == "Femal", "Female")
Sparrows$Sex <- replace(as.character(Sparrows$Sex), Sparrows$Sex == "Femal e", "Female")
# median tarsus length by sex
median(Sparrows[Sparrows$Sex == "Female","Tarsus"])
median(Sparrows[Sparrows$Sex == "Male","Tarsus"])
# impute the missing wing lengths, then take the overall median
which(is.na(Sparrows$Wing))
Sparrows$Wing[62] <- 59
Sparrows$Wing[247] <- 56.5
Sparrows$Wing[803] <- 57
median(Sparrows$Wing)
# replace the single missing head value and sort by wing then head
which(is.na(Sparrows$Head))
Sparrows$Head[811] <- 0
Sparrows_Ordenado <- Sparrows[order(Sparrows$Wing, Sparrows$Head),]
|
/exercio7.R
|
no_license
|
Tiagoblima/r-course-ufrpe
|
R
| false
| false
| 2,678
|
r
|
# Data-cleaning course exercises: woodpecker counts, snail feeding data,
# cats, and sparrows datasets.
# count green ("verde") woodpeckers
V1 <- read.table('Pica-pau.txt', header = TRUE, dec='.')
sum(V1$especie=="verde")
# snail feeding data: load from Dropbox and keep the first 7 columns
Caracol_data <- as.data.frame(read.csv(file="https://www.dropbox.com/s/9wnr69i6bjhqyct/Snail_feeding.csv?dl=1",
                               header = T,
                               strip.white = T,
                               na.strings = ""))
str(Caracol_data)
Caracol_data <- Caracol_data[,1:7]
# normalise the Sex labels to "male"/"female"
Caracol_data$Sex <- replace(as.character(Caracol_data$Sex), Caracol_data$Sex == "males", "male")
Caracol_data$Sex <- replace(as.character(Caracol_data$Sex), Caracol_data$Sex == "Male", "male")
Caracol_data$Sex <- replace(as.character(Caracol_data$Sex), Caracol_data$Sex == "female s", "female")
unique(Caracol_data$Sex)
# coerce Distance to numeric and patch the two values that failed
Caracol_data$Distance <- as.numeric(Caracol_data$Distance)
which(is.na(Caracol_data$Distance))
Caracol_data[682, "Distance"] <- 0.58
Caracol_data[755, "Distance"] <- 0.356452
# drop duplicated rows
which(duplicated(Caracol_data))
index <- which(duplicated(Caracol_data))
Caracol_data <- Caracol_data[-index, ]
summary(Caracol_data)
# correct an implausible Depth value (> 2) at row 8, column 6, then summarise
Caracol_data[which(Caracol_data$Depth > 2),]
Caracol_data[8, 6] <- 1.62
mean(Caracol_data$Depth, na.rm = T)
# maximum distance travelled by females
sb <- subset(x=Caracol_data, subset = Sex=="female")
max(sb$Distance)
# -------------------------------------
# Question 1: mean body weight of male cats
catsM <- as.data.frame(read.csv('https://www.dropbox.com/s/w4xv9urbowbig3s/catsM.csv?dl=1'))
str(catsM)
mean(catsM$Bwt, na.rm=TRUE)
#--------------------------------------------
# sparrows data: note the read.csv immediately overwrites the read.table result
Sparrows <- read.table('https://www.dropbox.com/s/jci311cfsj6uva7/Sparrows.csv?dl=1', header = T, sep=",")
Sparrows <- as.data.frame(read.csv('https://www.dropbox.com/s/jci311cfsj6uva7/Sparrows.csv?dl=1'),
                          header = T,
                          strip.white = T,
                          na.strings = "")
# drop duplicated rows
indexes <- which(duplicated(Sparrows))
Sparrows <- Sparrows[-indexes, ]
str(Sparrows)
# NOTE(review): Sparrows_tab is never defined anywhere in this script, so
# this View() call errors -- confirm which object was meant
View(Sparrows_tab)
View(Sparrows)
# head-size range for the SSTS species (the second max handles NAs)
head_SSTS <- Sparrows[Sparrows$Species == "SSTS",]$Head
min(head_SSTS)
max(head_SSTS, na.rm = T)
# normalise the Sex labels to "Male"/"Female"
unique(Sparrows$Sex)
Sparrows$Sex <- replace(as.character(Sparrows$Sex), Sparrows$Sex == "Males", "Male")
Sparrows$Sex <- replace(as.character(Sparrows$Sex), Sparrows$Sex == "Femal", "Female")
Sparrows$Sex <- replace(as.character(Sparrows$Sex), Sparrows$Sex == "Femal e", "Female")
# median tarsus length by sex
median(Sparrows[Sparrows$Sex == "Female","Tarsus"])
median(Sparrows[Sparrows$Sex == "Male","Tarsus"])
# impute the missing wing lengths, then take the overall median
which(is.na(Sparrows$Wing))
Sparrows$Wing[62] <- 59
Sparrows$Wing[247] <- 56.5
Sparrows$Wing[803] <- 57
median(Sparrows$Wing)
# replace the single missing head value and sort by wing then head
which(is.na(Sparrows$Head))
Sparrows$Head[811] <- 0
Sparrows_Ordenado <- Sparrows[order(Sparrows$Wing, Sparrows$Head),]
|
# Likelihood optimisation of the BiChrom chromosome-evolution model for one
# simulated 50-taxon tree (replicate 73): minimises the negative
# log-likelihood with a derivative-free subplex search and writes the
# optimum to CSV.
library( "ape" )
library( "geiger" )
library( "expm" )
library( "nloptr" )
# model code: likelihood, Q-matrix construction, and pruning algorithm
source( "masternegloglikeeps1.R" )
source( "Qmatrixwoodherb2.R" )
source("Pruning2.R")
# simulated phylogeny and chromosome counts for this replicate
sim.tree<-read.tree("tree50time73.txt")
sim.chrom<-read.table("chrom50time73.txt", header=FALSE)
# largest chromosome number allowed in the state space
last.state=50
# starting point for the 10 model parameters, on the log scale
x.0<- log(c(0.12, 0.001, 0.25, 0.002,0.036, 0.006, 0.04,0.02, 1.792317852, 1.57e-14))
# uniform root prior over the 2*(last.state+1) states
p.0<-rep(1,2*(last.state+1))/(2*(last.state+1))
# slots 1-10: optimal parameters; slot 11: objective value at the optimum
results<-rep(0,11)
# derivative-free subplex search with a large time budget, capped at 1000 evaluations
my.options<-list("algorithm"= "NLOPT_LN_SBPLX","ftol_rel"=1e-08,"print_level"=1,"maxtime"=170000000, "maxeval"=1000)
mle<-nloptr(x0=x.0,eval_f=negloglikelihood.wh,opts=my.options,bichrom.phy=sim.tree, bichrom.data=sim.chrom,max.chromosome=last.state,pi.0=p.0)
print(mle)
results[1:10]<-mle$solution
results[11]<-mle$objective
write.table(results,file="globalmax50tree73.csv",sep=",")
|
/Simulations tree height/50 my/optim50tree73.R
|
no_license
|
roszenil/Bichromdryad
|
R
| false
| false
| 821
|
r
|
# Maximum-likelihood fit of the BiChrom (chromosome number + woody/herbaceous
# state) model on one simulated 50-taxon tree using derivative-free Subplex
# optimisation; writes the MLE parameters and objective value to CSV.
library( "ape" )
library( "geiger" )
library( "expm" )
library( "nloptr" )
# presumably defines negloglikelihood.wh used below — confirm
source( "masternegloglikeeps1.R" )
source( "Qmatrixwoodherb2.R" )
source("Pruning2.R")
sim.tree<-read.tree("tree50time73.txt")
sim.chrom<-read.table("chrom50time73.txt", header=FALSE)
# Largest chromosome number considered; state space has 2*(last.state+1) states.
last.state=50
# Starting values (log scale) for the ten model parameters.
x.0<- log(c(0.12, 0.001, 0.25, 0.002,0.036, 0.006, 0.04,0.02, 1.792317852, 1.57e-14))
# Uniform prior over all 2*(last.state+1) root states.
p.0<-rep(1,2*(last.state+1))/(2*(last.state+1))
results<-rep(0,11)
my.options<-list("algorithm"= "NLOPT_LN_SBPLX","ftol_rel"=1e-08,"print_level"=1,"maxtime"=170000000, "maxeval"=1000)
mle<-nloptr(x0=x.0,eval_f=negloglikelihood.wh,opts=my.options,bichrom.phy=sim.tree, bichrom.data=sim.chrom,max.chromosome=last.state,pi.0=p.0)
print(mle)
# Entries 1-10: MLE parameters (log scale); entry 11: minimised objective.
results[1:10]<-mle$solution
results[11]<-mle$objective
write.table(results,file="globalmax50tree73.csv",sep=",")
|
## Programming Assignment 2: Lexical Scoping
## Data Science Specialization Track
## Name: Jose Alberto Valdez Crespo
## Date: June 18, 2015

## makeCacheMatrix builds a special "matrix" object: a list of closures that
## share the matrix `x` and a cached inverse via lexical scoping.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL means "not computed yet"

  ## Replace the stored matrix and invalidate the cached inverse.
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }

  ## Return the stored matrix.
  get <- function() {
    x
  }

  ## Store / retrieve the cached inverse.
  setinv <- function(solve) {
    inv <<- solve
  }
  getinv <- function() {
    inv
  }

  ## Expose the four accessors.
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}

## cacheSolve returns the inverse of the special "matrix" created by
## makeCacheMatrix, computing it at most once: subsequent calls for the
## same (unchanged) matrix come from the cache.
cacheSolve <- function(x = matrix(), ...) {
  inv <- x$getinv()
  if (is.null(inv)) {
    ## Cache miss: invert the stored matrix and remember the result.
    inv <- solve(x$get(), ...)
    x$setinv(inv)
  } else {
    message("Pulling data from cache")
  }
  inv
}
|
/cachematrix.R
|
no_license
|
javcsjc/ProgrammingAssignment2
|
R
| false
| false
| 1,863
|
r
|
## Programming Assignment 2: Lexical Scoping
## Data Science Specialization Track
## Name: Jose Alberto Valdez Crespo
## Date: June 18, 2015

## makeCacheMatrix builds a special "matrix" object: a list of closures that
## share the matrix `x` and a cached inverse via lexical scoping.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL means "not computed yet"

  ## Replace the stored matrix and invalidate the cached inverse.
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }

  ## Return the stored matrix.
  get <- function() {
    x
  }

  ## Store / retrieve the cached inverse.
  setinv <- function(solve) {
    inv <<- solve
  }
  getinv <- function() {
    inv
  }

  ## Expose the four accessors.
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}

## cacheSolve returns the inverse of the special "matrix" created by
## makeCacheMatrix, computing it at most once: subsequent calls for the
## same (unchanged) matrix come from the cache.
cacheSolve <- function(x = matrix(), ...) {
  inv <- x$getinv()
  if (is.null(inv)) {
    ## Cache miss: invert the stored matrix and remember the result.
    inv <- solve(x$get(), ...)
    x$setinv(inv)
  } else {
    message("Pulling data from cache")
  }
  inv
}
|
# Build orthogonal prediction filters from a (symmetric) smoothing filter.
# For length-2 filters a fixed 2x2 matrix is returned; for length-3 filters
# the companion columns are constructed, rescaled to share the norm of the
# filter column, and sign-adjusted. Only odd filter lengths have a proper
# centre row index.
orthpredfilters <- function(filter = c(0.5, 1, 0.5)) {
  n <- length(filter)
  mid <- (n + 1) / 2  # centre row index (integer only for odd n)
  A <- matrix(NA, n, n)

  if (n == 2) {
    # Special-cased basis for two-tap filters; returned unchanged.
    A <- matrix(c(-1, 1, 1, 1), 2, 2)
    QA <- A
  } else {
    if (n == 3) {
      A[, 1] <- c(1, -1, 1)
      A[, 2] <- filter
      A[, 3] <- c(-(1 + filter[3]) / (filter[1] + 1),
                  -(filter[3] - filter[1]) / (filter[1] + 1),
                  1)
    }
    # Rescale trailing columns so each has the same norm as the filter column.
    col_norms <- colSums(A^2)
    ref_norm <- col_norms[2]  # equals sum(filter^2)
    QA <- A
    tail_cols <- 3:ncol(QA)
    QA[, tail_cols] <- sqrt(ref_norm) * QA[, tail_cols] /
      rep(sqrt(col_norms[tail_cols]), each = nrow(QA))
    # Drop the first column, flip all signs, then restore the centre row.
    QA <- -QA[, -1]
    QA[mid, ] <- -QA[mid, ]
  }
  return(QA)
}
|
/R/orthpredfilters.R
|
no_license
|
nunesmatt/CNLTreg
|
R
| false
| false
| 641
|
r
|
# Build orthogonal prediction filters from a (symmetric) smoothing filter.
# For length-2 filters a fixed 2x2 matrix is returned; for length-3 filters
# the companion columns are constructed, rescaled to share the norm of the
# filter column, and sign-adjusted. Only odd filter lengths have a proper
# centre row index.
orthpredfilters <- function(filter = c(0.5, 1, 0.5)) {
  n <- length(filter)
  mid <- (n + 1) / 2  # centre row index (integer only for odd n)
  A <- matrix(NA, n, n)

  if (n == 2) {
    # Special-cased basis for two-tap filters; returned unchanged.
    A <- matrix(c(-1, 1, 1, 1), 2, 2)
    QA <- A
  } else {
    if (n == 3) {
      A[, 1] <- c(1, -1, 1)
      A[, 2] <- filter
      A[, 3] <- c(-(1 + filter[3]) / (filter[1] + 1),
                  -(filter[3] - filter[1]) / (filter[1] + 1),
                  1)
    }
    # Rescale trailing columns so each has the same norm as the filter column.
    col_norms <- colSums(A^2)
    ref_norm <- col_norms[2]  # equals sum(filter^2)
    QA <- A
    tail_cols <- 3:ncol(QA)
    QA[, tail_cols] <- sqrt(ref_norm) * QA[, tail_cols] /
      rep(sqrt(col_norms[tail_cols]), each = nrow(QA))
    # Drop the first column, flip all signs, then restore the centre row.
    QA <- -QA[, -1]
    QA[mid, ] <- -QA[mid, ]
  }
  return(QA)
}
|
complete <- function(directory, id = 1:332){
  # Count the complete observations (rows with both pollutant columns
  # non-NA) in each monitor file.
  #
  # directory: path containing files named "001.csv", "002.csv", ...
  # id: integer vector of monitor ids to summarise.
  # Returns a data.frame with columns `id` and `nobs`.
  #
  # Fixes vs the original: the id column is taken from the request rather
  # than from the file's first complete row (which yielded NA when a file
  # had zero complete rows), the result vector is preallocated instead of
  # grown with rbind() in the loop, and the unused list.files() call is gone.
  nobs <- integer(length(id))
  for (k in seq_along(id)) {
    filepath <- file.path(directory, sprintf('%03d.csv', id[k]))
    data <- read.csv(filepath)
    # A row is complete when columns 2 and 3 (the pollutant readings)
    # are both present.
    nobs[k] <- sum(!is.na(data[, 2]) & !is.na(data[, 3]))
  }
  data.frame(id = id, nobs = nobs)
}
|
/Course02_RProgramming/week2_quiz/complete.R
|
no_license
|
sqzhang-jeremy/coursera-JHU-data-science
|
R
| false
| false
| 774
|
r
|
complete <- function(directory, id = 1:332){
  # Count the complete observations (rows with both pollutant columns
  # non-NA) in each monitor file.
  #
  # directory: path containing files named "001.csv", "002.csv", ...
  # id: integer vector of monitor ids to summarise.
  # Returns a data.frame with columns `id` and `nobs`.
  #
  # Fixes vs the original: the id column is taken from the request rather
  # than from the file's first complete row (which yielded NA when a file
  # had zero complete rows), the result vector is preallocated instead of
  # grown with rbind() in the loop, and the unused list.files() call is gone.
  nobs <- integer(length(id))
  for (k in seq_along(id)) {
    filepath <- file.path(directory, sprintf('%03d.csv', id[k]))
    data <- read.csv(filepath)
    # A row is complete when columns 2 and 3 (the pollutant readings)
    # are both present.
    nobs[k] <- sum(!is.na(data[, 2]) & !is.na(data[, 3]))
  }
  data.frame(id = id, nobs = nobs)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/b_names.R
\name{b_names}
\alias{b_names}
\title{Show the column names for a data.frame in alphabetical order}
\usage{
b_names(df)
}
\arguments{
\item{df}{the data.frame to inspect}
}
\description{
This is a slight modification of \code{names}
}
\examples{
b_names(mtcars)
}
|
/man/b_names.Rd
|
no_license
|
willbmisled/bm
|
R
| false
| true
| 352
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/b_names.R
\name{b_names}
\alias{b_names}
\title{Show the column names for a data.frame in alphabetical order}
\usage{
b_names(df)
}
\arguments{
\item{df}{the data.frame to inspect}
}
\description{
This is a slight modification of \code{names}
}
\examples{
b_names(mtcars)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ah02_extract_user_info.R
\name{extract_personal_info}
\alias{extract_personal_info}
\title{Extracts information about the user}
\usage{
extract_personal_info(fitness_data)
}
\arguments{
\item{fitness_data}{Fitness data created with
[read_apple_health](read_apple_health)}
}
\description{
This function extracts certain characteristics (see \emph{Extracted Information})
about the athlete if they are present in the data.
}
\section{Extracted Information}{
\itemize{
\item Date of Birth (Column \code{date_of_birth})
\item Sex (Column \code{sex})
\item Blood Type (Column \code{blood_type})
\item Fitzpatrick Skin Type (Column \code{skin_type})
}
}
\examples{
library(dplyr)
data_file <- system.file("example_data/Export.zip", package = "healthieR")
read_apple_health(data_file) \%>\% extract_personal_info
}
\seealso{
Other extraction_functions: \code{\link{extract_export_time}},
\code{\link{extract_records}},
\code{\link{extract_steps}}, \code{\link{extract_weight}}
}
|
/man/extract_personal_info.Rd
|
no_license
|
paulstaab/healthieR
|
R
| false
| true
| 1,054
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ah02_extract_user_info.R
\name{extract_personal_info}
\alias{extract_personal_info}
\title{Extracts information about the user}
\usage{
extract_personal_info(fitness_data)
}
\arguments{
\item{fitness_data}{Fitness data created with
[read_apple_health](read_apple_health)}
}
\description{
This function extracts certain characteristics (see \emph{Extracted Information})
about the athlete if they are present in the data.
}
\section{Extracted Information}{
\itemize{
\item Date of Birth (Column \code{date_of_birth})
\item Sex (Column \code{sex})
\item Blood Type (Column \code{blood_type})
\item Fitzpatrick Skin Type (Column \code{skin_type})
}
}
\examples{
library(dplyr)
data_file <- system.file("example_data/Export.zip", package = "healthieR")
read_apple_health(data_file) \%>\% extract_personal_info
}
\seealso{
Other extraction_functions: \code{\link{extract_export_time}},
\code{\link{extract_records}},
\code{\link{extract_steps}}, \code{\link{extract_weight}}
}
|
# Simulate a small morphometric dataset (ten extinct species with limb
# measurements in randomly assigned units) and write it to Data/measurements.csv.

# Species names.
Species=c("Myotragus balearicus",
          "Ursus spelaeus",
          "Homotherium sp",
          "Cygnus falconeri",
          "Theriosuchus sympiestodon",
          "Eurazhdarcho langendorfensis",
          "Bos primigenius",
          "Prolagus sardus",
          "Pinguinus impennis",
          "Romanogobio antipai")

# Reproducible limb widths: absolute values of normal draws.
set.seed(4997)
Limb.Width=abs(rnorm(10,mean=10,sd=10))

# Reproducible unit labels from a Bernoulli draw: 1 -> "cm", 0 -> "mm".
# (vectorised ifelse replaces the original element-wise loop; the RNG
# sequence is unchanged, so the generated values are identical)
set.seed(12597)
UnitsW=ifelse(rbinom(10,1,0.5) == 1, 'cm', 'mm')

# Same as above, repeated for limb length.
set.seed(4998)
Limb.Length=abs(rnorm(10,mean=100,sd=100))
set.seed(12598)
UnitsL=ifelse(rbinom(10,1,0.5) == 1, 'cm', 'mm')

# Build the data frame directly: the original went through cbind(), whose
# character-matrix intermediate silently coerced the numeric columns to
# character.
MyData=data.frame(Species,Limb.Width,UnitsW,Limb.Length,UnitsL,
                  stringsAsFactors=FALSE)

# Save the data (requires a "Data" directory under the working directory).
fileloc = paste(getwd(),"Data","measurements.csv", sep="/")
write.csv(MyData,fileloc,row.names=FALSE)
|
/biol432/DataGeneration.R
|
no_license
|
avalmahsa/Dataset-simulation
|
R
| false
| false
| 1,415
|
r
|
# Simulate a small morphometric dataset (ten extinct species with limb
# measurements in randomly assigned units) and write it to Data/measurements.csv.

# Species names.
Species=c("Myotragus balearicus",
          "Ursus spelaeus",
          "Homotherium sp",
          "Cygnus falconeri",
          "Theriosuchus sympiestodon",
          "Eurazhdarcho langendorfensis",
          "Bos primigenius",
          "Prolagus sardus",
          "Pinguinus impennis",
          "Romanogobio antipai")

# Reproducible limb widths: absolute values of normal draws.
set.seed(4997)
Limb.Width=abs(rnorm(10,mean=10,sd=10))

# Reproducible unit labels from a Bernoulli draw: 1 -> "cm", 0 -> "mm".
# (vectorised ifelse replaces the original element-wise loop; the RNG
# sequence is unchanged, so the generated values are identical)
set.seed(12597)
UnitsW=ifelse(rbinom(10,1,0.5) == 1, 'cm', 'mm')

# Same as above, repeated for limb length.
set.seed(4998)
Limb.Length=abs(rnorm(10,mean=100,sd=100))
set.seed(12598)
UnitsL=ifelse(rbinom(10,1,0.5) == 1, 'cm', 'mm')

# Build the data frame directly: the original went through cbind(), whose
# character-matrix intermediate silently coerced the numeric columns to
# character.
MyData=data.frame(Species,Limb.Width,UnitsW,Limb.Length,UnitsL,
                  stringsAsFactors=FALSE)

# Save the data (requires a "Data" directory under the working directory).
fileloc = paste(getwd(),"Data","measurements.csv", sep="/")
write.csv(MyData,fileloc,row.names=FALSE)
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 11849
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 11848
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 11848
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/terminator/stmt19_90_352.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 3540
c no.of clauses 11849
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 11848
c
c QBFLIB/Basler/terminator/stmt19_90_352.qdimacs 3540 11849 E1 [1] 0 260 3279 11848 RED
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Basler/terminator/stmt19_90_352/stmt19_90_352.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 718
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 11849
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 11848
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 11848
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/terminator/stmt19_90_352.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 3540
c no.of clauses 11849
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 11848
c
c QBFLIB/Basler/terminator/stmt19_90_352.qdimacs 3540 11849 E1 [1] 0 260 3279 11848 RED
|
# Shiny app: download daily COVID-19 confirmed/death counts per country from
# covid19api.com and plot the selected series interactively.
library(httr)
library(jsonlite)
library(shiny)
library(tidyverse)
library(scales)
library(rsconnect)

# Empty accumulator with the final column types; one block of rows is
# appended per country below.
covid_dta <- data.frame(Date=as.Date(character()),
                        Country=character(),
                        Confirmed=integer(),
                        Deaths=integer(),
                        stringsAsFactors=FALSE)

# Countries to query, one API request each.
countries <- read.csv("countrybasic.csv")[ ,c('Country')]

# api call below inspired by
# https://www.programmableweb.com/news/how-to-access-any-restful-api-using-r-language/how-to/2017/07/21?page=2
for (country in countries) {
  covid_url <- "https://api.covid19api.com/dayone/country/"
  covid_url <- paste(covid_url, country, sep ="")
  # the GET call below is possible due to the httr library
  get_covid <- GET(covid_url)
  get_covid_text <- content(get_covid, "text")
  # the json conversion below is possible due to the jsonlite library
  get_covid_json <- fromJSON(get_covid_text, flatten = TRUE)
  get_covid_df <- as.data.frame(get_covid_json)
  # (a no-op subsetting expression whose result was never assigned has been
  # removed here; the explicit column selection below already does the work)
  cdata <- get_covid_df[c("Date", "Country", "Confirmed", "Deaths")]
  cdata$Date <- as.Date(paste(cdata$Date))
  covid_dta <- rbind(covid_dta, cdata)
}

# Long format: one row per (date, country, measure) for easy filtering.
long_df <- covid_dta %>% gather(Type,Value,3:4)
long_df <- na.omit(long_df)
long_df$Value <- as.integer(paste(long_df$Value))

# R shiny inspiration from https://shiny.rstudio.com/tutorial/
ui <- fluidPage(
  sidebarPanel(
    selectInput(
      inputId = "country",
      label = "Select Country",
      long_df$Country
    ),
    selectInput(inputId = "type",
                label = "Data to Show:",
                choices=unique(long_df$Type),
                multiple = TRUE)),
  mainPanel('COVID in Select Countries',
            plotOutput("chart")),
  position = 'left'
)

server <- function(input, output) {
  output$chart <- renderPlot({
    long_df %>%
      filter(Country %in% input$country) %>%
      filter(Type %in% input$type) %>%
      ggplot(aes(Date, Value)) +
      # info on multiple y variables
      # https://stackoverflow.com/questions/55155912/plot-graph-with-multiple-y-axis-variable-in-r?
      #newreg=e0d0131c5d90481fb841170850c12449
      geom_line(aes(color = Type)) +
      scale_colour_manual(values = c("Deaths" = "red",
                                     "Confirmed" = "blue")) +
      scale_y_continuous(labels = comma,
                         expand = expansion(mult = c(0, 0.1)),
                         limits = c(0, NA)) +
      scale_x_date(expand = expansion(mult = 0))
  })
}

shinyApp(ui = ui, server = server)
|
/app.r
|
no_license
|
iambabs/covid-app
|
R
| false
| false
| 2,532
|
r
|
# Shiny app: download daily COVID-19 confirmed/death counts per country from
# covid19api.com and plot the selected series interactively.
library(httr)
library(jsonlite)
library(shiny)
library(tidyverse)
library(scales)
library(rsconnect)

# Empty accumulator with the final column types; one block of rows is
# appended per country below.
covid_dta <- data.frame(Date=as.Date(character()),
                        Country=character(),
                        Confirmed=integer(),
                        Deaths=integer(),
                        stringsAsFactors=FALSE)

# Countries to query, one API request each.
countries <- read.csv("countrybasic.csv")[ ,c('Country')]

# api call below inspired by
# https://www.programmableweb.com/news/how-to-access-any-restful-api-using-r-language/how-to/2017/07/21?page=2
for (country in countries) {
  covid_url <- "https://api.covid19api.com/dayone/country/"
  covid_url <- paste(covid_url, country, sep ="")
  # the GET call below is possible due to the httr library
  get_covid <- GET(covid_url)
  get_covid_text <- content(get_covid, "text")
  # the json conversion below is possible due to the jsonlite library
  get_covid_json <- fromJSON(get_covid_text, flatten = TRUE)
  get_covid_df <- as.data.frame(get_covid_json)
  # (a no-op subsetting expression whose result was never assigned has been
  # removed here; the explicit column selection below already does the work)
  cdata <- get_covid_df[c("Date", "Country", "Confirmed", "Deaths")]
  cdata$Date <- as.Date(paste(cdata$Date))
  covid_dta <- rbind(covid_dta, cdata)
}

# Long format: one row per (date, country, measure) for easy filtering.
long_df <- covid_dta %>% gather(Type,Value,3:4)
long_df <- na.omit(long_df)
long_df$Value <- as.integer(paste(long_df$Value))

# R shiny inspiration from https://shiny.rstudio.com/tutorial/
ui <- fluidPage(
  sidebarPanel(
    selectInput(
      inputId = "country",
      label = "Select Country",
      long_df$Country
    ),
    selectInput(inputId = "type",
                label = "Data to Show:",
                choices=unique(long_df$Type),
                multiple = TRUE)),
  mainPanel('COVID in Select Countries',
            plotOutput("chart")),
  position = 'left'
)

server <- function(input, output) {
  output$chart <- renderPlot({
    long_df %>%
      filter(Country %in% input$country) %>%
      filter(Type %in% input$type) %>%
      ggplot(aes(Date, Value)) +
      # info on multiple y variables
      # https://stackoverflow.com/questions/55155912/plot-graph-with-multiple-y-axis-variable-in-r?
      #newreg=e0d0131c5d90481fb841170850c12449
      geom_line(aes(color = Type)) +
      scale_colour_manual(values = c("Deaths" = "red",
                                     "Confirmed" = "blue")) +
      scale_y_continuous(labels = comma,
                         expand = expansion(mult = c(0, 0.1)),
                         limits = c(0, NA)) +
      scale_x_date(expand = expansion(mult = 0))
  })
}

shinyApp(ui = ui, server = server)
|
# Setup for GP model fitting: the working directory comes from environment
# variables (path_input / path_output) so the script can run on a cluster.
# #set working directory
# setwd(paste0(Sys.getenv("HOME"), "/v4_MDG/GP"))
#
# #get z drive path
# load("../zdrive_path.RData")
path_output <- Sys.getenv("path_output")
path_input <- Sys.getenv("path_input")
setwd(path_input)
print(getwd())
library(TMB)      # compiled (C++) likelihood templates
library(INLA)     # mesh construction / SPDE objects
library(geometry) # convex hull / Delaunay triangulation
# Root-mean-square error between two equal-length numeric vectors.
# na.rm: drop missing squared errors before averaging.
rmse_vec <- function(x, y, na.rm=T){
  err <- x - y
  sqrt(mean(err^2, na.rm = na.rm))
}
# covs_use <- c(1:4, 20)
# covs_use <- 1:28
# covs_use <- c(5:8, 20)
#add some fake covariates
# load("../covariates.RData")
load("covariates_all.RData")
n_hf_fit <- dim(all_covs$covmats_normalised[[1]])[1]
n_hf_pred <- dim(pred_covs$covmats_normalised[[1]])[1]
models_silent <- TRUE
n_months_pred <- 24
# all_cov_cor <- c()
# causal_cov_cor <- c()
# lasso_cov_cor <- c()
subset_list <- list(1:28,
c(1:4, 20),
c(5:8, 20)
)
N_subsets <- length(subset_list)
subset_names <- c("all", "causal", "lasso")
residual_cors <- list()
all_cors <- list()
# Row-wise standard deviations of a matrix.
# Fix: the original passed the literal `na.rm=T` to sd(), silently ignoring
# the function's own na.rm argument; it is now forwarded.
rowSds <- function(mat, na.rm=T){
  return(apply(mat, 1, sd, na.rm=na.rm))
}
# Standardise each row of a matrix to mean 0 and sd 1.
standardise_mat <- function(mat, na.rm=T){
  return((mat - rowMeans(mat, na.rm=na.rm)) / rowSds(mat, na.rm=na.rm))
}
#make mesh and spde object
mesh <- inla.mesh.2d(loc = coords_fit, cutoff = 0.1, max.edge = 3)
plot(mesh)
points(coords_fit, col="red")
spde <- (inla.spde2.matern(mesh=mesh, alpha=2)$param.inla)[c("M0","M1","M2")]
A_fit <- inla.spde.make.A(mesh=mesh, loc=as.matrix(coords_fit))
A_pred <- inla.spde.make.A(mesh=mesh, loc=as.matrix(coords_pred))
n_s <- nrow(spde$M0)
mesh_coords <- mesh$loc[, 1:2]
tryCatch(dyn.unload(dynlib("gp_rff_t2")),
error = function(e) print(e))
compile("gp_rff_t2.cpp")
dyn.load(dynlib("gp_rff_t2"))
source("model_fit_function.R")
# ##just do all covs for now
# time_iters <- 1:13
# n_months_fit <- 12
time_iters <- 1:2
n_months_fit <- 12
####try and discretise space to have fewer points in GP
##how many points with just two time points? this seems to go okay
##with fixed hyperparameters...
# time_iter <- 1
# n_months_fit <- 2
# cov_mat_list <- all_covs$covmats_normalised
# time_index_fit <- (time_iter - 1) + 1:n_months_fit
# cov_mat_fit <- cov_mat_list[time_index_fit]
# cov_mat_all <- do.call("rbind", cov_mat_fit)
# print("dimension of full covariate matrix (2 months of data): ")
# print(dim(cov_mat_all))
# #get unique elmements
# cov_mat <- unique(cov_mat_all)
# print("dimension of unique cov mat: ")
# print(dim(cov_mat))
## ~ 1000 points
#
##now with 12 months
n_months_fit <- 12
cov_mat_list <- all_covs$covmats_normalised
time_index_fit <- (time_iter - 1) + 1:n_months_fit
cov_mat_fit <- cov_mat_list[time_index_fit]
cov_mat_all <- do.call("rbind", cov_mat_fit)
print("dimension of full covariate matrix (12 months of data): ")
print(dim(cov_mat_all))
#get unique elmements
cov_mat <- unique(cov_mat_all)
print("dimension of unique cov mat: ")
print(dim(cov_mat))
## ~ 6000 points - makes sense
cov_mat_2d <- cov_mat[, c(1, 5)]
n_dim <- dim(cov_mat_2d)[2]
n_pts <- dim(cov_mat)[1]
plot(cov_mat[, 1], cov_mat[, 2])
# min_vec <- c()
# max_vec <- c()
# n_disc <- 10
min_dist <- 3
cov_mat_out <- cov_mat_2d
##start with convex hull
ptm <- proc.time()
hull_index <- unique(as.vector(convhulln(cov_mat_2d)))
print("finding conxed hull")
print(proc.time() - ptm)
index_keep <- hull_index
index_test <- setdiff(1:n_pts, index_keep)
##randomly add 500 points to set
n_add <- 400
index_add <- sample(index_test, n_add)
index_keep <- c(index_keep, index_add)
index_test <- setdiff(index_test, index_add)
library(Rcpp)
# sourceCpp("mesh_function.cpp")
# ptm <- Sys.time()
# index_keep_cpp <- create_mesh_cpp(cov_mat_2d, index_keep-1,
# index_test-1, min_dist)
# print(paste0("cpp function ", Sys.time() - ptm))
sourceCpp("mesh_function_fast.cpp")
covs_use <- 1:28
ptm <- Sys.time()
index_keep_cpp <- create_mesh_cpp(cov_mat[, covs_use], index_keep-1,
index_test-1, min_dist, TRUE)
print(paste0("cpp function ", Sys.time() - ptm))
#
# d <- delaunayn(cov_mat[index_keep_cpp, 1:2])
#
# i <- 2
# ptm <- Sys.time()
# while(i < dim(cov_mat_out)[1]){
# cov_keep <- cov_mat_out[index_keep, , drop=FALSE]
# cov_rest <- cov_mat_out[index_test, , drop=FALSE]
#
# # print(i)
# # print(length(index_test))
# # cov_keep <- cov_mat_out[1:i, ]
# # cov_rest <- cov_mat_out[-(1:i), , drop=FALSE]
#
# #check if any of the points are less than min dist away from existing points
# min_dists <- apply(cov_rest, 1, function(row_test){
# min(apply(cov_keep, 1, function(row){
# sqrt(sum((row - row_test)^2))
# }
# ), na.rm=T)
# }
# )
#
# keep_i <- which(min_dists > min_dist)
# # plot(cov_keep)
# # plot(cov_mat_2d)
# # points(cov_rest[keep_i, ], col="green")
# # points(cov_keep, col="red")
# if(length(keep_i) == 0){
# # cov_mat_out <- cov_keep
# break
# }else if(length(keep_i) == 1){
# index_keep <- c(index_keep, index_test[keep_i])
# break
# }
#
# index_keep <- c(index_keep, index_test[keep_i[1]])
# index_test <- index_test[keep_i[-1]]
# # cov_rest <- cov_rest[keep_i, ]
# # cov_mat_out <- rbind(cov_keep, cov_rest)
#
# i <- i + 1
# }
# print(Sys.time() - ptm)
# plot(cov_mat_2d[, 1:2])
# points(cov_mat_out[index_keep, 1:2], col="red", pch=19)
plot(cov_mat[, 1:2])
points(cov_mat[index_keep_cpp, 1:2], col="red", pch=19)
print(length(index_keep_cpp))
ptm <- proc.time()
# d <- delaunayn(cov_mat[index_keep_cpp, covs_use])
# print("time to triangulate")
# print(proc.time() - ptm)
|
/GP/all_fits_tri.R
|
no_license
|
rarambepola/google_cloud_stuff
|
R
| false
| false
| 5,539
|
r
|
# Setup for GP model fitting: the working directory comes from environment
# variables (path_input / path_output) so the script can run on a cluster.
# #set working directory
# setwd(paste0(Sys.getenv("HOME"), "/v4_MDG/GP"))
#
# #get z drive path
# load("../zdrive_path.RData")
path_output <- Sys.getenv("path_output")
path_input <- Sys.getenv("path_input")
setwd(path_input)
print(getwd())
library(TMB)      # compiled (C++) likelihood templates
library(INLA)     # mesh construction / SPDE objects
library(geometry) # convex hull / Delaunay triangulation
# Root-mean-square error between two equal-length numeric vectors.
# na.rm: drop missing squared errors before averaging.
rmse_vec <- function(x, y, na.rm=T){
  err <- x - y
  sqrt(mean(err^2, na.rm = na.rm))
}
# covs_use <- c(1:4, 20)
# covs_use <- 1:28
# covs_use <- c(5:8, 20)
#add some fake covariates
# load("../covariates.RData")
load("covariates_all.RData")
n_hf_fit <- dim(all_covs$covmats_normalised[[1]])[1]
n_hf_pred <- dim(pred_covs$covmats_normalised[[1]])[1]
models_silent <- TRUE
n_months_pred <- 24
# all_cov_cor <- c()
# causal_cov_cor <- c()
# lasso_cov_cor <- c()
subset_list <- list(1:28,
c(1:4, 20),
c(5:8, 20)
)
N_subsets <- length(subset_list)
subset_names <- c("all", "causal", "lasso")
residual_cors <- list()
all_cors <- list()
# Row-wise standard deviations of a matrix.
# Fix: the original passed the literal `na.rm=T` to sd(), silently ignoring
# the function's own na.rm argument; it is now forwarded.
rowSds <- function(mat, na.rm=T){
  return(apply(mat, 1, sd, na.rm=na.rm))
}
# Standardise each row of a matrix to mean 0 and sd 1.
standardise_mat <- function(mat, na.rm=T){
  return((mat - rowMeans(mat, na.rm=na.rm)) / rowSds(mat, na.rm=na.rm))
}
#make mesh and spde object
mesh <- inla.mesh.2d(loc = coords_fit, cutoff = 0.1, max.edge = 3)
plot(mesh)
points(coords_fit, col="red")
spde <- (inla.spde2.matern(mesh=mesh, alpha=2)$param.inla)[c("M0","M1","M2")]
A_fit <- inla.spde.make.A(mesh=mesh, loc=as.matrix(coords_fit))
A_pred <- inla.spde.make.A(mesh=mesh, loc=as.matrix(coords_pred))
n_s <- nrow(spde$M0)
mesh_coords <- mesh$loc[, 1:2]
tryCatch(dyn.unload(dynlib("gp_rff_t2")),
error = function(e) print(e))
compile("gp_rff_t2.cpp")
dyn.load(dynlib("gp_rff_t2"))
source("model_fit_function.R")
# ##just do all covs for now
# time_iters <- 1:13
# n_months_fit <- 12
time_iters <- 1:2
n_months_fit <- 12
####try and discretise space to have fewer points in GP
##how many points with just two time points? this seems to go okay
##with fixed hyperparameters...
# time_iter <- 1
# n_months_fit <- 2
# cov_mat_list <- all_covs$covmats_normalised
# time_index_fit <- (time_iter - 1) + 1:n_months_fit
# cov_mat_fit <- cov_mat_list[time_index_fit]
# cov_mat_all <- do.call("rbind", cov_mat_fit)
# print("dimension of full covariate matrix (2 months of data): ")
# print(dim(cov_mat_all))
# #get unique elmements
# cov_mat <- unique(cov_mat_all)
# print("dimension of unique cov mat: ")
# print(dim(cov_mat))
## ~ 1000 points
#
##now with 12 months
n_months_fit <- 12
cov_mat_list <- all_covs$covmats_normalised
time_index_fit <- (time_iter - 1) + 1:n_months_fit
cov_mat_fit <- cov_mat_list[time_index_fit]
cov_mat_all <- do.call("rbind", cov_mat_fit)
print("dimension of full covariate matrix (12 months of data): ")
print(dim(cov_mat_all))
#get unique elmements
cov_mat <- unique(cov_mat_all)
print("dimension of unique cov mat: ")
print(dim(cov_mat))
## ~ 6000 points - makes sense
cov_mat_2d <- cov_mat[, c(1, 5)]
n_dim <- dim(cov_mat_2d)[2]
n_pts <- dim(cov_mat)[1]
plot(cov_mat[, 1], cov_mat[, 2])
# min_vec <- c()
# max_vec <- c()
# n_disc <- 10
min_dist <- 3
cov_mat_out <- cov_mat_2d
##start with convex hull
ptm <- proc.time()
hull_index <- unique(as.vector(convhulln(cov_mat_2d)))
print("finding conxed hull")
print(proc.time() - ptm)
index_keep <- hull_index
index_test <- setdiff(1:n_pts, index_keep)
##randomly add 500 points to set
n_add <- 400
index_add <- sample(index_test, n_add)
index_keep <- c(index_keep, index_add)
index_test <- setdiff(index_test, index_add)
library(Rcpp)
# sourceCpp("mesh_function.cpp")
# ptm <- Sys.time()
# index_keep_cpp <- create_mesh_cpp(cov_mat_2d, index_keep-1,
# index_test-1, min_dist)
# print(paste0("cpp function ", Sys.time() - ptm))
sourceCpp("mesh_function_fast.cpp")
covs_use <- 1:28
ptm <- Sys.time()
index_keep_cpp <- create_mesh_cpp(cov_mat[, covs_use], index_keep-1,
index_test-1, min_dist, TRUE)
print(paste0("cpp function ", Sys.time() - ptm))
#
# d <- delaunayn(cov_mat[index_keep_cpp, 1:2])
#
# i <- 2
# ptm <- Sys.time()
# while(i < dim(cov_mat_out)[1]){
# cov_keep <- cov_mat_out[index_keep, , drop=FALSE]
# cov_rest <- cov_mat_out[index_test, , drop=FALSE]
#
# # print(i)
# # print(length(index_test))
# # cov_keep <- cov_mat_out[1:i, ]
# # cov_rest <- cov_mat_out[-(1:i), , drop=FALSE]
#
# #check if any of the points are less than min dist away from existing points
# min_dists <- apply(cov_rest, 1, function(row_test){
# min(apply(cov_keep, 1, function(row){
# sqrt(sum((row - row_test)^2))
# }
# ), na.rm=T)
# }
# )
#
# keep_i <- which(min_dists > min_dist)
# # plot(cov_keep)
# # plot(cov_mat_2d)
# # points(cov_rest[keep_i, ], col="green")
# # points(cov_keep, col="red")
# if(length(keep_i) == 0){
# # cov_mat_out <- cov_keep
# break
# }else if(length(keep_i) == 1){
# index_keep <- c(index_keep, index_test[keep_i])
# break
# }
#
# index_keep <- c(index_keep, index_test[keep_i[1]])
# index_test <- index_test[keep_i[-1]]
# # cov_rest <- cov_rest[keep_i, ]
# # cov_mat_out <- rbind(cov_keep, cov_rest)
#
# i <- i + 1
# }
# print(Sys.time() - ptm)
# plot(cov_mat_2d[, 1:2])
# points(cov_mat_out[index_keep, 1:2], col="red", pch=19)
plot(cov_mat[, 1:2])
points(cov_mat[index_keep_cpp, 1:2], col="red", pch=19)
print(length(index_keep_cpp))
ptm <- proc.time()
# d <- delaunayn(cov_mat[index_keep_cpp, covs_use])
# print("time to triangulate")
# print(proc.time() - ptm)
|
# date time=2019/4/16 19:43:55
setwd('/Users/takatoosetsuo/Dropbox/2019polytec/lectures/0520/presen/fig/drawsine')
source('/Applications/kettex/texlive/texmf-dist/scripts/ketcindy/ketlib/ketpiccurrent.r')
Ketinit()
cat(ThisVersion,'\n')
Fnametex='p025.tex'
FnameR='p025.r'
Fnameout='p025.txt'
arccos=acos; arcsin=asin; arctan=atan
Acos<- function(x){acos(max(-1,min(1,x)))}
Asin<- function(x){asin(max(-1,min(1,x)))}
Atan=atan
Sqr<- function(x){if(x>=0){sqrt(x)}else{0}}
Factorial=factorial
Norm<- function(x){norm(matrix(x,nrow=1),"2")}
Setwindow(c(-3,6), c(-2,3.5))
X=c(1.22699,-2.5);Assignadd('X',X)
T=c(5.49514,-3);Assignadd('T',T)
mdag1=c(-0.70565,0.20726);Assignadd('mdag1',mdag1)
mdbw1=c(-0.5,-0.14);Assignadd('mdbw1',mdbw1)
sgXlXXr=Listplot(c(c(0,-2.5),c(3.14159,-2.5)))
sgTlTTr=Listplot(c(c(0,-3),c(7.5708,-3)))
Setunitlen("15mm")
sgaxx1=Listplot(c(c(-3,0),c(6,0)))
sgaxy1=Listplot(c(c(0,-2),c(0,3.5)))
cr1=Circledata(c(c(-1,0),c(0,0)))
sg1=Listplot(c(c(-1,0),c(0,0)))
sg2=Listplot(c(c(-1,0),c(-0.66293,0.94148)))
bw1=Bowdata(c(-1,0),c(0,0),1.4,0.3)
ag1=Anglemark(c(0,0),c(-1,0),c(-0.66293,0.94148),0.6)
sgt1=Listplot(c(c(0,0),c(0.28644,1.19309)))
PtL=list()
GrL=list()
# Windisp(GrL)
if(1==1){
Openfile('/Users/takatoosetsuo/Dropbox/2019polytec/lectures/0520/presen/fig/drawsine/p025.tex','15mm','Cdy=presen0521a.cdy')
Drwline(sgaxx1)
Drwline(sgaxy1)
Letter(c(6,0),"e","$x$")
Letter(c(0,3.5),"cn","$y$")
Letter(c(0,0),"se","O")
Drwline(cr1)
Drwline(sg1)
Drwline(sg2)
Letter(c(-0.5,-0.14),"c","$1$")
Dottedline(bw1,0.75,1.2)
Letter(c(-0.71,0.21),"c","$x$")
Drwline(ag1)
Texcom("{")
Setcolor(c(1,0,0))
Drwline(sgt1,2)
Texcom("}")
Closefile("0")
}
quit()
|
/examples/sankakugraph/fig/drawsine/p025.r
|
no_license
|
s-takato/s-takato.github.io
|
R
| false
| false
| 1,669
|
r
|
# date time=2019/4/16 19:43:55
setwd('/Users/takatoosetsuo/Dropbox/2019polytec/lectures/0520/presen/fig/drawsine')
source('/Applications/kettex/texlive/texmf-dist/scripts/ketcindy/ketlib/ketpiccurrent.r')
Ketinit()
cat(ThisVersion,'\n')
Fnametex='p025.tex'
FnameR='p025.r'
Fnameout='p025.txt'
arccos=acos; arcsin=asin; arctan=atan
Acos<- function(x){acos(max(-1,min(1,x)))}
Asin<- function(x){asin(max(-1,min(1,x)))}
Atan=atan
Sqr<- function(x){if(x>=0){sqrt(x)}else{0}}
Factorial=factorial
Norm<- function(x){norm(matrix(x,nrow=1),"2")}
Setwindow(c(-3,6), c(-2,3.5))
X=c(1.22699,-2.5);Assignadd('X',X)
T=c(5.49514,-3);Assignadd('T',T)
mdag1=c(-0.70565,0.20726);Assignadd('mdag1',mdag1)
mdbw1=c(-0.5,-0.14);Assignadd('mdbw1',mdbw1)
sgXlXXr=Listplot(c(c(0,-2.5),c(3.14159,-2.5)))
sgTlTTr=Listplot(c(c(0,-3),c(7.5708,-3)))
Setunitlen("15mm")
sgaxx1=Listplot(c(c(-3,0),c(6,0)))
sgaxy1=Listplot(c(c(0,-2),c(0,3.5)))
cr1=Circledata(c(c(-1,0),c(0,0)))
sg1=Listplot(c(c(-1,0),c(0,0)))
sg2=Listplot(c(c(-1,0),c(-0.66293,0.94148)))
bw1=Bowdata(c(-1,0),c(0,0),1.4,0.3)
ag1=Anglemark(c(0,0),c(-1,0),c(-0.66293,0.94148),0.6)
sgt1=Listplot(c(c(0,0),c(0.28644,1.19309)))
PtL=list()
GrL=list()
# Windisp(GrL)
if(1==1){
Openfile('/Users/takatoosetsuo/Dropbox/2019polytec/lectures/0520/presen/fig/drawsine/p025.tex','15mm','Cdy=presen0521a.cdy')
Drwline(sgaxx1)
Drwline(sgaxy1)
Letter(c(6,0),"e","$x$")
Letter(c(0,3.5),"cn","$y$")
Letter(c(0,0),"se","O")
Drwline(cr1)
Drwline(sg1)
Drwline(sg2)
Letter(c(-0.5,-0.14),"c","$1$")
Dottedline(bw1,0.75,1.2)
Letter(c(-0.71,0.21),"c","$x$")
Drwline(ag1)
Texcom("{")
Setcolor(c(1,0,0))
Drwline(sgt1,2)
Texcom("}")
Closefile("0")
}
quit()
|
#' Ruscio's A - the probability of superiority
#'
#' Computes Ruscio's A effect size (2008): the probability that a randomly
#' drawn observation from group 1 exceeds a randomly drawn observation from
#' group 2 (ties count half). This returns the point estimate only; no
#' confidence intervals are computed here.
#' Code adapted from John Ruscio's original implementation of his metric: https://ruscio.pages.tcnj.edu/quantitative-methods-program-code/
#' @param data data
#' @param variable continuous variable
#' @param group dichotomous group
#' @param value1 assignment of group 1
#' @param value2 assignment of group 2
#' @param adjust_ceiling if TRUE, rescore an A of exactly 0 or 1 to the
#'   nearest value distinguishable at the sample sizes (avoids floor/ceiling
#'   effects; larger samples yield a more extreme adjusted A).
#' @return Ruscio's A.
#' @export
#' @examples
#' ruscios_A(data = simulated_data, variable = "Score", group = "Condition", value1 = "B", value2 = "A")
#'
ruscios_A <- function(data, variable, group, value1 = 1, value2 = 0, adjust_ceiling = FALSE) {
  # Fast calculation of the A statistic via the rank-sum formulation:
  # rank() averages ties, so tied cross-group values contribute 0.5.
  ruscios_A_function <- function(x, y) {
    nx <- length(x)
    ny <- length(y)
    rx <- sum(rank(c(x, y))[1:nx])
    A <- (rx / nx - (nx + 1) / 2) / ny
    # When A hits the floor/ceiling and adjustment is requested, rescore as
    # if a single data point in one group were inferior to a single point in
    # the other, i.e. the lowest granularity the sample sizes allow.
    # Scalar `&&` (short-circuiting) is correct here: both operands are
    # length-1 logicals, and `&&` avoids evaluating `A == 1` styled checks
    # when adjustment is off.
    if (adjust_ceiling && A == 1) {
      A <- ruscios_A_function(c(rep(4, length(x)), 2), c(rep(1, length(y)), 3))
    } else if (adjust_ceiling && A == 0) {
      A <- 1 - ruscios_A_function(c(rep(4, length(x)), 2), c(rep(1, length(y)), 3))
    }
    return(A)
  }
  # Ensure data is a plain data frame (e.g., not a tbl_df) so `[` returns a
  # vector below rather than a one-column tibble.
  data <- as.data.frame(data)
  # Observations for group 1
  x <- data[data[[group]] == value1, variable]
  # Observations for group 2
  y <- data[data[[group]] == value2, variable]
  A.obs <- ruscios_A_function(x, y)
  return(A.obs)
}
|
/R/ruscios_A.R
|
no_license
|
ianhussey/SCED
|
R
| false
| false
| 1,821
|
r
|
#' Ruscio's A - the probability of superiority
#'
#' This function bootstraps confidence intervals for Ruscio's A effect size (2008).
#' Code adapted from adapted from John Ruscio's original implementation of his metric: https://ruscio.pages.tcnj.edu/quantitative-methods-program-code/
#' @param data data
#' @param variable continuous variable
#' @param group dichotomous group
#' @param value1 assignment of group 1
#' @param value2 assignment of group 2
#' @return Ruscio's A.
#' @export
#' @examples
#' ruscios_A(data = simulated_data, variable = "Score", group = "Condition", value1 = "B", value2 = "A")
#'
ruscios_A <- function(data, variable, group, value1 = 1, value2 = 0, adjust_ceiling = FALSE) {
# Fast calculation of the A statistic
ruscios_A_function <- function(x, y) {
nx <- length(x)
ny <- length(y)
rx <- sum(rank(c(x, y))[1:nx])
A = (rx / nx - (nx + 1) / 2) / ny
# if adjust_ceiling == TRUE & A == 0 or 1, rescore it as if a single data point was inferior to a single second data point between conditions.
# Ie., use the lowest granularity allowed by the data for rescoring. More data points will result in a higher adjusted A.
if(adjust_ceiling == TRUE & A == 1){
A <- ruscios_A_function(c(rep(4, length(x)), 2), c(rep(1, length(y)), 3))
} else if(adjust_ceiling == TRUE & A == 0){
A <- 1 - ruscios_A_function(c(rep(4, length(x)), 2), c(rep(1, length(y)), 3))
}
return(A)
}
# Ensure data is a data frame (e.g., not a tbl_data)
data <- as.data.frame(data)
# Select the observations for group 1
x <- data[data[[group]] == value1, variable]
# Select the observations for group 2
y <- data[data[[group]] == value2, variable]
# initialize variables
A.obs <- ruscios_A_function(x, y)
return(A.obs)
}
|
## analysis_recRatingsv1.R -- collate MTurk recognition-rating JSON results
## into one tidy data frame and write it to CSV.
## NOTE(review): rm(list=ls()) wipes the user's workspace on source, and the
## knitr chunk options suggest this was extracted from an Rmd document.
rm(list=ls())
knitr::opts_chunk$set(fig.width=8, fig.height=5,
                      echo=TRUE, warning=FALSE, message=FALSE, cache=TRUE)
# NOTE(review): suppressPackageStartupMessages() is passed a character
# vector here, which loads nothing -- the library() calls below do the work.
suppressPackageStartupMessages(c("dplyr","langcog","tidyr","ggplot2","lme4"))
# library(langcog)
library(dplyr)
library(ggplot2)
library(rjson)
library(stringr)
library(tidyr)
library(lme4)
## load datafiles and put in a data frame
files <- dir("../production-results/")
d.raw <- data.frame()
# Helpers to pull fields out of the stored image path.
# NOTE(review): indices 8 and 9 assume a fixed directory depth in imageName,
# and the "_" splits assume a fixed file-name pattern
# (e.g. *_*_<age>_*_<sessionId>...) -- confirm against the actual data.
getCategory <- function(fileName){
  out=strsplit(as.character(fileName),"/")[[1]][8]
}
imageNameShort <- function(fileName){
  out=strsplit(as.character(fileName),"/")[[1]][9]
}
getAge <- function(imageNameShort){
  out=as.numeric(strsplit(imageNameShort,"_")[[1]][3])
}
getSessionId <- function(imageNameShort){
  out=(strsplit(imageNameShort,"_")[[1]][5])
}
# Read each results file and append its worker id / rating / image name rows
for (f in files) {
  jf <- paste("../production-results/",f,sep="")
  jd <- fromJSON(paste(readLines(jf), collapse=""))
  id <- data.frame(workerid = jd$WorkerId,
                   rating = jd$answers$data$rating,
                   imageName = jd$answers$data$imageName)
  d.raw <- bind_rows(d.raw, id)
}
## get rid of weird characters where filename had spaces (URL-encoded %20)
d.raw$imageName <- str_replace_all(d.raw$imageName,"%20"," ")
# prettify data: make shorter image names for plots, etc.
# (the helper functions take [[1]] of strsplit, so under group_by(imageName)
# each group resolves to a single scalar value)
d.pretty <- d.raw %>%
  group_by(imageName) %>%
  mutate(category = getCategory(imageName)) %>%
  mutate(imNameShort = imageNameShort(imageName)) %>%
  mutate(age = getAge(imNameShort)) %>%
  mutate(sessionId = getSessionId(imNameShort)) %>%
  mutate(correct = (rating == category))
write.table(d.pretty, "../output/museumdraw_E1c_recognitionData.csv", sep=",")
|
/experiments/ratings/_old/recognition_ratings_cogsci2018/analysis/analysis_recRatingsv1.R
|
no_license
|
brialorelle/kiddraw
|
R
| false
| false
| 1,696
|
r
|
rm(list=ls())
knitr::opts_chunk$set(fig.width=8, fig.height=5,
echo=TRUE, warning=FALSE, message=FALSE, cache=TRUE)
suppressPackageStartupMessages(c("dplyr","langcog","tidyr","ggplot2","lme4"))
#library(langcog)a
library(dplyr)
library(ggplot2)
library(rjson)
library(stringr)
library(tidyr)
library(lme4)
## load datafiles and put in a data frame
files <- dir("../production-results/")
d.raw <- data.frame()
# function for shorter filename extraction
getCategory <- function(fileName){
out=strsplit(as.character(fileName),"/")[[1]][8]
}
imageNameShort <- function(fileName){
out=strsplit(as.character(fileName),"/")[[1]][9]
}
getAge <- function(imageNameShort){
out=as.numeric(strsplit(imageNameShort,"_")[[1]][3])
}
getSessionId <- function(imageNameShort){
out=(strsplit(imageNameShort,"_")[[1]][5])
}
for (f in files) {
jf <- paste("../production-results/",f,sep="")
jd <- fromJSON(paste(readLines(jf), collapse=""))
id <- data.frame(workerid = jd$WorkerId,
rating = jd$answers$data$rating,
imageName = jd$answers$data$imageName)
d.raw <- bind_rows(d.raw, id)
}
## get rid of weird characters where filename had spaces
d.raw$imageName <- str_replace_all(d.raw$imageName,"%20"," ")
# prettify data: make shorter iamge names for plots, etc.
d.pretty <- d.raw %>%
group_by(imageName) %>%
mutate(category = getCategory(imageName)) %>%
mutate(imNameShort = imageNameShort(imageName)) %>%
mutate(age = getAge(imNameShort)) %>%
mutate(sessionId = getSessionId(imNameShort)) %>%
mutate(correct = (rating == category))
write.table(d.pretty, "../output/museumdraw_E1c_recognitionData.csv", sep=",")
|
# plot4.R -- 2x2 panel of household power measurements for 2007-02-01/02.
library(datasets)
library(data.table)
#1 Read the data set (all columns as character per colClasses) and keep the
#  two dates of interest.
df.project <- fread("./household_power_consumption.txt", sep = ";", header = T, colClasses = 'character')
df.project <- subset(df.project, df.project$Date =="1/2/2007" | df.project$Date =="2/2/2007")
#4 Open a 480x480 png device with the required dimensions.
#  Bug fix: the device call is no longer assigned to a variable named `png`,
#  which shadowed the grDevices::png function for the rest of the session.
png(file = "plot4.png", 480, 480)
#3 Arrange four plots in a 2x2 grid, filled column-wise.
par(mfcol = c(2,2),mar = c(4,4,2,2))
#5 Combine Date and Time into one datetime axis, then plot global active
#  power. Columns were read as character, so convert to numeric explicitly
#  (consistent with the as.numeric() calls in the other panels).
dateTime <- as.POSIXlt(paste(as.Date(df.project$Date,format="%d/%m/%Y"), df.project$Time, sep=" "))
plot(dateTime, as.numeric(df.project$Global_active_power), type = "l", xlab = " ", ylab = "Global Active Power (Kilowatts)")
#6 Energy sub-metering: three channels overlaid, with a legend.
plot(dateTime, as.numeric(df.project$Sub_metering_1), type="l", xlab=" ", ylab="Energy sub metering", ylim=c(0,40))
lines(dateTime,y=as.numeric(df.project$Sub_metering_2),ylim=c(0,40),col="red")
lines(dateTime,y=as.numeric(df.project$Sub_metering_3),ylim=c(0,40),col="blue")
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),pch=c(NA,NA,NA),col=c("black","red","blue"),lty=c(1,1,1),xjust=1)
#7 Voltage over time.
plot(dateTime,y=as.numeric(df.project$Voltage),type="l",ylab="Voltage")
#8 Global reactive power over time.
plot(dateTime,y=as.numeric(df.project$Global_reactive_power),type="l",ylab="Global_reactive_power")
# Close the graphics device so the file is flushed to disk.
dev.off()
|
/plot4.R
|
no_license
|
masroorrizvi/ExData_Plotting1
|
R
| false
| false
| 1,464
|
r
|
library(datasets)
library(data.table)
#1 read and subset the data
df.project <- fread("./household_power_consumption.txt", sep = ";", header = T, colClasses = 'character')
df.project <- subset(df.project, df.project$Date =="1/2/2007" | df.project$Date =="2/2/2007")
#4 Create a png file with required dimentions
png<-png(file = "plot4.png",480,480)
#3 Give the par value for the plot
par(mfcol = c(2,2),mar = c(4,4,2,2))
#5 Let us create the Global Active power line plot
dateTime <- as.POSIXlt(paste(as.Date(df.project$Date,format="%d/%m/%Y"), df.project$Time, sep=" "))
plot(dateTime,df.project$Global_active_power,type = "l", xlab = " ", ylab = "Global Active Power (Kilowatts)")
#6 Let us now create the energy submetering plot
plot(dateTime,df.project$Sub_metering_1,type="l",xlab=" ",ylab="Energy sub metering",ylim=c(0,40))
lines(dateTime,y=as.numeric(df.project$Sub_metering_2),ylim=c(0,40),col="red")
lines(dateTime,y=as.numeric(df.project$Sub_metering_3),ylim=c(0,40),col="blue")
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),pch=c(NA,NA,NA),col=c("black","red","blue"),lty=c(1,1,1),xjust=1)
#7 Time to create the 3rd plot for voltage
plot(dateTime,y=as.numeric(df.project$Voltage),type="l",ylab="Voltage")
#8 Lastly, create the plot for Global Reactive Power
plot(dateTime,y=as.numeric(df.project$Global_reactive_power),type="l",ylab="Global_reactive_power")
# Dont forget to close the connection
dev.off()
|
## plot3.R -- time series of the three sub-metering channels for
## 2007-02-01 and 2007-02-02, written to plot3.png.
## Read data & set tidy column names
FullPower <-read.table("household_power_consumption.txt", header = TRUE, sep = ";")
colnames(FullPower) <- c("Date","Time","Active","Reactive","Voltage","Intensity","SubMeter1","SubMeter2","SubMeter3")
## Subset data by date, then merge date and time into a POSIXlt column
FullPower$Date <- as.Date(FullPower$Date, format="%d/%m/%Y")
Power <- subset(FullPower, Date == "2007-02-01" | Date == "2007-02-02")
Power$datetime <- paste(Power$Date, Power$Time)
Power$datetime <- strptime(Power$datetime, "%Y-%m-%d %H:%M:%S")
## Open graphic device and plot data.
## as.numeric(as.character(...)) coerces the sub-metering columns to numeric
## regardless of whether read.table produced factor or character columns
## (presumably non-numeric because of missing-value markers in the raw file
## -- TODO confirm against the source data).
png(filename="plot3.png", width=480, height=480)
with(Power, {
  plot(datetime, as.numeric(as.character(SubMeter1)),type="l", xlab="", ylab="Energy sub metering")
  lines(datetime, as.numeric(as.character(SubMeter2)),type="l", col="red")
  lines(datetime, as.numeric(as.character(SubMeter3)),type="l", col="blue")
})
legend("topright", col = c("black","red","blue"), legend = c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"), lwd=1)
## Close the device so plot3.png is written
dev.off()
|
/plot3.R
|
no_license
|
heatherhessel/ExData_Plotting1
|
R
| false
| false
| 1,017
|
r
|
##Read data & set tidy column names
FullPower <-read.table("household_power_consumption.txt", header = TRUE, sep = ";")
colnames(FullPower) <- c("Date","Time","Active","Reactive","Voltage","Intensity","SubMeter1","SubMeter2","SubMeter3")
##Subset data by date, merge date and time
FullPower$Date <- as.Date(FullPower$Date, format="%d/%m/%Y")
Power <- subset(FullPower, Date == "2007-02-01" | Date == "2007-02-02")
Power$datetime <- paste(Power$Date, Power$Time)
Power$datetime <- strptime(Power$datetime, "%Y-%m-%d %H:%M:%S")
##Open graphic device and plot data
png(filename="plot3.png", width=480, height=480)
with(Power, {
plot(datetime, as.numeric(as.character(SubMeter1)),type="l", xlab="", ylab="Energy sub metering")
lines(datetime, as.numeric(as.character(SubMeter2)),type="l", col="red")
lines(datetime, as.numeric(as.character(SubMeter3)),type="l", col="blue")
})
legend("topright", col = c("black","red","blue"), legend = c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"), lwd=1)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exRawChnkAdultData.R
\docType{data}
\name{exRawChnkAdultData}
\alias{exRawChnkAdultData}
\title{Example Chinook Adult Data from LGTrappingDB}
\format{Data exported from the LGTrappingDB and originally saved as a csv file}
\usage{
exRawChnkAdultData
}
\description{
This is raw Chinook adult data 'dumped' out of the LGTrappingDB that was originally saved as a .csv file. No formatting
has been done whatsoever. Data is from SY2015.
}
\keyword{Chinook}
\keyword{adult}
\keyword{data}
|
/man/exRawChnkAdultData.Rd
|
no_license
|
lawrykatherine/SCOBI
|
R
| false
| true
| 562
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exRawChnkAdultData.R
\docType{data}
\name{exRawChnkAdultData}
\alias{exRawChnkAdultData}
\title{Example Chinook Adult Data from LGTrappingDB}
\format{Data exported from the LGTrappingDB and originally saved as a csv file}
\usage{
exRawChnkAdultData
}
\description{
This is raw Chinook adult data 'dumped' out of the LGTrappingDB that was originally saved as a .csv file. No formatting
has been done whatsoever. Data is from SY2015.
}
\keyword{Chinook}
\keyword{adult}
\keyword{data}
|
#### Loading the data in #####
# NOTE(review): absolute Windows path -- this script only runs on the
# author's machine; a relative path would be more portable.
data <- read.csv("C:/Users/talmo/Documents/activity.csv", stringsAsFactors=FALSE)
head(data)
# set date column and separate week versus weekend days:
data$date <- as.POSIXct(data$date, format="%Y-%m-%d")
data$weekday<-weekdays(data$date)
data$weekend.week <- ""
# Classify each row as "weekend" or "weekday".
# NOTE(review): weekdays() output is locale-dependent; the English
# "Saturday"/"Sunday" comparison fails under non-English locales -- confirm
# the intended runtime locale.
for (i in 1:nrow(data)){
if (data$weekday[i] == "Saturday" | data$weekday[i] == "Sunday"){
data$weekend.week[i]<-"weekend"}
else{data$weekend.week[i]<-"weekday"}}
##### calculate the mean ######
# Total steps per day, ignoring missing values
sum_data <- aggregate(data$steps, by=list(data$date), FUN=sum, na.rm=TRUE)
names(sum_data) <- c("date", "Step")
# plot number of steps each day: histogram with a dashed line at the mean
library(ggplot2)
p<-ggplot(sum_data, aes(x=Step)) +
  geom_vline(aes(xintercept=mean(Step)),color="blue", linetype="dashed", size=1)+
  geom_histogram(binwidth=1000,color="darkblue", fill="lightblue")+ggtitle("Steps per day Frequency")
p
mean(sum_data$Step)
median(sum_data$Step)
#### What is the average daily activity pattern? #####
# Compute the mean step count per interval, averaged across all days
intervals<- aggregate(data$steps,by=list(data$interval),FUN=mean, na.rm=TRUE)
names(intervals) <- c("interval", "Avg")
# plot
plot(intervals$interval, intervals$Avg,type="l",
     col="red",
     lwd=2,
     xlab="Interval [minutes]",
     ylab="Average number of steps",
     main="Average number of steps per intervals")
# Interval whose mean step count is the maximum
maximum<- which(intervals$Avg == max(intervals$Avg))
max_interval <- intervals[maximum, 1]
max_interval
######## Imputing the missing values ##############
sum(is.na(data$steps))
# Replace NAs with the single overall mean step count (note: the global
# mean, not the per-interval mean)
data$steps[is.na(data$steps)]<-mean(data$steps, na.rm=TRUE) #replace NA with total mean
# Compute the total number of steps each day (NA values now imputed)
sum_data1 <- aggregate(data$steps, by=list(data$date), FUN=sum)
names(sum_data1) <- c("date", "Step")
# Compute the histogram of the total number of steps each day
p1<-ggplot(sum_data1, aes(x=Step)) +
  geom_vline(aes(xintercept=mean(Step)),color="blue", linetype="dashed", size=1)+
  geom_histogram(binwidth=1000,color="red", fill="yellow")+ggtitle("Steps per day Frequency (NA replaced by Avg value)")
p1
mean(sum_data1$Step)
median(sum_data1$Step)
#### Are there differences in activity patterns between weekdays and weekends?
library(lattice)
# Average number of steps per interval, grouped by day type and weekday
mean_data <- aggregate(data$steps,
                       by=list(data$weekend.week,
                               data$weekday, data$interval), mean)
names(mean_data) <- c("daytype", "weekday", "interval", "mean")
# Time-series panel plot: one panel per day type (weekday vs weekend)
xyplot(mean ~ interval | daytype, mean_data,
       type="l",
       lwd=1,
       xlab="Interval",
       ylab="Number of steps",
       col='black',
       layout=c(1,2))
|
/reporting_data_assig1_TM.R
|
no_license
|
talmormeir/RepData_PeerAssessment1
|
R
| false
| false
| 2,800
|
r
|
#### Loading the data in #####
data <- read.csv("C:/Users/talmo/Documents/activity.csv", stringsAsFactors=FALSE)
head(data)
# set date column and seperate week versus weekend days:
data$date <- as.POSIXct(data$date, format="%Y-%m-%d")
data$weekday<-weekdays(data$date)
data$weekend.week <- ""
for (i in 1:nrow(data)){
if (data$weekday[i] == "Saturday" | data$weekday[i] == "Sunday"){
data$weekend.week[i]<-"weekend"}
else{data$weekend.week[i]<-"weekday"}}
##### calculate the mean ######
sum_data <- aggregate(data$steps, by=list(data$date), FUN=sum, na.rm=TRUE)
names(sum_data) <- c("date", "Step")
# plot number of steps each day
library(ggplot2)
p<-ggplot(sum_data, aes(x=Step)) +
geom_vline(aes(xintercept=mean(Step)),color="blue", linetype="dashed", size=1)+
geom_histogram(binwidth=1000,color="darkblue", fill="lightblue")+ggtitle("Steps per day Frequency")
p
mean(sum_data$Step)
median(sum_data$Step)
#### What is the average daily activity pattern? #####
# Compute the means per interval
intervals<- aggregate(data$steps,by=list(data$interval),FUN=mean, na.rm=TRUE)
names(intervals) <- c("interval", "Avg")
# plot
plot(intervals$interval, intervals$Avg,type="l",
col="red",
lwd=2,
xlab="Interval [minutes]",
ylab="Average number of steps",
main="Average number of steps per intervals")
# maximum mean interval
maximum<- which(intervals$Avg == max(intervals$Avg))
max_interval <- intervals[maximum, 1]
max_interval
######## Inputing the missing values ##############
sum(is.na(data$steps))
data$steps[is.na(data$steps)]<-mean(data$steps, na.rm=TRUE) #replace NA with total mean
# Compute the total number of steps each day (NA values removed)
sum_data1 <- aggregate(data$steps, by=list(data$date), FUN=sum)
names(sum_data1) <- c("date", "Step")
# Compute the histogram of the total number of steps each day
p1<-ggplot(sum_data1, aes(x=Step)) +
geom_vline(aes(xintercept=mean(Step)),color="blue", linetype="dashed", size=1)+
geom_histogram(binwidth=1000,color="red", fill="yellow")+ggtitle("Steps per day Frequency (NA replaced by Avg value)")
p1
mean(sum_data1$Step)
median(sum_data1$Step)
####Are there differences in activity patterns between weekdays and weekends?
library(lattice)
# Compute the average number of steps taken, averaged across all daytype variable
mean_data <- aggregate(data$steps,
by=list(data$weekend.week,
data$weekday, data$interval), mean)
names(mean_data) <- c("daytype", "weekday", "interval", "mean")
# Compute the time serie plot
xyplot(mean ~ interval | daytype, mean_data,
type="l",
lwd=1,
xlab="Interval",
ylab="Number of steps",
col='black',
layout=c(1,2))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.ec2_operations.R
\name{delete_vpc_peering_connection}
\alias{delete_vpc_peering_connection}
\title{Deletes a VPC peering connection}
\usage{
delete_vpc_peering_connection(DryRun = NULL, VpcPeeringConnectionId)
}
\arguments{
\item{DryRun}{Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is \code{DryRunOperation}. Otherwise, it is \code{UnauthorizedOperation}.}
\item{VpcPeeringConnectionId}{[required] The ID of the VPC peering connection.}
}
\description{
Deletes a VPC peering connection. Either the owner of the requester VPC or the owner of the accepter VPC can delete the VPC peering connection if it's in the \code{active} state. The owner of the requester VPC can delete a VPC peering connection in the \code{pending-acceptance} state. You cannot delete a VPC peering connection that's in the \code{failed} state.
}
\section{Accepted Parameters}{
\preformatted{delete_vpc_peering_connection(
DryRun = TRUE|FALSE,
VpcPeeringConnectionId = "string"
)
}
}
|
/service/paws.ec2/man/delete_vpc_peering_connection.Rd
|
permissive
|
CR-Mercado/paws
|
R
| false
| true
| 1,192
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.ec2_operations.R
\name{delete_vpc_peering_connection}
\alias{delete_vpc_peering_connection}
\title{Deletes a VPC peering connection}
\usage{
delete_vpc_peering_connection(DryRun = NULL, VpcPeeringConnectionId)
}
\arguments{
\item{DryRun}{Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is \code{DryRunOperation}. Otherwise, it is \code{UnauthorizedOperation}.}
\item{VpcPeeringConnectionId}{[required] The ID of the VPC peering connection.}
}
\description{
Deletes a VPC peering connection. Either the owner of the requester VPC or the owner of the accepter VPC can delete the VPC peering connection if it's in the \code{active} state. The owner of the requester VPC can delete a VPC peering connection in the \code{pending-acceptance} state. You cannot delete a VPC peering connection that's in the \code{failed} state.
}
\section{Accepted Parameters}{
\preformatted{delete_vpc_peering_connection(
DryRun = TRUE|FALSE,
VpcPeeringConnectionId = "string"
)
}
}
|
# Summarize an environmental covariate ('discharge' or 'temperature') into a
# year x river matrix, z-scored within each river (column).
# Two mutually exclusive modes:
#   * FUN supplied: apply FUN to the covariate within each "year of effect"
#     (water year: Oct-Sep, labelled by the calendar year it ends in).
#   * threshold/high.low/freq.dur supplied: summarize extreme events, either
#     as the number of days beyond the threshold ("duration") or the number
#     of distinct runs of such days ("frequency").
# NOTE(review): relies on globals defined elsewhere (dailyDischarge, ed,
# start_year, end_year -- data.table objects/values) and hard-codes exactly
# 4 rivers; confirm both before reuse.
env.cov<-function(covariate,month,threshold=NA,high.low=NA,
                  freq.dur=NA,FUN=NULL){
  # FUN and the threshold-style arguments cannot be combined
  if(!is.null(FUN) & any(!is.na(threshold),!is.na(high.low),!is.na(freq.dur))){
    stop("cannot define both 'FUN' and threshold/high.low/freq.dur")
  }
  if(!covariate %in% c('discharge','temperature')){
    stop("covariate must equal 'discharge' or 'temperature'")}
  if(covariate=='discharge'){data<-dailyDischarge}
  if(covariate=='temperature'){data<-ed}
  # Restrict to complete water years: Oct 1 of (start_year-1) up to (but
  # excluding) Oct 1 of end_year
  data<-data[date >= as.Date(paste0(start_year-1,"-10-01")) &
               date < as.Date(paste0(end_year,"-10-01"))]
  # "Year of effect" = water year: Oct-Dec observations count toward the
  # following calendar year
  data[,year_of_effect:=year(date)]
  data[month(date)>=10,year_of_effect:=year(date)+1]
  rivers<-unique(data$river)
  # rows = water years, cols = rivers (assumed to be exactly 4)
  result<-array(NA,dim=c(length(unique(year(data$date)))-1,4))
  if(is.null(FUN)){
    if(!high.low %in% c('high','low')){
      stop("high.low must equal 'high' or 'low' defining whether the event is a high or low extreme")}
    if(!freq.dur %in% c('frequency','duration')){
      stop("freq.dur must equal 'frequency' or 'duration' defining whether the function returns the number of days when the threshold is crossed (duration) or the number of times(frequency)")}
    if(covariate == 'discharge' & (is.na(threshold) | threshold<0 | threshold > 1)){
      stop("for discharge covariates threshold must be a number between 0 and 1 representing the quantile definition of an extreme event")
    }
    # Count the distinct runs of consecutive indices in x: each run is one
    # threshold-crossing "event"
    num.sets<-function(x){
      if(length(x)==0){return(as.integer(0))} else
        xlag<-c(-1,x[1:(length(x)-1)])
      return(length(which(x-xlag>1)))
    }
    # Per-river limit: for discharge, a quantile of that river's record;
    # for temperature, the raw threshold value supplied by the caller
    limit<-NULL
    for(r in 1:4){
      if(covariate=="discharge") {limit[r]<-quantile(data[river==rivers[r],get(covariate)],probs=threshold,na.rm=T)}
      if(covariate=="temperature") {limit[r]<-threshold}
    }
    for(r in 1:4){
      # frequency: number of separate threshold-crossing events per year
      if(freq.dur=="frequency"){result[,r]<-data[river==rivers[r]&month(date) %in% month,
                                                 ifelse(high.low=="high",
                                                        num.sets(which(get(covariate) >= limit[r])),
                                                        num.sets(which(get(covariate) <= limit[r]))),
                                                 by=year_of_effect]$V1
      }
      # duration: number of days beyond the threshold per year
      if(freq.dur=="duration"){result[,r]<-data[river==rivers[r]&month(date) %in% month,
                                                ifelse(high.low=="high",
                                                       length(which(get(covariate) >= limit[r])),
                                                       length(which(get(covariate) <= limit[r]))),
                                                by=year_of_effect]$V1
      }
    }} else{ #if FUN isn't null: apply the user-supplied summary per water year
      for(r in 1:4){
        result[,r]<-data[river == rivers[r] & month(date) %in% month,
                         FUN(get(covariate)),
                         by = year_of_effect]$V1
      }
    }
  # z-score each river's column; scale() returns a 1-column matrix, so keep
  # column 1 to get a plain vector back
  scale_simple<-function(x) {
    a<-scale(x)
    return(a[,1])}
  result<-apply(result,2,scale_simple)
  return(result)
}
# NOTE(review): `covInt` is declared WITHOUT braces, so its body is only the
# single expression that follows (the array() allocation). Every statement
# after that runs immediately when this file is sourced, not when covInt()
# is called. If a function wrapping the whole pipeline was intended, the
# body needs `{ }`. Left byte-identical pending confirmation of intent.
covInt<-function()
# year x river x covariate array; the "+2" slots hold the two
# high-resolution summer-temperature covariates appended below
covariates<-array(dim=c(end_year-start_year+1,4,length(names(covariate_inputs))+2))
dimnames(covariates)<-list(start_year:end_year,
                           unique(ed$rivers),
                           c(names(covariate_inputs),
                             "summerTempHighRes",
                             "summerTempMeanHighRes"))
# Fill one slice per configured covariate definition (covariate_inputs is a
# named list of env.cov argument lists -- defined elsewhere)
for(i in 1:length(names(covariate_inputs))){
  covariates[,,i]<-do.call(env.cov,covariate_inputs[[i]])
}
# Append the high-resolution summer temperature covariates, z-scored per
# column (river); scale() returns a matrix, so keep column 1
covariates[,,"summerTempHighRes"]<-apply(summerT,2,
                                         function(x){return(scale(x)[,1])})
covariates[,,"summerTempMeanHighRes"]<-apply(meanSummerT,2,
                                             function(x){return(scale(x)[,1])})
# Publish to the shared environment and persist to disk
# (`env=` relies on partial matching of assign()'s `envir` argument)
assign('covariates',covariates,env=shared_data)
saveRDS(covariates,"~/process-data/data_store/processed_data/covariates.rds")
|
/oldCode/env_prep/create_covariates.R
|
no_license
|
evanchildress/trout_yoy
|
R
| false
| false
| 3,820
|
r
|
env.cov<-function(covariate,month,threshold=NA,high.low=NA,
freq.dur=NA,FUN=NULL){
if(!is.null(FUN) & any(!is.na(threshold),!is.na(high.low),!is.na(freq.dur))){
stop("cannot define both 'FUN' and threshold/high.low/freq.dur")
}
if(!covariate %in% c('discharge','temperature')){
stop("covariate must equal 'discharge' or 'temperature'")}
if(covariate=='discharge'){data<-dailyDischarge}
if(covariate=='temperature'){data<-ed}
data<-data[date >= as.Date(paste0(start_year-1,"-10-01")) &
date < as.Date(paste0(end_year,"-10-01"))]
data[,year_of_effect:=year(date)]
data[month(date)>=10,year_of_effect:=year(date)+1]
rivers<-unique(data$river)
result<-array(NA,dim=c(length(unique(year(data$date)))-1,4))
if(is.null(FUN)){
if(!high.low %in% c('high','low')){
stop("high.low must equal 'high' or 'low' defining whether the event is a high or low extreme")}
if(!freq.dur %in% c('frequency','duration')){
stop("freq.dur must equal 'frequency' or 'duration' defining whether the function returns the number of days when the threshold is crossed (duration) or the number of times(frequency)")}
if(covariate == 'discharge' & (is.na(threshold) | threshold<0 | threshold > 1)){
stop("for discharge covariates threshold must be a number between 0 and 1 representing the quantile definition of an extreme event")
}
num.sets<-function(x){
if(length(x)==0){return(as.integer(0))} else
xlag<-c(-1,x[1:(length(x)-1)])
return(length(which(x-xlag>1)))
}
limit<-NULL
for(r in 1:4){
if(covariate=="discharge") {limit[r]<-quantile(data[river==rivers[r],get(covariate)],probs=threshold,na.rm=T)}
if(covariate=="temperature") {limit[r]<-threshold}
}
for(r in 1:4){
if(freq.dur=="frequency"){result[,r]<-data[river==rivers[r]&month(date) %in% month,
ifelse(high.low=="high",
num.sets(which(get(covariate) >= limit[r])),
num.sets(which(get(covariate) <= limit[r]))),
by=year_of_effect]$V1
}
if(freq.dur=="duration"){result[,r]<-data[river==rivers[r]&month(date) %in% month,
ifelse(high.low=="high",
length(which(get(covariate) >= limit[r])),
length(which(get(covariate) <= limit[r]))),
by=year_of_effect]$V1
}
}} else{ #if FUN isn't null
for(r in 1:4){
result[,r]<-data[river == rivers[r] & month(date) %in% month,
FUN(get(covariate)),
by = year_of_effect]$V1
}
}
scale_simple<-function(x) {
a<-scale(x)
return(a[,1])}
result<-apply(result,2,scale_simple)
return(result)
}
covInt<-function()
covariates<-array(dim=c(end_year-start_year+1,4,length(names(covariate_inputs))+2))
dimnames(covariates)<-list(start_year:end_year,
unique(ed$rivers),
c(names(covariate_inputs),
"summerTempHighRes",
"summerTempMeanHighRes"))
for(i in 1:length(names(covariate_inputs))){
covariates[,,i]<-do.call(env.cov,covariate_inputs[[i]])
}
covariates[,,"summerTempHighRes"]<-apply(summerT,2,
function(x){return(scale(x)[,1])})
covariates[,,"summerTempMeanHighRes"]<-apply(meanSummerT,2,
function(x){return(scale(x)[,1])})
assign('covariates',covariates,env=shared_data)
saveRDS(covariates,"~/process-data/data_store/processed_data/covariates.rds")
|
options(scipen=999)
library(HMDHFDplus)
library(dplyr)
library(tidyr)
## Reading the data from Mx_1x1 tables from HMD for countries listed in appendix A of paper:
## A Neural Network Extension of the Lee-Carter Model to Multiple Populations, Ronald Richman and Mario V. Wuthrich (Version of October 22, 2018)
## NOTE(review): `data_folder` is not defined in this script -- it must
## exist in the calling environment before this file is sourced.
stat <- "Mx_1x1"
filetype <- "txt"
countries <- c("AUS", "AUT", "BEL", "BGR", "BLR", "CAN","CHE","CHL","CZE","DEUTNP","DNK",
               "ESP", "EST", "FIN", "FRATNP", "GBRTENW", "GBR_NIR", "GBR_SCO",
               "GRC", "HRV", "HUN", "IRL", "ISL", "ISR", "ITA", "JPN", "LTU", "LUX", "LVA",
               "NLD", "NOR", "NZL_NM", "POL", "PRT", "RUS", "SVK", "SVN","SWE", "TWN", "UKR", "USA")
files <- paste(countries, stat, filetype, sep = ".")
long_dir_files <- paste(data_folder,files, sep="/")
HMD_list <- list()
## Per country: filter by Age (<100) and Year (1950 <= Year <= 2016),
## drop the "Total" and "OpenInterval" columns, and
## pivot the "Female"/"Male" columns into long Gender/mx pairs.
for (i in 1:length(long_dir_files)){
  HMD_list[[countries[i]]] <- readHMD(long_dir_files[i]) %>% filter(Year >= 1950 & Year <= 2016 & Age < 100) %>%
    select(-c(Total,OpenInterval)) %>%
    pivot_longer(c(Female,Male), names_to = "Gender", values_to = "mx" )
}
rm(list = c("files", "countries", "filetype","stat", "i"))
## NOTE(review): data.table() is called but the data.table package is not
## attached in this script -- confirm it is loaded upstream.
HMD_data <- bind_rows(HMD_list, .id = "Country") %>% data.table()
## Keep only the countries with more than 10 years of observation before year 2000
Analyzed_Countries <- HMD_data %>% select(Country, Year) %>% distinct() %>% filter(Year < 2000) %>%
  group_by(Country) %>% summarise(n= n()) %>% filter(n>10) %>% select(Country) %>% unlist()
HMD_data_chosen <- HMD_data %>% filter(Country %in% Analyzed_Countries)
## Set mx to NA when mx <= 0. (The original comment also mentioned an
## mx >= 1 cap, but that case is NOT filtered by the code below.)
HMD_data_chosen <- HMD_data_chosen %>%
  mutate(mx = if_else(mx > 0, mx, NA_real_))
rm(list = c("Analyzed_Countries","HMD_data", "HMD_list"))
## Impute NA values in the mx column with the average mx by Age, Gender and
## Year across all countries read above; flag imputed rows and add log(mx)
toImpute <- HMD_data_chosen %>% filter(mx >0) %>% group_by(Age, Gender, Year) %>% summarize(mx_avg = mean(mx))
HMD_final <- HMD_data_chosen %>% inner_join(toImpute, by = c("Age", "Gender", "Year")) %>%
  mutate(imputed_flag = is.na(mx), mx = if_else(is.na(mx),mx_avg,mx), logmx = log(mx)) %>%
  select(-mx_avg)
rm(list = c("HMD_data_chosen","toImpute"))
|
/02 - Life/Mortality_forecasting/LC CNN/_scripts/0_dataReading.R
|
no_license
|
tongliaowuqilong/grupa_ads
|
R
| false
| false
| 2,553
|
r
|
## Read HMD "Mx_1x1" mortality tables and build the data set for the LC-CNN
## model. Country list follows appendix A of the paper:
## "A Neural Network Extension of the Lee-Carter Model to Multiple Populations",
## Ronald Richman, Mario V. Wuthrich (version of October 22, 2018).
## NOTE(review): `data_folder` is used below but never defined in this script;
## it must exist in the calling environment before sourcing -- TODO confirm.
## NOTE(review): data.table() is called below, but library(data.table) is not
## loaded here; presumably attached by an earlier script -- verify.
options(scipen=999)
library(HMDHFDplus)
library(dplyr)
library(tidyr)
## HMD country codes to read (appendix A of the paper above).
stat <- "Mx_1x1"
filetype <- "txt"
countries <- c("AUS", "AUT", "BEL", "BGR", "BLR", "CAN","CHE","CHL","CZE","DEUTNP","DNK",
               "ESP", "EST", "FIN", "FRATNP", "GBRTENW", "GBR_NIR", "GBR_SCO",
               "GRC", "HRV", "HUN", "IRL", "ISL", "ISR", "ITA", "JPN", "LTU", "LUX", "LVA",
               "NLD", "NOR", "NZL_NM", "POL", "PRT", "RUS", "SVK", "SVN","SWE", "TWN", "UKR", "USA")
## Build "<COUNTRY>.Mx_1x1.txt" file names and their full paths under data_folder.
files <- paste(countries, stat, filetype, sep = ".")
long_dir_files <- paste(data_folder,files, sep="/")
HMD_list <- list()
## For every country: read the table, keep Age < 100 and 1950 <= Year <= 2016,
## drop the "Total" and "OpenInterval" columns, and pivot the "Female"/"Male"
## columns into long format (a "Gender" column plus a death-rate column "mx").
for (i in 1:length(long_dir_files)){
HMD_list[[countries[i]]] <- readHMD(long_dir_files[i]) %>% filter(Year >= 1950 & Year <= 2016 & Age < 100) %>%
select(-c(Total,OpenInterval)) %>%
pivot_longer(c(Female,Male), names_to = "Gender", values_to = "mx" )
}
rm(list = c("files", "countries", "filetype","stat", "i"))
## Stack all countries into one table, tagging each row with its country code.
HMD_data <- bind_rows(HMD_list, .id = "Country") %>% data.table()
## Keep only countries with more than 10 years of observations before year 2000.
Analyzed_Countries <- HMD_data %>% select(Country, Year) %>% distinct() %>% filter(Year < 2000) %>%
group_by(Country) %>% summarise(n= n()) %>% filter(n>10) %>% select(Country) %>% unlist()
HMD_data_chosen <- HMD_data %>% filter(Country %in% Analyzed_Countries)
## Set mx to NA when mx <= 0.
## NOTE(review): the original comment also claimed rates mx >= 1 are removed,
## but the condition below only tests mx > 0, so values of 1 or above are kept.
HMD_data_chosen <- HMD_data_chosen %>%
mutate(mx = if_else(mx > 0, mx, NA_real_))
rm(list = c("Analyzed_Countries","HMD_data", "HMD_list"))
## Impute NA mx values with the average mx by Age, Gender and Year over all
## countries read above; flag the imputed rows and add logmx = log(mx).
toImpute <- HMD_data_chosen %>% filter(mx >0) %>% group_by(Age, Gender, Year) %>% summarize(mx_avg = mean(mx))
HMD_final <- HMD_data_chosen %>% inner_join(toImpute, by = c("Age", "Gender", "Year")) %>%
mutate(imputed_flag = is.na(mx), mx = if_else(is.na(mx),mx_avg,mx), logmx = log(mx)) %>%
select(-mx_avg)
rm(list = c("HMD_data_chosen","toImpute"))
|
## Example for parsnip::fit() / fit_xy(): the same logistic-regression
## specification fitted through the formula interface and the x/y interface.
library(parsnip)
### Name: fit.model_spec
### Title: Fit a Model Specification to a Dataset
### Aliases: fit.model_spec fit_xy.model_spec
### ** Examples
# Although `glm` only has a formula interface, different
# methods for specifying the model can be used
library(dplyr)
data("lending_club")
# Model specification (the duplicated assignment in the original was removed).
lr_mod <- logistic_reg()
# Formula interface: outcome and predictors given as a formula.
using_formula <-
  lr_mod %>%
  set_engine("glm") %>%
  fit(Class ~ funded_amnt + int_rate, data = lending_club)
# x/y interface: predictor columns and outcome passed separately.
using_xy <-
  lr_mod %>%
  set_engine("glm") %>%
  fit_xy(x = lending_club[, c("funded_amnt", "int_rate")],
         y = lending_club$Class)
# Print both fits; the underlying glm results are identical.
using_formula
using_xy
|
/data/genthat_extracted_code/parsnip/examples/fit.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 635
|
r
|
## Example for parsnip::fit() / fit_xy(): the same logistic-regression
## specification fitted through the formula interface and the x/y interface.
library(parsnip)
### Name: fit.model_spec
### Title: Fit a Model Specification to a Dataset
### Aliases: fit.model_spec fit_xy.model_spec
### ** Examples
# Although `glm` only has a formula interface, different
# methods for specifying the model can be used
library(dplyr)
data("lending_club")
# Model specification (the duplicated assignment in the original was removed).
lr_mod <- logistic_reg()
# Formula interface: outcome and predictors given as a formula.
using_formula <-
  lr_mod %>%
  set_engine("glm") %>%
  fit(Class ~ funded_amnt + int_rate, data = lending_club)
# x/y interface: predictor columns and outcome passed separately.
using_xy <-
  lr_mod %>%
  set_engine("glm") %>%
  fit_xy(x = lending_club[, c("funded_amnt", "int_rate")],
         y = lending_club$Class)
# Print both fits; the underlying glm results are identical.
using_formula
using_xy
|
\name{proteinLocsToGenomic}
\alias{proteinLocsToGenomic}
\title{
Obtaining the genomic coordinates for a list of protein sections
}
\description{
The function takes a list of protein sections and the corresponding ENSEMBL ID
of these proteins, and tries to find the genomic coordinates of these protein
sections.
}
\usage{
proteinLocsToGenomic(inputLoci, CDSaaFile)
}
\arguments{
\item{inputLoci}{
A data frame containing the protein sections as the input. The 1st column must
be the ENSEMBL ID of either the protein or the transcript encoding the protein
(or the equivalent of
ENSEMBL ID if you have created your own gene annotation GTF file). But you have
to use only one of two formats (namely either protein ID or transcript ID), and
cannot use both of them in the input of one function call. The 2nd and 3rd
columns give the coordinate of the first and last amino acids of the section
along the protein sequence. Other columns are optional and will not be used by
the function.
}
\item{CDSaaFile}{
The data file generated by the package's function \code{generatingCDSaaFile},
containing the genomic locations, DNA sequences and protein sequences of all
coding regions in a specific genome which is used in your analysis.
}
}
\value{
The function returns a data frame containing the original protein locations
specified in the input and before them, the six added columns for the
corresponding genomic coordinates of the protein sections:
\itemize{
\item The 1st, 2nd, 3rd and 4th columns give the chromosome name, the
coordinates of the start and end positions, and the strand in the chromosome,
which specify the genomic locus corresponding to the protein section.
\item The 5th and 6th columns give the first and last coding exons in the given
transcript which correspond to the given protein section.
}
}
\author{
Yaoyong Li
}
\examples{
dataFolder = system.file("extdata", package="geno2proteo")
inputFile_loci=file.path(dataFolder,
"transId_pfamDomainStartEnd_chr16_Zdomains_22examples.txt")
CDSaaFile=file.path(dataFolder,
"Homo_sapiens.GRCh37.74_chromosome16_35Mlong.gtf.gz_AAseq.txt.gz")
inputLoci = read.table(inputFile_loci, sep="\t", stringsAsFactors=FALSE)
genomicLoci = proteinLocsToGenomic(inputLoci=inputLoci, CDSaaFile=CDSaaFile)
}
|
/man/proteinLocsToGenomic.Rd
|
no_license
|
cran/geno2proteo
|
R
| false
| false
| 2,321
|
rd
|
\name{proteinLocsToGenomic}
\alias{proteinLocsToGenomic}
\title{
Obtaining the genomic coordinates for a list of protein sections
}
\description{
The function takes a list of protein sections and the corresponding ENSEMBL ID
of these proteins, and tries to find the genomic coordinates of these protein
sections.
}
\usage{
proteinLocsToGenomic(inputLoci, CDSaaFile)
}
\arguments{
\item{inputLoci}{
A data frame containing the protein sections as the input. The 1st column must
be the ENSEMBL ID of either the protein or the transcript encoding the protein
(or the equivalent of
ENSEMBL ID if you have created your own gene annotation GTF file). But you have
to use only one of two formats (namely either protein ID or transcript ID), and
cannot use both of them in the input of one function call. The 2nd and 3rd
columns give the coordinate of the first and last amino acids of the section
along the protein sequence. Other columns are optional and will not be used by
the function.
}
\item{CDSaaFile}{
The data file generated by the package's function \code{generatingCDSaaFile},
containing the genomic locations, DNA sequences and protein sequences of all
coding regions in a specific genome which is used in your analysis.
}
}
\value{
The function returns a data frame containing the original protein locations
specified in the input and before them, the six added columns for the
corresponding genomic coordinates of the protein sections:
\itemize{
\item The 1st, 2nd, 3rd and 4th columns give the chromosome name, the
coordinates of the start and end positions, and the strand in the chromosome,
which specify the genomic locus corresponding to the protein section.
\item The 5th and 6th columns give the first and last coding exons in the given
transcript which correspond to the given protein section.
}
}
\author{
Yaoyong Li
}
\examples{
dataFolder = system.file("extdata", package="geno2proteo")
inputFile_loci=file.path(dataFolder,
"transId_pfamDomainStartEnd_chr16_Zdomains_22examples.txt")
CDSaaFile=file.path(dataFolder,
"Homo_sapiens.GRCh37.74_chromosome16_35Mlong.gtf.gz_AAseq.txt.gz")
inputLoci = read.table(inputFile_loci, sep="\t", stringsAsFactors=FALSE)
genomicLoci = proteinLocsToGenomic(inputLoci=inputLoci, CDSaaFile=CDSaaFile)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/remove_named_entities.R
\name{remove_named_entities}
\alias{remove_named_entities}
\title{Remove named entities using Python module nltk}
\usage{
remove_named_entities(input_dir, output_dir, nes_dir)
}
\arguments{
\item{input_dir}{Directory containing text files to remove named entities from.}
\item{output_dir}{Directory in which to save texts with named entities removed.}
\item{nes_dir}{Directory in which to save named entities for review.}
}
\description{
Remove named entities using Python module nltk.
}
|
/man/remove_named_entities.Rd
|
no_license
|
dtburk/gensci.stm
|
R
| false
| true
| 592
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/remove_named_entities.R
\name{remove_named_entities}
\alias{remove_named_entities}
\title{Remove named entities using Python module nltk}
\usage{
remove_named_entities(input_dir, output_dir, nes_dir)
}
\arguments{
\item{input_dir}{Directory containing text files to remove named entities from.}
\item{output_dir}{Directory in which to save texts with named entities removed.}
\item{nes_dir}{Directory in which to save named entities for review.}
}
\description{
Remove named entities using Python module nltk.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dfcompare.R
\name{dfcompare}
\alias{dfcompare}
\title{Compare two data frames}
\usage{
dfcompare(source, target, keys, summary = TRUE)
}
\arguments{
\item{source}{data frame of the source}
\item{target}{data frame of the target}
\item{keys}{a string containing a key. Pass a character vector for multiple keys. The keys will be used in a data.table join using on =.}
}
\value{
a list of data.tables for each column and the corresponding mismatches
}
\description{
This function compares two data frames. It checks if the source and target
variables are data frames, and if there are rows in both,
and if the keys are a character vector, and that the keys
exist in both source and target data frames.
}
|
/man/dfcompare.Rd
|
permissive
|
meihkv/dfcompare
|
R
| false
| true
| 784
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dfcompare.R
\name{dfcompare}
\alias{dfcompare}
\title{Compare two data frames}
\usage{
dfcompare(source, target, keys, summary = TRUE)
}
\arguments{
\item{source}{data frame of the source}
\item{target}{data frame of the target}
\item{keys}{a string containing a key. Pass a character vector for multiple keys. The keys will be used in a data.table join using on =.}
}
\value{
a list of data.tables for each column and the corresponding mismatches
}
\description{
This function compares two data frames. It checks if the source and target
variables are data frames, and if there are rows in both,
and if the keys are a character vector, and that the keys
exist in both source and target data frames.
}
|
## Plot 3: energy sub-metering for 2007-02-01 and 2007-02-02, saved to plot3.png.
## Read the raw data; "?" marks missing values.
## FIX: spell out `na.strings` -- the original "na.string" relied on R's
## partial argument matching.
data <- read.table("household_power_consumption.txt",
                   header = TRUE, sep = ";", na.strings = "?")
## Convert the Date column from character to Date class.
data$Date <- as.Date(as.character(data$Date), "%d/%m/%Y")
## Keep only the two target days (string comparisons are coerced to Date).
datau <- data[(data[,1] < "2007-02-03" & data[,1] > "2007-01-31"),]
## Combine date and time into one POSIXlt timestamp used as the x axis.
pasd <- paste(datau$Date, as.character(datau$Time))
pa <- strptime(pasd, format = "%Y-%m-%d %H:%M:%S")
datau <- cbind(pa, datau)
## Open the PNG device.
png(file="plot3.png")
## Draw the three sub-metering series as lines (all with default lwd = 1).
with(datau, plot(pa, Sub_metering_1,
                 ylab="Energy sub metering",
                 xlab = "",
                 type="l"))
lines(datau$pa, datau$Sub_metering_3, type = "l", col = "blue")
lines(datau$pa, datau$Sub_metering_2, type = "l", col = "red")
## Legend.
## FIX: line widths now match the plotted series (the original used
## lwd = c(1, 2, 2) although every line was drawn with lwd = 1).
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = c(1,1,1), lwd=c(1, 1, 1))
## Close the device, writing the file.
dev.off()
|
/Documents/RworkingDir/plot3.R
|
no_license
|
bossli/ExData_Plotting1
|
R
| false
| false
| 1,162
|
r
|
## Plot 3: energy sub-metering for 2007-02-01 and 2007-02-02, saved to plot3.png.
## Read the raw data; "?" marks missing values.
## FIX: spell out `na.strings` -- the original "na.string" relied on R's
## partial argument matching.
data <- read.table("household_power_consumption.txt",
                   header = TRUE, sep = ";", na.strings = "?")
## Convert the Date column from character to Date class.
data$Date <- as.Date(as.character(data$Date), "%d/%m/%Y")
## Keep only the two target days (string comparisons are coerced to Date).
datau <- data[(data[,1] < "2007-02-03" & data[,1] > "2007-01-31"),]
## Combine date and time into one POSIXlt timestamp used as the x axis.
pasd <- paste(datau$Date, as.character(datau$Time))
pa <- strptime(pasd, format = "%Y-%m-%d %H:%M:%S")
datau <- cbind(pa, datau)
## Open the PNG device.
png(file="plot3.png")
## Draw the three sub-metering series as lines (all with default lwd = 1).
with(datau, plot(pa, Sub_metering_1,
                 ylab="Energy sub metering",
                 xlab = "",
                 type="l"))
lines(datau$pa, datau$Sub_metering_3, type = "l", col = "blue")
lines(datau$pa, datau$Sub_metering_2, type = "l", col = "red")
## Legend.
## FIX: line widths now match the plotted series (the original used
## lwd = c(1, 2, 2) although every line was drawn with lwd = 1).
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = c(1,1,1), lwd=c(1, 1, 1))
## Close the device, writing the file.
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_documentation.R
\docType{data}
\name{male_first_name}
\alias{male_first_name}
\title{Male first names}
\format{A vector with 159 elements}
\usage{
male_first_name
}
\description{
A character vector containing male first names.
}
\keyword{datasets}
|
/man/male_first_name.Rd
|
no_license
|
LudvigOlsen/bhappyr
|
R
| false
| true
| 331
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_documentation.R
\docType{data}
\name{male_first_name}
\alias{male_first_name}
\title{Male first names}
\format{A vector with 159 elements}
\usage{
male_first_name
}
\description{
A character vector containing male first names.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/boolean.R
\name{consistency}
\alias{consistency}
\title{Compute the consistency value}
\usage{
consistency(formula1, type = "->", formula2, data)
}
\arguments{
\item{formula1}{A string, list of strings or function representing a Boolean formula in disjunctive normal form}
\item{type}{either "->", "<-" or "<->", depending on the direction of the implication that is to be evaluated}
\item{formula2}{A string, list of strings or function representing a Boolean formula in disjunctive normal form}
\item{data}{A data frame where the rows represent cases and the columns the sets. Column names must be as in the formula.}
}
\value{
the consistency score of the implication described by \code{formula1}, \code{type} and \code{formula2}
}
\description{
Computes the consistency score of "formula1 -> formula2" (sufficient condition)
or "formula1 <- formula2" (necessary condition), depending on whether \code{type}
is "->" or "<-".
If \code{type} is "<->" it computes an equivalence score of formula1 and formula2
via the formula \code{sum(min(X,Y))/sum(max(X,Y))}
}
\details{
Compute a consistency score for an implication/necessity/sufficiency statement.
If \code{formula} is a function, it must take a \code{data.frame} and return
a vector.
If \code{formula} is a string or list of strings, the following conventions hold:
Set names must be capitalized in the formula and the data; if they are
lowercase, they are interpreted as the negation of the set.
If \code{formula} is a string, logical 'or' is expressed as a '+',
and logical 'and' as a '*'.
If \code{formula} is a list of strings, the strings are assumed to be
the disjuncts and are concatenated with '+'.
The formula must be in disjunctive normal form, i.e. it must be a disjunction of
conjunctions of elementary or negated elementary sets. Example:
\code{A*b*C + a*B}
}
\examples{
require(QCA)
data(d.urban)
consistency("MLC + FRB", "->", "CP", d.urban)
}
|
/man/consistency.Rd
|
no_license
|
cran/QCAtools
|
R
| false
| true
| 2,003
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/boolean.R
\name{consistency}
\alias{consistency}
\title{Compute the consistency value}
\usage{
consistency(formula1, type = "->", formula2, data)
}
\arguments{
\item{formula1}{A string, list of strings or function representing a Boolean formula in disjunctive normal form}
\item{type}{either "->", "<-" or "<->", depending on the direction of the implication that is to be evaluated}
\item{formula2}{A string, list of strings or function representing a Boolean formula in disjunctive normal form}
\item{data}{A data frame where the rows represent cases and the columns the sets. Column names must be as in the formula.}
}
\value{
the consistency score of the implication described by \code{formula1}, \code{type} and \code{formula2}
}
\description{
Computes the consistency score of "formula1 -> formula2" (sufficient condition)
or "formula1 <- formula2" (necessary condition), depending on whether \code{type}
is "->" or "<-".
If \code{type} is "<->" it computes an equivalence score of formula1 and formula2
via the formula \code{sum(min(X,Y))/sum(max(X,Y))}
}
\details{
Compute a consistency score for an implication/necessity/sufficiency statement.
If \code{formula} is a function, it must take a \code{data.frame} and return
a vector.
If \code{formula} is a string or list of strings, the following conventions hold:
Set names must be capitalized in the formula and the data; if they are
lowercase, they are interpreted as the negation of the set.
If \code{formula} is a string, logical 'or' is expressed as a '+',
and logical 'and' as a '*'.
If \code{formula} is a list of strings, the strings are assumed to be
the disjuncts and are concatenated with '+'.
The formula must be in disjunctive normal form, i.e. it must be a disjunction of
conjunctions of elementary or negated elementary sets. Example:
\code{A*b*C + a*B}
}
\examples{
require(QCA)
data(d.urban)
consistency("MLC + FRB", "->", "CP", d.urban)
}
|
## Maximum-likelihood fit of the BiChrom chromosome-number evolution model on a
## simulated 2500-taxon tree (replicate 46). Writes the MLE and the attained
## negative log-likelihood to globalmax2500taxa46.csv.
library( "ape" )
library( "geiger" )
library( "expm" )
library( "nloptr" )
## Project helpers: the negative log-likelihood (defines negloglikelihood.wh),
## the Q-matrix construction and the pruning algorithm.
## NOTE(review): these files must be present in the working directory.
source( "masternegloglikeeps1.R" )
source( "Qmatrixwoodherb2.R" )
source("Pruning2.R")
## Simulated tree and chromosome counts for this replicate.
sim.tree<-read.tree("tree2500taxa46.txt")
sim.chrom<-read.table("chrom2500taxa46.txt", header=FALSE)
## Largest chromosome state modelled.
last.state=50
## Starting values for the 10 model parameters, on the log scale.
x.0<- log(c(0.12, 0.001, 0.25, 0.002,0.036, 0.006, 0.04,0.02, 1.792317852, 1.57e-14))
## Uniform distribution over the 2*(last.state+1) possible root states.
p.0<-rep(1,2*(last.state+1))/(2*(last.state+1))
## results[1:10] = MLE parameters, results[11] = objective (neg. log-likelihood).
results<-rep(0,11)
## Derivative-free subplex search, capped at 1000 evaluations.
my.options<-list("algorithm"= "NLOPT_LN_SBPLX","ftol_rel"=1e-08,"print_level"=1,"maxtime"=170000000, "maxeval"=1000)
mle<-nloptr(x0=x.0,eval_f=negloglikelihood.wh,opts=my.options,bichrom.phy=sim.tree, bichrom.data=sim.chrom,max.chromosome=last.state,pi.0=p.0)
print(mle)
results[1:10]<-mle$solution
results[11]<-mle$objective
write.table(results,file="globalmax2500taxa46.csv",sep=",")
|
/SImulations number of taxa/2500 taxa/optim2500taxa46.R
|
no_license
|
roszenil/Bichromdryad
|
R
| false
| false
| 827
|
r
|
## Maximum-likelihood fit of the BiChrom chromosome-number evolution model on a
## simulated 2500-taxon tree (replicate 46). Writes the MLE and the attained
## negative log-likelihood to globalmax2500taxa46.csv.
library( "ape" )
library( "geiger" )
library( "expm" )
library( "nloptr" )
## Project helpers: the negative log-likelihood (defines negloglikelihood.wh),
## the Q-matrix construction and the pruning algorithm.
## NOTE(review): these files must be present in the working directory.
source( "masternegloglikeeps1.R" )
source( "Qmatrixwoodherb2.R" )
source("Pruning2.R")
## Simulated tree and chromosome counts for this replicate.
sim.tree<-read.tree("tree2500taxa46.txt")
sim.chrom<-read.table("chrom2500taxa46.txt", header=FALSE)
## Largest chromosome state modelled.
last.state=50
## Starting values for the 10 model parameters, on the log scale.
x.0<- log(c(0.12, 0.001, 0.25, 0.002,0.036, 0.006, 0.04,0.02, 1.792317852, 1.57e-14))
## Uniform distribution over the 2*(last.state+1) possible root states.
p.0<-rep(1,2*(last.state+1))/(2*(last.state+1))
## results[1:10] = MLE parameters, results[11] = objective (neg. log-likelihood).
results<-rep(0,11)
## Derivative-free subplex search, capped at 1000 evaluations.
my.options<-list("algorithm"= "NLOPT_LN_SBPLX","ftol_rel"=1e-08,"print_level"=1,"maxtime"=170000000, "maxeval"=1000)
mle<-nloptr(x0=x.0,eval_f=negloglikelihood.wh,opts=my.options,bichrom.phy=sim.tree, bichrom.data=sim.chrom,max.chromosome=last.state,pi.0=p.0)
print(mle)
results[1:10]<-mle$solution
results[11]<-mle$objective
write.table(results,file="globalmax2500taxa46.csv",sep=",")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper.R
\name{get.admb.cov}
\alias{get.admb.cov}
\title{Read in the ADMB covariance file.}
\usage{
get.admb.cov(model.path = getwd())
}
\arguments{
\item{model.path}{Path to model (defaults to working directory)}
}
\description{
Read in the ADMB covariance file.
}
|
/man/get.admb.cov.Rd
|
no_license
|
jmannseth/adnuts
|
R
| false
| true
| 345
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper.R
\name{get.admb.cov}
\alias{get.admb.cov}
\title{Read in the ADMB covariance file.}
\usage{
get.admb.cov(model.path = getwd())
}
\arguments{
\item{model.path}{Path to model (defaults to working directory)}
}
\description{
Read in the ADMB covariance file.
}
|
#######################################
# R script for plotting scatterplots #
#######################################
# Draws a pairwise scatterplot matrix (psych::pairs.panels: Pearson
# correlations, histograms and density ellipses) of the CPM values of
# samples A and B across eight library-prep / sequencing platforms.
# NOTE(review): the "/.../" prefix in the input paths is a placeholder and
# must be replaced with the real project root before running.
# NOTE(review): ggplot2 is attached but not used in this script.
library(psych)
library("ggplot2")
## plot A samples scatter plot
A<-read.table("/.../5_others/scatterplot/Data/A_CPM_merged_all.new.csv", header=TRUE, sep="," )
colnames(A) <-c("Gene","BK_RNA-seq","10X_LLU","10X_NCI","10X_NCI_M","C1_FDA","C1_LLU","ICELL8_PE","ICELL8_SE")
# 12x12 inch PNG at 300 dpi; columns 2:9 hold one platform each.
png(file="scatterplot_Sample-A.png",width=12,height=12, units = "in", res=300)
pairs.panels(A[,2:9],
method = "pearson",
hist.col = "#00AFBB",
density = TRUE,
ellipses = TRUE)
dev.off()
## plot B samples scatter plot
B<-read.table("/.../5_others/scatterplot/Data/B_CPM_merged_all.new.csv", header=TRUE, sep="," )
colnames(B) <-c("gene","BK_RNA-seq","10X_LLU","10X_NCI","10X_NCI_M","C1_FDA","C1_LLU","ICELL8_PE","ICELL8_SE")
# Same plot for sample B.
png(file="scatterplot_Sample-B.png",width=12,height=12, units = "in", res=300)
pairs.panels(B[,2:9],
method = "pearson",
hist.col = "#00AFBB",
density = TRUE,
ellipses = TRUE)
dev.off()
|
/5_others/scatterplot/scatterplot.R
|
no_license
|
oxwang/fda_scRNA-seq
|
R
| false
| false
| 1,112
|
r
|
#######################################
# R script for plotting scatterplots #
#######################################
# Draws a pairwise scatterplot matrix (psych::pairs.panels: Pearson
# correlations, histograms and density ellipses) of the CPM values of
# samples A and B across eight library-prep / sequencing platforms.
# NOTE(review): the "/.../" prefix in the input paths is a placeholder and
# must be replaced with the real project root before running.
# NOTE(review): ggplot2 is attached but not used in this script.
library(psych)
library("ggplot2")
## plot A samples scatter plot
A<-read.table("/.../5_others/scatterplot/Data/A_CPM_merged_all.new.csv", header=TRUE, sep="," )
colnames(A) <-c("Gene","BK_RNA-seq","10X_LLU","10X_NCI","10X_NCI_M","C1_FDA","C1_LLU","ICELL8_PE","ICELL8_SE")
# 12x12 inch PNG at 300 dpi; columns 2:9 hold one platform each.
png(file="scatterplot_Sample-A.png",width=12,height=12, units = "in", res=300)
pairs.panels(A[,2:9],
method = "pearson",
hist.col = "#00AFBB",
density = TRUE,
ellipses = TRUE)
dev.off()
## plot B samples scatter plot
B<-read.table("/.../5_others/scatterplot/Data/B_CPM_merged_all.new.csv", header=TRUE, sep="," )
colnames(B) <-c("gene","BK_RNA-seq","10X_LLU","10X_NCI","10X_NCI_M","C1_FDA","C1_LLU","ICELL8_PE","ICELL8_SE")
# Same plot for sample B.
png(file="scatterplot_Sample-B.png",width=12,height=12, units = "in", res=300)
pairs.panels(B[,2:9],
method = "pearson",
hist.col = "#00AFBB",
density = TRUE,
ellipses = TRUE)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gridmet_utils.R
\name{jennings_snow_threshold}
\alias{jennings_snow_threshold}
\title{Jennings Snow Threshold}
\usage{
jennings_snow_threshold(dir = geo_path())
}
\arguments{
\item{dir}{the directory to cache data. Default is \code{geo_path()}}
}
\value{
file to Jennings Snow Threshold
}
\description{
\href{https://doi.org/10.5061/dryad.c9h35}{Jennings, Keith S.; Winchell, Taylor S.; Livneh, Ben; Molotch, Noah P. (2019), Data from: Spatial variation of the rain-snow temperature threshold across the Northern Hemisphere, Dryad, Dataset, }
}
\concept{snow}
|
/man/jennings_snow_threshold.Rd
|
permissive
|
NOAA-OWP/geogrids
|
R
| false
| true
| 638
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gridmet_utils.R
\name{jennings_snow_threshold}
\alias{jennings_snow_threshold}
\title{Jennings Snow Threshold}
\usage{
jennings_snow_threshold(dir = geo_path())
}
\arguments{
\item{dir}{the directory to cache data. Default is \code{geo_path()}}
}
\value{
file to Jennings Snow Threshold
}
\description{
\href{https://doi.org/10.5061/dryad.c9h35}{Jennings, Keith S.; Winchell, Taylor S.; Livneh, Ben; Molotch, Noah P. (2019), Data from: Spatial variation of the rain-snow temperature threshold across the Northern Hemisphere, Dryad, Dataset, }
}
\concept{snow}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_functions.R
\name{backendBuckets.getIamPolicy}
\alias{backendBuckets.getIamPolicy}
\title{Gets the access control policy for a resource. May be empty if no such policy or resource exists.}
\usage{
backendBuckets.getIamPolicy(project, resource)
}
\arguments{
\item{project}{Project ID for this request}
\item{resource}{Name of the resource for this request}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
\item https://www.googleapis.com/auth/compute
\item https://www.googleapis.com/auth/compute.readonly
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/compute, https://www.googleapis.com/auth/compute.readonly)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/compute/docs/reference/latest/}{Google Documentation}
}
|
/googlecomputealpha.auto/man/backendBuckets.getIamPolicy.Rd
|
permissive
|
GVersteeg/autoGoogleAPI
|
R
| false
| true
| 1,163
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_functions.R
\name{backendBuckets.getIamPolicy}
\alias{backendBuckets.getIamPolicy}
\title{Gets the access control policy for a resource. May be empty if no such policy or resource exists.}
\usage{
backendBuckets.getIamPolicy(project, resource)
}
\arguments{
\item{project}{Project ID for this request}
\item{resource}{Name of the resource for this request}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
\item https://www.googleapis.com/auth/compute
\item https://www.googleapis.com/auth/compute.readonly
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/compute, https://www.googleapis.com/auth/compute.readonly)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/compute/docs/reference/latest/}{Google Documentation}
}
|
# Plot 3 Class project Week 1
# Plots the three energy sub-metering series for 2007-02-01/02 to plot3.png.
# Read data.
# FIX: TRUE/FALSE spelled out (T and F are ordinary, reassignable variables).
dataHousehold<-read.table("household_power_consumption.txt",header=TRUE,sep=";", stringsAsFactors = FALSE)
head(dataHousehold)
# Transform Time column from string to time type (POSIXlt built from Date+Time).
dataHousehold$Time<-strptime(paste(dataHousehold$Date,dataHousehold$Time),"%d/%m/%Y %H:%M:%S")
head(dataHousehold)
class(dataHousehold$Time)
head(dataHousehold$Time)
# Transform Date column to date type.
dataHousehold$Date<-as.Date(dataHousehold$Date, "%d/%m/%Y")
#class(dataHousehold$Date)
head(dataHousehold$Date,10)
# Extract the two target days.
# FIX: use <- for assignment and drop the stray semicolon.
startDate <- as.Date("2007-02-01")
endDate <- as.Date("2007-02-02")
dataHousehold$Global_active_power<-as.numeric(dataHousehold$Global_active_power)
#class(dataHousehold$Global_active_power)
data_h<-subset(dataHousehold,(Date==startDate)|(Date==endDate))
head(data_h)
# Building the graph: three sub-metering series on one set of axes.
# First Sub metering 1, black.
plot(data_h$Time, data_h$Sub_metering_1,
     ylab="Energy sub metering",
     xlab = "",
     type="l")
# Second Sub metering 2, red.
lines(data_h$Time, data_h$Sub_metering_2,
      type="l",
      col="red")
# Third Sub metering 3, darkmagenta.
lines(data_h$Time, data_h$Sub_metering_3,
      type="l",
      col="darkmagenta")
# Legend matching the three series.
col_p<-c("black","red","darkmagenta")
legend("topright",c("Sub metering 1","Sub metering 2", "Sub metering 3"),col=col_p,lty = c(1,1,1))
# Copy the screen device to a PNG file and close it.
dev.copy(png,'plot3.png')
dev.off()
|
/plot3.R
|
no_license
|
RodrigRicardo/ExData_Plotting1
|
R
| false
| false
| 1,413
|
r
|
# Plot 3 Class project Week 1
# Plots the three energy sub-metering series for 2007-02-01/02 to plot3.png.
# Read data.
# FIX: TRUE/FALSE spelled out (T and F are ordinary, reassignable variables).
dataHousehold<-read.table("household_power_consumption.txt",header=TRUE,sep=";", stringsAsFactors = FALSE)
head(dataHousehold)
# Transform Time column from string to time type (POSIXlt built from Date+Time).
dataHousehold$Time<-strptime(paste(dataHousehold$Date,dataHousehold$Time),"%d/%m/%Y %H:%M:%S")
head(dataHousehold)
class(dataHousehold$Time)
head(dataHousehold$Time)
# Transform Date column to date type.
dataHousehold$Date<-as.Date(dataHousehold$Date, "%d/%m/%Y")
#class(dataHousehold$Date)
head(dataHousehold$Date,10)
# Extract the two target days.
# FIX: use <- for assignment and drop the stray semicolon.
startDate <- as.Date("2007-02-01")
endDate <- as.Date("2007-02-02")
dataHousehold$Global_active_power<-as.numeric(dataHousehold$Global_active_power)
#class(dataHousehold$Global_active_power)
data_h<-subset(dataHousehold,(Date==startDate)|(Date==endDate))
head(data_h)
# Building the graph: three sub-metering series on one set of axes.
# First Sub metering 1, black.
plot(data_h$Time, data_h$Sub_metering_1,
     ylab="Energy sub metering",
     xlab = "",
     type="l")
# Second Sub metering 2, red.
lines(data_h$Time, data_h$Sub_metering_2,
      type="l",
      col="red")
# Third Sub metering 3, darkmagenta.
lines(data_h$Time, data_h$Sub_metering_3,
      type="l",
      col="darkmagenta")
# Legend matching the three series.
col_p<-c("black","red","darkmagenta")
legend("topright",c("Sub metering 1","Sub metering 2", "Sub metering 3"),col=col_p,lty = c(1,1,1))
# Copy the screen device to a PNG file and close it.
dev.copy(png,'plot3.png')
dev.off()
|
/Pages/2018 국토교통 빅데이터 해커톤 참가작/2018_molit_DataBrothers-master/fig_p_value_combined.R
|
no_license
|
molit-korea/main
|
R
| false
| false
| 2,756
|
r
| ||
####################################################################################
# SECexplorer
####################################################################################
# Server
## prepare environment
if (!require("shiny")){
install.packages("shiny")
}
if (!require("ggplot2")){
install.packages("ggplot2")
}
if (!require("plotly")){
install.packages("plotly")
}
if (!require("data.table")){
install.packages("data.table")
}
if (!require("DT")){
install.packages("DT")
}
if (!requireNamespace("BiocManager", quietly = TRUE)){
install.packages("BiocManager")
}
if (!require("GenomeInfoDbData")){
BiocManager::install("GenomeInfoDbData")
}
# Get CCprofiler package differential branch
if (!require("CCprofiler")){
devtools::install_github("CCprofiler/CCprofiler", ref = "differential")
}
# load packages
library(shiny)
library(ggplot2)
library(plotly)
library(data.table)
library(DT)
library(CCprofiler)
# load modified methods
source("methods.R")
## prepare data
# All inputs are pre-computed objects read from the app's data/ folder.
#calibration_functions <- readRDS("www/data/calibration_functions.rda")
calibration_functions <- readRDS("data/calibration.rds")   # passed to plotFeatures(calibration=...)
proteins <- readRDS("data/proteins.rds")                   # protein annotation table driving the selectors
protTraces = readRDS("data/protTracesList.rds")            # protein-level SEC traces
designMatrix = readRDS("data/design_matrix.rds")           # condition/replicate design for plotting
# default_proteins <- c("GPS1 COPS1 CSN1", "COPS3 CSN3", "COPS8 CSN8")
# Default protein selection shown at startup (gene-name strings as stored in `proteins`).
default_proteins <- c("NDC80 HEC HEC1 KNTC2",
                      "SPC24 SPBC24",
                      "NUF2 CDCA1 NUF2R",
                      "SPC25 SPBC25 AD024")
# Default complex feature id shown at startup.
default_complexftid <- "127_corum_corum"
# Differentials
# Protein-level differential expression / assembly-state results, one object
# per pairwise comparison of the three conditions.
diffProteins_differentiated_undifferentiated <- readRDS("data/protein_DiffExprProtein_differentiated_undifferentiated.rda")
diffProteins_stimulated_undifferentiated <- readRDS("data/protein_DiffExprProtein_stimulated_undifferentiated.rda")
diffProteins_stimulated_differentiated <- readRDS("data/protein_DiffExprProtein_stimulated_differentiated.rda")
diffAssemblyState_stimulated_undifferentiated <- readRDS("data/diffAssemblyState_stimulated_undifferentiated.rda")
diffAssemblyState_stimulated_differentiated <- readRDS("data/diffAssemblyState_stimulated_differentiated.rda")
diffAssemblyState_differentiated_undifferentiated <- readRDS("data/diffAssemblyState_differentiated_undifferentiated.rda")
# Complex-level
# Collapsed complex features plus complex-level differential expression results.
complexFeaturesCollapsed = readRDS("data/complexFeaturesCollapsed.rda")
diffComplexes_stimulated_undifferentiated <- readRDS("data/complex_DiffExprComplex_stimulated_undifferentiated.rda")
diffComplexes_stimulated_differentiated <- readRDS("data/complex_DiffExprComplex_stimulated_differentiated.rda")
diffComplexes_differentiated_undifferentiated <- readRDS("data/complex_DiffExprComplex_differentiated_undifferentiated.rda")
## define server roles
#######################
# Shiny server: builds all UI outputs from the objects loaded at startup
# (proteins, protTraces, complexFeaturesCollapsed, the diff* tables, ...).
shinyServer(function(input, output, session) {
  ## Generate Reactive Filter Value Field for UI, depending on filter column chosen
  # protein selection: choices are the unique values of the chosen annotation column
  output$fcolumnvalues <- renderUI({
    values <- sort(unique(proteins[[input$fcolumn]]))
    # values <- values[nchar(values)>0]
    selectizeInput("fvalue", "Search and select proteins of interest", values,
                   multiple = TRUE, options = list(maxOptions = 6000),
                   selected = default_proteins)
  })
  # complex feature selection (single choice only)
  output$cfcolumnvalues <- renderUI({
    values <- sort(unique(complexFeaturesCollapsed[[input$cfcolumn]]))
    selectizeInput("cfvalue", "Search and select complex features of interest", values,
                   multiple = FALSE, options = list(maxOptions = 6000),
                   selected = default_complexftid)
  })
  ############################
  ## Viewer Tab              #
  ############################
  ## generate selected protein SEC traces plot
  # Subset traces to the proteins currently selected in the sidebar
  target_id_traces <- eventReactive(input$fvalue,{
    selected_protein_ids = proteins[get(input$fcolumn) %in% input$fvalue]$protein_id
    target_id_traces = subset(protTraces, trace_subset_ids = selected_protein_ids,
                              trace_subset_type = "id")
  })
  ## Plot the selected traces
  # NOTE(review): `<<-` stores the plot object in the enclosing/global scope so
  # the downloadHandler below can re-use it; fragile with concurrent sessions.
  output$plot <- renderPlotly({
    vplot <<- plot(target_id_traces(),
                   # colour_by = input$fcolumn, ## causes problems in combination with collapsing
                   collapse_conditions = input$collapse_conditions,
                   aggregateReplicates = input$collapse_replicates,
                   name = "",
                   monomer_MW = input$show_monomers,
                   log = input$logscale,
                   design_matrix = designMatrix,
                   plot = FALSE)
    ggplotly(vplot)
  })
  ## Download the displayed plot (re-uses the globally stored vplot)
  output$downloadPlot <- downloadHandler(
    filename = function() { paste("currentPlot", '.pdf', sep='') },
    content = function(file) {
      ggsave(file, width=10, height=6, plot = vplot, device = "pdf")
    }
  )
  # Display the annotation table for the selected proteins
  output$anntable <- renderDT({
    proteins[get(input$fcolumn) %in% input$fvalue]
  })
  #####################################
  ## Differential protein intensity   #
  #####################################
  # Select dataset based on user-defined comparison
  # choices= c("Differentiated vs. undifferentiated",
  #            "Stimulated vs. differentiated",
  #            "Stimulated vs. undifferentiated")
  diffProteins <- eventReactive(input$comparison_pINT,{
    if (input$comparison_pINT == "Differentiated vs. undifferentiated"){
      diffProteins = diffProteins_differentiated_undifferentiated
    } else if (input$comparison_pINT == "Stimulated vs. differentiated"){
      diffProteins = diffProteins_stimulated_differentiated
    } else {
      diffProteins = diffProteins_stimulated_undifferentiated
    }
  })
  # render pc volcano; currently selected proteins are highlighted
  output$pc_volcano <- renderPlotly({
    selected_protein_ids = proteins[get(input$fcolumn) %in% input$fvalue]$protein_id
    dplot <<- plotVolcano(diffProteins(),
                          pBHadj_cutoff = input$pc_volcano_pvalcutoff,
                          FC_cutoff = input$pc_volcano_fccutoff,
                          highlight = selected_protein_ids,
                          plot = FALSE)
    ggplotly(dplot)
  })
  # render pc diff table: either all rows, or only those passing both cutoffs
  output$pc_difftable <- DT::renderDT({
    diffProteins.s = diffProteins()[, .(feature_id, Entry_name, Gene_names, Npeptides,
                                        apex, pBHadj, medianLog2FC, qVal, global_pBHadj, global_medianLog2FC, global_qVal)]
    if(input$pc_difftable_show_all){
      diffProteins.s
    } else {
      # keep rows passing the adjusted-p cutoff AND the fold-change cutoff
      diffProteins.s[pBHadj <= input$pc_volcano_pvalcutoff][abs(medianLog2FC) >=
                                                              log2(input$pc_volcano_fccutoff)]
    }
  })
  #####################################
  ## Differential protein assembly    #
  #####################################
  # Select dataset based on comparison
  diffProteinAssemblyState <- eventReactive(input$comparison_pAMF,{
    if (input$comparison_pAMF == "Differentiated vs. undifferentiated"){
      diffProteinAssemblyState = diffAssemblyState_differentiated_undifferentiated
    } else if (input$comparison_pAMF == "Stimulated vs. differentiated"){
      diffProteinAssemblyState = diffAssemblyState_stimulated_differentiated
    } else {
      diffProteinAssemblyState = diffAssemblyState_stimulated_undifferentiated
    }
  })
  # render assembly state scatter plot (meanAMF1 vs meanAMF2, diagonal cutoff bands)
  output$pc_assemblyScatter <- renderPlotly({
    selected_protein_ids = proteins[get(input$fcolumn) %in% input$fvalue]$protein_id
    meanDiff_cutoff = input$pc_assemblyScatter_meanDiffcutoff
    splot1 <<- ggplot(diffProteinAssemblyState(),
                      aes(x=meanAMF1, y=meanAMF2, colour=-log10(betaPval_BHadj), label = paste(protein_id,Entry_name,Gene_names))) +
      geom_abline(intercept = meanDiff_cutoff, slope = 1) +
      geom_abline(intercept = -meanDiff_cutoff, slope = 1) +
      geom_point() +
      theme_bw()
    # overlay the user-selected proteins in red on top of the base scatter
    splot2 = splot1 + geom_point(data = diffProteinAssemblyState()[protein_id %in% selected_protein_ids],
                                 aes(x=meanAMF1, y=meanAMF2, label = paste(protein_id,Entry_name,Gene_names)),
                                 colour="red", size = 3)
    ggplotly(splot2)
  })
  # render assembly state output table
  # NOTE(review): data.table `:=` modifies the table returned by the reactive
  # *by reference*, so the more_assembled_in column persists across renders;
  # in the else branch each `[i, :=]` call returns the whole (modified) table,
  # so rbind() appears to stack two full copies -- confirm this is intended.
  output$pc_assemblyTable <- DT::renderDT({
    if(input$pc_diffAssemblytable_show_all){
      dt = diffProteinAssemblyState()
      dt[, more_assembled_in:=NA]
      dt[meanDiff >= input$pc_assemblyScatter_meanDiffcutoff, more_assembled_in:=2]
      dt[meanDiff <= -input$pc_assemblyScatter_meanDiffcutoff, more_assembled_in:=1]
      dt
    } else {
      rbind(diffProteinAssemblyState()[meanDiff >= input$pc_assemblyScatter_meanDiffcutoff, more_assembled_in:=2],
            diffProteinAssemblyState()[meanDiff <= -input$pc_assemblyScatter_meanDiffcutoff, more_assembled_in:=1])
    }
  })
  #####################################
  ## Complex feature viewer           #
  #####################################
  # render complex feature plot
  # Note: This plot is non-interactive as export of plot object from base function
  # plotFeatures (in combination with tracesList input to traces arg) doesn't fly
  output$cf_plot <- renderPlot({
    selected_complex_ft_id = complexFeaturesCollapsed[get(input$cfcolumn) %in% input$cfvalue]$complex_id
    CCprofiler::plotFeatures(feature_table = complexFeaturesCollapsed,
                             traces = protTraces,
                             feature_id = selected_complex_ft_id,
                             design_matrix=designMatrix,
                             calibration=calibration_functions,
                             annotation_label = "Entry_name",
                             peak_area = T,
                             legend = F,
                             onlyBest = F,
                             PDF = FALSE,
                             monomer_MW=T,
                             aggregateReplicates=T)
  })
  # re-render plot for download (no plot object available)
  output$downloadPlot_cF <- downloadHandler(
    filename = function() { paste("complexFeatures_", input$cfvalue, '.pdf', sep='') },
    content = function(file) {ggsave(file, width=10, height=6,
                                     plot = CCprofiler::plotFeatures(feature_table = complexFeaturesCollapsed,
                                                                     traces = protTraces,
                                                                     feature_id = complexFeaturesCollapsed[get(input$cfcolumn) %in% input$cfvalue]$complex_id,
                                                                     design_matrix=designMatrix,
                                                                     calibration=calibration_functions,
                                                                     annotation_label = "Entry_name",
                                                                     peak_area = T,
                                                                     legend = F,
                                                                     onlyBest = F,
                                                                     PDF = FALSE,
                                                                     monomer_MW=T,
                                                                     aggregateReplicates=T),
                                     device = "pdf")}
  )
  # render complex feature table (all features, or only the selected one)
  output$cf_table <- DT::renderDT({
    if(input$cf_table_show_all){
      complexFeaturesCollapsed
    } else {
      complexFeaturesCollapsed[get(input$cfcolumn) %in% input$cfvalue]
    }
  }
  )
  #####################################
  ## Differential complex intensity   #
  #####################################
  # Select dataset based on comparison
  diffComplexes <- eventReactive(input$comparison_cINT,{
    if (input$comparison_cINT == "Differentiated vs. undifferentiated"){
      diffComplexes = diffComplexes_differentiated_undifferentiated
    } else if (input$comparison_cINT == "Stimulated vs. differentiated"){
      diffComplexes = diffComplexes_stimulated_differentiated
    } else {
      diffComplexes = diffComplexes_stimulated_undifferentiated
    }
  })
  # Render complex volcano (selected complex id highlighted)
  output$cc_volcano <- renderPlotly({
    selected_complex_id = input$complexid
    cvplot <<- plotVolcano_c(diffComplexes(),
                             pBHadj_cutoff = input$cc_volcano_pvalcutoff,
                             FC_cutoff = input$cc_volcano_fccutoff,
                             highlight = selected_complex_id,
                             plot = FALSE)
    ggplotly(cvplot)
  })
  # Render complex table (all rows, or only significant ones)
  output$cc_difftable <- DT::renderDT({
    if(input$cc_difftable_show_all){
      diffComplexes()
    } else {
      diffComplexes()[pBHadj <= input$cc_volcano_pvalcutoff][abs(medianLog2FC) >=
                                                               log2(input$cc_volcano_fccutoff)]
    }
  }
  #, options = list(autoWidth = TRUE,
  #                 columnDefs = list(list(width = '100px', targets = 1))
  #                 )
  # attempt to limit column width failed.. move on
  )
})
|
/server.R
|
permissive
|
heuselm/SECexplorer_THP1
|
R
| false
| false
| 13,254
|
r
|
####################################################################################
# SECexplorer
####################################################################################
# Server
## prepare environment: install any missing dependency, then attach everything.

# CRAN packages are handled uniformly: attach if present, install otherwise.
cran_pkgs <- c("shiny", "ggplot2", "plotly", "data.table", "DT")
for (pkg in cran_pkgs) {
  if (!require(pkg, character.only = TRUE)) {
    install.packages(pkg)
  }
}

# Bioconductor dependency: BiocManager bootstraps GenomeInfoDbData.
if (!requireNamespace("BiocManager", quietly = TRUE)) {
  install.packages("BiocManager")
}
if (!require("GenomeInfoDbData")) {
  BiocManager::install("GenomeInfoDbData")
}

# CCprofiler is installed from the "differential" branch on GitHub.
if (!require("CCprofiler")) {
  devtools::install_github("CCprofiler/CCprofiler", ref = "differential")
}

# Attach all packages for the session.
library(shiny)
library(ggplot2)
library(plotly)
library(data.table)
library(DT)
library(CCprofiler)

# Load locally modified helper methods.
source("methods.R")
## prepare data
# All inputs are pre-computed objects read from the app's data/ folder.
#calibration_functions <- readRDS("www/data/calibration_functions.rda")
calibration_functions <- readRDS("data/calibration.rds")   # passed to plotFeatures(calibration=...)
proteins <- readRDS("data/proteins.rds")                   # protein annotation table driving the selectors
protTraces = readRDS("data/protTracesList.rds")            # protein-level SEC traces
designMatrix = readRDS("data/design_matrix.rds")           # condition/replicate design for plotting
# default_proteins <- c("GPS1 COPS1 CSN1", "COPS3 CSN3", "COPS8 CSN8")
# Default protein selection shown at startup (gene-name strings as stored in `proteins`).
default_proteins <- c("NDC80 HEC HEC1 KNTC2",
                      "SPC24 SPBC24",
                      "NUF2 CDCA1 NUF2R",
                      "SPC25 SPBC25 AD024")
# Default complex feature id shown at startup.
default_complexftid <- "127_corum_corum"
# Differentials
# Protein-level differential expression / assembly-state results, one object
# per pairwise comparison of the three conditions.
diffProteins_differentiated_undifferentiated <- readRDS("data/protein_DiffExprProtein_differentiated_undifferentiated.rda")
diffProteins_stimulated_undifferentiated <- readRDS("data/protein_DiffExprProtein_stimulated_undifferentiated.rda")
diffProteins_stimulated_differentiated <- readRDS("data/protein_DiffExprProtein_stimulated_differentiated.rda")
diffAssemblyState_stimulated_undifferentiated <- readRDS("data/diffAssemblyState_stimulated_undifferentiated.rda")
diffAssemblyState_stimulated_differentiated <- readRDS("data/diffAssemblyState_stimulated_differentiated.rda")
diffAssemblyState_differentiated_undifferentiated <- readRDS("data/diffAssemblyState_differentiated_undifferentiated.rda")
# Complex-level
# Collapsed complex features plus complex-level differential expression results.
complexFeaturesCollapsed = readRDS("data/complexFeaturesCollapsed.rda")
diffComplexes_stimulated_undifferentiated <- readRDS("data/complex_DiffExprComplex_stimulated_undifferentiated.rda")
diffComplexes_stimulated_differentiated <- readRDS("data/complex_DiffExprComplex_stimulated_differentiated.rda")
diffComplexes_differentiated_undifferentiated <- readRDS("data/complex_DiffExprComplex_differentiated_undifferentiated.rda")
## define server roles
#######################
# Shiny server: builds all UI outputs from the objects loaded at startup
# (proteins, protTraces, complexFeaturesCollapsed, the diff* tables, ...).
shinyServer(function(input, output, session) {
  ## Generate Reactive Filter Value Field for UI, depending on filter column chosen
  # protein selection: choices are the unique values of the chosen annotation column
  output$fcolumnvalues <- renderUI({
    values <- sort(unique(proteins[[input$fcolumn]]))
    # values <- values[nchar(values)>0]
    selectizeInput("fvalue", "Search and select proteins of interest", values,
                   multiple = TRUE, options = list(maxOptions = 6000),
                   selected = default_proteins)
  })
  # complex feature selection (single choice only)
  output$cfcolumnvalues <- renderUI({
    values <- sort(unique(complexFeaturesCollapsed[[input$cfcolumn]]))
    selectizeInput("cfvalue", "Search and select complex features of interest", values,
                   multiple = FALSE, options = list(maxOptions = 6000),
                   selected = default_complexftid)
  })
  ############################
  ## Viewer Tab              #
  ############################
  ## generate selected protein SEC traces plot
  # Subset traces to the proteins currently selected in the sidebar
  target_id_traces <- eventReactive(input$fvalue,{
    selected_protein_ids = proteins[get(input$fcolumn) %in% input$fvalue]$protein_id
    target_id_traces = subset(protTraces, trace_subset_ids = selected_protein_ids,
                              trace_subset_type = "id")
  })
  ## Plot the selected traces
  # NOTE(review): `<<-` stores the plot object in the enclosing/global scope so
  # the downloadHandler below can re-use it; fragile with concurrent sessions.
  output$plot <- renderPlotly({
    vplot <<- plot(target_id_traces(),
                   # colour_by = input$fcolumn, ## causes problems in combination with collapsing
                   collapse_conditions = input$collapse_conditions,
                   aggregateReplicates = input$collapse_replicates,
                   name = "",
                   monomer_MW = input$show_monomers,
                   log = input$logscale,
                   design_matrix = designMatrix,
                   plot = FALSE)
    ggplotly(vplot)
  })
  ## Download the displayed plot (re-uses the globally stored vplot)
  output$downloadPlot <- downloadHandler(
    filename = function() { paste("currentPlot", '.pdf', sep='') },
    content = function(file) {
      ggsave(file, width=10, height=6, plot = vplot, device = "pdf")
    }
  )
  # Display the annotation table for the selected proteins
  output$anntable <- renderDT({
    proteins[get(input$fcolumn) %in% input$fvalue]
  })
  #####################################
  ## Differential protein intensity   #
  #####################################
  # Select dataset based on user-defined comparison
  # choices= c("Differentiated vs. undifferentiated",
  #            "Stimulated vs. differentiated",
  #            "Stimulated vs. undifferentiated")
  diffProteins <- eventReactive(input$comparison_pINT,{
    if (input$comparison_pINT == "Differentiated vs. undifferentiated"){
      diffProteins = diffProteins_differentiated_undifferentiated
    } else if (input$comparison_pINT == "Stimulated vs. differentiated"){
      diffProteins = diffProteins_stimulated_differentiated
    } else {
      diffProteins = diffProteins_stimulated_undifferentiated
    }
  })
  # render pc volcano; currently selected proteins are highlighted
  output$pc_volcano <- renderPlotly({
    selected_protein_ids = proteins[get(input$fcolumn) %in% input$fvalue]$protein_id
    dplot <<- plotVolcano(diffProteins(),
                          pBHadj_cutoff = input$pc_volcano_pvalcutoff,
                          FC_cutoff = input$pc_volcano_fccutoff,
                          highlight = selected_protein_ids,
                          plot = FALSE)
    ggplotly(dplot)
  })
  # render pc diff table: either all rows, or only those passing both cutoffs
  output$pc_difftable <- DT::renderDT({
    diffProteins.s = diffProteins()[, .(feature_id, Entry_name, Gene_names, Npeptides,
                                        apex, pBHadj, medianLog2FC, qVal, global_pBHadj, global_medianLog2FC, global_qVal)]
    if(input$pc_difftable_show_all){
      diffProteins.s
    } else {
      # keep rows passing the adjusted-p cutoff AND the fold-change cutoff
      diffProteins.s[pBHadj <= input$pc_volcano_pvalcutoff][abs(medianLog2FC) >=
                                                              log2(input$pc_volcano_fccutoff)]
    }
  })
  #####################################
  ## Differential protein assembly    #
  #####################################
  # Select dataset based on comparison
  diffProteinAssemblyState <- eventReactive(input$comparison_pAMF,{
    if (input$comparison_pAMF == "Differentiated vs. undifferentiated"){
      diffProteinAssemblyState = diffAssemblyState_differentiated_undifferentiated
    } else if (input$comparison_pAMF == "Stimulated vs. differentiated"){
      diffProteinAssemblyState = diffAssemblyState_stimulated_differentiated
    } else {
      diffProteinAssemblyState = diffAssemblyState_stimulated_undifferentiated
    }
  })
  # render assembly state scatter plot (meanAMF1 vs meanAMF2, diagonal cutoff bands)
  output$pc_assemblyScatter <- renderPlotly({
    selected_protein_ids = proteins[get(input$fcolumn) %in% input$fvalue]$protein_id
    meanDiff_cutoff = input$pc_assemblyScatter_meanDiffcutoff
    splot1 <<- ggplot(diffProteinAssemblyState(),
                      aes(x=meanAMF1, y=meanAMF2, colour=-log10(betaPval_BHadj), label = paste(protein_id,Entry_name,Gene_names))) +
      geom_abline(intercept = meanDiff_cutoff, slope = 1) +
      geom_abline(intercept = -meanDiff_cutoff, slope = 1) +
      geom_point() +
      theme_bw()
    # overlay the user-selected proteins in red on top of the base scatter
    splot2 = splot1 + geom_point(data = diffProteinAssemblyState()[protein_id %in% selected_protein_ids],
                                 aes(x=meanAMF1, y=meanAMF2, label = paste(protein_id,Entry_name,Gene_names)),
                                 colour="red", size = 3)
    ggplotly(splot2)
  })
  # render assembly state output table
  # NOTE(review): data.table `:=` modifies the table returned by the reactive
  # *by reference*, so the more_assembled_in column persists across renders;
  # in the else branch each `[i, :=]` call returns the whole (modified) table,
  # so rbind() appears to stack two full copies -- confirm this is intended.
  output$pc_assemblyTable <- DT::renderDT({
    if(input$pc_diffAssemblytable_show_all){
      dt = diffProteinAssemblyState()
      dt[, more_assembled_in:=NA]
      dt[meanDiff >= input$pc_assemblyScatter_meanDiffcutoff, more_assembled_in:=2]
      dt[meanDiff <= -input$pc_assemblyScatter_meanDiffcutoff, more_assembled_in:=1]
      dt
    } else {
      rbind(diffProteinAssemblyState()[meanDiff >= input$pc_assemblyScatter_meanDiffcutoff, more_assembled_in:=2],
            diffProteinAssemblyState()[meanDiff <= -input$pc_assemblyScatter_meanDiffcutoff, more_assembled_in:=1])
    }
  })
  #####################################
  ## Complex feature viewer           #
  #####################################
  # render complex feature plot
  # Note: This plot is non-interactive as export of plot object from base function
  # plotFeatures (in combination with tracesList input to traces arg) doesn't fly
  output$cf_plot <- renderPlot({
    selected_complex_ft_id = complexFeaturesCollapsed[get(input$cfcolumn) %in% input$cfvalue]$complex_id
    CCprofiler::plotFeatures(feature_table = complexFeaturesCollapsed,
                             traces = protTraces,
                             feature_id = selected_complex_ft_id,
                             design_matrix=designMatrix,
                             calibration=calibration_functions,
                             annotation_label = "Entry_name",
                             peak_area = T,
                             legend = F,
                             onlyBest = F,
                             PDF = FALSE,
                             monomer_MW=T,
                             aggregateReplicates=T)
  })
  # re-render plot for download (no plot object available)
  output$downloadPlot_cF <- downloadHandler(
    filename = function() { paste("complexFeatures_", input$cfvalue, '.pdf', sep='') },
    content = function(file) {ggsave(file, width=10, height=6,
                                     plot = CCprofiler::plotFeatures(feature_table = complexFeaturesCollapsed,
                                                                     traces = protTraces,
                                                                     feature_id = complexFeaturesCollapsed[get(input$cfcolumn) %in% input$cfvalue]$complex_id,
                                                                     design_matrix=designMatrix,
                                                                     calibration=calibration_functions,
                                                                     annotation_label = "Entry_name",
                                                                     peak_area = T,
                                                                     legend = F,
                                                                     onlyBest = F,
                                                                     PDF = FALSE,
                                                                     monomer_MW=T,
                                                                     aggregateReplicates=T),
                                     device = "pdf")}
  )
  # render complex feature table (all features, or only the selected one)
  output$cf_table <- DT::renderDT({
    if(input$cf_table_show_all){
      complexFeaturesCollapsed
    } else {
      complexFeaturesCollapsed[get(input$cfcolumn) %in% input$cfvalue]
    }
  }
  )
  #####################################
  ## Differential complex intensity   #
  #####################################
  # Select dataset based on comparison
  diffComplexes <- eventReactive(input$comparison_cINT,{
    if (input$comparison_cINT == "Differentiated vs. undifferentiated"){
      diffComplexes = diffComplexes_differentiated_undifferentiated
    } else if (input$comparison_cINT == "Stimulated vs. differentiated"){
      diffComplexes = diffComplexes_stimulated_differentiated
    } else {
      diffComplexes = diffComplexes_stimulated_undifferentiated
    }
  })
  # Render complex volcano (selected complex id highlighted)
  output$cc_volcano <- renderPlotly({
    selected_complex_id = input$complexid
    cvplot <<- plotVolcano_c(diffComplexes(),
                             pBHadj_cutoff = input$cc_volcano_pvalcutoff,
                             FC_cutoff = input$cc_volcano_fccutoff,
                             highlight = selected_complex_id,
                             plot = FALSE)
    ggplotly(cvplot)
  })
  # Render complex table (all rows, or only significant ones)
  output$cc_difftable <- DT::renderDT({
    if(input$cc_difftable_show_all){
      diffComplexes()
    } else {
      diffComplexes()[pBHadj <= input$cc_volcano_pvalcutoff][abs(medianLog2FC) >=
                                                               log2(input$cc_volcano_fccutoff)]
    }
  }
  #, options = list(autoWidth = TRUE,
  #                 columnDefs = list(list(width = '100px', targets = 1))
  #                 )
  # attempt to limit column width failed.. move on
  )
})
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{fixtrajdominance}
\alias{fixtrajdominance}
\title{Returns a trajectory of a mutation that has fixed due to selection}
\usage{
fixtrajdominance(N, s, h)
}
\arguments{
\item{N}{The diploid population size}
\item{s}{The selection coefficient}
\item{h}{The dominance of the mutant allele}
}
\value{
A list with the allele frequency trajectory plus the number of attempts needed before a mutation fixed
}
\description{
Returns a trajectory of a mutation that has fixed due to selection
}
\details{
Fitnesses are 1, 1+hs, and 1+2s for genotypes AA, Aa, and aa, respectively. Thus, a is the mutant allele.
}
\examples{
ft = fixtrajdominance(100,0.1,0.05)
}
|
/man/fixtrajdominance.Rd
|
no_license
|
BioinformaticsMaterials/Rcode4teaching
|
R
| false
| false
| 713
|
rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{fixtrajdominance}
\alias{fixtrajdominance}
\title{Returns a trajectory of a mutation that has fixed due to selection}
\usage{
fixtrajdominance(N, s, h)
}
\arguments{
\item{N}{The diploid population size}
\item{s}{The selection coefficient}
\item{h}{The dominance of the mutant allele}
}
\value{
A list with the allele frequency trajectory plus the number of attempts needed before a mutation fixed
}
\description{
Returns a trajectory of a mutation that has fixed due to selection
}
\details{
Fitnesses are 1, 1+hs, and 1+2s for genotypes AA, Aa, and aa, respectively. Thus, a is the mutant allele.
}
\examples{
ft = fixtrajdominance(100,0.1,0.05)
}
|
## Pair-plot of the CDQ training variables, coloured by steel grade.
# Install psych only when it is missing, instead of unconditionally
# re-installing it on every run of the script.
if (!requireNamespace("psych", quietly = TRUE)) {
  install.packages("psych")
}
library(psych)
library(readxl)

# Training sheet of the NOx prediction workbook (Boiler 9).
CDQ_train <- read_excel("D:/work/dataset/NOx_prediction_(Boiler_9)1.xlsx", sheet = "train")

# One point colour per grade level (factor codes index the colour vector).
color.index <- as.factor(CDQ_train$Grade)

# Scatterplot matrix (with histograms/correlations) for columns 2..13.
pairs.panels(CDQ_train[, 2:13], scale = FALSE, font.labels = 2, cex.labels = 1,
             bg = c("red", "orange", "green", "blue", "black")[color.index],
             pch = 23, cex = 2.5)
|
/Sample Code/CDQ_Pairplot.R
|
no_license
|
shin-nyum/R_Programming_Self-Practice
|
R
| false
| false
| 347
|
r
|
## Pair-plot of the CDQ training variables, coloured by steel grade.
# Install psych only when it is missing, instead of unconditionally
# re-installing it on every run of the script.
if (!requireNamespace("psych", quietly = TRUE)) {
  install.packages("psych")
}
library(psych)
library(readxl)

# Training sheet of the NOx prediction workbook (Boiler 9).
CDQ_train <- read_excel("D:/work/dataset/NOx_prediction_(Boiler_9)1.xlsx", sheet = "train")

# One point colour per grade level (factor codes index the colour vector).
color.index <- as.factor(CDQ_train$Grade)

# Scatterplot matrix (with histograms/correlations) for columns 2..13.
pairs.panels(CDQ_train[, 2:13], scale = FALSE, font.labels = 2, cex.labels = 1,
             bg = c("red", "orange", "green", "blue", "black")[color.index],
             pch = 23, cex = 2.5)
|
# Fit a Gaussian-process trend model with fixed hyperparameters to the smoking
# data (on the logit scale) and reproduce a three-panel figure: posterior fit,
# posterior derivative, and Trend Direction Index.
# NOTE(review): rm(list=ls()) wipes the caller's workspace -- fine for a
# stand-alone batch script, but avoid sourcing this from an active session.
rm(list=ls())
library(DEoptim)
library(mvtnorm)
library(rstan)
rstan_options(auto_write = TRUE)             # cache compiled Stan models
options(mc.cores = parallel::detectCores())  # run chains in parallel
########################################################
# Load data
########################################################
# log-odds transform; p must lie strictly in (0, 1)
logit <- function(p) log(p / (1-p))
load("smoking.RData")   # provides `smoking` with columns year and p (percent)
dat <- data.frame(t = smoking$year, y = logit(smoking$p/100))
# Prediction grid over the observation window
tPred <- seq(1998, 2018, length.out = 200) #length = 500 used for manuscript
########################################################
# Estimate Empirical Bayes parameters
########################################################
# Rational-quadratic covariance between time points s and t
rqCov <- function(s, t, alpha, rho, nu) {
  alpha^2 * (1 + (s-t)^2 / (2 * nu * rho^2))^(-nu)
}
ctl <- DEoptim.control(itermax = 5000, trace = 100)
set.seed(1234)   # DEoptim is stochastic; fix the seed for reproducibility
# Minimize the negative marginal Gaussian log-likelihood over
# par = (mu, alpha, rho, nu, sigma)
opt.rq <- DEoptim(function(par) {
  mu <- rep(par[1], nrow(dat))
  cMat <- outer(dat$t, dat$t, rqCov, par[2], par[3], par[4]) + diag(par[5]^2 , nrow(dat))
  -mvtnorm::dmvnorm(dat$y, mu, cMat, log=TRUE)
}, lower = c(-50,0,0,0,0), upper = c(50,50,50,50,50), control = ctl)
par.rq <- opt.rq$optim$bestmem   # best hyperparameter vector found
########################################################
# Run Stan script
########################################################
sDat <- list(n = nrow(dat), t = dat$t, y = dat$y, p = length(tPred), tPred = tPred)
# Plug the empirical-Bayes estimates into the Stan data list
sDat$mu <- par.rq[1]
sDat$alpha <- par.rq[2]
sDat$rho <- par.rq[3]
sDat$nu <- par.rq[4]
sDat$sigma <- par.rq[5]
iter <- 10000
seed <- 12345
m <- stan_model("../../Stan/gptrendFixed.stan")
# Fixed_param: hyperparameters stay at the values above; only `pred` is sampled
fit <- sampling(m, data = sDat, iter = iter, seed = seed, algorithm = "Fixed_param")
pred <- extract(fit, "pred")$pred
########################################################
# Plot fit
########################################################
# Shaded band between lower curve l and upper curve u over grid t
band <- function(t, l, u, col) {
  polygon(c(t, rev(t)), c(l, rev(u)), col=col, border = NA)
}
pdf("../../figures/smoking_gpFixed_logit.pdf", width = 8, height = 3)
par(mfrow=c(1,3), bty="n", mar = c(2.3, 2.3, 1, 0), mgp=c(1.3,0.4,0))
# Panel 1: posterior of f with 50/95/99% bands plus the observed data
plot(dat$t, dat$y, pch = 19, ylim=c(-1.4, -0.4), xaxt="n", xlab="Year", ylab="f | logit(Y)", type="n")
axis(1, 1998:2018)
band(tPred, apply(pred[,,1], 2, quantile, prob = 0.005), apply(pred[,,1], 2, quantile, prob = 0.995), col = "gray80")
band(tPred, apply(pred[,,1], 2, quantile, prob = 0.025), apply(pred[,,1], 2, quantile, prob = 0.975), col = "gray65")
band(tPred, apply(pred[,,1], 2, quantile, prob = 0.25), apply(pred[,,1], 2, quantile, prob = 0.75), col = "gray45")
lines(tPred, apply(pred[,,1], 2, mean), lwd = 2)
points(dat$t, dat$y, pch = 19, cex=0.8)
legend("topleft", c("Mean", "50%", "95%", "99%"), col = c("black", "gray45", "gray65", "gray85"),
       lwd = 2, bty="n", cex=0.7, lty = c(1, NA, NA, NA), pch = c(NA, 15, 15, 15), pt.cex=1.5)
# Panel 2: posterior of the derivative (pred[,,3]) with the same bands
plot(tPred, apply(pred[,,3], 2, mean), lwd = 2, type="n", ylim = c(-0.15, 0.1), xaxt="n", xlab="Year", ylab="df | logit(Y)")
axis(1, 1998:2018)
band(tPred, apply(pred[,,3], 2, quantile, prob = 0.005), apply(pred[,,3], 2, quantile, prob = 0.995), col = "gray80")
band(tPred, apply(pred[,,3], 2, quantile, prob = 0.025), apply(pred[,,3], 2, quantile, prob = 0.975), col = "gray65")
band(tPred, apply(pred[,,3], 2, quantile, prob = 0.25), apply(pred[,,3], 2, quantile, prob = 0.75), col = "gray45")
lines(tPred, apply(pred[,,3], 2, mean), lwd = 2)
abline(h = 0, lty = 2)
legend("topleft", c("Mean", "50%", "95%", "99%"), col = c("black", "gray45", "gray65", "gray85"),
       lwd = 2, bty="n", cex=0.7, lty = c(1, NA, NA, NA), pch = c(NA, 15, 15, 15), pt.cex=1.5)
# Panel 3: Trend Direction Index (pred[1,,5]) in percent, 50% reference line
plot(tPred, t(pred[1,,5])*100, type="l", lty = 1, lwd = 2, xaxt="n", xlab="Year",
     ylab="Trend Direction Index [%]", ylim=c(0,100))
axis(1, 1998:2018)
abline(h = 50, lty = 2)
title("TDI(2018, Year - 2018)", font.main=1)
dev.off()
|
/applications/smoking/fitFixed_logit.R
|
no_license
|
aejensen/TrendinessOfTrends
|
R
| false
| false
| 3,729
|
r
|
# Fit a Gaussian-process trend model with fixed hyperparameters to the smoking
# data (on the logit scale) and reproduce a three-panel figure: posterior fit,
# posterior derivative, and Trend Direction Index.
# NOTE(review): rm(list=ls()) wipes the caller's workspace -- fine for a
# stand-alone batch script, but avoid sourcing this from an active session.
rm(list=ls())
library(DEoptim)
library(mvtnorm)
library(rstan)
rstan_options(auto_write = TRUE)             # cache compiled Stan models
options(mc.cores = parallel::detectCores())  # run chains in parallel
########################################################
# Load data
########################################################
# log-odds transform; p must lie strictly in (0, 1)
logit <- function(p) log(p / (1-p))
load("smoking.RData")   # provides `smoking` with columns year and p (percent)
dat <- data.frame(t = smoking$year, y = logit(smoking$p/100))
# Prediction grid over the observation window
tPred <- seq(1998, 2018, length.out = 200) #length = 500 used for manuscript
########################################################
# Estimate Empirical Bayes parameters
########################################################
# Rational-quadratic covariance between time points s and t
rqCov <- function(s, t, alpha, rho, nu) {
  alpha^2 * (1 + (s-t)^2 / (2 * nu * rho^2))^(-nu)
}
ctl <- DEoptim.control(itermax = 5000, trace = 100)
set.seed(1234)   # DEoptim is stochastic; fix the seed for reproducibility
# Minimize the negative marginal Gaussian log-likelihood over
# par = (mu, alpha, rho, nu, sigma)
opt.rq <- DEoptim(function(par) {
  mu <- rep(par[1], nrow(dat))
  cMat <- outer(dat$t, dat$t, rqCov, par[2], par[3], par[4]) + diag(par[5]^2 , nrow(dat))
  -mvtnorm::dmvnorm(dat$y, mu, cMat, log=TRUE)
}, lower = c(-50,0,0,0,0), upper = c(50,50,50,50,50), control = ctl)
par.rq <- opt.rq$optim$bestmem   # best hyperparameter vector found
########################################################
# Run Stan script
########################################################
sDat <- list(n = nrow(dat), t = dat$t, y = dat$y, p = length(tPred), tPred = tPred)
# Plug the empirical-Bayes estimates into the Stan data list
sDat$mu <- par.rq[1]
sDat$alpha <- par.rq[2]
sDat$rho <- par.rq[3]
sDat$nu <- par.rq[4]
sDat$sigma <- par.rq[5]
iter <- 10000
seed <- 12345
m <- stan_model("../../Stan/gptrendFixed.stan")
# Fixed_param: hyperparameters stay at the values above; only `pred` is sampled
fit <- sampling(m, data = sDat, iter = iter, seed = seed, algorithm = "Fixed_param")
pred <- extract(fit, "pred")$pred
########################################################
# Plot fit
########################################################
# Shaded band between lower curve l and upper curve u over grid t
band <- function(t, l, u, col) {
  polygon(c(t, rev(t)), c(l, rev(u)), col=col, border = NA)
}
pdf("../../figures/smoking_gpFixed_logit.pdf", width = 8, height = 3)
par(mfrow=c(1,3), bty="n", mar = c(2.3, 2.3, 1, 0), mgp=c(1.3,0.4,0))
# Panel 1: posterior of f with 50/95/99% bands plus the observed data
plot(dat$t, dat$y, pch = 19, ylim=c(-1.4, -0.4), xaxt="n", xlab="Year", ylab="f | logit(Y)", type="n")
axis(1, 1998:2018)
band(tPred, apply(pred[,,1], 2, quantile, prob = 0.005), apply(pred[,,1], 2, quantile, prob = 0.995), col = "gray80")
band(tPred, apply(pred[,,1], 2, quantile, prob = 0.025), apply(pred[,,1], 2, quantile, prob = 0.975), col = "gray65")
band(tPred, apply(pred[,,1], 2, quantile, prob = 0.25), apply(pred[,,1], 2, quantile, prob = 0.75), col = "gray45")
lines(tPred, apply(pred[,,1], 2, mean), lwd = 2)
points(dat$t, dat$y, pch = 19, cex=0.8)
legend("topleft", c("Mean", "50%", "95%", "99%"), col = c("black", "gray45", "gray65", "gray85"),
       lwd = 2, bty="n", cex=0.7, lty = c(1, NA, NA, NA), pch = c(NA, 15, 15, 15), pt.cex=1.5)
# Panel 2: posterior of the derivative (pred[,,3]) with the same bands
plot(tPred, apply(pred[,,3], 2, mean), lwd = 2, type="n", ylim = c(-0.15, 0.1), xaxt="n", xlab="Year", ylab="df | logit(Y)")
axis(1, 1998:2018)
band(tPred, apply(pred[,,3], 2, quantile, prob = 0.005), apply(pred[,,3], 2, quantile, prob = 0.995), col = "gray80")
band(tPred, apply(pred[,,3], 2, quantile, prob = 0.025), apply(pred[,,3], 2, quantile, prob = 0.975), col = "gray65")
band(tPred, apply(pred[,,3], 2, quantile, prob = 0.25), apply(pred[,,3], 2, quantile, prob = 0.75), col = "gray45")
lines(tPred, apply(pred[,,3], 2, mean), lwd = 2)
abline(h = 0, lty = 2)
legend("topleft", c("Mean", "50%", "95%", "99%"), col = c("black", "gray45", "gray65", "gray85"),
       lwd = 2, bty="n", cex=0.7, lty = c(1, NA, NA, NA), pch = c(NA, 15, 15, 15), pt.cex=1.5)
# Panel 3: Trend Direction Index (pred[1,,5]) in percent, 50% reference line
plot(tPred, t(pred[1,,5])*100, type="l", lty = 1, lwd = 2, xaxt="n", xlab="Year",
     ylab="Trend Direction Index [%]", ylim=c(0,100))
axis(1, 1998:2018)
abline(h = 50, lty = 2)
title("TDI(2018, Year - 2018)", font.main=1)
dev.off()
|
# Report words from a text that (a) start with `prefix`, (b) end with
# `postfix`, and (c) do both.  Matching is case-insensitive and restricted
# to purely alphabetic tokens; results are printed, nothing is returned.
#
# `textname` is either the name of a text object already loaded in the
# embedded Python session ("text1" .. "text9", e.g. the nltk.book texts)
# or a path to a local file, which is read and tokenized on whitespace.
#
# Requires the rPython bridge (python.assign/python.exec/python.get) and
# stringr::str_replace_all to be loaded by the caller.
#
# This rewrite collapses the nine duplicated per-text branches into one
# parameterized code path, and fixes the original "text9" branch, which
# read `pypost` from Python without ever computing it first.
matching <- function(textname, prefix, postfix) {
  python.assign("pyprefix", prefix)
  python.assign("pypostfix", postfix)
  python.exec("newprefix=pyprefix.lower()")
  python.exec("newpostfix=pypostfix.lower()")

  if (textname %in% paste0("text", 1:9)) {
    # Built-in corpus object: filter the Python variable of that name.
    pyvar <- textname
  } else {
    # Treat `textname` as a file path: read it, strip quote characters,
    # and hand the whitespace-split token list to Python as `pylst`.
    file <- scan(file = textname, what = "list", n = -1, sep = "", skip = 0, na.strings = "NA")
    str <- paste(file, collapse = " ")
    prop <- str_replace_all(str, "'", "")
    prop <- str_replace_all(prop, '"', "")
    python.assign("pystr", prop)
    python.exec("pylst=pystr.split()")
    pyvar <- "pylst"
  }

  # One Python list comprehension per report; sprintf() injects the name
  # of the token container selected above.
  python.exec(sprintf(
    "pypre=[word.lower() for word in set(%s) if(word.isalpha() and word.startswith(newprefix))]",
    pyvar))
  python.exec(sprintf(
    "pypost=[word.lower() for word in set(%s) if(word.isalpha() and word.endswith(newpostfix))]",
    pyvar))
  rpre <- python.get("pypre")
  rpost <- python.get("pypost")
  print(paste("THE WORDS BEGINNING WITH ", prefix))
  print(rpre)
  cat("\n")
  cat("\n")
  print(paste("THE WORDS ENDING WITH ", postfix))
  print(rpost)
  cat("\n")
  cat("\n")
  python.exec(sprintf(
    "both=[word.lower() for word in set(%s) if(word.isalpha() and word.startswith(newprefix) and word.endswith(newpostfix))]",
    pyvar))
  rboth <- python.get("both")
  print(paste("THE WORDS THAT BOTH START AND BEGIN WITH THE GIVEN INPUTS "))
  print(rboth)
}
|
/word_match.R
|
no_license
|
dependency-injection/NLP
|
R
| false
| false
| 9,074
|
r
|
# Report words from a text that (a) start with `prefix`, (b) end with
# `postfix`, and (c) do both.  Matching is case-insensitive and restricted
# to purely alphabetic tokens; results are printed, nothing is returned.
#
# `textname` is either the name of a text object already loaded in the
# embedded Python session ("text1" .. "text9", e.g. the nltk.book texts)
# or a path to a local file, which is read and tokenized on whitespace.
#
# Requires the rPython bridge (python.assign/python.exec/python.get) and
# stringr::str_replace_all to be loaded by the caller.
#
# This rewrite collapses the nine duplicated per-text branches into one
# parameterized code path, and fixes the original "text9" branch, which
# read `pypost` from Python without ever computing it first.
matching <- function(textname, prefix, postfix) {
  python.assign("pyprefix", prefix)
  python.assign("pypostfix", postfix)
  python.exec("newprefix=pyprefix.lower()")
  python.exec("newpostfix=pypostfix.lower()")

  if (textname %in% paste0("text", 1:9)) {
    # Built-in corpus object: filter the Python variable of that name.
    pyvar <- textname
  } else {
    # Treat `textname` as a file path: read it, strip quote characters,
    # and hand the whitespace-split token list to Python as `pylst`.
    file <- scan(file = textname, what = "list", n = -1, sep = "", skip = 0, na.strings = "NA")
    str <- paste(file, collapse = " ")
    prop <- str_replace_all(str, "'", "")
    prop <- str_replace_all(prop, '"', "")
    python.assign("pystr", prop)
    python.exec("pylst=pystr.split()")
    pyvar <- "pylst"
  }

  # One Python list comprehension per report; sprintf() injects the name
  # of the token container selected above.
  python.exec(sprintf(
    "pypre=[word.lower() for word in set(%s) if(word.isalpha() and word.startswith(newprefix))]",
    pyvar))
  python.exec(sprintf(
    "pypost=[word.lower() for word in set(%s) if(word.isalpha() and word.endswith(newpostfix))]",
    pyvar))
  rpre <- python.get("pypre")
  rpost <- python.get("pypost")
  print(paste("THE WORDS BEGINNING WITH ", prefix))
  print(rpre)
  cat("\n")
  cat("\n")
  print(paste("THE WORDS ENDING WITH ", postfix))
  print(rpost)
  cat("\n")
  cat("\n")
  python.exec(sprintf(
    "both=[word.lower() for word in set(%s) if(word.isalpha() and word.startswith(newprefix) and word.endswith(newpostfix))]",
    pyvar))
  rboth <- python.get("both")
  print(paste("THE WORDS THAT BOTH START AND BEGIN WITH THE GIVEN INPUTS "))
  print(rboth)
}
|
#DATA ANALYSIS WITH R
#SESSION#1
#How to install packages in R: vegan
#The vegan package provides tools for descriptive community ecology.
#It has most basic functions of diversity analysis, community ordination and
#dissimilarity analysis.
#Most of its multivariate tools can be used for other data types as well.
#1.Installing by console
install.packages("vegan")
#2.Installing via the "packages" tab in the "Utility panel"
#Once a specific package has been installed, we need to recall it
#we will use the function "library"
library(vegan)
#Let's have a look at the help
#we can use the function "help"
help("vegan")
#we can use the symbol "?"
?vegan
#..or, we can use the "help" Tab on the Utility panel and search for the
#package's help
#Loading data: the iris dataset
#This famous data set gives the measurements
#in centimeters of the variables sepal length and width and petal length and width,
#respectively, for 50 flowers from each of 3 species of iris.
#The species are Iris setosa, I. versicolor, and I. virginica.
#iris loads automatically in R, being one of the most popular dataset on which to
#learn the use of R for statistical analysis.
#Anyway, for the purpose of this tutorial, we will load the iris dataset
#from a directory we previously set. This is because the script and the data MUST be
#on the same directory.
#We will use different data type and functions.
#1. loading a .txt file
iris.txt=read.table("iris.txt",sep="",dec=".")
#2. loading a .csv file
iris.csv=read.csv("iris.csv",sep=",",dec=".")
#3. loading an excel file (.xls or .xlsx)
#To be able to load an excel file we need to install and load a specific library
#called "readxl"
install.packages("readxl")
library(readxl)
iris.xls=read_xls("iris.xls")#this will load the entire file
#If we need to load a specific sheet, we should use the argument
#"sheet", specifying the number of the sheet (1,2,3,..) or, eventually, its name
iris.xls=read_xls("iris.xls",sheet=1)
iris.xls.2=read_xls("iris.xls",sheet=2)#In this case we will receive an error, as the file does not have a second sheet
#When loading an excel file with "read_xls", R automatically gives back a "tibble" object,
#which is a "nicest" way to format a data frame
#To convert this object to a data frame we can use the function "as.data.frame()"
as.data.frame(iris.xls)#!If we want to make it a permanent object, we must assign a name
#If needed, we can explore the data structure of the dataset, by using the function "str()"
str(iris.txt)
str(iris.csv)
str(iris.xls)
#We can test the type of data structure by using the function is.matrix OR is.dataframe OR is.list.
is.matrix(iris.txt)
is.matrix(iris.csv)
is.matrix(iris.xls)
is.data.frame(iris.txt)
is.data.frame(iris.csv)
is.data.frame(iris.xls)
#The function gives back a logic value TRUE/FALSE, depending on the nature of the dataset.
#Depending on our needs, we can decide to split out our dataset to create a matrix
#where ALL the values MUST be of the same type (numbers, text, categories and so on)
#we can use the function "as.matrix()"
iris.txt.matrix=as.matrix(iris.txt[,1:3])
#QUICK EXERCISE######################################################################
#1.Create a new matrix using one of the different iris datasets we previously loaded.
#2.Give it a name
#3.Select a different number of columns
#4.Try to create a matrix by selecting a non numeric column
#####
#In the above function we use the syntax "iris.txt[,1:3]" to select the range of columns
#from 1 to 3. In R a data frame or matrix object is always given in the form
#data[rows,columns]
#To select a specific range of rows we then can use data[rows-from:to,]
is.matrix(iris.txt.matrix)#with this we can check whether the new object called "iris.txt.matrix"
#IS or IS NOT a matrix
#Data types
#Lists
#R list is the object which contains elements of different types – like strings,
#numbers, vectors and another list inside it. R list can also contain a matrix
#or a function as its elements. The list is created using the list() function in R.
#In other words, a list is a generic vector containing other objects.
#Let's create a list composed by the first two columns of the iris dataset
colnames(iris)#this function allows showing the names of the columns
pts <- list(x=iris$Sepal.Length, y = iris$Sepal.Width)
#Arrays
#An array in R can have one, two or more dimensions.
#It is simply a vector which is stored with additional attributes
#giving the dimensions (attribute "dim") and optionally names for those
#dimensions (attribute "dimnames").
#Select two columns from the iris dataset
#Create two vectors by selecting two of the three columns
v1<-iris$Sepal.Length
v2<-iris$Sepal.Width
final = array (c (v1, v2),dim =c(4,4,3))#Take two vectors above as an input to an array
#where the dimension is considered as 4 * 4, and three matrices or dimensional data is created.
#QUICK EXERCISE######################################################################
#1.Create a new list by using the Petal.Length and Petal.Width attributes of the dataset and give it a name
#2.Create an array with the same variables above and store it in 4 matrices having dimension 10 x 10
#####
#Now that we learned how to import data, let's make some basic maths
#For this, we will use the iris datasets, as we would need number-only variables..
#Functions
#Functions are fundamental to any programming language,
#and R is no different. There are three main sources for functions.
#1-Defined in Base R
#There are many of these, and we will touch on some common ones throughout this class.
#Consider the code below:
sum(v1)#this function will perform the sum over v1
#2-Defined in Packages
#When you install a package, what you’re really doing is adding more
#functions to the universe in which you are working
#3-Defined in Your Script
#A fundamental part in learning any programming language.
#Implementing your own script allows organizing your work, preventing repeating the same code within a single script,
#and define functions within your script that you then call from elsewhere in the script.
#As an example, imagine we want to define a function which we call 'cubeValue'
# Print a one-line report of x cubed, e.g. cubeValue(2) -> "The cube of 2 is 8."
# Called for its printed side effect; the return value is not meaningful.
cubeValue <- function(x) {
  cubed <- x^3
  cat("The cube of ", x, " is ", cubed, ".\n", sep = "")
}
# Loop through the numbers 1 through 3 and print the cube of each.
for (i in 1:3){
  cubeValue(i)
}
#QUICK EXERCISE######################################################################
#1.Loop the function cubeValue through 1 to 100
#2.Create a function called squareValue
#####
#Maths on two vectors
v1+v2#sum
v1*v2#multiplication
v1/v2#division
v1-v2#subtraction
sqrt(v1)#square root
#Writing math functions in R follows the usual syntax, with operators following the order given by
#parentheses
(v1+v2)*v1
v1+v2*v1
#Type of variables
#Let's have a look at the different types of variables in the dataset
class(iris)
class(iris$Sepal.Length)
class(iris$Species)
#Basic plots
#1.Strip chart
#A strip chart is the most basic type of plot available.
#It plots the data in order along a line with each data point represented as a box.
stripchart(iris$Sepal.Length)
#2.Histograms
#A histogram is a very common plot. It plots the frequencies that data appears within certain ranges.
hist(iris$Sepal.Length)
#3.Boxplots
#A boxplot provides a graphical view of the median, quartiles, maximum, and minimum of a data set.
boxplot(iris$Sepal.Length)
#4.Scatterplot
#A scatter plot provides a graphical view of the relationship between two sets of numbers.
plot(iris$Sepal.Length,iris$Sepal.Width)#..or
plot(pts)
#5.Normal QQ plot
#This plot is used to determine if your data is close to being normally distributed.
#You cannot be sure that the data is normally distributed, but you can rule out
#if it is not normally distributed.
qqnorm(iris$Sepal.Length)
#Customizing plot
#When plotting, we can customize the color, the width of the plotting window and many other features
#This will allow us to obtain a pretty nice figure, which we can use as it is for our purposes
#or to save in a vector graphic (e.g., pdf, eps or svg) and then manipulate for a better aspect
#in other vector-graphic softwares (e.g., Inkscape)
plot(iris$Sepal.Length,iris$Sepal.Width,pch=19)#pch determines the shape of the points
plot(iris$Sepal.Length,iris$Sepal.Width,pch=12)
plot(iris$Sepal.Length,iris$Sepal.Width,pch=2)
?pch
plot(iris$Sepal.Length,iris$Sepal.Width,pch=19,col="red")#col determines the color of the points
plot(iris$Sepal.Length,iris$Sepal.Width,pch=2,col="red")#col determines the color of the points
plot(iris$Sepal.Length,iris$Sepal.Width,pch=19,col="red",xlab="Hello!",ylab="Hi!",main="Test Plot")
#xlab, ylab and main set the axis labels and the main title of the plot (if needed)
plot(iris$Sepal.Length,iris$Sepal.Width,pch=19,col="red",xlab="Hello!",ylab="Hi!",main="Test Plot",
     las=1)#las sets the direction of the axis plot
#How to make composite pictures
#Sometimes it could be useful to create composite figures, plotting for instance two scatterplots at once
par(mfrow=c(1,2))#mfrow=c(1,2) determines how many rows (1) and columns (2) the plot areas should be
#divided into
plot(iris$Sepal.Length,iris$Sepal.Width,pch=19,col="red",xlab="Hello!",ylab="Hi!",main="Test Plot")
plot(iris$Sepal.Length,iris$Sepal.Width,pch=4,col="red",xlab="Hello!",ylab="Hi!",main="Test Plot2")
#We can also decide to allow the plots to be perfectly squared
par(pty="s",mfrow=c(1,2))#pty is a character specifying the type of plot region to be used;
#"s" generates a square plotting region and "m" generates the maximal plotting region.
plot(iris$Sepal.Length,iris$Sepal.Width,pch=19,col="red",xlab="Hello!",ylab="Hi!",main="Test Plot")
plot(iris$Sepal.Length,iris$Sepal.Width,pch=4,col="red",xlab="Hello!",ylab="Hi!",main="Test Plot2")
par(pty="m",mfrow=c(1,2))#pty is a character specifying the type of plot region to be used;
#"s" generates a square plotting region and "m" generates the maximal plotting region.
plot(iris$Sepal.Length,iris$Sepal.Width,pch=19,col="red",xlab="Hello!",ylab="Hi!",main="Test Plot")
plot(iris$Sepal.Length,iris$Sepal.Width,pch=4,col="red",xlab="Hello!",ylab="Hi!",main="Test Plot2")
par(pty="m",mfrow=c(1,2),cex=2)#cex is a magnifier parameter, to allow the elements on the plot to
#scale in magnitude (bigger or lower)
plot(iris$Sepal.Length,iris$Sepal.Width,pch=19,col="red",xlab="Hello!",ylab="Hi!",main="Test Plot")
plot(iris$Sepal.Length,iris$Sepal.Width,pch=4,col="red",xlab="Hello!",ylab="Hi!",main="Test Plot2")
#QUICK EXERCISE######################################################################
#1.Let's create two vectors by using sepal length and width as values. Give them a name
#2.Create a simple equation for the pythagorean theorem by using these two vectors. Give it a name
#3.Create and customize a plot by plotting the new variable measured in point 2 against the two in point one
#####
|
/R_scripts/Base/Session#1.R
|
no_license
|
brunobellisario/Rcourse
|
R
| false
| false
| 10,882
|
r
|
#DATA ANALYSIS WITH R
#SESSION#1
#How to install packages in R: vegan
#The vegan package provides tools for descriptive community ecology.
#It has most basic functions of diversity analysis, community ordination and
#dissimilarity analysis.
#Most of its multivariate tools can be used for other data types as well.
#1.Installing by console
install.packages("vegan")
#2.Installing via the "packages" tab in the "Utility panel"
#Once a specific package has been installed, we need to recall it
#we will use the function "library"
library(vegan)
#Let's have a look to the help
#we can use the function "help"
help("vegan")
#we can use the symbol "?"
?vegan
#..or, we can use the "help" Tab on the Utility panel and search for the
#package's help
#Loading data: the iris dataset
#This famous data set gives the measurements
#in centimeters of the variables sepal length and width and petal length and width,
#respectively, for 50 flowers from each of 3 species of iris.
#The species are Iris setosa, I. versicolor, and I. virginica.
#iris loads automatically in R, being one of the most popular dataset on which to
#learn the use of R for statistical analysis.
#Anyway, for the purpose of this tutorial, will load the iris dataset
#from a directory we previously set. This is because the script and the data MUST be
#on the same directory.
#We will use different data type and functions.
#1. loading a .txt file
iris.txt=read.table("iris.txt",sep="",dec=".")
#2. loading a .csv file
iris.csv=read.csv("iris.csv",sep=",",dec=".")
#3. loading an excel file (.xls or .xlsx)
#To be able to load an excel file we need to install and load a specific library
#called "readxl"
install.packages("readxl")
library(readxl)
iris.xls=read_xls("iris.xls")#this will load the entire file
#If we need to load a specific sheet, we should use the argument
#"sheet", specifying the number of the sheet (1,2,3,..) or, eventually, its name
iris.xls=read_xls("iris.xls",sheet=1)
iris.xls.2=read_xls("iris.xls",sheet=2)#In this case we will receive an error, as the file does not have a second sheet
#When loading an excel file with "read_xls", R automatically gives back a "tibble" object,
#which is a "nicest" way to format a data frame
#To convert this object to a data frame we can use the function "as.data.frame()"
as.data.frame(iris.xls)#!If we want to make as a permanent object, we must assign a name
#If needed, we can explore the data structure of the dataset, by using the function "str()"
str(iris.txt)
str(iris.csv)
str(iris.xls)
#We can test the type of data structure by using the function is.matrix OR is.dataframe OR is.list.
is.matrix(iris.txt)
is.matrix(iris.csv)
is.matrix(iris.xls)
is.data.frame(iris.txt)
is.data.frame(iris.csv)
is.data.frame(iris.xls)
#The function gives back a logic value TRUE/FALSE, depending on the nature of the dataset.
#Depending on our needs, we can decide to split out our dataset to create a matrix
#where ALL the values MUST be of the same type (numbers, text, categories and so on)
#we can use the function "as.matrix()"
iris.txt.matrix=as.matrix(iris.txt[,1:3])
#QUICK EXERCISE######################################################################
#1.Create a new matrix using one of the different iris datasets we previously loaded.
#2.Gives it a name
#3.Select a different number of columns
#4.Try to create a matrix by selecting a non numeric column
#####
#In the above function we use the syntax "iris.txt[,1:3]" to select the range of columns
#from 1 to 3. In R a data frame or matrix object is always given in the form
#data[rows,columns]
#To select a specific range of rows we then can use data[rows-from:to,]
is.matrix(iris.txt.matrix)#with this we can check whether the new object called "iris.txt.matrix"
#IS or IS NOT a matrix
#Data types
#Lists
#R list is the object which contains elements of different types – like strings,
#numbers, vectors and another list inside it. R list can also contain a matrix
#or a function as its elements. The list is created using the list() function in R.
#In other words, a list is a generic vector containing other objects.
#Let's create a list composed by the first two columns of the iris dataset
colnames(iris)#this function allows showing the names of the columns
pts <- list(x=iris$Sepal.Length, y = iris$Sepal.Width)
#Arrays
#An array in R can have one, two or more dimensions.
#It is simply a vector which is stored with additional attributes
#giving the dimensions (attribute "dim") and optionally names for those
#dimensions (attribute "dimnames").
#Select two columns from the iris dataset
#Create two vectors by selecting two of the three columns
v1<-iris$Sepal.Length
v2<-iris$Sepal.Width
final = array (c (v1, v2),dim =c(4,4,3))#Take two vectors above as an input to an array
#where the dimension is considered as 4 * 4, and three matrices or dimensional data is created.
#QUICK EXERCISE######################################################################
#1.Create a new list by using the Petal.Length and Petal.Width attributes of the dataset and gives it a name
#2.Create an array with the same variables above and store it in 4 matrix having dimension 10 x10
#####
#Now that we learned how to import data, let's make some basic maths
#For this, we will use the iris datasets, as we need numeric-only variables.
#Functions
#Functions are fundamental to any programming language,
#and R is no different. There are three main sources for functions.
#1-Defined in Base R
#There are many of these, and we will touch on some common ones throughout this class.
#Consider the code below:
sum(v1)#this function will perform the sum over v1
#2-Defined in Packages
#When you install a package, what you’re really doing is adding more
#functions to the universe in which you are working
#3-Defined in Your Script
#A fundamental part in learning any programming language.
#Implementing your own script allows organizing your work, preventing repeating the same code within a single script,
#and define functions within your script that you then call from elsewhere in the script.
# Example of a user-defined function: prints the cube of its argument.
cubeValue <- function(x){
  cubed <- x^3
  cat("The cube of ", x, " is ", cubed, ".\n", sep = "")
}
# Loop through the numbers 1 through 3 and print the cube of each.
# (For a variable upper bound, prefer seq_len(n) so an empty range is
# handled correctly; a constant 1:3 is fine here.)
for (i in 1:3){
  cubeValue(i)
}
#QUICK EXERCISE######################################################################
#1.Loop the function cubeValue through 1 to 100
#2.Create a function called squareValue
#####
#Maths on two vectors
v1+v2#sum
v1*v2#multiplication
v1/v2#division
v1-v2#subtraction
sqrt(v1)#square root
#Writing math functions in R follows the usual syntax, with operators following the order given by
#parentheses
(v1+v2)*v1
v1+v2*v1
#Type of variables
#Let's have a look to different type of variables in the dataset
class(iris)
class(iris$Sepal.Length)
class(iris$Species)
#Basic plots
#1.Strip chart
#A strip chart is the most basic type of plot available.
#It plots the data in order along a line with each data point represented as a box.
stripchart(iris$Sepal.Length)
#2.Histograms
#A histogram is very common plot. It plots the frequencies that data appears within certain ranges.
hist(iris$Sepal.Length)
#3.Boxplots
#A boxplot provides a graphical view of the median, quartiles, maximum, and minimum of a data set.
boxplot(iris$Sepal.Length)
#4.Scatterplot
#A scatter plot provides a graphical view of the relationship between two sets of numbers.
plot(iris$Sepal.Length,iris$Sepal.Width)#..or
plot(pts)
#5.Normal QQ plot
#This plot is used to determine if your data is close to being normally distributed.
#You cannot be sure that the data is normally distributed, but you can rule out
#if it is not normally distributed.
qqnorm(iris$Sepal.Length)
#Customizing plot
#When plotting, we can customize the color, the width of the plotting window and many other features
#This will allow us to obtain a pretty nice figure, which we can use as it is for our purposes
#or to save in a vector graphic (e.g., pdf, eps or svg) and then manipulate for a better aspect
#in other vector-graphic softwares (e.g., Inkscape)
plot(iris$Sepal.Length,iris$Sepal.Width,pch=19)#pch determines the shape of the points
plot(iris$Sepal.Length,iris$Sepal.Width,pch=12)
plot(iris$Sepal.Length,iris$Sepal.Width,pch=2)
?pch
plot(iris$Sepal.Length,iris$Sepal.Width,pch=19,col="red")#col determines the color of the points
plot(iris$Sepal.Length,iris$Sepal.Width,pch=2,col="red")#col determines the color of the points
plot(iris$Sepal.Length,iris$Sepal.Width,pch=19,col="red",xlab="Hello!",ylab="Hi!",main="Test Plot")
#xlab, ylab and main set the axis labels and the main title of the plot (if needed)
plot(iris$Sepal.Length,iris$Sepal.Width,pch=19,col="red",xlab="Hello!",ylab="Hi!",main="Test Plot",
las=1)#las sets the direction of the axis plot
#How to make composite pictures
#Sometimes could be useful to create composite figures, plotting for instance two scatterplots at once
par(mfrow=c(1,2))#mfrow=c(1,2) determines how many rows (1) and columns (2) the plot areas should be
#divided into
plot(iris$Sepal.Length,iris$Sepal.Width,pch=19,col="red",xlab="Hello!",ylab="Hi!",main="Test Plot")
plot(iris$Sepal.Length,iris$Sepal.Width,pch=4,col="red",xlab="Hello!",ylab="Hi!",main="Test Plot2")
#We can also decide to allow the plots to be perfectly squared
par(pty="s",mfrow=c(1,2))#pty is a character specifying the type of plot region to be used;
#"s" generates a square plotting region and "m" generates the maximal plotting region.
plot(iris$Sepal.Length,iris$Sepal.Width,pch=19,col="red",xlab="Hello!",ylab="Hi!",main="Test Plot")
plot(iris$Sepal.Length,iris$Sepal.Width,pch=4,col="red",xlab="Hello!",ylab="Hi!",main="Test Plot2")
par(pty="m",mfrow=c(1,2))#pty is a character specifying the type of plot region to be used;
#"s" generates a square plotting region and "m" generates the maximal plotting region.
plot(iris$Sepal.Length,iris$Sepal.Width,pch=19,col="red",xlab="Hello!",ylab="Hi!",main="Test Plot")
plot(iris$Sepal.Length,iris$Sepal.Width,pch=4,col="red",xlab="Hello!",ylab="Hi!",main="Test Plot2")
par(pty="m",mfrow=c(1,2),cex=2)#cex is a magnifier parameter, to allow the elements omn the plot to
#scale in magnitude (bigger or lower)
plot(iris$Sepal.Length,iris$Sepal.Width,pch=19,col="red",xlab="Hello!",ylab="Hi!",main="Test Plot")
plot(iris$Sepal.Length,iris$Sepal.Width,pch=4,col="red",xlab="Hello!",ylab="Hi!",main="Test Plot2")
#QUICK EXERCISE######################################################################
#1.Let's create two vectors by using sepal length and width as values. Give them a name
#2.Create a simple equation for the Pythagorean theorem by using these two vectors. Give it a name
#3.Create and customize a plot by plotting the new variable measured in point 2 against the two in point one
#####
|
## Caching the Inverse of a Matrix:
## This function creates a special "matrix" object that can cache its inverse.
##
## Returns a list of four functions:
##   set(y)          -- replace the stored matrix and invalidate the cache
##   get()           -- return the stored matrix
##   setInverse(inv) -- store a computed inverse in the cache
##   getInverse()    -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  temp <- NULL
  set <- function(y) {
    x <<- y
    # Bug fix: the cache variable is `temp`, not `inverse`. The original
    # `inverse <<- NULL` created a stray global and left a stale inverse
    # in `temp`, so cacheSolve() could return the inverse of the *old*
    # matrix after set() replaced it.
    temp <<- NULL
  }
  get <- function() x
  setInverse <- function(inverse) temp <<- inverse
  getInverse <- function() temp
  # (A stray extra `}` that followed this function in the original file
  # was a syntax error and has been removed.)
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Computes the inverse of the special "matrix" produced by makeCacheMatrix().
## If the inverse has already been calculated, it is retrieved from the cache
## (with a message) instead of being recomputed. Extra arguments are
## forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
    return(cached)
  }
  # Cache hit: skip the (potentially expensive) solve() call.
  message("getting the cached data")
  cached
}
|
/cachematrix.R
|
no_license
|
voodooshen/ProgrammingAssignment2
|
R
| false
| false
| 1,035
|
r
|
## Caching the Inverse of a Matrix:
## This function creates a special "matrix" object that can cache its inverse.
##
## Returns a list of four functions:
##   set(y)          -- replace the stored matrix and invalidate the cache
##   get()           -- return the stored matrix
##   setInverse(inv) -- store a computed inverse in the cache
##   getInverse()    -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  temp <- NULL
  set <- function(y) {
    x <<- y
    # Bug fix: the cache variable is `temp`, not `inverse`. The original
    # `inverse <<- NULL` created a stray global and left a stale inverse
    # in `temp`, so cacheSolve() could return the inverse of the *old*
    # matrix after set() replaced it.
    temp <<- NULL
  }
  get <- function() x
  setInverse <- function(inverse) temp <<- inverse
  getInverse <- function() temp
  # (A stray extra `}` that followed this function in the original file
  # was a syntax error and has been removed.)
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## This function computes the inverse of the special "matrix" created by
## makeCacheMatrix above. If the inverse has already been calculated, it
## retrieves the inverse from the cache instead of recomputing it.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  # Ask the cache object for a previously stored inverse.
  temp <- x$getInverse()
  if (!is.null(temp)) {
    # Cache hit: reuse the stored inverse and skip solve().
    message("getting the cached data")
    return(temp)
  }
  # Cache miss: fetch the matrix, invert it, and store the result so
  # later calls can reuse it.
  mat <- x$get()
  temp <- solve(mat, ...)
  x$setInverse(temp)
  temp
}
|
## Started by saving data for Course Project 1 to working directory
## Produces plot4.png: a 2x2 panel of household power-consumption plots
## for 2007-02-01 and 2007-02-02.
# read data and store to data frame gr1
# noted that data is delimited by ";", NA values are denoted by "?"
gr1 <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, na.strings = "?")
# recategorize data in field "Date" as date, from text format dd/mm/yyyy
gr1$Date <- as.Date(gr1$Date, "%d/%m/%Y")
# subset data for specified dates
grsub <- subset(gr1, Date == as.Date("2007-02-01") | Date == as.Date("2007-02-02"))
# Combine date and time into a single datetime column. Two fixes vs the
# original:
#  * paste0() glued date and time with no separator ("2007-02-0100:00:00"),
#    which only parsed because strptime() treats a space in the format as
#    "zero or more whitespace" -- fragile and platform-dependent. paste()
#    inserts the expected space.
#  * strptime() returns POSIXlt, which is not a recommended column type for
#    data frames; as.POSIXct() is the safe choice and plots identically.
grsub$dt <- as.POSIXct(paste(grsub$Date, grsub$Time), format = "%Y-%m-%d %H:%M:%S")
# set up plot area 2x2
par(mfrow = c(2, 2))
# build plot (1,1): global active power over time
plot(grsub$dt, grsub$Global_active_power, xlab = "", ylab = "Global Active Power", type = "n")
lines(grsub$dt, grsub$Global_active_power)
# build plot (1,2): voltage over time
plot(grsub$dt, grsub$Voltage, xlab = "datetime", ylab = "Voltage", type = "n")
lines(grsub$dt, grsub$Voltage)
# build plot (2,1): the three energy sub-metering series
plot(grsub$dt, grsub$Sub_metering_1, ylab = "Energy sub metering", type = "n", xlab = "")
lines(grsub$dt, grsub$Sub_metering_1)
lines(grsub$dt, grsub$Sub_metering_2, col = "red")
lines(grsub$dt, grsub$Sub_metering_3, col = "blue")
# add legend to plot (2,1)
legend("topright", lty = 1,
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), cex = 0.45, bty = "n")
# build plot (2,2): global reactive power over time
plot(grsub$dt, grsub$Global_reactive_power, xlab = "datetime", ylab = "Global_reactive_power", type = "n")
lines(grsub$dt, grsub$Global_reactive_power)
# export to png (dev.copy keeps the original behavior of also drawing on screen)
dev.copy(png, file = "plot4.png", width = 480, height = 480)
dev.off()
|
/plot4.R
|
no_license
|
andycataldo/ExData_Plotting1
|
R
| false
| false
| 1,640
|
r
|
## Started by saving data for Course Project 1 to working directory
## Produces plot4.png: a 2x2 panel of household power-consumption plots
## for 2007-02-01 and 2007-02-02.
# read data and store to data frame gr1
# noted that data is delimited by ";", NA values are denoted by "?"
gr1 <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, na.strings = "?")
# recategorize data in field "Date" as date, from text format dd/mm/yyyy
gr1$Date <- as.Date(gr1$Date, "%d/%m/%Y")
# subset data for specified dates
grsub <- subset(gr1, Date == as.Date("2007-02-01") | Date == as.Date("2007-02-02"))
# Combine date and time into a single datetime column. Two fixes vs the
# original:
#  * paste0() glued date and time with no separator ("2007-02-0100:00:00"),
#    which only parsed because strptime() treats a space in the format as
#    "zero or more whitespace" -- fragile and platform-dependent. paste()
#    inserts the expected space.
#  * strptime() returns POSIXlt, which is not a recommended column type for
#    data frames; as.POSIXct() is the safe choice and plots identically.
grsub$dt <- as.POSIXct(paste(grsub$Date, grsub$Time), format = "%Y-%m-%d %H:%M:%S")
# set up plot area 2x2
par(mfrow = c(2, 2))
# build plot (1,1): global active power over time
plot(grsub$dt, grsub$Global_active_power, xlab = "", ylab = "Global Active Power", type = "n")
lines(grsub$dt, grsub$Global_active_power)
# build plot (1,2): voltage over time
plot(grsub$dt, grsub$Voltage, xlab = "datetime", ylab = "Voltage", type = "n")
lines(grsub$dt, grsub$Voltage)
# build plot (2,1): the three energy sub-metering series
plot(grsub$dt, grsub$Sub_metering_1, ylab = "Energy sub metering", type = "n", xlab = "")
lines(grsub$dt, grsub$Sub_metering_1)
lines(grsub$dt, grsub$Sub_metering_2, col = "red")
lines(grsub$dt, grsub$Sub_metering_3, col = "blue")
# add legend to plot (2,1)
legend("topright", lty = 1,
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), cex = 0.45, bty = "n")
# build plot (2,2): global reactive power over time
plot(grsub$dt, grsub$Global_reactive_power, xlab = "datetime", ylab = "Global_reactive_power", type = "n")
lines(grsub$dt, grsub$Global_reactive_power)
# export to png (dev.copy keeps the original behavior of also drawing on screen)
dev.copy(png, file = "plot4.png", width = 480, height = 480)
dev.off()
|
# Package test script: exercises R.utils::displayCode() output options and
# verifies its argument validation.
library("R.utils")
# Use mpager so paged output is handled in-process; remember the old option
# so it can be restored at the end of the script.
opager <- options(pager=mpager)
file <- system.file("DESCRIPTION", package="R.utils")
cat("Displaying: ", file, ":\n", sep="")
displayCode(file)
file <- system.file("NEWS.md", package="R.utils")
cat("Displaying: ", file, ":\n", sep="")
# Display a line range without line numbers, wrapped at 65 columns.
displayCode(file, numerate=FALSE, lines=100:110, wrap=65)
file <- system.file("NEWS.md", package="R.utils")
cat("Displaying: ", file, ":\n", sep="")
# Highlight selected lines within the displayed range.
displayCode(file, lines=100:110, wrap=65, highlight=c(101,104:108))
# displayCode() also accepts a connection instead of a file name.
con <- file(file)
displayCode(con, lines=1:10)
# The pager can be passed either as a function or by name.
displayCode(file, lines=1:10, pager=mpager)
displayCode(file, lines=1:10, pager="mpager")
## Exception handling
# Invalid arguments must raise errors: negative line numbers, an empty or
# vector-valued wrap, and an invalid first argument (integer instead of a
# file name or connection).
res <- try(displayCode(file, lines=-10:110), silent=TRUE)
stopifnot(inherits(res, "try-error"))
res <- try(displayCode(file, wrap=integer(0)), silent=TRUE)
stopifnot(inherits(res, "try-error"))
res <- try(displayCode(file, wrap=55:66), silent=TRUE)
stopifnot(inherits(res, "try-error"))
res <- try(displayCode(2L), silent=TRUE)
stopifnot(inherits(res, "try-error"))
# Restore the pager option changed at the top of the script.
options(opager)
|
/tests/displayCode.R
|
no_license
|
HenrikBengtsson/R.utils
|
R
| false
| false
| 1,025
|
r
|
# Package test script: exercises R.utils::displayCode() output options and
# verifies its argument validation.
library("R.utils")
# Use mpager so paged output is handled in-process; remember the old option
# so it can be restored at the end of the script.
opager <- options(pager=mpager)
file <- system.file("DESCRIPTION", package="R.utils")
cat("Displaying: ", file, ":\n", sep="")
displayCode(file)
file <- system.file("NEWS.md", package="R.utils")
cat("Displaying: ", file, ":\n", sep="")
# Display a line range without line numbers, wrapped at 65 columns.
displayCode(file, numerate=FALSE, lines=100:110, wrap=65)
file <- system.file("NEWS.md", package="R.utils")
cat("Displaying: ", file, ":\n", sep="")
# Highlight selected lines within the displayed range.
displayCode(file, lines=100:110, wrap=65, highlight=c(101,104:108))
# displayCode() also accepts a connection instead of a file name.
con <- file(file)
displayCode(con, lines=1:10)
# The pager can be passed either as a function or by name.
displayCode(file, lines=1:10, pager=mpager)
displayCode(file, lines=1:10, pager="mpager")
## Exception handling
# Invalid arguments must raise errors: negative line numbers, an empty or
# vector-valued wrap, and an invalid first argument (integer instead of a
# file name or connection).
res <- try(displayCode(file, lines=-10:110), silent=TRUE)
stopifnot(inherits(res, "try-error"))
res <- try(displayCode(file, wrap=integer(0)), silent=TRUE)
stopifnot(inherits(res, "try-error"))
res <- try(displayCode(file, wrap=55:66), silent=TRUE)
stopifnot(inherits(res, "try-error"))
res <- try(displayCode(2L), silent=TRUE)
stopifnot(inherits(res, "try-error"))
# Restore the pager option changed at the top of the script.
options(opager)
|
#' @title List updates about a person
#' @description Function to list updates about a person from Pipedrive.
#' @param id ID of the person
#' @param start Pagination start (defaults to 0 when NULL)
#' @param limit Items shown per page (defaults to 500 when NULL)
#' @param api_token To validate your requests, you'll need your api_token - this means that our system will need to know who you are and be able to connect all actions you do with your chosen Pipedrive account. Have in mind that a user has a different api_token for each company. Please access the following link for more information: <https://pipedrive.readme.io/docs/how-to-find-the-api-token?utm_source=api_reference>
#' @param company_domain How to get the company domain: <https://pipedrive.readme.io/docs/how-to-get-the-company-domain>
#' @return return an object List()
#' @export
#' @examples \donttest{
#' persons.get.flow(id='e.g.',api_token='token',company_domain='exp')
#' }
persons.get.flow <- function(id, start=NULL, limit=NULL, api_token=NULL, company_domain='api'){
  api_token <- check_api_token_(api_token)
  url <- 'https://{company_domain}.pipedrive.com/v1/persons/{id}/flow?'
  bodyList <- list(id=id, start=start, limit=limit)
  # Scalar defaults: plain if/else instead of ifelse(), which is vectorized,
  # strips attributes, and is slower for a single value.
  bodyList$limit <- if (is.null(limit)) 500 else limit
  bodyList$start <- if (is.null(start)) 0 else start
  bodyList <- clear_list_(bodyList)
  # Append the query-string parameters, then substitute the URL template
  # placeholders. sub() with fixed = TRUE replaces the first literal match,
  # which suffices because each placeholder occurs exactly once.
  url <- paste0(url, prepare_url_parameters_(bodyList))
  url <- sub('{company_domain}', company_domain, url, fixed = TRUE)
  url <- paste0(url, 'api_token={api_token}')
  url <- sub('{api_token}', api_token, url, fixed = TRUE)
  url <- sub('{id}', id, url, fixed = TRUE)
  return(get_all_(url))
}
|
/R/persons.get.flow.R
|
no_license
|
cran/Rpipedrive
|
R
| false
| false
| 1,598
|
r
|
#' @title List updates about a person
#' @description Function to list updates about a person from Pipedrive.
#' @param id ID of the person
#' @param start Pagination start (defaults to 0 when NULL)
#' @param limit Items shown per page (defaults to 500 when NULL)
#' @param api_token To validate your requests, you'll need your api_token - this means that our system will need to know who you are and be able to connect all actions you do with your chosen Pipedrive account. Have in mind that a user has a different api_token for each company. Please access the following link for more information: <https://pipedrive.readme.io/docs/how-to-find-the-api-token?utm_source=api_reference>
#' @param company_domain How to get the company domain: <https://pipedrive.readme.io/docs/how-to-get-the-company-domain>
#' @return return an object List()
#' @export
#' @examples \donttest{
#' persons.get.flow(id='e.g.',api_token='token',company_domain='exp')
#' }
persons.get.flow <- function(id, start=NULL, limit=NULL, api_token=NULL, company_domain='api'){
  api_token <- check_api_token_(api_token)
  url <- 'https://{company_domain}.pipedrive.com/v1/persons/{id}/flow?'
  bodyList <- list(id=id, start=start, limit=limit)
  # Scalar defaults: plain if/else instead of ifelse(), which is vectorized,
  # strips attributes, and is slower for a single value.
  bodyList$limit <- if (is.null(limit)) 500 else limit
  bodyList$start <- if (is.null(start)) 0 else start
  bodyList <- clear_list_(bodyList)
  # Append the query-string parameters, then substitute the URL template
  # placeholders. sub() with fixed = TRUE replaces the first literal match,
  # which suffices because each placeholder occurs exactly once.
  url <- paste0(url, prepare_url_parameters_(bodyList))
  url <- sub('{company_domain}', company_domain, url, fixed = TRUE)
  url <- paste0(url, 'api_token={api_token}')
  url <- sub('{api_token}', api_token, url, fixed = TRUE)
  url <- sub('{id}', id, url, fixed = TRUE)
  return(get_all_(url))
}
|
#########################################################################################
# IMPORT LIBRARIES
#########################################################################################
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
if(!require(dplyr)) install.packages("dplyr", repos = "http://cran.us.r-project.org")
library(tidyverse)
library(caret)
library(data.table)
library(dplyr, warn.conflicts = FALSE)
# Suppress summarise info
options(dplyr.summarise.inform = FALSE)
#########################################################################################
# Create edx set, validation set (final hold-out test set)
#########################################################################################
# Note: this process could take a couple of minutes
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
# ratings.dat uses "::" as a separator; rewrite it as tab so fread can parse it.
ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
                 col.names = c("userId", "movieId", "rating", "timestamp"))
# movies.dat: split each line into movieId, title, genres on "::".
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
# if using R 4.0 or later:
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(movieId),
                                           title = as.character(title),
                                           genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
set.seed(2, sample.kind="Rounding") # if using R 3.5 or earlier, use `set.seed(1)`
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
  semi_join(edx, by = "movieId") %>%
  semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
# Free intermediate objects to reclaim memory.
rm(dl, ratings, movies, test_index, temp, movielens, removed)
#########################################################################################
# DATA ANALYSIS ON CLEAN DATASET (edX Quiz)
#########################################################################################
# # Dimension edx dataset
# dim(edx)
#
# # Number of ratings
# table(edx$rating)
#
# # Number of unique movies
# n_distinct(edx$movieId)
#
# # Number of different users
# n_distinct(edx$userId)
#
# # How many movie ratings are in each genre in the edx dataset?
# genrecount <- edx %>%
# separate_rows(genres, sep = "\\|") %>%
# group_by(genres) %>%
# summarise(number = n()) %>%
# arrange(desc(number))
#
# genrecount
#
# #Barplot genrecount
#
# barplot(genrecount$number,
# main = "Number of movies per genre",
# names.arg = genrecount$genres)
#
#
# # Which movie has the greatest number of ratings?
# num_rating <- edx %>%
# group_by(title) %>%
# summarise(number = n()) %>%
# arrange(desc(number))
#
# # Number of ratings
# barplot(table(edx$rating))
#########################################################################################
# PREDICTION RATING
#########################################################################################
# Loss function: root mean squared error between observed and predicted ratings.
RMSE <- function(true_ratings, predicted_ratings){
  residuals <- true_ratings - predicted_ratings
  sqrt(mean(residuals^2))
}
# RMSE OBJECTTIVES
#
# 0 points: No RMSE reported AND/OR code used to generate the RMSE appears to violate the edX Honor Code.
# 5 points: RMSE >= 0.90000 AND/OR the reported RMSE is the result of overtraining (validation set - the final hold-out test set - ratings used for anything except reporting the final RMSE value) AND/OR the reported RMSE is the result of simply copying and running code provided in previous courses in the series.
# 10 points: 0.86550 <= RMSE <= 0.89999
# 15 points: 0.86500 <= RMSE <= 0.86549
# 20 points: 0.86490 <= RMSE <= 0.86499
# 25 points: RMSE < 0.86490
########################
# FIRST APPROACH
########################
# Let's suppose that all movies receive the same rating; what would that be?
# Average rating over all of edx (the global mean, used as the baseline prediction)
mu_hat <- mean(edx$rating)
# RMSE of predicting the overall average for every rating
rmse_1 <- RMSE(validation$rating, mu_hat)
rmse_1
########################
# SECOND APPROACH
########################
# We know that certain movies have higher ratings than others; include a
# per-movie bias term in the prediction.
# Compute each movie's average deviation from the global mean (movie bias b_m)
movie_averages <- edx %>%
  group_by(movieId) %>%
  summarise(b_m = mean(rating - mu_hat))
# Compute the predicted rating using the movie bias
predicted_ratings <- mu_hat + validation %>%
  left_join(movie_averages, by='movieId') %>%
  pull(b_m)
# RMSE with movie bias
rmse_2 <- RMSE(predicted_ratings, validation$rating)
rmse_2
# This is still too high; let's use the data to improve our approach.
# Plot average rating for users with over 100 ratings
# NOTE(review): filter(n() >= 100) is applied *after* summarise(), where n()
# is the total number of user rows -- so this filter keeps everyone and does
# not restrict to users with >= 100 ratings; filtering on n() inside the
# grouped data before summarising would match the stated intent. TODO confirm.
edx %>%
  group_by(userId) %>%
  summarise(a_u = mean(rating)) %>%
  filter(n()>=100) %>%
  ggplot(aes(a_u)) +
  geom_histogram(bins = 30, color = "black")
# We notice that some users tend to give more positive or negative reviews:
# the user bias.
# Calculate each user's bias b_u after removing the global mean and movie bias
user_averages <- edx %>%
  left_join(movie_averages, by="movieId") %>%
  group_by(userId) %>%
  summarise(b_u = mean(rating - mu_hat - b_m))
# Compute predicted ratings including movie and user bias
predicted_ratings <- validation %>%
  left_join(movie_averages, by='movieId') %>%
  left_join(user_averages, by='userId') %>%
  mutate(pred = mu_hat + b_m + b_u) %>%
  pull(pred)
# RMSE with movie + user effects
rmse_3 <- RMSE(predicted_ratings, validation$rating)
rmse_3
# By observing the predicted ratings we notice that some of them are lower
# than 0.5 or greater than 5: clamp them to the valid rating range.
predicted_ratings_limit <- pmax(pmin(predicted_ratings, 5), 0.5) #limit values lower than 0.5 & values greater than 5
# Calculate RMSE for the clamped predictions
rmse_4 <- RMSE(predicted_ratings_limit, validation$rating)
rmse_4
# Results table summarising the four approaches so far
rmse_results <- tibble(method = c("Overall average", "Movie effect","Movie + User effect","With limits"), RMSE = c(rmse_1, rmse_2, rmse_3, rmse_4))
rmse_results
########################
# THIRD APPROACH
########################
# We now use regularization to improve our prediction
# In setting the movie penalty, we will create a list between 0 and 10 with increments of 1
penalties <- seq(0, 10, 1)
# Compute RMSE on edx dataset to set best penalty
# NOTE(review): the penalty is chosen by minimising RMSE on the training set
# (edx) itself, which tends to favour lambda = 0; cross-validation or a
# held-out tuning split inside edx is the usual approach. TODO confirm intent.
m_rmses <- sapply(penalties, function(p){
  reg_movie_avgs <- edx %>%
    group_by(movieId) %>%
    summarize(regmoviebias = sum(rating - mu_hat)/(n()+p))
  predicted_ratings <-
    edx %>%
    left_join(reg_movie_avgs, by = "movieId") %>%
    left_join(user_averages, by = "userId") %>%
    mutate(regmoviebias = mu_hat + b_u + regmoviebias) %>%
    .$regmoviebias
  return(RMSE(predicted_ratings, edx$rating))
})
# Determine the penalty with the lowest (training) RMSE
moviepenalty_optimal <- penalties[which.min(m_rmses)]
# Use best penalty to compute regularized movie averages
reg_movie_avgs <- edx %>%
  group_by(movieId) %>%
  summarize(regmoviebias = sum(rating - mu_hat)/(n()+moviepenalty_optimal))
# Compute prediction for validation dataset using the penalty calculated above
# NOTE(review): user_averages here is the *unregularized* user bias from the
# earlier section, mixed with the regularized movie bias -- presumably an
# intermediate step before the joint regularization below; verify.
predicted_ratings <-
  validation %>%
  left_join(reg_movie_avgs, by = "movieId") %>%
  left_join(user_averages, by = "userId") %>%
  mutate(regmoviebias = mu_hat + b_u + regmoviebias) %>%
  .$regmoviebias
# Compute RMSE for the validation dataset
regularized_movieeffects <- RMSE(predicted_ratings, validation$rating)
regularized_movieeffects
# We do the same for the user bias: regularize it as well.
# Set penalties to test
penalties <- seq(0, 1, 0.25)
# Compute RMSE on edx dataset to set best penalty (same tuning caveat as above)
u_rmses <- sapply(penalties, function(p){
  reg_user_avgs <- edx %>%
    left_join(reg_movie_avgs, by="movieId") %>%
    group_by(userId) %>%
    summarize(reguserbias = sum(rating - regmoviebias - mu_hat)/(n()+p))
  predicted_ratings <-
    edx %>%
    left_join(reg_movie_avgs, by = "movieId") %>%
    left_join(reg_user_avgs, by = "userId") %>%
    mutate(regusermoviebias = mu_hat + regmoviebias + reguserbias) %>%
    .$regusermoviebias
  return(RMSE(predicted_ratings, edx$rating))
})
# Determine the penalty with the lowest (training) RMSE
userpenalty_optimal <- penalties[which.min(u_rmses)] #determine which is lowest
# Use best penalty to compute user averages using both the movie and user bias regularization
reg_user_avgs <- edx %>%
  left_join(reg_movie_avgs, by="movieId") %>%
  group_by(userId) %>%
  summarize(reguserbias = sum(rating - regmoviebias - mu_hat)/(n()+userpenalty_optimal))
# Compute prediction for validation dataset using the penalties calculated above
reg_predicted_ratings <-
  validation %>%
  left_join(reg_movie_avgs, by = "movieId") %>%
  left_join(reg_user_avgs, by = "userId") %>%
  mutate(regusermovie = mu_hat + regmoviebias + reguserbias) %>%
  .$regusermovie
# Compute RMSE
regularized_effects <- RMSE(reg_predicted_ratings, validation$rating)
# Fix lower and upper limits
# NOTE(review): this clamps `predicted_ratings` (from the movie-only section
# above), not `reg_predicted_ratings` computed just before -- likely a bug;
# confirm which vector was meant to be limited.
reg_predicted_ratings_limit <- pmax(pmin(predicted_ratings, 5), 0.5)
regularized_effects_limit <- RMSE(reg_predicted_ratings_limit, validation$rating)
regularized_effects_limit
##########################################################
# Neural Network: Data transformation
##########################################################
# Encode the timestamp as 0/1 depending on the moment ratings start to have
# 0.5 granularity (epoch second 1045526400).
edx <- edx %>% mutate(timestamp_binary = ifelse(edx$timestamp > 1045526400, 1, 0))
validation <- validation %>% mutate(timestamp_binary = ifelse(validation$timestamp > 1045526400, 1, 0))
############
# One-hot encoding of genres
############
# Split the pipe-separated genre strings into one column per genre slot.
genres <- as.data.frame(edx$genres, stringsAsFactors=FALSE)
genres_v <- as.data.frame(validation$genres, stringsAsFactors=FALSE)
# n_distinct(edx_copy$genres)
genres2 <- as.data.frame(tstrsplit(genres[,1], '[|]',
                                   type.convert=TRUE),
                         stringsAsFactors=FALSE)
genres2_v <- as.data.frame(tstrsplit(genres_v[,1], '[|]',
                                     type.convert=TRUE),
                           stringsAsFactors=FALSE)
genre_list <- c("Action", "Adventure", "Animation", "Children",
                "Comedy", "Crime","Documentary", "Drama", "Fantasy",
                "Film-Noir", "Horror", "Imax", "Musical", "Mystery","Romance",
                "Sci-Fi", "Thriller", "War", "Western") # There are 19 genres in total
# Pre-allocate the indicator matrices; the extra first row holds the genre
# names used for lookup in the loops below.
genre_matrix <- matrix(0, length(edx$movieId)+1, n_distinct(genre_list))
genre_matrix[1,] <- genre_list #set first row to genre list
genre_matrix_v <- matrix(0, length(validation$movieId)+1, n_distinct(genre_list))
genre_matrix_v[1,] <- genre_list #set first row to genre list
colnames(genre_matrix) <- genre_list #set column names to genre list
colnames(genre_matrix_v) <- genre_list #set column names to genre list
# iterate through matrix, setting a 1 for every genre present in each row
# NOTE(review): this nested scalar loop runs once per row x genre-slot over
# millions of ratings and will be very slow in R; a vectorised approach
# (e.g. one grepl/str_detect pass per genre) does the same work in 19 passes.
for (i in 1:nrow(genres2)) {
  for (c in 1:ncol(genres2)) {
    genmat_col <- which(genre_matrix[1,] == genres2[i,c])
    genre_matrix[i+1,genmat_col] <- 1L
  }
}
for (i in 1:nrow(genres2_v)) {
  for (c in 1:ncol(genres2_v)) {
    genmat_col <- which(genre_matrix_v[1,] == genres2_v[i,c])
    genre_matrix_v[i+1,genmat_col] <- 1L
  }
}
#convert into dataframe
genre_matrix <- as.data.frame(genre_matrix[-1,], stringsAsFactors=FALSE) #remove first row, which was the genre list
genre_matrix_v <- as.data.frame(genre_matrix_v[-1,], stringsAsFactors=FALSE)
# Bind user/movie/rating columns with the genre indicators and binary timestamp.
edx_by_gen <- cbind(edx[,1:3], genre_matrix, edx$timestamp_binary)
val_by_gen <- cbind(validation[,1:3], genre_matrix_v, validation$timestamp_binary)
colnames(edx_by_gen) <- c("userId", "movieId", "rating", genre_list, "timestamp_binary")
colnames(val_by_gen) <- c("userId", "movieId", "rating", genre_list, "timestamp_binary")
# Coerce every column to numeric and collapse to a plain matrix.
edx_by_gen <- as.matrix(sapply(edx_by_gen, as.numeric))
val_by_gen <- as.matrix(sapply(val_by_gen, as.numeric))
# remove intermediary matrices
rm(genre_matrix, genre_matrix_v, genres, genres_v, genres2, genres2_v)
# Multiply the rating by the OHE for genre (columns 4:22 are the indicators),
# so each genre column holds the rating where the genre applies, else 0.
edx_by_gen_mult <- cbind(edx_by_gen[,1:2], edx_by_gen[,"rating"], sweep(edx_by_gen[,4:22], 1, edx_by_gen[,"rating"], "*"), edx_by_gen[,"timestamp_binary"])
val_by_gen_mult <- cbind(val_by_gen[,1:2], val_by_gen[,"rating"], sweep(val_by_gen[,4:22], 1, val_by_gen[,"rating"], "*"), val_by_gen[,"timestamp_binary"])
colnames(edx_by_gen_mult) <- c("userId", "movieId", "rating", "Action", "Adventure", "Animation", "Children",
                               "Comedy", "Crime", "Documentary", "Drama", "Fantasy",
                               "Film.Noir", "Horror", "Imax", "Musical", "Mystery","Romance",
                               "Sci.Fi", "Thriller", "War", "Western", "timestamp_binary")
colnames(val_by_gen_mult) <- c("userId", "movieId", "rating", "Action", "Adventure", "Animation", "Children",
                               "Comedy", "Crime", "Documentary", "Drama", "Fantasy",
                               "Film.Noir", "Horror", "Imax", "Musical", "Mystery","Romance",
                               "Sci.Fi", "Thriller", "War", "Western", "timestamp_binary")
# Transform the multiplied one-hot-encoded matrix into a user profile for genre.
user_profiles <- edx_by_gen_mult %>%
as.data.frame() %>%
group_by(userId) %>%
summarise(Action_u = mean(Action),
Adventure_u = mean(Adventure),
Animation_u = mean(Animation),
Children_u = mean(Children),
Comedy_u = mean(Comedy),
Crime_u = mean(Crime),
Documentary_u = mean(Documentary),
Drama_u = mean(Drama),
Fantasy_u = mean(Fantasy),
FilmNoir_u = mean(Film.Noir),
Horror_u = mean(Horror),
Imax_u = mean(Imax),
Musical_u = mean(Musical),
Mystery_u = mean(Mystery),
Romance_u = mean(Romance),
Sci.Fi_u = mean(Sci.Fi),
Thriller_u = mean(Thriller),
War_u = mean(War),
Western_u = mean(Western)) %>%
as.data.frame()
user_profiles[is.na(user_profiles)] <- 0
# Transform the Test and Validation datasets to include the user profiles
edx_gen_norm <- edx %>%
left_join(user_profiles, by="userId") %>%
select(userId,
movieId,
rating,
Action_u,
Adventure_u,
Animation_u,
Children_u,
Comedy_u,
Crime_u,
Documentary_u,
Drama_u,
Fantasy_u,
FilmNoir_u,
Horror_u,
Imax_u,
Musical_u,
Mystery_u,
Romance_u,
Sci.Fi_u,
Thriller_u,
War_u,
Western_u,
timestamp_binary)
val_gen_norm <- validation %>%
left_join(user_profiles, by="userId") %>%
select(userId,
movieId,
rating,
Action_u,
Adventure_u,
Animation_u,
Children_u,
Comedy_u,
Crime_u,
Documentary_u,
Drama_u,
Fantasy_u,
FilmNoir_u,
Horror_u,
Imax_u,
Musical_u,
Mystery_u,
Romance_u,
Sci.Fi_u,
Thriller_u,
War_u,
Western_u,
timestamp_binary)
library(h2o)
h2o.init(nthreads = -1, max_mem_size = "16G")
##################
# Define the model in h2o
# turn the matrices into h2o objects
edx_h2o <- as.h2o(edx_gen_norm)
val_h2o <- as.h2o(val_gen_norm)
# Specify labels and predictors
y <- "rating"
x <- setdiff(names(edx_h2o), y)
# Turn the labels into categorical data.
edx_h2o[,y] <- as.factor(edx_h2o[,y])
val_h2o[,y] <- as.factor(val_h2o[,y])
# Train a deep learning model and validate on test set
DL_model <- h2o.deeplearning(
x = x,
y = y,
training_frame = edx_h2o,
validation_frame = val_h2o,
distribution = "AUTO",
activation = "RectifierWithDropout",
hidden = c(256, 256, 256, 256),
input_dropout_ratio = 0.2,
sparse = TRUE,
epochs = 15,
stopping_rounds = 3,
stopping_tolerance = 0.01, #stops if it doesn't improve at least 0.1%
stopping_metric = "AUTO",
nfolds = 10,
variable_importances = TRUE,
shuffle_training_data = TRUE,
mini_batch_size = 2000
)
# Get RMSE
DL_RMSE_validation <- h2o.rmse(DL_model, valid = TRUE) # Validation RMSE = 0.8236556
DL_RMSE_training <- h2o.rmse(DL_model) # Train RMSE = 0.8241222
|
/MovieLens_Dataset_Analysis.R
|
no_license
|
hfmart1/HarvardX_PH125.9x_MovieLens
|
R
| false
| false
| 17,002
|
r
|
#########################################################################################
# IMPORT LIBRARIES
#########################################################################################
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
if(!require(dplyr)) install.packages("dplyr", repos = "http://cran.us.r-project.org")
library(tidyverse)
library(caret)
library(data.table)
library(dplyr, warn.conflicts = FALSE)
# Suppress summarise info
options(dplyr.summarise.inform = FALSE)
#########################################################################################
# Create edx set, validation set (final hold-out test set)
#########################################################################################
# Note: this process could take a couple of minutes
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
# Download the zipped data into a temporary file, then parse the two
# "::"-delimited flat files it contains (ratings and movie metadata).
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
                 col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
# if using R 4.0 or later:
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(movieId),
                                           title = as.character(title),
                                           genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
set.seed(2, sample.kind="Rounding") # if using R 3.5 or earlier, use `set.seed(1)`
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
# (semi_join keeps only rows whose user/movie also appear in training data)
validation <- temp %>%
  semi_join(edx, by = "movieId") %>%
  semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
# (anti_join with no `by` matches on all shared columns)
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
rm(dl, ratings, movies, test_index, temp, movielens, removed)
#########################################################################################
# DATA ANALYSIS ON CLEAN DATASET (edX Quiz)
#########################################################################################
# # Dimension edx dataset
# dim(edx)
#
# # Number of ratings
# table(edx$rating)
#
# # Number of unique movies
# n_distinct(edx$movieId)
#
# # Number of different users
# n_distinct(edx$userId)
#
# # How many movie ratings are in each genre in the edx dataset?
# genrecount <- edx %>%
# separate_rows(genres, sep = "\\|") %>%
# group_by(genres) %>%
# summarise(number = n()) %>%
# arrange(desc(number))
#
# genrecount
#
# #Barplot genrecount
#
# barplot(genrecount$number,
# main = "Number of movies per genre",
# names.arg = genrecount$genres)
#
#
# # Which movie has the greatest number of ratings?
# num_rating <- edx %>%
# group_by(title) %>%
# summarise(number = n()) %>%
# arrange(desc(number))
#
# # Number of ratings
# barplot(table(edx$rating))
#########################################################################################
# PREDICTION RATING
#########################################################################################
# Define loss function
# Root-mean-squared error between observed and predicted ratings.
# Both arguments are numeric vectors of the same length; returns one number.
RMSE <- function(true_ratings, predicted_ratings) {
  residuals <- true_ratings - predicted_ratings
  sqrt(mean(residuals^2))
}
# RMSE OBJECTTIVES
#
# 0 points: No RMSE reported AND/OR code used to generate the RMSE appears to violate the edX Honor Code.
# 5 points: RMSE >= 0.90000 AND/OR the reported RMSE is the result of overtraining (validation set - the final hold-out test set - ratings used for anything except reporting the final RMSE value) AND/OR the reported RMSE is the result of simply copying and running code provided in previous courses in the series.
# 10 points: 0.86550 <= RMSE <= 0.89999
# 15 points: 0.86500 <= RMSE <= 0.86549
# 20 points: 0.86490 <= RMSE <= 0.86499
# 25 points: RMSE < 0.86490
########################
# FIRST APPROACH
########################
# Baseline model: predict the same value (the global mean rating) for
# every movie/user pair.
# Lets suppose that all movies receive the same rating, what would that be?
# Average rating for movies
mu_hat <- mean(edx$rating)
# RMSE average rating
rmse_1 <- RMSE(validation$rating, mu_hat)
rmse_1
########################
# SECOND APPROACH
########################
# Certain movies are rated systematically higher or lower than others; model
# this with a per-movie effect ("movie bias") b_m on top of the global mean.
# Compute difference to average per movie
movie_averages <- edx %>%
  group_by(movieId) %>%
  summarise(b_m = mean(rating - mu_hat))
# Compute the predicted rating using the movie bias
predicted_ratings <- mu_hat + validation %>%
  left_join(movie_averages, by='movieId') %>%
  pull(b_m)
# RMSE movie bias
rmse_2 <- RMSE(predicted_ratings, validation$rating)
rmse_2
# This is still too high, lets use data to improve our approach
# Plot average rating for users with over 100 ratings.
# BUG FIX: the >=100 filter must run while the data are still grouped by user
# and BEFORE summarise(); in the original it ran after summarise(), where n()
# counts all remaining rows, making the filter a no-op.
edx %>%
  group_by(userId) %>%
  filter(n() >= 100) %>%
  summarise(a_u = mean(rating)) %>%
  ggplot(aes(a_u)) +
  geom_histogram(bins = 30, color = "black")
# Some users rate systematically high or low: the "user bias" b_u.
# Calculate user rating average and compute user bias with movie bias for each movie
user_averages <- edx %>%
  left_join(movie_averages, by="movieId") %>%
  group_by(userId) %>%
  summarise(b_u = mean(rating - mu_hat - b_m))
# Compute predicted ratings including movie and user bias
predicted_ratings <- validation %>%
  left_join(movie_averages, by='movieId') %>%
  left_join(user_averages, by='userId') %>%
  mutate(pred = mu_hat + b_m + b_u) %>%
  pull(pred)
# RMSE user effect
rmse_3 <- RMSE(predicted_ratings, validation$rating)
rmse_3
# Predictions can fall outside the valid rating range [0.5, 5]; clamp them.
predicted_ratings_limit <- pmax(pmin(predicted_ratings, 5), 0.5) #limit values lower than 0.5 & values greater than 5
# Calculate RMSE
rmse_4 <- RMSE(predicted_ratings_limit, validation$rating)
rmse_4
# Results
rmse_results <- tibble(method = c("Overall average", "Movie effect","Movie + User effect","With limits"), RMSE = c(rmse_1, rmse_2, rmse_3, rmse_4))
rmse_results
########################
# THIRD APPROACH
########################
# We now use regularization to improve our prediction
# Regularization shrinks effects estimated from few ratings toward zero by
# adding a penalty term p to the denominator of each group mean.
#In setting the movie penalty, we will create a list between 0 and 10 with increments of 1
penalties <- seq(0, 10, 1)
# Compute RMSE on edx dataset to set best penalty
# NOTE(review): the penalty is tuned against the training set (edx) itself
# rather than a held-out or cross-validation split -- confirm this is intended.
m_rmses <- sapply(penalties, function(p){
  reg_movie_avgs <- edx %>%
    group_by(movieId) %>%
    summarize(regmoviebias = sum(rating - mu_hat)/(n()+p))
  predicted_ratings <-
    edx %>%
    left_join(reg_movie_avgs, by = "movieId") %>%
    left_join(user_averages, by = "userId") %>%
    mutate(regmoviebias = mu_hat + b_u + regmoviebias) %>%
    .$regmoviebias
  return(RMSE(predicted_ratings, edx$rating))
})
# Determine the lowest penalty
moviepenalty_optimal <- penalties[which.min(m_rmses)]
# Use best penalty to compute movie averages
reg_movie_avgs <- edx %>%
  group_by(movieId) %>%
  summarize(regmoviebias = sum(rating - mu_hat)/(n()+moviepenalty_optimal))
# Compute prediction for validation dataset using the penalties calculated above
predicted_ratings <-
  validation %>%
  left_join(reg_movie_avgs, by = "movieId") %>%
  left_join(user_averages, by = "userId") %>%
  mutate(regmoviebias = mu_hat + b_u + regmoviebias) %>%
  .$regmoviebias
# Compute RMSE for the validation dataset
regularized_movieeffects <- RMSE(predicted_ratings, validation$rating)
regularized_movieeffects
#We do the same for the user bias, we use regularization
# Set penalties to test
penalties <- seq(0, 1, 0.25)
# Compute RMSE on edx dataset to set best penalty
u_rmses <- sapply(penalties, function(p){
  reg_user_avgs <- edx %>%
    left_join(reg_movie_avgs, by="movieId") %>%
    group_by(userId) %>%
    summarize(reguserbias = sum(rating - regmoviebias - mu_hat)/(n()+p))
  predicted_ratings <-
    edx %>%
    left_join(reg_movie_avgs, by = "movieId") %>%
    left_join(reg_user_avgs, by = "userId") %>%
    mutate(regusermoviebias = mu_hat + regmoviebias + reguserbias) %>%
    .$regusermoviebias
  return(RMSE(predicted_ratings, edx$rating))
})
# Determine the lowest penalty
userpenalty_optimal <- penalties[which.min(u_rmses)] #determine which is lowest
# Use best penalty to compute movie averages using both the movie and user bias regularization
reg_user_avgs <- edx %>%
  left_join(reg_movie_avgs, by="movieId") %>%
  group_by(userId) %>%
  summarize(reguserbias = sum(rating - regmoviebias - mu_hat)/(n()+userpenalty_optimal))
# Compute prediction for validation dataset using the penalties calculated above
reg_predicted_ratings <-
  validation %>%
  left_join(reg_movie_avgs, by = "movieId") %>%
  left_join(reg_user_avgs, by = "userId") %>%
  mutate(regusermovie = mu_hat + regmoviebias + reguserbias) %>%
  .$regusermovie
# Compute RMSE of the fully regularized (movie + user) predictions
regularized_effects <- RMSE(reg_predicted_ratings, validation$rating)
# Clamp predictions to the valid rating range [0.5, 5] before scoring.
# BUG FIX: clamp the regularized predictions (reg_predicted_ratings), not the
# earlier movie-only predictions still stored in `predicted_ratings`.
reg_predicted_ratings_limit <- pmax(pmin(reg_predicted_ratings, 5), 0.5)
regularized_effects_limit <- RMSE(reg_predicted_ratings_limit, validation$rating)
regularized_effects_limit
##########################################################
# Neural Network: Data transformation
##########################################################
# Mutate the timestamp to be 0 or 1 depending on the moment ratings start to have 0.5 granularity = 1045526400
edx <- edx %>% mutate(timestamp_binary = ifelse(edx$timestamp > 1045526400, 1, 0))
validation <- validation %>% mutate(timestamp_binary = ifelse(validation$timestamp > 1045526400, 1, 0))
############
# One-hot encoding of genres
############
# Split the pipe-delimited genre string into one column per listed genre.
genres <- as.data.frame(edx$genres, stringsAsFactors=FALSE)
genres_v <- as.data.frame(validation$genres, stringsAsFactors=FALSE)
# n_distinct(edx_copy$genres)
genres2 <- as.data.frame(tstrsplit(genres[,1], '[|]',
                                   type.convert=TRUE),
                         stringsAsFactors=FALSE)
genres2_v <- as.data.frame(tstrsplit(genres_v[,1], '[|]',
                                     type.convert=TRUE),
                           stringsAsFactors=FALSE)
genre_list <- c("Action", "Adventure", "Animation", "Children",
                "Comedy", "Crime","Documentary", "Drama", "Fantasy",
                "Film-Noir", "Horror", "Imax", "Musical", "Mystery","Romance",
                "Sci-Fi", "Thriller", "War", "Western") # There are 19 genres in total
# One indicator matrix per split; row 1 temporarily holds the genre names
# (which coerces the whole numeric matrix to character -- it is dropped below).
genre_matrix <- matrix(0, length(edx$movieId)+1, n_distinct(genre_list))
genre_matrix[1,] <- genre_list #set first row to genre list
genre_matrix_v <- matrix(0, length(validation$movieId)+1, n_distinct(genre_list))
genre_matrix_v[1,] <- genre_list #set first row to genre list
colnames(genre_matrix) <- genre_list #set column names to genre list
colnames(genre_matrix_v) <- genre_list #set column names to genre list
#iterate through matrix
# Set cell (i+1, g) to 1 when rating i lists genre g.
# NOTE(review): 1:nrow()/1:ncol() misbehave on empty input; seq_len() would be
# safer, and this double loop is slow on ~9M rows -- consider vectorizing.
for (i in 1:nrow(genres2)) {
  for (c in 1:ncol(genres2)) {
    genmat_col <- which(genre_matrix[1,] == genres2[i,c])
    genre_matrix[i+1,genmat_col] <- 1L
  }
}
for (i in 1:nrow(genres2_v)) {
  for (c in 1:ncol(genres2_v)) {
    genmat_col <- which(genre_matrix_v[1,] == genres2_v[i,c])
    genre_matrix_v[i+1,genmat_col] <- 1L
  }
}
#convert into dataframe
genre_matrix <- as.data.frame(genre_matrix[-1,], stringsAsFactors=FALSE) #remove first row, which was the genre list
genre_matrix_v <- as.data.frame(genre_matrix_v[-1,], stringsAsFactors=FALSE)
# Bind id/label columns, indicator columns and the binary timestamp together.
edx_by_gen <- cbind(edx[,1:3], genre_matrix, edx$timestamp_binary)
val_by_gen <- cbind(validation[,1:3], genre_matrix_v, validation$timestamp_binary)
colnames(edx_by_gen) <- c("userId", "movieId", "rating", genre_list, "timestamp_binary")
colnames(val_by_gen) <- c("userId", "movieId", "rating", genre_list, "timestamp_binary")
# Coerce every column (character after the OHE step above) back to numeric.
edx_by_gen <- as.matrix(sapply(edx_by_gen, as.numeric))
val_by_gen <- as.matrix(sapply(val_by_gen, as.numeric))
# remove intermediary matrices
rm(genre_matrix, genre_matrix_v, genres, genres_v, genres2, genres2_v)
# Multiply the rating by the OHE for genre
# (columns 4:22 are the 19 genre indicators; sweep scales each row by rating)
edx_by_gen_mult <- cbind(edx_by_gen[,1:2], edx_by_gen[,"rating"], sweep(edx_by_gen[,4:22], 1, edx_by_gen[,"rating"], "*"), edx_by_gen[,"timestamp_binary"])
val_by_gen_mult <- cbind(val_by_gen[,1:2], val_by_gen[,"rating"], sweep(val_by_gen[,4:22], 1, val_by_gen[,"rating"], "*"), val_by_gen[,"timestamp_binary"])
colnames(edx_by_gen_mult) <- c("userId", "movieId", "rating", "Action", "Adventure", "Animation", "Children",
                               "Comedy", "Crime", "Documentary", "Drama", "Fantasy",
                               "Film.Noir", "Horror", "Imax", "Musical", "Mystery","Romance",
                               "Sci.Fi", "Thriller", "War", "Western", "timestamp_binary")
colnames(val_by_gen_mult) <- c("userId", "movieId", "rating", "Action", "Adventure", "Animation", "Children",
                               "Comedy", "Crime", "Documentary", "Drama", "Fantasy",
                               "Film.Noir", "Horror", "Imax", "Musical", "Mystery","Romance",
                               "Sci.Fi", "Thriller", "War", "Western", "timestamp_binary")
# Transform the multiplied one-hot-encoded matrix into a user profile for genre.
# Each profile value is the user's mean of (rating * genre indicator) over all
# of that user's ratings; rows where the genre is absent contribute zeros.
user_profiles <- edx_by_gen_mult %>%
  as.data.frame() %>%
  group_by(userId) %>%
  summarise(Action_u = mean(Action),
            Adventure_u = mean(Adventure),
            Animation_u = mean(Animation),
            Children_u = mean(Children),
            Comedy_u = mean(Comedy),
            Crime_u = mean(Crime),
            Documentary_u = mean(Documentary),
            Drama_u = mean(Drama),
            Fantasy_u = mean(Fantasy),
            FilmNoir_u = mean(Film.Noir),
            Horror_u = mean(Horror),
            Imax_u = mean(Imax),
            Musical_u = mean(Musical),
            Mystery_u = mean(Mystery),
            Romance_u = mean(Romance),
            Sci.Fi_u = mean(Sci.Fi),
            Thriller_u = mean(Thriller),
            War_u = mean(War),
            Western_u = mean(Western)) %>%
  as.data.frame()
# Replace any missing profile entries with 0 (neutral preference).
user_profiles[is.na(user_profiles)] <- 0
# Attach each user's genre-preference profile to the train and validation
# sets, keeping only the id columns, the rating label, the 19 profile
# columns and the binary timestamp. All profile columns share the "_u"
# suffix, so tidyselect's ends_with() replaces the explicit 19-name list
# while selecting exactly the same columns in the same order.
edx_gen_norm <- edx %>%
  left_join(user_profiles, by = "userId") %>%
  select(userId, movieId, rating, ends_with("_u"), timestamp_binary)
val_gen_norm <- validation %>%
  left_join(user_profiles, by = "userId") %>%
  select(userId, movieId, rating, ends_with("_u"), timestamp_binary)
library(h2o)
# Start a local H2O cluster on all available cores with up to 16 GB of heap.
h2o.init(nthreads = -1, max_mem_size = "16G")
##################
# Define the model in h2o
# turn the matrices into h2o objects
edx_h2o <- as.h2o(edx_gen_norm)
val_h2o <- as.h2o(val_gen_norm)
# Specify labels and predictors
y <- "rating"
x <- setdiff(names(edx_h2o), y)
# Turn the labels into categorical data.
# NOTE(review): converting `rating` to a factor turns this into a multinomial
# classification problem rather than a regression -- confirm that is intended.
edx_h2o[,y] <- as.factor(edx_h2o[,y])
val_h2o[,y] <- as.factor(val_h2o[,y])
# Train a deep learning model and validate on test set
DL_model <- h2o.deeplearning(
  x = x,
  y = y,
  training_frame = edx_h2o,
  validation_frame = val_h2o,
  distribution = "AUTO",
  activation = "RectifierWithDropout",
  hidden = c(256, 256, 256, 256), # four hidden layers of 256 units each
  input_dropout_ratio = 0.2,
  sparse = TRUE,
  epochs = 15,
  stopping_rounds = 3,
  stopping_tolerance = 0.01, # stop early when improvement is below 1% (0.01)
  stopping_metric = "AUTO",
  nfolds = 10,
  variable_importances = TRUE,
  shuffle_training_data = TRUE,
  mini_batch_size = 2000
)
# Get RMSE
DL_RMSE_validation <- h2o.rmse(DL_model, valid = TRUE) # Validation RMSE = 0.8236556
DL_RMSE_training <- h2o.rmse(DL_model) # Train RMSE = 0.8241222
|
# Advent of Code 2015, day 4: find the smallest integer suffix n such that
# md5(input + n) starts with a given run of zeros.
library(digest)
input <- "bgvyzdsv"

# Part 1: five leading zeros. Position in keys equals the suffix (1-based).
keys <- paste0(input, 1:5e5)
# vapply instead of sapply: guarantees a character vector of the right shape.
hashes <- vapply(keys, digest, character(1), algo = "md5", serialize = FALSE)
which(substr(hashes, 1, 5) == "00000")
# 254575

# Part 2: six leading zeros, searching the range 1e6..2e6.
keys2 <- paste0(input, 1e6:2e6)
hashes2 <- vapply(keys2, digest, character(1), algo = "md5", serialize = FALSE)
# which() gives the POSITION p in keys2; the suffix itself is p + 1e6 - 1.
which(substr(hashes2, 1, 6) == "000000") + 1e6 - 1
# 1038736

# One liners:
which(substr(vapply(paste0(input, 1:5e5), digest, character(1), serialize = FALSE), 1, 5) == "00000")
# BUG FIX: keys start at 1e6, so position p maps to suffix p + 1e6 - 1;
# the original added 1e6 and reported a value one too large.
which(substr(vapply(paste0(input, 1e6:2e6), digest, character(1), serialize = FALSE), 1, 6) == "000000") + 1e6 - 1
|
/day04/1,2-md5.R
|
no_license
|
pdil/adventR
|
R
| false
| false
| 532
|
r
|
# Advent of Code 2015, day 4: find the smallest integer suffix n such that
# md5(input + n) starts with a given run of zeros.
library(digest)
input <- "bgvyzdsv"

# Part 1: five leading zeros. Position in keys equals the suffix (1-based).
keys <- paste0(input, 1:5e5)
# vapply instead of sapply: guarantees a character vector of the right shape.
hashes <- vapply(keys, digest, character(1), algo = "md5", serialize = FALSE)
which(substr(hashes, 1, 5) == "00000")
# 254575

# Part 2: six leading zeros, searching the range 1e6..2e6.
keys2 <- paste0(input, 1e6:2e6)
hashes2 <- vapply(keys2, digest, character(1), algo = "md5", serialize = FALSE)
# which() gives the POSITION p in keys2; the suffix itself is p + 1e6 - 1.
which(substr(hashes2, 1, 6) == "000000") + 1e6 - 1
# 1038736

# One liners:
which(substr(vapply(paste0(input, 1:5e5), digest, character(1), serialize = FALSE), 1, 5) == "00000")
# BUG FIX: keys start at 1e6, so position p maps to suffix p + 1e6 - 1;
# the original added 1e6 and reported a value one too large.
which(substr(vapply(paste0(input, 1e6:2e6), digest, character(1), serialize = FALSE), 1, 6) == "000000") + 1e6 - 1
|
#' Populations of US states and territories, 1790-2010
#'
#' Population figures for US states and territories from the decennial census.
#'
#' @section Variables:
#'
#' \itemize{
#'
#' \item \code{year}: date of the census.
#'
#' \item \code{state}: name of the state or territory.
#'
#' \item \code{population}: population of the state or territory.
#'
#' \item \code{GISJOIN}: a unique identifier for joining NHGIS data to spatial
#' data.
#'
#' }
#' @format A data frame with 983 observations of 4 variables.
#' @references This dataset has been gathered by the
#' \href{https://www.nhgis.org/}{NHGIS}. Minnesota Population Center,
#' \emph{National Historical Geographic Information System: Version 2.0}
#' (Minneapolis: University of Minnesota, 2011).
#' @keywords datasets
#' @examples
#' head(us_state_populations)
"us_state_populations"
|
/R/us-state-populations.R
|
permissive
|
ropensci/historydata
|
R
| false
| false
| 843
|
r
|
#' Populations of US states and territories, 1790-2010
#'
#' Population figures for US states and territories from the decennial census.
#'
#' @section Variables:
#'
#' \itemize{
#'
#' \item \code{year}: date of the census.
#'
#' \item \code{state}: name of the state or territory.
#'
#' \item \code{population}: population of the state or territory.
#'
#' \item \code{GISJOIN}: a unique identifier for joining NHGIS data to spatial
#' data.
#'
#' }
#' @format A data frame with 983 observations of 4 variables.
#' @references This dataset has been gathered by the
#' \href{https://www.nhgis.org/}{NHGIS}. Minnesota Population Center,
#' \emph{National Historical Geographic Information System: Version 2.0}
#' (Minneapolis: University of Minnesota, 2011).
#' @keywords datasets
#' @examples
#' head(us_state_populations)
"us_state_populations"
|
#' Dot-and-Whisker Plots of Regression Results
#'
#' \code{dwplot} is a function for quickly and easily generating dot-and-whisker plots of regression models saved in tidy data frames.
#'
#' @param x Either a tidy data.frame (see 'Details'), a model object to be tidied with \code{\link[broom]{tidy}}, or a list of such model objects.
#' @param alpha A number setting the criterion of the confidence intervals. The default value is .05, corresponding to 95-percent confidence intervals.
#' @param alpha2 An optional second criterion (typically larger than \code{alpha}) used to draw an inner, thicker whisker for a narrower confidence interval; the default, \code{NULL}, omits the inner whisker.
#' @param dodge_size A number (typically between 0 and 0.3) indicating how much vertical separation should be between different models' coefficients when multiple models are graphed in a single plot. Lower values tend to look better when the number of independent variables is small, while a higher value may be helpful when many models appear on the same plot.
#' @param order_vars A vector of variable names that specifies the order in which the variables are to appear along the y-axis of the plot.
#' @param show_intercept A logical constant indicating whether the coefficient of the intercept term should be plotted.
#' @param model_name The name of a variable that distinguishes separate models within a tidy data.frame.
#' @param dot_args A list of arguments specifying the appearance of the dots representing mean estimates. For supported arguments, see \code{\link{geom_point}}.
#' @param whisker_args A list of arguments specifying the appearance of the whiskers representing confidence intervals. For supported arguments, see \code{\link{geom_segment}}.
#' @param \dots Extra arguments to pass to \code{\link[broom]{tidy}}.
#'
#' @details \code{dwplot} visualizes regression results saved in tidy data.frames by, e.g., \code{\link[broom]{tidy}} as dot-and-whisker plots generated by \code{\link[ggplot2]{ggplot}}.
#'
#' Tidy data.frames to be plotted should include the variables \code{term} (names of predictors), \code{estimate} (corresponding estimates of coefficients or other quantities of interest), \code{std.error} (corresponding standard errors), and optionally \code{model} (when multiple models are desired on a single plot; a different name for this last variable may be specified using the model_name argument).
#' In place of \code{std.error} one may substitute \code{lb} (the lower bounds of the confidence intervals of each estimate) and \code{ub} (the corresponding upper bounds).
#'
#' For convenience, \code{dwplot} also accepts as input those model objects that can be tidied by \code{\link[broom]{tidy}}, or a list of such model objects.
#'
#' Because the function takes a data.frame as input, it is easily employed for a wide range of models, including those not supported by \code{\link[broom]{tidy}}.
#' And because the output is a \code{ggplot} object, it can easily be further customized with any additional arguments and layers supported by \code{ggplot2}.
#' Together, these two features make \code{dwplot} extremely flexible.
#'
#' @references
#' Kastellec, Jonathan P. and Leoni, Eduardo L. 2007. "Using Graphs Instead of Tables in Political Science." Perspectives on Politics, 5(4):755-771.
#'
#' @return The function returns a \code{ggplot} object.
#'
#' @import ggplot2
#' @importFrom dplyr "%>%" filter arrange left_join full_join bind_rows
#' @importFrom stats qnorm
#' @importFrom broom tidy
#' @importFrom plyr ldply
#'
#' @examples
#' library(broom)
#' library(dplyr)
#'
#' # Plot regression coefficients from a single model object
#' data(mtcars)
#' m1 <- lm(mpg ~ wt + cyl + disp, data = mtcars)
#'
#' dwplot(m1) +
#' scale_y_continuous(breaks = 3:1, labels=c("Weight", "Cylinders", "Displacement")) +
#' labs(x = "Coefficient", y = "") +
#' geom_vline(xintercept = 0, colour = "grey50", linetype = 2) +
#' theme(legend.position="none")
#'
#' # Plot regression coefficients from multiple models on the fly
#' m2 <- update(m1, . ~ . - disp)
#' dwplot(list(full = m1, nodisp = m2))
#'
#' # Change the appearance of dots and whiskers
#' dwplot(m1, dot_args = list(size = 6, pch = 21, fill = "white"),
#' whisker_args = list(lwd = 2))
#'
#' # Plot regression coefficients from multiple models in a tidy data.frame
#' by_trans <- mtcars %>% group_by(am) %>%
#' do(tidy(lm(mpg ~ wt + cyl + disp, data = .))) %>% rename(model=am)
#'
#' dwplot(by_trans, dodge_size = .05) +
#' scale_y_continuous(breaks = 3:1, labels=c("Weight", "Cylinders", "Displacement")) +
#' theme_bw() + labs(x = "Coefficient Estimate", y = "") +
#' geom_vline(xintercept = 0, colour = "grey60", linetype = 2) +
#' ggtitle("Predicting Gas Mileage, OLS Estimates") +
#' theme(plot.title = element_text(face = "bold"),
#' legend.justification=c(0, 0), legend.position=c(0, 0),
#' legend.background = element_rect(colour="grey80"),
#' legend.title.align = .5) +
#' scale_colour_grey(start = .4, end = .8,
#' name = "Transmission",
#' breaks = c(0, 1),
#' labels = c("Automatic", "Manual"))
#'
#' @export
dwplot <- function(x, alpha = .05, alpha2 = NULL, dodge_size = .15, order_vars = NULL,
                   show_intercept = FALSE, model_name = "model",
                   dot_args = NULL, whisker_args = NULL, ...) {
    # If x is model object(s), convert to a tidy data.frame
    df <- dw_tidy(x, ...)

    # Drop intercept terms unless requested; the second alternation also
    # matches polr cutpoint terms of the form "a|b".
    if (!show_intercept) df <- df %>% filter(!grepl("^\\(Intercept\\)$|^\\w+\\|\\w+$", term))

    # Set variables that will appear in pipelines to NULL to make R CMD check happy
    estimate <- model <- lb <- ub <- lb_alpha2 <- ub_alpha2 <- term <- std.error <- NULL

    n_vars <- length(unique(df$term))

    # Confirm number of models, get model names
    if (model_name %in% names(df)) {
        dfmod <- df[[model_name]]
        n_models <- length(unique(dfmod))
        ## re-order/restore levels by order in data set
        df[[model_name]] <- factor(dfmod, levels = unique(dfmod))
    } else {
        if (length(df$term) == n_vars) {
            df[[model_name]] <- factor("one")
            n_models <- 1
        } else {
            stop("Please add a variable named '",
                 model_name, "' to distinguish different models")
        }
    }
    mod_names <- unique(df[[model_name]])

    # Specify order of variables if an order is provided
    if (!is.null(order_vars)) {
        df$term <- factor(df$term, levels = order_vars)
        df <- df[match(order_vars, df$term), ] %>% stats::na.omit()
    }

    # Add rows of NAs for variables not included in a particular model
    if (n_models > 1) {
        df <- add_NAs(df, n_models, mod_names)
    }

    # Prep arguments to ggplot
    var_names <- df$term
    y_ind <- rep(seq(n_vars, 1), n_models)
    df$y_ind <- y_ind

    # Confirm alpha within bounds
    if (alpha < 0 | alpha > 1) {
        stop("Value of alpha for the confidence intervals should be between 0 and 1.")
    }

    # Generate lower and upper bound if not included in results
    if ((!"lb" %in% names(df)) || (!"ub" %in% names(df))) {
        if ("std.error" %in% names(df)) {
            ci <- 1 - alpha / 2
            df <- transform(df,
                            lb = estimate - stats::qnorm(ci) * std.error,
                            ub = estimate + stats::qnorm(ci) * std.error)
        } else {
            df <- transform(df, lb = NA, ub = NA)
        }
    }

    # Optional second (inner) confidence interval.
    # BUG FIX: the original compared alpha2 against 0/1 even when it was NULL
    # (an error with the default arguments) and looked for a nonexistent
    # "std.error_alpha2" column, so the inner whiskers could never be drawn.
    if (!is.null(alpha2)) {
        if (alpha2 < 0 | alpha2 > 1) {
            stop("Value of alpha2 for the confidence intervals should be between 0 and 1.")
        }
        if ((!"lb_alpha2" %in% names(df)) || (!"ub_alpha2" %in% names(df))) {
            if ("std.error" %in% names(df)) {
                ci2 <- 1 - alpha2 / 2
                df <- transform(df,
                                lb_alpha2 = estimate - stats::qnorm(ci2) * std.error,
                                ub_alpha2 = estimate + stats::qnorm(ci2) * std.error)
            } else {
                df <- transform(df, lb_alpha2 = NA, ub_alpha2 = NA)
            }
        }
    } else if ((!"lb_alpha2" %in% names(df)) || (!"ub_alpha2" %in% names(df))) {
        # No inner interval requested: NA bounds are dropped by na.rm = TRUE below
        df <- transform(df, lb_alpha2 = NA, ub_alpha2 = NA)
    }

    # Calculate y-axis shift for plotting multiple models
    if (n_models == 1) {
        shift <- 0
    } else {
        shift <- seq(dodge_size, -dodge_size, length.out = n_models)
    }
    shift_index <- data.frame(model = mod_names, shift)
    df <- left_join(df, shift_index, by = "model")

    # Catch difference between single and multiple models
    if (length(y_ind) != length(var_names)) {
        var_names <- unique(var_names)
    }

    # Generate arguments to geom_segment and geom_point
    seg_args0 <- list(aes(x = lb, xend = ub,
                          y = y_ind + shift, yend = y_ind + shift),
                      na.rm = TRUE)
    segment_args <- c(seg_args0, whisker_args)

    # Inner (second-alpha) whiskers: thicker and semi-transparent
    seg_args0_alpha2 <- list(aes(x = lb_alpha2, xend = ub_alpha2,
                                 y = y_ind + shift, yend = y_ind + shift), size = 3, alpha = 0.8,
                             na.rm = TRUE)
    segment_args_alpha2 <- c(seg_args0_alpha2, whisker_args)

    point_args0 <- list(na.rm = TRUE)
    point_args <- c(point_args0, dot_args)

    # Make the plot
    p <- ggplot(transform(df, model = factor(model)),
                aes(x = estimate, y = y_ind + shift, colour = model)) +
        do.call(geom_segment, segment_args) + # Draw segments first ...
        do.call(geom_point, point_args) +     # ... then dots on top ...
        do.call(geom_segment, segment_args_alpha2) + # ... then inner whiskers
        scale_y_continuous(breaks = y_ind, labels = var_names) +
        coord_cartesian(ylim = c(.5, n_vars + .5)) +
        ylab("") + xlab("")

    # Omit the legend if there is only one model
    if (!"model" %in% names(df) | length(mod_names) == 1) {
        p <- p + theme(legend.position = "none")
    }
    return(p)
}
dw_tidy <- function(x, ...) {
    # Normalize input into a tidy data.frame of coefficient estimates:
    # - a data.frame passes through unchanged
    # - a list of models is tidied element-wise with a "model" id column
    # - merMod fits keep only their fixed effects
    # - anything else is handed to broom::tidy()
    # Set variables that will appear in pipelines to NULL to make R CMD check happy
    process_lm <- tidy.summary.lm <- NULL
    if (!is.data.frame(x)) {
        if (inherits(x, "list")) {
            ind <- seq_along(x)
            nm <- paste("Model", ind)
            if (!is.null(nm_orig <- names(x))) {
                # BUG FIX: keep user-supplied names only where they are
                # non-empty. The original tested nchar(nm) > 0, which is
                # always TRUE ("Model i" is never empty), so unnamed list
                # elements lost their "Model i" fallback label.
                setNm <- nchar(nm_orig) > 0
                nm[setNm] <- nm_orig[setNm]
            }
            names(x) <- nm
            df <- do.call(plyr::ldply,
                          c(list(.data = x, .fun = broom::tidy, .id = "model"),
                            list(...)))
        } else if (inherits(x, "lmerMod")) {
            group <- vector() # only for avoiding the NOTE in check
            df <- broom::tidy(x) %>% filter(group == "fixed")
        } else {
            if (inherits(x, "polr")) {
                # NOTE(review): these shims reference process_lm and
                # tidy.summary.lm, which are NULL above; they appear to rely
                # on broom internals being reachable and would error if this
                # tidy.polr were ever dispatched -- confirm.
                family.polr <- function(object, ...) NULL
                tidy.polr <- function(x, conf.int = FALSE, conf.level = 0.95, exponentiate = FALSE, quick = FALSE, ...) {
                    if (quick) {
                        co <- stats::coef(x)
                        ret <- data.frame(term = names(co), estimate = unname(co))
                        return(process_lm(ret, x, conf.int = FALSE, exponentiate = exponentiate))
                    }
                    s <- summary(x)
                    ret <- tidy.summary.lm(s)
                    process_lm(ret, x, conf.int = conf.int, conf.level = conf.level,
                               exponentiate = exponentiate)
                }
            }
            df <- broom::tidy(x, ...)
        }
    } else {
        df <- x
    }
    return(df)
}
add_NAs <- function(df = df, n_models = n_models, mod_names = mod_names,
                    model_name = "model") {
  # Pad a multi-model tidy data.frame so that every model has a row for every
  # term appearing in any model; missing terms get NA estimates so the models
  # stay vertically aligned when plotted together.
  # Set variables that will appear in pipelines to NULL to make R CMD check happy
  term <- model <- NULL
  if (!is.factor(df$term)) {
    df$term <- factor(df$term, levels = unique(df$term))
  }
  if (!is.factor(dfmod <- df[[model_name]])) {
    df[[model_name]] <- factor(dfmod, levels = unique(dfmod))
  }
  padded <- vector("list", n_models)
  for (i in seq_len(n_models)) {
    m <- df %>% filter(model == factor(mod_names[[i]], levels = mod_names))
    not_in <- setdiff(unique(df$term), m$term)
    # seq_along() rather than seq(): seq() errors on a length-one character
    # vector and is fragile on length zero
    for (j in seq_along(not_in)) {
      t <- data.frame(term = factor(not_in[j], levels = levels(df$term)),
                      model = factor(mod_names[[i]], levels = mod_names))
      if ("submodel" %in% names(m)) {
        # Carry over the submodel label and include it as a join key
        t$submodel <- m$submodel[1]
        m <- full_join(m, t, by = c("term", "model", "submodel"))
      } else {
        m <- full_join(m, t, by = c("term", "model"))
      }
    }
    padded[[i]] <- m %>% arrange(term)
  }
  # Bind once at the end rather than growing a data.frame in the loop
  df <- bind_rows(padded)
  # Joins against the NA filler rows can leave these columns non-numeric;
  # coerce any that are present back to numeric
  df$estimate <- as.numeric(df$estimate)
  for (col in intersect(c("std.error", "ub", "lb", "ub_alpha2", "lb_alpha2"),
                        names(df))) {
    df[[col]] <- as.numeric(df[[col]])
  }
  return(df)
}
|
/R/dwplot.R
|
no_license
|
stefan-mueller/dotwhisker
|
R
| false
| false
| 13,297
|
r
|
#' Dot-and-Whisker Plots of Regression Results
#'
#' \code{dwplot} is a function for quickly and easily generating dot-and-whisker plots of regression models saved in tidy data frames.
#'
#' @param x Either a tidy data.frame (see 'Details'), a model object to be tidied with \code{\link[broom]{tidy}}, or a list of such model objects.
#' @param alpha A number setting the criterion of the confidence intervals. The default value is .05, corresponding to 95-percent confidence intervals.
#' @param alpha2 An optional second criterion for an additional set of confidence intervals, drawn as a thicker inner whisker layer. The default, NULL, omits the second set.
#' @param dodge_size A number (typically between 0 and 0.3) indicating how much vertical separation should be between different models' coefficients when multiple models are graphed in a single plot. Lower values tend to look better when the number of independent variables is small, while a higher value may be helpful when many models appear on the same plot.
#' @param order_vars A vector of variable names that specifies the order in which the variables are to appear along the y-axis of the plot.
#' @param show_intercept A logical constant indicating whether the coefficient of the intercept term should be plotted.
#' @param model_name The name of a variable that distinguishes separate models within a tidy data.frame.
#' @param dot_args A list of arguments specifying the appearance of the dots representing mean estimates. For supported arguments, see \code{\link{geom_point}}.
#' @param whisker_args A list of arguments specifying the appearance of the whiskers representing confidence intervals. For supported arguments, see \code{\link{geom_segment}}.
#' @param \dots Extra arguments to pass to \code{\link[broom]{tidy}}.
#'
#' @details \code{dwplot} visualizes regression results saved in tidy data.frames by, e.g., \code{\link[broom]{tidy}} as dot-and-whisker plots generated by \code{\link[ggplot2]{ggplot}}.
#'
#' Tidy data.frames to be plotted should include the variables \code{term} (names of predictors), \code{estimate} (corresponding estimates of coefficients or other quantities of interest), \code{std.error} (corresponding standard errors), and optionally \code{model} (when multiple models are desired on a single plot; a different name for this last variable may be specified using the model_name argument).
#' In place of \code{std.error} one may substitute \code{lb} (the lower bounds of the confidence intervals of each estimate) and \code{ub} (the corresponding upper bounds).
#'
#' For convenience, \code{dwplot} also accepts as input those model objects that can be tidied by \code{\link[broom]{tidy}}, or a list of such model objects.
#'
#' Because the function takes a data.frame as input, it is easily employed for a wide range of models, including those not supported by \code{\link[broom]{tidy}}.
#' And because the output is a \code{ggplot} object, it can easily be further customized with any additional arguments and layers supported by \code{ggplot2}.
#' Together, these two features make \code{dwplot} extremely flexible.
#'
#' @references
#' Kastellec, Jonathan P. and Leoni, Eduardo L. 2007. "Using Graphs Instead of Tables in Political Science." Perspectives on Politics, 5(4):755-771.
#'
#' @return The function returns a \code{ggplot} object.
#'
#' @import ggplot2
#' @importFrom dplyr "%>%" filter arrange left_join full_join bind_rows
#' @importFrom stats qnorm
#' @importFrom broom tidy
#' @importFrom plyr ldply
#'
#' @examples
#' library(broom)
#' library(dplyr)
#'
#' # Plot regression coefficients from a single model object
#' data(mtcars)
#' m1 <- lm(mpg ~ wt + cyl + disp, data = mtcars)
#'
#' dwplot(m1) +
#' scale_y_continuous(breaks = 3:1, labels=c("Weight", "Cylinders", "Displacement")) +
#' labs(x = "Coefficient", y = "") +
#' geom_vline(xintercept = 0, colour = "grey50", linetype = 2) +
#' theme(legend.position="none")
#'
#' # Plot regression coefficients from multiple models on the fly
#' m2 <- update(m1, . ~ . - disp)
#' dwplot(list(full = m1, nodisp = m2))
#'
#' # Change the appearance of dots and whiskers
#' dwplot(m1, dot_args = list(size = 6, pch = 21, fill = "white"),
#' whisker_args = list(lwd = 2))
#'
#' # Plot regression coefficients from multiple models in a tidy data.frame
#' by_trans <- mtcars %>% group_by(am) %>%
#' do(tidy(lm(mpg ~ wt + cyl + disp, data = .))) %>% rename(model=am)
#'
#' dwplot(by_trans, dodge_size = .05) +
#' scale_y_continuous(breaks = 3:1, labels=c("Weight", "Cylinders", "Displacement")) +
#' theme_bw() + labs(x = "Coefficient Estimate", y = "") +
#' geom_vline(xintercept = 0, colour = "grey60", linetype = 2) +
#' ggtitle("Predicting Gas Mileage, OLS Estimates") +
#' theme(plot.title = element_text(face = "bold"),
#' legend.justification=c(0, 0), legend.position=c(0, 0),
#' legend.background = element_rect(colour="grey80"),
#' legend.title.align = .5) +
#' scale_colour_grey(start = .4, end = .8,
#' name = "Transmission",
#' breaks = c(0, 1),
#' labels = c("Automatic", "Manual"))
#'
#' @export
dwplot <- function(x, alpha = .05, alpha2 = NULL, dodge_size = .15, order_vars = NULL,
                   show_intercept = FALSE, model_name = "model",
                   dot_args = NULL, whisker_args = NULL, ...) {
    # If x is model object(s), convert to a tidy data.frame
    df <- dw_tidy(x, ...)

    # Drop intercept rows (including polr-style cutpoints such as "a|b")
    # unless the user asks to keep them
    if (!show_intercept) df <- df %>% filter(!grepl("^\\(Intercept\\)$|^\\w+\\|\\w+$", term))

    # Set variables that will appear in pipelines to NULL to make R CMD check happy
    estimate <- model <- lb <- ub <- lb_alpha2 <- ub_alpha2 <- term <- std.error <- NULL

    n_vars <- length(unique(df$term))

    # Confirm number of models, get model names
    if (model_name %in% names(df)) {
        dfmod <- df[[model_name]]
        n_models <- length(unique(dfmod))
        ## re-order/restore levels by order in data set
        df[[model_name]] <- factor(dfmod, levels = unique(dfmod))
    } else {
        if (length(df$term) == n_vars) {
            df[[model_name]] <- factor("one")
            n_models <- 1
        } else {
            stop("Please add a variable named '",
                 model_name, "' to distinguish different models")
        }
    }
    mod_names <- unique(df[[model_name]])

    # Specify order of variables if an order is provided
    if (!is.null(order_vars)) {
        df$term <- factor(df$term, levels = order_vars)
        df <- df[match(order_vars, df$term), ] %>% stats::na.omit()
    }

    # Add rows of NAs for variables not included in a particular model
    if (n_models > 1) {
        df <- add_NAs(df, n_models, mod_names)
    }

    # Prep arguments to ggplot
    var_names <- df$term
    y_ind <- rep(seq(n_vars, 1), n_models)
    df$y_ind <- y_ind

    # Confirm alpha within bounds (scalar condition, so use ||)
    if (alpha < 0 || alpha > 1) {
        stop("Value of alpha for the confidence intervals should be between 0 and 1.")
    }

    # Generate lower and upper bound if not included in results
    if ((!"lb" %in% names(df)) || (!"ub" %in% names(df))) {
        if ("std.error" %in% names(df)) {
            ci <- 1 - alpha / 2
            df <- transform(df,
                            lb = estimate - stats::qnorm(ci) * std.error,
                            ub = estimate + stats::qnorm(ci) * std.error)
        } else {
            df <- transform(df, lb = NA, ub = NA)
        }
    }

    # alpha2 is optional. Bug fix: the original `if (alpha2 < 0 | alpha2 > 1)`
    # evaluated `if (logical(0))` -- an error -- whenever alpha2 kept its NULL
    # default, so guard with is.null() first.
    if (!is.null(alpha2) && (alpha2 < 0 || alpha2 > 1)) {
        stop("Value of alpha2 for the confidence intervals should be between 0 and 1.")
    }

    # Generate the second set of bounds only when alpha2 is supplied;
    # otherwise fill with NA so the extra whisker layer draws nothing
    # (its geom uses na.rm = TRUE). Bug fix: the original guard tested for a
    # "std.error_alpha2" column that the computation never uses -- the bounds
    # are computed from std.error with the alpha2 criterion.
    if ((!"lb_alpha2" %in% names(df)) || (!"ub_alpha2" %in% names(df))) {
        if (!is.null(alpha2) && "std.error" %in% names(df)) {
            ci2 <- 1 - alpha2 / 2
            df <- transform(df,
                            lb_alpha2 = estimate - stats::qnorm(ci2) * std.error,
                            ub_alpha2 = estimate + stats::qnorm(ci2) * std.error)
        } else {
            df <- transform(df, lb_alpha2 = NA, ub_alpha2 = NA)
        }
    }

    # Calculate y-axis shift for plotting multiple models
    if (n_models == 1) {
        shift <- 0
    } else {
        shift <- seq(dodge_size, -dodge_size, length.out = n_models)
    }
    shift_index <- data.frame(model = mod_names, shift)
    df <- left_join(df, shift_index, by = "model")

    # Catch difference between single and multiple models
    if (length(y_ind) != length(var_names)) {
        var_names <- unique(var_names)
    }

    # Generate arguments to geom_segment and geom_point
    seg_args0 <- list(aes(x = lb, xend = ub,
                          y = y_ind + shift, yend = y_ind + shift),
                      na.rm = TRUE)
    segment_args <- c(seg_args0, whisker_args)

    # Generate argument to geom_segment for second alpha (thicker inner whisker)
    seg_args0_alpha2 <- list(aes(x = lb_alpha2, xend = ub_alpha2,
                                 y = y_ind + shift, yend = y_ind + shift),
                             size = 3, alpha = 0.8,
                             na.rm = TRUE)
    segment_args_alpha2 <- c(seg_args0_alpha2, whisker_args)

    point_args0 <- list(na.rm = TRUE)
    point_args <- c(point_args0, dot_args)

    # Make the plot
    p <- ggplot(transform(df, model = factor(model)),
                aes(x = estimate, y = y_ind + shift, colour = model)) +
        do.call(geom_segment, segment_args) + # Draw segments first ...
        do.call(geom_point, point_args) +     # ... then dots on top ...
        do.call(geom_segment, segment_args_alpha2) + # ... then the alpha2 whiskers
        scale_y_continuous(breaks = y_ind, labels = var_names) +
        coord_cartesian(ylim = c(.5, n_vars + .5)) +
        ylab("") + xlab("")

    # Omit the legend if there is only one model
    if (!"model" %in% names(df) || length(mod_names) == 1) {
        p <- p + theme(legend.position = "none")
    }
    return(p)
}
dw_tidy <- function(x, ...) {
  # Coerce input into a tidy data.frame of coefficient estimates.
  # `x` may be (a) an already-tidy data.frame, returned as-is; (b) a list of
  # model objects, tidied and stacked with a `model` identifier column; or
  # (c) a single model object, tidied with broom::tidy().
  # Set variables that will appear in pipelines to NULL to make R CMD check happy
  process_lm <- tidy.summary.lm <- NULL
  if (!is.data.frame(x)) {
    if (inherits(x, "list")) {
      # Default labels "Model 1", "Model 2", ...; user-supplied *non-empty*
      # names take precedence. (Bug fix: the original indexed with
      # nchar(nm) > 0, which is always TRUE for the "Model i" defaults, so
      # empty user names clobbered the defaults.)
      nm <- paste("Model", seq_along(x))
      if (!is.null(nm_orig <- names(x))) {
        setNm <- nchar(nm_orig) > 0
        nm[setNm] <- nm_orig[setNm]
      }
      names(x) <- nm
      df <- do.call(plyr::ldply,
                    c(list(.data = x, .fun = broom::tidy, .id = "model"),
                      list(...)))
    } else if (inherits(x, "lmerMod")) {
      group <- vector() # only for avoiding the NOTE in check
      # Keep only the fixed effects of a merMod fit
      df <- broom::tidy(x) %>% filter(group == "fixed")
    } else {
      if (inherits(x, "polr")) {
        # Local S3 methods: UseMethod() dispatch also searches the calling
        # frame, so defining these here lets broom::tidy() handle polr
        # objects like lm fits.
        family.polr <- function(object, ...) NULL
        tidy.polr <- function(x, conf.int = FALSE, conf.level = 0.95,
                              exponentiate = FALSE, quick = FALSE, ...) {
          if (quick) {
            co <- stats::coef(x)
            ret <- data.frame(term = names(co), estimate = unname(co))
            return(process_lm(ret, x, conf.int = FALSE, exponentiate = exponentiate))
          }
          s <- summary(x)
          ret <- tidy.summary.lm(s)
          process_lm(ret, x, conf.int = conf.int, conf.level = conf.level,
                     exponentiate = exponentiate)
        }
      }
      df <- broom::tidy(x, ...)
    }
  } else {
    df <- x
  }
  return(df)
}
add_NAs <- function(df = df, n_models = n_models, mod_names = mod_names,
                    model_name = "model") {
  # Pad a multi-model tidy data.frame so that every model has a row for every
  # term appearing in any model; missing terms get NA estimates so the models
  # stay vertically aligned when plotted together.
  # Set variables that will appear in pipelines to NULL to make R CMD check happy
  term <- model <- NULL
  if (!is.factor(df$term)) {
    df$term <- factor(df$term, levels = unique(df$term))
  }
  if (!is.factor(dfmod <- df[[model_name]])) {
    df[[model_name]] <- factor(dfmod, levels = unique(dfmod))
  }
  padded <- vector("list", n_models)
  for (i in seq_len(n_models)) {
    m <- df %>% filter(model == factor(mod_names[[i]], levels = mod_names))
    not_in <- setdiff(unique(df$term), m$term)
    # seq_along() rather than seq(): seq() errors on a length-one character
    # vector and is fragile on length zero
    for (j in seq_along(not_in)) {
      t <- data.frame(term = factor(not_in[j], levels = levels(df$term)),
                      model = factor(mod_names[[i]], levels = mod_names))
      if ("submodel" %in% names(m)) {
        # Carry over the submodel label and include it as a join key
        t$submodel <- m$submodel[1]
        m <- full_join(m, t, by = c("term", "model", "submodel"))
      } else {
        m <- full_join(m, t, by = c("term", "model"))
      }
    }
    padded[[i]] <- m %>% arrange(term)
  }
  # Bind once at the end rather than growing a data.frame in the loop
  df <- bind_rows(padded)
  # Joins against the NA filler rows can leave these columns non-numeric;
  # coerce any that are present back to numeric
  df$estimate <- as.numeric(df$estimate)
  for (col in intersect(c("std.error", "ub", "lb", "ub_alpha2", "lb_alpha2"),
                        names(df))) {
    df[[col]] <- as.numeric(df[[col]])
  }
  return(df)
}
|
#power analysis from Lab 3 of Biometry Spring 2014
# Power / sample-size calculations for three lab datasets using the pwr
# package: trout growth (t-test), beetle coloration (two proportions), and
# crayfish counts (one-way ANOVA).
setwd("C:/Users/avanderlaar/Dropbox/R/Biometry/Biometry_Lab2_Power_Analysis")
library(pwr)
library(compute.es)
# Input CSVs are expected in the working directory set above
trout = read.csv('Trout.csv', header=TRUE)
beetle = read.csv('Beetle.csv', header=TRUE)
crayfish = read.csv('Crayfish.csv', header=TRUE)
#two sided t-test power analysis of trout
#calculate effect size
# NOTE(review): this is the raw difference of group means; pwr.t.test()'s d
# is a standardized effect size (Cohen's d) -- confirm this is intended.
e.s.trout = mean(trout$c.growth) - mean(trout$e.growth)
#gives you the current power of your test
pwr.t.test(n=NULL, d=e.s.trout, sig.level=0.05, power=0.8, alternative="two.sided")
#this will throw an error
#Error in uniroot(function(n) eval(p.body) - power, c(2 + 1e-10, 1e+07)) :
#f() values at end points not of opposite sign
#this means that you need a really tiny sample size
#try changing d (effect size) so that it is much smaller
pwr.t.test(n=NULL, d=5, sig.level=0.05, power=0.8, alternative="two.sided")
#and now it runs, because we actually need some samples now
#note that n is the sample size in each group
#beetle data
head(beetle)
#total # of beetles
totalb = beetle$Bright.Red + beetle$Not.BR
#proportion of Bright.Red
pro.br = beetle$Bright.Red/totalb
#proportion of Not.Br
pro.not = beetle$Not.BR/totalb
#effect size
# NOTE(review): pro.br - pro.not is a vector (one value per row) while
# pwr.2p.test() expects a single effect size h (typically from ES.h) -- verify.
e.s.beetle = pro.br-pro.not
#power analysis for two proportions
# NOTE(review): sig.level=0.5 looks like a typo for 0.05 -- confirm.
pwr.2p.test(h=e.s.beetle , n=NULL, sig.level=0.5, power=0.8, alternative="two.sided")
#crayfish data
summary(crayfish)
# Year-by-year subsets (currently unused below)
cray05 = crayfish[crayfish$Year==2005,]
cray06 = crayfish[crayfish$Year==2006,]
cray07 = crayfish[crayfish$Year==2007,]
# NOTE(review): e.s.crayfish is never defined in this script, so this call
# errors as written; compute the ANOVA effect size f first. sig.level=0.5
# also looks like a typo for 0.05 -- confirm.
pwr.anova.test(k=3 , n=NULL , f=e.s.crayfish , sig.level=0.5, power=0.8)
|
/biometry_spring_2014/Biometry_Lab2_Power_Analysis/Biometry_Lab_2.R
|
no_license
|
aurielfournier/courses
|
R
| false
| false
| 1,599
|
r
|
#power analysis from Lab 3 of Biometry Spring 2014
# Power / sample-size calculations for three lab datasets using the pwr
# package: trout growth (t-test), beetle coloration (two proportions), and
# crayfish counts (one-way ANOVA).
setwd("C:/Users/avanderlaar/Dropbox/R/Biometry/Biometry_Lab2_Power_Analysis")
library(pwr)
library(compute.es)
# Input CSVs are expected in the working directory set above
trout = read.csv('Trout.csv', header=TRUE)
beetle = read.csv('Beetle.csv', header=TRUE)
crayfish = read.csv('Crayfish.csv', header=TRUE)
#two sided t-test power analysis of trout
#calculate effect size
# NOTE(review): this is the raw difference of group means; pwr.t.test()'s d
# is a standardized effect size (Cohen's d) -- confirm this is intended.
e.s.trout = mean(trout$c.growth) - mean(trout$e.growth)
#gives you the current power of your test
pwr.t.test(n=NULL, d=e.s.trout, sig.level=0.05, power=0.8, alternative="two.sided")
#this will throw an error
#Error in uniroot(function(n) eval(p.body) - power, c(2 + 1e-10, 1e+07)) :
#f() values at end points not of opposite sign
#this means that you need a really tiny sample size
#try changing d (effect size) so that it is much smaller
pwr.t.test(n=NULL, d=5, sig.level=0.05, power=0.8, alternative="two.sided")
#and now it runs, because we actually need some samples now
#note that n is the sample size in each group
#beetle data
head(beetle)
#total # of beetles
totalb = beetle$Bright.Red + beetle$Not.BR
#proportion of Bright.Red
pro.br = beetle$Bright.Red/totalb
#proportion of Not.Br
pro.not = beetle$Not.BR/totalb
#effect size
# NOTE(review): pro.br - pro.not is a vector (one value per row) while
# pwr.2p.test() expects a single effect size h (typically from ES.h) -- verify.
e.s.beetle = pro.br-pro.not
#power analysis for two proportions
# NOTE(review): sig.level=0.5 looks like a typo for 0.05 -- confirm.
pwr.2p.test(h=e.s.beetle , n=NULL, sig.level=0.5, power=0.8, alternative="two.sided")
#crayfish data
summary(crayfish)
# Year-by-year subsets (currently unused below)
cray05 = crayfish[crayfish$Year==2005,]
cray06 = crayfish[crayfish$Year==2006,]
cray07 = crayfish[crayfish$Year==2007,]
# NOTE(review): e.s.crayfish is never defined in this script, so this call
# errors as written; compute the ANOVA effect size f first. sig.level=0.5
# also looks like a typo for 0.05 -- confirm.
pwr.anova.test(k=3 , n=NULL , f=e.s.crayfish , sig.level=0.5, power=0.8)
|
#' Generate the UI Code for demographic questions
#'
#' @param df One element (a dataframe) in the list of unique questions.
#'
#' @keywords internal
#' @return UI Code for a Shiny App.
#'
surveyOutput_individual <- function(df) {
  # Build the Shiny UI for one survey question. `df` holds one row per answer
  # option for a single question (possibly plus "instructions" rows); its
  # input_type selects the widget to render.
  inputType <- base::unique(df$input_type)
  if (length(inputType) != 1) {
    if (!"instructions" %in% inputType) {
      stop("Please double check your data frame and ensure that the input type for all questions is supported.")
    } else if ("instructions" %in% inputType) {
      # Split out the instructions text; it is rendered above the input widget
      instructions <- df[which(df$input_type == "instructions"), "question", drop = FALSE]$question
      instructions <- shiny::tagList(
        shiny::div(class = "question-instructions",
                   instructions)
      )
      inputType <- inputType[which(inputType != "instructions")]
      df <- df[which(df$input_type != "instructions"),]
    }
  } else if (length(inputType) == 1) {
    # (Bug fix: the original condition was `length(inputType == 1)` -- the
    # length of a logical vector -- rather than comparing the length to 1.)
    instructions <- NULL
  }

  # fixed = TRUE: "rank_{{" is a literal prefix, not regular-expression syntax
  if (grepl("rank_{{", inputType, fixed = TRUE)) {
    stop('Ranking input types have been superseded by the "matrix" input type.')
  }

  # Expose the current question to input builders that read it from survey_env
  survey_env$current_question <- df

  if (inputType == "select") {
    output <-
      shiny::selectizeInput(
        inputId = base::unique(df$input_id),
        label = addRequiredUI_internal(df),
        choices = df$option,
        options = list(
          placeholder = '',
          onInitialize = I('function() { this.setValue(""); }')
        )
      )
  } else if (inputType == "numeric") {
    output <-
      numberInput(
        inputId = base::unique(df$input_id),
        label = addRequiredUI_internal(df),
        placeholder = df$option
      )
  } else if (inputType == "mc") {
    output <-
      shiny::radioButtons(
        inputId = base::unique(df$input_id),
        label = addRequiredUI_internal(df),
        selected = base::character(0),
        choices = df$option
      )
  } else if (inputType == "text") {
    output <-
      shiny::textInput(inputId = base::unique(df$input_id),
                       label = addRequiredUI_internal(df),
                       placeholder = df$option)
  } else if (inputType == "y/n") {
    output <-
      shiny::radioButtons(
        inputId = base::unique(df$input_id),
        label = addRequiredUI_internal(df),
        selected = base::character(0),
        choices = df$option
      )
  } else if (inputType == "matrix") {
    # A matrix question is required only if every row is marked required
    # (all() already returns TRUE/FALSE; the original wrapped it in ifelse)
    required_matrix <- all(df$required)
    output <-
      radioMatrixInput(
        inputId = base::unique(df$input_id),
        responseItems = base::unique(df$question),
        choices = base::unique(df$option),
        selected = NULL,
        .required = required_matrix
      )
  } else if (inputType == "instructions") {
    output <- shiny::div(
      class = "instructions-only",
      df$question
    )
  } else if (inputType %in% survey_env$input_type) {
    # User-registered custom input type (see extendInputType())
    output <- eval(survey_env$input_extension[[inputType]])
  } else {
    stop(paste0("Input type '", inputType, "' from the supplied data frame of questions is not recognized by {shinysurveys}.
         Did you mean to register a custom input extension with `extendInputType()`?"))
  }

  # Wrap the widget; questions whose visibility depends on another answer get
  # an extra "dependence" class so the package JS can show/hide them
  if (!base::is.na(df$dependence[1])) {
    output <- shiny::div(class = "questions dependence",
                         id = paste0(df$input_id[1], "-question"),
                         shiny::div(class = "question-input",
                                    instructions,
                                    output))
  } else if (base::is.na(df$dependence[1])) {
    output <- shiny::div(class = "questions",
                         id = paste0(df$input_id[1], "-question"),
                         shiny::div(class = "question-input",
                                    instructions,
                                    output))
  }
  return(output)
}
#' Generate the UI Code for demographic questions
#'
#' Create the UI code for a Shiny app based on user-supplied questions.
#'
#' @param df A user supplied data frame in the format of teaching_r_questions.
#' @param survey_title (Optional) user supplied title for the survey
#' @param survey_description (Optional) user supplied description for the survey
#' @param theme A valid R color: predefined such as "red" or "blue"; hex colors
#' such as #63B8FF (default). To customize the survey's appearance entirely, supply NULL.
#' @param ... Additional arguments to pass into \link[shiny]{actionButton} used to submit survey responses.
#'
#' @return UI Code for a Shiny App.
#' @export
#'
#' @examples
#'
#' if (interactive()) {
#'
#' library(shiny)
#' library(shinysurveys)
#'
#' df <- data.frame(question = "What is your favorite food?",
#' option = "Your Answer",
#' input_type = "text",
#' input_id = "favorite_food",
#' dependence = NA,
#' dependence_value = NA,
#' required = F)
#'
#' ui <- fluidPage(
#' surveyOutput(df = df,
#' survey_title = "Hello, World!",
#' theme = "#63B8FF")
#' )
#'
#' server <- function(input, output, session) {
#' renderSurvey()
#'
#' observeEvent(input$submit, {
#' showModal(modalDialog(
#' title = "Congrats, you completed your first shinysurvey!",
#' "You can customize what actions happen when a user finishes a survey using input$submit."
#' ))
#' })
#' }
#'
#' shinyApp(ui, server)
#'
#' }
surveyOutput <- function(df, survey_title, survey_description, theme = "#63B8FF", ...) {
  # Stash the survey definition in the package-level environment so that
  # renderSurvey() and the per-question builders can reach it
  survey_env$theme <- theme
  survey_env$question_df <- df
  survey_env$unique_questions <- listUniqueQuestions(df)
  # Title and description are optional; record them only when supplied
  if (!missing(survey_title)) {
    survey_env$title <- survey_title
  }
  if (!missing(survey_description)) {
    survey_env$description <- survey_description
  }
  # A "page" column triggers the multi-page layout; otherwise render all
  # questions on a single page followed by the submit button
  if ("page" %in% names(df)) {
    main_ui <- multipaged_ui(df = df)
  } else if (!"page" %in% names(df)) {
    main_ui <- shiny::tagList(
      check_survey_metadata(survey_title = survey_title,
                            survey_description = survey_description),
      lapply(survey_env$unique_questions, surveyOutput_individual),
      shiny::div(class = "survey-buttons",
                 shiny::actionButton("submit",
                                     "Submit",
                                     ...)
      )
    )
  }
  # theme = NULL means "no styling": skip compiling the bundled SCSS
  if (!is.null(survey_env$theme)) {
    survey_style <- sass::sass(list(
      list(color = survey_env$theme),
      readLines(
        system.file("render_survey.scss",
                    package = "shinysurveys")
      )
    ))
  } else if (is.null(survey_env$theme)) {
    survey_style <- NULL
  }
  # Assemble the final tag list: package JS, compiled CSS, a hidden userID
  # input (read by the data-saving JS), and the survey body
  shiny::tagList(shiny::includeScript(system.file("shinysurveys-js.js",
                                                  package = "shinysurveys")),
                 shiny::includeScript(system.file("save_data.js",
                                                  package = "shinysurveys")),
                 shiny::tags$style(shiny::HTML(survey_style)),
                 shiny::div(class = "survey",
                            shiny::div(style = "display: none !important;",
                                       shiny::textInput(inputId = "userID",
                                                        label = "Enter your username.",
                                                        value = "NO_USER_ID")),
                            main_ui))
}
|
/R/func_survey-output.R
|
permissive
|
nklepeis/shinysurveys
|
R
| false
| false
| 7,446
|
r
|
#' Generate the UI Code for demographic questions
#'
#' @param df One element (a dataframe) in the list of unique questions.
#'
#' @keywords internal
#' @return UI Code for a Shiny App.
#'
surveyOutput_individual <- function(df) {
  # Build the Shiny UI for one survey question. `df` holds one row per answer
  # option for a single question (possibly plus "instructions" rows); its
  # input_type selects the widget to render.
  inputType <- base::unique(df$input_type)
  if (length(inputType) != 1) {
    if (!"instructions" %in% inputType) {
      stop("Please double check your data frame and ensure that the input type for all questions is supported.")
    } else if ("instructions" %in% inputType) {
      # Split out the instructions text; it is rendered above the input widget
      instructions <- df[which(df$input_type == "instructions"), "question", drop = FALSE]$question
      instructions <- shiny::tagList(
        shiny::div(class = "question-instructions",
                   instructions)
      )
      inputType <- inputType[which(inputType != "instructions")]
      df <- df[which(df$input_type != "instructions"),]
    }
  } else if (length(inputType) == 1) {
    # (Bug fix: the original condition was `length(inputType == 1)` -- the
    # length of a logical vector -- rather than comparing the length to 1.)
    instructions <- NULL
  }

  # fixed = TRUE: "rank_{{" is a literal prefix, not regular-expression syntax
  if (grepl("rank_{{", inputType, fixed = TRUE)) {
    stop('Ranking input types have been superseded by the "matrix" input type.')
  }

  # Expose the current question to input builders that read it from survey_env
  survey_env$current_question <- df

  if (inputType == "select") {
    output <-
      shiny::selectizeInput(
        inputId = base::unique(df$input_id),
        label = addRequiredUI_internal(df),
        choices = df$option,
        options = list(
          placeholder = '',
          onInitialize = I('function() { this.setValue(""); }')
        )
      )
  } else if (inputType == "numeric") {
    output <-
      numberInput(
        inputId = base::unique(df$input_id),
        label = addRequiredUI_internal(df),
        placeholder = df$option
      )
  } else if (inputType == "mc") {
    output <-
      shiny::radioButtons(
        inputId = base::unique(df$input_id),
        label = addRequiredUI_internal(df),
        selected = base::character(0),
        choices = df$option
      )
  } else if (inputType == "text") {
    output <-
      shiny::textInput(inputId = base::unique(df$input_id),
                       label = addRequiredUI_internal(df),
                       placeholder = df$option)
  } else if (inputType == "y/n") {
    output <-
      shiny::radioButtons(
        inputId = base::unique(df$input_id),
        label = addRequiredUI_internal(df),
        selected = base::character(0),
        choices = df$option
      )
  } else if (inputType == "matrix") {
    # A matrix question is required only if every row is marked required
    # (all() already returns TRUE/FALSE; the original wrapped it in ifelse)
    required_matrix <- all(df$required)
    output <-
      radioMatrixInput(
        inputId = base::unique(df$input_id),
        responseItems = base::unique(df$question),
        choices = base::unique(df$option),
        selected = NULL,
        .required = required_matrix
      )
  } else if (inputType == "instructions") {
    output <- shiny::div(
      class = "instructions-only",
      df$question
    )
  } else if (inputType %in% survey_env$input_type) {
    # User-registered custom input type (see extendInputType())
    output <- eval(survey_env$input_extension[[inputType]])
  } else {
    stop(paste0("Input type '", inputType, "' from the supplied data frame of questions is not recognized by {shinysurveys}.
         Did you mean to register a custom input extension with `extendInputType()`?"))
  }

  # Wrap the widget; questions whose visibility depends on another answer get
  # an extra "dependence" class so the package JS can show/hide them
  if (!base::is.na(df$dependence[1])) {
    output <- shiny::div(class = "questions dependence",
                         id = paste0(df$input_id[1], "-question"),
                         shiny::div(class = "question-input",
                                    instructions,
                                    output))
  } else if (base::is.na(df$dependence[1])) {
    output <- shiny::div(class = "questions",
                         id = paste0(df$input_id[1], "-question"),
                         shiny::div(class = "question-input",
                                    instructions,
                                    output))
  }
  return(output)
}
#' Generate the UI Code for demographic questions
#'
#' Create the UI code for a Shiny app based on user-supplied questions.
#'
#' @param df A user supplied data frame in the format of teaching_r_questions.
#' @param survey_title (Optional) user supplied title for the survey
#' @param survey_description (Optional) user supplied description for the survey
#' @param theme A valid R color: predefined such as "red" or "blue"; hex colors
#' such as #63B8FF (default). To customize the survey's appearance entirely, supply NULL.
#' @param ... Additional arguments to pass into \link[shiny]{actionButton} used to submit survey responses.
#'
#' @return UI Code for a Shiny App.
#' @export
#'
#' @examples
#'
#' if (interactive()) {
#'
#' library(shiny)
#' library(shinysurveys)
#'
#' df <- data.frame(question = "What is your favorite food?",
#' option = "Your Answer",
#' input_type = "text",
#' input_id = "favorite_food",
#' dependence = NA,
#' dependence_value = NA,
#' required = F)
#'
#' ui <- fluidPage(
#' surveyOutput(df = df,
#' survey_title = "Hello, World!",
#' theme = "#63B8FF")
#' )
#'
#' server <- function(input, output, session) {
#' renderSurvey()
#'
#' observeEvent(input$submit, {
#' showModal(modalDialog(
#' title = "Congrats, you completed your first shinysurvey!",
#' "You can customize what actions happen when a user finishes a survey using input$submit."
#' ))
#' })
#' }
#'
#' shinyApp(ui, server)
#'
#' }
surveyOutput <- function(df, survey_title, survey_description, theme = "#63B8FF", ...) {
  # Stash the survey definition in the package-level environment so that
  # renderSurvey() and the per-question builders can reach it
  survey_env$theme <- theme
  survey_env$question_df <- df
  survey_env$unique_questions <- listUniqueQuestions(df)
  # Title and description are optional; record them only when supplied
  if (!missing(survey_title)) {
    survey_env$title <- survey_title
  }
  if (!missing(survey_description)) {
    survey_env$description <- survey_description
  }
  # A "page" column triggers the multi-page layout; otherwise render all
  # questions on a single page followed by the submit button
  if ("page" %in% names(df)) {
    main_ui <- multipaged_ui(df = df)
  } else if (!"page" %in% names(df)) {
    main_ui <- shiny::tagList(
      check_survey_metadata(survey_title = survey_title,
                            survey_description = survey_description),
      lapply(survey_env$unique_questions, surveyOutput_individual),
      shiny::div(class = "survey-buttons",
                 shiny::actionButton("submit",
                                     "Submit",
                                     ...)
      )
    )
  }
  # theme = NULL means "no styling": skip compiling the bundled SCSS
  if (!is.null(survey_env$theme)) {
    survey_style <- sass::sass(list(
      list(color = survey_env$theme),
      readLines(
        system.file("render_survey.scss",
                    package = "shinysurveys")
      )
    ))
  } else if (is.null(survey_env$theme)) {
    survey_style <- NULL
  }
  # Assemble the final tag list: package JS, compiled CSS, a hidden userID
  # input (read by the data-saving JS), and the survey body
  shiny::tagList(shiny::includeScript(system.file("shinysurveys-js.js",
                                                  package = "shinysurveys")),
                 shiny::includeScript(system.file("save_data.js",
                                                  package = "shinysurveys")),
                 shiny::tags$style(shiny::HTML(survey_style)),
                 shiny::div(class = "survey",
                            shiny::div(style = "display: none !important;",
                                       shiny::textInput(inputId = "userID",
                                                        label = "Enter your username.",
                                                        value = "NO_USER_ID")),
                            main_ui))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/power_rules.R
\name{power.w.test}
\alias{power.w.test}
\title{Power and sample size of the Wilcoxon Mann Whitney U test}
\usage{
power.w.test(n = NULL, p1 = NULL, p2 = NULL, p3 = NULL, propn = 1/2,
sig.level = 0.025, power = 0.9, silent = F)
}
\arguments{
\item{n}{Sample size in the control group}
\item{p1}{probability that X < Y}
\item{p2}{probability that X < Y and X < Y'}
\item{p3}{probability that X < Y and X' < Y}
\item{propn}{proportion of the total number of observations in the control group}
\item{sig.level}{significance level of the test}
\item{power}{desired target power}
\item{silent}{should hints be suppressed}
}
\value{
Either the required number of control group observations to achieve the target power, or the power of the WMW test under the specified alternative and given sample size.
}
\description{
This function computes the power and sample size according to the asymptotic power formula given in [(2.23) p.71 of Lehmann (2006)]. If \code{is.null(n)} the sample size required to achieve the target power is computed using bisection search.
}
\author{
float
}
|
/man/power.w.test.Rd
|
no_license
|
floatofmath/adaperm
|
R
| false
| true
| 1,176
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/power_rules.R
\name{power.w.test}
\alias{power.w.test}
\title{Power and sample size of the Wilcoxon Mann Whitney U test}
\usage{
power.w.test(n = NULL, p1 = NULL, p2 = NULL, p3 = NULL, propn = 1/2,
sig.level = 0.025, power = 0.9, silent = F)
}
\arguments{
\item{n}{Sample size in the control group}
\item{p1}{probability that X < Y}
\item{p2}{probability that X < Y and X < Y'}
\item{p3}{probability that X < Y and X' < Y}
\item{propn}{proportion of the total number of observations in the control group}
\item{sig.level}{significance level of the test}
\item{power}{desired target power}
\item{silent}{should hints be suppressed}
}
\value{
Either the required number of control group observations to achieve the target power, or the power of the WMW test under the specified alternative and given sample size.
}
\description{
This function computes the power and sample size according to the asymptotic power formula given in [(2.23) p.71 of Lehmann (2006)]. If \code{is.null(n)} the sample size required to achieve the target power is computed using bisection search.
}
\author{
float
}
|
# Fit a neural network that predicts cereal ratings from nutritional
# information, then study how test RMSE varies with training-set size.
# NOTE(review): `Dataset` is assumed to exist in the calling environment and
# to contain (at least) the numeric columns calories, protein, fat, sodium,
# fiber, and rating -- confirm before running.
data <- Dataset

# 60/40 train/test split (seeded for reproducibility).
samplesize <- 0.60 * nrow(data)
set.seed(80)
index <- sample(seq_len(nrow(data)), size = samplesize)
datatrain <- data[index, ]
datatest <- data[-index, ]

# Keep numeric columns only, then min-max scale every column to [0, 1]
# (neural networks train poorly on unscaled inputs). Named `maxs`/`mins`
# rather than `max`/`min` so the base functions are not shadowed.
data <- data[, sapply(data, is.numeric)]
maxs <- as.numeric(apply(data, 2, max))
mins <- as.numeric(apply(data, 2, min))
scaled <- as.data.frame(scale(data, center = mins, scale = maxs - mins))

# BUG FIX: the original installed "neuralnet " (trailing space), which is
# not a valid package name, and never loaded the package before use.
install.packages("neuralnet")
library(neuralnet)

trainNN <- scaled[index, ]
testNN <- scaled[-index, ]

# One hidden layer with 3 neurons; linear output for regression.
set.seed(2)
NN <- neuralnet(rating ~ calories + protein + fat + sodium + fiber,
                trainNN, hidden = 3, linear.output = TRUE)
plot(NN)

# Predict on the held-out set and rescale predictions back to the original
# rating units before computing RMSE.
predict_testNN <- compute(NN, testNN[, c(1:5)])
predict_testNN <- (predict_testNN$net.result * (max(data$rating) - min(data$rating))) + min(data$rating)

plot(datatest$rating, predict_testNN, col = "blue", pch = 16,
     ylab = "predicted rating NN", xlab = "real rating")
abline(0, 1)
RMSE.NN <- (sum((datatest$rating - predict_testNN)^2) / nrow(datatest))^0.5

# Resampling study: for each training-set size j in 10:65, refit k times on
# random subsets and record the test RMSE each time.
install.packages("boot")
install.packages("plyr")
library(boot)
library(plyr)
set.seed(50)
k <- 100
RMSE.NN <- NULL
List <- list()
for (j in 10:65) {
  for (i in seq_len(k)) {
    index <- sample(seq_len(nrow(data)), j)
    trainNN <- scaled[index, ]
    testNN <- scaled[-index, ]
    datatest <- data[-index, ]
    NN <- neuralnet(rating ~ calories + protein + fat + sodium + fiber,
                    trainNN, hidden = 3, linear.output = TRUE)
    predict_testNN <- compute(NN, testNN[, c(1:5)])
    predict_testNN <- (predict_testNN$net.result * (max(data$rating) - min(data$rating))) + min(data$rating)
    RMSE.NN[i] <- (sum((datatest$rating - predict_testNN)^2) / nrow(datatest))^0.5
  }
  List[[j]] <- RMSE.NN
}
# List[[1]] .. List[[9]] stay NULL and are dropped by cbind, so column 56
# corresponds to j = 65.
Matrix.RMSE <- do.call(cbind, List)
boxplot(Matrix.RMSE[, 56], ylab = "RMSE",
        main = "RMSE BoxPlot (length of training set = 65)")

# Median RMSE as a function of training-set size.
install.packages("matrixStats")
library(matrixStats)
med <- colMedians(Matrix.RMSE)
X <- seq(10, 65)
plot(med ~ X, type = "l", xlab = "length of training set",
     ylab = "median RMSE",
     main = "Variation of RMSE with length of training set")
|
/Cerels.R
|
no_license
|
rishada/Data-Mining-and-Analysis-of-Different-Brand-of-Cereals-using-Neural-Networks-using-R-package
|
R
| false
| false
| 2,026
|
r
|
# Fit a neural network that predicts cereal ratings from nutritional
# information, then study how test RMSE varies with training-set size.
# NOTE(review): `Dataset` is assumed to exist in the calling environment and
# to contain (at least) the numeric columns calories, protein, fat, sodium,
# fiber, and rating -- confirm before running.
data <- Dataset

# 60/40 train/test split (seeded for reproducibility).
samplesize <- 0.60 * nrow(data)
set.seed(80)
index <- sample(seq_len(nrow(data)), size = samplesize)
datatrain <- data[index, ]
datatest <- data[-index, ]

# Keep numeric columns only, then min-max scale every column to [0, 1]
# (neural networks train poorly on unscaled inputs). Named `maxs`/`mins`
# rather than `max`/`min` so the base functions are not shadowed.
data <- data[, sapply(data, is.numeric)]
maxs <- as.numeric(apply(data, 2, max))
mins <- as.numeric(apply(data, 2, min))
scaled <- as.data.frame(scale(data, center = mins, scale = maxs - mins))

# BUG FIX: the original installed "neuralnet " (trailing space), which is
# not a valid package name, and never loaded the package before use.
install.packages("neuralnet")
library(neuralnet)

trainNN <- scaled[index, ]
testNN <- scaled[-index, ]

# One hidden layer with 3 neurons; linear output for regression.
set.seed(2)
NN <- neuralnet(rating ~ calories + protein + fat + sodium + fiber,
                trainNN, hidden = 3, linear.output = TRUE)
plot(NN)

# Predict on the held-out set and rescale predictions back to the original
# rating units before computing RMSE.
predict_testNN <- compute(NN, testNN[, c(1:5)])
predict_testNN <- (predict_testNN$net.result * (max(data$rating) - min(data$rating))) + min(data$rating)

plot(datatest$rating, predict_testNN, col = "blue", pch = 16,
     ylab = "predicted rating NN", xlab = "real rating")
abline(0, 1)
RMSE.NN <- (sum((datatest$rating - predict_testNN)^2) / nrow(datatest))^0.5

# Resampling study: for each training-set size j in 10:65, refit k times on
# random subsets and record the test RMSE each time.
install.packages("boot")
install.packages("plyr")
library(boot)
library(plyr)
set.seed(50)
k <- 100
RMSE.NN <- NULL
List <- list()
for (j in 10:65) {
  for (i in seq_len(k)) {
    index <- sample(seq_len(nrow(data)), j)
    trainNN <- scaled[index, ]
    testNN <- scaled[-index, ]
    datatest <- data[-index, ]
    NN <- neuralnet(rating ~ calories + protein + fat + sodium + fiber,
                    trainNN, hidden = 3, linear.output = TRUE)
    predict_testNN <- compute(NN, testNN[, c(1:5)])
    predict_testNN <- (predict_testNN$net.result * (max(data$rating) - min(data$rating))) + min(data$rating)
    RMSE.NN[i] <- (sum((datatest$rating - predict_testNN)^2) / nrow(datatest))^0.5
  }
  List[[j]] <- RMSE.NN
}
# List[[1]] .. List[[9]] stay NULL and are dropped by cbind, so column 56
# corresponds to j = 65.
Matrix.RMSE <- do.call(cbind, List)
boxplot(Matrix.RMSE[, 56], ylab = "RMSE",
        main = "RMSE BoxPlot (length of training set = 65)")

# Median RMSE as a function of training-set size.
install.packages("matrixStats")
library(matrixStats)
med <- colMedians(Matrix.RMSE)
X <- seq(10, 65)
plot(med ~ X, type = "l", xlab = "length of training set",
     ylab = "median RMSE",
     main = "Variation of RMSE with length of training set")
|
#' Numeric vector with percentage representation
#'
#' Creates a formattable numeric vector whose values are rendered as
#' percentage strings (e.g. 0.5 prints as "50.00%").
#'
#' @family numeric vectors
#' @param x a numeric vector.
#' @param digits an integer to indicate the number of digits of the percentage string.
#' @param format format type passed to [formatC()].
#' @param ... additional parameters passed to [formattable()].
#' @export
#' @examples
#' num_percent(rnorm(10, 0, 0.1))
#' num_percent(rnorm(10, 0, 0.1), digits = 0)
num_percent <- function(x, digits = 2L, format = "f", ...) {
  # Display is handled by percent_preproc (x100) / percent_postproc ("%" suffix).
  values <- as_numeric(x)
  formattable(
    values,
    format = format,
    digits = digits,
    ...,
    preproc = "percent_preproc",
    postproc = "percent_postproc"
  )
}
#' @rdname num_percent
#' @export
#' @examples
#' parse_percent("0.5%")
#' parse_percent(c("15.5%", "25.12%", "73.5"))
parse_percent <- function(x, digits = NA, format = "f", ...) {
  # Flag the entries carrying an explicit "%" suffix and strip it.
  valid <- grepl("^(.+)\\s*%$", x)
  pct <- gsub("^(.+)\\s*%$", "\\1", x)
  # Infer display digits from the input strings when not supplied; entries
  # without a "%" suffix drop 2 digits to compensate for the 100x display
  # rescaling.
  if (is.na(digits)) digits <- max(get_digits(x) - ifelse(valid, 0, 2))
  # Suffixed entries are divided by 100; bare numbers are kept as-is.
  # NOTE(review): confirm keeping bare numbers undivided is intended.
  # BUG FIX: format = "f" was hard-coded here, silently ignoring the
  # `format` argument (and `...`); forward both to percent.default().
  copy_dim(x, percent.default(as.numeric(pct) / ifelse(valid, 100, 1),
    digits = digits, format = format, ...
  ))
}
# Preprocessor for percentage display: convert a proportion to its
# percentage value (e.g. 0.5 -> 50).
percent_preproc <- function(x) {
  100 * x
}
# Postprocessor for percentage display: append "%" to each formatted string,
# except where the underlying numeric value is not finite (NA/NaN/Inf keep
# their bare representation).
percent_postproc <- function(str, x) {
  suffix <- ifelse(is.finite(x), "%", "")
  paste0(str, suffix)
}
|
/R/num_percent.R
|
permissive
|
renkun-ken/formattable
|
R
| false
| false
| 1,222
|
r
|
#' Numeric vector with percentage representation
#'
#' Creates a formattable numeric vector whose values are rendered as
#' percentage strings (e.g. 0.5 prints as "50.00%").
#'
#' @family numeric vectors
#' @param x a numeric vector.
#' @param digits an integer to indicate the number of digits of the percentage string.
#' @param format format type passed to [formatC()].
#' @param ... additional parameters passed to [formattable()].
#' @export
#' @examples
#' num_percent(rnorm(10, 0, 0.1))
#' num_percent(rnorm(10, 0, 0.1), digits = 0)
num_percent <- function(x, digits = 2L, format = "f", ...) {
  # Display is handled by percent_preproc (x100) / percent_postproc ("%" suffix).
  num <- as_numeric(x)
  formattable(
    num,
    format = format,
    digits = digits,
    ...,
    preproc = "percent_preproc",
    postproc = "percent_postproc"
  )
}
#' @rdname num_percent
#' @export
#' @examples
#' parse_percent("0.5%")
#' parse_percent(c("15.5%", "25.12%", "73.5"))
parse_percent <- function(x, digits = NA, format = "f", ...) {
  # Flag the entries carrying an explicit "%" suffix and strip it.
  valid <- grepl("^(.+)\\s*%$", x)
  pct <- gsub("^(.+)\\s*%$", "\\1", x)
  # Infer display digits from the input strings when not supplied; entries
  # without a "%" suffix drop 2 digits to compensate for the 100x display
  # rescaling.
  if (is.na(digits)) digits <- max(get_digits(x) - ifelse(valid, 0, 2))
  # Suffixed entries are divided by 100; bare numbers are kept as-is.
  # NOTE(review): confirm keeping bare numbers undivided is intended.
  # BUG FIX: format = "f" was hard-coded here, silently ignoring the
  # `format` argument (and `...`); forward both to percent.default().
  copy_dim(x, percent.default(as.numeric(pct) / ifelse(valid, 100, 1),
    digits = digits, format = format, ...
  ))
}
# Preprocessor for percentage display: scale a proportion up to its
# percentage value (0.5 -> 50).
percent_preproc <- function(p) {
  p * 100
}
# Postprocessor for percentage display: suffix each formatted string with
# "%" unless the underlying numeric value is non-finite (NA/NaN/Inf are
# left without a suffix).
percent_postproc <- function(str, x) {
  paste0(str, ifelse(is.finite(x), "%", character(1)))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/opal.token.R
\name{opal.token_r_create}
\alias{opal.token_r_create}
\title{Create a personal access token for R usage}
\usage{
opal.token_r_create(
opal,
name,
projects = NULL,
access = NULL,
commands = c("export")
)
}
\arguments{
\item{opal}{Opal object.}
\item{name}{Name of the token}
\item{projects}{Vector of project names, to which the token applies. Default is NULL (all projects).}
\item{access}{Data access level: 'READ' (read-only) or 'READ_NO_VALUES' (read-only, without access to individual-level data) or NULL (default).}
\item{commands}{Task commands that can be launched on a project: 'import' and/or 'export'. Default is 'export' (use NULL for no task commands).}
}
\value{
The token value.
}
\description{
Create a personal access token for R (server) usage. Like for the other token functions,
this operation requires the user to authenticate with username/password credentials.
}
\examples{
\dontrun{
o <- opal.login('administrator','password', url='https://opal-demo.obiba.org')
token <- opal.token_r_create(o, 'r-1', access = 'READ', commands = 'export')
opal.logout(o)
}
}
\seealso{
Other token functions:
\code{\link{opal.token_datashield_create}()},
\code{\link{opal.token_delete}()},
\code{\link{opal.token_renew}()},
\code{\link{opal.token_sql_create}()},
\code{\link{opal.tokens}()},
\code{\link{opal.token}()}
}
\concept{token functions}
|
/man/opal.token_r_create.Rd
|
no_license
|
obiba/opalr
|
R
| false
| true
| 1,457
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/opal.token.R
\name{opal.token_r_create}
\alias{opal.token_r_create}
\title{Create a personal access token for R usage}
\usage{
opal.token_r_create(
opal,
name,
projects = NULL,
access = NULL,
commands = c("export")
)
}
\arguments{
\item{opal}{Opal object.}
\item{name}{Name of the token}
\item{projects}{Vector of project names, to which the token applies. Default is NULL (all projects).}
\item{access}{Data access level: 'READ' (read-only) or 'READ_NO_VALUES' (read-only, without access to individual-level data) or NULL (default).}
\item{commands}{Task commands that can be launched on a project: 'import' and/or 'export'. Default is 'export' (use NULL for no task commands).}
}
\value{
The token value.
}
\description{
Create a personal access token for R (server) usage. Like for the other token functions,
this operation requires the user to authenticate with username/password credentials.
}
\examples{
\dontrun{
o <- opal.login('administrator','password', url='https://opal-demo.obiba.org')
token <- opal.token_r_create(o, 'r-1', access = 'READ', commands = 'export')
opal.logout(o)
}
}
\seealso{
Other token functions:
\code{\link{opal.token_datashield_create}()},
\code{\link{opal.token_delete}()},
\code{\link{opal.token_renew}()},
\code{\link{opal.token_sql_create}()},
\code{\link{opal.tokens}()},
\code{\link{opal.token}()}
}
\concept{token functions}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.