content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
# uscrime.R

#' U.S. Crime rates per 100,000 people
#'
#' U.S. Crime rates per 100,000 people for 7 categories in each of the 50 U.S.
#' states in 1977.
#'
#' There are two missing values.
#'
#' @format
#' A data frame with 50 observations on the following 8 variables.
#' \describe{
#'   \item{state}{U.S. state}
#'   \item{murder}{murders}
#'   \item{rape}{rapes}
#'   \item{robbery}{robbery}
#'   \item{assault}{assault}
#'   \item{burglary}{burglary}
#'   \item{larceny}{larceny}
#'   \item{autotheft}{automobile thefts}
#' }
#'
#' @source
#' Documentation Example 3 for PROC HPPRINCOMP.
#' http://documentation.sas.com/api/docsets/stathpug/14.2/content/stathpug_code_hppriex3.htm?locale=en
#'
#' @references
#' SAS/STAT User's Guide: High-Performance Procedures. The HPPRINCOMP Procedure.
#' http://support.sas.com/documentation/cdl/en/stathpug/67524/HTML/default/viewer.htm#stathpug_hpprincomp_toc.htm
#'
#' @examples
#'
#' library(nipals)
#' head(uscrime)
#'
#' # SAS deletes rows with missing values
#' dat <- uscrime[complete.cases(uscrime), ]
#' dat <- as.matrix(dat[ , -1])
#' m1 <- nipals(dat) # complete-data method
#'
#' # Traditional NIPALS with missing data
#' dat <- uscrime
#' dat <- as.matrix(dat[ , -1])
#' m2 <- nipals(dat, gramschmidt=FALSE) # missing
#' round(crossprod(m2$loadings),3) # Prin Comps not quite orthogonal
#'
#' # Gram-Schmidt corrected NIPALS
#' m3 <- nipals(dat, gramschmidt=TRUE) # TRUE is default
#' round(crossprod(m3$loadings),3) # Prin Comps are orthogonal
#'
"uscrime"
/R/uscrime.R
no_license
cran/nipals
R
false
false
1,579
r
# uscrime.R

#' U.S. Crime rates per 100,000 people
#'
#' U.S. Crime rates per 100,000 people for 7 categories in each of the 50 U.S.
#' states in 1977.
#'
#' There are two missing values.
#'
#' @format
#' A data frame with 50 observations on the following 8 variables.
#' \describe{
#'   \item{state}{U.S. state}
#'   \item{murder}{murders}
#'   \item{rape}{rapes}
#'   \item{robbery}{robbery}
#'   \item{assault}{assault}
#'   \item{burglary}{burglary}
#'   \item{larceny}{larceny}
#'   \item{autotheft}{automobile thefts}
#' }
#'
#' @source
#' Documentation Example 3 for PROC HPPRINCOMP.
#' http://documentation.sas.com/api/docsets/stathpug/14.2/content/stathpug_code_hppriex3.htm?locale=en
#'
#' @references
#' SAS/STAT User's Guide: High-Performance Procedures. The HPPRINCOMP Procedure.
#' http://support.sas.com/documentation/cdl/en/stathpug/67524/HTML/default/viewer.htm#stathpug_hpprincomp_toc.htm
#'
#' @examples
#'
#' library(nipals)
#' head(uscrime)
#'
#' # SAS deletes rows with missing values
#' dat <- uscrime[complete.cases(uscrime), ]
#' dat <- as.matrix(dat[ , -1])
#' m1 <- nipals(dat) # complete-data method
#'
#' # Traditional NIPALS with missing data
#' dat <- uscrime
#' dat <- as.matrix(dat[ , -1])
#' m2 <- nipals(dat, gramschmidt=FALSE) # missing
#' round(crossprod(m2$loadings),3) # Prin Comps not quite orthogonal
#'
#' # Gram-Schmidt corrected NIPALS
#' m3 <- nipals(dat, gramschmidt=TRUE) # TRUE is default
#' round(crossprod(m3$loadings),3) # Prin Comps are orthogonal
#'
"uscrime"
## makeCacheMatrix() wraps a matrix together with a cache slot for its
## inverse. It returns a list of four closures sharing the same environment:
##   $set(y)       - replace the stored matrix and invalidate the cache
##   $get()        - return the stored matrix
##   $setmatrix(m) - store a computed inverse in the cache
##   $getmatrix()  - return the cached inverse (NULL until cacheSolve() runs)
## `x` now defaults to an empty matrix so makeCacheMatrix() can be called
## with no argument (backward compatible; callers passing a matrix behave
## exactly as before).
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL                       # cached inverse; NULL means "not computed"
  set <- function(y) {
    x <<- y
    m <<- NULL                    # new matrix -> old inverse is stale
  }
  get <- function() x
  setmatrix <- function(matrix) m <<- matrix
  getmatrix <- function() m
  list(set = set, get = get,
       setmatrix = setmatrix,
       getmatrix = getmatrix)
}

## cacheSolve() returns the inverse of the matrix stored in a
## makeCacheMatrix() object. On the first call it computes the inverse with
## solve() and stores it via $setmatrix(); subsequent calls return the cached
## value (announced with a message). Extra arguments in `...` are now
## forwarded to solve() -- previously they were accepted but silently ignored.
cacheSolve <- function(x, ...) {
  m <- x$getmatrix()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)                     # early exit: cache hit
  }
  data <- x$get()
  m <- solve(data, ...)           # fix: forward ... to solve()
  x$setmatrix(m)
  m
}
/cachematrix.R
no_license
ross-ek/ProgrammingAssignment2
R
false
false
2,453
r
## makeCacheMatrix() builds a closure-based wrapper around a matrix that can
## also hold a cached copy of its inverse. The returned list exposes:
##   $set(y)       - swap in a new matrix (clears the cache)
##   $get()        - read the stored matrix
##   $setmatrix(m) - write the cached inverse
##   $getmatrix()  - read the cached inverse (NULL before cacheSolve() runs)
makeCacheMatrix <- function(x) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setmatrix = function(matrix) cached_inverse <<- matrix,
    getmatrix = function() cached_inverse
  )
}

## cacheSolve() returns the inverse of the matrix held by a makeCacheMatrix()
## object: it serves the cached inverse when one exists, otherwise it computes
## the inverse with solve(), stores it back into the wrapper, and returns it.
cacheSolve <- function(x, ...) {
  cached <- x$getmatrix()
  if (is.null(cached)) {
    cached <- solve(x$get())
    x$setmatrix(cached)
  } else {
    message("getting cached data")
  }
  cached
}
# Least-squares regression line (exercise 7).
# Design matrix x: an intercept column of ones plus one predictor column.
x <- matrix(c(1, 1, 1, 1, 1, -5, -1, 3, 7, 5), nrow = 5)
x
# Response vector (ncol spelled out; `nc=` relied on partial matching).
y <- matrix(c(10, 8, 6, 4, 5), ncol = 1)
y
# Normal equations: b = (X'X)^-1 X'y  ->  intercept 7.5, slope -0.5
b <- solve(t(x) %*% x) %*% (t(x) %*% y)
b
xx <- x[, 2]
par(mfrow = c(1, 1))
plot(xx, y)
# Fitted line y = 7.5 - 0.5*x. Use lines() instead of par(new=T)+plot():
# the old overlay re-drew axes on a second coordinate system, and drawing
# in x order avoids the zig-zag caused by the unsorted predictor.
yy <- 7.5 + (-0.5 * xx)
ord <- order(xx)
lines(xx[ord], yy[ord])
/최소제곱회귀곡선7번.R
no_license
0general/linear-algebra
R
false
false
217
r
# Least-squares regression line (exercise 7).
# x is the design matrix: a column of ones (intercept) and the predictor.
x <- matrix(c(1, 1, 1, 1, 1, -5, -1, 3, 7, 5), nrow = 5)
x
# y is the response column vector.
y <- matrix(c(10, 8, 6, 4, 5), nc = 1)
y
# Coefficients from the normal equations: b = (X'X)^-1 X'y.
b <- solve((t(x) %*% x)) %*% (t(x) %*% y)
b
# Scatter the data, then overlay the fitted line y = 7.5 - 0.5*x on the
# same device using par(new=...) as in the original workflow.
xx <- x[, 2]
par(mfrow = c(1, 1))
plot(xx, y)
par(new = TRUE)
yy <- 7.5 + (-0.5 * xx)
par(new = TRUE)
plot(xx, yy, type = "l")
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/Debug.R
\name{Test_Box}
\alias{Test_Box}
\title{Test for class Box: Test of some functionalities on default box (users can't modify them), I test all the methods I use or I modified: constructor, print, operator [], physical, search and vector dimension, set coordinates}
\usage{
Test_Box(n)
}
\arguments{
\item{n}{is an int and is the input integer (n != 0)}
}
\value{
An int with value:
\item{0}{if it's all ok}
\item{1}{if there's some problem}
}
\description{
Test for class Box: Test of some functionalities on default box (users can't modify them), I test all the methods I use or I modified: constructor, print, operator [], physical, search and vector dimension, set coordinates
}
/man/Test_Box.Rd
no_license
sweetherb100/PointLoc_2D
R
false
false
788
rd
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/Debug.R
\name{Test_Box}
\alias{Test_Box}
\title{Test for class Box: Test of some functionalities on default box (users can't modify them), I test all the methods I use or I modified: constructor, print, operator [], physical, search and vector dimension, set coordinates}
\usage{
Test_Box(n)
}
\arguments{
\item{n}{is an int and is the input integer (n != 0)}
}
\value{
An int with value:
\item{0}{if it's all ok}
\item{1}{if there's some problem}
}
\description{
Test for class Box: Test of some functionalities on default box (users can't modify them), I test all the methods I use or I modified: constructor, print, operator [], physical, search and vector dimension, set coordinates
}
"parwei" <- function(lmom,checklmom=TRUE,...) { para <- rep(NA,3) names(para) <- c("zeta","beta","delta") if(length(lmom$L1) == 0) { # convert to named L-moments lmom <- lmorph(lmom) # nondestructive conversion! } if(checklmom & ! are.lmom.valid(lmom)) { warning("L-moments are invalid") return() } lmom$L1 <- -lmom$L1 lmom$LCV <- -lmom$LCV lmom$TAU3 <- -lmom$TAU3 lmom$L3 <- -lmom$L3 par.gev <- pargev(lmom) para[3] <- 1/par.gev$para[3] para[2] <- par.gev$para[2]*para[3] para[1] <- par.gev$para[1]+para[2] return(list(type = 'wei', para=para, source="parwei")) }
/R/parwei.R
no_license
wasquith/lmomco
R
false
false
658
r
"parwei" <- function(lmom,checklmom=TRUE,...) { para <- rep(NA,3) names(para) <- c("zeta","beta","delta") if(length(lmom$L1) == 0) { # convert to named L-moments lmom <- lmorph(lmom) # nondestructive conversion! } if(checklmom & ! are.lmom.valid(lmom)) { warning("L-moments are invalid") return() } lmom$L1 <- -lmom$L1 lmom$LCV <- -lmom$LCV lmom$TAU3 <- -lmom$TAU3 lmom$L3 <- -lmom$L3 par.gev <- pargev(lmom) para[3] <- 1/par.gev$para[3] para[2] <- par.gev$para[2]*para[3] para[1] <- par.gev$para[1]+para[2] return(list(type = 'wei', para=para, source="parwei")) }
# Standard testthat entry point for the CyAN package: R CMD check sources
# this file, which then discovers and runs every test under tests/testthat/.
library(testthat)
library(CyAN)

test_check("CyAN")
/tests/testthat.R
permissive
USGS-R/CyAN
R
false
false
52
r
# Standard testthat entry point for the CyAN package: R CMD check sources
# this file, which then discovers and runs every test under tests/testthat/.
library(testthat)
library(CyAN)

test_check("CyAN")
#' Format EBD data for occupancy modeling with `unmarked`
#'
#' Prepare a data frame of species observations for ingestion into the package
#' `unmarked` for hierarchical modeling of abundance and occurrence. The
#' function [unmarked::formatWide()] takes a data frame and converts it to one
#' of several `unmarked` objects, which can then be used for modeling. This
#' function converts data from a format in which each row is an observation
#' (e.g. as in the eBird Basic Dataset) to the esoteric format required by
#' [unmarked::formatWide()] in which each row is a site.
#'
#' @param x `data.frame`; observation data, e.g. from the eBird Basic Dataset
#'   (EBD), for **a single species**, that has been filtered to those with
#'   repeat visits by [filter_repeat_visits()].
#' @param site_id character; a unique identifier for each "site", typically
#'   identifying observations from a unique location by the same observer
#'   within a period of temporal closure. Data output from
#'   [filter_repeat_visits()] will have a `.site_id` variable that meets these
#'   requirements.
#' @param response character; the variable that will act as the response in
#'   modeling efforts, typically a binary variable indicating presence or
#'   absence or a count of individuals seen.
#' @param site_covs character; the variables that will act as site-level
#'   covariates, i.e. covariates that vary at the site level, for example,
#'   latitude/longitude or habitat predictors. If this parameter is missing, it
#'   will be assumed that any variable that is not an observation-level
#'   covariate (`obs_covs`) or the `site_id` is a site-level covariate.
#' @param obs_covs character; the variables that will act as observation-level
#'   covariates, i.e. covariates that vary within sites, at the level of
#'   observations, for example, time or length of observation.
#'
#' @details Hierarchical modeling requires repeat observations at each "site" to
#'   estimate detectability. A "site" is typically defined as a geographic
#'   location visited by the same observer within a period of temporal closure.
#'   To define these sites and filter out observations that do not correspond to
#'   repeat visits, users should use [filter_repeat_visits()], then pass the
#'   output to this function.
#'
#'   [format_unmarked_occu()] is designed to prepare data to be converted into
#'   an `unmarkedFrameOccu` object for occupancy modeling with
#'   [unmarked::occu()]; however, it can also be used to prepare data for
#'   conversion to an `unmarkedFramePCount` object for abundance modeling with
#'   [unmarked::pcount()].
#'
#' @return A data frame that can be processed by [unmarked::formatWide()].
#'   Each row will correspond to a unique site and, assuming there are a maximum
#'   of `N` observations per site, columns will be as follows:
#'
#'   1. The unique site identifier, named "site".
#'   2. `N` response columns, one for each observation, named "y.1", ..., "y.N".
#'   3. Columns for each of the site-level covariates.
#'   4. Groups of `N` columns of observation-level covariates, one column per
#'      covariate per observation, named "covariate_name.1", ...,
#'      "covariate_name.N".
#'
#' @export
#' @family modeling
#' @examples
#' # read and zero-fill the ebd data
#' f_ebd <- system.file("extdata/zerofill-ex_ebd.txt", package = "auk")
#' f_smpl <- system.file("extdata/zerofill-ex_sampling.txt", package = "auk")
#' # data must be for a single species
#' ebd_zf <- auk_zerofill(x = f_ebd, sampling_events = f_smpl,
#'                        species = "Collared Kingfisher",
#'                        collapse = TRUE)
#' occ <- filter_repeat_visits(ebd_zf, n_days = 30)
#' # format for unmarked
#' # typically one would join in habitat covariates prior to this step
#' occ_wide <- format_unmarked_occu(occ,
#'                                  response = "species_observed",
#'                                  site_covs = c("latitude", "longitude"),
#'                                  obs_covs = c("effort_distance_km",
#'                                               "duration_minutes"))
#' # create an unmarked object
#' if (requireNamespace("unmarked", quietly = TRUE)) {
#'   occ_um <- unmarked::formatWide(occ_wide, type = "unmarkedFrameOccu")
#'   unmarked::summary(occ_um)
#' }
#'
#' # this function can also be used for abundance modeling
#' abd <- ebd_zf %>%
#'   # convert count to integer, drop records with no count
#'   dplyr::mutate(observation_count = as.integer(observation_count)) %>%
#'   dplyr::filter(!is.na(observation_count)) %>%
#'   # filter to repeated visits
#'   filter_repeat_visits(n_days = 30)
#' # prepare for conversion to unmarkedFramePCount object
#' abd_wide <- format_unmarked_occu(abd,
#'                                  response = "observation_count",
#'                                  site_covs = c("latitude", "longitude"),
#'                                  obs_covs = c("effort_distance_km",
#'                                               "duration_minutes"))
#' # create an unmarked object
#' if (requireNamespace("unmarked", quietly = TRUE)) {
#'   abd_um <- unmarked::formatWide(abd_wide, type = "unmarkedFrameOccu")
#'   unmarked::summary(abd_um)
#' }
format_unmarked_occu <- function(x, site_id = "site",
                                 response = "species_observed",
                                 site_covs, obs_covs) {
  # checks on required arguments
  stopifnot(is.data.frame(x))
  stopifnot(is.character(site_id), length(site_id) == 1,
            site_id %in% names(x), all(!is.na(x[[site_id]])))
  stopifnot(is.character(response), length(response) == 1,
            response %in% names(x))
  # observation covariates: optional; default to none
  if (missing(obs_covs)) {
    obs_covs <- NULL
  } else {
    stopifnot(is.character(obs_covs), all(obs_covs %in% names(x)))
  }
  # site covariates: default to every column not otherwise accounted for
  if (missing(site_covs)) {
    site_covs <- setdiff(names(x), c(site_id, response, obs_covs))
  }
  if (length(site_covs) < 1) {
    stop("Must provide at least one site-level covariate")
  }
  # assign observation ids within sites (1..N per site, in row order)
  x <- dplyr::group_by_at(x, site_id)
  x <- dplyr::mutate(x, .obs_id = dplyr::row_number())
  x <- dplyr::ungroup(x)
  # response to wide: one column per within-site observation, "y.1".."y.N"
  x_resp <- dplyr::select(x, !!rlang::sym(site_id), .data$.obs_id,
                          !!rlang::sym(response))
  x_resp <- tidyr::spread(x_resp, .data$.obs_id, !!rlang::sym(response))
  names(x_resp)[-1] <- paste("y", names(x_resp)[-1], sep = ".")
  # site-level covariates
  x_site <- dplyr::select(x, !!rlang::sym(site_id), !!!rlang::syms(site_covs))
  # collapse to one row per site
  x_site <- dplyr::group_by_at(x_site, site_id)
  x_site <- dplyr::distinct(x_site)
  # check covariates are constant across site: a site with >1 distinct
  # covariate row would collapse to multiple rows, which is an error
  n_unique <- dplyr::count(dplyr::distinct(x_site))$n
  if (any(n_unique != 1)) {
    stop("Site-level covariates must be constant across sites")
  }
  x_site <- dplyr::ungroup(x_site)
  # observation-level covariates: one wide data frame per covariate
  obs_covs_dfs <- list()
  for (vr in obs_covs) {
    # convert to wide: columns "<covariate>.1".."<covariate>.N"
    x_obs <- dplyr::select(x, !!rlang::sym(site_id), .data$.obs_id,
                           !!rlang::sym(vr))
    x_obs <- tidyr::spread(x_obs, .data$.obs_id, !!rlang::sym(vr))
    names(x_obs)[-1] <- paste(vr, names(x_obs)[-1], sep = ".")
    obs_covs_dfs[[vr]] <- x_obs
  }
  # combine everything together: response + site covs, then each obs cov
  x_out <- dplyr::inner_join(x_resp, x_site, by = site_id)
  for (df in obs_covs_dfs) {
    x_out <- dplyr::left_join(x_out, df, by = site_id)
  }
  # rename site_id to "site" because required by unmarked
  names(x_out)[names(x_out) == site_id] <- "site"
  return(x_out)
}
/R/format-unmarked-occu.R
no_license
cran/auk
R
false
false
7,653
r
#' Format EBD data for occupancy modeling with `unmarked`
#'
#' Prepare a data frame of species observations for ingestion into the package
#' `unmarked` for hierarchical modeling of abundance and occurrence. The
#' function [unmarked::formatWide()] takes a data frame and converts it to one
#' of several `unmarked` objects, which can then be used for modeling. This
#' function converts data from a format in which each row is an observation
#' (e.g. as in the eBird Basic Dataset) to the esoteric format required by
#' [unmarked::formatWide()] in which each row is a site.
#'
#' @param x `data.frame`; observation data, e.g. from the eBird Basic Dataset
#'   (EBD), for **a single species**, that has been filtered to those with
#'   repeat visits by [filter_repeat_visits()].
#' @param site_id character; a unique identifier for each "site", typically
#'   identifying observations from a unique location by the same observer
#'   within a period of temporal closure. Data output from
#'   [filter_repeat_visits()] will have a `.site_id` variable that meets these
#'   requirements.
#' @param response character; the variable that will act as the response in
#'   modeling efforts, typically a binary variable indicating presence or
#'   absence or a count of individuals seen.
#' @param site_covs character; the variables that will act as site-level
#'   covariates, i.e. covariates that vary at the site level, for example,
#'   latitude/longitude or habitat predictors. If this parameter is missing, it
#'   will be assumed that any variable that is not an observation-level
#'   covariate (`obs_covs`) or the `site_id` is a site-level covariate.
#' @param obs_covs character; the variables that will act as observation-level
#'   covariates, i.e. covariates that vary within sites, at the level of
#'   observations, for example, time or length of observation.
#'
#' @details Hierarchical modeling requires repeat observations at each "site" to
#'   estimate detectability. A "site" is typically defined as a geographic
#'   location visited by the same observer within a period of temporal closure.
#'   To define these sites and filter out observations that do not correspond to
#'   repeat visits, users should use [filter_repeat_visits()], then pass the
#'   output to this function.
#'
#'   [format_unmarked_occu()] is designed to prepare data to be converted into
#'   an `unmarkedFrameOccu` object for occupancy modeling with
#'   [unmarked::occu()]; however, it can also be used to prepare data for
#'   conversion to an `unmarkedFramePCount` object for abundance modeling with
#'   [unmarked::pcount()].
#'
#' @return A data frame that can be processed by [unmarked::formatWide()].
#'   Each row will correspond to a unique site and, assuming there are a maximum
#'   of `N` observations per site, columns will be as follows:
#'
#'   1. The unique site identifier, named "site".
#'   2. `N` response columns, one for each observation, named "y.1", ..., "y.N".
#'   3. Columns for each of the site-level covariates.
#'   4. Groups of `N` columns of observation-level covariates, one column per
#'      covariate per observation, named "covariate_name.1", ...,
#'      "covariate_name.N".
#'
#' @export
#' @family modeling
#' @examples
#' # read and zero-fill the ebd data
#' f_ebd <- system.file("extdata/zerofill-ex_ebd.txt", package = "auk")
#' f_smpl <- system.file("extdata/zerofill-ex_sampling.txt", package = "auk")
#' # data must be for a single species
#' ebd_zf <- auk_zerofill(x = f_ebd, sampling_events = f_smpl,
#'                        species = "Collared Kingfisher",
#'                        collapse = TRUE)
#' occ <- filter_repeat_visits(ebd_zf, n_days = 30)
#' # format for unmarked
#' # typically one would join in habitat covariates prior to this step
#' occ_wide <- format_unmarked_occu(occ,
#'                                  response = "species_observed",
#'                                  site_covs = c("latitude", "longitude"),
#'                                  obs_covs = c("effort_distance_km",
#'                                               "duration_minutes"))
#' # create an unmarked object
#' if (requireNamespace("unmarked", quietly = TRUE)) {
#'   occ_um <- unmarked::formatWide(occ_wide, type = "unmarkedFrameOccu")
#'   unmarked::summary(occ_um)
#' }
#'
#' # this function can also be used for abundance modeling
#' abd <- ebd_zf %>%
#'   # convert count to integer, drop records with no count
#'   dplyr::mutate(observation_count = as.integer(observation_count)) %>%
#'   dplyr::filter(!is.na(observation_count)) %>%
#'   # filter to repeated visits
#'   filter_repeat_visits(n_days = 30)
#' # prepare for conversion to unmarkedFramePCount object
#' abd_wide <- format_unmarked_occu(abd,
#'                                  response = "observation_count",
#'                                  site_covs = c("latitude", "longitude"),
#'                                  obs_covs = c("effort_distance_km",
#'                                               "duration_minutes"))
#' # create an unmarked object
#' if (requireNamespace("unmarked", quietly = TRUE)) {
#'   abd_um <- unmarked::formatWide(abd_wide, type = "unmarkedFrameOccu")
#'   unmarked::summary(abd_um)
#' }
format_unmarked_occu <- function(x, site_id = "site",
                                 response = "species_observed",
                                 site_covs, obs_covs) {
  # checks on required arguments
  stopifnot(is.data.frame(x))
  stopifnot(is.character(site_id), length(site_id) == 1,
            site_id %in% names(x), all(!is.na(x[[site_id]])))
  stopifnot(is.character(response), length(response) == 1,
            response %in% names(x))
  # observation covariates: optional; default to none
  if (missing(obs_covs)) {
    obs_covs <- NULL
  } else {
    stopifnot(is.character(obs_covs), all(obs_covs %in% names(x)))
  }
  # site covariates: default to every column not otherwise accounted for
  if (missing(site_covs)) {
    site_covs <- setdiff(names(x), c(site_id, response, obs_covs))
  }
  if (length(site_covs) < 1) {
    stop("Must provide at least one site-level covariate")
  }
  # assign observation ids within sites (1..N per site, in row order)
  x <- dplyr::group_by_at(x, site_id)
  x <- dplyr::mutate(x, .obs_id = dplyr::row_number())
  x <- dplyr::ungroup(x)
  # response to wide: one column per within-site observation, "y.1".."y.N"
  x_resp <- dplyr::select(x, !!rlang::sym(site_id), .data$.obs_id,
                          !!rlang::sym(response))
  x_resp <- tidyr::spread(x_resp, .data$.obs_id, !!rlang::sym(response))
  names(x_resp)[-1] <- paste("y", names(x_resp)[-1], sep = ".")
  # site-level covariates
  x_site <- dplyr::select(x, !!rlang::sym(site_id), !!!rlang::syms(site_covs))
  # collapse to one row per site
  x_site <- dplyr::group_by_at(x_site, site_id)
  x_site <- dplyr::distinct(x_site)
  # check covariates are constant across site: a site with >1 distinct
  # covariate row would collapse to multiple rows, which is an error
  n_unique <- dplyr::count(dplyr::distinct(x_site))$n
  if (any(n_unique != 1)) {
    stop("Site-level covariates must be constant across sites")
  }
  x_site <- dplyr::ungroup(x_site)
  # observation-level covariates: one wide data frame per covariate
  obs_covs_dfs <- list()
  for (vr in obs_covs) {
    # convert to wide: columns "<covariate>.1".."<covariate>.N"
    x_obs <- dplyr::select(x, !!rlang::sym(site_id), .data$.obs_id,
                           !!rlang::sym(vr))
    x_obs <- tidyr::spread(x_obs, .data$.obs_id, !!rlang::sym(vr))
    names(x_obs)[-1] <- paste(vr, names(x_obs)[-1], sep = ".")
    obs_covs_dfs[[vr]] <- x_obs
  }
  # combine everything together: response + site covs, then each obs cov
  x_out <- dplyr::inner_join(x_resp, x_site, by = site_id)
  for (df in obs_covs_dfs) {
    x_out <- dplyr::left_join(x_out, df, by = site_id)
  }
  # rename site_id to "site" because required by unmarked
  names(x_out)[names(x_out) == site_id] <- "site"
  return(x_out)
}
#' @importFrom data.table fread
#' @importFrom dplyr filter bind_rows
NULL

#' Create a phyloseq taxon table
#'
#' Extracts taxonomic information from the otu_table and the NCBI taxonomy
#' file to create a phyloseq formatted tax_table. The NCBI taxonomy file is
#' provided with the package as taxonomy.tsv.gz.
#'
#' @param OtuTable produced by convert_to_otu_table; only its row names
#'   (taxon names) are used here.
#' @param TAXON taxonomic level (first letter must be capitalized). It should
#'   be the same taxonomic level that its corresponding otu table is at.
#'
#' @return phyloseq formatted taxonomic table
#'
#' @export
#'
convert_to_taxa_table <- function(OtuTable, TAXON){
  # Validate the requested taxonomic level up front.
  if ( !(TAXON %in% c("Domain", "Phylum", "Class", "Order", "Family", "Genus", "Species")) ) {
    # fix: old message claimed a "file" didn't exist; TAXON is a rank name
    stop(paste0("Specified taxonomic level \"", TAXON, "\" is not one of: ",
                "Domain, Phylum, Class, Order, Family, Genus, Species."))
  }
  # Taxon names whose lineages must be reconstructed.
  taxa_name <- base::row.names(OtuTable)
  # Empty accumulator declaring the seven canonical NCBI rank columns.
  taxa_table <- base::data.frame(superkingdom = character(),
                                 phylum = character(),
                                 class = character(),
                                 order = character(),
                                 family = character(),
                                 genus = character(),
                                 species = character(),
                                 stringsAsFactors = FALSE)
  # Locate the bundled taxonomy file. Fix: installed packages drop the
  # "inst/" prefix, so system.file() must not include it — the old
  # "inst/extdata/..." path only resolved in a source checkout.
  ncbi_taxa_filepath <- system.file("extdata", "taxonomy.tsv.gz",
                                    package = "MetaComp")
  # Read the taxonomy file via an external gunzip pipe (kept for
  # compatibility; recent data.table can read .gz paths directly).
  data <- base::as.data.frame(
    data.table::fread(base::sprintf('gunzip -c %s', ncbi_taxa_filepath),
                      header = TRUE))
  base::colnames(data) <- c("taxid", "dont_know", "parent_taxid", "LEVEL", "NAME")
  # Walk each taxon up to the root (taxid "1"), recording every named rank.
  for (taxa in taxa_name){
    taxon_level <- dplyr::filter(data, NAME == taxa)$LEVEL[1]
    parent_taxID <- dplyr::filter(data, NAME == taxa)$parent_taxid[1]
    taxID <- dplyr::filter(data, NAME == taxa)$taxid[1]
    one_row <- base::data.frame(taxon_level = taxa, stringsAsFactors = FALSE)
    colnames(one_row) <- taxon_level
    # robustness: a taxon absent from the table yields NA parents; the old
    # while(NA != "1") would error, so guard with !is.na()
    while (!is.na(parent_taxID) && parent_taxID != "1") {
      # rank, name and taxid of the parent taxon
      taxon_level <- dplyr::filter(data, taxid == parent_taxID)$LEVEL[1]
      taxon_name <- dplyr::filter(data, taxid == parent_taxID)$NAME[1]
      taxID <- dplyr::filter(data, taxid == parent_taxID)$taxid[1]
      one_row[taxon_level] <- taxon_name
      # step one level up the lineage
      parent_taxID <- dplyr::filter(data, taxid == taxID)$parent_taxid[1]
    }
    taxa_table <- dplyr::bind_rows(taxa_table, one_row)
  }
  # Fix: keep only the seven canonical ranks BEFORE renaming (bind_rows may
  # have added extra columns such as "no rank"); the old code renamed first
  # and discarded the subset() result entirely.
  taxa_table <- base::subset(taxa_table,
                             select = c("superkingdom", "phylum", "class",
                                        "order", "family", "genus", "species"))
  base::colnames(taxa_table) <- c("Domain", "Phylum", "Class", "Order",
                                  "Family", "Genus", "Species")
  base::rownames(taxa_table) <- taxa_table[, TAXON]
  phyloseq::tax_table(base::as.matrix(taxa_table))
}
/R/convert_to_taxa_table.R
no_license
mshakya/MetaComp
R
false
false
3,453
r
#' @importFrom data.table fread
#' @importFrom dplyr filter bind_rows
NULL

#' Create a phyloseq taxon table
#'
#' Extracts taxonomic information from the otu_table and ncbi taxonomy file to
#' to create phyloseq formatted tax_table. ncbi taxonomy file is provided with the package
#' taxonomy.tsv.gz
#'
#'
#' @param OtuTable produced by convert_to_otu_table
#'
#' @param TAXON taxonomic level (First letter must be caps). It should be the same taxonomic level
#' that its corresponding otu table is at.
#'
#' @return phyloseq formatted taxonomic table
#'
#' @export
#'
#'
convert_to_taxa_table <- function(OtuTable, TAXON){
  # check if the Taxon is the right one
  # NOTE(review): the error text says "file" but TAXON is a rank name — the
  # message is misleading and should be reworded.
  if ( !(TAXON %in% c("Domain", "Phylum", "Class", "Order", "Family", "Genus", "Species")) ) {
    stop(paste("Specified file \"", TAXON, "\" doesn't exist or not correctly formatted!"))
  }
  #
  # get taxa name from otu table (only row names of OtuTable are used)
  taxa_name <- base::row.names(OtuTable)
  #
  # create an empty taxa table with the seven canonical NCBI rank columns
  taxa_table <- base::data.frame(superkingdom = character(),
                                 phylum = character(),
                                 class = character(),
                                 order = character(),
                                 family = character(),
                                 genus = character(),
                                 species = character(),
                                 stringsAsFactors = F)
  #
  # get the path of the taxonomy table concatenated to command to unzip it
  # NOTE(review): installed packages drop the "inst/" prefix, so this
  # system.file() call likely returns "" after installation — verify; the
  # conventional form is system.file("extdata", ..., package = "MetaComp").
  ncbi_taxa_filepath <- system.file("inst/extdata/taxonomy.tsv.gz", package="MetaComp")
  #
  # read in the taxonomy file through an external `gunzip -c` shell pipe
  data <- base::as.data.frame(data.table::fread(base::sprintf('gunzip -c %s', ncbi_taxa_filepath), header=T))
  base::colnames(data) <- c("taxid", "dont_know", "parent_taxid", "LEVEL", "NAME")
  #
  # loop through name of taxa and build a taxa_table by walking each taxon's
  # lineage up to the NCBI root (parent taxid "1")
  for (taxa in taxa_name){
    #
    taxon_level <- dplyr::filter(data, NAME == taxa)$LEVEL[1]
    #
    parent_taxID <- dplyr::filter(data, NAME == taxa)$parent_taxid[1]
    #
    taxID <- dplyr::filter(data, NAME == taxa)$taxid[1]
    #
    # one_row accumulates this taxon's named ranks; its single starting
    # column is renamed to the taxon's own rank
    one_row <- base::data.frame(taxon_level = taxa)
    colnames(one_row) <- taxon_level
    #
    if (parent_taxID != "1") {
      while (parent_taxID != "1") {
        #
        # get taxon LEVEL of parent taxa
        taxon_level <- dplyr::filter(data, taxid == parent_taxID)$LEVEL[1]
        #
        # get taxon NAME of parent taxa
        taxon_name <- dplyr::filter(data, taxid == parent_taxID)$NAME[1]
        #
        # get taxid
        taxID <- dplyr::filter(data, taxid == parent_taxID)$taxid[1]
        #
        # add that to the one_row data frame
        one_row[taxon_level] <- taxon_name
        #
        # get parent_taxID and loop until we reach the root of the taxonomy
        parent_taxID <- dplyr::filter(data, taxid == taxID)$parent_taxid[1]
        #
      }
      taxa_table <- dplyr::bind_rows(taxa_table, one_row)
    } else {
      taxa_table <- dplyr::bind_rows(taxa_table, one_row)
    }
  }
  base::colnames(taxa_table) <- c("Domain", "Phylum", "Class", "Order", "Family", "Genus", "Species")
  # NOTE(review): this subset() result is discarded — the column selection
  # is a no-op as written; it was presumably meant to be assigned back to
  # taxa_table (and performed before the rename above).
  base::subset(taxa_table, select=c("Domain", "Phylum", "Class", "Order", "Family", "Genus", "Species"))
  base::rownames(taxa_table) <- taxa_table[, TAXON]
  phyloseq::tax_table(base::as.matrix(taxa_table))
}
## ---------------------------
##
## Script name: Check Model Fit
##
## Purpose of script: load fitted Hmsc models and report their
##   (non-cross-validated) explanatory power.
##
## Author: Daniel Smith
##
## Date Created: 2020-11-24
##
## Email: dansmi@ceh.ac.uk
##
## ---------------------------

library(coda)
library(Hmsc)

## nice_load(): load one named object out of an .RData file without dragging
## the file's other objects into the global environment. The file is loaded
## into a throwaway local environment, converted to a lazy-load database
## (named after the file, minus directory and .RData extension), and the
## requested object is then assigned into .GlobalEnv — under `rename` if one
## is supplied, otherwise under its original name.
## NOTE: relies on the internal tools:::makeLazyLoadDB().
nice_load <- function(file, object, rename = NULL){
  if (!requireNamespace("stringr", quietly = TRUE)) {
    stop("stringr needed for this function to work. Please install it.",
         call. = FALSE)
  }
  # Derive the DB name: strip any leading directories, then the .RData
  # extension. Fix: the second str_replace previously operated on `file`
  # instead of `file_string`, silently discarding the directory strip.
  file_string <- stringr::str_replace(file, "^.*/", "")
  file_string <- stringr::str_replace(file_string, "\\.RData", "")

  # Load the .RData contents into an isolated local environment.
  e <- local({load(file); environment()})

  # Make a lazy-load database from that environment and attach it here.
  tools:::makeLazyLoadDB(e, file_string)
  lazyLoad(file_string)

  # Force the lazy-load promise for the requested object.
  get(object)

  if(!is.null(rename)){
    # Publish the object to the global environment under the new name.
    assign(eval(rename), get(object), envir = .GlobalEnv)
    rm(e)
  } else {
    rm(e)
    # Publish under the original name.
    assign(eval(object), get(object), envir = .GlobalEnv)
  }
}

# Load --------------------------------------------------------------------

abu <- nice_load("Models/Abundance_Thin300/ModelExtended.RData", "output")
pa <- nice_load("Models/PA_Thin300//ModelExtended.RData", "output")

# Check Model Fit ---------------------------------------------------------

models <- list(abu = abu, pa = pa)

## Explanatory Power (not cross-validated)
MF <- list()
for (i in seq_along(models)) {
  preds <- computePredictedValues(models[[i]])
  MF[[i]] <- evaluateModelFit(hM = models[[i]], predY = preds)
}
names(MF) <- names(models)

MF
/Scripts/Checl_Expl_Fit.R
no_license
dansmi-hub/SomersetLevels
R
false
false
2,036
r
## --------------------------- ## ## Script name: Check Model Fit ## ## Purpose of script: ## ## Author: Daniel Smith ## ## Date Created: 2020-11-24 ## ## Email: dansmi@ceh.ac.uk ## ## --------------------------- library(coda) library(Hmsc) nice_load <- function(file, object, rename = NULL){ if (!requireNamespace("stringr", quietly = TRUE)) { stop("stringr needed for this function to work. Please install it.", call. = FALSE) } # assertthat::assert_that(is.character(file), "file must be a string") # assertthat::assert_that(is.character(object), "object must be a string") # assertthat::assert_that((is.character(rename) | is.null(rename)), "rename must be a string or NULL") file_string <- stringr::str_replace(file, "^.*/", "") file_string <- stringr::str_replace(file, "\\.RData", "") # get data frame into local environment e = local({load(file); environment()}) # make lazy-load database tools:::makeLazyLoadDB(e, file_string) lazyLoad(file_string) # load object get(object) if(!is.null(rename) ){ # create object in local env that has name matching value for object, with new name same as rename assign(eval(rename), get(object), envir = .GlobalEnv) # assign(ls()[ls() == eval(object)], rename) rm(e) # return(get(eval(quote(rename)))) } else{ rm(e) assign(eval(object), get(object), envir = .GlobalEnv) } } # Load -------------------------------------------------------------------- abu <- nice_load("Models/Abundance_Thin300/ModelExtended.RData", "output") pa <- nice_load("Models/PA_Thin300//ModelExtended.RData", "output") # Check Model Fit --------------------------------------------------------- models <- list(abu = abu, pa = pa) ## Explanatory Power MF = list() for (i in seq_along(models)) { # Explanatory Power - Not Cross Validated preds = computePredictedValues(models[[i]]) MF[[i]] = evaluateModelFit(hM = models[[i]], predY = preds) } names(MF) = names(models) MF
####################################################################### # # # Package: lcc # # # # File: CCC.R # # Contains: CCC function # # # # Written by Thiago de Paula Oliveira # # copyright (c) 2017-18, Thiago P. Oliveira # # # # First version: 11/10/2017 # # Last update: 29/07/2019 # # License: GNU General Public License version 2 (June, 1991) or later # # # ####################################################################### ##' @title Internal Function to Compute the Sampled Concordance ##' Correlation Values. ##' ##' @description This is an internally called functions used to compute ##' the sampled concordance correlation values. ##' ##' @usage NULL ##' @return No return value, called for side effects ##' @author Thiago de Paula Oliveira, \email{thiago.paula.oliveira@@alumni.usp.br} ##' ##' @importFrom stats var cov cor ##' ##' @keywords internal CCC<-function(Y1,Y2){ data=data.frame(Y1,Y2) m1<-mean(Y1) m2<-mean(Y2) S1<-var(Y1) S2<-var(Y2) S12<-cov(Y1, Y2) CCC_lin<-2*S12/(S1+S2+(m1-m2)^2) return(CCC_lin) }
/R/CCC.R
no_license
cran/lcc
R
false
false
1,697
r
####################################################################### # # # Package: lcc # # # # File: CCC.R # # Contains: CCC function # # # # Written by Thiago de Paula Oliveira # # copyright (c) 2017-18, Thiago P. Oliveira # # # # First version: 11/10/2017 # # Last update: 29/07/2019 # # License: GNU General Public License version 2 (June, 1991) or later # # # ####################################################################### ##' @title Internal Function to Compute the Sampled Concordance ##' Correlation Values. ##' ##' @description This is an internally called functions used to compute ##' the sampled concordance correlation values. ##' ##' @usage NULL ##' @return No return value, called for side effects ##' @author Thiago de Paula Oliveira, \email{thiago.paula.oliveira@@alumni.usp.br} ##' ##' @importFrom stats var cov cor ##' ##' @keywords internal CCC<-function(Y1,Y2){ data=data.frame(Y1,Y2) m1<-mean(Y1) m2<-mean(Y2) S1<-var(Y1) S2<-var(Y2) S12<-cov(Y1, Y2) CCC_lin<-2*S12/(S1+S2+(m1-m2)^2) return(CCC_lin) }
#set wd setwd("/Users/pranavpalli/Desktop/summer_21/") #files data <- read.csv(file = "./data/time_function/reg_files/wr1_kasii_reg.csv") #file of the avg data to read-- last was wr1_kasii #vectors times <- c(1, 2, 3) #1 = R5, 2 = R5/6, 3 = R6 mat <- matrix(nrow = 0, ncol = 1) #matrix of the size of the df to append df <- data.frame(mat) #new dataframe to store avg linear regression data for(x in 1:nrow(data)){ #variables normalized_gene_counts <- data[x, c("A4_avg", "B4_avg", "C4_avg")] #rows in each gene type-1, 2, 3, 4 for all of them gene_count_list <- c() for(value in normalized_gene_counts){ gene_count_list <- append(gene_count_list, value) } coef <- coefficients(lm(gene_count_list ~ times)) slope_half <- paste(as.numeric(coef[2]), "(R)", sep = "") intercept_half <- paste("+", as.numeric(coef[1]), sep = " ") equation <- paste(slope_half, intercept_half, sep = " ") df[x, ] <- equation } write.table(df, "./data/lm_results/test.csv", quote = FALSE, row.names = FALSE, col.names = FALSE) #appends rows print("done")
/scripts/plot_lm_results.R
no_license
ppalli2/summer_21
R
false
false
1,071
r
#set wd setwd("/Users/pranavpalli/Desktop/summer_21/") #files data <- read.csv(file = "./data/time_function/reg_files/wr1_kasii_reg.csv") #file of the avg data to read-- last was wr1_kasii #vectors times <- c(1, 2, 3) #1 = R5, 2 = R5/6, 3 = R6 mat <- matrix(nrow = 0, ncol = 1) #matrix of the size of the df to append df <- data.frame(mat) #new dataframe to store avg linear regression data for(x in 1:nrow(data)){ #variables normalized_gene_counts <- data[x, c("A4_avg", "B4_avg", "C4_avg")] #rows in each gene type-1, 2, 3, 4 for all of them gene_count_list <- c() for(value in normalized_gene_counts){ gene_count_list <- append(gene_count_list, value) } coef <- coefficients(lm(gene_count_list ~ times)) slope_half <- paste(as.numeric(coef[2]), "(R)", sep = "") intercept_half <- paste("+", as.numeric(coef[1]), sep = " ") equation <- paste(slope_half, intercept_half, sep = " ") df[x, ] <- equation } write.table(df, "./data/lm_results/test.csv", quote = FALSE, row.names = FALSE, col.names = FALSE) #appends rows print("done")
# 1.1 parole = read.csv("parole.csv") str(parole) # 1.2 sum(parole$violator == 1) # 1.3 parole$state = as.factor(parole$state) parole$crime = as.factor(parole$crime) # 3.1 - 3.2 set.seed(144) library(caTools) split = sample.split(parole$violator, SplitRatio = 0.7) train = subset(parole, split == TRUE) test = subset(parole, split == FALSE) # 4.1 Model1 = glm(violator ~ ., data=train, family="binomial") summary(Model1) # 4.3 exampleMan = data.frame(male = 1, race = 1, age = 50, state = as.factor(1), time.served = 3, max.sentence = 12, multiple.offenses = 0, crime = as.factor(2)) probability = predict(Model1, type="response", newdata=exampleMan) odds = probability / (1 - probability) odds probability # 5.1 predictedProb = predict(Model1, type="response", newdata=test) max(predictedProb) # 5.2 table(test$violator, predictedProb > 0.5) 12 / (12 + 11) #sensitivity 167 / (167 + 12) #specificity (167 + 12) / (167 + 12 + 12 + 11) #accuracy # 5.3 (167 + 12) / (167 + 12 + 12 + 11) # 5.6 library(ROCR) ROCRpred = prediction(predictedProb, test$violator) as.numeric(performance(ROCRpred, "auc")@y.values)
/Assignment 3/2. Predicting Parole Violators/Assignment 3.2.R
no_license
bastienbrier/MIT-15.071x
R
false
false
1,116
r
# 1.1 parole = read.csv("parole.csv") str(parole) # 1.2 sum(parole$violator == 1) # 1.3 parole$state = as.factor(parole$state) parole$crime = as.factor(parole$crime) # 3.1 - 3.2 set.seed(144) library(caTools) split = sample.split(parole$violator, SplitRatio = 0.7) train = subset(parole, split == TRUE) test = subset(parole, split == FALSE) # 4.1 Model1 = glm(violator ~ ., data=train, family="binomial") summary(Model1) # 4.3 exampleMan = data.frame(male = 1, race = 1, age = 50, state = as.factor(1), time.served = 3, max.sentence = 12, multiple.offenses = 0, crime = as.factor(2)) probability = predict(Model1, type="response", newdata=exampleMan) odds = probability / (1 - probability) odds probability # 5.1 predictedProb = predict(Model1, type="response", newdata=test) max(predictedProb) # 5.2 table(test$violator, predictedProb > 0.5) 12 / (12 + 11) #sensitivity 167 / (167 + 12) #specificity (167 + 12) / (167 + 12 + 12 + 11) #accuracy # 5.3 (167 + 12) / (167 + 12 + 12 + 11) # 5.6 library(ROCR) ROCRpred = prediction(predictedProb, test$violator) as.numeric(performance(ROCRpred, "auc")@y.values)
my_function <- function(xxx, yyy, zzz) { ttt1 <- 10 print(t<caret>tt1) }
/testData/rename/renameLocalVariableUsage.after.R
permissive
JetBrains/Rplugin
R
false
false
78
r
my_function <- function(xxx, yyy, zzz) { ttt1 <- 10 print(t<caret>tt1) }
library(dplyr) # Find current directory curr_dir <- getwd() # Download zip and extract files in current dir download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", paste(curr_dir, "/projectfiles.zip", sep="")) unzip(paste(curr_dir, "/projectfiles.zip", sep="")) # Vector of activity names (walking, running, etc) activity_labels <- read.table(paste(curr_dir, "/UCI HAR Dataset/activity_labels.txt", sep="")) # Vector of feature names (acceleration mean, gyro std dev, etc) features <- read.table(paste(curr_dir,"/UCI HAR Dataset/features.txt", sep="")) # Features will become column names for the datasets set_columns <- make.names(names=features[,2], unique=TRUE, allow_ = TRUE) # Load all info regarding the test dataset test_subjects <- read.table(paste(curr_dir, "/UCI HAR Dataset/test/subject_test.txt", sep="")) colnames(test_subjects) <- c("subject") test_activities <- read.table(paste(curr_dir, "/UCI HAR Dataset/test/y_test.txt", sep="")) colnames(test_activities) <- c("activity") test_set <- read.table(paste(curr_dir,"/UCI HAR Dataset/test/X_test.txt", sep="")) # Load all info regarding the train dataset train_subjects <- read.table(paste(curr_dir, "/UCI HAR Dataset/train/subject_train.txt", sep="")) colnames(train_subjects) <- c("subject") train_activities <- read.table(paste(curr_dir, "/UCI HAR Dataset/train/y_train.txt", sep="")) colnames(train_activities) <- c("activity") train_set <- read.table(paste(curr_dir, "/UCI HAR Dataset/train/X_train.txt", sep="")) # Set columns of sets colnames(test_set) <- set_columns colnames(train_set) <- set_columns # Add columns with subjects and activities for both datasets train_set$activity <- train_activities$activity train_set$subject <- train_subjects$subject test_set$activity <- test_activities$activity test_set$subject <- test_subjects$subject # Combine both databases merged_sets <- rbind(test_set, train_set) # Translate activity into a readable name merged_sets <- 
merge(merged_sets, activity_labels, by.x="activity", by.y="V1", sort=FALSE) # Select only the means and standard deviation columns means_stds <- select(merged_sets, V2, subject, contains(".mean."), contains(".std")) colnames(means_stds)[1] = "activity_name" # Make sure both subject and activity columns # are factors so grouping and summarizing works properly means_stds$subject <- factor(means_stds$subject) means_stds$activity_name <- factor(means_stds$activity_name) # Summarise by subject and activity grouped_set <- group_by(means_stds, subject, activity_name) means_summary<-summarise_each(grouped_set,funs(mean)) # Write out to file write.table(means_summary, paste(curr_dir, "/output.txt", sep=""), row.names = FALSE)
/run_analysis.R
no_license
pilimayora/get-data-course-project
R
false
false
2,730
r
library(dplyr) # Find current directory curr_dir <- getwd() # Download zip and extract files in current dir download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", paste(curr_dir, "/projectfiles.zip", sep="")) unzip(paste(curr_dir, "/projectfiles.zip", sep="")) # Vector of activity names (walking, running, etc) activity_labels <- read.table(paste(curr_dir, "/UCI HAR Dataset/activity_labels.txt", sep="")) # Vector of feature names (acceleration mean, gyro std dev, etc) features <- read.table(paste(curr_dir,"/UCI HAR Dataset/features.txt", sep="")) # Features will become column names for the datasets set_columns <- make.names(names=features[,2], unique=TRUE, allow_ = TRUE) # Load all info regarding the test dataset test_subjects <- read.table(paste(curr_dir, "/UCI HAR Dataset/test/subject_test.txt", sep="")) colnames(test_subjects) <- c("subject") test_activities <- read.table(paste(curr_dir, "/UCI HAR Dataset/test/y_test.txt", sep="")) colnames(test_activities) <- c("activity") test_set <- read.table(paste(curr_dir,"/UCI HAR Dataset/test/X_test.txt", sep="")) # Load all info regarding the train dataset train_subjects <- read.table(paste(curr_dir, "/UCI HAR Dataset/train/subject_train.txt", sep="")) colnames(train_subjects) <- c("subject") train_activities <- read.table(paste(curr_dir, "/UCI HAR Dataset/train/y_train.txt", sep="")) colnames(train_activities) <- c("activity") train_set <- read.table(paste(curr_dir, "/UCI HAR Dataset/train/X_train.txt", sep="")) # Set columns of sets colnames(test_set) <- set_columns colnames(train_set) <- set_columns # Add columns with subjects and activities for both datasets train_set$activity <- train_activities$activity train_set$subject <- train_subjects$subject test_set$activity <- test_activities$activity test_set$subject <- test_subjects$subject # Combine both databases merged_sets <- rbind(test_set, train_set) # Translate activity into a readable name merged_sets <- 
merge(merged_sets, activity_labels, by.x="activity", by.y="V1", sort=FALSE) # Select only the means and standard deviation columns means_stds <- select(merged_sets, V2, subject, contains(".mean."), contains(".std")) colnames(means_stds)[1] = "activity_name" # Make sure both subject and activity columns # are factors so grouping and summarizing works properly means_stds$subject <- factor(means_stds$subject) means_stds$activity_name <- factor(means_stds$activity_name) # Summarise by subject and activity grouped_set <- group_by(means_stds, subject, activity_name) means_summary<-summarise_each(grouped_set,funs(mean)) # Write out to file write.table(means_summary, paste(curr_dir, "/output.txt", sep=""), row.names = FALSE)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sagemaker_operations.R \name{sagemaker_stop_hyper_parameter_tuning_job} \alias{sagemaker_stop_hyper_parameter_tuning_job} \title{Stops a running hyperparameter tuning job and all running training jobs that the tuning job launched} \usage{ sagemaker_stop_hyper_parameter_tuning_job(HyperParameterTuningJobName) } \arguments{ \item{HyperParameterTuningJobName}{[required] The name of the tuning job to stop.} } \description{ Stops a running hyperparameter tuning job and all running training jobs that the tuning job launched. } \details{ All model artifacts output from the training jobs are stored in Amazon Simple Storage Service (Amazon S3). All data that the training jobs write to Amazon CloudWatch Logs are still available in CloudWatch. After the tuning job moves to the \code{Stopped} state, it releases all reserved resources for the tuning job. } \section{Request syntax}{ \preformatted{svc$stop_hyper_parameter_tuning_job( HyperParameterTuningJobName = "string" ) } } \keyword{internal}
/cran/paws.machine.learning/man/sagemaker_stop_hyper_parameter_tuning_job.Rd
permissive
johnnytommy/paws
R
false
true
1,078
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sagemaker_operations.R \name{sagemaker_stop_hyper_parameter_tuning_job} \alias{sagemaker_stop_hyper_parameter_tuning_job} \title{Stops a running hyperparameter tuning job and all running training jobs that the tuning job launched} \usage{ sagemaker_stop_hyper_parameter_tuning_job(HyperParameterTuningJobName) } \arguments{ \item{HyperParameterTuningJobName}{[required] The name of the tuning job to stop.} } \description{ Stops a running hyperparameter tuning job and all running training jobs that the tuning job launched. } \details{ All model artifacts output from the training jobs are stored in Amazon Simple Storage Service (Amazon S3). All data that the training jobs write to Amazon CloudWatch Logs are still available in CloudWatch. After the tuning job moves to the \code{Stopped} state, it releases all reserved resources for the tuning job. } \section{Request syntax}{ \preformatted{svc$stop_hyper_parameter_tuning_job( HyperParameterTuningJobName = "string" ) } } \keyword{internal}
#Data inlezen mijnData <- read.csv(file.choose()) data <- mijnData #Waarden aanpassen voor titels boxplot mijnData$rt <- as.character(mijnData$rt) mijnData$muziek <- as.character(mijnData$muziek) mijnData$rt[mijnData$rt=="Ja"] <- "Met Retrieval Practice" mijnData$rt[mijnData$rt=="Nee"] <- "Zonder Retrieval Practice" mijnData$muziek[mijnData$muziek=="Ja"] <- "Met Muziek" mijnData$muziek[mijnData$muziek=="Nee"] <- "Zonder Muziek" #Data vergelijken enkel retrieval practice boxplot(score_1 ~ rt ,data = data, main="Resultatenanalyse Score 1",ylab="Retrieval Practice", ylim = c(0,25), xlab="Score",horizontal=TRUE, col = "skyblue") boxplot(score_2 ~ rt ,data = data, main="Resultatenanalyse Score 2",ylab="Retrieval Practice", ylim = c(0,25), xlab="Score",horizontal=TRUE, col = "skyblue") #Data vergelijken enkel muziek boxplot(score_1 ~ muziek ,data = data, main="Resultatenanalyse Score 1",ylab="Muziek", ylim = c(0,25), xlab="Score",horizontal=TRUE, col = "skyblue") boxplot(score_2 ~ muziek ,data = data, main="Resultatenanalyse Score 2",ylab="Muziek", ylim = c(0,25), xlab="Score",horizontal=TRUE, col = "skyblue") #Data vergelijken met retrieval practice en muziek boxplot(score_1 ~ rt/muziek ,data = mijnData, main="Resultatenanalyse Score 1",ylab="Variabelen", ylim = c(0,25), xlab="Score",horizontal=TRUE, col = "skyblue") boxplot(score_2 ~ rt/muziek ,data = mijnData, main="Resultatenanalyse Score 2",ylab="Variabelen", ylim = c(0,25), xlab="Score",horizontal=TRUE, col = "skyblue") #Data vergelijken met Muziek MetMuziek_score1 <- c(data$score_1[data$muziek=="Ja"]) #count 86 MetMuziek_score2 <- c(data$score_2[data$muziek=="Ja"]) #count 89 test <- c("Test 1", "Test 2") boxplot(MetMuziek_score1, MetMuziek_score2, names = test, main="Resultatenanalyse met muziek", ylab="Testen", ylim = c(0,25), xlab="Totaal Score", horizontal = TRUE,col = "skyblue") #Data vergelijken zonder muziek ZonderMuziek_score1 <- c(data$score_1[data$muziek=="Nee"]) #count 101 ZonderMuziek_score2 <- 
c(data$score_2[data$muziek=="Nee"]) #count 81 boxplot(ZonderMuziek_score1, ZonderMuziek_score2, names = test, main="Resultatenanalyse Zonder muziek", ylab="Testen", ylim = c(0,25), xlab="Totaal Score", horizontal = TRUE,col = "skyblue") #Data vergelijken met rt MetRT_score1 <- c(data$score_1[data$rt=="Ja"]) #count 99 MetRT_score2 <- c(data$score_2[data$rt=="Ja"]) #count 90 boxplot(MetRT_score1, MetRT_score2, names = test, main="Resultatenanalyse met Retrieval Practice", ylab="Testen", ylim = c(0,25), xlab="Totaal Score", horizontal = TRUE,col = "skyblue") #Data vergelijken zonder rt ZonderRT_score1 <- c(data$score_1[data$rt=="Nee"]) #count 88 ZonderRT_score2 <- c(data$score_2[data$rt=="Nee"]) #count 80 boxplot(ZonderRT_score1, ZonderRT_score2, names = test, main="Resultatenanalyse zonder Retrieval Practice", ylab="Testen", ylim = c(0,25), xlab="Totaal Score", horizontal = TRUE,col = "skyblue")
/scripts/Boxplot_resultaten.R
no_license
RobbieVerdurme/Onderzoeks_Technieken_Groep3
R
false
false
2,910
r
#Data inlezen mijnData <- read.csv(file.choose()) data <- mijnData #Waarden aanpassen voor titels boxplot mijnData$rt <- as.character(mijnData$rt) mijnData$muziek <- as.character(mijnData$muziek) mijnData$rt[mijnData$rt=="Ja"] <- "Met Retrieval Practice" mijnData$rt[mijnData$rt=="Nee"] <- "Zonder Retrieval Practice" mijnData$muziek[mijnData$muziek=="Ja"] <- "Met Muziek" mijnData$muziek[mijnData$muziek=="Nee"] <- "Zonder Muziek" #Data vergelijken enkel retrieval practice boxplot(score_1 ~ rt ,data = data, main="Resultatenanalyse Score 1",ylab="Retrieval Practice", ylim = c(0,25), xlab="Score",horizontal=TRUE, col = "skyblue") boxplot(score_2 ~ rt ,data = data, main="Resultatenanalyse Score 2",ylab="Retrieval Practice", ylim = c(0,25), xlab="Score",horizontal=TRUE, col = "skyblue") #Data vergelijken enkel muziek boxplot(score_1 ~ muziek ,data = data, main="Resultatenanalyse Score 1",ylab="Muziek", ylim = c(0,25), xlab="Score",horizontal=TRUE, col = "skyblue") boxplot(score_2 ~ muziek ,data = data, main="Resultatenanalyse Score 2",ylab="Muziek", ylim = c(0,25), xlab="Score",horizontal=TRUE, col = "skyblue") #Data vergelijken met retrieval practice en muziek boxplot(score_1 ~ rt/muziek ,data = mijnData, main="Resultatenanalyse Score 1",ylab="Variabelen", ylim = c(0,25), xlab="Score",horizontal=TRUE, col = "skyblue") boxplot(score_2 ~ rt/muziek ,data = mijnData, main="Resultatenanalyse Score 2",ylab="Variabelen", ylim = c(0,25), xlab="Score",horizontal=TRUE, col = "skyblue") #Data vergelijken met Muziek MetMuziek_score1 <- c(data$score_1[data$muziek=="Ja"]) #count 86 MetMuziek_score2 <- c(data$score_2[data$muziek=="Ja"]) #count 89 test <- c("Test 1", "Test 2") boxplot(MetMuziek_score1, MetMuziek_score2, names = test, main="Resultatenanalyse met muziek", ylab="Testen", ylim = c(0,25), xlab="Totaal Score", horizontal = TRUE,col = "skyblue") #Data vergelijken zonder muziek ZonderMuziek_score1 <- c(data$score_1[data$muziek=="Nee"]) #count 101 ZonderMuziek_score2 <- 
c(data$score_2[data$muziek=="Nee"]) #count 81 boxplot(ZonderMuziek_score1, ZonderMuziek_score2, names = test, main="Resultatenanalyse Zonder muziek", ylab="Testen", ylim = c(0,25), xlab="Totaal Score", horizontal = TRUE,col = "skyblue") #Data vergelijken met rt MetRT_score1 <- c(data$score_1[data$rt=="Ja"]) #count 99 MetRT_score2 <- c(data$score_2[data$rt=="Ja"]) #count 90 boxplot(MetRT_score1, MetRT_score2, names = test, main="Resultatenanalyse met Retrieval Practice", ylab="Testen", ylim = c(0,25), xlab="Totaal Score", horizontal = TRUE,col = "skyblue") #Data vergelijken zonder rt ZonderRT_score1 <- c(data$score_1[data$rt=="Nee"]) #count 88 ZonderRT_score2 <- c(data$score_2[data$rt=="Nee"]) #count 80 boxplot(ZonderRT_score1, ZonderRT_score2, names = test, main="Resultatenanalyse zonder Retrieval Practice", ylab="Testen", ylim = c(0,25), xlab="Totaal Score", horizontal = TRUE,col = "skyblue")
#!/usr/bin/env Rscript # MinIONQC version 1.4.1 # Copyright (C) 2017 onwards Robert Lanfear # # For license see https://github.com/roblanf/minion_qc/blob/master/LICENSE # supress warnings options(warn=-1) suppressPackageStartupMessages(library(ggplot2)) suppressPackageStartupMessages(library(viridis)) suppressPackageStartupMessages(library(plyr)) suppressPackageStartupMessages(library(reshape2)) suppressPackageStartupMessages(library(readr)) suppressPackageStartupMessages(library(yaml)) suppressPackageStartupMessages(library(scales)) suppressPackageStartupMessages(library(parallel)) suppressPackageStartupMessages(library(futile.logger)) suppressPackageStartupMessages(library(data.table)) suppressPackageStartupMessages(library(optparse)) # option parsing # parser <- OptionParser() parser <- add_option(parser, opt_str = c("-i", "--input"), type = "character", dest = 'input.file', help="Input file or directory (required). Either a full path to a sequence_summary.txt file, or a full path to a directory containing one or more such files. In the latter case the directory is searched recursively." ) parser <- add_option(parser, opt_str = c("-o", "--outputdirectory"), type = "character", dest = 'output.dir', default=NA, help="Output directory (optional, default is the same as the input directory). If a single sequencing_summary.txt file is passed as input, then the output directory will contain just the plots associated with that file. If a directory containing more than one sequencing_summary.txt files is passed as input, then the plots will be put into sub-directories that have the same names as the parent directories of each sequencing_summary.txt file" ) parser <- add_option(parser, opt_str = c("-q", "--qscore_cutoff"), type="double", default=7.0, dest = 'q', help="The cutoff value for the mean Q score of a read (default 7). 
Used to create separate plots for reads above and below this threshold" ) parser <- add_option(parser, opt_str = c("-p", "--processors"), type="integer", default=1, dest = 'cores', help="Number of processors to use for the anlaysis (default 1). Only helps when you are analysing more than one sequencing_summary.txt file at a time" ) parser <- add_option(parser, opt_str = c("-s", "--smallfigures"), type = "logical", default = FALSE, dest = 'smallfig', help="TRUE or FALSE (the default). When true, MinIONQC will output smaller figures, e.g. suitable for publications or presentations. The default is to produce larger figures optimised for display on screen. Some figures just require small text, and cannot be effectively resized." ) parser <- add_option(parser, opt_str = c("-c", "--combined-only"), type = "logical", default = FALSE, dest = 'combined_only', help="TRUE or FALSE (the default). When true, MinIONQC will only produce the combined report, it will not produce individual reports for each flowcell." ) opt = parse_args(parser) if(exists("test.file") == FALSE){ test.file = c(1, 2, 3) # dummy variable } if (length(opt$input.file)==1) { input.file = opt$input.file } else if (length(test.file)==1) { input.file = test.file # specifically for testing the script flog.info(paste("Using test file", test.file)) } else { stop("Input file parameter must be supplied via -i or --input. 
See script usage (--help) or readme for help: https://github.com/roblanf/minion_qc") } q = opt$q cores = opt$cores smallfig = opt$smallfig combined_only = opt$combined_only p1m = 1.0 # this is how we label the reads at least as good as q q_title = paste("Q>=", q, sep="") # build the map for R9.5 p1 = data.frame(channel=33:64, row=rep(1:4, each=8), col=rep(1:8, 4)) p2 = data.frame(channel=481:512, row=rep(5:8, each=8), col=rep(1:8, 4)) p3 = data.frame(channel=417:448, row=rep(9:12, each=8), col=rep(1:8, 4)) p4 = data.frame(channel=353:384, row=rep(13:16, each=8), col=rep(1:8, 4)) p5 = data.frame(channel=289:320, row=rep(17:20, each=8), col=rep(1:8, 4)) p6 = data.frame(channel=225:256, row=rep(21:24, each=8), col=rep(1:8, 4)) p7 = data.frame(channel=161:192, row=rep(25:28, each=8), col=rep(1:8, 4)) p8 = data.frame(channel=97:128, row=rep(29:32, each=8), col=rep(1:8, 4)) q1 = data.frame(channel=1:32, row=rep(1:4, each=8), col=rep(16:9, 4)) q2 = data.frame(channel=449:480, row=rep(5:8, each=8), col=rep(16:9, 4)) q3 = data.frame(channel=385:416, row=rep(9:12, each=8), col=rep(16:9, 4)) q4 = data.frame(channel=321:352, row=rep(13:16, each=8), col=rep(16:9, 4)) q5 = data.frame(channel=257:288, row=rep(17:20, each=8), col=rep(16:9, 4)) q6 = data.frame(channel=193:224, row=rep(21:24, each=8), col=rep(16:9, 4)) q7 = data.frame(channel=129:160, row=rep(25:28, each=8), col=rep(16:9, 4)) q8 = data.frame(channel=65:96, row=rep(29:32, each=8), col=rep(16:9, 4)) map = rbind(p1, p2, p3, p4, p5, p6, p7, p8, q1, q2, q3, q4, q5, q6, q7, q8) add_cols <- function(d, min.q){ # take a sequencing sumamry file (d), and a minimum Q value you are interested in (min.q) # return the same data frame with the following columns added # cumulative.bases and cumulative.bases.time # hour of run # reads.per.hour d = subset(d, mean_qscore_template >= min.q) if(nrow(d)==0){ flog.error(paste("There are no reads with a mean Q score higher than your cutoff of ", min.q, ". 
Please choose a lower cutoff and try again.", sep = "")) quit() } if(max(d$channel)<=512){ d = merge(d, map, by="channel") }else{ # thanks to Matt Loose. Code adapted from: https://github.com/mattloose/flowcellvis/blob/master/flowcellgif.py block = floor((d$channel-1)/250) remainder = (d$channel-1)%%250 d$row = floor(remainder/10) + 1 # +1 because R is not zero indexed d$col = remainder%%10 + block*10 + 1 # +1 because R is not zero indexed } d = d[with(d, order(as.numeric(start_time))), ] # sort by start time d$cumulative.bases.time = cumsum(as.numeric(d$sequence_length_template)) d = d[with(d, order(-sequence_length_template)), ] # sort by read length d$cumulative.bases = cumsum(as.numeric(d$sequence_length_template)) d$hour = d$start_time %/% 3600 # add the reads generated for each hour reads.per.hour = as.data.frame(table(d$hour)) names(reads.per.hour) = c("hour", "reads_per_hour") reads.per.hour$hour = as.numeric(as.character(reads.per.hour$hour)) d = merge(d, reads.per.hour, by = c("hour")) return(d) } load_summary <- function(filepath, min.q){ # load a sequencing summary and add some info # min.q is a vector of length 2 defining 2 levels of min.q to have # by default the lowest value is -Inf, i.e. includes all reads. 
The # other value in min.q is set by the user at the command line d = read_tsv(filepath, col_types = cols_only(channel = 'i', num_events_template = 'i', sequence_length_template = 'i', mean_qscore_template = 'n', sequence_length_2d = 'i', mean_qscore_2d = 'n', start_time = 'n', calibration_strand_genome_template = 'c')) if(max(d$channel)<=512){ flog.info("MinION flowcell detected") }else{ flog.info("PromethION flowcell detected") } # remove the control sequence from directRNA runs if("calibration_strand_genome_template" %in% names(d)){ d = subset(d, calibration_strand_genome_template != "YHR174W") } if("sequence_length_2d" %in% names(d)){ # it's a 1D2 or 2D run d$sequence_length_template = as.numeric(as.character(d$sequence_length_2d)) d$mean_qscore_template = as.numeric(as.character(d$mean_qscore_2d)) d$num_events_template = NA d$start_time = as.numeric(as.character(d$start_time)) }else{ d$sequence_length_template = as.numeric(as.character(d$sequence_length_template)) d$mean_qscore_template = as.numeric(as.character(d$mean_qscore_template)) d$num_events_template = as.numeric(as.character(d$num_events_template)) d$start_time = as.numeric(as.character(d$start_time)) } d$events_per_base = d$num_events_template/d$sequence_length_template flowcell = basename(dirname(filepath)) # add columns for all the reads d1 = add_cols(d, min.q[1]) d1$Q_cutoff = "All reads" # add columns for just the reads that pass the user Q threshold d2 = add_cols(d, min.q[2]) d2$Q_cutoff = q_title # bind those two together into one data frame d = as.data.frame(rbindlist(list(d1, d2))) # name the flowcell (useful for analyses with >1 flowcell) d$flowcell = flowcell # make sure this is a factor d$Q_cutoff = as.factor(d$Q_cutoff) keep = c("hour", "start_time", "channel", "sequence_length_template", "mean_qscore_template", "row", "col", "cumulative.bases", "cumulative.bases.time", "reads_per_hour", "Q_cutoff", "flowcell", "events_per_base") dk = d[, which(names(d) %in% keep)] return(dk) } reads.gt 
<- function(d, len){ # return the number of reads in data frame d # that are at least as long as length len return(length(which(d$sequence_length_template>=len))) } bases.gt <- function(d, len){ # return the number of bases contained in reads from # data frame d # that are at least as long as length len reads = subset(d, sequence_length_template >= len) return(sum(as.numeric(reads$sequence_length_template))) } log10_minor_break = function (...){ # function to add minor breaks to a log10 graph # hat-tip: https://stackoverflow.com/questions/30179442/plotting-minor-breaks-on-a-log-scale-with-ggplot function(x) { minx = floor(min(log10(x), na.rm=T))-1; maxx = ceiling(max(log10(x), na.rm=T))+1; n_major = maxx-minx+1; major_breaks = seq(minx, maxx, by=1) minor_breaks = rep(log10(seq(1, 9, by=1)), times = n_major)+ rep(major_breaks, each = 9) return(10^(minor_breaks)) } } log10_major_break = function (...){ # function to add major breaks to a log10 graph # hat-tip: https://stackoverflow.com/questions/30179442/plotting-minor-breaks-on-a-log-scale-with-ggplot function(x) { minx = floor(min(log10(x), na.rm=T))-1; maxx = ceiling(max(log10(x), na.rm=T))+1; n_major = maxx-minx+1; major_breaks = seq(minx, maxx, by=1) return(10^(major_breaks)) } } binSearch <- function(min, max, df, t = 100000) { # binary search algorithm, thanks to https://stackoverflow.com/questions/46292438/optimising-a-calculation-on-every-cumulative-subset-of-a-vector-in-r/46303384#46303384 # the aim is to return the number of reads in a dataset (df) # that comprise the largest subset of reads with an N50 of t # we use this to calculte the number of 'ultra long' reads # which are defined as those with N50 > 100KB mid = floor(mean(c(min, max))) if (mid == min) { if (df$sequence_length_template[min(which(df$cumulative.bases>df$cumulative.bases[min]/2))] < t) { return(min - 1) } else { return(max - 1) } } n = df$sequence_length_template[min(which(df$cumulative.bases>df$cumulative.bases[mid]/2))] if (n >= t) { 
return(binSearch(mid, max, df)) } else { return(binSearch(min, mid, df)) } } summary.stats <- function(d, Q_cutoff="All reads"){ # Calculate summary stats for a single value of min.q rows = which(as.character(d$Q_cutoff)==Q_cutoff) d = d[rows,] d = d[with(d, order(-sequence_length_template)), ] # sort by read length, just in case total.bases = sum(as.numeric(d$sequence_length_template)) total.reads = nrow(d) N50.length = d$sequence_length_template[min(which(d$cumulative.bases > (total.bases/2)))] mean.length = round(mean(as.numeric(d$sequence_length_template)), digits = 1) median.length = round(median(as.numeric(d$sequence_length_template)), digits = 1) max.length = max(as.numeric(d$sequence_length_template)) mean.q = round(mean(d$mean_qscore_template), digits = 1) median.q = round(median(d$mean_qscore_template), digits = 1) #calculate ultra-long reads and bases (max amount of data with N50>100KB) ultra.reads = binSearch(1, nrow(d), d, t = 100000) if(ultra.reads>=1){ ultra.gigabases = sum(as.numeric(d$sequence_length_template[1:ultra.reads]))/1000000000 }else{ ultra.gigabases = 0 } reads = list( reads.gt(d, 10000), reads.gt(d, 20000), reads.gt(d, 50000), reads.gt(d, 100000), reads.gt(d, 200000), reads.gt(d, 500000), reads.gt(d, 1000000), ultra.reads) names(reads) = c(">10kb", ">20kb", ">50kb", ">100kb", ">200kb", ">500kb", ">1m", "ultralong") bases = list( bases.gt(d, 10000)/1000000000, bases.gt(d, 20000)/1000000000, bases.gt(d, 50000)/1000000000, bases.gt(d, 100000)/1000000000, bases.gt(d, 200000)/1000000000, bases.gt(d, 500000)/1000000000, bases.gt(d, 1000000)/1000000000, ultra.gigabases) names(bases) = c(">10kb", ">20kb", ">50kb", ">100kb", ">200kb", ">500kb", ">1m", "ultralong") return(list('total.gigabases' = total.bases/1000000000, 'total.reads' = total.reads, 'N50.length' = N50.length, 'mean.length' = mean.length, 'median.length' = median.length, 'max.length' = max.length, 'mean.q' = mean.q, 'median.q' = median.q, 'reads' = reads, 'gigabases' = bases )) } 
channel.summary <- function(d){ # calculate summaries of what happened in each of the channels # of a flowcell a = ddply(d, .(channel), summarize, total.bases = sum(sequence_length_template), total.reads = sum(which(sequence_length_template>=0)), mean.read.length = mean(sequence_length_template), median.read.length = median(sequence_length_template), row = mean(row), col = mean(col)) b = melt(a, id.vars = c("channel", "row", "col")) return(b) } single.flowcell <- function(input.file, output.dir, q=7, base.dir = NA){ # wrapper function to analyse data from a single flowcell # input.file is a sequencing_summary.txt file from a 1D run # output.dir is the output directory into which to write results # q is the cutoff used for Q values, set by the user # base.dir is the base directory if and only if the user supplied a base directory # we use base.dir to name flowcells in a sensible way flog.info(paste("Loading input file:", input.file)) d = load_summary(input.file, min.q=c(-Inf, q)) flowcell = unique(d$flowcell) # output goes with the sequencing summary file unless otherwise specified if(is.na(opt$output.dir)){ output.dir = file.path(dirname(input.file)) } else { # the user supplied an output dir output.dir = file.path(opt$output.dir, flowcell) } flog.info(paste(sep = "", flowcell, ": creating output directory:", output.dir)) dir.create(output.dir, recursive = TRUE) out.txt = file.path(output.dir, "summary.yaml") flog.info(paste(sep = "", flowcell, ": summarising input file for flowcell")) all.reads.summary = summary.stats(d, Q_cutoff = "All reads") q10.reads.summary = summary.stats(d, Q_cutoff = q_title) summary = list("input file" = input.file, "All reads" = all.reads.summary, cutoff = q10.reads.summary, "notes" = 'ultralong reads refers to the largest set of reads with N50>100KB') names(summary)[3] = q_title write(as.yaml(summary), out.txt) muxes = seq(from = 0, to = max(d$hour), by = 8) # set up variable sizes if(smallfig == TRUE){ p1m = 0.5 }else{ p1m = 1.0 } 
if(smallfig == TRUE){ p2m = 0.6 }else{ p2m = 1.0 } # make plots flog.info(paste(sep = "", flowcell, ": plotting length histogram")) p1 = ggplot(d, aes(x = sequence_length_template, fill = Q_cutoff)) + geom_histogram(bins = 300) + scale_x_log10(minor_breaks=log10_minor_break(), breaks = log10_major_break()) + facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") + theme(text = element_text(size = 15)) + xlab("Read length (bases)") + ylab("Number of reads") + guides(fill=FALSE) + scale_fill_viridis(discrete = TRUE, begin = 0.25, end = 0.75) suppressMessages(ggsave(filename = file.path(output.dir, "length_histogram.png"), width = p1m*960/75, height = p1m*960/75, plot = p1)) # flog.info(paste(sep = "", flowcell, ": plotting mean Q score histogram")) p2 = ggplot(d, aes(x = mean_qscore_template, fill = Q_cutoff)) + geom_histogram(bins = 300) + facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") + theme(text = element_text(size = 15)) + xlab("Mean Q score of read") + ylab("Number of reads") + guides(fill=FALSE) + scale_fill_viridis(discrete = TRUE, begin = 0.25, end = 0.75) suppressMessages(ggsave(filename = file.path(output.dir, "q_histogram.png"), width = p1m*960/75, height = p1m*960/75, plot = p2)) # if(max(d$channel)<=512){ # only do this for minion, not promethion flog.info(paste(sep = "", flowcell, ": plotting flowcell overview")) p3 = ggplot(subset(d, Q_cutoff=="All reads"), aes(x=start_time/3600, y=sequence_length_template, colour = mean_qscore_template)) + geom_point(size=1.5, alpha=0.35) + scale_colour_viridis() + labs(colour='Q') + scale_y_log10() + facet_grid(row~col) + theme(panel.spacing = unit(0.5, "lines")) + xlab("Hours into run") + ylab("Read length") + theme(text = element_text(size = 40), axis.text.x = element_text(size=12), axis.text.y = element_text(size=12), legend.text=element_text(size=18), legend.title=element_text(size=24)) suppressMessages(ggsave(filename = file.path(output.dir, "flowcell_overview.png"), width = 2000/75, height = 1920/75, plot = 
p3)) } flog.info(paste(sep = "", flowcell, ": plotting flowcell yield over time")) p5 = ggplot(d, aes(x=start_time/3600, y=cumulative.bases.time/1000000000, colour = Q_cutoff)) + geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) + geom_line(size = 1) + xlab("Hours into run") + ylab("Total yield in gigabases") + scale_colour_viridis(discrete = TRUE, begin = 0.25, end = 0.75, guide = guide_legend(title = "Reads")) + theme(text = element_text(size = 15)) suppressMessages(ggsave(filename = file.path(output.dir, "yield_over_time.png"), width = p1m*960/75, height = p1m*480/75, plot = p5)) # flog.info(paste(sep = "", flowcell, ": plotting flowcell yield by read length")) p6 = ggplot(d, aes(x=sequence_length_template, y=cumulative.bases/1000000000, colour = Q_cutoff)) + geom_line(size = 1) + xlab("Minimum read length (bases)") + ylab("Total yield in gigabases") + scale_colour_viridis(discrete = TRUE, begin = 0.25, end = 0.75, guide = guide_legend(title = "Reads")) + theme(text = element_text(size = 15)) xmax = max(d$sequence_length_template[which(d$cumulative.bases > 0.01 * max(d$cumulative.bases))]) p6 = p6 + scale_x_continuous(limits = c(0, xmax)) suppressMessages(ggsave(filename = file.path(output.dir, "yield_by_length.png"), width = p1m*960/75, height = p1m*480/75, plot = p6)) # flog.info(paste(sep = "", flowcell, ": plotting sequence length over time")) p7 = ggplot(d, aes(x=start_time/3600, y=sequence_length_template, colour = Q_cutoff, group = Q_cutoff)) + geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) + theme(text = element_text(size = 15)) + geom_smooth() + xlab("Hours into run") + ylab("Mean read length (bases)") + scale_colour_viridis(discrete = TRUE, begin = 0.25, end = 0.75, guide = guide_legend(title = "Reads")) + ylim(0, NA) suppressMessages(ggsave(filename = file.path(output.dir, "length_by_hour.png"), width = p1m*960/75, height = p1m*480/75, plot = p7)) # flog.info(paste(sep = "", flowcell, ": 
plotting Q score over time")) p8 = ggplot(d, aes(x=start_time/3600, y=mean_qscore_template, colour = Q_cutoff, group = Q_cutoff)) + geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) + theme(text = element_text(size = 15)) + geom_smooth() + xlab("Hours into run") + ylab("Mean Q score") + scale_colour_viridis(discrete = TRUE, begin = 0.25, end = 0.75, guide = guide_legend(title = "Reads")) + ylim(0, NA) suppressMessages(ggsave(filename = file.path(output.dir, "q_by_hour.png"), width = p1m*960/75, height = p1m*480/75, plot = p8)) # flog.info(paste(sep = "", flowcell, ": plotting reads per hour")) f = d[c("hour", "reads_per_hour", "Q_cutoff")] f = f[!duplicated(f),] g = subset(f, Q_cutoff=="All reads") h = subset(f, Q_cutoff==q_title) max = max(f$hour) # all of this is just to fill in hours with no reads recorded all = 0:max add.g = all[which(all %in% g$hour == FALSE)] if(length(add.g)>0){ add.g = data.frame(hour = add.g, reads_per_hour = 0, Q_cutoff = "All reads") g = rbind(g, add.g) } add.h = all[which(all %in% h$hour == FALSE)] if(length(add.h)>0){ add.h = data.frame(hour = add.h, reads_per_hour = 0, Q_cutoff = q_title) h = rbind(h, add.h) } i = rbind(g, h) i$Q_cutoff = as.character(i$Q_cutoff) i$Q_cutoff[which(i$Q_cutoff==q_title)] = paste("Q>=", q, sep="") p9 = ggplot(i, aes(x=hour, y=reads_per_hour, colour = Q_cutoff, group = Q_cutoff)) + geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) + theme(text = element_text(size = 15)) + geom_point() + geom_line() + xlab("Hours into run") + ylab("Number of reads per hour") + ylim(0, NA) + scale_colour_viridis(discrete = TRUE, begin = 0.25, end = 0.75, guide = guide_legend(title = "Reads")) suppressMessages(ggsave(filename = file.path(output.dir, "reads_per_hour.png"), width = p1m*960/75, height = p1m*480/75, plot = p9)) # if(max(d$channel)<=512){ # minion flog.info(paste(sep = "", flowcell, ": plotting read length vs. 
q score scatterplot")) p10 = ggplot(subset(d, Q_cutoff=="All reads"), aes(x = sequence_length_template, y = mean_qscore_template, colour = events_per_base)) + geom_point(alpha=0.05, size = 0.4) + scale_x_log10(minor_breaks=log10_minor_break(), breaks = log10_major_break()) + labs(colour='Events per base\n(log scale)\n') + theme(text = element_text(size = 15)) + xlab("Read length (bases)") + ylab("Mean Q score of read") }else{ # promethion p10 = ggplot(subset(d, Q_cutoff=="All reads"), aes(x = sequence_length_template, y = mean_qscore_template, colour = events_per_base)) + geom_bin2d() + scale_x_log10(minor_breaks=log10_minor_break(), breaks = log10_major_break()) + theme(text = element_text(size = 15)) + scale_fill_viridis() + xlab("Read length (bases)") + ylab("Mean Q score of read") } if(max(d$events_per_base, na.rm=T)>0){ # a catch for 1D2 runs which don't have events per base p10 = p10 + scale_colour_viridis(trans = "log", labels = scientific, option = 'inferno') } # we keep it a bit wider, because the legend takes up a fair bit of the plot space suppressMessages(ggsave(filename = file.path(output.dir, "length_vs_q.png"), width = p2m*960/75, height = p1m*960/75, plot = p10)) # flog.info(paste(sep = "", flowcell, ": plotting flowcell channels summary histograms")) c = channel.summary(subset(d, Q_cutoff=="All reads")) c10 = channel.summary(subset(d, Q_cutoff==q_title)) c$Q_cutoff = "All reads" c10$Q_cutoff = q_title cc = rbind(c, c10) cc$variable = as.character(cc$variable) cc$variable[which(cc$variable=="total.bases")] = "Number of bases per channel" cc$variable[which(cc$variable=="total.reads")] = "Number of reads per channel" cc$variable[which(cc$variable=="mean.read.length")] = "Mean read length per channel" cc$variable[which(cc$variable=="median.read.length")] = "Median read length per channel" p11 = ggplot(cc, aes(x = value, fill = Q_cutoff)) + geom_histogram(bins = 30) + facet_grid(Q_cutoff~variable, scales = "free_x") + theme(text = element_text(size = 
15), axis.text.x = element_text(angle = 60, hjust = 1)) + guides(fill=FALSE) + scale_fill_viridis(discrete = TRUE, begin = 0.25, end = 0.75) + guides(fill=FALSE) suppressMessages(ggsave(filename = file.path(output.dir, "channel_summary.png"), width = 960/75, height = 480/75, plot = p11)) flog.info(paste(sep = "", flowcell, ": plotting physical overview of output per channel")) if(max(d$channel)<=512){ # minion cols = 2 }else{ # promethion cols = 1 } p12 = ggplot(subset(cc, variable == "Number of bases per channel"), aes(x = as.numeric(col), y = as.numeric(row))) + geom_tile(aes(fill = value/1000000000), colour="white", size=0.25) + facet_wrap(~Q_cutoff, ncol = cols) + theme(text = element_text(size = 15), plot.background=element_blank(), panel.border=element_blank(), panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + scale_fill_viridis(name = "GB/channel") + scale_y_continuous(trans = "reverse", expand=c(0,0)) + scale_x_continuous(expand=c(0,0)) + coord_fixed() + labs(x="channel column",y="channel row") if(max(d$channel)<=512){ # minion suppressMessages(ggsave(filename = file.path(output.dir, "gb_per_channel_overview.png"), width = 960/150, height = 480/75, plot = p12)) }else{ # promethion suppressMessages(ggsave(filename = file.path(output.dir, "gb_per_channel_overview.png"), width = 960/75, height = 480/75, plot = p12)) } return(d) } combined.flowcell <- function(d, output.dir, q=8){ # function to analyse combined data from multiple flowcells # useful for getting an overall impression of the combined data flog.info("Creating output directory") out.txt = file.path(output.dir, "summary.yaml") # write summaries flog.info(paste("Summarising combined data from all flowcells, saving to:", out.txt)) # tidy up and remove added stuff drops = c("cumulative.bases", "hour", "reads.per.hour") d = d[ , !(names(d) %in% drops)] d1 = subset(d, Q_cutoff == "All reads") d1 = d1[with(d1, order(-sequence_length_template)), ] 
# sort by read length d1$cumulative.bases = cumsum(as.numeric(d1$sequence_length_template)) d2 = subset(d, Q_cutoff == q_title) d2 = d2[with(d2, order(-sequence_length_template)), ] # sort by read length d2$cumulative.bases = cumsum(as.numeric(d2$sequence_length_template)) d1$Q_cutoff = as.factor(d1$Q_cutoff) d2$Q_cutoff = as.factor(d2$Q_cutoff) all.reads.summary = summary.stats(d1, Q_cutoff = "All reads") q10.reads.summary = summary.stats(d2, Q_cutoff = q_title) summary = list("input file" = input.file, "All reads" = all.reads.summary, cutoff = q10.reads.summary, "notes" = 'ultralong reads refers to the largest set of reads with N50>100KB') names(summary)[3] = q_title write(as.yaml(summary), out.txt) d = rbind(d1, d2) d$Q_cutoff = as.factor(d$Q_cutoff) d1 = 0 d2 = 0 # set up variable sizes if(smallfig == TRUE){ p1m = 0.5 }else{ p1m = 1.0 } # make plots flog.info("Plotting combined length histogram") p1 = ggplot(d, aes(x = sequence_length_template, fill = Q_cutoff)) + geom_histogram(bins = 300) + scale_x_log10(minor_breaks=log10_minor_break(), breaks = log10_major_break()) + facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") + theme(text = element_text(size = 15)) + xlab("Read length (bases)") + ylab("Number of reads") + guides(fill=FALSE) + scale_fill_viridis(discrete = TRUE, begin = 0.25, end = 0.75) suppressMessages(ggsave(filename = file.path(output.dir, "combined_length_histogram.png"), width = p1m*960/75, height = p1m*960/75, plot = p1)) flog.info("Plotting combined mean Q score histogram") p2 = ggplot(d, aes(x = mean_qscore_template, fill = Q_cutoff)) + geom_histogram(bins = 300) + facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") + theme(text = element_text(size = 15)) + xlab("Mean Q score of read") + ylab("Number of reads") + guides(fill=FALSE) + scale_fill_viridis(discrete = TRUE, begin = 0.25, end = 0.75) suppressMessages(ggsave(filename = file.path(output.dir, "combined_q_histogram.png"), width = p1m*960/75, height = p1m*960/75, plot = p2)) 
flog.info("Plotting combined yield by length") p4 = ggplot(d, aes(x=sequence_length_template, y=cumulative.bases/1000000000, colour = Q_cutoff)) + geom_line(size = 1) + xlab("Minimum read length (bases)") + ylab("Total yield in gigabases") + scale_colour_viridis(discrete = TRUE, begin = 0.25, end = 0.75, guide = guide_legend(title = "Reads")) + theme(text = element_text(size = 15)) xmax = max(d$sequence_length_template[which(d$cumulative.bases > 0.01 * max(d$cumulative.bases))]) p4 = p4 + scale_x_continuous(limits = c(0, xmax)) suppressMessages(ggsave(filename = file.path(output.dir, "combined_yield_by_length.png"), width = p1m*960/75, height = p1m*480/75, plot = p4)) } multi.flowcell = function(input.file, output.base, q){ # wrapper function to allow parallelisation of single-flowcell # analyses when >1 flowcell is analysed in one run d = single.flowcell(input.file, output.dir, q) return(d) } multi.plots = function(dm, output.dir){ # function to plot data from multiple flowcells, # where the data is not combined (as in combined.flowcell() ) # but instead just uses multiple lines on each plot. 
muxes = seq(from = 0, to = max(dm$hour), by = 8) # set up variable sizes if(smallfig == TRUE){ p1m = 0.5 }else{ p1m = 1.0 } # make plots flog.info("Plotting length distributions") p1 = ggplot(dm, aes(x = sequence_length_template)) + geom_line(stat="density", aes(colour = flowcell), size = 1) + scale_x_log10(minor_breaks=log10_minor_break(), breaks = log10_major_break()) + facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") + theme(text = element_text(size = 15)) + xlab("Read length (bases)") + ylab("Density") suppressMessages(ggsave(filename = file.path(output.dir, "length_distributions.png"), width = p1m*960/75, height = p1m*960/75, plot = p1)) # flog.info("Plotting mean Q score distributions") p2 = ggplot(dm, aes(x = mean_qscore_template)) + geom_line(stat="density", aes(colour = flowcell), size = 1) + facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") + theme(text = element_text(size = 15)) + xlab("Mean Q score of read") + ylab("Density") suppressMessages(ggsave(filename = file.path(output.dir, "q_distributions.png"), width = p1m*960/75, height = p1m*960/75, plot = p2)) # flog.info("Plotting flowcell yield over time") p5 = ggplot(dm, aes(x=start_time/3600, y=cumulative.bases.time/1000000000, colour = flowcell)) + geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) + geom_line(size = 1) + xlab("Hours into run") + ylab("Total yield in gigabases") + facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") + theme(text = element_text(size = 15)) suppressMessages(ggsave(filename = file.path(output.dir, "yield_over_time.png"), width = p1m*960/75, height = p1m*960/75, plot = p5)) # flog.info("Plotting flowcell yield by length") p6 = ggplot(dm, aes(x=sequence_length_template, y=cumulative.bases/1000000000, colour = flowcell)) + geom_line(size = 1) + xlab("Minimum read length (bases)") + ylab("Total yield in gigabases") + theme(text = element_text(size = 15)) + facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") xmax = 
max(dm$sequence_length_template[which(dm$cumulative.bases > 0.01 * max(dm$cumulative.bases))]) p6 = p6 + scale_x_continuous(limits = c(0, xmax)) suppressMessages(ggsave(filename = file.path(output.dir, "yield_by_length.png"), width = p1m*960/75, height = p1m*960/75, plot = p6)) # flog.info("Plotting sequence length over time") p7 = ggplot(dm, aes(x=start_time/3600, y=sequence_length_template, colour = flowcell)) + geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) + theme(text = element_text(size = 15)) + geom_smooth() + xlab("Hours into run") + ylab("Mean read length (bases)") + ylim(0, NA) + facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") suppressMessages(ggsave(filename = file.path(output.dir, "length_by_hour.png"), width = p1m*960/75, height = p1m*960/75, plot = p7)) flog.info("Plotting Q score over time") p8 = ggplot(dm, aes(x=start_time/3600, y=mean_qscore_template, colour = flowcell)) + geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) + theme(text = element_text(size = 15)) + geom_smooth() + xlab("Hours into run") + ylab("Mean Q score") + facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") suppressMessages(ggsave(filename = file.path(output.dir, "q_by_hour.png"), width = p1m*960/75, height = p1m*960/75, plot = p8)) } # Choose how to act depending on whether we have a single input file or mulitple input files if(file_test("-f", input.file)==TRUE & length(test.file)>1){ # if it's an existing file (not a folder) just run one analysis d = single.flowcell(input.file, output.dir, q) }else if(file_test("-d", input.file)==TRUE & length(test.file)>1){ # it's a directory, recursively analyse all sequencing_summary.txt files # get a list of all sequencing_summary.txt files, recursively summaries = list.files(path = input.file, pattern = "sequencing_summary.txt", recursive = TRUE, full.names = TRUE) flog.info("") flog.info("**** Analysing the following files ****") flog.info(summaries) # if the user passes 
a directory with only one sequencing_summary.txt file... if(length(summaries) == 1){ d = single.flowcell(summaries[1], output.dir, q) flog.info('**** Analysis complete ****') }else{ # analyse each one and keep the returns in a list if(combined_only == FALSE){ results = mclapply(summaries, multi.flowcell, output.dir, q, mc.cores = cores) }else{ results = mclapply(summaries, load_summary, min.q = c(-Inf, q), mc.cores = cores) } # rbind that list flog.info('**** Analysing data from all flowcells combined ****') dm = as.data.frame(rbindlist(results)) # now do the single plot on ALL the output if(is.na(opt$output.dir)){ combined.output = file.path(input.file, "combinedQC") } else { # the user supplied an output dir combined.output = file.path(opt$output.dir, "combinedQC") } flog.info(paste("Plots from the combined output will be saved in", combined.output)) dir.create(combined.output, recursive = TRUE) combined.flowcell(dm, combined.output, q) multi.plots(dm, combined.output) flog.info('**** Analysis complete ****') flog.info('If you use MinIONQC in your published work, please cite:') flog.info('R Lanfear, M Schalamun, D Kainer, W Wang, B Schwessinger (2018). MinIONQC: fast and simple quality control for MinION sequencing data, Bioinformatics, bty654') flog.info('https://doi.org/10.1093/bioinformatics/bty654') } }else{ #WTF flog.warn(paste("Couldn't find a sequencing summary file in your input which was: ", input.file, "\nThe input must be either a sequencing_summary.txt file, or a directory containing one or more such files")) }
/MinIONQC.R
permissive
AN-Lewis/minion_qc
R
false
false
38,579
r
#!/usr/bin/env Rscript
# MinIONQC version 1.4.1
# Copyright (C) 2017 onwards Robert Lanfear
#
# For license see https://github.com/roblanf/minion_qc/blob/master/LICENSE

# suppress warnings
options(warn=-1)

suppressPackageStartupMessages(library(ggplot2))
suppressPackageStartupMessages(library(viridis))
suppressPackageStartupMessages(library(plyr))
suppressPackageStartupMessages(library(reshape2))
suppressPackageStartupMessages(library(readr))
suppressPackageStartupMessages(library(yaml))
suppressPackageStartupMessages(library(scales))
suppressPackageStartupMessages(library(parallel))
suppressPackageStartupMessages(library(futile.logger))
suppressPackageStartupMessages(library(data.table))
suppressPackageStartupMessages(library(optparse))

# option parsing #
parser <- OptionParser()
parser <- add_option(parser,
                     opt_str = c("-i", "--input"),
                     type = "character",
                     dest = 'input.file',
                     help="Input file or directory (required). Either a full path to a sequence_summary.txt file, or a full path to a directory containing one or more such files. In the latter case the directory is searched recursively."
                     )
parser <- add_option(parser,
                     opt_str = c("-o", "--outputdirectory"),
                     type = "character",
                     dest = 'output.dir',
                     default=NA,
                     help="Output directory (optional, default is the same as the input directory). If a single sequencing_summary.txt file is passed as input, then the output directory will contain just the plots associated with that file. If a directory containing more than one sequencing_summary.txt files is passed as input, then the plots will be put into sub-directories that have the same names as the parent directories of each sequencing_summary.txt file"
                     )
parser <- add_option(parser,
                     opt_str = c("-q", "--qscore_cutoff"),
                     type="double",
                     default=7.0,
                     dest = 'q',
                     help="The cutoff value for the mean Q score of a read (default 7). Used to create separate plots for reads above and below this threshold"
                     )
# NOTE: fixed typo in the user-facing help text ("anlaysis" -> "analysis")
parser <- add_option(parser,
                     opt_str = c("-p", "--processors"),
                     type="integer",
                     default=1,
                     dest = 'cores',
                     help="Number of processors to use for the analysis (default 1). Only helps when you are analysing more than one sequencing_summary.txt file at a time"
                     )
parser <- add_option(parser,
                     opt_str = c("-s", "--smallfigures"),
                     type = "logical",
                     default = FALSE,
                     dest = 'smallfig',
                     help="TRUE or FALSE (the default). When true, MinIONQC will output smaller figures, e.g. suitable for publications or presentations. The default is to produce larger figures optimised for display on screen. Some figures just require small text, and cannot be effectively resized."
                     )
parser <- add_option(parser,
                     opt_str = c("-c", "--combined-only"),
                     type = "logical",
                     default = FALSE,
                     dest = 'combined_only',
                     help="TRUE or FALSE (the default). When true, MinIONQC will only produce the combined report, it will not produce individual reports for each flowcell."
                     )

opt = parse_args(parser)

# 'test.file' may be pre-assigned by a test harness before this script is
# sourced; if it is not, create a dummy so the length() checks below are safe
if(exists("test.file") == FALSE){
    test.file = c(1, 2, 3) # dummy variable
}

# decide where the input comes from: the -i option, or the test harness
if (length(opt$input.file)==1) {
    input.file = opt$input.file
} else if (length(test.file)==1) {
    input.file = test.file # specifically for testing the script
    flog.info(paste("Using test file", test.file))
} else {
    stop("Input file parameter must be supplied via -i or --input. See script usage (--help) or readme for help: https://github.com/roblanf/minion_qc")
}

# unpack the remaining user options into the globals used throughout the script
q = opt$q
cores = opt$cores
smallfig = opt$smallfig
combined_only = opt$combined_only

# default figure scaling factor; reset per-plot when smallfig is TRUE
p1m = 1.0

# this is how we label the reads at least as good as q
q_title = paste("Q>=", q, sep="")

# build the map for R9.5: the physical (row, col) position of each of the
# 512 MinION channels on the flowcell, assembled in blocks of 32 channels
p1 = data.frame(channel=33:64,   row=rep(1:4,   each=8), col=rep(1:8, 4))
p2 = data.frame(channel=481:512, row=rep(5:8,   each=8), col=rep(1:8, 4))
p3 = data.frame(channel=417:448, row=rep(9:12,  each=8), col=rep(1:8, 4))
p4 = data.frame(channel=353:384, row=rep(13:16, each=8), col=rep(1:8, 4))
p5 = data.frame(channel=289:320, row=rep(17:20, each=8), col=rep(1:8, 4))
p6 = data.frame(channel=225:256, row=rep(21:24, each=8), col=rep(1:8, 4))
p7 = data.frame(channel=161:192, row=rep(25:28, each=8), col=rep(1:8, 4))
p8 = data.frame(channel=97:128,  row=rep(29:32, each=8), col=rep(1:8, 4))
q1 = data.frame(channel=1:32,    row=rep(1:4,   each=8), col=rep(16:9, 4))
q2 = data.frame(channel=449:480, row=rep(5:8,   each=8), col=rep(16:9, 4))
q3 = data.frame(channel=385:416, row=rep(9:12,  each=8), col=rep(16:9, 4))
q4 = data.frame(channel=321:352, row=rep(13:16, each=8), col=rep(16:9, 4))
q5 = data.frame(channel=257:288, row=rep(17:20, each=8), col=rep(16:9, 4))
q6 = data.frame(channel=193:224, row=rep(21:24, each=8), col=rep(16:9, 4))
q7 = data.frame(channel=129:160, row=rep(25:28, each=8), col=rep(16:9, 4))
q8 = data.frame(channel=65:96,   row=rep(29:32, each=8), col=rep(16:9, 4))
map = rbind(p1, p2, p3, p4, p5, p6, p7, p8, q1, q2, q3, q4, q5, q6, q7, q8)

add_cols <- function(d, min.q){
    # take a sequencing summary data frame (d), and a minimum Q value you are
    # interested in (min.q); return the same data frame, filtered to reads with
    # mean_qscore_template >= min.q, with the following columns added:
    #   row, col                 : physical flowcell position of the channel
    #   cumulative.bases.time    : running yield in time order
    #   cumulative.bases         : running yield in descending-length order
    #   hour                     : whole hours into the run
    #   reads_per_hour           : number of reads generated in that hour
    d = subset(d, mean_qscore_template >= min.q)

    if(nrow(d)==0){
        flog.error(paste("There are no reads with a mean Q score higher than your cutoff of ", min.q, ". Please choose a lower cutoff and try again.", sep = ""))
        quit()
    }

    if(max(d$channel)<=512){
        # MinION flowcell: use the hard-coded 512-channel map above
        d = merge(d, map, by="channel")
    }else{
        # PromethION flowcell: derive row/col arithmetically.
        # thanks to Matt Loose. Code adapted from:
        # https://github.com/mattloose/flowcellvis/blob/master/flowcellgif.py
        block = floor((d$channel-1)/250)
        remainder = (d$channel-1)%%250
        d$row = floor(remainder/10) + 1 # +1 because R is not zero indexed
        d$col = remainder%%10 + block*10 + 1 # +1 because R is not zero indexed
    }

    d = d[with(d, order(as.numeric(start_time))), ] # sort by start time
    d$cumulative.bases.time = cumsum(as.numeric(d$sequence_length_template))

    d = d[with(d, order(-sequence_length_template)), ] # sort by read length
    d$cumulative.bases = cumsum(as.numeric(d$sequence_length_template))

    # start_time is in seconds; bin reads into whole hours
    d$hour = d$start_time %/% 3600

    # add the reads generated for each hour
    reads.per.hour = as.data.frame(table(d$hour))
    names(reads.per.hour) = c("hour", "reads_per_hour")
    reads.per.hour$hour = as.numeric(as.character(reads.per.hour$hour))
    d = merge(d, reads.per.hour, by = c("hour"))

    return(d)
}

load_summary <- function(filepath, min.q){
    # load a sequencing_summary.txt file and annotate it via add_cols().
    # min.q is a vector of length 2 defining 2 levels of min.q to have:
    # by default the lowest value is -Inf, i.e. includes all reads. The
    # other value in min.q is set by the user at the command line.
    # Returns one long data frame with a Q_cutoff factor column ("All reads"
    # vs q_title) distinguishing the two filtered copies, plus a flowcell
    # column named after the parent directory of filepath.
    d = read_tsv(filepath, col_types = cols_only(channel = 'i',
                                                 num_events_template = 'i',
                                                 sequence_length_template = 'i',
                                                 mean_qscore_template = 'n',
                                                 sequence_length_2d = 'i',
                                                 mean_qscore_2d = 'n',
                                                 start_time = 'n',
                                                 calibration_strand_genome_template = 'c'))

    if(max(d$channel)<=512){
        flog.info("MinION flowcell detected")
    }else{
        flog.info("PromethION flowcell detected")
    }

    # remove the control sequence from directRNA runs
    if("calibration_strand_genome_template" %in% names(d)){
        d = subset(d, calibration_strand_genome_template != "YHR174W")
    }

    if("sequence_length_2d" %in% names(d)){
        # it's a 1D2 or 2D run: use the 2d columns as the template columns.
        # 1D2/2D summaries have no per-read event counts, hence the NA.
        d$sequence_length_template = as.numeric(as.character(d$sequence_length_2d))
        d$mean_qscore_template = as.numeric(as.character(d$mean_qscore_2d))
        d$num_events_template = NA
        d$start_time = as.numeric(as.character(d$start_time))
    }else{
        d$sequence_length_template = as.numeric(as.character(d$sequence_length_template))
        d$mean_qscore_template = as.numeric(as.character(d$mean_qscore_template))
        d$num_events_template = as.numeric(as.character(d$num_events_template))
        d$start_time = as.numeric(as.character(d$start_time))
    }

    d$events_per_base = d$num_events_template/d$sequence_length_template

    # flowcells are named after the directory containing the summary file
    flowcell = basename(dirname(filepath))

    # add columns for all the reads
    d1 = add_cols(d, min.q[1])
    d1$Q_cutoff = "All reads"

    # add columns for just the reads that pass the user Q threshold
    d2 = add_cols(d, min.q[2])
    d2$Q_cutoff = q_title

    # bind those two together into one data frame
    d = as.data.frame(rbindlist(list(d1, d2)))

    # name the flowcell (useful for analyses with >1 flowcell)
    d$flowcell = flowcell

    # make sure this is a factor
    d$Q_cutoff = as.factor(d$Q_cutoff)

    # keep only the columns the downstream plotting/summary code uses
    keep = c("hour", "start_time", "channel", "sequence_length_template",
             "mean_qscore_template", "row", "col", "cumulative.bases",
             "cumulative.bases.time", "reads_per_hour", "Q_cutoff",
             "flowcell", "events_per_base")
    dk = d[, which(names(d) %in% keep)]

    return(dk)
}

reads.gt <- function(d, len){
    # return the number of reads in data frame d
    # that are at least as long as length len
    return(length(which(d$sequence_length_template>=len)))
}

bases.gt <- function(d, len){
    # return the number of bases contained in reads from
    # data frame d
    # that are at least as long as length len
    reads = subset(d, sequence_length_template >= len)
    return(sum(as.numeric(reads$sequence_length_template)))
}

log10_minor_break = function (...){
    # function to add minor breaks to a log10 graph
    # hat-tip: https://stackoverflow.com/questions/30179442/plotting-minor-breaks-on-a-log-scale-with-ggplot
    # returns a closure suitable for ggplot2's minor_breaks argument
    function(x) {
        minx         = floor(min(log10(x), na.rm=TRUE))-1
        maxx         = ceiling(max(log10(x), na.rm=TRUE))+1
        n_major      = maxx-minx+1
        major_breaks = seq(minx, maxx, by=1)
        minor_breaks =
            rep(log10(seq(1, 9, by=1)), times = n_major)+
            rep(major_breaks, each = 9)
        return(10^(minor_breaks))
    }
}

log10_major_break = function (...){
    # function to add major breaks to a log10 graph
    # hat-tip: https://stackoverflow.com/questions/30179442/plotting-minor-breaks-on-a-log-scale-with-ggplot
    # returns a closure suitable for ggplot2's breaks argument
    function(x) {
        minx         = floor(min(log10(x), na.rm=TRUE))-1
        maxx         = ceiling(max(log10(x), na.rm=TRUE))+1
        n_major      = maxx-minx+1
        major_breaks = seq(minx, maxx, by=1)
        return(10^(major_breaks))
    }
}

binSearch <- function(min, max, df, t = 100000) {
    # binary search algorithm, thanks to https://stackoverflow.com/questions/46292438/optimising-a-calculation-on-every-cumulative-subset-of-a-vector-in-r/46303384#46303384
    # the aim is to return the number of reads in a dataset (df)
    # that comprise the largest subset of reads with an N50 of t.
    # df must be sorted by descending sequence_length_template with a
    # cumulative.bases column, as produced by add_cols().
    # we use this to calculate the number of 'ultra long' reads,
    # which are defined as those with N50 > 100KB.
    # BUGFIX: the recursive calls previously omitted `t`, so any caller
    # passing a non-default threshold silently reverted to 100000 after
    # the first recursion; `t` is now threaded through every call.
    mid = floor(mean(c(min, max)))
    if (mid == min) {
        if (df$sequence_length_template[min(which(df$cumulative.bases>df$cumulative.bases[min]/2))] < t) {
            return(min - 1)
        } else {
            return(max - 1)
        }
    }

    n = df$sequence_length_template[min(which(df$cumulative.bases>df$cumulative.bases[mid]/2))]
    if (n >= t) {
        return(binSearch(mid, max, df, t))
    } else {
        return(binSearch(min, mid, df, t))
    }
}

summary.stats <- function(d, Q_cutoff="All reads"){
    # Calculate summary stats for a single value of min.q.
    # d is the long data frame from load_summary(); Q_cutoff selects which
    # of the two Q_cutoff levels to summarise.
    # Returns a named list (yield, read counts, N50, length/Q summaries,
    # plus read/base counts above fixed length thresholds) ready to be
    # serialised to YAML.
    rows = which(as.character(d$Q_cutoff)==Q_cutoff)
    d = d[rows,]
    d = d[with(d, order(-sequence_length_template)), ] # sort by read length, just in case

    total.bases = sum(as.numeric(d$sequence_length_template))
    total.reads = nrow(d)
    # N50: length of the shortest read in the set covering half the yield
    N50.length = d$sequence_length_template[min(which(d$cumulative.bases > (total.bases/2)))]
    mean.length = round(mean(as.numeric(d$sequence_length_template)), digits = 1)
    median.length = round(median(as.numeric(d$sequence_length_template)), digits = 1)
    max.length = max(as.numeric(d$sequence_length_template))
    mean.q = round(mean(d$mean_qscore_template), digits = 1)
    median.q = round(median(d$mean_qscore_template), digits = 1)

    # calculate ultra-long reads and bases (max amount of data with N50>100KB)
    ultra.reads = binSearch(1, nrow(d), d, t = 100000)
    if(ultra.reads>=1){
        ultra.gigabases = sum(as.numeric(d$sequence_length_template[1:ultra.reads]))/1000000000
    }else{
        ultra.gigabases = 0
    }

    reads = list(
        reads.gt(d, 10000),
        reads.gt(d, 20000),
        reads.gt(d, 50000),
        reads.gt(d, 100000),
        reads.gt(d, 200000),
        reads.gt(d, 500000),
        reads.gt(d, 1000000),
        ultra.reads)
    names(reads) = c(">10kb", ">20kb", ">50kb", ">100kb", ">200kb", ">500kb", ">1m", "ultralong")

    bases = list(
        bases.gt(d, 10000)/1000000000,
        bases.gt(d, 20000)/1000000000,
        bases.gt(d, 50000)/1000000000,
        bases.gt(d, 100000)/1000000000,
        bases.gt(d, 200000)/1000000000,
        bases.gt(d, 500000)/1000000000,
        bases.gt(d, 1000000)/1000000000,
        ultra.gigabases)
    names(bases) = c(">10kb", ">20kb", ">50kb", ">100kb", ">200kb", ">500kb", ">1m", "ultralong")

    return(list('total.gigabases' = total.bases/1000000000,
                'total.reads' = total.reads,
                'N50.length' = N50.length,
                'mean.length' = mean.length,
                'median.length' = median.length,
                'max.length' = max.length,
                'mean.q' = mean.q,
                'median.q' = median.q,
                'reads' = reads,
                'gigabases' = bases
                ))
}
channel.summary <- function(d){ # calculate summaries of what happened in each of the channels # of a flowcell a = ddply(d, .(channel), summarize, total.bases = sum(sequence_length_template), total.reads = sum(which(sequence_length_template>=0)), mean.read.length = mean(sequence_length_template), median.read.length = median(sequence_length_template), row = mean(row), col = mean(col)) b = melt(a, id.vars = c("channel", "row", "col")) return(b) } single.flowcell <- function(input.file, output.dir, q=7, base.dir = NA){ # wrapper function to analyse data from a single flowcell # input.file is a sequencing_summary.txt file from a 1D run # output.dir is the output directory into which to write results # q is the cutoff used for Q values, set by the user # base.dir is the base directory if and only if the user supplied a base directory # we use base.dir to name flowcells in a sensible way flog.info(paste("Loading input file:", input.file)) d = load_summary(input.file, min.q=c(-Inf, q)) flowcell = unique(d$flowcell) # output goes with the sequencing summary file unless otherwise specified if(is.na(opt$output.dir)){ output.dir = file.path(dirname(input.file)) } else { # the user supplied an output dir output.dir = file.path(opt$output.dir, flowcell) } flog.info(paste(sep = "", flowcell, ": creating output directory:", output.dir)) dir.create(output.dir, recursive = TRUE) out.txt = file.path(output.dir, "summary.yaml") flog.info(paste(sep = "", flowcell, ": summarising input file for flowcell")) all.reads.summary = summary.stats(d, Q_cutoff = "All reads") q10.reads.summary = summary.stats(d, Q_cutoff = q_title) summary = list("input file" = input.file, "All reads" = all.reads.summary, cutoff = q10.reads.summary, "notes" = 'ultralong reads refers to the largest set of reads with N50>100KB') names(summary)[3] = q_title write(as.yaml(summary), out.txt) muxes = seq(from = 0, to = max(d$hour), by = 8) # set up variable sizes if(smallfig == TRUE){ p1m = 0.5 }else{ p1m = 1.0 } 
if(smallfig == TRUE){ p2m = 0.6 }else{ p2m = 1.0 } # make plots flog.info(paste(sep = "", flowcell, ": plotting length histogram")) p1 = ggplot(d, aes(x = sequence_length_template, fill = Q_cutoff)) + geom_histogram(bins = 300) + scale_x_log10(minor_breaks=log10_minor_break(), breaks = log10_major_break()) + facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") + theme(text = element_text(size = 15)) + xlab("Read length (bases)") + ylab("Number of reads") + guides(fill=FALSE) + scale_fill_viridis(discrete = TRUE, begin = 0.25, end = 0.75) suppressMessages(ggsave(filename = file.path(output.dir, "length_histogram.png"), width = p1m*960/75, height = p1m*960/75, plot = p1)) # flog.info(paste(sep = "", flowcell, ": plotting mean Q score histogram")) p2 = ggplot(d, aes(x = mean_qscore_template, fill = Q_cutoff)) + geom_histogram(bins = 300) + facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") + theme(text = element_text(size = 15)) + xlab("Mean Q score of read") + ylab("Number of reads") + guides(fill=FALSE) + scale_fill_viridis(discrete = TRUE, begin = 0.25, end = 0.75) suppressMessages(ggsave(filename = file.path(output.dir, "q_histogram.png"), width = p1m*960/75, height = p1m*960/75, plot = p2)) # if(max(d$channel)<=512){ # only do this for minion, not promethion flog.info(paste(sep = "", flowcell, ": plotting flowcell overview")) p3 = ggplot(subset(d, Q_cutoff=="All reads"), aes(x=start_time/3600, y=sequence_length_template, colour = mean_qscore_template)) + geom_point(size=1.5, alpha=0.35) + scale_colour_viridis() + labs(colour='Q') + scale_y_log10() + facet_grid(row~col) + theme(panel.spacing = unit(0.5, "lines")) + xlab("Hours into run") + ylab("Read length") + theme(text = element_text(size = 40), axis.text.x = element_text(size=12), axis.text.y = element_text(size=12), legend.text=element_text(size=18), legend.title=element_text(size=24)) suppressMessages(ggsave(filename = file.path(output.dir, "flowcell_overview.png"), width = 2000/75, height = 1920/75, plot = 
p3)) } flog.info(paste(sep = "", flowcell, ": plotting flowcell yield over time")) p5 = ggplot(d, aes(x=start_time/3600, y=cumulative.bases.time/1000000000, colour = Q_cutoff)) + geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) + geom_line(size = 1) + xlab("Hours into run") + ylab("Total yield in gigabases") + scale_colour_viridis(discrete = TRUE, begin = 0.25, end = 0.75, guide = guide_legend(title = "Reads")) + theme(text = element_text(size = 15)) suppressMessages(ggsave(filename = file.path(output.dir, "yield_over_time.png"), width = p1m*960/75, height = p1m*480/75, plot = p5)) # flog.info(paste(sep = "", flowcell, ": plotting flowcell yield by read length")) p6 = ggplot(d, aes(x=sequence_length_template, y=cumulative.bases/1000000000, colour = Q_cutoff)) + geom_line(size = 1) + xlab("Minimum read length (bases)") + ylab("Total yield in gigabases") + scale_colour_viridis(discrete = TRUE, begin = 0.25, end = 0.75, guide = guide_legend(title = "Reads")) + theme(text = element_text(size = 15)) xmax = max(d$sequence_length_template[which(d$cumulative.bases > 0.01 * max(d$cumulative.bases))]) p6 = p6 + scale_x_continuous(limits = c(0, xmax)) suppressMessages(ggsave(filename = file.path(output.dir, "yield_by_length.png"), width = p1m*960/75, height = p1m*480/75, plot = p6)) # flog.info(paste(sep = "", flowcell, ": plotting sequence length over time")) p7 = ggplot(d, aes(x=start_time/3600, y=sequence_length_template, colour = Q_cutoff, group = Q_cutoff)) + geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) + theme(text = element_text(size = 15)) + geom_smooth() + xlab("Hours into run") + ylab("Mean read length (bases)") + scale_colour_viridis(discrete = TRUE, begin = 0.25, end = 0.75, guide = guide_legend(title = "Reads")) + ylim(0, NA) suppressMessages(ggsave(filename = file.path(output.dir, "length_by_hour.png"), width = p1m*960/75, height = p1m*480/75, plot = p7)) # flog.info(paste(sep = "", flowcell, ": 
plotting Q score over time")) p8 = ggplot(d, aes(x=start_time/3600, y=mean_qscore_template, colour = Q_cutoff, group = Q_cutoff)) + geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) + theme(text = element_text(size = 15)) + geom_smooth() + xlab("Hours into run") + ylab("Mean Q score") + scale_colour_viridis(discrete = TRUE, begin = 0.25, end = 0.75, guide = guide_legend(title = "Reads")) + ylim(0, NA) suppressMessages(ggsave(filename = file.path(output.dir, "q_by_hour.png"), width = p1m*960/75, height = p1m*480/75, plot = p8)) # flog.info(paste(sep = "", flowcell, ": plotting reads per hour")) f = d[c("hour", "reads_per_hour", "Q_cutoff")] f = f[!duplicated(f),] g = subset(f, Q_cutoff=="All reads") h = subset(f, Q_cutoff==q_title) max = max(f$hour) # all of this is just to fill in hours with no reads recorded all = 0:max add.g = all[which(all %in% g$hour == FALSE)] if(length(add.g)>0){ add.g = data.frame(hour = add.g, reads_per_hour = 0, Q_cutoff = "All reads") g = rbind(g, add.g) } add.h = all[which(all %in% h$hour == FALSE)] if(length(add.h)>0){ add.h = data.frame(hour = add.h, reads_per_hour = 0, Q_cutoff = q_title) h = rbind(h, add.h) } i = rbind(g, h) i$Q_cutoff = as.character(i$Q_cutoff) i$Q_cutoff[which(i$Q_cutoff==q_title)] = paste("Q>=", q, sep="") p9 = ggplot(i, aes(x=hour, y=reads_per_hour, colour = Q_cutoff, group = Q_cutoff)) + geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) + theme(text = element_text(size = 15)) + geom_point() + geom_line() + xlab("Hours into run") + ylab("Number of reads per hour") + ylim(0, NA) + scale_colour_viridis(discrete = TRUE, begin = 0.25, end = 0.75, guide = guide_legend(title = "Reads")) suppressMessages(ggsave(filename = file.path(output.dir, "reads_per_hour.png"), width = p1m*960/75, height = p1m*480/75, plot = p9)) # if(max(d$channel)<=512){ # minion flog.info(paste(sep = "", flowcell, ": plotting read length vs. 
q score scatterplot")) p10 = ggplot(subset(d, Q_cutoff=="All reads"), aes(x = sequence_length_template, y = mean_qscore_template, colour = events_per_base)) + geom_point(alpha=0.05, size = 0.4) + scale_x_log10(minor_breaks=log10_minor_break(), breaks = log10_major_break()) + labs(colour='Events per base\n(log scale)\n') + theme(text = element_text(size = 15)) + xlab("Read length (bases)") + ylab("Mean Q score of read") }else{ # promethion p10 = ggplot(subset(d, Q_cutoff=="All reads"), aes(x = sequence_length_template, y = mean_qscore_template, colour = events_per_base)) + geom_bin2d() + scale_x_log10(minor_breaks=log10_minor_break(), breaks = log10_major_break()) + theme(text = element_text(size = 15)) + scale_fill_viridis() + xlab("Read length (bases)") + ylab("Mean Q score of read") } if(max(d$events_per_base, na.rm=T)>0){ # a catch for 1D2 runs which don't have events per base p10 = p10 + scale_colour_viridis(trans = "log", labels = scientific, option = 'inferno') } # we keep it a bit wider, because the legend takes up a fair bit of the plot space suppressMessages(ggsave(filename = file.path(output.dir, "length_vs_q.png"), width = p2m*960/75, height = p1m*960/75, plot = p10)) # flog.info(paste(sep = "", flowcell, ": plotting flowcell channels summary histograms")) c = channel.summary(subset(d, Q_cutoff=="All reads")) c10 = channel.summary(subset(d, Q_cutoff==q_title)) c$Q_cutoff = "All reads" c10$Q_cutoff = q_title cc = rbind(c, c10) cc$variable = as.character(cc$variable) cc$variable[which(cc$variable=="total.bases")] = "Number of bases per channel" cc$variable[which(cc$variable=="total.reads")] = "Number of reads per channel" cc$variable[which(cc$variable=="mean.read.length")] = "Mean read length per channel" cc$variable[which(cc$variable=="median.read.length")] = "Median read length per channel" p11 = ggplot(cc, aes(x = value, fill = Q_cutoff)) + geom_histogram(bins = 30) + facet_grid(Q_cutoff~variable, scales = "free_x") + theme(text = element_text(size = 
15), axis.text.x = element_text(angle = 60, hjust = 1)) + guides(fill=FALSE) + scale_fill_viridis(discrete = TRUE, begin = 0.25, end = 0.75) + guides(fill=FALSE) suppressMessages(ggsave(filename = file.path(output.dir, "channel_summary.png"), width = 960/75, height = 480/75, plot = p11)) flog.info(paste(sep = "", flowcell, ": plotting physical overview of output per channel")) if(max(d$channel)<=512){ # minion cols = 2 }else{ # promethion cols = 1 } p12 = ggplot(subset(cc, variable == "Number of bases per channel"), aes(x = as.numeric(col), y = as.numeric(row))) + geom_tile(aes(fill = value/1000000000), colour="white", size=0.25) + facet_wrap(~Q_cutoff, ncol = cols) + theme(text = element_text(size = 15), plot.background=element_blank(), panel.border=element_blank(), panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + scale_fill_viridis(name = "GB/channel") + scale_y_continuous(trans = "reverse", expand=c(0,0)) + scale_x_continuous(expand=c(0,0)) + coord_fixed() + labs(x="channel column",y="channel row") if(max(d$channel)<=512){ # minion suppressMessages(ggsave(filename = file.path(output.dir, "gb_per_channel_overview.png"), width = 960/150, height = 480/75, plot = p12)) }else{ # promethion suppressMessages(ggsave(filename = file.path(output.dir, "gb_per_channel_overview.png"), width = 960/75, height = 480/75, plot = p12)) } return(d) } combined.flowcell <- function(d, output.dir, q=8){ # function to analyse combined data from multiple flowcells # useful for getting an overall impression of the combined data flog.info("Creating output directory") out.txt = file.path(output.dir, "summary.yaml") # write summaries flog.info(paste("Summarising combined data from all flowcells, saving to:", out.txt)) # tidy up and remove added stuff drops = c("cumulative.bases", "hour", "reads.per.hour") d = d[ , !(names(d) %in% drops)] d1 = subset(d, Q_cutoff == "All reads") d1 = d1[with(d1, order(-sequence_length_template)), ] 
# sort by read length d1$cumulative.bases = cumsum(as.numeric(d1$sequence_length_template)) d2 = subset(d, Q_cutoff == q_title) d2 = d2[with(d2, order(-sequence_length_template)), ] # sort by read length d2$cumulative.bases = cumsum(as.numeric(d2$sequence_length_template)) d1$Q_cutoff = as.factor(d1$Q_cutoff) d2$Q_cutoff = as.factor(d2$Q_cutoff) all.reads.summary = summary.stats(d1, Q_cutoff = "All reads") q10.reads.summary = summary.stats(d2, Q_cutoff = q_title) summary = list("input file" = input.file, "All reads" = all.reads.summary, cutoff = q10.reads.summary, "notes" = 'ultralong reads refers to the largest set of reads with N50>100KB') names(summary)[3] = q_title write(as.yaml(summary), out.txt) d = rbind(d1, d2) d$Q_cutoff = as.factor(d$Q_cutoff) d1 = 0 d2 = 0 # set up variable sizes if(smallfig == TRUE){ p1m = 0.5 }else{ p1m = 1.0 } # make plots flog.info("Plotting combined length histogram") p1 = ggplot(d, aes(x = sequence_length_template, fill = Q_cutoff)) + geom_histogram(bins = 300) + scale_x_log10(minor_breaks=log10_minor_break(), breaks = log10_major_break()) + facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") + theme(text = element_text(size = 15)) + xlab("Read length (bases)") + ylab("Number of reads") + guides(fill=FALSE) + scale_fill_viridis(discrete = TRUE, begin = 0.25, end = 0.75) suppressMessages(ggsave(filename = file.path(output.dir, "combined_length_histogram.png"), width = p1m*960/75, height = p1m*960/75, plot = p1)) flog.info("Plotting combined mean Q score histogram") p2 = ggplot(d, aes(x = mean_qscore_template, fill = Q_cutoff)) + geom_histogram(bins = 300) + facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") + theme(text = element_text(size = 15)) + xlab("Mean Q score of read") + ylab("Number of reads") + guides(fill=FALSE) + scale_fill_viridis(discrete = TRUE, begin = 0.25, end = 0.75) suppressMessages(ggsave(filename = file.path(output.dir, "combined_q_histogram.png"), width = p1m*960/75, height = p1m*960/75, plot = p2)) 
flog.info("Plotting combined yield by length") p4 = ggplot(d, aes(x=sequence_length_template, y=cumulative.bases/1000000000, colour = Q_cutoff)) + geom_line(size = 1) + xlab("Minimum read length (bases)") + ylab("Total yield in gigabases") + scale_colour_viridis(discrete = TRUE, begin = 0.25, end = 0.75, guide = guide_legend(title = "Reads")) + theme(text = element_text(size = 15)) xmax = max(d$sequence_length_template[which(d$cumulative.bases > 0.01 * max(d$cumulative.bases))]) p4 = p4 + scale_x_continuous(limits = c(0, xmax)) suppressMessages(ggsave(filename = file.path(output.dir, "combined_yield_by_length.png"), width = p1m*960/75, height = p1m*480/75, plot = p4)) } multi.flowcell = function(input.file, output.base, q){ # wrapper function to allow parallelisation of single-flowcell # analyses when >1 flowcell is analysed in one run d = single.flowcell(input.file, output.dir, q) return(d) } multi.plots = function(dm, output.dir){ # function to plot data from multiple flowcells, # where the data is not combined (as in combined.flowcell() ) # but instead just uses multiple lines on each plot. 
muxes = seq(from = 0, to = max(dm$hour), by = 8) # set up variable sizes if(smallfig == TRUE){ p1m = 0.5 }else{ p1m = 1.0 } # make plots flog.info("Plotting length distributions") p1 = ggplot(dm, aes(x = sequence_length_template)) + geom_line(stat="density", aes(colour = flowcell), size = 1) + scale_x_log10(minor_breaks=log10_minor_break(), breaks = log10_major_break()) + facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") + theme(text = element_text(size = 15)) + xlab("Read length (bases)") + ylab("Density") suppressMessages(ggsave(filename = file.path(output.dir, "length_distributions.png"), width = p1m*960/75, height = p1m*960/75, plot = p1)) # flog.info("Plotting mean Q score distributions") p2 = ggplot(dm, aes(x = mean_qscore_template)) + geom_line(stat="density", aes(colour = flowcell), size = 1) + facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") + theme(text = element_text(size = 15)) + xlab("Mean Q score of read") + ylab("Density") suppressMessages(ggsave(filename = file.path(output.dir, "q_distributions.png"), width = p1m*960/75, height = p1m*960/75, plot = p2)) # flog.info("Plotting flowcell yield over time") p5 = ggplot(dm, aes(x=start_time/3600, y=cumulative.bases.time/1000000000, colour = flowcell)) + geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) + geom_line(size = 1) + xlab("Hours into run") + ylab("Total yield in gigabases") + facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") + theme(text = element_text(size = 15)) suppressMessages(ggsave(filename = file.path(output.dir, "yield_over_time.png"), width = p1m*960/75, height = p1m*960/75, plot = p5)) # flog.info("Plotting flowcell yield by length") p6 = ggplot(dm, aes(x=sequence_length_template, y=cumulative.bases/1000000000, colour = flowcell)) + geom_line(size = 1) + xlab("Minimum read length (bases)") + ylab("Total yield in gigabases") + theme(text = element_text(size = 15)) + facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") xmax = 
max(dm$sequence_length_template[which(dm$cumulative.bases > 0.01 * max(dm$cumulative.bases))]) p6 = p6 + scale_x_continuous(limits = c(0, xmax)) suppressMessages(ggsave(filename = file.path(output.dir, "yield_by_length.png"), width = p1m*960/75, height = p1m*960/75, plot = p6)) # flog.info("Plotting sequence length over time") p7 = ggplot(dm, aes(x=start_time/3600, y=sequence_length_template, colour = flowcell)) + geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) + theme(text = element_text(size = 15)) + geom_smooth() + xlab("Hours into run") + ylab("Mean read length (bases)") + ylim(0, NA) + facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") suppressMessages(ggsave(filename = file.path(output.dir, "length_by_hour.png"), width = p1m*960/75, height = p1m*960/75, plot = p7)) flog.info("Plotting Q score over time") p8 = ggplot(dm, aes(x=start_time/3600, y=mean_qscore_template, colour = flowcell)) + geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) + theme(text = element_text(size = 15)) + geom_smooth() + xlab("Hours into run") + ylab("Mean Q score") + facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") suppressMessages(ggsave(filename = file.path(output.dir, "q_by_hour.png"), width = p1m*960/75, height = p1m*960/75, plot = p8)) } # Choose how to act depending on whether we have a single input file or mulitple input files if(file_test("-f", input.file)==TRUE & length(test.file)>1){ # if it's an existing file (not a folder) just run one analysis d = single.flowcell(input.file, output.dir, q) }else if(file_test("-d", input.file)==TRUE & length(test.file)>1){ # it's a directory, recursively analyse all sequencing_summary.txt files # get a list of all sequencing_summary.txt files, recursively summaries = list.files(path = input.file, pattern = "sequencing_summary.txt", recursive = TRUE, full.names = TRUE) flog.info("") flog.info("**** Analysing the following files ****") flog.info(summaries) # if the user passes 
a directory with only one sequencing_summary.txt file... if(length(summaries) == 1){ d = single.flowcell(summaries[1], output.dir, q) flog.info('**** Analysis complete ****') }else{ # analyse each one and keep the returns in a list if(combined_only == FALSE){ results = mclapply(summaries, multi.flowcell, output.dir, q, mc.cores = cores) }else{ results = mclapply(summaries, load_summary, min.q = c(-Inf, q), mc.cores = cores) } # rbind that list flog.info('**** Analysing data from all flowcells combined ****') dm = as.data.frame(rbindlist(results)) # now do the single plot on ALL the output if(is.na(opt$output.dir)){ combined.output = file.path(input.file, "combinedQC") } else { # the user supplied an output dir combined.output = file.path(opt$output.dir, "combinedQC") } flog.info(paste("Plots from the combined output will be saved in", combined.output)) dir.create(combined.output, recursive = TRUE) combined.flowcell(dm, combined.output, q) multi.plots(dm, combined.output) flog.info('**** Analysis complete ****') flog.info('If you use MinIONQC in your published work, please cite:') flog.info('R Lanfear, M Schalamun, D Kainer, W Wang, B Schwessinger (2018). MinIONQC: fast and simple quality control for MinION sequencing data, Bioinformatics, bty654') flog.info('https://doi.org/10.1093/bioinformatics/bty654') } }else{ #WTF flog.warn(paste("Couldn't find a sequencing summary file in your input which was: ", input.file, "\nThe input must be either a sequencing_summary.txt file, or a directory containing one or more such files")) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convenience.R \name{sc_cols_non_NA} \alias{sc_cols_non_NA} \title{Checks that all elements from the specified columns are not NA} \usage{ sc_cols_non_NA(object, cols = names(object), ..., unk_cols_callback = stop) } \arguments{ \item{object}{table with a columns specified by \code{cols}} \item{cols}{vector of characters of columns that are checked for NAs} \item{...}{further parameters that are passed to \link{add_sanity_check}.} \item{unk_cols_callback}{user-defined function that is called if some of the \code{cols} are not contained in the \code{object}. This is helpful if an additional warning or error should be thrown or maybe a log-entry should be created. Default is the function \code{stop}} } \value{ a list where every element is an object returned by \link{add_sanity_check} for each column specified in \code{cols} that exists in \code{object} } \description{ Checks that all elements from the specified columns are not NA } \examples{ iris[c(1,3,5,7,9), 1] <- NA dummy_call <- function(x) { sc_cols_non_NA(object = iris, description = "No NAs expected in iris") } dummy_call(x = iris) get_sanity_checks() }
/man/sc_cols_non_NA.Rd
no_license
cran/sanityTracker
R
false
true
1,214
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convenience.R \name{sc_cols_non_NA} \alias{sc_cols_non_NA} \title{Checks that all elements from the specified columns are not NA} \usage{ sc_cols_non_NA(object, cols = names(object), ..., unk_cols_callback = stop) } \arguments{ \item{object}{table with a columns specified by \code{cols}} \item{cols}{vector of characters of columns that are checked for NAs} \item{...}{further parameters that are passed to \link{add_sanity_check}.} \item{unk_cols_callback}{user-defined function that is called if some of the \code{cols} are not contained in the \code{object}. This is helpful if an additional warning or error should be thrown or maybe a log-entry should be created. Default is the function \code{stop}} } \value{ a list where every element is an object returned by \link{add_sanity_check} for each column specified in \code{cols} that exists in \code{object} } \description{ Checks that all elements from the specified columns are not NA } \examples{ iris[c(1,3,5,7,9), 1] <- NA dummy_call <- function(x) { sc_cols_non_NA(object = iris, description = "No NAs expected in iris") } dummy_call(x = iris) get_sanity_checks() }
#' Non-uniform Discrete Fourier Transform #' #' @importFrom graphics points legend curve par #' @importFrom stats fft #' #' @description Compute the one-dimensional Non-uniform Discrete Fourier Transform (NDFT). This is needed if the data is sampled in non-uniform intervals. #' #' @param f vector of real or complex function values #' @param x vector of points in direct space, typically time or position coordinates. If \code{inverse=FALSE}, \code{x} must have the same length as \code{f}. #' @param nu vector of frequencies in units of [1/units of x]. If \code{inverse=TRUE}, \code{nu} must have the same length as \code{f}. #' @param inverse logical flag; if TRUE, the inverse Fourier transform is performed. #' @param weighing logical flag; if TRUE, irregularly spaced evaluations of \code{f} will be weighted proportionally to their bin width in \code{x} (if \code{inverse=FALSE}) or \code{nu} (if \code{inverse=FALSE}). #' @param simplify logical flag; if TRUE, the complex output array will be simplified to a real array, if it is real within the floating point accuracy. #' #' @return Returns a vector of the same length as \code{x} (if \code{inverse=FALSE}) or \code{nu} (if \code{inverse=TRUE}). #' #' @details The one-dimensional NDFT of a vector \eqn{f=(f_1,...,f_N)} is defined as \deqn{F_j=\sum_i w_i f_i exp(-2\pi i*x_i*\nu_j)} where \eqn{w_i} are optional weights, proportional to the interval around \eqn{x_i}, only used if \code{weighing=TRUE}. Likewise, the inverse NDFT is defined as \deqn{f_i=\sum_j w_j F_j exp(+2\pi i*x_i*nu_j)} where \eqn{w_j} are optional weights, proportional to the interval around \eqn{\nu_j}. In this implementation NDFTs are computed using a brute force algorithm, scaling as \eqn{O(N*N)}, which is considerably worse than the \eqn{O(N)*log(N)} scaling of FFT algorithms. It is therefore important to pick the required frequencies wisely to minimise computing times. 
#' #' @examples #' #' # Define an example signal #' nu1 = 1 # [Hz] first frequency #' nu2 = 8 # [Hz] second frequency in the signal #' s = function(t) sin(2*pi*nu1*t)+0.7*cos(2*pi*nu2*t+5) #' #' # Discretize signal #' N = 50 # number of samples #' t.uniform = seq(0,N-1)/N #' t.nonuniform = t.uniform^1.3 #' s.uniform = s(t.uniform) #' s.nonuniform = s(t.nonuniform) #' #' # Plot signal #' oldpar = par(mfrow = c(1, 2)) #' curve(s,0,1,500,xaxs='i',main='Time signal',xlab='Time t',ylab='s(t)',col='grey') #' points(t.uniform,s.uniform,pch=16,cex=0.8) #' points(t.nonuniform,s.nonuniform,pch=4,col='blue') #' legend('topright',c('Continuous signal','Uniform sample','Non-uniform sample'), #' lwd=c(1,NA,NA),pch=c(NA,16,4),col=c('grey','black','blue'),pt.cex=c(1,0.8,1)) #' #' # Uniform and non-uniform DFT #' nu = seq(0,N-1) # discrete frequencies #' spectrum.uniform = stats::fft(s.uniform) #' spectrum.nonuniform = ndft(s.nonuniform,t.nonuniform,nu) #' spectrum.wrong = stats::fft(s.nonuniform) #' #' # Evaluate power #' power.uniform = Mod(spectrum.uniform)^2 #' power.nonuniform = Mod(spectrum.nonuniform)^2 #' power.wrong = Mod(spectrum.wrong)^2 #' #' # Plot DFT and NDFT up to Nyquist frequency #' plot(nu,power.uniform,pch=16,cex=0.8,xlim=c(0,N/2),xaxs='i', #' main='Power spectrum',xlab=expression('Frequency'~nu~'[Hz]'),ylab='Power') #' points(nu,power.nonuniform,pch=4,col='blue') #' points(nu,power.wrong,pch=1,col='red') #' abline(v=c(nu1,nu2),col='grey',lty=2) #' legend('topright',c('DFT of uniform sample','NDFT of non-uniform sample', #' 'DFT of non-uniform sample (wrong)','Input frequencies'), #' lwd=c(NA,NA,NA,1),lty=c(NA,NA,NA,2),pch=c(16,4,1,NA), #' col=c('black','blue','red','grey'),pt.cex=c(0.8,1,1,NA)) #' par(oldpar) #' #' @author Danail Obreschkow #' #' @seealso \code{\link[stats]{fft}} #' #' @export ndft = function(f, x=seq(0,length(f)-1)/length(f), nu=seq(0,length(f)-1), inverse=FALSE, weighing=TRUE, simplify=TRUE) { # f vector of real or complex function values # x 
vector of points in direct space, typically time or position coordinates. If \code{inverse=FALSE}, \code{x} must have the same length as \code{f}. # nu vector of frequencies in units of [1/units of x]. If \code{inverse=TRUE}, \code{nu} must have the same length as \code{f}. # inverse logical flag; if TRUE, the inverse Fourier transform is performed. # weighing logical flag; if TRUE, irregularly spaced evaluations of \code{f} will be weighted proportionally to their bin width in \code{x} (if \code{inverse=FALSE}) or \code{nu} (if \code{inverse=FALSE}). # simplify logical flag; if TRUE, the complex output array will be simplified to a real array, if it is real within the floating point accuracy. N = length(f) if (N<=1) stop('f must be a vector with more than one elements') k = 2*pi*nu # angular frequencies if (inverse) { # inverse NDFT if (length(nu)!=N) stop('nu and f must be vectors of the same length') if (weighing) { w = c(x[2]-x[1],(x[3:N]-x[1:(N-2)])/2,x[N]-x[N-1]) w = w/sum(w)*N } else { w = 1 } g = colSums(w*f*exp(+1i*cbind(k)%*%rbind(x)))/length(f) } else { # forward NDFT if (length(x)!=N) stop('x and f must be vectors of the same length') if (weighing) { w = c(x[2]-x[1],(x[3:N]-x[1:(N-2)])/2,x[N]-x[N-1]) w = w/sum(w)*N } else { w = 1 } g = colSums(w*f*exp(-1i*cbind(x)%*%rbind(k))) } if (simplify) { if (mean(abs(Im(g)))/(mean(abs(g))+.Machine$double.xmin)<1e-13) g = Re(g) } return(g) }
/R/ndft.R
no_license
obreschkow/cooltools
R
false
false
5,515
r
#' Non-uniform Discrete Fourier Transform #' #' @importFrom graphics points legend curve par #' @importFrom stats fft #' #' @description Compute the one-dimensional Non-uniform Discrete Fourier Transform (NDFT). This is needed if the data is sampled in non-uniform intervals. #' #' @param f vector of real or complex function values #' @param x vector of points in direct space, typically time or position coordinates. If \code{inverse=FALSE}, \code{x} must have the same length as \code{f}. #' @param nu vector of frequencies in units of [1/units of x]. If \code{inverse=TRUE}, \code{nu} must have the same length as \code{f}. #' @param inverse logical flag; if TRUE, the inverse Fourier transform is performed. #' @param weighing logical flag; if TRUE, irregularly spaced evaluations of \code{f} will be weighted proportionally to their bin width in \code{x} (if \code{inverse=FALSE}) or \code{nu} (if \code{inverse=FALSE}). #' @param simplify logical flag; if TRUE, the complex output array will be simplified to a real array, if it is real within the floating point accuracy. #' #' @return Returns a vector of the same length as \code{x} (if \code{inverse=FALSE}) or \code{nu} (if \code{inverse=TRUE}). #' #' @details The one-dimensional NDFT of a vector \eqn{f=(f_1,...,f_N)} is defined as \deqn{F_j=\sum_i w_i f_i exp(-2\pi i*x_i*\nu_j)} where \eqn{w_i} are optional weights, proportional to the interval around \eqn{x_i}, only used if \code{weighing=TRUE}. Likewise, the inverse NDFT is defined as \deqn{f_i=\sum_j w_j F_j exp(+2\pi i*x_i*nu_j)} where \eqn{w_j} are optional weights, proportional to the interval around \eqn{\nu_j}. In this implementation NDFTs are computed using a brute force algorithm, scaling as \eqn{O(N*N)}, which is considerably worse than the \eqn{O(N)*log(N)} scaling of FFT algorithms. It is therefore important to pick the required frequencies wisely to minimise computing times. 
#' #' @examples #' #' # Define an example signal #' nu1 = 1 # [Hz] first frequency #' nu2 = 8 # [Hz] second frequency in the signal #' s = function(t) sin(2*pi*nu1*t)+0.7*cos(2*pi*nu2*t+5) #' #' # Discretize signal #' N = 50 # number of samples #' t.uniform = seq(0,N-1)/N #' t.nonuniform = t.uniform^1.3 #' s.uniform = s(t.uniform) #' s.nonuniform = s(t.nonuniform) #' #' # Plot signal #' oldpar = par(mfrow = c(1, 2)) #' curve(s,0,1,500,xaxs='i',main='Time signal',xlab='Time t',ylab='s(t)',col='grey') #' points(t.uniform,s.uniform,pch=16,cex=0.8) #' points(t.nonuniform,s.nonuniform,pch=4,col='blue') #' legend('topright',c('Continuous signal','Uniform sample','Non-uniform sample'), #' lwd=c(1,NA,NA),pch=c(NA,16,4),col=c('grey','black','blue'),pt.cex=c(1,0.8,1)) #' #' # Uniform and non-uniform DFT #' nu = seq(0,N-1) # discrete frequencies #' spectrum.uniform = stats::fft(s.uniform) #' spectrum.nonuniform = ndft(s.nonuniform,t.nonuniform,nu) #' spectrum.wrong = stats::fft(s.nonuniform) #' #' # Evaluate power #' power.uniform = Mod(spectrum.uniform)^2 #' power.nonuniform = Mod(spectrum.nonuniform)^2 #' power.wrong = Mod(spectrum.wrong)^2 #' #' # Plot DFT and NDFT up to Nyquist frequency #' plot(nu,power.uniform,pch=16,cex=0.8,xlim=c(0,N/2),xaxs='i', #' main='Power spectrum',xlab=expression('Frequency'~nu~'[Hz]'),ylab='Power') #' points(nu,power.nonuniform,pch=4,col='blue') #' points(nu,power.wrong,pch=1,col='red') #' abline(v=c(nu1,nu2),col='grey',lty=2) #' legend('topright',c('DFT of uniform sample','NDFT of non-uniform sample', #' 'DFT of non-uniform sample (wrong)','Input frequencies'), #' lwd=c(NA,NA,NA,1),lty=c(NA,NA,NA,2),pch=c(16,4,1,NA), #' col=c('black','blue','red','grey'),pt.cex=c(0.8,1,1,NA)) #' par(oldpar) #' #' @author Danail Obreschkow #' #' @seealso \code{\link[stats]{fft}} #' #' @export ndft = function(f, x=seq(0,length(f)-1)/length(f), nu=seq(0,length(f)-1), inverse=FALSE, weighing=TRUE, simplify=TRUE) { # f vector of real or complex function values # x 
vector of points in direct space, typically time or position coordinates. If \code{inverse=FALSE}, \code{x} must have the same length as \code{f}. # nu vector of frequencies in units of [1/units of x]. If \code{inverse=TRUE}, \code{nu} must have the same length as \code{f}. # inverse logical flag; if TRUE, the inverse Fourier transform is performed. # weighing logical flag; if TRUE, irregularly spaced evaluations of \code{f} will be weighted proportionally to their bin width in \code{x} (if \code{inverse=FALSE}) or \code{nu} (if \code{inverse=FALSE}). # simplify logical flag; if TRUE, the complex output array will be simplified to a real array, if it is real within the floating point accuracy. N = length(f) if (N<=1) stop('f must be a vector with more than one elements') k = 2*pi*nu # angular frequencies if (inverse) { # inverse NDFT if (length(nu)!=N) stop('nu and f must be vectors of the same length') if (weighing) { w = c(x[2]-x[1],(x[3:N]-x[1:(N-2)])/2,x[N]-x[N-1]) w = w/sum(w)*N } else { w = 1 } g = colSums(w*f*exp(+1i*cbind(k)%*%rbind(x)))/length(f) } else { # forward NDFT if (length(x)!=N) stop('x and f must be vectors of the same length') if (weighing) { w = c(x[2]-x[1],(x[3:N]-x[1:(N-2)])/2,x[N]-x[N-1]) w = w/sum(w)*N } else { w = 1 } g = colSums(w*f*exp(-1i*cbind(x)%*%rbind(k))) } if (simplify) { if (mean(abs(Im(g)))/(mean(abs(g))+.Machine$double.xmin)<1e-13) g = Re(g) } return(g) }
conditionalhello{ if(*name!="Your Name"){ writeLine("stdout", "Hello *name!"); } else { writeLine("stdout", "Hello world!"); } } INPUT *name="Your Name" OUTPUT ruleExecOut, *name
/ExampleTrainings/iRODS-User-training/exampleRules/conditionalhello.r
permissive
stefan-wolfsheimer/B2SAFE-B2STAGE-Training
R
false
false
203
r
conditionalhello{ if(*name!="Your Name"){ writeLine("stdout", "Hello *name!"); } else { writeLine("stdout", "Hello world!"); } } INPUT *name="Your Name" OUTPUT ruleExecOut, *name
context("POST to API") test_that("User can send a POST request to the API", { post_response <- gcnlp_post(text_body = sample_1, extract_syntax = TRUE, extract_entities = TRUE, extract_document_sentiment = TRUE) expect_identical(object = names(post_response), expected = c("content", "raw_response")) expect_identical(object = names(post_response$content), expected = c("sentences", "tokens", "entities", "documentSentiment", "language")) expect_identical(object = names(post_response$raw_response), expected = c("url", "status_code", "headers", "all_headers", "cookies", "content", "date", "times", "request", "handle")) })
/tests_archive/tests/testthat/test-post.R
permissive
BrianWeinstein/googlenlp
R
false
false
818
r
context("POST to API") test_that("User can send a POST request to the API", { post_response <- gcnlp_post(text_body = sample_1, extract_syntax = TRUE, extract_entities = TRUE, extract_document_sentiment = TRUE) expect_identical(object = names(post_response), expected = c("content", "raw_response")) expect_identical(object = names(post_response$content), expected = c("sentences", "tokens", "entities", "documentSentiment", "language")) expect_identical(object = names(post_response$raw_response), expected = c("url", "status_code", "headers", "all_headers", "cookies", "content", "date", "times", "request", "handle")) })
N_studies <- 8 rt <- c(1, 9, 2, 1, 10, 1, 1, 90) nt <- c(40, 135, 200, 48, 150, 59, 25, 1159) rc <- c(2, 23, 7, 1, 8, 9, 3, 118) nc <- c(36, 135, 200, 46, 148, 56, 23, 1157)
/test/integration/example-models/bugs_examples/vol1/magnesium/magnesium.data.R
permissive
nhuurre/stanc3
R
false
false
174
r
N_studies <- 8 rt <- c(1, 9, 2, 1, 10, 1, 1, 90) nt <- c(40, 135, 200, 48, 150, 59, 25, 1159) rc <- c(2, 23, 7, 1, 8, 9, 3, 118) nc <- c(36, 135, 200, 46, 148, 56, 23, 1157)
sink("model/threshold_baseline.jags") cat(" model { for (x in 1:Nobs){ #Observation of a flowering plant Y[x] ~ dbern(p[x]) logit(p[x]) <- alpha[Plant[x]] #Residuals discrepancy[x] <- abs(Y[x] - p[x]) #Assess Model Fit Ynew[x] ~ dbern(p[x]) discrepancy.new[x]<-abs(Ynew[x] - p[x]) } #Sum discrepancy fit<-sum(discrepancy)/Nobs fitnew<-sum(discrepancy.new)/Nobs #Prediction for(x in 1:Npreds){ #predict value #Observation - probability of flowering prediction[x] ~ dbern(p_new[x]) logit(p_new[x])<-alpha[PredPlant[x]] #predictive error pred_error[x] <- abs(Ypred[x] - p_new[x]) } #Predictive Error fitpred<-sum(pred_error)/Npreds #Priors #Species level priors for (j in 1:Plants){ #Intercept flowering probability alpha[j] ~ dnorm(0,0.386) } } ",fill=TRUE) sink()
/model/threshold_baseline.R
no_license
bw4sz/plant_dist
R
false
false
980
r
sink("model/threshold_baseline.jags") cat(" model { for (x in 1:Nobs){ #Observation of a flowering plant Y[x] ~ dbern(p[x]) logit(p[x]) <- alpha[Plant[x]] #Residuals discrepancy[x] <- abs(Y[x] - p[x]) #Assess Model Fit Ynew[x] ~ dbern(p[x]) discrepancy.new[x]<-abs(Ynew[x] - p[x]) } #Sum discrepancy fit<-sum(discrepancy)/Nobs fitnew<-sum(discrepancy.new)/Nobs #Prediction for(x in 1:Npreds){ #predict value #Observation - probability of flowering prediction[x] ~ dbern(p_new[x]) logit(p_new[x])<-alpha[PredPlant[x]] #predictive error pred_error[x] <- abs(Ypred[x] - p_new[x]) } #Predictive Error fitpred<-sum(pred_error)/Npreds #Priors #Species level priors for (j in 1:Plants){ #Intercept flowering probability alpha[j] ~ dnorm(0,0.386) } } ",fill=TRUE) sink()
#' Augment data according to a tidied model #' #' Given an R statistical model or other non-tidy object, add columns to the #' original dataset such as predictions, residuals and cluster assignments. #' #' @details #' `sw_augment()` is a wrapper for `broom::augment()`. The benefit of `sw_augment` #' is that it has methods for various time-series model classes such as #' `HoltWinters`, `ets`, `Arima`, etc. #' #' For non-time series, `sw_augment()` defaults to `broom::augment()`. #' The only difference is that the return is a tibble. #' #' Note that by convention the first argument is almost always \code{data}, #' which specifies the original data object. This is not part of the S3 #' signature, partly because it prevents rowwise_df_tidiers from #' taking a column name as the first argument. #' #' @seealso [broom::augment()] #' @param x model or other R object to convert to data frame #' @param ... other arguments passed to methods #' @export sw_augment <- function(x, ...) UseMethod("sw_augment") #' Default augment method #' #' By default, `sw_augment()` uses [broom::augment()] to convert its output. #' #' #' @param x an object to be tidied #' @param ... extra arguments passed to `broom::augment()` #' #' @return A tibble generated by [broom::augment()] #' #' @export sw_augment.default <- function(x, ...) { broom::augment(x, ...) %>% tibble::as_tibble() }
/R/sw_augment.R
no_license
cran/sweep
R
false
false
1,429
r
#' Augment data according to a tidied model #' #' Given an R statistical model or other non-tidy object, add columns to the #' original dataset such as predictions, residuals and cluster assignments. #' #' @details #' `sw_augment()` is a wrapper for `broom::augment()`. The benefit of `sw_augment` #' is that it has methods for various time-series model classes such as #' `HoltWinters`, `ets`, `Arima`, etc. #' #' For non-time series, `sw_augment()` defaults to `broom::augment()`. #' The only difference is that the return is a tibble. #' #' Note that by convention the first argument is almost always \code{data}, #' which specifies the original data object. This is not part of the S3 #' signature, partly because it prevents rowwise_df_tidiers from #' taking a column name as the first argument. #' #' @seealso [broom::augment()] #' @param x model or other R object to convert to data frame #' @param ... other arguments passed to methods #' @export sw_augment <- function(x, ...) UseMethod("sw_augment") #' Default augment method #' #' By default, `sw_augment()` uses [broom::augment()] to convert its output. #' #' #' @param x an object to be tidied #' @param ... extra arguments passed to `broom::augment()` #' #' @return A tibble generated by [broom::augment()] #' #' @export sw_augment.default <- function(x, ...) { broom::augment(x, ...) %>% tibble::as_tibble() }
#clear the environment rm(list=ls()) x<-c (1,2,2,2,8,8,8,10,14,14,18,37) median(x) IQR(x) boxplot(x) quantile(x)
/Problem set 1/2b.R
no_license
mmahin/Data-Mining-Fall-20
R
false
false
113
r
#clear the environment rm(list=ls()) x<-c (1,2,2,2,8,8,8,10,14,14,18,37) median(x) IQR(x) boxplot(x) quantile(x)
# the ordering of counties is the standard throughout the code (in the global_var.R) # make sure adj and initial conditions have the same ordering! ######################################### # draw random params: limit to beta only rparams = function(params) { params_tmp = params params_tmp$beta_pre = rtruncdist(1, mean=(params$beta_pre), sd=params$sd_beta_pre, lower=params$lower_beta_pre, upper=params$upper_beta_pre) return(params_tmp) } ## functions for scenario evaluation in the future: imposed changes in transmission parameter # UPDATE THIS FUNCTION WITH DESIRED NUMBER OF INTERVENTIONS IN THE FUTURE ########################################################################## # create a list of contact intervention effects from params get_intervention_effects = function(params) { return( as.list(c(params$intv1_effect)) ) } get_ramping_times = function(params) { return( as.list(c(2)) ) } ######################## # function: get state0 from initial confitions csv file get_state0 = function(init_file_csv){ init <- read.csv(init_file_csv, stringsAsFactors=FALSE) # double double check order init <- init[match(CT_NAMES, init$county), ] E_init = init$E I_s_init = init$Is I_m_init = init$Im A_init = init$A H_init = init$H Hbar_init = rep(0,dim(init)[1]) NH_init = init$NH NI_init = init$NI D_init = init$D R_init = init$R S_init = as.numeric(init$population) - (E_init + I_s_init + I_m_init + A_init + H_init + Hbar_init + NH_init + NI_init + D_init + R_init) # this is state0 to be passed to get_sir_results state0 = c(S=S_init, E=E_init, I_s=I_s_init, I_m=I_m_init, A=A_init, H=H_init, Hbar=Hbar_init, NH=NH_init, NI=NI_init, D=D_init, R=R_init) return(state0) } ################################### ## draw params from joint posterior ################################### # some supporting functions # get state0 on day0 for a given set of parameters get_state0_params <- function(params, interventions, populations, adj, county_capacities){ nregions = nrow(adj) time_num = 
params$time_num if (time_num<0){ stop("time_num cannot be negative") } else if (time_num==0){ E_init = params$E_init * E_INIT_COUNTY I_s_init = rep(0,nregions) I_m_init = rep(0,nregions) A_init = rep(0,nregions) H_init = rep(0,nregions) Hbar_init = rep(0,nregions) NH_init = rep(0,nregions) NI_init = rep(0,nregions) D_init = rep(0,nregions) R_init = rep(0,nregions) S_init = as.numeric(populations) - (E_init + I_s_init + I_m_init + A_init + H_init + Hbar_init + NH_init + NI_init + D_init + R_init) } else { # initial number exposed E_init_state0 = params$E_init * E_INIT_COUNTY nregions = length(E_init_state0) mytmax = max(params$detect_lag+1, params$time_num + 1) params_tmp <- params # no interventions and no changes to any inputs to contact pattern params_tmp$lockdown_effect <- 0 int_num = length(interventions$distancing_list) int_effects_tmp = as.list(rep(0, int_num)) re_fun_tmp = approxfun(1:mytmax, rep(0,mytmax) ,method='linear', rule=2) interventions$random_effect = re_fun_tmp params_tmp$testing_effect <- 0 params_tmp$H_lag <- 0 params_tmp$D_lag <- 0 init_start_ind = which(MOB$time == -params_tmp$time_num) mb_init = MOB$smooth[init_start_ind:(init_start_ind+mytmax-1)] mobfun_tmp = approxfun(1:mytmax, mb_init ,method='linear', rule=2) interventions$mobility = mobfun_tmp #deathfun_tmp = approxfun(1:mytmax, rep( mean(DEATH_HAZ$smooth.rel_haz[1:7]),mytmax) ,method='linear', rule=2) deathfun_tmp = approxfun(1:mytmax, rep(DEATH_HAZ$smooth.rel_haz[1], mytmax) ,method='linear', rule=2) sevfun_tmp = approxfun(1:mytmax, rep(SEV$sev.measure[1],mytmax) ,method='linear', rule=2) hdischargefun_tmp = approxfun(1:mytmax, rep(1/HLOS$smooth.alos[1],mytmax) ,method='linear', rule=2) # initial conditions at time day0 - time_num E_init = E_init_state0 I_s_init = rep(0,nregions) I_m_init = rep(0,nregions) A_init = rep(0,nregions) H_init = rep(0,nregions) Hbar_init = rep(0,nregions) NH_init = rep(0,nregions) NI_init = rep(0,nregions) D_init = rep(0,nregions) R_init = rep(0,nregions) 
S_init = as.numeric(populations) - (E_init + I_s_init + I_m_init + A_init + H_init + Hbar_init + NH_init + NI_init + D_init + R_init) state0 = c(S=S_init, E=E_init, I_s=I_s_init, I_m=I_m_init, A=A_init, H=H_init, Hbar=Hbar_init, NH=NH_init, NI=NI_init, D=D_init, R=R_init) res = run_sir_model(state0=state0, params=params_tmp, region_adj=adj, populations=as.numeric(populations), tmax=mytmax, interventions=interventions, int_effects = int_effects_tmp, capacities=county_capacities, deathfun=deathfun_tmp, sevfun=sevfun_tmp, hdischargefun=hdischargefun_tmp) # get initial conditions corresponding to params, E_init, and time_num init = matrix(0, ncol=10, nrow=nregions) compartments <- c("E", "I_s", "I_m", "A", "H", "NH", "NI", "D", "R" ) for (k in 1:length(compartments)){ compartment = compartments[k] comp.init <- c(nregions) for (i in 1:nregions){ region = CT_NAMES[i] idx = paste(compartment, region, sep=".") comp.init[i] = res[params$time_num, idx] } init[,k] = comp.init } init = as.data.frame(init) colnames(init) <- c("E", "Is", "Im", "A", "H", "NH", "NI", "D", "R", "county") init$county <- CT_NAMES E_init = init$E I_s_init = init$Is I_m_init = init$Im A_init = init$A H_init = init$H Hbar_init = rep(0,dim(init)[1]) NH_init = init$NH NI_init = init$NI D_init = init$D R_init = init$R S_init = as.numeric(populations) - (E_init + I_s_init + I_m_init + A_init + H_init + Hbar_init + NH_init + NI_init + D_init + R_init) } # this is state0 to be passed to get_sir_results state0 = c(S=S_init, E=E_init, I_s=I_s_init, I_m=I_m_init, A=A_init, H=H_init, Hbar=Hbar_init, NH=NH_init, NI=NI_init, D=D_init, R=R_init) return(state0) } # return a list of params and state0 for a random draw from joint posterior ## inputs: # params: list of parameters representing a chosen scenario # posterior: data frame with a sample from joint posterior rposterior = function(params, posterior, interventions){ index <- sample(c(1:nrow(posterior)), size = 1) par_smpl = as.list(posterior[index, ]) params = 
set_params_mcmc(params, par_smpl) state0 = get_state0_params(params, interventions, CT_POPULATIONS, CT_ADJ, COUNTY_CAPACITIES) return( list (params, state0, index) ) } # update parameter values that are calibrated ################################################## set_params_mcmc = function(myparams, varparams){ # names of params to be updated var_names = names(varparams) # update these params for (k in var_names){ myparams[[k]] = varparams[[k]] } # update calculated params myparams$q_Im = 1 - myparams$q_A - myparams$q_Is myparams$gamma_Hbar = myparams$gamma_H #myparams$school_closure_effect = varparams$tot_lockdown_effect * varparams$school_closure_prop #myparams$lockdown_effect = varparams$tot_lockdown_effect * (1 - varparams$school_closure_prop) return(myparams) } ####################### ## Run the model # draw_rparams controls uncertainty simulation: # if draw_rparams = TRUE, parameters are drawn independently using rparams() # if draw_rparams = TRUE, parameters are drawn from joint posterior supplied as 'posterior' get_sir_results = function(daymax, int_off_dates, nsim=1, params, state0, posterior, posterior.re = NULL, draw_rparams = FALSE, seed = NULL, CI = 0.95) { dayseq = seq(day0, daymax, by="day") tmax = as.numeric(difftime(daymax, day0, units="days")) # if(!is.null(INTERVENTIONS)){interventions <- INTERVENTIONS} else { stop("INTERVENTIONS must be defined globally") } # # get a list of intervetion ramping times from default params # int_ramping_times = get_ramping_times(params) # # # list of contact intervention functions # distancingfun_list = get_distancing_fun_list(dayseq, INT_START_DATES, int_off_dates, int_ramping_times) # # # mobility # mobilityfun = get_mobility_fun(dayseq, MOB) # # # testing # testingfun = get_testing_fun(dayseq, TESTING) # # # combine in the list of interventions # interventions = list(distancing_list=distancingfun_list, mobility=mobilityfun, testing=testingfun, random_effect = function(x){0}) if(!is.null(posterior.re)){ 
random_effect_at = as.numeric(gsub("time", "", colnames(posterior.re))) } # hospital death hazard function deathfun = get_death_fun(dayseq, DEATH_HAZ) # severity function sevfun = get_severity_fun(dayseq, SEV) # hospital discharge rate function hdischargefun = get_hosp_discharge_fun(dayseq, HLOS) pars <- list() state0s <- list() interventions_list <- list() # parameters, set seed if given if(!is.null(seed)) set.seed(seed) if (draw_rparams == TRUE){ if(nsim == 1){ pars[[1]] = params state0s[[1]] = state0 interventions_list[[1]] = interventions } else { for(i in 1:nsim){ pars[[i]] = rparams(params) state0s[[i]] = state0 interventions_list[[i]] = interventions } } } else { if(nsim == 1){ pars[[1]] = params state0s[[1]] = state0 interventions_list[[1]] = interventions } else { for(i in 1:nsim){ rpost_out = rposterior(params, posterior, interventions) pars[[i]] = rpost_out[[1]] state0s[[i]] = rpost_out[[2]] index <- rpost_out[[3]] # update interventions with random effects #ramping_upd = get_ramping_times(pars[[i]]) #distancingfun_list_upd = get_distancing_fun_list(dayseq, INT_START_DATES, int_off_dates, ramping_upd) #interventions_list[[i]] = list(distancing_list=distancingfun_list_upd, mobility=mobilityfun, testing=testingfun) interventions_list[[i]] = interventions if(!is.null(posterior.re)){ random_effect_fun = get_random_effect_fun(dayseq, random_effect_at, posterior.re[index, ]) interventions_list[[i]]$random_effect = random_effect_fun } } } } sir_results = lapply(1:nsim, function(i){ res = run_sir_model(state0=state0s[[i]], params=pars[[i]], region_adj=CT_ADJ, populations=CT_POPULATIONS, tmax=tmax, interventions=interventions_list[[i]], int_effects = get_intervention_effects(pars[[i]]), capacities=COUNTY_CAPACITIES, deathfun=deathfun, sevfun=sevfun, hdischargefun=hdischargefun) res$sim_id = i res }) sir_results_all = ldply(sir_results, rbind) # sir_results_all[,"rHsum.Connecticut"] <- sir_results_all[,"rH.Connecticut"]+sir_results_all[,"rHbar.Connecticut"] 
#for(nm in c("Connecticut", colnames(CT_ADJ))){ # sir_results_all[, paste0("rHsum.", nm)] <- sir_results_all[,paste0("rH.", nm)]+sir_results_all[,paste0("rHbar.", nm)] # } sir_results_long <- melt(sir_results_all, id.vars = c("time", "sim_id")) sir_results_summary <- sir_results_long %>% group_by(variable, time) %>% summarise( mean = mean(value), lower = quantile(value, (1-CI)/2, na.rm=TRUE), upper = quantile(value, 1-(1-CI)/2, na.rm=TRUE), median = median(value)) return(list(raw_results=sir_results, summary=sir_results_summary)) } #################################### # Print a vector of counts in the RMD file # get_compartment(data=res1$sir_results_summary, date="2020-07-01", toprint="rD.Connecticut",start_day = day0) get_compartment <- function(data, date, toprint, start_day){ sir_result_internal = data.frame(filter(data, variable%in%toprint)) t = as.numeric(difftime(as.Date(date), start_day, unit='days')) sir_result_internal = subset(sir_result_internal, time%in%t) out <- as.character(format(sir_result_internal$mean, digits=2, big.mark=",")) if(length(date)>2){ out <- paste0(paste(out[-length(out)], collapse=", "), ", and ", out[length(out)]) }else if(length(date)==2){ out <- paste0(out[1], " and ", out[2]) } return(out) } get_dashboard_output <- function(data, plot=TRUE, filename=NULL){ toprint <- c("rD.Connecticut", "rHsum.Connecticut", "dailyI.Connecticut", "dailyH.Connecticut") if(is.null(names(data))) names(data) <- paste("Scenario", 1:length(data)) out <- NULL for(i in 1:length(data)){ sir_result_internal <- filter(data[[i]]$summary, variable%in%toprint) %>% ungroup(variable) %>% mutate(variable=revalue(variable, c("rD.Connecticut"="Projected_Cumulative_Deaths", "rHsum.Connecticut"="Projected_Hospitalizations", "dailyI.Connecticut"="Projected_New_Infections", "dailyH.Connecticut"="Projected_New_Hospitalizations"))) %>% mutate(Date = day0 + time, value = mean, Scenario=names(data)[i]) %>% select(Date, variable, value, Scenario) out <- rbind(out, 
sir_result_internal) } out$Scenario <- factor(out$Scenario, levels = names(data)) obs <- DAT_CT_STATE[, c("date", "deaths", "cur_hosp")] colnames(obs) <- c("Date", "Actual_Cumulative_Deaths", "Actual_Hospitalizations") # add back 3/1 to 3/8 zeros <- data.frame(Date = day0 + c(0:(obs$Date[1] - day0 + 1))) zeros$Actual_Cumulative_Deaths <- 0 zeros$Actual_Hospitalizations <- 0 obs <- rbind(zeros, obs) out <- dcast(out, Date + Scenario ~ variable, value.var = "value") out <- out %>% full_join(obs, by = "Date") %>% arrange(Scenario, Date) if(plot){ g <- ggplot(melt(out, id.vars=c("Date", "Scenario")), aes(x=Date, y=value, color=variable, group=variable)) + geom_line() + facet_wrap(~Scenario) + theme(legend.position = "bottom") print(g) } out$Produced_by <- "Crawford Lab" out$Produced_time <- Sys.time() if(!is.null(filename)){ write.csv(out, file = filename, row.names=FALSE, quote=FALSE) } return(out) } get_snapshot <- function(data, date, where = "Connecticut"){ toprint <- unique(data$variable) toprint <- toprint[grep(where, toprint)] time.toprint <- as.numeric(as.Date(date, "%m/%d/%Y") - day0) out <- NULL sir_result_internal <- filter(data, variable%in%toprint) %>% filter(time == time.toprint) %>% ungroup(variable) %>% mutate(Date = day0 + time) %>% mutate(variable = gsub(paste0(".",where), "", variable)) %>% select(variable, time, mean, lower, upper) return(data.frame(sir_result_internal)) return(out) }
/functions/run_ct_model.R
permissive
fcrawford/covid19_ct
R
false
false
15,799
r
# the ordering of counties is the standard throughout the code (in the global_var.R) # make sure adj and initial conditions have the same ordering! ######################################### # draw random params: limit to beta only rparams = function(params) { params_tmp = params params_tmp$beta_pre = rtruncdist(1, mean=(params$beta_pre), sd=params$sd_beta_pre, lower=params$lower_beta_pre, upper=params$upper_beta_pre) return(params_tmp) } ## functions for scenario evaluation in the future: imposed changes in transmission parameter # UPDATE THIS FUNCTION WITH DESIRED NUMBER OF INTERVENTIONS IN THE FUTURE ########################################################################## # create a list of contact intervention effects from params get_intervention_effects = function(params) { return( as.list(c(params$intv1_effect)) ) } get_ramping_times = function(params) { return( as.list(c(2)) ) } ######################## # function: get state0 from initial confitions csv file get_state0 = function(init_file_csv){ init <- read.csv(init_file_csv, stringsAsFactors=FALSE) # double double check order init <- init[match(CT_NAMES, init$county), ] E_init = init$E I_s_init = init$Is I_m_init = init$Im A_init = init$A H_init = init$H Hbar_init = rep(0,dim(init)[1]) NH_init = init$NH NI_init = init$NI D_init = init$D R_init = init$R S_init = as.numeric(init$population) - (E_init + I_s_init + I_m_init + A_init + H_init + Hbar_init + NH_init + NI_init + D_init + R_init) # this is state0 to be passed to get_sir_results state0 = c(S=S_init, E=E_init, I_s=I_s_init, I_m=I_m_init, A=A_init, H=H_init, Hbar=Hbar_init, NH=NH_init, NI=NI_init, D=D_init, R=R_init) return(state0) } ################################### ## draw params from joint posterior ################################### # some supporting functions # get state0 on day0 for a given set of parameters get_state0_params <- function(params, interventions, populations, adj, county_capacities){ nregions = nrow(adj) time_num = 
params$time_num if (time_num<0){ stop("time_num cannot be negative") } else if (time_num==0){ E_init = params$E_init * E_INIT_COUNTY I_s_init = rep(0,nregions) I_m_init = rep(0,nregions) A_init = rep(0,nregions) H_init = rep(0,nregions) Hbar_init = rep(0,nregions) NH_init = rep(0,nregions) NI_init = rep(0,nregions) D_init = rep(0,nregions) R_init = rep(0,nregions) S_init = as.numeric(populations) - (E_init + I_s_init + I_m_init + A_init + H_init + Hbar_init + NH_init + NI_init + D_init + R_init) } else { # initial number exposed E_init_state0 = params$E_init * E_INIT_COUNTY nregions = length(E_init_state0) mytmax = max(params$detect_lag+1, params$time_num + 1) params_tmp <- params # no interventions and no changes to any inputs to contact pattern params_tmp$lockdown_effect <- 0 int_num = length(interventions$distancing_list) int_effects_tmp = as.list(rep(0, int_num)) re_fun_tmp = approxfun(1:mytmax, rep(0,mytmax) ,method='linear', rule=2) interventions$random_effect = re_fun_tmp params_tmp$testing_effect <- 0 params_tmp$H_lag <- 0 params_tmp$D_lag <- 0 init_start_ind = which(MOB$time == -params_tmp$time_num) mb_init = MOB$smooth[init_start_ind:(init_start_ind+mytmax-1)] mobfun_tmp = approxfun(1:mytmax, mb_init ,method='linear', rule=2) interventions$mobility = mobfun_tmp #deathfun_tmp = approxfun(1:mytmax, rep( mean(DEATH_HAZ$smooth.rel_haz[1:7]),mytmax) ,method='linear', rule=2) deathfun_tmp = approxfun(1:mytmax, rep(DEATH_HAZ$smooth.rel_haz[1], mytmax) ,method='linear', rule=2) sevfun_tmp = approxfun(1:mytmax, rep(SEV$sev.measure[1],mytmax) ,method='linear', rule=2) hdischargefun_tmp = approxfun(1:mytmax, rep(1/HLOS$smooth.alos[1],mytmax) ,method='linear', rule=2) # initial conditions at time day0 - time_num E_init = E_init_state0 I_s_init = rep(0,nregions) I_m_init = rep(0,nregions) A_init = rep(0,nregions) H_init = rep(0,nregions) Hbar_init = rep(0,nregions) NH_init = rep(0,nregions) NI_init = rep(0,nregions) D_init = rep(0,nregions) R_init = rep(0,nregions) 
S_init = as.numeric(populations) - (E_init + I_s_init + I_m_init + A_init + H_init + Hbar_init + NH_init + NI_init + D_init + R_init) state0 = c(S=S_init, E=E_init, I_s=I_s_init, I_m=I_m_init, A=A_init, H=H_init, Hbar=Hbar_init, NH=NH_init, NI=NI_init, D=D_init, R=R_init) res = run_sir_model(state0=state0, params=params_tmp, region_adj=adj, populations=as.numeric(populations), tmax=mytmax, interventions=interventions, int_effects = int_effects_tmp, capacities=county_capacities, deathfun=deathfun_tmp, sevfun=sevfun_tmp, hdischargefun=hdischargefun_tmp) # get initial conditions corresponding to params, E_init, and time_num init = matrix(0, ncol=10, nrow=nregions) compartments <- c("E", "I_s", "I_m", "A", "H", "NH", "NI", "D", "R" ) for (k in 1:length(compartments)){ compartment = compartments[k] comp.init <- c(nregions) for (i in 1:nregions){ region = CT_NAMES[i] idx = paste(compartment, region, sep=".") comp.init[i] = res[params$time_num, idx] } init[,k] = comp.init } init = as.data.frame(init) colnames(init) <- c("E", "Is", "Im", "A", "H", "NH", "NI", "D", "R", "county") init$county <- CT_NAMES E_init = init$E I_s_init = init$Is I_m_init = init$Im A_init = init$A H_init = init$H Hbar_init = rep(0,dim(init)[1]) NH_init = init$NH NI_init = init$NI D_init = init$D R_init = init$R S_init = as.numeric(populations) - (E_init + I_s_init + I_m_init + A_init + H_init + Hbar_init + NH_init + NI_init + D_init + R_init) } # this is state0 to be passed to get_sir_results state0 = c(S=S_init, E=E_init, I_s=I_s_init, I_m=I_m_init, A=A_init, H=H_init, Hbar=Hbar_init, NH=NH_init, NI=NI_init, D=D_init, R=R_init) return(state0) } # return a list of params and state0 for a random draw from joint posterior ## inputs: # params: list of parameters representing a chosen scenario # posterior: data frame with a sample from joint posterior rposterior = function(params, posterior, interventions){ index <- sample(c(1:nrow(posterior)), size = 1) par_smpl = as.list(posterior[index, ]) params = 
set_params_mcmc(params, par_smpl) state0 = get_state0_params(params, interventions, CT_POPULATIONS, CT_ADJ, COUNTY_CAPACITIES) return( list (params, state0, index) ) } # update parameter values that are calibrated ################################################## set_params_mcmc = function(myparams, varparams){ # names of params to be updated var_names = names(varparams) # update these params for (k in var_names){ myparams[[k]] = varparams[[k]] } # update calculated params myparams$q_Im = 1 - myparams$q_A - myparams$q_Is myparams$gamma_Hbar = myparams$gamma_H #myparams$school_closure_effect = varparams$tot_lockdown_effect * varparams$school_closure_prop #myparams$lockdown_effect = varparams$tot_lockdown_effect * (1 - varparams$school_closure_prop) return(myparams) } ####################### ## Run the model # draw_rparams controls uncertainty simulation: # if draw_rparams = TRUE, parameters are drawn independently using rparams() # if draw_rparams = TRUE, parameters are drawn from joint posterior supplied as 'posterior' get_sir_results = function(daymax, int_off_dates, nsim=1, params, state0, posterior, posterior.re = NULL, draw_rparams = FALSE, seed = NULL, CI = 0.95) { dayseq = seq(day0, daymax, by="day") tmax = as.numeric(difftime(daymax, day0, units="days")) # if(!is.null(INTERVENTIONS)){interventions <- INTERVENTIONS} else { stop("INTERVENTIONS must be defined globally") } # # get a list of intervetion ramping times from default params # int_ramping_times = get_ramping_times(params) # # # list of contact intervention functions # distancingfun_list = get_distancing_fun_list(dayseq, INT_START_DATES, int_off_dates, int_ramping_times) # # # mobility # mobilityfun = get_mobility_fun(dayseq, MOB) # # # testing # testingfun = get_testing_fun(dayseq, TESTING) # # # combine in the list of interventions # interventions = list(distancing_list=distancingfun_list, mobility=mobilityfun, testing=testingfun, random_effect = function(x){0}) if(!is.null(posterior.re)){ 
random_effect_at = as.numeric(gsub("time", "", colnames(posterior.re))) } # hospital death hazard function deathfun = get_death_fun(dayseq, DEATH_HAZ) # severity function sevfun = get_severity_fun(dayseq, SEV) # hospital discharge rate function hdischargefun = get_hosp_discharge_fun(dayseq, HLOS) pars <- list() state0s <- list() interventions_list <- list() # parameters, set seed if given if(!is.null(seed)) set.seed(seed) if (draw_rparams == TRUE){ if(nsim == 1){ pars[[1]] = params state0s[[1]] = state0 interventions_list[[1]] = interventions } else { for(i in 1:nsim){ pars[[i]] = rparams(params) state0s[[i]] = state0 interventions_list[[i]] = interventions } } } else { if(nsim == 1){ pars[[1]] = params state0s[[1]] = state0 interventions_list[[1]] = interventions } else { for(i in 1:nsim){ rpost_out = rposterior(params, posterior, interventions) pars[[i]] = rpost_out[[1]] state0s[[i]] = rpost_out[[2]] index <- rpost_out[[3]] # update interventions with random effects #ramping_upd = get_ramping_times(pars[[i]]) #distancingfun_list_upd = get_distancing_fun_list(dayseq, INT_START_DATES, int_off_dates, ramping_upd) #interventions_list[[i]] = list(distancing_list=distancingfun_list_upd, mobility=mobilityfun, testing=testingfun) interventions_list[[i]] = interventions if(!is.null(posterior.re)){ random_effect_fun = get_random_effect_fun(dayseq, random_effect_at, posterior.re[index, ]) interventions_list[[i]]$random_effect = random_effect_fun } } } } sir_results = lapply(1:nsim, function(i){ res = run_sir_model(state0=state0s[[i]], params=pars[[i]], region_adj=CT_ADJ, populations=CT_POPULATIONS, tmax=tmax, interventions=interventions_list[[i]], int_effects = get_intervention_effects(pars[[i]]), capacities=COUNTY_CAPACITIES, deathfun=deathfun, sevfun=sevfun, hdischargefun=hdischargefun) res$sim_id = i res }) sir_results_all = ldply(sir_results, rbind) # sir_results_all[,"rHsum.Connecticut"] <- sir_results_all[,"rH.Connecticut"]+sir_results_all[,"rHbar.Connecticut"] 
#for(nm in c("Connecticut", colnames(CT_ADJ))){ # sir_results_all[, paste0("rHsum.", nm)] <- sir_results_all[,paste0("rH.", nm)]+sir_results_all[,paste0("rHbar.", nm)] # } sir_results_long <- melt(sir_results_all, id.vars = c("time", "sim_id")) sir_results_summary <- sir_results_long %>% group_by(variable, time) %>% summarise( mean = mean(value), lower = quantile(value, (1-CI)/2, na.rm=TRUE), upper = quantile(value, 1-(1-CI)/2, na.rm=TRUE), median = median(value)) return(list(raw_results=sir_results, summary=sir_results_summary)) } #################################### # Print a vector of counts in the RMD file # get_compartment(data=res1$sir_results_summary, date="2020-07-01", toprint="rD.Connecticut",start_day = day0) get_compartment <- function(data, date, toprint, start_day){ sir_result_internal = data.frame(filter(data, variable%in%toprint)) t = as.numeric(difftime(as.Date(date), start_day, unit='days')) sir_result_internal = subset(sir_result_internal, time%in%t) out <- as.character(format(sir_result_internal$mean, digits=2, big.mark=",")) if(length(date)>2){ out <- paste0(paste(out[-length(out)], collapse=", "), ", and ", out[length(out)]) }else if(length(date)==2){ out <- paste0(out[1], " and ", out[2]) } return(out) } get_dashboard_output <- function(data, plot=TRUE, filename=NULL){ toprint <- c("rD.Connecticut", "rHsum.Connecticut", "dailyI.Connecticut", "dailyH.Connecticut") if(is.null(names(data))) names(data) <- paste("Scenario", 1:length(data)) out <- NULL for(i in 1:length(data)){ sir_result_internal <- filter(data[[i]]$summary, variable%in%toprint) %>% ungroup(variable) %>% mutate(variable=revalue(variable, c("rD.Connecticut"="Projected_Cumulative_Deaths", "rHsum.Connecticut"="Projected_Hospitalizations", "dailyI.Connecticut"="Projected_New_Infections", "dailyH.Connecticut"="Projected_New_Hospitalizations"))) %>% mutate(Date = day0 + time, value = mean, Scenario=names(data)[i]) %>% select(Date, variable, value, Scenario) out <- rbind(out, 
sir_result_internal) } out$Scenario <- factor(out$Scenario, levels = names(data)) obs <- DAT_CT_STATE[, c("date", "deaths", "cur_hosp")] colnames(obs) <- c("Date", "Actual_Cumulative_Deaths", "Actual_Hospitalizations") # add back 3/1 to 3/8 zeros <- data.frame(Date = day0 + c(0:(obs$Date[1] - day0 + 1))) zeros$Actual_Cumulative_Deaths <- 0 zeros$Actual_Hospitalizations <- 0 obs <- rbind(zeros, obs) out <- dcast(out, Date + Scenario ~ variable, value.var = "value") out <- out %>% full_join(obs, by = "Date") %>% arrange(Scenario, Date) if(plot){ g <- ggplot(melt(out, id.vars=c("Date", "Scenario")), aes(x=Date, y=value, color=variable, group=variable)) + geom_line() + facet_wrap(~Scenario) + theme(legend.position = "bottom") print(g) } out$Produced_by <- "Crawford Lab" out$Produced_time <- Sys.time() if(!is.null(filename)){ write.csv(out, file = filename, row.names=FALSE, quote=FALSE) } return(out) } get_snapshot <- function(data, date, where = "Connecticut"){ toprint <- unique(data$variable) toprint <- toprint[grep(where, toprint)] time.toprint <- as.numeric(as.Date(date, "%m/%d/%Y") - day0) out <- NULL sir_result_internal <- filter(data, variable%in%toprint) %>% filter(time == time.toprint) %>% ungroup(variable) %>% mutate(Date = day0 + time) %>% mutate(variable = gsub(paste0(".",where), "", variable)) %>% select(variable, time, mean, lower, upper) return(data.frame(sir_result_internal)) return(out) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/risk.partition.ITR.R \name{risk.partition.ITR} \alias{risk.partition.ITR} \title{Determines optimal partition.} \usage{ risk.partition.ITR(dat, split.var, test = NULL, risk.threshold = NA, min.ndsz = 20, n0 = 5, lambda = 0, name = "0", ctg = ctg, max.depth = 15, mtry = length(split.var), dat.rest = NULL, max.score = NULL, AIPWE = AIPWE, use.other.nodes = TRUE, extremeRandomized = FALSE) } \arguments{ \item{dat}{data.frame. Data used to identify split.} \item{split.var}{numeric vector. Columns of spliting variables.} \item{test}{data.frame of testing observations. Should be formatted the same as 'data'.} \item{risk.threshold}{numeric. Desired level of risk control.} \item{min.ndsz}{numeric specifying minimum number of observations required to call a node terminal. Defaults to 20.} \item{n0}{numeric specifying minimum number of treatment/control observations needed in a split to declare a node terminal. Defaults to 5.} \item{lambda}{numeric. Penalty parameter for risk scores. Defaults to 0, i.e. no constraint.} \item{name}{char. Name of internal node, used for ordering splits.} \item{ctg}{numeric vector corresponding to the categorical input columns. Defaults to NULL. Not available yet.} \item{max.depth}{numeric specifying maximum depth of the tree. Defaults to 15 levels.} \item{mtry}{numeric specifying the number of randomly selected splitting variables to be included. Defaults to number of splitting variables.} \item{dat.rest}{dataframe. Data outside current splitting node.} \item{max.score}{numeric. Current score for the tree.} \item{AIPWE}{logical. Should AIPWE (TRUE) or IPWE (FALSE) be used. Not available yet.} \item{use.other.nodes}{logical. Should global estimator of objective function be used. Defaults to TRUE.} \item{extremeRandomized}{logical. Experimental for randomly selecting cutpoints in a random forest model. 
Defaults to FALSE} } \value{ summary of the best split for a given data frame. } \description{ Determines optimal partition for an input node in an rcDT model. }
/man/risk.partition.ITR.Rd
no_license
kdoub5ha/rcITR
R
false
true
2,170
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/risk.partition.ITR.R \name{risk.partition.ITR} \alias{risk.partition.ITR} \title{Determines optimal partition.} \usage{ risk.partition.ITR(dat, split.var, test = NULL, risk.threshold = NA, min.ndsz = 20, n0 = 5, lambda = 0, name = "0", ctg = ctg, max.depth = 15, mtry = length(split.var), dat.rest = NULL, max.score = NULL, AIPWE = AIPWE, use.other.nodes = TRUE, extremeRandomized = FALSE) } \arguments{ \item{dat}{data.frame. Data used to identify split.} \item{split.var}{numeric vector. Columns of spliting variables.} \item{test}{data.frame of testing observations. Should be formatted the same as 'data'.} \item{risk.threshold}{numeric. Desired level of risk control.} \item{min.ndsz}{numeric specifying minimum number of observations required to call a node terminal. Defaults to 20.} \item{n0}{numeric specifying minimum number of treatment/control observations needed in a split to declare a node terminal. Defaults to 5.} \item{lambda}{numeric. Penalty parameter for risk scores. Defaults to 0, i.e. no constraint.} \item{name}{char. Name of internal node, used for ordering splits.} \item{ctg}{numeric vector corresponding to the categorical input columns. Defaults to NULL. Not available yet.} \item{max.depth}{numeric specifying maximum depth of the tree. Defaults to 15 levels.} \item{mtry}{numeric specifying the number of randomly selected splitting variables to be included. Defaults to number of splitting variables.} \item{dat.rest}{dataframe. Data outside current splitting node.} \item{max.score}{numeric. Current score for the tree.} \item{AIPWE}{logical. Should AIPWE (TRUE) or IPWE (FALSE) be used. Not available yet.} \item{use.other.nodes}{logical. Should global estimator of objective function be used. Defaults to TRUE.} \item{extremeRandomized}{logical. Experimental for randomly selecting cutpoints in a random forest model. 
Defaults to FALSE} } \value{ summary of the best split for a given data frame. } \description{ Determines optimal partition for an input node in an rcDT model. }
#' @export convert.f.to.omegasq <- function(f, df1, df2) { return( (f - 1) / (f + (df2 + 1) / (df1)) ); }
/Rmisc/[tmp]convert.f.to.omegasq.R
no_license
qsh7950/escalc
R
false
false
107
r
#' @export convert.f.to.omegasq <- function(f, df1, df2) { return( (f - 1) / (f + (df2 + 1) / (df1)) ); }
############################ ## UI part printHTMLUI <- function(id) { ns <- NS(id) htmlOutput(ns("html")) } ############################# ## server part printHTML <- function(input, output, session, what, error=NULL, global.input=NULL, global.param=NULL){ ##cat('test: ',error$msg, '\nend\n') txt='' ## ##@############################# ## ## getting started if(what == 'gs'){ ## render HTML output$html <- renderText({ if(!is.null(global.input$file)) return() if(!is.null(error$msg)) return() includeMarkdown('readme.md') }) } ##@############################### ## changelog if(what == 'cl'){ txt <- '<h4><font color="red">What\'s new?</font></h4> <font size=\"3\"> <b>v0.8.5 Jan 21, 2019</b> <ul> <li>Misc: Added BSD-3 license.</li> <li>Misc: Updated Readme file.</li> </ul> <b>v0.8.4.3 Jan 17, 2019</b> <ul> <li>Misc (SSP only): Started to implement a shiny module for session management which will enable users to share saved sessions with collaborators/team members. Not used in this version though.</li> </ul> <b>v0.8.4.2 Jan 10, 2019</b> <ul> <li>Misc: Compatible with both, R>=3.5 AND R<3.5</li> </ul> <b>v0.8.4.1 Dec 5, 2018</b> <ul> <li>Misc: Compatible with R >=3.5</li> </ul> <b>v0.8.4 Nov 16, 2018</b> <ul> <li>Multiscatter: fixed bug that would show straight lines for each pairwise plot. 
Occured when column ids where longer than 20 characters.</li> <li>GCT 1.3: Error message if GCT file does not contain any column meta data tracks.</li> <li>Misc: fixed a bug causing the app to crash if a GCT 1.3 file with single <b>column meta data track</b> was uploaded.</li> <li>Misc: renamed <i>Modify selected groups</i> to <i>Select groups</i>.</li> </ul> <b>v0.8.3.1 July 24, 2018</b> <ul> <li>Misc: robustified filtering of significant features for plotting purposes.</li> <li>Misc: disabled Javascript code in the datatable().</li> <li>Correlation boxplots: changed some more aesthetics.</li> </ul> <b>v0.8.3 July 23, 2018</b> <ul> <li><mark>BUG:</mark> fixed a bug resulting in an inaccurate number of significant features reported in the heatmap.</li> <li>UpSet-plot: small bugfix causing a crash under certein circumstances.</li> <li>Correlation boxplots: changed some aesthetics of the plot.</li> </ul> <b>v0.8.2.8 July 2, 2018</b> <ul> <li>Misc: fixed a bug causing the app to crash if a GCT 1.3 file <b>without row meta data</b> was uploaded.</li> </ul> <b>v0.8.2.7 June 28, 2018</b> <ul> <li>Misc: fixed a bug causing the app to crash under certain combinations of <i>Modify selected groups</i> and test selections.</li> </ul> <b>v0.8.2.6 June 28, 2018</b> <ul> <li>Misc: disabled the cmapR-package because of installation problems of the required package <i>rhdf5</i> on a <i>Red Hat Enterprise Linux 6.9</i> machine. The io.R file from the cmapR GitHub repository is used instead. 
</li> </ul> <b>v0.8.2.5 June 28, 2018</b> <ul> <li>Excel sheet: in case of <b>Two sample moderated T-test</b>, the table header will now report <b>KO.over.WT</b> instead of <b>WT.vs.KO</b>.</li> <li>Export: result files will also be epxorted in GCT v1.3 format.</li> <li>Barplot: fixed a bug causing the barplot of identified features mislabel the colors if the <i>Modify selected groups</i>-feature was used.</li> <li>Boxplots: fixed a bug resulting in slightly different numbers reported in the exported pdf file compared to the numbers shown in the app. This only happened in boxplots depicting values after normalization.</li> </ul> <b>v0.8.2.4 June 27, 2018</b> <ul> <li><mark>BUG:</mark> duplicated session ids: included a timestamp-based seed before generating the seesion id. It happened that the sample function returned the same string/number combination.</li> <li>Misc: session id is doubled checked whether it exists as folder on the server.</li> <li>Export: all files except RData-session files and zip-archives are removed from the server.</li> </ul> <b>v0.8.2.3 April 20, 2018</b> <ul> <li>Misc: updated code for 2-component normalization (by D. R. 
Mani).</li> </ul> <b>v0.8.2.2 April 17, 2018</b> <ul> <li>Heatmap: row and column labels can be disabled now.</li> <li>UpSet plots: inter-group comparison of significantly regulated features.</li> </ul> <b>v0.8.2.1 March 14, 2018</b> <ul> <li>Correlation: correlation matrix calculated centrally in a separate function and shared with plots using correlations: multiscatter, correlation heatmap, correlation boxplot.</li> <li>Correlation: novel QC-tab depicting pairwise intra-group correlations as boxplots.</li> </ul> <b>v0.8.2 February 27, 2018</b> <ul> <li>Misc: Installable on Mac OS.</li> <li>Misc: group selection now correctly updated in saved sessions.</li> </ul> <b>v0.8.1 February 26, 2018</b> <ul> <li>Misc: simplified installation under Windows OS.</li> <li>Misc: if Perl and/or Pandoc are not availbale the app will show a corresponding messsage.</li> <li>PCA: "Run me first"-tab became obsolete.</li> <li>Scatterplots: added trace of filtered values for reprodicibility filter.</li> <li>Scatterplots: separated data tracks and added legend.</li> </ul> <b>v0.8.0.9 February 24, 2018</b> <ul> <li>GCT 1.3: robustified import of GCT 1.3 files. 
If not unique, row and column identifiers are made unique.</li> <li>Volcano: Labeled points can be removed individually from the table.</li> <li>Table: page overhaul</li> </ul> <b>v0.8.0.7 February 22, 2018</b> <ul> <li>Misc: PPI queries now work after export of results.</li> </ul> <b>v0.8.0.6 February 21, 2018</b> <ul> <li>UI: updated help text</li> <li>UI: Only first 20 characters of column names are shown when prompted to select ID column.</li> </ul> <b>v0.8.0.5 February 20, 2018</b> <ul> <li>Export: fixed a bug preventing the export of results as zip-archive.</li> </ul> <b>v0.8.0.4 February 15, 2018</b> <ul> <li>Misc: Robustified import of gct 1.3 files (row and column names are made unique).</li> </ul> <b>v0.8.0.3 February 14, 2018</b> <ul> <li>Misc: new session import/export features</li> </ul> <b>v0.8.0.2 February 14, 2018</b> <ul> <li>Export: page overhaul</li> <li>Export: generation of Rmarkdown-reports (still under developement).</li> <li>Export: added option to download rmarkdown, xls, zip, separately.</li> <li>Volcanos: color overhaul.</li> <li>PPI: fixed a bug in which multiple occurences of selected bait proteins were not shown in zoomed view.</li> <li>PPI: fixed the <i>all-turns-green</i> bug.</li> <li>PPI: added ID mapping support for <i>mus musculus</i>, <i>rattus norvegicus</i> and <i>danio rerio</i>.</li> <li>Multiscatter: robust determination of plotting limits.</li> <li>Multiscatter: re-drawing only after button was pressed.</li> </ul> <b>v0.8.0.1 February 06, 2018</b> <ul> <li>Misc: Piwik integration.</li> <li>Fanplot: colors are synchronized with current group selection.</li> <li>Fanplot: added legend and possibility to modify labels.</li> </ul> <b>v0.8.0 January 25, 2018</b> <ul> <li>Release version for SSP (dev),</li> <li>Session import: improved backwards compatibility.</li> <li>Export: data directory is cleaned up now. 
Only .RData session files and the latest zip archive remain in the user/session data directory.</li> </ul> <b>v0.7.8.4 January 24, 2018</b> <ul> <li>Heatmap: interactive heatmap using "heatmaply".</li> <li>Heatmap: annotation tracks (GCT 1.3) can be selected/deselected.</li> <li>Clustering: default distance metric switched from <b>euclidean</b> to <b>1-Pearson</b>.</li> <li>Clustering: Fanplot v0.1 - circular dendrogram to visualize sample clustering.</li> <li>PCA: added legend to plots.</li> <li>Misc: links to "Genecards" if ids are not UniProt.</li> <li>Multiscatter: BA-filtered values shown in blue.</li> </ul> <b>v0.7.8.3 January 22, 2018</b> <ul> <li>PPI scatterplots: reduced opacity for non-interactors.</li> <li>PPI: robustified extraction of gene symbols in function"get.interactors()".</li> <li>Summary: new plot for missing values.</li> <li>SSP import: backwards compatibility, sessions saved from older versions can be imported.</li> <li>Gene name mapping: fixed bug that would cause a crash if neither UniProt nor RefSeq ids were found.</li> <li>Gene name mapping: gene names that could not mapped are indicated by "NotFound".</li> <li>Misc: improved start up time of the app using function "import.ppi.db()"</li> <li>Misc: working button in the "Select Groups" modal window.</li> </ul> <b>v0.7.8.2 December 29, 2017</b> <ul> <li>Volcano: fixed overlaping legends.</li> <li>Volcano: fixed fdr line bug.</li> <li>Volcano: IDs are site-specific. Also effects PPI panel, i.e. a query always returns a single site rather than all sites mapping to a gene symbol.</li> <li>Heatmap: GCT v1.3 annotation columns shown as tracks.</li> <li>Misc: Groups defined in the experimental design or in GCT v1.3 annotation tracks can be enabled/disabled for testing.</li> <li>Gene name mapping: finally works with RefSeq ids.</li> </ul> <b>v0.7.8.1 December 25, 2017</b> <ul> <li>Misc: added support for GCT v1.3 files. 
Class vector can be selected from column meta data.</li> </ul> <b>v0.7.8 December 4, 2017</b> <ul> <li>Heatmap: had to disable Morpheus widget since it would interfere with interactivity of volcono plots.</li> <li>Misc: switched to "selectizeInput" to select saved sessions.</li> <li>Misc: re-organization of navbarPage creation to fix an error thrown after Shiny R-packge update (v1.0.5)</li> <li>Misc: integrated Readme.html into entry page.</li> </ul> <b>v0.7.7 October 30, 2017</b> <ul> <li>Heatmap: Morpheus integration (ALPHA)</li> <li>Gene name mapping: robustified mapping if no RefSeq or UniProt ids were used.</li> </ul> <b>v0.7.6 September 02, 2017</b> <ul> <li>Misc: switched to <i>pacman</i> R package managment system.</li> <li>Misc: added Readme on GitHub.</li> <li>Normalization: Turned off automatic centering of Quantile-normalized data.</li> </ul> <b>v0.7.5 August 18, 2017</b> <ul> <li>Volcano: re-organization of PPI legends.</li> <li>Scatterplots: PPI analysis is now fully integrated.</li> <li>Heatmap: row annotations are shown correctly again.</li> <li>Misc: gene mapping doesn\'t crash if no test was selected.</li> <li>Misc: fixed a couple of other smaller bugs, mostly related to data exploration without performing a test.</li> </ul> <b>v0.7.4 August 10, 2017</b> <ul> <li>Scatterplots: new tab that provides interactive scatterplots between replicate measurements. 
For One-sample moderated T-test and F-test the significant proteins are marked in red.</li> <li>Volcano: PPI - search mask keeps working after multiple rounds of analysis.</li> <li>Misc: parameter file: fixed NA for data filter.</li> </ul> <b>v0.7.3.1 August 4, 2017</b> <ul> <li>Volcano plots: Fixed a bug causing volcano plots to crash when points were selected, but no protein-protein interactors were found.</li> </ul> <b>v0.7.3 June 20, 2017</b> <ul> <li>Misc: unified the naming of the id-column throughout the code.</li> <li>Volcano plots: Integration of Reactome (human) protein-protein interactions.</li> </ul> <b>v0.7.2 June 6, 2017</b> <ul> <li>Volcano plots: added hyperbolic curves based on a minimal fold change cut-off and adjusted p-values.</li> </ul> <b>v0.7.1 June 1, 2017</b> <ul> <li>Misc: fixed a bug preventing the filter to be triggered after the user re-runs an analysis.</li> <li>Volcano plots: integration of BioGRID (human) protein-protein interactions.</li> <li>Volcano plots: <i>selectizeInput</i> now rendered on the server. Significantly speeded up page respond times.</li> <li>Correlation matrix: updated color scheme to better visualize subtle differences.</li> <li>Gene name mapping: fixed some bugs causing the app to crash if no gene names could be mapped or if other accessions than UniProt or RefSeq were used..</li> <li>Misc: loading animation.</li> </ul> <b>v0.7.0 May 5, 2017</b> <ul> <li>Misc: automatic mapping to gene names if RefSeq or UniProt accession numbers were found in "id" column.</li> <li>Volcano plots: integration of InWeb database.</li> <li>Volcano plots: paramater "max. Log10(p-value)" works in all volcano plots. 
Before, changing the parameter only worked in the parameter panel of the first volcano.</li> <li>Volcano plots: completely zoomable</li> <li>Volcano plots: button to reset volcano annotations</li> </ul> <b>v0.6.6.1 Mar 17, 2017</b> <ul> <li>Export-tab: PCA loadings can be exported as Excel sheet (by Ozan Aygun).</li> <li>PCA-tab: New PCA loadings plot (by Ozan Aygun).</li> <li>Export-tab: included button for PCA loadings in \"toggle all\".</li> <li>Heatmap-tab: Default vaules for row/column fon size read from \"plotparams\", if defined.</li> </ul> <b>v0.6.6 Mar 17, 2017</b> <ul> <li>Fixed the \"incorrect number of dimensions\"-error in the table preview tab, if only a single annotation column is present.</li> <li>Prevented the automatic switch to the \"Summary\"-tab after changing the filter.</li> <li>Related to the previous point, the result filter is now implemented as observer rather than a reactive function.</li> <li>Summary-tab: fixed the workflow box showing NA when selecting filter \"none\" or \"top.n\".</li> <li>Dynamic UI elements will not switch back to \"One-sample modT\" after running an analysis.</li> <li>Table-tab: switched to DT package.</li> </ul> <b>v0.6.5 Mar 7, 2017</b> <ul> <li>Fixed a bug that resulted in not listing all saved session for a user.</li> <li>Worked on the filenames of exported RData and Excel files.</li> <li>modF: In case of too many missing values the test would not return a p-value which resulted in NA for the enumber of significant hits on the summary page.</li> </ul> <b>v0.6.4 Mar 6, 2017</b> <ul> <li>Summary tab: number of significant hits are now reported correctly.</li> <li>Summary tab: Missing value distribution after log-transformation shown correctly.</li> <li>Changed cluster method from \'complete\' to \'ward\'.</li> <li>Fixed a bug that happend if a project is defined and shared in \'user-roles.txt\' but has been deleted from the server.</li> </ul> <b>v0.6.3 Feb 2, 2017</b> <ul> <li>Commited to GitHub for debugging 
purposes. Do not use this verion!</li> <li>Re-organization of UI elements when setting up the analysis.</li> <li>Implementation of SD filter across all samples.</li> </ul> <b>v0.6.2 Jan 31, 2017</b> <ul> <li>UI elements for setting up an anlysis workflow are now dynamically generated, e.g. if reproducibility filter is chosen, onnly "One-sample modT" or "none" can be chosen.</li> <li>Reproducibility filter: users can choose bewteen (predefined) alpha-values.</li> <li>Increased number of colors by 60 (85 total).</li> <li>Correlation matrix: increased the size of exported heatmap to 12x12 inches.</li> <li>Multiscatter: increased number of digits to three.</li> <li>Some more error handling when exporting analysis results.</li> <li>Previously saved sessions are not deleted anymore, if checkbox "save session" is not enabled.</li> </ul> <b>v0.6.1 Jan 12, 2017</b> <ul> <li>Session managment: Added possibility to delete saved sessions and to choose whether to save a session on the server in the first place.</li> <li>User role managment (alpha status): A project saved on the server has an owner and (optional) collaborators. Collaborators can \"see\" projects they are assigned to in the dropdown menu \"Saved sessions\".</li> </ul> <b>v0.6.0 Jan 4, 2017</b> <ul> <li>Switched to <a href="https://rstudio.github.io/shinydashboard/">Shiny Dashboards</a>.</li> <li>Extented PCA analysis using the <i>ChemometricsWithR</i> R package.</li> </ul> <b>v0.5.4 Dec 27, 2016</b> <ul> <li>Filter values and plotting parameters are now restored after session import (except for volcano plot...).</li> <li>Changed visual style of volcano plots.</li> </ul> <b>v0.5.3 Dec 21, 2016</b> <ul> <li>Minor fixes due to shiny update.</li> <li>User can now specify label names used to create file and session names when exporting results. 
Initial values are taken from filenames of the input and experimental design file.</li> <li>Experimental design file is now part of the results.</li> </ul> <b>v0.5.2 Dec 1, 2016</b> <ul> <li>Rudimentary support of \'gct\' files, i.e. files can be imported by ignoring the first two lines (gct header). </li> <li>Figured out the issue with the 2-sample T-test volcanos. The functions in \'limma\' always report fold changes group factor variable \'0\'. The original \'moderated.t\' alphabetically orders the class names and then converts class names to factors. First class name will become zero. I make sure that class names are alphabeticaly sorted before calling \'moderated.t\'.</li> </ul> <b>v0.5.1 Nov 26, 2016</b> <ul> <li><mark>BUG: </mark>Reverted the indication of direction in volcano plots for <b>2-sample tests</b>. The direction was inferred from the sign of \'logFC\' returned by function \'topTable\' (limma) which cannot be used to do that.</li> <li>Updated shiny R package from 0.12/0.13.2 to 0.14.2 resulting in some minor changes in the <i>look and feel</i> of the app. Code needed some adaptions (navbarPage, navbarMenu) to run poperly with 0.14.2 version.</li> <li>Outsourced HTML instructions to a separate file using Shiny-module framework.</li> <li>Changed how heatmap dimensions are determined to better show very large and very small heatmaps.</li> <li>Scaling of heatmap done after clustering.</li> </ul> <b>v0.5.0 Nov 7, 2016</b> <ul> <li>Exported sessions are saved on the server and can be re-imported. Each user has its own folder on ther server in which an R-sessions file is stored.</li> <li>Non-unique entries in the id column are made unique, e.g. \'Abl\', \'Abl\' -> \'Abl\', \'Abl_1\'. Empty entries will be replaced by \'X\', e.g. 
\'Abl\', \'\', \'\' -> \'Abl\', \'X\', \'X_1\'.</li> </ul> <b>v0.4.5 Sep 1, 2016</b> <ul> <li>Multiscatter: log-transformed values wil be used if log-transformation has been applied.</li> <li>For each user a new folder on the server is created. Every session that gets exported will be saved there.</li> <li>A copy of the original data file will be part of the results (zip-file).</li> </ul> <b>v0.4.4 Aug 19, 2016</b> <ul> <li>New \'Export\'-tab to download a zip-file containing: <ul> <li>all figures (pdf).</li> <li>result table (xlsx).</li> <li>session file (Rdata) which can be imported back into the app.</li> <li>parameter file (txt)</li> </ul> <li>Directionality of two-sample test is now indicated in the volcano plots.</li> <li>Error handling for two-component normalization.</li> <li>Profile plots under \'QC\'-tab</li> </ul> <b>v0.4.3 Aug 16, 2016</b> <ul> <li>Session export/import.</li> <li>"#VALUE!"-entries from Excel can be handeled now.</li> <li>Fixed bug causing PDF export of heatmap with user defined max. values to crash.</li> </ul> <b>v0.4.2 Jul 21, 2016</b> <ul> <li><mark>BUG:</mark> Bugfix in 2-sample test that occured whenever the names of different groups defined the experimental design file started with the same series of characters, e.g. \'ABC\' and \'ABCD\'.</li> </ul> <b>v0.4.1 Jul 1, 2016</b> <ul> <li>Novel tab summarizing the analysis.</i> <li>Data can now be log-transformed, e.g. for MaxQuant LFQ results.</li> <li>Added option to skip testing, e.g. 
for PCA analysis.</li> <li>User can specify principle components in the PCA scatterplot.</li> </ul> <b>v0.4 Jun 29, 2016</b> <ul> <li>Integration of moderated F statistics</li> <li>Disabled column-based clustering one-sample and two-sample tests if multiple groups are being compared.</li> </ul> <b>v0.3 Mar 11, 2016</b> <ul> <li>Data normalization.</li> <li>Reproducibility filter.</li> <li>Upload/download of experimental design files.</li> <li>Download of native Excel files.</li> <li>Integration of the Javascript D3-based plotly library.</li> </ul> <b>v0.2 Feb 23, 2016</b> <ul> <li>Working version on server.</li> </ul> <b>v0.1 Dec 20, 2015</b> <ul> <li>First prototype.</li> </ul> </font>' #,sep='') ## render HTML output$html <- renderText({ if(!is.null(global.input$file)) return() HTML(txt) }) } ##@######################################### ## id column / exp design template if(what == 'id'){ txt <- paste('<br><br><p><font size=\"4\"><b>Group assigment</b></br> Here you can download a template of an experimental design file. You can open this file in Excel and define the groups you want to compare. Replicate measurements have to be grouped under a single name in the \'Experiment\'-column. <mark>Please don\'t use special characters, like blanks or any punctuation, when defining these names!</mark></font></p> <br><p><font size=\"4\"><b>Select ID column</b></br> Choose a column from the list on the left that contains <b>unique</b> identifiers for the features in the data table. If the enntries are not unique, uniqueness will enforces by appending \"_1\". Preferably, IDs should be unique protein accession numbers (e.g. <font face=\"Courier\">NP_073737</font>) or a combination of protein accession and residue number in case of PTM analysis (e.g. 
<font face=\"Courier\">NP_073737_S544s _1_1_544_544</font>).</p> <br><p><font size=\"4\"><b>Automatic retrieval of gene symbols</b></br> If the ID column contains <a href=\"http://www.uniprot.org/\" target=\"_blank_\">UniProt</a> or <a href=\"https://www.ncbi.nlm.nih.gov/refseq/\" target=\"_blank_\">RefSeq</a> accession numbers, the software will try to map those ids to gene symbols. Currently, mapping of following organisms is supported: <ul> <li>human (<i>Homo sapiens</i>)</li> <li>mouse (<i>Mus musculus</i>)</li> <li>rat (<i>Rattus norvegicus</i>)</li> <li>zebrafish (<i>Danio rerio</i>)</li> </ul> </font></p>') ## render HTML output$html <- renderText({ if(global.param$analysis.run) return() if(global.param$id.done) return() if(!global.param$file.done) return() #if(is.null(global.input$id.col)) return() ## start page #if(global.input$id.col > 0 && !is.null(global.param$id.col.value)) return() ## after id column is choosen HTML(txt) }) } ##################################################################### ## upload of experimental design file if(what == 'ed'){ txt <- paste('<br><br><p><font size=\"4\">Please upload the experimental design file that you have created using the upload button on the left.</p></font></p>') ## render HTML output$html <- renderText({ if(global.param$analysis.run) return() if(!global.param$file.done) return() if( is.null(global.param$id.col.value) ) return() #if(global.input$id.col ==0) return() if(global.param$file.gct3) return() #if(global.param$id.done) return() if(global.param$grp.done) return() HTML(txt) }) } ##################################################################### ## gct v3 if(what == 'gct3'){ #txt <- paste('<br><br><p><font size=\"4\">Found GCT v1.3 file with', ncol(global.input$cdesc),'annotation columns. 
Choose one column as class vector for marker selection.</p></font></p>') txt <- paste('<br><p><font size=\"4\"><b>Found GCT v1.3 file</b><br>Choose the annotation column to use as class vector for marker selection.</p></font></p>') ## render HTML output$html <- renderText({ if(global.param$analysis.run) return() if(!global.param$file.gct3) return() #if(global.param$id.done) return() if(global.param$grp.done) return() HTML(txt) }) } ## #################################################################### ## analysis if(what == 'ana'){ txt <- paste('<font size=\"4\"> <p><h3>Log-transformation</h3>Apply log transformation to the data.</p> <p><h3>Data normalization</h3>You can apply different normalization methods to the data prior to testing. The methods are applied for each column separately, except for \'Quantile\'-normalization which takes the entire matrix into account.</p> <p> <ul> <li><b>Median</b>: Subtract the sample median from each value (centering).</li> <li><b>Median-MAD</b>: Subtract the sample median and divide by sample MAD (centering plus scaling).</li> <li><b>2-component</b>: Use a mixture-model approach to separate non-changing from changing features and divide both populations by the median of the non-changing features.</li> <li><b>Quantile</b>: Transform the data such that the quantiles of all sample distributions are the equal.</li> <li><b>none</b>: The data will be taken as is. Should be used if the data has been already normalized.</li> </ul> <p><h3>Filter data</h3> <b>Reproducibility:</b><br> Remove features that were not reproducibly quantifified across replicate measurements. Only available for <b>one-sample tests</b> and will be ignored otherwise. For duplicate measurements a Bland-Altman Filter of 99.9% (+/-3.29 sigma) will be applied. 
For more than two replicate measurements per group a generalized reproducibility filter is applied which is based on a linear mixed effects model to model the within-group variance and between-group variance (See \'MethComp book (pp 58-61). <i>Comparing Clinical Measurement Methods</i> by Bendix Carstensen\' for more details). You can inspect the results of the filtering step in the multiscatter plot under the \'QC\'-tab as well as in the interactive scatterplots. Data points removed prior to testing will be depicted in blue.</p> <b>StdDev:</b><br> Remove features with low standard deviation across all samples. Only useful if applied to sample cohorts that were quantified against a common reference. The percentile <b><i>P</i></b> you specify in the slider refers to the <b><i>P</i></b> percent of features having the <b>lowest standard deviation</b> across sample columns which will be <b>excluded prior to analyis</b>. Using this type of filter is useful to explore result of unsupervised clustering of the data without running a statistical test. <br><h3>Select test</h3>You can choose between a one-sample, two-sample moderate T-tests, moderated F-test or no testing. <ul> <li><b>One-sample mod T</b>: For each test whether the group mean is significantly different from zero. Only meaningful to <b>ratio data</b>!</li> <li><b>Two-sample mod T</b>: For each possible pairwise comparison of groups test whether the group means are significantly different from each other.</li> <li><b>mod F</b>: Test whether there is a significant difference between any of the difined groups. Should be used if more than 2 groups are being compared. Only meaningful to <b>ratio data</b>!</li> <li><b>none</b>: Don\'t do any test. 
Useful for data exploration such as PCA.</li> </ul> <br></font></p>') ## render HTML output$html <- renderText({ if(global.param$analysis.run) return() ## if( !is.null(error$msg) ) return() if(global.param$grp.done == F) return() if(!is.null(global.input$run.test)) if(global.input$run.test > 0) return() HTML(txt) }) } ## #################################################################### ## analysis if(what == 'res'){ txt <- paste('<p><font size=\"4\">This page allows you to interactively explore the results of you analyis. On the left you can choose between different filters, the results will be updated immediately. The filter that you specify applies to all tabs (\'Heatmap\', \'Volcanos\', ...), except the \'QC\' which shows the entire dataset. You can change the appearance of the heatmap by modifying the parameters below, you can select points shown in the Volcano plots and browse through the result table.</font></p><br>') ## render HTML output$html <- renderText({ ## if( !is.null(error$msg) ) return() if(global.param$grp.done == F) return() if(!is.null(global.input$run.test)) if(global.input$run.test == 0) return() HTML(txt) }) } } ## end printHTML
/src/helptext.r
permissive
jakejaffe/protigy
R
false
false
28,213
r
############################ ## UI part printHTMLUI <- function(id) { ns <- NS(id) htmlOutput(ns("html")) } ############################# ## server part printHTML <- function(input, output, session, what, error=NULL, global.input=NULL, global.param=NULL){ ##cat('test: ',error$msg, '\nend\n') txt='' ## ##@############################# ## ## getting started if(what == 'gs'){ ## render HTML output$html <- renderText({ if(!is.null(global.input$file)) return() if(!is.null(error$msg)) return() includeMarkdown('readme.md') }) } ##@############################### ## changelog if(what == 'cl'){ txt <- '<h4><font color="red">What\'s new?</font></h4> <font size=\"3\"> <b>v0.8.5 Jan 21, 2019</b> <ul> <li>Misc: Added BSD-3 license.</li> <li>Misc: Updated Readme file.</li> </ul> <b>v0.8.4.3 Jan 17, 2019</b> <ul> <li>Misc (SSP only): Started to implement a shiny module for session management which will enable users to share saved sessions with collaborators/team members. Not used in this version though.</li> </ul> <b>v0.8.4.2 Jan 10, 2019</b> <ul> <li>Misc: Compatible with both, R>=3.5 AND R<3.5</li> </ul> <b>v0.8.4.1 Dec 5, 2018</b> <ul> <li>Misc: Compatible with R >=3.5</li> </ul> <b>v0.8.4 Nov 16, 2018</b> <ul> <li>Multiscatter: fixed bug that would show straight lines for each pairwise plot. 
Occured when column ids where longer than 20 characters.</li> <li>GCT 1.3: Error message if GCT file does not contain any column meta data tracks.</li> <li>Misc: fixed a bug causing the app to crash if a GCT 1.3 file with single <b>column meta data track</b> was uploaded.</li> <li>Misc: renamed <i>Modify selected groups</i> to <i>Select groups</i>.</li> </ul> <b>v0.8.3.1 July 24, 2018</b> <ul> <li>Misc: robustified filtering of significant features for plotting purposes.</li> <li>Misc: disabled Javascript code in the datatable().</li> <li>Correlation boxplots: changed some more aesthetics.</li> </ul> <b>v0.8.3 July 23, 2018</b> <ul> <li><mark>BUG:</mark> fixed a bug resulting in an inaccurate number of significant features reported in the heatmap.</li> <li>UpSet-plot: small bugfix causing a crash under certein circumstances.</li> <li>Correlation boxplots: changed some aesthetics of the plot.</li> </ul> <b>v0.8.2.8 July 2, 2018</b> <ul> <li>Misc: fixed a bug causing the app to crash if a GCT 1.3 file <b>without row meta data</b> was uploaded.</li> </ul> <b>v0.8.2.7 June 28, 2018</b> <ul> <li>Misc: fixed a bug causing the app to crash under certain combinations of <i>Modify selected groups</i> and test selections.</li> </ul> <b>v0.8.2.6 June 28, 2018</b> <ul> <li>Misc: disabled the cmapR-package because of installation problems of the required package <i>rhdf5</i> on a <i>Red Hat Enterprise Linux 6.9</i> machine. The io.R file from the cmapR GitHub repository is used instead. 
</li> </ul> <b>v0.8.2.5 June 28, 2018</b> <ul> <li>Excel sheet: in case of <b>Two sample moderated T-test</b>, the table header will now report <b>KO.over.WT</b> instead of <b>WT.vs.KO</b>.</li> <li>Export: result files will also be epxorted in GCT v1.3 format.</li> <li>Barplot: fixed a bug causing the barplot of identified features mislabel the colors if the <i>Modify selected groups</i>-feature was used.</li> <li>Boxplots: fixed a bug resulting in slightly different numbers reported in the exported pdf file compared to the numbers shown in the app. This only happened in boxplots depicting values after normalization.</li> </ul> <b>v0.8.2.4 June 27, 2018</b> <ul> <li><mark>BUG:</mark> duplicated session ids: included a timestamp-based seed before generating the seesion id. It happened that the sample function returned the same string/number combination.</li> <li>Misc: session id is doubled checked whether it exists as folder on the server.</li> <li>Export: all files except RData-session files and zip-archives are removed from the server.</li> </ul> <b>v0.8.2.3 April 20, 2018</b> <ul> <li>Misc: updated code for 2-component normalization (by D. R. 
Mani).</li> </ul> <b>v0.8.2.2 April 17, 2018</b> <ul> <li>Heatmap: row and column labels can be disabled now.</li> <li>UpSet plots: inter-group comparison of significantly regulated features.</li> </ul> <b>v0.8.2.1 March 14, 2018</b> <ul> <li>Correlation: correlation matrix calculated centrally in a separate function and shared with plots using correlations: multiscatter, correlation heatmap, correlation boxplot.</li> <li>Correlation: novel QC-tab depicting pairwise intra-group correlations as boxplots.</li> </ul> <b>v0.8.2 February 27, 2018</b> <ul> <li>Misc: Installable on Mac OS.</li> <li>Misc: group selection now correctly updated in saved sessions.</li> </ul> <b>v0.8.1 February 26, 2018</b> <ul> <li>Misc: simplified installation under Windows OS.</li> <li>Misc: if Perl and/or Pandoc are not availbale the app will show a corresponding messsage.</li> <li>PCA: "Run me first"-tab became obsolete.</li> <li>Scatterplots: added trace of filtered values for reprodicibility filter.</li> <li>Scatterplots: separated data tracks and added legend.</li> </ul> <b>v0.8.0.9 February 24, 2018</b> <ul> <li>GCT 1.3: robustified import of GCT 1.3 files. 
If not unique, row and column identifiers are made unique.</li> <li>Volcano: Labeled points can be removed individually from the table.</li> <li>Table: page overhaul</li> </ul> <b>v0.8.0.7 February 22, 2018</b> <ul> <li>Misc: PPI queries now work after export of results.</li> </ul> <b>v0.8.0.6 February 21, 2018</b> <ul> <li>UI: updated help text</li> <li>UI: Only first 20 characters of column names are shown when prompted to select ID column.</li> </ul> <b>v0.8.0.5 February 20, 2018</b> <ul> <li>Export: fixed a bug preventing the export of results as zip-archive.</li> </ul> <b>v0.8.0.4 February 15, 2018</b> <ul> <li>Misc: Robustified import of gct 1.3 files (row and column names are made unique).</li> </ul> <b>v0.8.0.3 February 14, 2018</b> <ul> <li>Misc: new session import/export features</li> </ul> <b>v0.8.0.2 February 14, 2018</b> <ul> <li>Export: page overhaul</li> <li>Export: generation of Rmarkdown-reports (still under developement).</li> <li>Export: added option to download rmarkdown, xls, zip, separately.</li> <li>Volcanos: color overhaul.</li> <li>PPI: fixed a bug in which multiple occurences of selected bait proteins were not shown in zoomed view.</li> <li>PPI: fixed the <i>all-turns-green</i> bug.</li> <li>PPI: added ID mapping support for <i>mus musculus</i>, <i>rattus norvegicus</i> and <i>danio rerio</i>.</li> <li>Multiscatter: robust determination of plotting limits.</li> <li>Multiscatter: re-drawing only after button was pressed.</li> </ul> <b>v0.8.0.1 February 06, 2018</b> <ul> <li>Misc: Piwik integration.</li> <li>Fanplot: colors are synchronized with current group selection.</li> <li>Fanplot: added legend and possibility to modify labels.</li> </ul> <b>v0.8.0 January 25, 2018</b> <ul> <li>Release version for SSP (dev),</li> <li>Session import: improved backwards compatibility.</li> <li>Export: data directory is cleaned up now. 
Only .RData session files and the latest zip archive remain in the user/session data directory.</li> </ul> <b>v0.7.8.4 January 24, 2018</b> <ul> <li>Heatmap: interactive heatmap using "heatmaply".</li> <li>Heatmap: annotation tracks (GCT 1.3) can be selected/deselected.</li> <li>Clustering: default distance metric switched from <b>euclidean</b> to <b>1-Pearson</b>.</li> <li>Clustering: Fanplot v0.1 - circular dendrogram to visualize sample clustering.</li> <li>PCA: added legend to plots.</li> <li>Misc: links to "Genecards" if ids are not UniProt.</li> <li>Multiscatter: BA-filtered values shown in blue.</li> </ul> <b>v0.7.8.3 January 22, 2018</b> <ul> <li>PPI scatterplots: reduced opacity for non-interactors.</li> <li>PPI: robustified extraction of gene symbols in function"get.interactors()".</li> <li>Summary: new plot for missing values.</li> <li>SSP import: backwards compatibility, sessions saved from older versions can be imported.</li> <li>Gene name mapping: fixed bug that would cause a crash if neither UniProt nor RefSeq ids were found.</li> <li>Gene name mapping: gene names that could not mapped are indicated by "NotFound".</li> <li>Misc: improved start up time of the app using function "import.ppi.db()"</li> <li>Misc: working button in the "Select Groups" modal window.</li> </ul> <b>v0.7.8.2 December 29, 2017</b> <ul> <li>Volcano: fixed overlaping legends.</li> <li>Volcano: fixed fdr line bug.</li> <li>Volcano: IDs are site-specific. Also effects PPI panel, i.e. a query always returns a single site rather than all sites mapping to a gene symbol.</li> <li>Heatmap: GCT v1.3 annotation columns shown as tracks.</li> <li>Misc: Groups defined in the experimental design or in GCT v1.3 annotation tracks can be enabled/disabled for testing.</li> <li>Gene name mapping: finally works with RefSeq ids.</li> </ul> <b>v0.7.8.1 December 25, 2017</b> <ul> <li>Misc: added support for GCT v1.3 files. 
Class vector can be selected from column meta data.</li> </ul> <b>v0.7.8 December 4, 2017</b> <ul> <li>Heatmap: had to disable Morpheus widget since it would interfere with interactivity of volcono plots.</li> <li>Misc: switched to "selectizeInput" to select saved sessions.</li> <li>Misc: re-organization of navbarPage creation to fix an error thrown after Shiny R-packge update (v1.0.5)</li> <li>Misc: integrated Readme.html into entry page.</li> </ul> <b>v0.7.7 October 30, 2017</b> <ul> <li>Heatmap: Morpheus integration (ALPHA)</li> <li>Gene name mapping: robustified mapping if no RefSeq or UniProt ids were used.</li> </ul> <b>v0.7.6 September 02, 2017</b> <ul> <li>Misc: switched to <i>pacman</i> R package managment system.</li> <li>Misc: added Readme on GitHub.</li> <li>Normalization: Turned off automatic centering of Quantile-normalized data.</li> </ul> <b>v0.7.5 August 18, 2017</b> <ul> <li>Volcano: re-organization of PPI legends.</li> <li>Scatterplots: PPI analysis is now fully integrated.</li> <li>Heatmap: row annotations are shown correctly again.</li> <li>Misc: gene mapping doesn\'t crash if no test was selected.</li> <li>Misc: fixed a couple of other smaller bugs, mostly related to data exploration without performing a test.</li> </ul> <b>v0.7.4 August 10, 2017</b> <ul> <li>Scatterplots: new tab that provides interactive scatterplots between replicate measurements. 
For One-sample moderated T-test and F-test the significant proteins are marked in red.</li> <li>Volcano: PPI - search mask keeps working after multiple rounds of analysis.</li> <li>Misc: parameter file: fixed NA for data filter.</li> </ul> <b>v0.7.3.1 August 4, 2017</b> <ul> <li>Volcano plots: Fixed a bug causing volcano plots to crash when points were selected, but no protein-protein interactors were found.</li> </ul> <b>v0.7.3 June 20, 2017</b> <ul> <li>Misc: unified the naming of the id-column throughout the code.</li> <li>Volcano plots: Integration of Reactome (human) protein-protein interactions.</li> </ul> <b>v0.7.2 June 6, 2017</b> <ul> <li>Volcano plots: added hyperbolic curves based on a minimal fold change cut-off and adjusted p-values.</li> </ul> <b>v0.7.1 June 1, 2017</b> <ul> <li>Misc: fixed a bug preventing the filter to be triggered after the user re-runs an analysis.</li> <li>Volcano plots: integration of BioGRID (human) protein-protein interactions.</li> <li>Volcano plots: <i>selectizeInput</i> now rendered on the server. Significantly speeded up page respond times.</li> <li>Correlation matrix: updated color scheme to better visualize subtle differences.</li> <li>Gene name mapping: fixed some bugs causing the app to crash if no gene names could be mapped or if other accessions than UniProt or RefSeq were used..</li> <li>Misc: loading animation.</li> </ul> <b>v0.7.0 May 5, 2017</b> <ul> <li>Misc: automatic mapping to gene names if RefSeq or UniProt accession numbers were found in "id" column.</li> <li>Volcano plots: integration of InWeb database.</li> <li>Volcano plots: paramater "max. Log10(p-value)" works in all volcano plots. 
Before, changing the parameter only worked in the parameter panel of the first volcano.</li> <li>Volcano plots: completely zoomable</li> <li>Volcano plots: button to reset volcano annotations</li> </ul> <b>v0.6.6.1 Mar 17, 2017</b> <ul> <li>Export-tab: PCA loadings can be exported as Excel sheet (by Ozan Aygun).</li> <li>PCA-tab: New PCA loadings plot (by Ozan Aygun).</li> <li>Export-tab: included button for PCA loadings in \"toggle all\".</li> <li>Heatmap-tab: Default vaules for row/column fon size read from \"plotparams\", if defined.</li> </ul> <b>v0.6.6 Mar 17, 2017</b> <ul> <li>Fixed the \"incorrect number of dimensions\"-error in the table preview tab, if only a single annotation column is present.</li> <li>Prevented the automatic switch to the \"Summary\"-tab after changing the filter.</li> <li>Related to the previous point, the result filter is now implemented as observer rather than a reactive function.</li> <li>Summary-tab: fixed the workflow box showing NA when selecting filter \"none\" or \"top.n\".</li> <li>Dynamic UI elements will not switch back to \"One-sample modT\" after running an analysis.</li> <li>Table-tab: switched to DT package.</li> </ul> <b>v0.6.5 Mar 7, 2017</b> <ul> <li>Fixed a bug that resulted in not listing all saved session for a user.</li> <li>Worked on the filenames of exported RData and Excel files.</li> <li>modF: In case of too many missing values the test would not return a p-value which resulted in NA for the enumber of significant hits on the summary page.</li> </ul> <b>v0.6.4 Mar 6, 2017</b> <ul> <li>Summary tab: number of significant hits are now reported correctly.</li> <li>Summary tab: Missing value distribution after log-transformation shown correctly.</li> <li>Changed cluster method from \'complete\' to \'ward\'.</li> <li>Fixed a bug that happend if a project is defined and shared in \'user-roles.txt\' but has been deleted from the server.</li> </ul> <b>v0.6.3 Feb 2, 2017</b> <ul> <li>Commited to GitHub for debugging 
purposes. Do not use this verion!</li> <li>Re-organization of UI elements when setting up the analysis.</li> <li>Implementation of SD filter across all samples.</li> </ul> <b>v0.6.2 Jan 31, 2017</b> <ul> <li>UI elements for setting up an anlysis workflow are now dynamically generated, e.g. if reproducibility filter is chosen, onnly "One-sample modT" or "none" can be chosen.</li> <li>Reproducibility filter: users can choose bewteen (predefined) alpha-values.</li> <li>Increased number of colors by 60 (85 total).</li> <li>Correlation matrix: increased the size of exported heatmap to 12x12 inches.</li> <li>Multiscatter: increased number of digits to three.</li> <li>Some more error handling when exporting analysis results.</li> <li>Previously saved sessions are not deleted anymore, if checkbox "save session" is not enabled.</li> </ul> <b>v0.6.1 Jan 12, 2017</b> <ul> <li>Session managment: Added possibility to delete saved sessions and to choose whether to save a session on the server in the first place.</li> <li>User role managment (alpha status): A project saved on the server has an owner and (optional) collaborators. Collaborators can \"see\" projects they are assigned to in the dropdown menu \"Saved sessions\".</li> </ul> <b>v0.6.0 Jan 4, 2017</b> <ul> <li>Switched to <a href="https://rstudio.github.io/shinydashboard/">Shiny Dashboards</a>.</li> <li>Extented PCA analysis using the <i>ChemometricsWithR</i> R package.</li> </ul> <b>v0.5.4 Dec 27, 2016</b> <ul> <li>Filter values and plotting parameters are now restored after session import (except for volcano plot...).</li> <li>Changed visual style of volcano plots.</li> </ul> <b>v0.5.3 Dec 21, 2016</b> <ul> <li>Minor fixes due to shiny update.</li> <li>User can now specify label names used to create file and session names when exporting results. 
Initial values are taken from filenames of the input and experimental design file.</li> <li>Experimental design file is now part of the results.</li> </ul> <b>v0.5.2 Dec 1, 2016</b> <ul> <li>Rudimentary support of \'gct\' files, i.e. files can be imported by ignoring the first two lines (gct header). </li> <li>Figured out the issue with the 2-sample T-test volcanos. The functions in \'limma\' always report fold changes group factor variable \'0\'. The original \'moderated.t\' alphabetically orders the class names and then converts class names to factors. First class name will become zero. I make sure that class names are alphabeticaly sorted before calling \'moderated.t\'.</li> </ul> <b>v0.5.1 Nov 26, 2016</b> <ul> <li><mark>BUG: </mark>Reverted the indication of direction in volcano plots for <b>2-sample tests</b>. The direction was inferred from the sign of \'logFC\' returned by function \'topTable\' (limma) which cannot be used to do that.</li> <li>Updated shiny R package from 0.12/0.13.2 to 0.14.2 resulting in some minor changes in the <i>look and feel</i> of the app. Code needed some adaptions (navbarPage, navbarMenu) to run poperly with 0.14.2 version.</li> <li>Outsourced HTML instructions to a separate file using Shiny-module framework.</li> <li>Changed how heatmap dimensions are determined to better show very large and very small heatmaps.</li> <li>Scaling of heatmap done after clustering.</li> </ul> <b>v0.5.0 Nov 7, 2016</b> <ul> <li>Exported sessions are saved on the server and can be re-imported. Each user has its own folder on ther server in which an R-sessions file is stored.</li> <li>Non-unique entries in the id column are made unique, e.g. \'Abl\', \'Abl\' -> \'Abl\', \'Abl_1\'. Empty entries will be replaced by \'X\', e.g. 
\'Abl\', \'\', \'\' -> \'Abl\', \'X\', \'X_1\'.</li> </ul> <b>v0.4.5 Sep 1, 2016</b> <ul> <li>Multiscatter: log-transformed values wil be used if log-transformation has been applied.</li> <li>For each user a new folder on the server is created. Every session that gets exported will be saved there.</li> <li>A copy of the original data file will be part of the results (zip-file).</li> </ul> <b>v0.4.4 Aug 19, 2016</b> <ul> <li>New \'Export\'-tab to download a zip-file containing: <ul> <li>all figures (pdf).</li> <li>result table (xlsx).</li> <li>session file (Rdata) which can be imported back into the app.</li> <li>parameter file (txt)</li> </ul> <li>Directionality of two-sample test is now indicated in the volcano plots.</li> <li>Error handling for two-component normalization.</li> <li>Profile plots under \'QC\'-tab</li> </ul> <b>v0.4.3 Aug 16, 2016</b> <ul> <li>Session export/import.</li> <li>"#VALUE!"-entries from Excel can be handeled now.</li> <li>Fixed bug causing PDF export of heatmap with user defined max. values to crash.</li> </ul> <b>v0.4.2 Jul 21, 2016</b> <ul> <li><mark>BUG:</mark> Bugfix in 2-sample test that occured whenever the names of different groups defined the experimental design file started with the same series of characters, e.g. \'ABC\' and \'ABCD\'.</li> </ul> <b>v0.4.1 Jul 1, 2016</b> <ul> <li>Novel tab summarizing the analysis.</i> <li>Data can now be log-transformed, e.g. for MaxQuant LFQ results.</li> <li>Added option to skip testing, e.g. 
for PCA analysis.</li> <li>User can specify principle components in the PCA scatterplot.</li> </ul> <b>v0.4 Jun 29, 2016</b> <ul> <li>Integration of moderated F statistics</li> <li>Disabled column-based clustering one-sample and two-sample tests if multiple groups are being compared.</li> </ul> <b>v0.3 Mar 11, 2016</b> <ul> <li>Data normalization.</li> <li>Reproducibility filter.</li> <li>Upload/download of experimental design files.</li> <li>Download of native Excel files.</li> <li>Integration of the Javascript D3-based plotly library.</li> </ul> <b>v0.2 Feb 23, 2016</b> <ul> <li>Working version on server.</li> </ul> <b>v0.1 Dec 20, 2015</b> <ul> <li>First prototype.</li> </ul> </font>' #,sep='') ## render HTML output$html <- renderText({ if(!is.null(global.input$file)) return() HTML(txt) }) } ##@######################################### ## id column / exp design template if(what == 'id'){ txt <- paste('<br><br><p><font size=\"4\"><b>Group assigment</b></br> Here you can download a template of an experimental design file. You can open this file in Excel and define the groups you want to compare. Replicate measurements have to be grouped under a single name in the \'Experiment\'-column. <mark>Please don\'t use special characters, like blanks or any punctuation, when defining these names!</mark></font></p> <br><p><font size=\"4\"><b>Select ID column</b></br> Choose a column from the list on the left that contains <b>unique</b> identifiers for the features in the data table. If the enntries are not unique, uniqueness will enforces by appending \"_1\". Preferably, IDs should be unique protein accession numbers (e.g. <font face=\"Courier\">NP_073737</font>) or a combination of protein accession and residue number in case of PTM analysis (e.g. 
<font face=\"Courier\">NP_073737_S544s _1_1_544_544</font>).</p> <br><p><font size=\"4\"><b>Automatic retrieval of gene symbols</b></br> If the ID column contains <a href=\"http://www.uniprot.org/\" target=\"_blank_\">UniProt</a> or <a href=\"https://www.ncbi.nlm.nih.gov/refseq/\" target=\"_blank_\">RefSeq</a> accession numbers, the software will try to map those ids to gene symbols. Currently, mapping of following organisms is supported: <ul> <li>human (<i>Homo sapiens</i>)</li> <li>mouse (<i>Mus musculus</i>)</li> <li>rat (<i>Rattus norvegicus</i>)</li> <li>zebrafish (<i>Danio rerio</i>)</li> </ul> </font></p>') ## render HTML output$html <- renderText({ if(global.param$analysis.run) return() if(global.param$id.done) return() if(!global.param$file.done) return() #if(is.null(global.input$id.col)) return() ## start page #if(global.input$id.col > 0 && !is.null(global.param$id.col.value)) return() ## after id column is choosen HTML(txt) }) } ##################################################################### ## upload of experimental design file if(what == 'ed'){ txt <- paste('<br><br><p><font size=\"4\">Please upload the experimental design file that you have created using the upload button on the left.</p></font></p>') ## render HTML output$html <- renderText({ if(global.param$analysis.run) return() if(!global.param$file.done) return() if( is.null(global.param$id.col.value) ) return() #if(global.input$id.col ==0) return() if(global.param$file.gct3) return() #if(global.param$id.done) return() if(global.param$grp.done) return() HTML(txt) }) } ##################################################################### ## gct v3 if(what == 'gct3'){ #txt <- paste('<br><br><p><font size=\"4\">Found GCT v1.3 file with', ncol(global.input$cdesc),'annotation columns. 
Choose one column as class vector for marker selection.</p></font></p>') txt <- paste('<br><p><font size=\"4\"><b>Found GCT v1.3 file</b><br>Choose the annotation column to use as class vector for marker selection.</p></font></p>') ## render HTML output$html <- renderText({ if(global.param$analysis.run) return() if(!global.param$file.gct3) return() #if(global.param$id.done) return() if(global.param$grp.done) return() HTML(txt) }) } ## #################################################################### ## analysis if(what == 'ana'){ txt <- paste('<font size=\"4\"> <p><h3>Log-transformation</h3>Apply log transformation to the data.</p> <p><h3>Data normalization</h3>You can apply different normalization methods to the data prior to testing. The methods are applied for each column separately, except for \'Quantile\'-normalization which takes the entire matrix into account.</p> <p> <ul> <li><b>Median</b>: Subtract the sample median from each value (centering).</li> <li><b>Median-MAD</b>: Subtract the sample median and divide by sample MAD (centering plus scaling).</li> <li><b>2-component</b>: Use a mixture-model approach to separate non-changing from changing features and divide both populations by the median of the non-changing features.</li> <li><b>Quantile</b>: Transform the data such that the quantiles of all sample distributions are the equal.</li> <li><b>none</b>: The data will be taken as is. Should be used if the data has been already normalized.</li> </ul> <p><h3>Filter data</h3> <b>Reproducibility:</b><br> Remove features that were not reproducibly quantifified across replicate measurements. Only available for <b>one-sample tests</b> and will be ignored otherwise. For duplicate measurements a Bland-Altman Filter of 99.9% (+/-3.29 sigma) will be applied. 
For more than two replicate measurements per group a generalized reproducibility filter is applied which is based on a linear mixed effects model to model the within-group variance and between-group variance (See \'MethComp book (pp 58-61). <i>Comparing Clinical Measurement Methods</i> by Bendix Carstensen\' for more details). You can inspect the results of the filtering step in the multiscatter plot under the \'QC\'-tab as well as in the interactive scatterplots. Data points removed prior to testing will be depicted in blue.</p> <b>StdDev:</b><br> Remove features with low standard deviation across all samples. Only useful if applied to sample cohorts that were quantified against a common reference. The percentile <b><i>P</i></b> you specify in the slider refers to the <b><i>P</i></b> percent of features having the <b>lowest standard deviation</b> across sample columns which will be <b>excluded prior to analyis</b>. Using this type of filter is useful to explore result of unsupervised clustering of the data without running a statistical test. <br><h3>Select test</h3>You can choose between a one-sample, two-sample moderate T-tests, moderated F-test or no testing. <ul> <li><b>One-sample mod T</b>: For each test whether the group mean is significantly different from zero. Only meaningful to <b>ratio data</b>!</li> <li><b>Two-sample mod T</b>: For each possible pairwise comparison of groups test whether the group means are significantly different from each other.</li> <li><b>mod F</b>: Test whether there is a significant difference between any of the difined groups. Should be used if more than 2 groups are being compared. Only meaningful to <b>ratio data</b>!</li> <li><b>none</b>: Don\'t do any test. 
Useful for data exploration such as PCA.</li> </ul> <br></font></p>') ## render HTML output$html <- renderText({ if(global.param$analysis.run) return() ## if( !is.null(error$msg) ) return() if(global.param$grp.done == F) return() if(!is.null(global.input$run.test)) if(global.input$run.test > 0) return() HTML(txt) }) } ## #################################################################### ## analysis if(what == 'res'){ txt <- paste('<p><font size=\"4\">This page allows you to interactively explore the results of you analyis. On the left you can choose between different filters, the results will be updated immediately. The filter that you specify applies to all tabs (\'Heatmap\', \'Volcanos\', ...), except the \'QC\' which shows the entire dataset. You can change the appearance of the heatmap by modifying the parameters below, you can select points shown in the Volcano plots and browse through the result table.</font></p><br>') ## render HTML output$html <- renderText({ ## if( !is.null(error$msg) ) return() if(global.param$grp.done == F) return() if(!is.null(global.input$run.test)) if(global.input$run.test == 0) return() HTML(txt) }) } } ## end printHTML
# Loaders and normalisation helpers for Illumina methylation (idat) data.
#
# NOTE(review): champ.filter() and champ.norm() belong to the Bioconductor
# package ChAMP, which was used without ever being loaded; it is now attached
# explicitly alongside minfi.
require("minfi")
require("ChAMP")

# Convert beta values (methylated fraction, in (0, 1)) to M-values.
MfromBeta <- function(beta) {
  log2(beta / (1 - beta))
}

# Split a path into its components, leaf first,
# e.g. "a/b/c" -> c("c", "b", "a", ".").
split_path <- function(x) {
  if (dirname(x) == x) x else c(basename(x), split_path(dirname(x)))
}

# Get the basenames of all idat files found recursively under `idat.dir`.
# The basename is the full path minus the trailing "_Red.idat", as expected
# by minfi::read.metharray().
get_idat_basenames <- function(idat.dir) {
  flist <- list.files(path = idat.dir, recursive = TRUE)
  flist <- flist[grep('_Red.idat', flist)]
  # Strip the channel suffix to obtain loadable basenames. Returned visibly;
  # the original ended in an assignment, which returned the value invisibly.
  file.path(idat.dir, sub(pattern = "_Red.idat", "", flist))
}

# Collect idat basenames, sample names and batch labels for the requested
# `samples` across one or more batch directories.
#
# in.dirs:     character vector of batch directories
# samples:     sample names to keep (matched against the 'sample' column of
#              each batch's metadata file)
# meta_fn:     name of the per-batch metadata CSV
# idat_subdir: subdirectory of each batch holding the idat files
# Returns a list with elements in.files, snames and batches.
get_filenames_from_batches <- function(
  in.dirs,
  samples,
  meta_fn='sources.csv',
  idat_subdir='idat'
) {
  in.files <- NULL
  snames <- NULL
  batches <- NULL
  for (b in in.dirs) {
    meta <- read.csv(file.path(b, meta_fn))
    # Row names follow the idat naming scheme <Sentrix_ID>_<Sentrix_Position>.
    rownames(meta) <- paste(meta$Sentrix_ID, meta$Sentrix_Position, sep = '_')
    this_files <- get_idat_basenames(file.path(b, idat_subdir))
    # Reorder the metadata to match the file listing, then filter to the
    # requested samples.
    meta <- meta[basename(this_files), ]
    idx <- meta[, 'sample'] %in% samples
    this_files <- this_files[idx]
    this_snames <- as.vector(meta[idx, 'sample'])
    # Batch label = 4th path component counting from the leaf; assumes a
    # layout like .../<batch>/idat/<chip>/<file> -- TODO confirm for any new
    # directory layouts.
    this_batches <- as.vector(sapply(this_files, function(f) split_path(f)[4]))
    in.files <- c(in.files, this_files)
    snames <- c(snames, this_snames)
    batches <- c(batches, this_batches)
  }
  list(in.files = in.files, snames = snames, batches = batches)
}

# Read raw idat files, apply default ChAMP filtering and one of several
# normalisation schemes.
#
# in.files:  idat basenames (see get_idat_basenames)
# snames:    sample names, used as column names of the resulting matrices
# norm.fun:  normalisation method; 'raw' returns the filtered betas as-is
# arraytype: passed through to ChAMP ('EPIC' or '450K')
# force:     passed to read.metharray() to allow arrays of differing sizes
# Returns list(beta.raw = filtered raw betas, beta = normalised betas), both
# restricted to the probes surviving champ.filter().
process_idats <- function(
  in.files,
  snames,
  norm.fun=c('swan', 'bmiq', 'funnorm', 'noob', 'quantile', 'pbc', 'raw'),
  arraytype='EPIC',
  force=FALSE
) {
  norm.fun <- match.arg(norm.fun)
  rgSet <- read.metharray(in.files, extended = TRUE, force = force)
  colnames(rgSet) <- snames
  mset <- preprocessRaw(rgSet)
  detP <- detectionP(rgSet)

  # Load beta values (raw), then apply default ChAMP filtering.
  beta.raw <- getBeta(mset, "Illumina")
  champLoad <- champ.filter(beta.raw, detP = detP, pd = NULL, arraytype = arraytype)
  beta.raw <- champLoad$beta

  # Each branch subsets to rownames(beta.raw) so the raw and normalised
  # matrices cover the same (filtered) probes.
  if (norm.fun == 'raw') {
    beta <- beta.raw
  } else if (norm.fun == 'swan') {
    mset.swan <- preprocessSWAN(rgSet, mSet = mset)
    beta <- getBeta(mset.swan)[rownames(beta.raw), ]
  } else if (norm.fun == 'bmiq') {
    beta <- champ.norm(beta = beta.raw, method = 'BMIQ', arraytype = arraytype, cores = 4)
  } else if (norm.fun == 'pbc') {
    beta <- champ.norm(beta = beta.raw, method = 'PBC', arraytype = arraytype)
  } else if (norm.fun == 'funnorm') {
    grSet.funnorm <- preprocessFunnorm(rgSet)
    beta <- getBeta(grSet.funnorm)[rownames(beta.raw), ]
  } else if (norm.fun == 'quantile') {
    grSet.quantile <- preprocessQuantile(rgSet)
    beta <- getBeta(grSet.quantile)[rownames(beta.raw), ]
  } else if (norm.fun == 'noob') {
    mset.noob <- preprocessNoob(rgSet)
    beta <- getBeta(mset.noob)[rownames(beta.raw), ]
  }
  return(list(beta.raw = beta.raw, beta = beta))
}
/R/methylation/loader.R
no_license
gaberosser/qmul-bioinf
R
false
false
3,012
r
# Loaders and normalisation helpers for Illumina methylation (idat) data.
#
# NOTE(review): champ.filter() and champ.norm() belong to the Bioconductor
# package ChAMP, which was used without ever being loaded; it is now attached
# explicitly alongside minfi.
require("minfi")
require("ChAMP")

# Convert beta values (methylated fraction, in (0, 1)) to M-values.
MfromBeta <- function(beta) {
  log2(beta / (1 - beta))
}

# Split a path into its components, leaf first,
# e.g. "a/b/c" -> c("c", "b", "a", ".").
split_path <- function(x) {
  if (dirname(x) == x) x else c(basename(x), split_path(dirname(x)))
}

# Get the basenames of all idat files found recursively under `idat.dir`.
# The basename is the full path minus the trailing "_Red.idat", as expected
# by minfi::read.metharray().
get_idat_basenames <- function(idat.dir) {
  flist <- list.files(path = idat.dir, recursive = TRUE)
  flist <- flist[grep('_Red.idat', flist)]
  # Strip the channel suffix to obtain loadable basenames. Returned visibly;
  # the original ended in an assignment, which returned the value invisibly.
  file.path(idat.dir, sub(pattern = "_Red.idat", "", flist))
}

# Collect idat basenames, sample names and batch labels for the requested
# `samples` across one or more batch directories.
#
# in.dirs:     character vector of batch directories
# samples:     sample names to keep (matched against the 'sample' column of
#              each batch's metadata file)
# meta_fn:     name of the per-batch metadata CSV
# idat_subdir: subdirectory of each batch holding the idat files
# Returns a list with elements in.files, snames and batches.
get_filenames_from_batches <- function(
  in.dirs,
  samples,
  meta_fn='sources.csv',
  idat_subdir='idat'
) {
  in.files <- NULL
  snames <- NULL
  batches <- NULL
  for (b in in.dirs) {
    meta <- read.csv(file.path(b, meta_fn))
    # Row names follow the idat naming scheme <Sentrix_ID>_<Sentrix_Position>.
    rownames(meta) <- paste(meta$Sentrix_ID, meta$Sentrix_Position, sep = '_')
    this_files <- get_idat_basenames(file.path(b, idat_subdir))
    # Reorder the metadata to match the file listing, then filter to the
    # requested samples.
    meta <- meta[basename(this_files), ]
    idx <- meta[, 'sample'] %in% samples
    this_files <- this_files[idx]
    this_snames <- as.vector(meta[idx, 'sample'])
    # Batch label = 4th path component counting from the leaf; assumes a
    # layout like .../<batch>/idat/<chip>/<file> -- TODO confirm for any new
    # directory layouts.
    this_batches <- as.vector(sapply(this_files, function(f) split_path(f)[4]))
    in.files <- c(in.files, this_files)
    snames <- c(snames, this_snames)
    batches <- c(batches, this_batches)
  }
  list(in.files = in.files, snames = snames, batches = batches)
}

# Read raw idat files, apply default ChAMP filtering and one of several
# normalisation schemes.
#
# in.files:  idat basenames (see get_idat_basenames)
# snames:    sample names, used as column names of the resulting matrices
# norm.fun:  normalisation method; 'raw' returns the filtered betas as-is
# arraytype: passed through to ChAMP ('EPIC' or '450K')
# force:     passed to read.metharray() to allow arrays of differing sizes
# Returns list(beta.raw = filtered raw betas, beta = normalised betas), both
# restricted to the probes surviving champ.filter().
process_idats <- function(
  in.files,
  snames,
  norm.fun=c('swan', 'bmiq', 'funnorm', 'noob', 'quantile', 'pbc', 'raw'),
  arraytype='EPIC',
  force=FALSE
) {
  norm.fun <- match.arg(norm.fun)
  rgSet <- read.metharray(in.files, extended = TRUE, force = force)
  colnames(rgSet) <- snames
  mset <- preprocessRaw(rgSet)
  detP <- detectionP(rgSet)

  # Load beta values (raw), then apply default ChAMP filtering.
  beta.raw <- getBeta(mset, "Illumina")
  champLoad <- champ.filter(beta.raw, detP = detP, pd = NULL, arraytype = arraytype)
  beta.raw <- champLoad$beta

  # Each branch subsets to rownames(beta.raw) so the raw and normalised
  # matrices cover the same (filtered) probes.
  if (norm.fun == 'raw') {
    beta <- beta.raw
  } else if (norm.fun == 'swan') {
    mset.swan <- preprocessSWAN(rgSet, mSet = mset)
    beta <- getBeta(mset.swan)[rownames(beta.raw), ]
  } else if (norm.fun == 'bmiq') {
    beta <- champ.norm(beta = beta.raw, method = 'BMIQ', arraytype = arraytype, cores = 4)
  } else if (norm.fun == 'pbc') {
    beta <- champ.norm(beta = beta.raw, method = 'PBC', arraytype = arraytype)
  } else if (norm.fun == 'funnorm') {
    grSet.funnorm <- preprocessFunnorm(rgSet)
    beta <- getBeta(grSet.funnorm)[rownames(beta.raw), ]
  } else if (norm.fun == 'quantile') {
    grSet.quantile <- preprocessQuantile(rgSet)
    beta <- getBeta(grSet.quantile)[rownames(beta.raw), ]
  } else if (norm.fun == 'noob') {
    mset.noob <- preprocessNoob(rgSet)
    beta <- getBeta(mset.noob)[rownames(beta.raw), ]
  }
  return(list(beta.raw = beta.raw, beta = beta))
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Student_t.R
\name{likt}
\alias{likt}
\title{Student's t Log Likelihood Function}
\usage{
likt(x, df, ncp, log = TRUE)
}
\arguments{
\item{x}{vector of quantiles.}
\item{df}{degrees of freedom (\eqn{> 0}, maybe non-integer). \code{df = Inf} is allowed.}
\item{ncp}{non-centrality parameter \eqn{\delta}{delta}; currently except for \code{rt()}, only for \code{abs(ncp) <= 37.62}. If omitted, use the central t distribution.}
\item{log}{logical; if TRUE, probabilities p are given as log(p).}
}
\value{
A numeric scalar for the log likelihood of the Student's t density given the data; df and ncp can be held constant, or, if vectors are given, a vector will be returned.
}
\description{
The log likelihood of a Student's t density with data, x, df and ncp parameters.
}
\details{
The log likelihood is the log of a function of parameters given the data.
}
\examples{
likt(x = rt(n = 2, df = 4), df = 4)
}
/man/likt.Rd
no_license
cran/likelihoodExplore
R
false
true
1,008
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Student_t.R
\name{likt}
\alias{likt}
\title{Student's t Log Likelihood Function}
\usage{
likt(x, df, ncp, log = TRUE)
}
\arguments{
\item{x}{vector of quantiles.}
\item{df}{degrees of freedom (\eqn{> 0}, maybe non-integer). \code{df = Inf} is allowed.}
\item{ncp}{non-centrality parameter \eqn{\delta}{delta}; currently except for \code{rt()}, only for \code{abs(ncp) <= 37.62}. If omitted, use the central t distribution.}
\item{log}{logical; if TRUE, probabilities p are given as log(p).}
}
\value{
A numeric scalar for the log likelihood of the Student's t density given the data; df and ncp can be held constant, or, if vectors are given, a vector will be returned.
}
\description{
The log likelihood of a Student's t density with data, x, df and ncp parameters.
}
\details{
The log likelihood is the log of a function of parameters given the data.
}
\examples{
likt(x = rt(n = 2, df = 4), df = 4)
}
# Homework 4: wage regressions on the CPS 2008 extract.
# NOTE(review): the original script interleaved raw answer text (with mangled
# beta symbols) between statements, which made the file unparseable/unrunnable.
# All prose is now in comments; statements referencing undefined objects are
# commented out with notes so the script runs top to bottom.
library(foreign)

# NOTE(review): hard-coded local path -- update to your own location.
cps08 <- read.dta("C:/Users/Saboor/Desktop/cps08.dta")

# Q1A. The coefficient on the Age variable is 0.5852. Therefore, average
# hourly earnings are expected to increase by approximately $0.59 for an age
# change 25 to 26, and likewise for 33 to 34, because a one-year change in age
# changes AHE by the coefficient, .5852 (about .59).
lm(cps08$ahe ~ cps08$age + cps08$female + cps08$bachelor)
# Fitted model (was bare text, not valid R):
#   AHE = b0 + b1*AGE + b2*FEMALE + b3*BACHELOR + u
#   AHE(-.6357) = b0 + b1(.5852) + b2(-3.6640) + b3(8.0830) + u

# Q2B. The average hourly earnings change for an age change 25 to 26 (and for
# 33 to 34) is about 2.7%.
lm(log(cps08$ahe) ~ cps08$age + cps08$female + cps08$bachelor)
# Coefficients: AGE: .02733; FEMALE: -.18592; BACHELOR: .42813

data <- data.frame(cps08$ahe, cps08$age)
x <- c(cps08$ahe)
y <- c(cps08$age)
plot(cps08$age, cps08$ahe, log = "xy")
# NOTE(review): `fitted` was never defined, so this call errored; fit and name
# a model first before extracting its coefficients.
# coef(fitted)

# Q3C. For age change from 25 to 26: 0.80391. For age change from 33 to 34: .80391
lm(log(cps08$ahe) ~ log(cps08$age) + cps08$female + cps08$bachelor)

# Q4D.
lm(log(cps08$ahe) ~ cps08$age + log(cps08$age, base = exp(2)) + cps08$female + cps08$bachelor)
# If AGE increases from 25 to 26 or 33 to 34, it would be a percent change of
# 2.91939%.
summary(lm(cps08$ahe ~ cps08$age))

# Q5E.
# Regression for C
summary(lm(log(cps08$ahe) ~ log(cps08$age) + cps08$female + cps08$bachelor))
# Regression for B
summary(lm(log(cps08$ahe) ~ cps08$age + cps08$female + cps08$bachelor))
# I prefer the regression in (B) because the R-squared in (b) is stronger than
# that of (c): for (b) the R-squared is ".2007" whereas for (c) it is ".2008".

# Q6F.
# Regression for D
summary(lm(log(cps08$ahe) ~ cps08$age + log(cps08$age, base = exp(2)) + cps08$female + cps08$bachelor))
# Regression for B
summary(lm(log(cps08$ahe) ~ cps08$age + cps08$female + cps08$bachelor))
# The regression in (d) adds the variable Age^2 to regression (b). The
# coefficient on Age^2 is statistically significant (1.153), which suggests the
# addition of Age^2 is important. Thus, regression (d) is preferred over (b).

# Q7G
# Regression for D
summary(lm(log(cps08$ahe) ~ cps08$age + log(cps08$age, base = exp(2)) + cps08$female + cps08$bachelor))
# Regression for C
summary(lm(log(cps08$ahe) ~ log(cps08$age) + cps08$female + cps08$bachelor))
# The regressions differ in their choice of regressors and can be compared on
# adjusted R^2. Regression (d) has the higher adjusted R^2, so regression (d)
# is preferred over (c).

# Q8H.
Y <- c(lm(log(cps08$ahe) ~ cps08$age))
X <- c(cps08$age)
length(x)
C <- c(lm(log(cps08$ahe) ~ log(cps08$age)))
D <- c(lm(log(cps08$ahe) ~ cps08$age + log(cps08$age, base = exp(2))))
length(D)
# NOTE(review): the following plotting attempts either use undefined objects
# (b, B) or pass list-coerced lm objects to plot(), all of which error; they
# are kept commented for reference. ("2" is also not a valid plot type.)
# plot(X, Y)
# plot(cps08$age, lm(log(cps08$ahe) ~ cps08$age))
# length(b)
# plot(X, B)
# plot(X, C)
# plot(X, D)
Y <- c(lm(log(cps08$ahe) ~ log(cps08$age)))
# plot(X, Y, type = "2")
fit <- lm(cps08$ahe ~ cps08$age + cps08$female + cps08$bachelor)
# The regression function plots using AGE and ln(AHE) from (b) are similar; the
# graph for (d) is different, showing a decreasing effect of AGE as AHE
# increases. The regression function plot for females with college degrees
# would change with respect to the coefficients of BACHELOR and FEMALE.

# Q9I.
lm(log(cps08$ahe) ~ cps08$age + log(cps08$age, base = exp(2)) + cps08$female + cps08$bachelor)
# The coefficient on the interaction term (FEMALE*BACHELOR) shows the extra
# effect of BACHELOR on ln(AHE) for women relative to the effect for men.
# Hand predictions (were bare text, not valid R):
#   Alexis: -1.59105 + 2.91939(30) + -.02237(30) + -.18588 + .42836 = 85.56203
#           -> predicted AHE $85.56
#   Jane:   -1.59105 + 2.91939(30) + -.02237(30) + -.18588 = 85.13367
#           -> predicted AHE $85.13367
#   Predicted difference: Alexis earns more because she has a bachelors degree
#   whereas Jane does not.
#   Bob:    -1.59105 + 2.91939(30) + -.02237(30) + .42836 = 85.74
#           -> Bob is predicted to make more than Alexis because he is a man.
#   Jim:    -1.59105 + 2.91939(30) + -.02237(30) = 85.32
#           -> Jim's predicted AHE is 85.32, more than Jane who has the same
#              credentials. Predicted difference between Jim and Bob:
#              85.74 - 85.32 = .42

# Q10 J. There is a different effect of AGE on ln(AHE) for men and women
# because of the variable AGE^2. Using the regression from (d), which includes
# AGE^2, your income would increase by 2.91939 for each additional year.
# Q11 K. The effect of AGE on earnings differs between high school graduates
# and college graduates because college grads have an advantage of .42836
# according to regression (d).
# Q12 L. Young workers are predicted to make less money than their seniors
# according to the regressions; they can try to make up the difference by
# being a man and having a bachelors.
/HW 4 Code.R
no_license
ahmedsaboor/R-Coding-Assignments
R
false
false
4,946
r
# Homework 4: wage regressions on the CPS 2008 extract.
# NOTE(review): the original script interleaved raw answer text (with mangled
# beta symbols) between statements, which made the file unparseable/unrunnable.
# All prose is now in comments; statements referencing undefined objects are
# commented out with notes so the script runs top to bottom.
library(foreign)

# NOTE(review): hard-coded local path -- update to your own location.
cps08 <- read.dta("C:/Users/Saboor/Desktop/cps08.dta")

# Q1A. The coefficient on the Age variable is 0.5852. Therefore, average
# hourly earnings are expected to increase by approximately $0.59 for an age
# change 25 to 26, and likewise for 33 to 34, because a one-year change in age
# changes AHE by the coefficient, .5852 (about .59).
lm(cps08$ahe ~ cps08$age + cps08$female + cps08$bachelor)
# Fitted model (was bare text, not valid R):
#   AHE = b0 + b1*AGE + b2*FEMALE + b3*BACHELOR + u
#   AHE(-.6357) = b0 + b1(.5852) + b2(-3.6640) + b3(8.0830) + u

# Q2B. The average hourly earnings change for an age change 25 to 26 (and for
# 33 to 34) is about 2.7%.
lm(log(cps08$ahe) ~ cps08$age + cps08$female + cps08$bachelor)
# Coefficients: AGE: .02733; FEMALE: -.18592; BACHELOR: .42813

data <- data.frame(cps08$ahe, cps08$age)
x <- c(cps08$ahe)
y <- c(cps08$age)
plot(cps08$age, cps08$ahe, log = "xy")
# NOTE(review): `fitted` was never defined, so this call errored; fit and name
# a model first before extracting its coefficients.
# coef(fitted)

# Q3C. For age change from 25 to 26: 0.80391. For age change from 33 to 34: .80391
lm(log(cps08$ahe) ~ log(cps08$age) + cps08$female + cps08$bachelor)

# Q4D.
lm(log(cps08$ahe) ~ cps08$age + log(cps08$age, base = exp(2)) + cps08$female + cps08$bachelor)
# If AGE increases from 25 to 26 or 33 to 34, it would be a percent change of
# 2.91939%.
summary(lm(cps08$ahe ~ cps08$age))

# Q5E.
# Regression for C
summary(lm(log(cps08$ahe) ~ log(cps08$age) + cps08$female + cps08$bachelor))
# Regression for B
summary(lm(log(cps08$ahe) ~ cps08$age + cps08$female + cps08$bachelor))
# I prefer the regression in (B) because the R-squared in (b) is stronger than
# that of (c): for (b) the R-squared is ".2007" whereas for (c) it is ".2008".

# Q6F.
# Regression for D
summary(lm(log(cps08$ahe) ~ cps08$age + log(cps08$age, base = exp(2)) + cps08$female + cps08$bachelor))
# Regression for B
summary(lm(log(cps08$ahe) ~ cps08$age + cps08$female + cps08$bachelor))
# The regression in (d) adds the variable Age^2 to regression (b). The
# coefficient on Age^2 is statistically significant (1.153), which suggests the
# addition of Age^2 is important. Thus, regression (d) is preferred over (b).

# Q7G
# Regression for D
summary(lm(log(cps08$ahe) ~ cps08$age + log(cps08$age, base = exp(2)) + cps08$female + cps08$bachelor))
# Regression for C
summary(lm(log(cps08$ahe) ~ log(cps08$age) + cps08$female + cps08$bachelor))
# The regressions differ in their choice of regressors and can be compared on
# adjusted R^2. Regression (d) has the higher adjusted R^2, so regression (d)
# is preferred over (c).

# Q8H.
Y <- c(lm(log(cps08$ahe) ~ cps08$age))
X <- c(cps08$age)
length(x)
C <- c(lm(log(cps08$ahe) ~ log(cps08$age)))
D <- c(lm(log(cps08$ahe) ~ cps08$age + log(cps08$age, base = exp(2))))
length(D)
# NOTE(review): the following plotting attempts either use undefined objects
# (b, B) or pass list-coerced lm objects to plot(), all of which error; they
# are kept commented for reference. ("2" is also not a valid plot type.)
# plot(X, Y)
# plot(cps08$age, lm(log(cps08$ahe) ~ cps08$age))
# length(b)
# plot(X, B)
# plot(X, C)
# plot(X, D)
Y <- c(lm(log(cps08$ahe) ~ log(cps08$age)))
# plot(X, Y, type = "2")
fit <- lm(cps08$ahe ~ cps08$age + cps08$female + cps08$bachelor)
# The regression function plots using AGE and ln(AHE) from (b) are similar; the
# graph for (d) is different, showing a decreasing effect of AGE as AHE
# increases. The regression function plot for females with college degrees
# would change with respect to the coefficients of BACHELOR and FEMALE.

# Q9I.
lm(log(cps08$ahe) ~ cps08$age + log(cps08$age, base = exp(2)) + cps08$female + cps08$bachelor)
# The coefficient on the interaction term (FEMALE*BACHELOR) shows the extra
# effect of BACHELOR on ln(AHE) for women relative to the effect for men.
# Hand predictions (were bare text, not valid R):
#   Alexis: -1.59105 + 2.91939(30) + -.02237(30) + -.18588 + .42836 = 85.56203
#           -> predicted AHE $85.56
#   Jane:   -1.59105 + 2.91939(30) + -.02237(30) + -.18588 = 85.13367
#           -> predicted AHE $85.13367
#   Predicted difference: Alexis earns more because she has a bachelors degree
#   whereas Jane does not.
#   Bob:    -1.59105 + 2.91939(30) + -.02237(30) + .42836 = 85.74
#           -> Bob is predicted to make more than Alexis because he is a man.
#   Jim:    -1.59105 + 2.91939(30) + -.02237(30) = 85.32
#           -> Jim's predicted AHE is 85.32, more than Jane who has the same
#              credentials. Predicted difference between Jim and Bob:
#              85.74 - 85.32 = .42

# Q10 J. There is a different effect of AGE on ln(AHE) for men and women
# because of the variable AGE^2. Using the regression from (d), which includes
# AGE^2, your income would increase by 2.91939 for each additional year.
# Q11 K. The effect of AGE on earnings differs between high school graduates
# and college graduates because college grads have an advantage of .42836
# according to regression (d).
# Q12 L. Young workers are predicted to make less money than their seniors
# according to the regressions; they can try to make up the difference by
# being a man and having a bachelors.
# Weighted likelihood estimation (WLE) of latent trait theta.
#
# resp:   vector (one examinee) or matrix (one row per examinee) of responses
# params: item parameter matrix, one row per item
# range:  interval over which to search for theta (defaults to c(-6, 6))
# mod:    IRT model -- "brm" (binary response) or "grm" (graded response);
#         selects the lder1.<mod> / FI.<mod> helper functions via get()
# Returns list(theta, info, sem); see Warm (1989) for the WLE SEM.
wleEst <- function( resp,               # The vector of responses
                    params,             # The item parameters
                    range = c(-6, 6),   # The interval to maximize over
                    mod = c("brm", "grm"), # The model
                    ... ){

  # Resolve the model choice. Previously the full default vector could reach
  # get(), which errors for arguments of length > 1.
  mod <- match.arg(mod)

  # First turn params into a matrix:
  params <- rbind(params)

  # And turn the response into a matrix: multi-column if there is more than
  # one item, otherwise a 1-column matrix.
  resp <- if ( dim(params)[1] > 1 ) rbind(resp) else cbind(resp)

#~~~~~~~~~~~~~~~~~#
# Argument Checks #
#~~~~~~~~~~~~~~~~~#

  # Make sure that the arguments are OK (&& short-circuits, so a NULL resp
  # no longer produces a zero-length condition in the dimension check):

## 1 ## (Make sure that params and resp are ALL numeric)
  if ( mode(params) != "numeric" )
    stop( "params need to be numeric" )

  if ( !is.null(resp) && mode(resp) != "numeric" )
    stop( "resp needs to be numeric" )

## 2 ## (Make sure that the dimensions of params and response are equal)
  if ( !is.null(resp) && ( dim(resp)[ 2 ] != dim(params)[ 1 ] ) )
    stop( "number of params does not match the length of resp" )

#~~~~~~~~~~~~~~~~~~~~~~~~~#
# Weighted Likelihood Est #
#~~~~~~~~~~~~~~~~~~~~~~~~~#

  # Indicate the lower/upper boundary of the search:
  if ( is.null(range) ) range <- c(-6, 6)
  l <- range[1]; u <- range[2]

  n.ppl <- dim(resp)[1]
  est <- numeric(n.ppl)   # preallocated vector of theta estimates
  d   <- numeric(n.ppl)   # preallocated vector of WLE corrections

  # Resolve the model-specific first-derivative function once (loop invariant):
  lder1 <- get( paste0("lder1.", mod) )

  # Then, maximize the loglikelihood function over the interval for each person:
  for ( i in seq_len(n.ppl) ){

    est[i] <- uniroot( lder1, lower = l, upper = u, extendInt = "yes",
                       u = resp[i, ], params = params, type = "WLE" )$root

    # WLE correction term: difference between the WLE and MLE derivatives at
    # the estimate.
    d[i] <- lder1( u = resp[i, ], theta = est[i], params = params, type = "WLE" ) -
            lder1( u = resp[i, ], theta = est[i], params = params, type = "MLE" )

  } # END for LOOP

  # Clamp the estimates to the search interval and round to four decimals:
  est <- pmax(l, pmin(u, est))
  est <- round(est, digits = 4)

  # And pull out the observed test information at the estimates:
  info <- get( paste0("FI.", mod) )( params = params, theta = est,
                                     type = "observed", resp = resp )$test

  # Note: See Warm for the WLE SEM.
  return( list( theta = est, info = info, sem = sqrt( (info + d^2)/info^2 ) ) )

} # END wleEst FUNCTION
/catIrt/R/wleEst.R
no_license
ingted/R-Examples
R
false
false
2,520
r
# Weighted likelihood estimation (WLE) of latent trait theta.
#
# resp:   vector (one examinee) or matrix (one row per examinee) of responses
# params: item parameter matrix, one row per item
# range:  interval over which to search for theta (defaults to c(-6, 6))
# mod:    IRT model -- "brm" (binary response) or "grm" (graded response);
#         selects the lder1.<mod> / FI.<mod> helper functions via get()
# Returns list(theta, info, sem); see Warm (1989) for the WLE SEM.
wleEst <- function( resp,               # The vector of responses
                    params,             # The item parameters
                    range = c(-6, 6),   # The interval to maximize over
                    mod = c("brm", "grm"), # The model
                    ... ){

  # Resolve the model choice. Previously the full default vector could reach
  # get(), which errors for arguments of length > 1.
  mod <- match.arg(mod)

  # First turn params into a matrix:
  params <- rbind(params)

  # And turn the response into a matrix: multi-column if there is more than
  # one item, otherwise a 1-column matrix.
  resp <- if ( dim(params)[1] > 1 ) rbind(resp) else cbind(resp)

#~~~~~~~~~~~~~~~~~#
# Argument Checks #
#~~~~~~~~~~~~~~~~~#

  # Make sure that the arguments are OK (&& short-circuits, so a NULL resp
  # no longer produces a zero-length condition in the dimension check):

## 1 ## (Make sure that params and resp are ALL numeric)
  if ( mode(params) != "numeric" )
    stop( "params need to be numeric" )

  if ( !is.null(resp) && mode(resp) != "numeric" )
    stop( "resp needs to be numeric" )

## 2 ## (Make sure that the dimensions of params and response are equal)
  if ( !is.null(resp) && ( dim(resp)[ 2 ] != dim(params)[ 1 ] ) )
    stop( "number of params does not match the length of resp" )

#~~~~~~~~~~~~~~~~~~~~~~~~~#
# Weighted Likelihood Est #
#~~~~~~~~~~~~~~~~~~~~~~~~~#

  # Indicate the lower/upper boundary of the search:
  if ( is.null(range) ) range <- c(-6, 6)
  l <- range[1]; u <- range[2]

  n.ppl <- dim(resp)[1]
  est <- numeric(n.ppl)   # preallocated vector of theta estimates
  d   <- numeric(n.ppl)   # preallocated vector of WLE corrections

  # Resolve the model-specific first-derivative function once (loop invariant):
  lder1 <- get( paste0("lder1.", mod) )

  # Then, maximize the loglikelihood function over the interval for each person:
  for ( i in seq_len(n.ppl) ){

    est[i] <- uniroot( lder1, lower = l, upper = u, extendInt = "yes",
                       u = resp[i, ], params = params, type = "WLE" )$root

    # WLE correction term: difference between the WLE and MLE derivatives at
    # the estimate.
    d[i] <- lder1( u = resp[i, ], theta = est[i], params = params, type = "WLE" ) -
            lder1( u = resp[i, ], theta = est[i], params = params, type = "MLE" )

  } # END for LOOP

  # Clamp the estimates to the search interval and round to four decimals:
  est <- pmax(l, pmin(u, est))
  est <- round(est, digits = 4)

  # And pull out the observed test information at the estimates:
  info <- get( paste0("FI.", mod) )( params = params, theta = est,
                                     type = "observed", resp = resp )$test

  # Note: See Warm for the WLE SEM.
  return( list( theta = est, info = info, sem = sqrt( (info + d^2)/info^2 ) ) )

} # END wleEst FUNCTION
#' Calculates harmonic reserves with uncertainty using bootstrap method
#'
#' \code{Boot.Harm.Reserves}
#' NOTE(review): the original doc said iterations below the actual cumulative
#' production are "rejected and a replacement iteration is added", but the
#' code actually floors each iteration at the cumulative production to date
#' (max(EUR, Qcum)); the behaviour is kept and documented as implemented.
#'
#' @param x data frame to parse; must contain columns \code{q} (rate),
#'   \code{Q} (cumulative production) and \code{qf} (final rate)
#' @param iters number of iterations to use for bootstrap estimates
#' @export
#' @examples
#' data <- data.frame(q=seq(500,5,-5),Q=seq(20,2000,20),qf=rep(25,100))
#' iters <- 5
#' Boot.Harm.Reserves(data, iters)
Boot.Harm.Reserves <- function(x, iters){
  # Keep only producing records.
  x <- subset(x, q > 0)
  bstrap_iters <- iters
  bstrap <- rep(NA_real_, bstrap_iters)

  # boot.harm builds a closure over the data "x"; the returned function yields
  # one bootstrapped replicate of a harmonic-decline EUR (or the cumulative
  # production to date, whichever is larger).
  # NOTE(review): boot.lm() is assumed to be defined elsewhere in the package.
  boot.harm <- function(x, ...){
    function(){
      qf <- max(x$qf)
      Qcum <- max(x$Q)
      fit_coef <- boot.lm(log(q) ~ Q, x)()$coef
      # Harmonic decline: log(q) is linear in Q, so the EUR solves
      # log(qf) = b0 + b1 * Q.
      EUR <- as.numeric((log(qf) - fit_coef[1]) / fit_coef[2])
      max(EUR, Qcum)
    }
  }

  # Boot.Harm is an unargumented function that returns a bootstrapped
  # replicate of a harmonic decline.
  Boot.Harm <- boot.harm(x)

  # Only attempt a decline with more than 4 producing months; with 4 or fewer
  # the NA placeholders are returned unchanged (same net effect as the
  # original nested if/else, which also skipped < 3 without comment).
  # seq_len() replaces 1:bstrap_iters, which mis-iterated for iters == 0;
  # vapply (vs sapply) guarantees a numeric result. The inner argument is
  # named `k` so it no longer shadows the data frame `x`.
  if(length(x$q) > 4) {
    bstrap <- vapply(seq_len(bstrap_iters), function(k) Boot.Harm(), numeric(1))
  }

  return(bstrap)
}
/R/Boot.Harm.Reserves.r
no_license
morganmadell/PetroleumDeclineAnalysis
R
false
false
1,451
r
#' Calculates harmonic reserves with uncertainty using bootstrap method #' #' \code{Boot.Harm.Reserves} #' All iterations that give a cumulative production less than the actual cum are rejected and a replacement iteration is added #' #' @param x data frame to parse #' @param iters number of iteration to use for bootstrap estiamtes #' @export #' @examples #' data <- data.frame(q=seq(500,5,-5),Q=seq(20,2000,20),qf=rep(25,100)) #' iters <- 5 #' Boot.Harm.Reserves(data, iters) Boot.Harm.Reserves <- function(x, iters){ x <- subset(x,q>0) bstrap_iters <- iters bstrap <- rep(NA_real_, bstrap_iters) # boot.harm builds an enclosure of the data "x" and a function that returns a bootstrapped replicate of an harmonic decline (or the cum production to date if it's the larger) on the data boot.harm <- function(x, ...){ function(){ qf <- max(x$qf) Qcum <- max(x$Q) temp <- boot.lm(log(q) ~ Q, x)()$coef EUR <- as.numeric((log(qf)-temp[1])/temp[2]) return(max(EUR,Qcum)) } } # Boot.Harm is an unargumented function that returns a bootstrapped replicate of an harmonic decline Boot.Harm <- boot.harm(x) if(length(x$q)<3) { # If only three producing months, don't do much for now } else { # Check if the well has more than 4 producing months before doing a decline if(length(x$q)>4) { bstrap <- sapply(X=1:bstrap_iters, FUN=function(x) Boot.Harm()) } } return(bstrap) }
require(bnlearn) load("ecoli70.rda") ecoli70 <- bn load("magic-niab.rda") magic_niab <- bn load("magic-irri.rda") magic_irri <- bn load("arth150.rda") arth150 <- bn print_edges <- function(bn.fit) { arcs <- bn.net(bn.fit)$arcs cat(paste0("[(\"", arcs[1,1], "\", \"", arcs[1,2], "\")")) for (row in 2:nrow(arcs)) { cat(paste0(", (\"", arcs[row,1], "\", \"", arcs[row, 2], "\")")) } cat("]\n") print("") } print_cpds <- function(bn.fit) { for (node in names(bn.fit)) { name <- paste0("\"", bn.fit[[node]]$node, "\"") n_parents <- length(bn.fit[[node]]$parents) if (n_parents == 0) { beta <- paste0("[", bn.fit[[node]]$coefficients[["(Intercept)"]], "]") parents <- "[]" } else { first_parent <- bn.fit[[node]]$parents[1] beta <- paste0("[", bn.fit[[node]]$coefficients[["(Intercept)"]], ", ", bn.fit[[node]]$coefficients[[first_parent]]) parents <- paste0("[\"", first_parent, "\"") if (n_parents > 1) { for (parent in bn.fit[[node]]$parents[2:n_parents]) { beta <- paste0(beta, ", ", bn.fit[[node]]$coefficients[[parent]]) parents <- paste0(parents, ", \"", parent, "\"") } } beta <- paste0(beta, "]") parents <- paste0(parents, "]") variance <- bn.fit[[node]]$sd**2 } cat(paste0("n", node, "_cpd = LinearGaussianCPD(", name, ", ", parents, ", ", beta, ", ", variance, ")\n")) } add <- paste0("add_cpds(n", names(bn.fit)[1], "_cpd") n_nodes <- length(names(bn.fit)) for (node in names(bn.fit)[2:n_nodes]) { add <- paste0(add, ", n", node, "_cpd") } add <- paste0(add, ")") cat(paste0(add, "\n")) } print("ECOLI70") print("=======================") print("") print_edges(ecoli70) print_cpds(ecoli70) print("") print("") print("MAGIC-NIAB") print("=======================") print("") print_edges(magic_niab) print_cpds(magic_niab) print("") print("") print("MAGIC-IRRI") print("=======================") print("") print_edges(magic_irri) print_cpds(magic_irri) print("") print("") print("ARTH150") print("=======================") print("") print_edges(arth150) print_cpds(arth150)
/GaussianNetworks/print_structure.R
no_license
isidrolpez/SPBN-Experiments
R
false
false
2,190
r
require(bnlearn) load("ecoli70.rda") ecoli70 <- bn load("magic-niab.rda") magic_niab <- bn load("magic-irri.rda") magic_irri <- bn load("arth150.rda") arth150 <- bn print_edges <- function(bn.fit) { arcs <- bn.net(bn.fit)$arcs cat(paste0("[(\"", arcs[1,1], "\", \"", arcs[1,2], "\")")) for (row in 2:nrow(arcs)) { cat(paste0(", (\"", arcs[row,1], "\", \"", arcs[row, 2], "\")")) } cat("]\n") print("") } print_cpds <- function(bn.fit) { for (node in names(bn.fit)) { name <- paste0("\"", bn.fit[[node]]$node, "\"") n_parents <- length(bn.fit[[node]]$parents) if (n_parents == 0) { beta <- paste0("[", bn.fit[[node]]$coefficients[["(Intercept)"]], "]") parents <- "[]" } else { first_parent <- bn.fit[[node]]$parents[1] beta <- paste0("[", bn.fit[[node]]$coefficients[["(Intercept)"]], ", ", bn.fit[[node]]$coefficients[[first_parent]]) parents <- paste0("[\"", first_parent, "\"") if (n_parents > 1) { for (parent in bn.fit[[node]]$parents[2:n_parents]) { beta <- paste0(beta, ", ", bn.fit[[node]]$coefficients[[parent]]) parents <- paste0(parents, ", \"", parent, "\"") } } beta <- paste0(beta, "]") parents <- paste0(parents, "]") variance <- bn.fit[[node]]$sd**2 } cat(paste0("n", node, "_cpd = LinearGaussianCPD(", name, ", ", parents, ", ", beta, ", ", variance, ")\n")) } add <- paste0("add_cpds(n", names(bn.fit)[1], "_cpd") n_nodes <- length(names(bn.fit)) for (node in names(bn.fit)[2:n_nodes]) { add <- paste0(add, ", n", node, "_cpd") } add <- paste0(add, ")") cat(paste0(add, "\n")) } print("ECOLI70") print("=======================") print("") print_edges(ecoli70) print_cpds(ecoli70) print("") print("") print("MAGIC-NIAB") print("=======================") print("") print_edges(magic_niab) print_cpds(magic_niab) print("") print("") print("MAGIC-IRRI") print("=======================") print("") print_edges(magic_irri) print_cpds(magic_irri) print("") print("") print("ARTH150") print("=======================") print("") print_edges(arth150) print_cpds(arth150)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/yingtools2.R \name{is.between} \alias{is.between} \title{Determines if x is between start and stop/} \usage{ is.between(x, start, stop, check = TRUE) } \arguments{ \item{x}{vector of values to be checked} \item{start}{vector of start time(s)} \item{stop}{vector of stop time(s)} \item{check}{whether to check if start comes before stop.} } \description{ Similar to \code{\link[dplyr:between]{dplyr::between()}}, except that the vectors are recycled, so x can be a fixed value. }
/man/is.between.Rd
no_license
ying14/yingtools2
R
false
true
560
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/yingtools2.R \name{is.between} \alias{is.between} \title{Determines if x is between start and stop/} \usage{ is.between(x, start, stop, check = TRUE) } \arguments{ \item{x}{vector of values to be checked} \item{start}{vector of start time(s)} \item{stop}{vector of stop time(s)} \item{check}{whether to check if start comes before stop.} } \description{ Similar to \code{\link[dplyr:between]{dplyr::between()}}, except that the vectors are recycled, so x can be a fixed value. }
## Put comments here that give an overall description of what your ## functions do ## The below function, makeCacheMatrix, creates a matrix, which is really a list containing a function to ## a) set the value of the matrix ## b) get the value of the matrix ## c) set the inverse of the matrix ## d) get the inverse of the matrix makeCacheMatrix <- function(x=matrix()) { inv <- NULL set <- function(y) { x <<- y inv <<- NULL } get <-function() x setinverse <- function(inverse) inv <<- inverse getinverse <- function() inv list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ## The following function calculates the inverse of the matrix created with the above function. ## However, it first checks to see if the inverse has already been calculated. If so, it gets the inverse from the cache ## and skips the computation. Otherwise, it calculates the inverse of the matrix and sets the value of the inverse ## in the cache via the setinverse function. cachesolve <- function(x, ...) { inv <- getinverse() if(!is.null(inv)) { message("getting cached data") return(inv) } data <- get() inv <- solve(x, ...) setinverse(inv) inv ## Return a matrix that is the inverse of 'x' }
/cachematrix.R
no_license
romio1983/ProgrammingAssignment2
R
false
false
1,272
r
## Put comments here that give an overall description of what your ## functions do ## The below function, makeCacheMatrix, creates a matrix, which is really a list containing a function to ## a) set the value of the matrix ## b) get the value of the matrix ## c) set the inverse of the matrix ## d) get the inverse of the matrix makeCacheMatrix <- function(x=matrix()) { inv <- NULL set <- function(y) { x <<- y inv <<- NULL } get <-function() x setinverse <- function(inverse) inv <<- inverse getinverse <- function() inv list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ## The following function calculates the inverse of the matrix created with the above function. ## However, it first checks to see if the inverse has already been calculated. If so, it gets the inverse from the cache ## and skips the computation. Otherwise, it calculates the inverse of the matrix and sets the value of the inverse ## in the cache via the setinverse function. cachesolve <- function(x, ...) { inv <- getinverse() if(!is.null(inv)) { message("getting cached data") return(inv) } data <- get() inv <- solve(x, ...) setinverse(inv) inv ## Return a matrix that is the inverse of 'x' }
# MOMOpack for R # Originally MOMOpack V 4.3 for Stata, # created by Bernadette Gergonne, SSI-EpiLife for Euro MOMO. # Port to R and further development by Theodore Lytras <thlytras@gmail.com> analyzeMOMO <- function(mi, version="v4-3", datesISO=TRUE, useAUTOMN=FALSE, USEglm2=TRUE, zvalue=1.96, compatibility.mode=FALSE, verbose=TRUE) { if (!("MOMOinput" %in% class(mi))) stop("Argument 'mi' should have class \"MOMOinput\".") return(lapply(momoAttr$groups, function(x) { if (verbose) cat(sprintf("- Iterating over group %s... ", x)) ret <- analyzeMOMOgroup(mi, x, version, datesISO, useAUTOMN, USEglm2, zvalue, compatibility.mode) if (verbose) cat("DONE\n") ret })) } analyzeMOMOgroup <- function(mi, group, version="v4-3", datesISO=TRUE, useAUTOMN=FALSE, USEglm2=TRUE, zvalue=1.96, compatibility.mode=FALSE) { if (!("MOMOinput" %in% class(mi))) stop("Argument 'mi' should have class \"MOMOinput\".") if (sum(mi[,sprintf("GRP%s", group)], na.rm=TRUE) == nrow(mi)) { groupfile <- mi } else { groupfile <- mi[mi[,sprintf("GRP%s", group)] & !is.na(mi[,sprintf("GRP%s", group)]),] } aggr <- aggregateMOMO(groupfile, group, compatibility.mode) dataExport$aggr <- aggr aggr_fullDelay <- delayMOMO(aggr, zvalue) dataExport$aggr_fullDelay <- aggr_fullDelay aggr_delay <- trimDelayMOMO(aggr_fullDelay) dataExport$aggr_delay <- aggr_delay final <- excessMOMO(aggr=aggr_delay, version, useAUTOMN, USEglm2, zvalue) n <- names(final) extraVars <- n[stringr::str_detect(n,"^pred")] keepVars <- n[!n %in% extraVars] toSave <- final final <- final[,keepVars] for(Z in seq(4,20,2)) { # We drop the variables UPI if they are not crossed by the data if (sum(final$nbc > final[[paste("UPIb",Z,sep="")]], na.rm=TRUE)==0) { final[[paste("UPIb",Z,sep="")]] <- NULL ## RICHARD CHANGE #break } } table <- tableMOMO(final) EUROMOMO <- EUROMOMOoutput(final, useAUTOMN, datesISO) periods <- list( cumChoice = calcPeriodMOMO(final, momoAttr$WStart, momoAttr$WEnd), # 1. 
using the chosen Period cumWinter = calcPeriodMOMO(final, 40, 20), # 2. WINTER EXCESS WEEK 40 to 20 cumSummer = calcPeriodMOMO(final, 21, 39), # 3. SUMMER EXCESS WEEK 21 to 39 cumYear = calcPeriodMOMO(final, 1, 53, attr(mi, "WStart")), # 4. EXCESS FULL YEAR WEEK 1 to 53 cumSeason = calcPeriodMOMO(final, 27, 26)) # 5. EXCESS FULL SEASON w27 to w26 attr(periods, "WStart") <- momoAttr$WStart attr(periods, "WEnd") <- momoAttr$WEnd return(list( aggregate = aggr, aggregate_fullDelay = aggr_fullDelay, aggregate_delay = aggr_delay, finalDataset = final, MOMOtable=table, EUROMOMOcomplete = EUROMOMO$COMPLETE, EUROMOMOrestricted = EUROMOMO$RESTRICTED, periods = periods, toSave=toSave)) }
/R/analyzeMOMO.R
no_license
EuroMOMOnetwork/MOMO
R
false
false
2,794
r
# MOMOpack for R # Originally MOMOpack V 4.3 for Stata, # created by Bernadette Gergonne, SSI-EpiLife for Euro MOMO. # Port to R and further development by Theodore Lytras <thlytras@gmail.com> analyzeMOMO <- function(mi, version="v4-3", datesISO=TRUE, useAUTOMN=FALSE, USEglm2=TRUE, zvalue=1.96, compatibility.mode=FALSE, verbose=TRUE) { if (!("MOMOinput" %in% class(mi))) stop("Argument 'mi' should have class \"MOMOinput\".") return(lapply(momoAttr$groups, function(x) { if (verbose) cat(sprintf("- Iterating over group %s... ", x)) ret <- analyzeMOMOgroup(mi, x, version, datesISO, useAUTOMN, USEglm2, zvalue, compatibility.mode) if (verbose) cat("DONE\n") ret })) } analyzeMOMOgroup <- function(mi, group, version="v4-3", datesISO=TRUE, useAUTOMN=FALSE, USEglm2=TRUE, zvalue=1.96, compatibility.mode=FALSE) { if (!("MOMOinput" %in% class(mi))) stop("Argument 'mi' should have class \"MOMOinput\".") if (sum(mi[,sprintf("GRP%s", group)], na.rm=TRUE) == nrow(mi)) { groupfile <- mi } else { groupfile <- mi[mi[,sprintf("GRP%s", group)] & !is.na(mi[,sprintf("GRP%s", group)]),] } aggr <- aggregateMOMO(groupfile, group, compatibility.mode) dataExport$aggr <- aggr aggr_fullDelay <- delayMOMO(aggr, zvalue) dataExport$aggr_fullDelay <- aggr_fullDelay aggr_delay <- trimDelayMOMO(aggr_fullDelay) dataExport$aggr_delay <- aggr_delay final <- excessMOMO(aggr=aggr_delay, version, useAUTOMN, USEglm2, zvalue) n <- names(final) extraVars <- n[stringr::str_detect(n,"^pred")] keepVars <- n[!n %in% extraVars] toSave <- final final <- final[,keepVars] for(Z in seq(4,20,2)) { # We drop the variables UPI if they are not crossed by the data if (sum(final$nbc > final[[paste("UPIb",Z,sep="")]], na.rm=TRUE)==0) { final[[paste("UPIb",Z,sep="")]] <- NULL ## RICHARD CHANGE #break } } table <- tableMOMO(final) EUROMOMO <- EUROMOMOoutput(final, useAUTOMN, datesISO) periods <- list( cumChoice = calcPeriodMOMO(final, momoAttr$WStart, momoAttr$WEnd), # 1. 
using the chosen Period cumWinter = calcPeriodMOMO(final, 40, 20), # 2. WINTER EXCESS WEEK 40 to 20 cumSummer = calcPeriodMOMO(final, 21, 39), # 3. SUMMER EXCESS WEEK 21 to 39 cumYear = calcPeriodMOMO(final, 1, 53, attr(mi, "WStart")), # 4. EXCESS FULL YEAR WEEK 1 to 53 cumSeason = calcPeriodMOMO(final, 27, 26)) # 5. EXCESS FULL SEASON w27 to w26 attr(periods, "WStart") <- momoAttr$WStart attr(periods, "WEnd") <- momoAttr$WEnd return(list( aggregate = aggr, aggregate_fullDelay = aggr_fullDelay, aggregate_delay = aggr_delay, finalDataset = final, MOMOtable=table, EUROMOMOcomplete = EUROMOMO$COMPLETE, EUROMOMOrestricted = EUROMOMO$RESTRICTED, periods = periods, toSave=toSave)) }
#' Create a policy adoption event history data frame #' #' This the short description #' #' And some details #' #' @import dplyr #' #' @param cascades an object of class cascade containing node and cascade #' information. See \code{\link{as_cascade_long}} and #' \code{\link{as_cascade_wide}} for details. #' #' @return Returns an object of class \code{data.frame} #' #' @examples #' #' data(cascades) #' #' @export make_eha_data = function(cascades, networks, decay_parameter, min_time) { # Create the time x cascade_id grid (withing the time range of each cascade) ranges = lapply(1:length(cascades$cascade_times), function(i) { x = cascades$cascade_times[[i]] data_frame(event_time = min(x):max(x), cascade_id = names(cascades$cascade_times[i]) ) }) ranges = do.call(rbind, ranges) # Create event indicator p <- tbl_df(as.data.frame(cascades)) %>% arrange(cascade_id, event_time) %>% mutate(event = 1) # Get for time t number the of adoptions until (including) t - 1 events_by_time = group_by(p, cascade_id, event_time) %>% summarize(count = n()) %>% right_join(ranges, by = c('event_time', 'cascade_id')) %>% group_by(cascade_id) %>% mutate(count = ifelse(is.na(count), 0, count), events_so_far = c(0, cumsum(count)[-length(count)])) %>% dplyr::select(-count) # All node-time combinations for each cascade ranges_list = split(ranges, f = ranges$cascade_id) node_time_combos = lapply(ranges_list, function(x) { tbl_df(expand.grid(cascades$node_names, x$event_time, stringsAsFactors = FALSE)) %>% rename(node_name = Var1, event_time = Var2) %>% mutate(cascade_id = x$cascade_id[1]) }) node_time_combos = do.call(rbind, node_time_combos) %>% # Fill in the event outcome left_join(p, by = c('node_name', 'event_time', 'cascade_id')) %>% # Fill outcome of non-matched node-times with zero mutate(event = ifelse(is.na(event), 0, 1)) %>% # For each time get how many events happened in previous times left_join(events_by_time, by = c('cascade_id', 'event_time')) %>% # Generate cumulative indicator if 
node had event at any preceding time arrange(cascade_id, node_name, event_time) %>% group_by(cascade_id, node_name) %>% mutate(node_cumsum = cumsum(event), node_cumsum = ifelse(event == 1, 0, node_cumsum), decay_weight = decay_event(event, decay_parameter)) %>% ungroup() # Calculate number of events of neighbors for each cascade-node-time neighbor_events = left_join(networks, node_time_combos, by = c("origin_node" = "node_name", "time" = "event_time")) %>% filter(!is.na(node_cumsum)) %>% dplyr::select(-event, -events_so_far) %>% group_by(cascade_id, destination_node, time) %>% summarize(n_neighbor_events = sum(node_cumsum), n_neighbor_events_decay = sum(decay_weight)) eha_data = filter(node_time_combos, event_time >= min_time, node_cumsum == 0) %>% left_join(neighbor_events, by = c('cascade_id' = 'cascade_id', 'node_name' = 'destination_node', 'event_time' = 'time')) %>% dplyr::select(-node_cumsum, -decay_weight) %>% mutate(n_neighbor_events = ifelse(is.na(n_neighbor_events), 0, n_neighbor_events), n_neighbor_events_decay = ifelse(is.na(n_neighbor_events_decay), 0, n_neighbor_events_decay), cascade_id = as.factor(cascade_id)) return(eha_data) } decay_event = function(x, lambda) { if(sum(x) == 0) return(x) idx = which(x == 1) y = seq(0,(length(x)-idx)) dec_seq = lambda * exp(-y/lambda) # Make the weight 0 in the actual adoption year and shift the sequence dec_seq = c(0, dec_seq[-length(dec_seq)]) return(c(rep(0, idx-1), dec_seq)) } #' Evaluate a parameter combination #' #' This the short description #' #' And some details #' #' @import speedglm #' #' @param cascades an object of class cascade containing node and cascade #' information. See \code{\link{as_cascade_long}} and #' \code{\link{as_cascade_wide}} for details. 
#' #' @return Returns an object of class \code{data.frame} #' #' @examples #' #' data(cascades) #' #' @export evaluate_grid_point <- function(n_edges, params, time_window, cascades, min_time = NULL, decay_parameter) { max_time = max(unlist(cascades$cascade_times)) if(is.null(min_time)) min_time = time_window + 1 times = seq(min_time, max_time, 1) # Infer the network for all years networks = do.call(rbind, lapply(times, infer_network, time_window = time_window, cascades = cascades, params = params, n_edges = n_edges)) if(nrow(networks) == 0) return(NA) # Build dataset for event history model event_data = make_eha_data(cascades, networks, decay_parameter, min_time) # Fit model mod <- event ~ events_so_far + n_neighbor_events_decay + cascade_id res <- speedglm::speedglm(mod, data = droplevels(event_data), family = binomial(link = "logit")) return(BIC(res)) } #' Evaluate a parameter grid #' #' This the short description #' #' And some details #' #' @import doParallel #' #' @param cascades an object of class cascade containing node and cascade #' information. See \code{\link{as_cascade_long}} and #' \code{\link{as_cascade_wide}} for details. 
#' #' @return Returns an object of class \code{data.frame} #' #' @examples #' #' data(cascades) #' #' @export grid_search_eha = function(cascades, n_jobs, n_edges, params, time_windows) { grid_points <- expand.grid(params, n_edges, time_windows) # Run cl <- makeCluster(n_jobs) registerDoParallel(cl) results <- foreach(i = 1:nrow(grid_points), .packages = c("dplyr", "NetworkInference", "spid"), .combine = c) %dopar% { evaluate_grid_point(n_edges = grid_points[i, 2], params = grid_points[i, 1], time_window = grid_points[i, 3], cascades = cascades, decay_parameter = grid_points[i, 1]) } stopCluster(cl) out = as.data.frame(cbind(grid_points, results)) colnames(out) = c('params', 'n_edges', 'time_window', 'bic') return(out) } #' Infer diffusion network on time slice of cascades #' #' This the short description #' #' And some details #' #@import checkmate #' #' @param cascades an object of class cascade containing node and cascade #' information. See \code{\link{as_cascade_long}} and #' \code{\link{as_cascade_wide}} for details. #' #' @return Returns an object of class \code{data.frame} #' #' @examples #' #' data(cascades) #' #' @export infer_network <- function(time, time_window, cascades, params, n_edges) { casc = subset_cascade_time(cascades, (time - time_window), time) network = try(netinf(cascades = casc, trans_mod = "exponential", n_edges = n_edges, params = params, quiet = TRUE), silent = TRUE) if(inherits(network, 'try-error')){ warning(paste0('Could not fit netinf for parameter combination:', ' n_edges: ', n_edges, ', params: ', params, ', time: ', time, ', time_window: ', time_window, '. Reason: ', attr(network, 'condition'), '. Returning empty network')) return(data.frame(origin_node = c(), destination_node = c(), year = c())) } network = network[, -c(3, 4)] network$time = time return(network) }
/R/eha_utilities.R
no_license
desmarais-lab/spid
R
false
false
8,269
r
#' Create a policy adoption event history data frame #' #' This the short description #' #' And some details #' #' @import dplyr #' #' @param cascades an object of class cascade containing node and cascade #' information. See \code{\link{as_cascade_long}} and #' \code{\link{as_cascade_wide}} for details. #' #' @return Returns an object of class \code{data.frame} #' #' @examples #' #' data(cascades) #' #' @export make_eha_data = function(cascades, networks, decay_parameter, min_time) { # Create the time x cascade_id grid (withing the time range of each cascade) ranges = lapply(1:length(cascades$cascade_times), function(i) { x = cascades$cascade_times[[i]] data_frame(event_time = min(x):max(x), cascade_id = names(cascades$cascade_times[i]) ) }) ranges = do.call(rbind, ranges) # Create event indicator p <- tbl_df(as.data.frame(cascades)) %>% arrange(cascade_id, event_time) %>% mutate(event = 1) # Get for time t number the of adoptions until (including) t - 1 events_by_time = group_by(p, cascade_id, event_time) %>% summarize(count = n()) %>% right_join(ranges, by = c('event_time', 'cascade_id')) %>% group_by(cascade_id) %>% mutate(count = ifelse(is.na(count), 0, count), events_so_far = c(0, cumsum(count)[-length(count)])) %>% dplyr::select(-count) # All node-time combinations for each cascade ranges_list = split(ranges, f = ranges$cascade_id) node_time_combos = lapply(ranges_list, function(x) { tbl_df(expand.grid(cascades$node_names, x$event_time, stringsAsFactors = FALSE)) %>% rename(node_name = Var1, event_time = Var2) %>% mutate(cascade_id = x$cascade_id[1]) }) node_time_combos = do.call(rbind, node_time_combos) %>% # Fill in the event outcome left_join(p, by = c('node_name', 'event_time', 'cascade_id')) %>% # Fill outcome of non-matched node-times with zero mutate(event = ifelse(is.na(event), 0, 1)) %>% # For each time get how many events happened in previous times left_join(events_by_time, by = c('cascade_id', 'event_time')) %>% # Generate cumulative indicator if 
node had event at any preceding time arrange(cascade_id, node_name, event_time) %>% group_by(cascade_id, node_name) %>% mutate(node_cumsum = cumsum(event), node_cumsum = ifelse(event == 1, 0, node_cumsum), decay_weight = decay_event(event, decay_parameter)) %>% ungroup() # Calculate number of events of neighbors for each cascade-node-time neighbor_events = left_join(networks, node_time_combos, by = c("origin_node" = "node_name", "time" = "event_time")) %>% filter(!is.na(node_cumsum)) %>% dplyr::select(-event, -events_so_far) %>% group_by(cascade_id, destination_node, time) %>% summarize(n_neighbor_events = sum(node_cumsum), n_neighbor_events_decay = sum(decay_weight)) eha_data = filter(node_time_combos, event_time >= min_time, node_cumsum == 0) %>% left_join(neighbor_events, by = c('cascade_id' = 'cascade_id', 'node_name' = 'destination_node', 'event_time' = 'time')) %>% dplyr::select(-node_cumsum, -decay_weight) %>% mutate(n_neighbor_events = ifelse(is.na(n_neighbor_events), 0, n_neighbor_events), n_neighbor_events_decay = ifelse(is.na(n_neighbor_events_decay), 0, n_neighbor_events_decay), cascade_id = as.factor(cascade_id)) return(eha_data) } decay_event = function(x, lambda) { if(sum(x) == 0) return(x) idx = which(x == 1) y = seq(0,(length(x)-idx)) dec_seq = lambda * exp(-y/lambda) # Make the weight 0 in the actual adoption year and shift the sequence dec_seq = c(0, dec_seq[-length(dec_seq)]) return(c(rep(0, idx-1), dec_seq)) } #' Evaluate a parameter combination #' #' This the short description #' #' And some details #' #' @import speedglm #' #' @param cascades an object of class cascade containing node and cascade #' information. See \code{\link{as_cascade_long}} and #' \code{\link{as_cascade_wide}} for details. 
#' #' @return Returns an object of class \code{data.frame} #' #' @examples #' #' data(cascades) #' #' @export evaluate_grid_point <- function(n_edges, params, time_window, cascades, min_time = NULL, decay_parameter) { max_time = max(unlist(cascades$cascade_times)) if(is.null(min_time)) min_time = time_window + 1 times = seq(min_time, max_time, 1) # Infer the network for all years networks = do.call(rbind, lapply(times, infer_network, time_window = time_window, cascades = cascades, params = params, n_edges = n_edges)) if(nrow(networks) == 0) return(NA) # Build dataset for event history model event_data = make_eha_data(cascades, networks, decay_parameter, min_time) # Fit model mod <- event ~ events_so_far + n_neighbor_events_decay + cascade_id res <- speedglm::speedglm(mod, data = droplevels(event_data), family = binomial(link = "logit")) return(BIC(res)) } #' Evaluate a parameter grid #' #' This the short description #' #' And some details #' #' @import doParallel #' #' @param cascades an object of class cascade containing node and cascade #' information. See \code{\link{as_cascade_long}} and #' \code{\link{as_cascade_wide}} for details. 
#' #' @return Returns an object of class \code{data.frame} #' #' @examples #' #' data(cascades) #' #' @export grid_search_eha = function(cascades, n_jobs, n_edges, params, time_windows) { grid_points <- expand.grid(params, n_edges, time_windows) # Run cl <- makeCluster(n_jobs) registerDoParallel(cl) results <- foreach(i = 1:nrow(grid_points), .packages = c("dplyr", "NetworkInference", "spid"), .combine = c) %dopar% { evaluate_grid_point(n_edges = grid_points[i, 2], params = grid_points[i, 1], time_window = grid_points[i, 3], cascades = cascades, decay_parameter = grid_points[i, 1]) } stopCluster(cl) out = as.data.frame(cbind(grid_points, results)) colnames(out) = c('params', 'n_edges', 'time_window', 'bic') return(out) } #' Infer diffusion network on time slice of cascades #' #' This the short description #' #' And some details #' #@import checkmate #' #' @param cascades an object of class cascade containing node and cascade #' information. See \code{\link{as_cascade_long}} and #' \code{\link{as_cascade_wide}} for details. #' #' @return Returns an object of class \code{data.frame} #' #' @examples #' #' data(cascades) #' #' @export infer_network <- function(time, time_window, cascades, params, n_edges) { casc = subset_cascade_time(cascades, (time - time_window), time) network = try(netinf(cascades = casc, trans_mod = "exponential", n_edges = n_edges, params = params, quiet = TRUE), silent = TRUE) if(inherits(network, 'try-error')){ warning(paste0('Could not fit netinf for parameter combination:', ' n_edges: ', n_edges, ', params: ', params, ', time: ', time, ', time_window: ', time_window, '. Reason: ', attr(network, 'condition'), '. Returning empty network')) return(data.frame(origin_node = c(), destination_node = c(), year = c())) } network = network[, -c(3, 4)] network$time = time return(network) }
# visual/video.R
# Builds two charts from the crawled bilibili "video" table:
#   1. a pie chart of the view-count distribution,
#   2. a bar/line chart of monthly new videos vs. the cumulative total.
# Both charts are written to the assets/ directory.

library(DBI)
library(RMySQL)
library(ggplot2)
library(scales)

# NOTE(review): category defaults to "LC_ALL" when only `locale=` is given;
# "UTF-8" alone may not be a valid locale name on every platform -- verify.
Sys.setlocale(locale="UTF-8")

# connect to database
# NOTE(review): credentials are hard-coded in the script.
con <- dbConnect(MySQL(), host="localhost", dbname="bilibili", user="(づ。◕‿‿◕。)づ", password="123456")
# status=0 presumably marks successfully crawled rows -- confirm with the crawler.
video.rawdata <- dbGetQuery(con, "SELECT * FROM video WHERE status=0");

# view chart -----------------------------------------------------------------
# Bucket labels for the view-count pie chart, ordered low to high.
class <- c("<1k", "1k-10k", "10k-100k", "100k-1M", ">1M")
counts <- NULL
# lengths() on a data frame returns the row count once per column, so [1]
# extracts the number of rows falling into each view-count bucket.
counts <- append(counts, lengths(subset(video.rawdata,view<1000))[1])
counts <- append(counts, lengths(subset(video.rawdata,view>=1000 & view<10000))[1])
counts <- append(counts, lengths(subset(video.rawdata,view>=10000 & view<100000))[1])
counts <- append(counts, lengths(subset(video.rawdata,view>=100000 & view<1000000))[1])
counts <- append(counts, lengths(subset(video.rawdata,view>=1000000))[1])
video.view.data <- data.frame(class, counts)
video.view.data.cumsum <- cumsum(counts)   # running totals, used to place slice labels
video.view.data.sum <- sum(counts)         # grand total, used for percentages
# Stacked single bar in polar coordinates == pie chart.
video.view <- ggplot(video.view.data, aes(x="", y=counts, fill=class))
video.view <- video.view + geom_bar(width=1, stat = "identity")
video.view <- video.view + coord_polar("y", start=0)
video.view <- video.view + labs(title="View Distribution")
video.view <- video.view + theme(plot.title=element_text(hjust=0.5))
# Strip axes and grid so only the pie and the legend remain.
video.view <- video.view + theme(axis.ticks=element_blank())
video.view <- video.view + theme(legend.title=element_blank())
video.view <- video.view + theme(axis.title.x=element_blank())
video.view <- video.view + theme(axis.title.y=element_blank())
video.view <- video.view + theme(axis.text.x=element_blank())
video.view <- video.view + theme(axis.text.y=element_blank())
video.view <- video.view + theme(panel.border=element_blank())
video.view <- video.view + theme(panel.grid=element_blank())
# Keep the legend in the same order as `class`.
video.view <- video.view + scale_fill_brewer("Blues", limits=class, direction=-1)
# Percentage labels, each centred in its slice: half its own count past the
# cumulative sum of the earlier slices.  The two smallest slices are nudged
# outward (x=1.1 / x=1.3) so their labels do not overlap.
video.view <- video.view + geom_text(aes(y=counts[1]/2, label=percent(counts[1]/video.view.data.sum)), size=5)
video.view <- video.view + geom_text(aes(y=counts[2]/2 + video.view.data.cumsum[1], label=percent(counts[2]/video.view.data.sum)), size=5)
video.view <- video.view + geom_text(aes(y=counts[3]/2 + video.view.data.cumsum[2], label=percent(counts[3]/video.view.data.sum)), size=5)
video.view <- video.view + geom_text(aes(y=counts[4]/2 + video.view.data.cumsum[3], label=percent(counts[4]/video.view.data.sum), x=1.1), size=5)
video.view <- video.view + geom_text(aes(y=counts[5]/2 + video.view.data.cumsum[4], label=percent(counts[5]/video.view.data.sum), x=1.3), size=5)
ggsave("assets/view_distribution.png")

# pubdate chart --------------------------------------------------------------
video.pubdate.data <- as.POSIXct(video.rawdata$pubdate)
# Bin publication timestamps by calendar month.
video.pubdate.bymonth <- cut(video.pubdate.data, breaks="month")
video.pubdate.split <- split(video.pubdate.data, video.pubdate.bymonth)
video.pubdate.date <- as.POSIXct(names(video.pubdate.split))
video.pubdate.counts <- lengths(video.pubdate.split)
# Month-over-month growth rate; NaN is prepended so the vector aligns with
# the first month (which has no predecessor).  Computed but not plotted below.
video.pubdate.increaserate <- video.pubdate.counts[-1] / video.pubdate.counts[-length(video.pubdate.counts)] - 1
video.pubdate.increaserate <- append(c(NaN), video.pubdate.increaserate)
video.pubdate.cumcnt <- cumsum(video.pubdate.counts)
video.pubdate.plotdata <- data.frame(pubdate=video.pubdate.date, counts=video.pubdate.counts, increaserate=video.pubdate.increaserate, cumsum=video.pubdate.cumcnt)
# Drops the last row (the current, still-incomplete month).  lengths() on the
# data frame yields nrow repeated once per column; repeated negative indices
# are legal in R, but `-nrow(...)` would express the intent more clearly --
# TODO(review) confirm and simplify.
video.pubdate.plotdata <- video.pubdate.plotdata[-lengths(video.pubdate.plotdata),]
# video.pubdate.plotdata[which(video.pubdate.plotdata$increaserate==Inf),"increaserate"] <- NaN
# Suppress scientific notation on axis labels; restored via options(oo) below.
oo <- options(scipen=200)
video.pubdate <- ggplot(data=video.pubdate.plotdata)
video.pubdate <- video.pubdate + labs(title="Bilibili Monthly New Video")
video.pubdate <- video.pubdate + ylab("Number of New Video")
video.pubdate <- video.pubdate + xlab("")
# The cumulative total is drawn on the same panel scaled down by 15; the
# secondary axis multiplies back (~.*15) so its ticks show the true totals.
video.pubdate <- video.pubdate + geom_bar(mapping=aes(x=pubdate, y=cumsum/15), stat="identity", fill="#54BCC3")
video.pubdate <- video.pubdate + scale_y_continuous(sec.axis=sec_axis(~.*15, name="Number of Total Video", breaks=c(5000000,10000000,15000000,20000000,25000000)))
video.pubdate <- video.pubdate + geom_line(mapping=aes(x=pubdate, y=counts, color="New Video"))
video.pubdate <- video.pubdate + scale_x_datetime(breaks=date_breaks("3 month"), date_labels="%Y %b")
# video.pubdate <- video.pubdate + scale_colour_manual(values=c("#54BCC3","#E87D72"))
video.pubdate <- video.pubdate + theme(axis.text.x=element_text(angle=45,vjust=1,hjust=1))
video.pubdate <- video.pubdate + theme(plot.title=element_text(hjust=0.5))
ggsave("assets/monthly_new_video.png", width=14, height=7)
options(oo)

# close database
dbDisconnect(con)
/visual/video.R
no_license
LLipter/bilibiliCrawler
R
false
false
4,673
r
# visual/video.R
# Builds two charts from the crawled bilibili "video" table:
#   1. a pie chart of the view-count distribution,
#   2. a bar/line chart of monthly new videos vs. the cumulative total.
# Both charts are written to the assets/ directory.

library(DBI)
library(RMySQL)
library(ggplot2)
library(scales)

# NOTE(review): category defaults to "LC_ALL" when only `locale=` is given;
# "UTF-8" alone may not be a valid locale name on every platform -- verify.
Sys.setlocale(locale="UTF-8")

# connect to database
# NOTE(review): credentials are hard-coded in the script.
con <- dbConnect(MySQL(), host="localhost", dbname="bilibili", user="(づ。◕‿‿◕。)づ", password="123456")
# status=0 presumably marks successfully crawled rows -- confirm with the crawler.
video.rawdata <- dbGetQuery(con, "SELECT * FROM video WHERE status=0");

# view chart -----------------------------------------------------------------
# Bucket labels for the view-count pie chart, ordered low to high.
class <- c("<1k", "1k-10k", "10k-100k", "100k-1M", ">1M")
counts <- NULL
# lengths() on a data frame returns the row count once per column, so [1]
# extracts the number of rows falling into each view-count bucket.
counts <- append(counts, lengths(subset(video.rawdata,view<1000))[1])
counts <- append(counts, lengths(subset(video.rawdata,view>=1000 & view<10000))[1])
counts <- append(counts, lengths(subset(video.rawdata,view>=10000 & view<100000))[1])
counts <- append(counts, lengths(subset(video.rawdata,view>=100000 & view<1000000))[1])
counts <- append(counts, lengths(subset(video.rawdata,view>=1000000))[1])
video.view.data <- data.frame(class, counts)
video.view.data.cumsum <- cumsum(counts)   # running totals, used to place slice labels
video.view.data.sum <- sum(counts)         # grand total, used for percentages
# Stacked single bar in polar coordinates == pie chart.
video.view <- ggplot(video.view.data, aes(x="", y=counts, fill=class))
video.view <- video.view + geom_bar(width=1, stat = "identity")
video.view <- video.view + coord_polar("y", start=0)
video.view <- video.view + labs(title="View Distribution")
video.view <- video.view + theme(plot.title=element_text(hjust=0.5))
# Strip axes and grid so only the pie and the legend remain.
video.view <- video.view + theme(axis.ticks=element_blank())
video.view <- video.view + theme(legend.title=element_blank())
video.view <- video.view + theme(axis.title.x=element_blank())
video.view <- video.view + theme(axis.title.y=element_blank())
video.view <- video.view + theme(axis.text.x=element_blank())
video.view <- video.view + theme(axis.text.y=element_blank())
video.view <- video.view + theme(panel.border=element_blank())
video.view <- video.view + theme(panel.grid=element_blank())
# Keep the legend in the same order as `class`.
video.view <- video.view + scale_fill_brewer("Blues", limits=class, direction=-1)
# Percentage labels, each centred in its slice: half its own count past the
# cumulative sum of the earlier slices.  The two smallest slices are nudged
# outward (x=1.1 / x=1.3) so their labels do not overlap.
video.view <- video.view + geom_text(aes(y=counts[1]/2, label=percent(counts[1]/video.view.data.sum)), size=5)
video.view <- video.view + geom_text(aes(y=counts[2]/2 + video.view.data.cumsum[1], label=percent(counts[2]/video.view.data.sum)), size=5)
video.view <- video.view + geom_text(aes(y=counts[3]/2 + video.view.data.cumsum[2], label=percent(counts[3]/video.view.data.sum)), size=5)
video.view <- video.view + geom_text(aes(y=counts[4]/2 + video.view.data.cumsum[3], label=percent(counts[4]/video.view.data.sum), x=1.1), size=5)
video.view <- video.view + geom_text(aes(y=counts[5]/2 + video.view.data.cumsum[4], label=percent(counts[5]/video.view.data.sum), x=1.3), size=5)
ggsave("assets/view_distribution.png")

# pubdate chart --------------------------------------------------------------
video.pubdate.data <- as.POSIXct(video.rawdata$pubdate)
# Bin publication timestamps by calendar month.
video.pubdate.bymonth <- cut(video.pubdate.data, breaks="month")
video.pubdate.split <- split(video.pubdate.data, video.pubdate.bymonth)
video.pubdate.date <- as.POSIXct(names(video.pubdate.split))
video.pubdate.counts <- lengths(video.pubdate.split)
# Month-over-month growth rate; NaN is prepended so the vector aligns with
# the first month (which has no predecessor).  Computed but not plotted below.
video.pubdate.increaserate <- video.pubdate.counts[-1] / video.pubdate.counts[-length(video.pubdate.counts)] - 1
video.pubdate.increaserate <- append(c(NaN), video.pubdate.increaserate)
video.pubdate.cumcnt <- cumsum(video.pubdate.counts)
video.pubdate.plotdata <- data.frame(pubdate=video.pubdate.date, counts=video.pubdate.counts, increaserate=video.pubdate.increaserate, cumsum=video.pubdate.cumcnt)
# Drops the last row (the current, still-incomplete month).  lengths() on the
# data frame yields nrow repeated once per column; repeated negative indices
# are legal in R, but `-nrow(...)` would express the intent more clearly --
# TODO(review) confirm and simplify.
video.pubdate.plotdata <- video.pubdate.plotdata[-lengths(video.pubdate.plotdata),]
# video.pubdate.plotdata[which(video.pubdate.plotdata$increaserate==Inf),"increaserate"] <- NaN
# Suppress scientific notation on axis labels; restored via options(oo) below.
oo <- options(scipen=200)
video.pubdate <- ggplot(data=video.pubdate.plotdata)
video.pubdate <- video.pubdate + labs(title="Bilibili Monthly New Video")
video.pubdate <- video.pubdate + ylab("Number of New Video")
video.pubdate <- video.pubdate + xlab("")
# The cumulative total is drawn on the same panel scaled down by 15; the
# secondary axis multiplies back (~.*15) so its ticks show the true totals.
video.pubdate <- video.pubdate + geom_bar(mapping=aes(x=pubdate, y=cumsum/15), stat="identity", fill="#54BCC3")
video.pubdate <- video.pubdate + scale_y_continuous(sec.axis=sec_axis(~.*15, name="Number of Total Video", breaks=c(5000000,10000000,15000000,20000000,25000000)))
video.pubdate <- video.pubdate + geom_line(mapping=aes(x=pubdate, y=counts, color="New Video"))
video.pubdate <- video.pubdate + scale_x_datetime(breaks=date_breaks("3 month"), date_labels="%Y %b")
# video.pubdate <- video.pubdate + scale_colour_manual(values=c("#54BCC3","#E87D72"))
video.pubdate <- video.pubdate + theme(axis.text.x=element_text(angle=45,vjust=1,hjust=1))
video.pubdate <- video.pubdate + theme(plot.title=element_text(hjust=0.5))
ggsave("assets/monthly_new_video.png", width=14, height=7)
options(oo)

# close database
dbDisconnect(con)
# Percentages of inertia (original French header: "Pourcentages d'inertie").
#
# Permutation test for the percentages of inertia of the first two MCA
# dimensions.  Each categorical column of `dataset` is independently
# permuted `nsim` times; the inertia percentages of the permuted tables
# form the null distribution against which the observed values are compared.
#
# Args:
#   dataset: data.frame of factors, suitable for FactoMineR's MCA().
#   nsim:    number of permutations.  Defaults to 200, the value that was
#            previously hard-coded, so existing callers are unaffected.
#
# Returns a list with:
#   $p_iner: nsim x 3 matrix of simulated % of variance for Dim.1, Dim.2
#            and the 1-2 plane.
#   $resum:  3 x 2 matrix with the observed % of variance and the one-sided
#            permutation p-value for each of the three quantities.
p_inertia <- function(dataset, nsim = 200) {
  acm.ref <- MCA(dataset, graph = FALSE)
  p_iner <- matrix(0, nsim, 3)
  for (k in seq_len(nsim)) {
    tab <- dataset
    for (j in seq_len(ncol(dataset))) {
      # Permute each column independently: associations between variables
      # are destroyed while every marginal distribution is kept intact.
      # (An earlier variant resampled from the margins instead:
      # sample(levels(dataset[, j]), nrow(dataset), replace = TRUE, prob = ...))
      tab[, j] <- dataset[sample(nrow(dataset)), j]
    }
    colnames(tab) <- colnames(dataset)
    acm.perm <- MCA(tab, graph = FALSE)
    # eig[[2]] is the "% of variance" column of MCA's eigenvalue table.
    p_iner[k, 1] <- acm.perm$eig[[2]][1]
    p_iner[k, 2] <- acm.perm$eig[[2]][2]
    p_iner[k, 3] <- p_iner[k, 1] + p_iner[k, 2]
  }
  colnames(p_iner) <- c("Dim.1", "Dim.2", "Plan.1-2")
  resum <- matrix(NA, 3, 2)
  rownames(resum) <- c("Dim.1", "Dim.2", "Plan.1-2")
  colnames(resum) <- c("% of variance", "p-value")
  # Observed values: % of variance of Dim.1, Dim.2 and their cumulative %.
  resum[1, 1] <- acm.ref$eig[1, 2]
  resum[2, 1] <- acm.ref$eig[2, 2]
  resum[3, 1] <- acm.ref$eig[2, 3]
  # One-sided permutation p-values: proportion of simulations exceeding the
  # observed value (mean of a logical vector == count / nsim).
  resum[1, 2] <- mean(p_iner[, 1] > acm.ref$eig[1, 2])
  resum[2, 2] <- mean(p_iner[, 2] > acm.ref$eig[2, 2])
  resum[3, 2] <- mean(p_iner[, 3] > acm.ref$eig[2, 3])
  list(p_iner = p_iner, resum = resum)
}
#p_iner=p_inertia(tea,c(1:21,23:36))
/R/p_inertia.R
no_license
cran/EnQuireR
R
false
false
1,352
r
# Percentages of inertia (original French header: "Pourcentages d'inertie").
#
# Permutation test for the percentages of inertia of the first two MCA
# dimensions.  Each categorical column of `dataset` is independently
# permuted `nsim` times; the inertia percentages of the permuted tables
# form the null distribution against which the observed values are compared.
#
# Args:
#   dataset: data.frame of factors, suitable for FactoMineR's MCA().
#   nsim:    number of permutations.  Defaults to 200, the value that was
#            previously hard-coded, so existing callers are unaffected.
#
# Returns a list with:
#   $p_iner: nsim x 3 matrix of simulated % of variance for Dim.1, Dim.2
#            and the 1-2 plane.
#   $resum:  3 x 2 matrix with the observed % of variance and the one-sided
#            permutation p-value for each of the three quantities.
p_inertia <- function(dataset, nsim = 200) {
  acm.ref <- MCA(dataset, graph = FALSE)
  p_iner <- matrix(0, nsim, 3)
  for (k in seq_len(nsim)) {
    tab <- dataset
    for (j in seq_len(ncol(dataset))) {
      # Permute each column independently: associations between variables
      # are destroyed while every marginal distribution is kept intact.
      # (An earlier variant resampled from the margins instead:
      # sample(levels(dataset[, j]), nrow(dataset), replace = TRUE, prob = ...))
      tab[, j] <- dataset[sample(nrow(dataset)), j]
    }
    colnames(tab) <- colnames(dataset)
    acm.perm <- MCA(tab, graph = FALSE)
    # eig[[2]] is the "% of variance" column of MCA's eigenvalue table.
    p_iner[k, 1] <- acm.perm$eig[[2]][1]
    p_iner[k, 2] <- acm.perm$eig[[2]][2]
    p_iner[k, 3] <- p_iner[k, 1] + p_iner[k, 2]
  }
  colnames(p_iner) <- c("Dim.1", "Dim.2", "Plan.1-2")
  resum <- matrix(NA, 3, 2)
  rownames(resum) <- c("Dim.1", "Dim.2", "Plan.1-2")
  colnames(resum) <- c("% of variance", "p-value")
  # Observed values: % of variance of Dim.1, Dim.2 and their cumulative %.
  resum[1, 1] <- acm.ref$eig[1, 2]
  resum[2, 1] <- acm.ref$eig[2, 2]
  resum[3, 1] <- acm.ref$eig[2, 3]
  # One-sided permutation p-values: proportion of simulations exceeding the
  # observed value (mean of a logical vector == count / nsim).
  resum[1, 2] <- mean(p_iner[, 1] > acm.ref$eig[1, 2])
  resum[2, 2] <- mean(p_iner[, 2] > acm.ref$eig[2, 2])
  resum[3, 2] <- mean(p_iner[, 3] > acm.ref$eig[2, 3])
  list(p_iner = p_iner, resum = resum)
}
#p_iner=p_inertia(tea,c(1:21,23:36))
# CDT main-window toolbar (tcltk).
# Builds the toolbar buttons, the image-rescale spinboxes and their
# callbacks.  This script runs at the top level and relies on globals
# defined elsewhere in CDT: w.scale/h.scale, imgdir, tools.frame,
# TextOutputVar, tknotes, main.win, panel.right, main.txt.out, the
# AllOpen* bookkeeping lists, and helpers such as tkbutton.toolbar(),
# infobulle(), status.bar.display(), ttkspinbox(), refreshPlot1(),
# setScrollCanvas(), getOpenFiles(), displayArrayTab(), SavePlot(),
# SaveNotebookTabArray(), Execute_All_Functions() and CloseNotebookTab().

# Default image scale factors; Tk pixel metrics differ between Windows and
# other platforms, hence the two divisors.
if(Sys.info()["sysname"] == "Windows") {
  horiz <- w.scale(70)/385
  verti <- h.scale(60)/385
}else{
  horiz <- w.scale(70)/480
  verti <- h.scale(60)/480
}
horizS <- round(horiz, 1)
vertiS <- round(verti, 1)

# Two icons for the redraw button: normal, and "settings have changed".
hRedraw <- tkimage.create('photo', '-file', file.path(imgdir, 'RedrawButton24.gif', fsep = .Platform$file.sep))
hRedraw1 <- tkimage.create('photo', '-file', file.path(imgdir, 'RedrawButton-Change24.gif', fsep = .Platform$file.sep))

# ---- Toolbar buttons -------------------------------------------------------
tb.open.file <- tkbutton.toolbar(tools.frame, imgdir, "open24.gif", TextOutputVar, "Open file", "Open file format: txt, csv,...")
tb.save.image <- tkbutton.toolbar(tools.frame, imgdir, "save_img24.gif", TextOutputVar, "Save image", "Save image")
tb.open.table <- tkbutton.toolbar(tools.frame, imgdir, "open_table24.gif", TextOutputVar, "Open table", "Open table")
tb.save.table <- tkbutton.toolbar(tools.frame, imgdir, "save_table24.gif", TextOutputVar, "Save table", "Save table")

tb.run <- tkbutton.toolbar(tools.frame, imgdir, "run24.gif", TextOutputVar, "Execute", "Execute the append task")

# ---- Image scale spinboxes (enabled elsewhere when a plot tab is active) ---
lspinH <- tklabel(tools.frame, text = 'Width:')
spinH <- ttkspinbox(tools.frame, from = 0.5, to = 5.0, increment = 0.1, justify = 'center', width = 6, state = 'disabled')
tkset(spinH, horizS)
infobulle(lspinH, 'Horizontal scale factor for image size')
status.bar.display(lspinH, TextOutputVar, 'Horizontal scale factor for image size')
infobulle(spinH, 'Horizontal scale factor for image size')
status.bar.display(spinH, TextOutputVar, 'Horizontal scale factor for image size')

lspinV <- tklabel(tools.frame, text = 'Height:')
spinV <- ttkspinbox(tools.frame, from = 0.5, to = 5.0, increment = 0.1, justify = 'center', width = 6, state = 'disabled')
tkset(spinV, vertiS)
infobulle(lspinV, 'Vertical scale factor for image size')
status.bar.display(lspinV, TextOutputVar, 'Vertical scale factor for image size')
infobulle(spinV, 'Vertical scale factor for image size')
status.bar.display(spinV, TextOutputVar, 'Vertical scale factor for image size')

plotRedraw <- tkbutton.toolbar(tools.frame, imgdir, "RedrawButton24.gif", TextOutputVar, "Redraw plot", "Redraw plot")

tb.close.tab <- tkbutton.toolbar(tools.frame, imgdir, "close_tab24.gif", TextOutputVar, "Close active Tab", "Close active tab")
tb.exit.win <- tkbutton.toolbar(tools.frame, imgdir, "exit24.gif", TextOutputVar, "Quit CDT", "Quit CDT")

# ---- Layout ----------------------------------------------------------------
tb.separator0 <- ttkseparator(tools.frame, orient = 'vertical')
tb.separator1 <- ttkseparator(tools.frame, orient = 'vertical')
tb.separator2 <- ttkseparator(tools.frame, orient = 'vertical')
tb.separator3 <- ttkseparator(tools.frame, orient = 'vertical')

# Single-row grid: file group | table group | run | spinboxes+redraw | tab/exit.
tkgrid(tb.open.file, tb.save.image, tb.separator0, tb.open.table, tb.save.table, tb.separator1, tb.run, tb.separator2, lspinH, spinH, lspinV, spinV, plotRedraw, tb.separator3, tb.close.tab, tb.exit.win)

tkgrid.configure(tb.separator0, sticky = 'ns')
tkgrid.configure(tb.separator1, sticky = 'ns')
tkgrid.configure(tb.separator2, sticky = 'ns', padx = 20)
tkgrid.configure(tb.separator3, sticky = 'ns', padx = 20)
tkgrid.configure(tb.open.file, padx = 5)
tkgrid.configure(tb.save.image, padx = 5)
tkgrid.configure(tb.open.table, padx = 5)
tkgrid.configure(tb.save.table, padx = 5)
tkgrid.configure(tb.run, padx = 20, ipadx = 5)
tkgrid.configure(plotRedraw, padx = 5)
tkgrid.configure(tb.close.tab, padx = 5)
tkgrid.configure(tb.exit.win, padx = 30, sticky = 'e')

# ---- Change plot window scale ----------------------------------------------
# Redraw the image of the active notebook tab with the current spinbox scale
# factors, then restore the button's normal icon.
tkconfigure(plotRedraw, relief = 'raised', command = function(){
  # tkindex() is 0-based; +1 converts to an R list index.
  tabid <- as.numeric(tclvalue(tkindex(tknotes, 'current')))+1
  if(length(AllOpenTabType) > 0){
    if(AllOpenTabType[[tabid]] == "img"){
      if(class(AllOpenTabData[[tabid]][[2]]) == "tkwin"){
        # NOTE(review): W and img are both taken from the same slot here,
        # unlike the "list" branch below -- confirm this is intended.
        W <- AllOpenTabData[[tabid]][[2]]
        img <- AllOpenTabData[[tabid]][[2]]
        refreshPlot1(W = W, img = img, hscale = as.numeric(tclvalue(tkget(spinH))), vscale = as.numeric(tclvalue(tkget(spinV))))
      }
      if(class(AllOpenTabData[[tabid]][[2]]) == "list"){
        W <- AllOpenTabData[[tabid]][[2]][[1]]
        img <- AllOpenTabData[[tabid]][[2]][[2]]
        refreshPlot1(W = W, img = img, hscale = as.numeric(tclvalue(tkget(spinH))), vscale = as.numeric(tclvalue(tkget(spinV))))
        # Resize the scroll canvas when the tab's child widget is a Frame.
        if(tclvalue(tkwinfo('class', tkwinfo('children', AllOpenTabData[[tabid]][[1]][[2]]))) == "Frame"){
          w <- as.double(tkwinfo("width", panel.right))
          h <- as.double(tkwinfo("height", panel.right))
          setScrollCanvas(W, w, h)
        }
      }
      tkconfigure(plotRedraw, image = hRedraw)
    }
  }
})

# Restore the normal icon after a click; switch to the "changed" icon
# whenever either spinbox value is incremented or decremented.
tkbind(plotRedraw, "<ButtonRelease>", function(){
  tkconfigure(plotRedraw, image = hRedraw)
})
tkbind(spinH, "<<Increment>>", function(){
  tkconfigure(plotRedraw, image = hRedraw1)
})
tkbind(spinH, "<<Decrement>>", function(){
  tkconfigure(plotRedraw, image = hRedraw1)
})
tkbind(spinV, "<<Increment>>", function(){
  tkconfigure(plotRedraw, image = hRedraw1)
})
tkbind(spinV, "<<Decrement>>", function(){
  tkconfigure(plotRedraw, image = hRedraw1)
})

# ---- Configure command toolbars --------------------------------------------
# Open a delimited text file and register it in the AllOpenFiles* lists.
# Cursor is switched to 'watch' while the dialog/parse runs.
tkconfigure(tb.open.file, state = 'normal', command = function(){
  tkconfigure(main.win, cursor = 'watch'); tcl('update')
  dat.opfiles <- getOpenFiles(main.win, all.opfiles)
  tkconfigure(main.win, cursor = '')
  if(!is.null(dat.opfiles)){
    nopf <- length(AllOpenFilesType)
    AllOpenFilesType[[nopf+1]] <<- 'ascii'
    AllOpenFilesData[[nopf+1]] <<- dat.opfiles
  }else{
    return(NULL)
  }
})

# Save the plot of the active tab to disk.
tkconfigure(tb.save.image, state = 'normal', command = function(){
  ## add options (width, height in px, in, cm)
  ## add jpeg/png/gif
  SavePlot()
})

# Open a table in a new notebook tab and register it in the AllOpenTab* lists.
tkconfigure(tb.open.table, state = 'normal', command = function() {
  tab.array <- displayArrayTab(main.win, tknotes)
  if(!is.null(tab.array)){
    ntab <- length(AllOpenTabType)
    AllOpenTabType[[ntab+1]] <<- 'arr'
    AllOpenTabData[[ntab+1]] <<- tab.array
    # tkselect() takes the 0-based index of the newly added tab.
    tkselect(tknotes, ntab)
  }else{
    return(NULL)
  }
})

# Save the table of the active tab; failures are reported in the output
# console instead of raising an error (try() keeps the GUI alive).
tkconfigure(tb.save.table, state = 'normal', command = function(){
  if(!is.null(ReturnExecResults)){
    tkconfigure(main.win, cursor = 'watch')
    tcl('update')
    tab2sav <- try(SaveNotebookTabArray(tknotes), silent = TRUE)
    if(!inherits(tab2sav, "try-error")){
      InsertMessagesTxt(main.txt.out, "Table saved successfully")
      tkconfigure(main.win, cursor = '')
    }else{
      InsertMessagesTxt(main.txt.out, "The table could not be saved", format = TRUE)
      InsertMessagesTxt(main.txt.out, gsub('[\r\n]', '', tab2sav[1]), format = TRUE)
      tkconfigure(main.win, cursor = '')
      return(NULL)
    }
  }else{
    return(NULL)
  }
})

# ---- Run Task --------------------------------------------------------------
# Execute the currently configured task; the result is stored in the global
# ReturnExecResults.  Errors are routed to errorFun(); the cursor is always
# restored via `finally`.
tkconfigure(tb.run, state = 'normal', command = function(){
  if(is.null(GeneralParameters)){
    return(NULL)
  }else{
    tkconfigure(main.win, cursor = 'watch'); tcl('update')
    ReturnExecResults <<- tryCatch(Execute_All_Functions(tclvalue(lchoixStnFr$env$stn.choix.val)),
      #warning = function(w) warningFun(w),
      error = function(e) errorFun(e),
      finally = {
        tkconfigure(main.win, cursor = '')
      })
  }
})

# ---- Close CDT -------------------------------------------------------------
## TODO(review): ask whether to save if tabs are still open (translated from
## the original French note: "demande de sauver s'il y a encore des onglets
## ouverts").
tkconfigure(tb.exit.win, state = 'normal', command = function(){
  on.exit({
    #sink(type = "message")
    #close(msgOUT)
    options(warn = 0)
  })
  tkdestroy(main.win)
})

# ---- Close Notebook Tab ----------------------------------------------------
# NOTE(review): unlike the redraw callback, tabid is passed 0-based here --
# confirm CloseNotebookTab() expects the 0-based notebook index.
tkconfigure(tb.close.tab, state = 'normal', command = function(){
  tabid <- as.numeric(tclvalue(tkindex(tknotes, 'current')))
  CloseNotebookTab(tabid)
})
/functions/cdtToolbar_functions.R
no_license
rijaf/CDT
R
false
false
7,733
r
# CDT main-window toolbar (tcltk).
# Builds the toolbar buttons, the image-rescale spinboxes and their
# callbacks.  This script runs at the top level and relies on globals
# defined elsewhere in CDT: w.scale/h.scale, imgdir, tools.frame,
# TextOutputVar, tknotes, main.win, panel.right, main.txt.out, the
# AllOpen* bookkeeping lists, and helpers such as tkbutton.toolbar(),
# infobulle(), status.bar.display(), ttkspinbox(), refreshPlot1(),
# setScrollCanvas(), getOpenFiles(), displayArrayTab(), SavePlot(),
# SaveNotebookTabArray(), Execute_All_Functions() and CloseNotebookTab().

# Default image scale factors; Tk pixel metrics differ between Windows and
# other platforms, hence the two divisors.
if(Sys.info()["sysname"] == "Windows") {
  horiz <- w.scale(70)/385
  verti <- h.scale(60)/385
}else{
  horiz <- w.scale(70)/480
  verti <- h.scale(60)/480
}
horizS <- round(horiz, 1)
vertiS <- round(verti, 1)

# Two icons for the redraw button: normal, and "settings have changed".
hRedraw <- tkimage.create('photo', '-file', file.path(imgdir, 'RedrawButton24.gif', fsep = .Platform$file.sep))
hRedraw1 <- tkimage.create('photo', '-file', file.path(imgdir, 'RedrawButton-Change24.gif', fsep = .Platform$file.sep))

# ---- Toolbar buttons -------------------------------------------------------
tb.open.file <- tkbutton.toolbar(tools.frame, imgdir, "open24.gif", TextOutputVar, "Open file", "Open file format: txt, csv,...")
tb.save.image <- tkbutton.toolbar(tools.frame, imgdir, "save_img24.gif", TextOutputVar, "Save image", "Save image")
tb.open.table <- tkbutton.toolbar(tools.frame, imgdir, "open_table24.gif", TextOutputVar, "Open table", "Open table")
tb.save.table <- tkbutton.toolbar(tools.frame, imgdir, "save_table24.gif", TextOutputVar, "Save table", "Save table")

tb.run <- tkbutton.toolbar(tools.frame, imgdir, "run24.gif", TextOutputVar, "Execute", "Execute the append task")

# ---- Image scale spinboxes (enabled elsewhere when a plot tab is active) ---
lspinH <- tklabel(tools.frame, text = 'Width:')
spinH <- ttkspinbox(tools.frame, from = 0.5, to = 5.0, increment = 0.1, justify = 'center', width = 6, state = 'disabled')
tkset(spinH, horizS)
infobulle(lspinH, 'Horizontal scale factor for image size')
status.bar.display(lspinH, TextOutputVar, 'Horizontal scale factor for image size')
infobulle(spinH, 'Horizontal scale factor for image size')
status.bar.display(spinH, TextOutputVar, 'Horizontal scale factor for image size')

lspinV <- tklabel(tools.frame, text = 'Height:')
spinV <- ttkspinbox(tools.frame, from = 0.5, to = 5.0, increment = 0.1, justify = 'center', width = 6, state = 'disabled')
tkset(spinV, vertiS)
infobulle(lspinV, 'Vertical scale factor for image size')
status.bar.display(lspinV, TextOutputVar, 'Vertical scale factor for image size')
infobulle(spinV, 'Vertical scale factor for image size')
status.bar.display(spinV, TextOutputVar, 'Vertical scale factor for image size')

plotRedraw <- tkbutton.toolbar(tools.frame, imgdir, "RedrawButton24.gif", TextOutputVar, "Redraw plot", "Redraw plot")

tb.close.tab <- tkbutton.toolbar(tools.frame, imgdir, "close_tab24.gif", TextOutputVar, "Close active Tab", "Close active tab")
tb.exit.win <- tkbutton.toolbar(tools.frame, imgdir, "exit24.gif", TextOutputVar, "Quit CDT", "Quit CDT")

# ---- Layout ----------------------------------------------------------------
tb.separator0 <- ttkseparator(tools.frame, orient = 'vertical')
tb.separator1 <- ttkseparator(tools.frame, orient = 'vertical')
tb.separator2 <- ttkseparator(tools.frame, orient = 'vertical')
tb.separator3 <- ttkseparator(tools.frame, orient = 'vertical')

# Single-row grid: file group | table group | run | spinboxes+redraw | tab/exit.
tkgrid(tb.open.file, tb.save.image, tb.separator0, tb.open.table, tb.save.table, tb.separator1, tb.run, tb.separator2, lspinH, spinH, lspinV, spinV, plotRedraw, tb.separator3, tb.close.tab, tb.exit.win)

tkgrid.configure(tb.separator0, sticky = 'ns')
tkgrid.configure(tb.separator1, sticky = 'ns')
tkgrid.configure(tb.separator2, sticky = 'ns', padx = 20)
tkgrid.configure(tb.separator3, sticky = 'ns', padx = 20)
tkgrid.configure(tb.open.file, padx = 5)
tkgrid.configure(tb.save.image, padx = 5)
tkgrid.configure(tb.open.table, padx = 5)
tkgrid.configure(tb.save.table, padx = 5)
tkgrid.configure(tb.run, padx = 20, ipadx = 5)
tkgrid.configure(plotRedraw, padx = 5)
tkgrid.configure(tb.close.tab, padx = 5)
tkgrid.configure(tb.exit.win, padx = 30, sticky = 'e')

# ---- Change plot window scale ----------------------------------------------
# Redraw the image of the active notebook tab with the current spinbox scale
# factors, then restore the button's normal icon.
tkconfigure(plotRedraw, relief = 'raised', command = function(){
  # tkindex() is 0-based; +1 converts to an R list index.
  tabid <- as.numeric(tclvalue(tkindex(tknotes, 'current')))+1
  if(length(AllOpenTabType) > 0){
    if(AllOpenTabType[[tabid]] == "img"){
      if(class(AllOpenTabData[[tabid]][[2]]) == "tkwin"){
        # NOTE(review): W and img are both taken from the same slot here,
        # unlike the "list" branch below -- confirm this is intended.
        W <- AllOpenTabData[[tabid]][[2]]
        img <- AllOpenTabData[[tabid]][[2]]
        refreshPlot1(W = W, img = img, hscale = as.numeric(tclvalue(tkget(spinH))), vscale = as.numeric(tclvalue(tkget(spinV))))
      }
      if(class(AllOpenTabData[[tabid]][[2]]) == "list"){
        W <- AllOpenTabData[[tabid]][[2]][[1]]
        img <- AllOpenTabData[[tabid]][[2]][[2]]
        refreshPlot1(W = W, img = img, hscale = as.numeric(tclvalue(tkget(spinH))), vscale = as.numeric(tclvalue(tkget(spinV))))
        # Resize the scroll canvas when the tab's child widget is a Frame.
        if(tclvalue(tkwinfo('class', tkwinfo('children', AllOpenTabData[[tabid]][[1]][[2]]))) == "Frame"){
          w <- as.double(tkwinfo("width", panel.right))
          h <- as.double(tkwinfo("height", panel.right))
          setScrollCanvas(W, w, h)
        }
      }
      tkconfigure(plotRedraw, image = hRedraw)
    }
  }
})

# Restore the normal icon after a click; switch to the "changed" icon
# whenever either spinbox value is incremented or decremented.
tkbind(plotRedraw, "<ButtonRelease>", function(){
  tkconfigure(plotRedraw, image = hRedraw)
})
tkbind(spinH, "<<Increment>>", function(){
  tkconfigure(plotRedraw, image = hRedraw1)
})
tkbind(spinH, "<<Decrement>>", function(){
  tkconfigure(plotRedraw, image = hRedraw1)
})
tkbind(spinV, "<<Increment>>", function(){
  tkconfigure(plotRedraw, image = hRedraw1)
})
tkbind(spinV, "<<Decrement>>", function(){
  tkconfigure(plotRedraw, image = hRedraw1)
})

# ---- Configure command toolbars --------------------------------------------
# Open a delimited text file and register it in the AllOpenFiles* lists.
# Cursor is switched to 'watch' while the dialog/parse runs.
tkconfigure(tb.open.file, state = 'normal', command = function(){
  tkconfigure(main.win, cursor = 'watch'); tcl('update')
  dat.opfiles <- getOpenFiles(main.win, all.opfiles)
  tkconfigure(main.win, cursor = '')
  if(!is.null(dat.opfiles)){
    nopf <- length(AllOpenFilesType)
    AllOpenFilesType[[nopf+1]] <<- 'ascii'
    AllOpenFilesData[[nopf+1]] <<- dat.opfiles
  }else{
    return(NULL)
  }
})

# Save the plot of the active tab to disk.
tkconfigure(tb.save.image, state = 'normal', command = function(){
  ## add options (width, height in px, in, cm)
  ## add jpeg/png/gif
  SavePlot()
})

# Open a table in a new notebook tab and register it in the AllOpenTab* lists.
tkconfigure(tb.open.table, state = 'normal', command = function() {
  tab.array <- displayArrayTab(main.win, tknotes)
  if(!is.null(tab.array)){
    ntab <- length(AllOpenTabType)
    AllOpenTabType[[ntab+1]] <<- 'arr'
    AllOpenTabData[[ntab+1]] <<- tab.array
    # tkselect() takes the 0-based index of the newly added tab.
    tkselect(tknotes, ntab)
  }else{
    return(NULL)
  }
})

# Save the table of the active tab; failures are reported in the output
# console instead of raising an error (try() keeps the GUI alive).
tkconfigure(tb.save.table, state = 'normal', command = function(){
  if(!is.null(ReturnExecResults)){
    tkconfigure(main.win, cursor = 'watch')
    tcl('update')
    tab2sav <- try(SaveNotebookTabArray(tknotes), silent = TRUE)
    if(!inherits(tab2sav, "try-error")){
      InsertMessagesTxt(main.txt.out, "Table saved successfully")
      tkconfigure(main.win, cursor = '')
    }else{
      InsertMessagesTxt(main.txt.out, "The table could not be saved", format = TRUE)
      InsertMessagesTxt(main.txt.out, gsub('[\r\n]', '', tab2sav[1]), format = TRUE)
      tkconfigure(main.win, cursor = '')
      return(NULL)
    }
  }else{
    return(NULL)
  }
})

# ---- Run Task --------------------------------------------------------------
# Execute the currently configured task; the result is stored in the global
# ReturnExecResults.  Errors are routed to errorFun(); the cursor is always
# restored via `finally`.
tkconfigure(tb.run, state = 'normal', command = function(){
  if(is.null(GeneralParameters)){
    return(NULL)
  }else{
    tkconfigure(main.win, cursor = 'watch'); tcl('update')
    ReturnExecResults <<- tryCatch(Execute_All_Functions(tclvalue(lchoixStnFr$env$stn.choix.val)),
      #warning = function(w) warningFun(w),
      error = function(e) errorFun(e),
      finally = {
        tkconfigure(main.win, cursor = '')
      })
  }
})

# ---- Close CDT -------------------------------------------------------------
## TODO(review): ask whether to save if tabs are still open (translated from
## the original French note: "demande de sauver s'il y a encore des onglets
## ouverts").
tkconfigure(tb.exit.win, state = 'normal', command = function(){
  on.exit({
    #sink(type = "message")
    #close(msgOUT)
    options(warn = 0)
  })
  tkdestroy(main.win)
})

# ---- Close Notebook Tab ----------------------------------------------------
# NOTE(review): unlike the redraw callback, tabid is passed 0-based here --
# confirm CloseNotebookTab() expects the 0-based notebook index.
tkconfigure(tb.close.tab, state = 'normal', command = function(){
  tabid <- as.numeric(tclvalue(tkindex(tknotes, 'current')))
  CloseNotebookTab(tabid)
})
\name{IlluminaMethylationManifest-class} \Rdversion{1.1} \docType{class} \alias{IlluminaMethylationManifest-class} \alias{IlluminaMethylationManifest} \alias{show,IlluminaMethylationManifest-method} \alias{getManifest,IlluminaMethylationManifest-method} \alias{getManifest,character-method} \alias{getManifest} \alias{getProbeInfo} \alias{getManifestInfo} \alias{getControlAddress} \alias{getControlTypes} \title{Class \code{"IlluminaMethylationManifest"}} \description{ This is a class for representing an Illumina methylation microarray design, ie. the physical location and the probe sequences. This information should be independent of genome build and annotation. } \usage{ ## Constructor IlluminaMethylationManifest(TypeI = new("DataFrame"), TypeII = new("DataFrame"), TypeControl = new("DataFrame"), TypeSnpI = new("DataFrame"), TypeSnpII = new("DataFrame"), annotation = "") ## Data extraction \S4method{getManifest}{IlluminaMethylationManifest}(object) \S4method{getManifest}{character}(object) getProbeInfo(object, type = c("I", "II", "Control", "I-Green", "I-Red", "SnpI", "SnpII")) getManifestInfo(object, type = c("nLoci", "locusNames")) getControlAddress(object, controlType = c("NORM_A", "NORM_C", "NORM_G", "NORM_T"), asList = FALSE) getControlTypes(object) } \arguments{ \item{object}{Either an object of class \code{IlluminaMethylationManifest} or class \code{character} for \code{getManifest}. For \code{getProbeInfo}, \code{getManifestInfo} and \code{getControlAddress} an object of either class \code{RGChannelSet}, \code{IlluminaMethylationManifest}.} \item{TypeI}{A \code{DataFrame} of type I probes.} \item{TypeII}{A \code{DataFrame} of type II probes.} \item{TypeControl}{A \code{DataFrame} of control probes.} \item{TypeSnpI}{A \code{DataFrame} of SNP type I probes.} \item{TypeSnpII}{A \code{DataFrame} of SNP type II probes.} \item{annotation}{An annotation \code{character}.} \item{type}{A single character describing what kind of information should be returned. 
For \code{getProbeInfo} it represents the following subtypes of probes on
    the array: Type I, Type II, Controls as well as Type I (methylation
    measured in the Green channel) and Type I (methylation measured in the
    Red channel).  For \code{getManifestInfo} it represents either the
    number of methylation loci (approx. number of CpGs) on the array or the
    locus names.}
  \item{controlType}{A character vector of control types.}
  \item{asList}{If \code{TRUE} the return object is a list with one
    component for each \code{controlType}.}
}
\section{Details}{
  The \code{data} slot contains the following objects: \code{TypeI},
  \code{TypeII} and \code{TypeControl} which are all of class
  \code{data.frame}, describing the array design.

  Methylation loci of type I are measured using two different probes, in
  either the red or the green channel.  The columns \code{AddressA},
  \code{AddressB} describe the physical location of the two probes on the
  array (with \code{ProbeSeqA}, \code{ProbeSeqB} giving the probe
  sequences), and the column \code{Color} describes which color channel is
  used.

  Methylation loci of type II are measured using a single probe, but with
  two different color channels.  The methylation signal is always measured
  in the green channel.
}
\section{Utilities}{
  In the following code, \code{object} is an
  \code{IlluminaMethylationManifest}.

  \describe{
    \item{\code{getManifest(object)}}{Get the manifest object.}
    \item{\code{getProbeInfo(object)}}{Returns a \code{DataFrame} giving
      the type I, type II or control probes.  It is also possible to get
      the type I probes measured in the Green or Red channel.  This
      function ensures that the return object only contains probes which
      are part of the input object.
In case of a \code{RGChannelSet} and type I probes, both addresses needs to be in the object.} \item{\code{getManifestInfo(object)}}{Get some information about the manifest object (the chip design).} \item{\code{getControlAddress(object)}}{Get the control addresses for control probes of a certain type.} \item{\code{getControlTypes(object)}}{Returns the types and the numbers of control probes of each type.} } } \author{ Kasper Daniel Hansen \email{khansen@jhsph.edu}. } \seealso{ \code{\link{IlluminaMethylationAnnotation}} for annotation information for the array (information depending on a specific genome build). } \examples{ if(require(IlluminaHumanMethylation450kmanifest)) { show(IlluminaHumanMethylation450kmanifest) head(getProbeInfo(IlluminaHumanMethylation450kmanifest, type = "I")) head(IlluminaHumanMethylation450kmanifest@data$TypeI) head(IlluminaHumanMethylation450kmanifest@data$TypeII) head(IlluminaHumanMethylation450kmanifest@data$TypeControl) } } \keyword{classes}
/man/IlluminaMethylationManifest-class.Rd
no_license
aleiyishi/minfi
R
false
false
5,091
rd
\name{IlluminaMethylationManifest-class} \Rdversion{1.1} \docType{class} \alias{IlluminaMethylationManifest-class} \alias{IlluminaMethylationManifest} \alias{show,IlluminaMethylationManifest-method} \alias{getManifest,IlluminaMethylationManifest-method} \alias{getManifest,character-method} \alias{getManifest} \alias{getProbeInfo} \alias{getManifestInfo} \alias{getControlAddress} \alias{getControlTypes} \title{Class \code{"IlluminaMethylationManifest"}} \description{ This is a class for representing an Illumina methylation microarray design, ie. the physical location and the probe sequences. This information should be independent of genome build and annotation. } \usage{ ## Constructor IlluminaMethylationManifest(TypeI = new("DataFrame"), TypeII = new("DataFrame"), TypeControl = new("DataFrame"), TypeSnpI = new("DataFrame"), TypeSnpII = new("DataFrame"), annotation = "") ## Data extraction \S4method{getManifest}{IlluminaMethylationManifest}(object) \S4method{getManifest}{character}(object) getProbeInfo(object, type = c("I", "II", "Control", "I-Green", "I-Red", "SnpI", "SnpII")) getManifestInfo(object, type = c("nLoci", "locusNames")) getControlAddress(object, controlType = c("NORM_A", "NORM_C", "NORM_G", "NORM_T"), asList = FALSE) getControlTypes(object) } \arguments{ \item{object}{Either an object of class \code{IlluminaMethylationManifest} or class \code{character} for \code{getManifest}. For \code{getProbeInfo}, \code{getManifestInfo} and \code{getControlAddress} an object of either class \code{RGChannelSet}, \code{IlluminaMethylationManifest}.} \item{TypeI}{A \code{DataFrame} of type I probes.} \item{TypeII}{A \code{DataFrame} of type II probes.} \item{TypeControl}{A \code{DataFrame} of control probes.} \item{TypeSnpI}{A \code{DataFrame} of SNP type I probes.} \item{TypeSnpII}{A \code{DataFrame} of SNP type II probes.} \item{annotation}{An annotation \code{character}.} \item{type}{A single character describing what kind of information should be returned. 
For \code{getProbeInfo} it represents the following subtypes of probes on
    the array: Type I, Type II, Controls as well as Type I (methylation
    measured in the Green channel) and Type I (methylation measured in the
    Red channel).  For \code{getManifestInfo} it represents either the
    number of methylation loci (approx. number of CpGs) on the array or the
    locus names.}
  \item{controlType}{A character vector of control types.}
  \item{asList}{If \code{TRUE} the return object is a list with one
    component for each \code{controlType}.}
}
\section{Details}{
  The \code{data} slot contains the following objects: \code{TypeI},
  \code{TypeII} and \code{TypeControl} which are all of class
  \code{data.frame}, describing the array design.

  Methylation loci of type I are measured using two different probes, in
  either the red or the green channel.  The columns \code{AddressA},
  \code{AddressB} describe the physical location of the two probes on the
  array (with \code{ProbeSeqA}, \code{ProbeSeqB} giving the probe
  sequences), and the column \code{Color} describes which color channel is
  used.

  Methylation loci of type II are measured using a single probe, but with
  two different color channels.  The methylation signal is always measured
  in the green channel.
}
\section{Utilities}{
  In the following code, \code{object} is an
  \code{IlluminaMethylationManifest}.

  \describe{
    \item{\code{getManifest(object)}}{Get the manifest object.}
    \item{\code{getProbeInfo(object)}}{Returns a \code{DataFrame} giving
      the type I, type II or control probes.  It is also possible to get
      the type I probes measured in the Green or Red channel.  This
      function ensures that the return object only contains probes which
      are part of the input object.
In case of a \code{RGChannelSet} and type I probes, both addresses needs to be in the object.} \item{\code{getManifestInfo(object)}}{Get some information about the manifest object (the chip design).} \item{\code{getControlAddress(object)}}{Get the control addresses for control probes of a certain type.} \item{\code{getControlTypes(object)}}{Returns the types and the numbers of control probes of each type.} } } \author{ Kasper Daniel Hansen \email{khansen@jhsph.edu}. } \seealso{ \code{\link{IlluminaMethylationAnnotation}} for annotation information for the array (information depending on a specific genome build). } \examples{ if(require(IlluminaHumanMethylation450kmanifest)) { show(IlluminaHumanMethylation450kmanifest) head(getProbeInfo(IlluminaHumanMethylation450kmanifest, type = "I")) head(IlluminaHumanMethylation450kmanifest@data$TypeI) head(IlluminaHumanMethylation450kmanifest@data$TypeII) head(IlluminaHumanMethylation450kmanifest@data$TypeControl) } } \keyword{classes}
# Plotting with the "ggplot2" package.
library(ggplot2)

# Read a semicolon-separated dataset chosen interactively by the user.
a <- read.csv(file.choose(), sep = ";")

# Scatter plot of the relationship between study time and the final
# Portuguese grade (G3), plus a reference line and the coefficients of the
# corresponding linear fit.
p <- qplot(studytime, G3, data = a)
p + geom_abline(colour = "blue")
coef(lm(G3 ~ studytime, data = a))

# Density plot of the distribution of the final grade.
k <- qplot(G3, data = a, geom = "density")
/Lecture 11 Result/code_11.R
no_license
khscor/Hansol_Kim
R
false
false
306
r
# Load ggplot2 for the plots below.
library(ggplot2)

# The dataset (semicolon separated) is selected interactively.
grades <- read.csv(file.choose(), sep = ";")

# Scatter plot of study time against the final grade (G3), with a reference
# line drawn on top, followed by the coefficients of the linear fit.
scatter <- qplot(studytime, G3, data = grades)
scatter + geom_abline(colour = "blue")
coef(lm(G3 ~ studytime, data = grades))

# Kernel density estimate of the final grade distribution.
density_plot <- qplot(G3, data = grades, geom = "density")
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!# #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!# #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!# #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!# #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!# # # This software was authored by Zhian N. Kamvar and Javier F. Tabima, graduate # students at Oregon State University; Jonah C. Brooks, undergraduate student at # Oregon State University; and Dr. Nik Grünwald, an employee of USDA-ARS. # # Permission to use, copy, modify, and distribute this software and its # documentation for educational, research and non-profit purposes, without fee, # and without a written agreement is hereby granted, provided that the statement # above is incorporated into the material, giving appropriate attribution to the # authors. # # Permission to incorporate this software into commercial products may be # obtained by contacting USDA ARS and OREGON STATE UNIVERSITY Office for # Commercialization and Corporate Development. # # The software program and documentation are supplied "as is", without any # accompanying services from the USDA or the University. USDA ARS or the # University do not warrant that the operation of the program will be # uninterrupted or error-free. The end-user understands that the program was # developed for research purposes and is advised not to rely exclusively on the # program for any reason. # # IN NO EVENT SHALL USDA ARS OR OREGON STATE UNIVERSITY BE LIABLE TO ANY PARTY # FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING # LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, # EVEN IF THE OREGON STATE UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
USDA ARS OR OREGON STATE UNIVERSITY SPECIFICALLY DISCLAIMS ANY # WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE AND ANY STATUTORY # WARRANTY OF NON-INFRINGEMENT. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" # BASIS, AND USDA ARS AND OREGON STATE UNIVERSITY HAVE NO OBLIGATIONS TO PROVIDE # MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. # #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!# #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!# #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!# #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!# #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!# #==============================================================================# #' Calculate a distance matrix comparing samples based on the number of alleles #' that differ in zygosity. #' #' This function does pairwise comparisons between diploid samples in a genlight #' object. The number representing the distance between two samples is equal to #' the number of alleles in the samples that do not have the same zygosity. #' #' @param x a genlight, genind, or genclone object. #' #' @param percent \code{logical}. Should the distance be represented from 0 to 1? #' Default set to \code{TRUE}. \code{FALSE} will return the distance represented #' as integers from 1 to n where n is the number of loci. #' #' @param mat \code{logical}. Return a matrix object. Default set to #' \code{FALSE}, returning a dist object. \code{TRUE} returns a matrix object. #' #' @param missing_match \code{logical}. Determines whether two samples differing #' by missing data in a location should be counted as matching at that location. #' Default set to \code{TRUE}, which forces missing data to match with anything. 
#' \code{FALSE} forces missing data to not match with any other information.
#'
#' @param differences_only \code{logical}. Determines whether the matrix should
#' count differences or distances. For instance, 0 to 2 would be a distance of 2
#' but a difference of 1.
#'
#' @param threads The maximum number of parallel threads to be used within this
#' function. A value of 0 (default) will attempt to use as many threads as there
#' are available cores/CPUs. In most cases this is ideal. A value of 1 will force
#' the function to run serially, which may increase stability on some systems.
#' Other values may be specified, but should be used with caution.
#'
#' @return Pairwise distances between individuals present in the genlight object.
#' @author Zhian N. Kamvar, Jonah C. Brooks
#'
#' @export
#' @examples
#' set.seed(999)
#' x <- glSim(n.ind = 10, n.snp.nonstruc = 5e2, n.snp.struc = 5e2, ploidy = 2)
#' x
#' system.time(xd <- bitwise.dist(x))
#' xd
#==============================================================================#
bitwise.dist <- function(x, percent = TRUE, mat = FALSE, missing_match = TRUE,
                         differences_only = FALSE, threads = 0){
  stopifnot(class(x)[1] %in% c("genlight", "genclone", "genind", "snpclone"))
  # The ploidy must be uniform across all samples and either haploid or
  # diploid; the C routines below support nothing else.
  stopifnot(min(ploidy(x)) == max(ploidy(x)))
  stopifnot(min(ploidy(x)) == 2 || min(ploidy(x)) == 1)
  ploid     <- min(ploidy(x))
  ind.names <- indNames(x)
  inds      <- nInd(x)
  numPairs  <- nLoc(x)
  # genind/genclone objects are handled with Prevosti's distance instead of
  # the bitwise C routines, then scaled/coerced to match the requested output.
  if (!is(x, "genlight")){
    dist.mat <- prevosti.dist(x)
    if (percent == FALSE){
      dist.mat <- dist.mat * ploid * numPairs
    }
    if (mat == TRUE){
      dist.mat <- as.matrix(dist.mat)
    }
    # Return this matrix and exit the function.
    return(dist.mat)
  }
  # Continue for genlight objects. For diploids, ensure every SNPbin carries
  # two chromosome vectors by padding single-chromosome samples with a zeroed
  # raw vector of the same length.
  if (ploid == 2){
    for (i in seq_along(x$gen)){
      if (length(x$gen[[i]]$snp) == 1){
        x$gen[[i]]$snp <- append(x$gen[[i]]$snp,
                                 list(as.raw(rep(0, length(x$gen[[i]]$snp[[1]])))))
      }
    }
  }
  # Validate the thread count. NOTE(fix): the previous condition
  # (!is.numeric(threads) && !is.integer(threads) && threads >= 0) could never
  # trigger for numeric input (is.numeric() is TRUE for integers too), so
  # negative values slipped through. The intent is to reject anything that is
  # not a single non-negative number.
  if (!is.numeric(threads) || length(threads) != 1 || threads < 0){
    stop("Threads must be a non-negative numeric or integer value")
  }
  # Cast parameters to proper types before passing them to C.
  threads <- as.integer(threads)
  if (ploid == 1){
    pairwise_dist <- .Call("bitwise_distance_haploid", x, missing_match,
                           threads, PACKAGE = "poppr")
  } else {
    pairwise_dist <- .Call("bitwise_distance_diploid", x, missing_match,
                           differences_only, threads, PACKAGE = "poppr")
  }
  # Shape the flat result from C into a labelled square matrix.
  dist.mat           <- pairwise_dist
  dim(dist.mat)      <- c(inds, inds)
  colnames(dist.mat) <- ind.names
  rownames(dist.mat) <- ind.names
  if (percent){
    if (differences_only){
      dist.mat <- dist.mat / numPairs
    } else {
      dist.mat <- dist.mat / (numPairs * ploid)
    }
  }
  if (mat == FALSE){
    dist.mat <- as.dist(dist.mat)
  }
  return(dist.mat)
}
#==============================================================================#
#' Determines whether openMP is supported on this system.
#'
#' @return FALSE if openMP is not supported, TRUE if it is
#' @author Zhian N. Kamvar, Jonah C. Brooks
#'
#' @export
#' @examples
#' poppr_has_parallel()
#==============================================================================#
poppr_has_parallel <- function(){
  # The C side reports 0 when the package was compiled without OpenMP.
  supported <- .Call("omp_test", PACKAGE = "poppr")
  return(supported != 0)
}
#==============================================================================#
#' Calculate the index of association between samples in a genlight object.
#'
#' This function parses over a genlight object to calculate and return the
#' index of association for those samples.
#'
#' @param x a genlight object.
#'
#' @param missing_match a boolean determining whether missing data should be
#' considered a match. If TRUE (default) missing data at a locus will match
#' with any data at that locus in each comparison. If FALSE, missing data at
#' a locus will cause all comparisons to return the maximum possible distance
#' at that locus (ie, if sample 1 has missing data at locus 1, and sample 2
#' is heterozygous at locus 1, the distance at that locus will be 1. If sample
#' 2 was heterozygous or missing at locus 1, the distance would be 2.)
#'
#' @param differences_only a boolean determining how distance should be counted
#' for diploids. Whether TRUE or FALSE the distance between a heterozygous locus
#' and a homozygous locus is 1. If FALSE (default) the distance between opposite
#' homozygous loci is 2. If TRUE that distance counts as 1, indicating only that
#' the two samples differ at that locus.
#'
#' @param threads The maximum number of parallel threads to be used within this
#' function. A value of 0 (default) will attempt to use as many threads as there
#' are available cores/CPUs. In most cases this is ideal. A value of 1 will force
#' the function to run serially, which may increase stability on some systems.
#' Other values may be specified, but should be used with caution.
#'
#' @return Index of association representing the samples in this genlight object.
#' @author Zhian N. Kamvar, Jonah C. Brooks
#'
#' @export
#' @keywords internal
#==============================================================================#
bitwise.ia <- function(x, missing_match = TRUE, differences_only = FALSE,
                       threads = 0){
  stopifnot(class(x)[1] %in% c("genlight", "snpclone"))
  # The ploidy must be uniform across all samples and either haploid or diploid.
  stopifnot(min(ploidy(x)) == max(ploidy(x)))
  stopifnot(min(ploidy(x)) == 2 || min(ploidy(x)) == 1)
  ploid <- min(ploidy(x))
  # Validate the thread count; see bitwise.dist for the rationale behind this
  # corrected condition (the old one never rejected negative numerics).
  if (!is.numeric(threads) || length(threads) != 1 || threads < 0){
    stop("Threads must be a non-negative numeric or integer value")
  }
  # Cast parameters to proper types before passing them to C.
  threads <- as.integer(threads)
  if (ploid == 2){
    # Pad single-chromosome SNPbin objects so the diploid C routine always
    # sees two chromosome vectors per sample.
    for (i in seq_along(x$gen)){
      if (length(x$gen[[i]]$snp) == 1){
        x$gen[[i]]$snp <- append(x$gen[[i]]$snp,
                                 list(as.raw(rep(0, length(x$gen[[i]]$snp[[1]])))))
      }
    }
    IA <- .Call("association_index_diploid", x, missing_match,
                differences_only, threads, PACKAGE = "poppr")
  } else if (ploid == 1){
    IA <- .Call("association_index_haploid", x, missing_match, threads,
                PACKAGE = "poppr")
  } else {
    stop("bitwise.ia only supports haploids and diploids")
  }
  #TODO: Allow for automated index generation, such as random or window based
  return(IA)
}
#==============================================================================#
#' Calculate windows of the index of association for genlight objects.
#'
#' Genlight objects can contain millions of loci. Since it does not make much
#' sense to calculate the index of association over that many loci, this
#' function will scan windows across the loci positions and calculate the index
#' of association.
#'
#' @param x a genlight object.
#'
#' @param window an integer specifying the size of the window.
#'
#' @param min.snps an integer specifying the minimum number of snps allowed per
#' window. If a window does not meet this criterion, the value will return as
#' NA.
#'
#' @param threads The maximum number of parallel threads to be used within this
#' function. A value of 0 (default) will attempt to use as many threads as
#' there are available cores/CPUs. In most cases this is ideal. A value of 1
#' will force the function to run serially, which may increase stability on
#' some systems. Other values may be specified, but should be used with
#' caution.
#'
#' @param quiet if \code{FALSE}, a progress bar will be printed to the screen.
#'
#' @return Index of association representing the samples in this genlight
#' object.
#'
#' @note this will calculate the standardized index of association from Agapow
#' 2001. See \code{\link{ia}} for details.
#'
#' @author Zhian N. Kamvar, Jonah C. Brooks
#'
#' @export
#' @examples
#'
#' # with structured snps assuming 1e4 positions
#' set.seed(999)
#' x <- glSim(n.ind = 10, n.snp.nonstruc = 5e2, n.snp.struc = 5e2, ploidy = 2)
#' position(x) <- sort(sample(1e4, 1e3))
#' res <- win.ia(x, window = 300L) # Calculate for windows of size 300
#' plot(res, type = "l")
#'
#' \dontrun{
#' # unstructured snps
#' set.seed(999)
#' x <- glSim(n.ind = 10, n.snp.nonstruc = 1e3, ploidy = 2)
#' position(x) <- sort(sample(1e4, 1e3))
#' res <- win.ia(x, window = 300L) # Calculate for windows of size 300
#' plot(res, type = "l")
#' }
#'
#==============================================================================#
win.ia <- function(x, window = 100L, min.snps = 3L, threads = 1L, quiet = FALSE){
  stopifnot(is(x, "genlight"))
  # Use the recorded SNP positions when present; otherwise treat the loci as
  # consecutive integer positions.
  if (!is.null(position(x))){
    xpos <- position(x)
  } else {
    xpos <- seq_len(nLoc(x))
  }
  winmat  <- make_windows(maxp = max(xpos), minp = min(xpos), window = window)
  nwin    <- nrow(winmat)
  res_mat <- vector(mode = "numeric", length = nwin)
  if (!quiet) progbar <- txtProgressBar(style = 3)
  for (i in seq_len(nwin)){
    # Loci whose positions fall inside the current window
    # (assumes integer positions, as produced by position()).
    posns <- which(xpos %in% winmat[i, 1]:winmat[i, 2])
    if (length(posns) < min.snps){
      # Too few SNPs in this window for a meaningful estimate.
      res_mat[i] <- NA
    } else {
      res_mat[i] <- bitwise.ia(x[, posns], threads = threads)
    }
    if (!quiet){
      setTxtProgressBar(progbar, i/nwin)
    }
  }
  if (!quiet) cat("\n")
  return(res_mat)
}

#==============================================================================#
#' Calculate random samples of the index of association for genlight objects.
#'
#' Genlight objects can contain millions of loci. Since it does not make much
#' sense to calculate the index of association over that many loci, this
#' function will randomly sample sites to calculate the index of association.
#'
#' @param x a genlight object.
#'
#' @param n.snp the number of snps to be used to calculate standardized index
#' of association.
#'
#' @param reps the number of times to perform the calculation.
#'
#' @param threads The maximum number of parallel threads to be used within this
#' function. A value of 0 (default) will attempt to use as many threads as
#' there are available cores/CPUs. In most cases this is ideal. A value of 1
#' will force the function to run serially, which may increase stability on
#' some systems. Other values may be specified, but should be used with
#' caution.
#'
#' @param quiet if \code{FALSE}, a progress bar will be printed to the screen.
#'
#' @note this will calculate the standardized index of association from Agapow
#' 2001. See \code{\link{ia}} for details.
#'
#' @return Index of association representing the samples in this genlight
#' object.
#' @author Zhian N. Kamvar, Jonah C. Brooks
#'
#' @export
#' @examples
#' \dontrun{
#' # with structured snps assuming 1e4 positions
#' set.seed(999)
#' x <- glSim(n.ind = 10, n.snp.nonstruc = 5e2, n.snp.struc = 5e2, ploidy = 2)
#' position(x) <- sort(sample(1e4, 1e3))
#' res <- samp.ia(x)
#' hist(res)
#'
#' # with unstructured snps assuming 1e4 positions
#' set.seed(999)
#' x <- glSim(n.ind = 10, n.snp.nonstruc = 1e3, ploidy = 2)
#' position(x) <- sort(sample(1e4, 1e3))
#' res <- samp.ia(x)
#' hist(res)
#' }
#==============================================================================#
samp.ia <- function(x, n.snp = 100L, reps = 100L, threads = 1L, quiet = FALSE){
  stopifnot(is(x, "genlight"))
  nloc <- nLoc(x)
  # Fail early with a clear message instead of letting sample() throw a
  # cryptic "cannot take a sample larger than the population" error.
  if (n.snp > nloc){
    stop("n.snp must not be larger than the number of loci in x")
  }
  res_mat <- vector(mode = "numeric", length = reps)
  if (!quiet) progbar <- txtProgressBar(style = 3)
  for (i in seq_len(reps)){
    # Draw a fresh random subset of loci for each replicate.
    posns <- sample(nloc, n.snp)
    res_mat[i] <- bitwise.ia(x[, posns], threads = threads)
    if (!quiet){
      setTxtProgressBar(progbar, i/reps)
    }
  }
  if (!quiet) cat("\n")
  return(res_mat)
}

# Sat Aug 15 20:02:40 2015 ------------------------------
#
# This function was used in place of bitwise.ia before it
# was fixed. Since it has no purpose now, it is being
# commented out, but kept here for reference.
#
# snpia <- function(x, threads = 1L){
#   nloc <- nLoc(x)
#   nind <- nInd(x)
#   np <- choose(nind, 2)
#   d_mat <- vapply(seq(nloc), function(i) as.vector(bitwise.dist(x[, i], percent = FALSE, threads = threads)), integer(np))
#   D <- rowSums(d_mat)
#   SD <- sum(D)
#   Sd <- colSums(d_mat)
#   Sd2 <- colSums(d_mat*d_mat)
#   Vo <- (sum(D*D) - (SD*SD)/np)/np
#   varj <- (Sd2 - (Sd*Sd)/np)/np
#   Ve <- sum(varj)
#   Svarij <- .Call("pairwise_covar", varj, PACKAGE = "poppr")
#   return((Vo - Ve)/(2 * sum(Svarij)))
# }
/R/bitwise.r
no_license
craigcitro/poppr
R
false
false
16,870
r
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!# #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!# #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!# #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!# #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!# # # This software was authored by Zhian N. Kamvar and Javier F. Tabima, graduate # students at Oregon State University; Jonah C. Brooks, undergraduate student at # Oregon State University; and Dr. Nik Grünwald, an employee of USDA-ARS. # # Permission to use, copy, modify, and distribute this software and its # documentation for educational, research and non-profit purposes, without fee, # and without a written agreement is hereby granted, provided that the statement # above is incorporated into the material, giving appropriate attribution to the # authors. # # Permission to incorporate this software into commercial products may be # obtained by contacting USDA ARS and OREGON STATE UNIVERSITY Office for # Commercialization and Corporate Development. # # The software program and documentation are supplied "as is", without any # accompanying services from the USDA or the University. USDA ARS or the # University do not warrant that the operation of the program will be # uninterrupted or error-free. The end-user understands that the program was # developed for research purposes and is advised not to rely exclusively on the # program for any reason. # # IN NO EVENT SHALL USDA ARS OR OREGON STATE UNIVERSITY BE LIABLE TO ANY PARTY # FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING # LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, # EVEN IF THE OREGON STATE UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
USDA ARS OR OREGON STATE UNIVERSITY SPECIFICALLY DISCLAIMS ANY # WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE AND ANY STATUTORY # WARRANTY OF NON-INFRINGEMENT. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" # BASIS, AND USDA ARS AND OREGON STATE UNIVERSITY HAVE NO OBLIGATIONS TO PROVIDE # MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. # #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!# #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!# #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!# #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!# #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!# #==============================================================================# #' Calculate a distance matrix comparing samples based on the number of alleles #' that differ in zygosity. #' #' This function does pairwise comparisons between diploid samples in a genlight #' object. The number representing the distance between two samples is equal to #' the number of alleles in the samples that do not have the same zygosity. #' #' @param x a genlight, genind, or genclone object. #' #' @param percent \code{logical}. Should the distance be represented from 0 to 1? #' Default set to \code{TRUE}. \code{FALSE} will return the distance represented #' as integers from 1 to n where n is the number of loci. #' #' @param mat \code{logical}. Return a matrix object. Default set to #' \code{FALSE}, returning a dist object. \code{TRUE} returns a matrix object. #' #' @param missing_match \code{logical}. Determines whether two samples differing #' by missing data in a location should be counted as matching at that location. #' Default set to \code{TRUE}, which forces missing data to match with anything. 
#' \code{FALSE} forces missing data to not match with any other information.
#'
#' @param differences_only \code{logical}. Determines whether the matrix should
#' count differences or distances. For instance, 0 to 2 would be a distance of 2
#' but a difference of 1.
#'
#' @param threads The maximum number of parallel threads to be used within this
#' function. A value of 0 (default) will attempt to use as many threads as there
#' are available cores/CPUs. In most cases this is ideal. A value of 1 will force
#' the function to run serially, which may increase stability on some systems.
#' Other values may be specified, but should be used with caution.
#'
#' @return Pairwise distances between individuals present in the genlight object.
#' @author Zhian N. Kamvar, Jonah C. Brooks
#'
#' @export
#' @examples
#' set.seed(999)
#' x <- glSim(n.ind = 10, n.snp.nonstruc = 5e2, n.snp.struc = 5e2, ploidy = 2)
#' x
#' system.time(xd <- bitwise.dist(x))
#' xd
#==============================================================================#
bitwise.dist <- function(x, percent = TRUE, mat = FALSE, missing_match = TRUE,
                         differences_only = FALSE, threads = 0){
  stopifnot(class(x)[1] %in% c("genlight", "genclone", "genind", "snpclone"))
  # The ploidy must be uniform across all samples and either haploid or
  # diploid; the C routines below support nothing else.
  stopifnot(min(ploidy(x)) == max(ploidy(x)))
  stopifnot(min(ploidy(x)) == 2 || min(ploidy(x)) == 1)
  ploid     <- min(ploidy(x))
  ind.names <- indNames(x)
  inds      <- nInd(x)
  numPairs  <- nLoc(x)
  # genind/genclone objects are handled with Prevosti's distance instead of
  # the bitwise C routines, then scaled/coerced to match the requested output.
  if (!is(x, "genlight")){
    dist.mat <- prevosti.dist(x)
    if (percent == FALSE){
      dist.mat <- dist.mat * ploid * numPairs
    }
    if (mat == TRUE){
      dist.mat <- as.matrix(dist.mat)
    }
    # Return this matrix and exit the function.
    return(dist.mat)
  }
  # Continue for genlight objects. For diploids, ensure every SNPbin carries
  # two chromosome vectors by padding single-chromosome samples with a zeroed
  # raw vector of the same length.
  if (ploid == 2){
    for (i in seq_along(x$gen)){
      if (length(x$gen[[i]]$snp) == 1){
        x$gen[[i]]$snp <- append(x$gen[[i]]$snp,
                                 list(as.raw(rep(0, length(x$gen[[i]]$snp[[1]])))))
      }
    }
  }
  # Validate the thread count. NOTE(fix): the previous condition
  # (!is.numeric(threads) && !is.integer(threads) && threads >= 0) could never
  # trigger for numeric input (is.numeric() is TRUE for integers too), so
  # negative values slipped through. The intent is to reject anything that is
  # not a single non-negative number.
  if (!is.numeric(threads) || length(threads) != 1 || threads < 0){
    stop("Threads must be a non-negative numeric or integer value")
  }
  # Cast parameters to proper types before passing them to C.
  threads <- as.integer(threads)
  if (ploid == 1){
    pairwise_dist <- .Call("bitwise_distance_haploid", x, missing_match,
                           threads, PACKAGE = "poppr")
  } else {
    pairwise_dist <- .Call("bitwise_distance_diploid", x, missing_match,
                           differences_only, threads, PACKAGE = "poppr")
  }
  # Shape the flat result from C into a labelled square matrix.
  dist.mat           <- pairwise_dist
  dim(dist.mat)      <- c(inds, inds)
  colnames(dist.mat) <- ind.names
  rownames(dist.mat) <- ind.names
  if (percent){
    if (differences_only){
      dist.mat <- dist.mat / numPairs
    } else {
      dist.mat <- dist.mat / (numPairs * ploid)
    }
  }
  if (mat == FALSE){
    dist.mat <- as.dist(dist.mat)
  }
  return(dist.mat)
}
#==============================================================================#
#' Determines whether openMP is supported on this system.
#'
#' @return FALSE if openMP is not supported, TRUE if it is
#' @author Zhian N. Kamvar, Jonah C. Brooks
#'
#' @export
#' @examples
#' poppr_has_parallel()
#==============================================================================#
poppr_has_parallel <- function(){
  # The C side reports 0 when the package was compiled without OpenMP.
  supported <- .Call("omp_test", PACKAGE = "poppr")
  return(supported != 0)
}
#==============================================================================#
#' Calculate the index of association between samples in a genlight object.
#'
#' This function parses over a genlight object to calculate and return the
#' index of association for those samples.
#'
#' @param x a genlight object.
#'
#' @param missing_match a boolean determining whether missing data should be
#' considered a match. If TRUE (default) missing data at a locus will match
#' with any data at that locus in each comparison. If FALSE, missing data at
#' a locus will cause all comparisons to return the maximum possible distance
#' at that locus (ie, if sample 1 has missing data at locus 1, and sample 2
#' is heterozygous at locus 1, the distance at that locus will be 1. If sample
#' 2 was heterozygous or missing at locus 1, the distance would be 2.)
#'
#' @param differences_only a boolean determining how distance should be counted
#' for diploids. Whether TRUE or FALSE the distance between a heterozygous locus
#' and a homozygous locus is 1. If FALSE (default) the distance between opposite
#' homozygous loci is 2. If TRUE that distance counts as 1, indicating only that
#' the two samples differ at that locus.
#'
#' @param threads The maximum number of parallel threads to be used within this
#' function. A value of 0 (default) will attempt to use as many threads as there
#' are available cores/CPUs. In most cases this is ideal. A value of 1 will force
#' the function to run serially, which may increase stability on some systems.
#' Other values may be specified, but should be used with caution.
#'
#' @return Index of association representing the samples in this genlight object.
#' @author Zhian N. Kamvar, Jonah C. Brooks
#'
#' @export
#' @keywords internal
#==============================================================================#
bitwise.ia <- function(x, missing_match = TRUE, differences_only = FALSE,
                       threads = 0){
  stopifnot(class(x)[1] %in% c("genlight", "snpclone"))
  # The ploidy must be uniform across all samples and either haploid or diploid.
  stopifnot(min(ploidy(x)) == max(ploidy(x)))
  stopifnot(min(ploidy(x)) == 2 || min(ploidy(x)) == 1)
  ploid <- min(ploidy(x))
  # Validate the thread count; see bitwise.dist for the rationale behind this
  # corrected condition (the old one never rejected negative numerics).
  if (!is.numeric(threads) || length(threads) != 1 || threads < 0){
    stop("Threads must be a non-negative numeric or integer value")
  }
  # Cast parameters to proper types before passing them to C.
  threads <- as.integer(threads)
  if (ploid == 2){
    # Pad single-chromosome SNPbin objects so the diploid C routine always
    # sees two chromosome vectors per sample.
    for (i in seq_along(x$gen)){
      if (length(x$gen[[i]]$snp) == 1){
        x$gen[[i]]$snp <- append(x$gen[[i]]$snp,
                                 list(as.raw(rep(0, length(x$gen[[i]]$snp[[1]])))))
      }
    }
    IA <- .Call("association_index_diploid", x, missing_match,
                differences_only, threads, PACKAGE = "poppr")
  } else if (ploid == 1){
    IA <- .Call("association_index_haploid", x, missing_match, threads,
                PACKAGE = "poppr")
  } else {
    stop("bitwise.ia only supports haploids and diploids")
  }
  #TODO: Allow for automated index generation, such as random or window based
  return(IA)
}
#==============================================================================#
#' Calculate windows of the index of association for genlight objects.
#'
#' Genlight objects can contain millions of loci. Since it does not make much
#' sense to calculate the index of association over that many loci, this
#' function will scan windows across the loci positions and calculate the index
#' of association.
#'
#' @param x a genlight object.
#'
#' @param window an integer specifying the size of the window.
#'
#' @param min.snps an integer specifying the minimum number of snps allowed per
#' window. If a window does not meet this criterion, the value will return as
#' NA.
#'
#' @param threads The maximum number of parallel threads to be used within this
#' function. A value of 0 (default) will attempt to use as many threads as
#' there are available cores/CPUs. In most cases this is ideal. A value of 1
#' will force the function to run serially, which may increase stability on
#' some systems. Other values may be specified, but should be used with
#' caution.
#'
#' @param quiet if \code{FALSE}, a progress bar will be printed to the screen.
#'
#' @return Index of association representing the samples in this genlight
#' object.
#'
#' @note this will calculate the standardized index of association from Agapow
#' 2001. See \code{\link{ia}} for details.
#'
#' @author Zhian N. Kamvar, Jonah C. Brooks
#'
#' @export
#' @examples
#'
#' # with structured snps assuming 1e4 positions
#' set.seed(999)
#' x <- glSim(n.ind = 10, n.snp.nonstruc = 5e2, n.snp.struc = 5e2, ploidy = 2)
#' position(x) <- sort(sample(1e4, 1e3))
#' res <- win.ia(x, window = 300L) # Calculate for windows of size 300
#' plot(res, type = "l")
#'
#' \dontrun{
#' # unstructured snps
#' set.seed(999)
#' x <- glSim(n.ind = 10, n.snp.nonstruc = 1e3, ploidy = 2)
#' position(x) <- sort(sample(1e4, 1e3))
#' res <- win.ia(x, window = 300L) # Calculate for windows of size 300
#' plot(res, type = "l")
#' }
#'
#==============================================================================#
win.ia <- function(x, window = 100L, min.snps = 3L, threads = 1L, quiet = FALSE){
  stopifnot(is(x, "genlight"))
  # Use the recorded SNP positions when present; otherwise treat the loci as
  # consecutive integer positions.
  if (!is.null(position(x))){
    xpos <- position(x)
  } else {
    xpos <- seq_len(nLoc(x))
  }
  winmat  <- make_windows(maxp = max(xpos), minp = min(xpos), window = window)
  nwin    <- nrow(winmat)
  res_mat <- vector(mode = "numeric", length = nwin)
  if (!quiet) progbar <- txtProgressBar(style = 3)
  for (i in seq_len(nwin)){
    # Loci whose positions fall inside the current window
    # (assumes integer positions, as produced by position()).
    posns <- which(xpos %in% winmat[i, 1]:winmat[i, 2])
    if (length(posns) < min.snps){
      # Too few SNPs in this window for a meaningful estimate.
      res_mat[i] <- NA
    } else {
      res_mat[i] <- bitwise.ia(x[, posns], threads = threads)
    }
    if (!quiet){
      setTxtProgressBar(progbar, i/nwin)
    }
  }
  if (!quiet) cat("\n")
  return(res_mat)
}

#==============================================================================#
#' Calculate random samples of the index of association for genlight objects.
#'
#' Genlight objects can contain millions of loci. Since it does not make much
#' sense to calculate the index of association over that many loci, this
#' function will randomly sample sites to calculate the index of association.
#'
#' @param x a genlight object.
#'
#' @param n.snp the number of snps to be used to calculate standardized index
#' of association.
#'
#' @param reps the number of times to perform the calculation.
#'
#' @param threads The maximum number of parallel threads to be used within this
#' function. A value of 0 (default) will attempt to use as many threads as
#' there are available cores/CPUs. In most cases this is ideal. A value of 1
#' will force the function to run serially, which may increase stability on
#' some systems. Other values may be specified, but should be used with
#' caution.
#'
#' @param quiet if \code{FALSE}, a progress bar will be printed to the screen.
#'
#' @note this will calculate the standardized index of association from Agapow
#' 2001. See \code{\link{ia}} for details.
#'
#' @return Index of association representing the samples in this genlight
#' object.
#' @author Zhian N. Kamvar, Jonah C. Brooks
#'
#' @export
#' @examples
#' \dontrun{
#' # with structured snps assuming 1e4 positions
#' set.seed(999)
#' x <- glSim(n.ind = 10, n.snp.nonstruc = 5e2, n.snp.struc = 5e2, ploidy = 2)
#' position(x) <- sort(sample(1e4, 1e3))
#' res <- samp.ia(x)
#' hist(res)
#'
#' # with unstructured snps assuming 1e4 positions
#' set.seed(999)
#' x <- glSim(n.ind = 10, n.snp.nonstruc = 1e3, ploidy = 2)
#' position(x) <- sort(sample(1e4, 1e3))
#' res <- samp.ia(x)
#' hist(res)
#' }
#==============================================================================#
samp.ia <- function(x, n.snp = 100L, reps = 100L, threads = 1L, quiet = FALSE){
  stopifnot(is(x, "genlight"))
  nloc <- nLoc(x)
  # Fail early with a clear message instead of letting sample() throw a
  # cryptic "cannot take a sample larger than the population" error.
  if (n.snp > nloc){
    stop("n.snp must not be larger than the number of loci in x")
  }
  res_mat <- vector(mode = "numeric", length = reps)
  if (!quiet) progbar <- txtProgressBar(style = 3)
  for (i in seq_len(reps)){
    # Draw a fresh random subset of loci for each replicate.
    posns <- sample(nloc, n.snp)
    res_mat[i] <- bitwise.ia(x[, posns], threads = threads)
    if (!quiet){
      setTxtProgressBar(progbar, i/reps)
    }
  }
  if (!quiet) cat("\n")
  return(res_mat)
}

# Sat Aug 15 20:02:40 2015 ------------------------------
#
# This function was used in place of bitwise.ia before it
# was fixed. Since it has no purpose now, it is being
# commented out, but kept here for reference.
#
# snpia <- function(x, threads = 1L){
#   nloc <- nLoc(x)
#   nind <- nInd(x)
#   np <- choose(nind, 2)
#   d_mat <- vapply(seq(nloc), function(i) as.vector(bitwise.dist(x[, i], percent = FALSE, threads = threads)), integer(np))
#   D <- rowSums(d_mat)
#   SD <- sum(D)
#   Sd <- colSums(d_mat)
#   Sd2 <- colSums(d_mat*d_mat)
#   Vo <- (sum(D*D) - (SD*SD)/np)/np
#   varj <- (Sd2 - (Sd*Sd)/np)/np
#   Ve <- sum(varj)
#   Svarij <- .Call("pairwise_covar", varj, PACKAGE = "poppr")
#   return((Vo - Ve)/(2 * sum(Svarij)))
# }
#' intact.EASE.enrich()
#'
#' Provide a rapid way to perform EASE.tests on columns of the <.intact> objects created using the lipid.miner function, keeping only the enriched elements.
#'
#' @param X any column of a <Query.intact> object
#' @param Y any column of a <Universe.intact> object (has to be the same column as X)
#' @param p p-value cutoff used to filter the enrichment results (default 0.05)
#' @param q FDR q-value cutoff, adjusted using the Bioconductor package qvalue (default 1.0)
#'
#' @details The difference between the intact.EASE and the intact.EASE.enrich function is that the <.enrich> function allows to extract only the elements that are enriched according to the p or q value cutoffs.
#' @details The universe file db.intact is provided if you are lacking a universe file.
#'
#' @return This function will return a data frame with the results of the tests
#'
#' @examples intact.EASE.enrich(queryExample.intact$`Main class`, universeExample.intact$`Main class`)
#'
#' @author Geremy Clair
#' @export
#'
intact.EASE.enrich <- function(X, Y, p = 0.05, q = 1.0){
  # X is the query list, Y is the universe list.
  # Negative cutoffs make no sense; fall back to the defaults. (The original
  # also re-assigned the defaults when p/q were missing, which is redundant
  # given the formals' defaults and has been dropped.)
  if (p < 0) p <- 0.05
  if (q < 0) q <- 1
  lipidEnrich <- intact.EASE(X, Y)
  # Keep only over-represented elements (fold change > 1) passing both cutoffs.
  lipidEnrich <- lipidEnrich[lipidEnrich$`p-value` < p, ]
  lipidEnrich <- lipidEnrich[lipidEnrich$`FDR.q-value` < q, ]
  lipidEnrich <- lipidEnrich[lipidEnrich$Fold.change > 1, ]
  lipidEnrich
}

#' allchains.EASE.enrich()
#'
#' Provide a rapid way to perform EASE.tests on <.allchains> objects created using the lipid.miner function, keeping only the enriched elements.
#'
#' @param X any <Query.allchains> object
#' @param Y any <Universe.allchains> object
#' @param p p-value cutoff used to filter the enrichment results (default 0.05)
#' @param q FDR q-value cutoff, adjusted using the Bioconductor package qvalue (default 1.0)
#'
#' @details The difference between the allchains.EASE and the allchains.EASE.enrich function is that the <.enrich> function allows to extract only the elements that are enriched according to the p or q value cutoffs.
#' @details the universe file db.allchains is provided if you are lacking a universe file
#'
#' @return This function will return a data frame with the results of the tests
#'
#' @examples allchains.EASE.enrich(queryExample.allchains, universeExample.allchains)
#'
#' @author Geremy Clair
#' @export
#'
allchains.EASE.enrich <- function(X, Y, p = 0.05, q = 1.0){
  # Negative cutoffs fall back to the defaults.
  if (p < 0) p <- 0.05
  if (q < 0) q <- 1
  lipidEnrich <- allchains.EASE(X, Y)
  # Keep only over-represented elements (fold change > 1) passing both cutoffs.
  lipidEnrich <- lipidEnrich[lipidEnrich$`p-value` < p, ]
  lipidEnrich <- lipidEnrich[lipidEnrich$`FDR.q-value` < q, ]
  lipidEnrich <- lipidEnrich[lipidEnrich$Fold.change > 1, ]
  lipidEnrich
}

#' chain.EASE.enrich()
#'
#' Provide a rapid way to perform EASE.tests on <.chain> objects created using the lipid.miner function, keeping only the enriched elements.
#'
#' @param X any <Query.chain> object
#' @param Y any <Universe.chain> object
#' @param p p-value cutoff used to filter the enrichment results (default 0.05)
#' @param q FDR q-value cutoff, adjusted using the Bioconductor package qvalue (default 1.0)
#'
#' @details The difference between the chain.EASE and the chain.EASE.enrich function is that the <.enrich> function allows to extract only the elements that are enriched according to the p or q value cutoffs.
#' @details the universe file db.chain is provided if you are lacking a universe file
#'
#' @return This function will return a data frame with the results of the tests
#'
#' @examples chain.EASE.enrich(queryExample.chain, universeExample.chain)
#'
#' @author Geremy Clair
#' @export
#'
chain.EASE.enrich <- function(X, Y, p = 0.05, q = 1.0){
  # Negative cutoffs fall back to the defaults.
  if (p < 0) p <- 0.05
  if (q < 0) q <- 1
  lipidEnrich <- chain.EASE(X, Y)
  # Keep only over-represented elements (fold change > 1) passing both cutoffs.
  lipidEnrich <- lipidEnrich[lipidEnrich$`p-value` < p, ]
  lipidEnrich <- lipidEnrich[lipidEnrich$`FDR.q-value` < q, ]
  lipidEnrich <- lipidEnrich[lipidEnrich$Fold.change > 1, ]
  lipidEnrich
}
/R/10_EASE_enrichment_functions.R
permissive
PNNL-Comp-Mass-Spec/Rodin
R
false
false
3,788
r
#' intact.EASE.enrich()
#'
#' Provide a rapid way to perform EASE.tests on columns of the <.intact> objects created using the lipid.miner function, keeping only the enriched elements.
#'
#' @param X any column of a <Query.intact> object
#' @param Y any column of a <Universe.intact> object (has to be the same column as X)
#' @param p p-value cutoff used to filter the enrichment results (default 0.05)
#' @param q FDR q-value cutoff, adjusted using the Bioconductor package qvalue (default 1.0)
#'
#' @details The difference between the intact.EASE and the intact.EASE.enrich function is that the <.enrich> function allows to extract only the elements that are enriched according to the p or q value cutoffs.
#' @details The universe file db.intact is provided if you are lacking a universe file.
#'
#' @return This function will return a data frame with the results of the tests
#'
#' @examples intact.EASE.enrich(queryExample.intact$`Main class`, universeExample.intact$`Main class`)
#'
#' @author Geremy Clair
#' @export
#'
intact.EASE.enrich <- function(X, Y, p = 0.05, q = 1.0){
  # X is the query list, Y is the universe list.
  # Negative cutoffs make no sense; fall back to the defaults. (The original
  # also re-assigned the defaults when p/q were missing, which is redundant
  # given the formals' defaults and has been dropped.)
  if (p < 0) p <- 0.05
  if (q < 0) q <- 1
  lipidEnrich <- intact.EASE(X, Y)
  # Keep only over-represented elements (fold change > 1) passing both cutoffs.
  lipidEnrich <- lipidEnrich[lipidEnrich$`p-value` < p, ]
  lipidEnrich <- lipidEnrich[lipidEnrich$`FDR.q-value` < q, ]
  lipidEnrich <- lipidEnrich[lipidEnrich$Fold.change > 1, ]
  lipidEnrich
}

#' allchains.EASE.enrich()
#'
#' Provide a rapid way to perform EASE.tests on <.allchains> objects created using the lipid.miner function, keeping only the enriched elements.
#'
#' @param X any <Query.allchains> object
#' @param Y any <Universe.allchains> object
#' @param p p-value cutoff used to filter the enrichment results (default 0.05)
#' @param q FDR q-value cutoff, adjusted using the Bioconductor package qvalue (default 1.0)
#'
#' @details The difference between the allchains.EASE and the allchains.EASE.enrich function is that the <.enrich> function allows to extract only the elements that are enriched according to the p or q value cutoffs.
#' @details the universe file db.allchains is provided if you are lacking a universe file
#'
#' @return This function will return a data frame with the results of the tests
#'
#' @examples allchains.EASE.enrich(queryExample.allchains, universeExample.allchains)
#'
#' @author Geremy Clair
#' @export
#'
allchains.EASE.enrich <- function(X, Y, p = 0.05, q = 1.0){
  # Negative cutoffs fall back to the defaults.
  if (p < 0) p <- 0.05
  if (q < 0) q <- 1
  lipidEnrich <- allchains.EASE(X, Y)
  # Keep only over-represented elements (fold change > 1) passing both cutoffs.
  lipidEnrich <- lipidEnrich[lipidEnrich$`p-value` < p, ]
  lipidEnrich <- lipidEnrich[lipidEnrich$`FDR.q-value` < q, ]
  lipidEnrich <- lipidEnrich[lipidEnrich$Fold.change > 1, ]
  lipidEnrich
}

#' chain.EASE.enrich()
#'
#' Provide a rapid way to perform EASE.tests on <.chain> objects created using the lipid.miner function, keeping only the enriched elements.
#'
#' @param X any <Query.chain> object
#' @param Y any <Universe.chain> object
#' @param p p-value cutoff used to filter the enrichment results (default 0.05)
#' @param q FDR q-value cutoff, adjusted using the Bioconductor package qvalue (default 1.0)
#'
#' @details The difference between the chain.EASE and the chain.EASE.enrich function is that the <.enrich> function allows to extract only the elements that are enriched according to the p or q value cutoffs.
#' @details the universe file db.chain is provided if you are lacking a universe file
#'
#' @return This function will return a data frame with the results of the tests
#'
#' @examples chain.EASE.enrich(queryExample.chain, universeExample.chain)
#'
#' @author Geremy Clair
#' @export
#'
chain.EASE.enrich <- function(X, Y, p = 0.05, q = 1.0){
  # Negative cutoffs fall back to the defaults.
  if (p < 0) p <- 0.05
  if (q < 0) q <- 1
  lipidEnrich <- chain.EASE(X, Y)
  # Keep only over-represented elements (fold change > 1) passing both cutoffs.
  lipidEnrich <- lipidEnrich[lipidEnrich$`p-value` < p, ]
  lipidEnrich <- lipidEnrich[lipidEnrich$`FDR.q-value` < q, ]
  lipidEnrich <- lipidEnrich[lipidEnrich$Fold.change > 1, ]
  lipidEnrich
}
# Processing environmental data: NDVI
# NDVI MODIS processing from NASA for my two regions
# Cape vs SWA publication
# Ruan van Mazijk
#
# Pipeline: download monthly MOD13C2 HDFs -> convert to GeoTIFF -> crop to
# each region's bounding box -> reproject to std_CRS -> average per month,
# then across months -> crop-mask the annual average to the region boxes.
#
# Relies on objects/functions defined elsewhere in the project: `giswd`,
# `std_CRS`, `bash_gdalinfo`, `bash_gdalinfo_one`, `bash_gdaltranslate_one`,
# `crop_MOD13C2`, `project_MOD13C2`.

# Import order-information-file to get URLs for MOD13C2 (NDVI) rasters ---------
# (renamed from `order` so base::order() is not shadowed)
order_info <- as_tibble(read.csv(
  here::here("data/raw-data/NASA-ladsweb-MOD13C2-order-501172795.csv")
))

# Download HDF4 files -----------------------------------------------------------
# e.g.
# `https://ladsweb.modaps.eosdis.nasa.gov/archive/orders/501172795/MOD13C2.A2000032.006.2015147122546.hdf`
# becomes ->
# `ftp://ladsweb.modaps.eosdis.nasa.gov/orders/501172795/MOD13C2.A2000032.006.2015147122546.hdf`
FTP <- "ftp://ladsweb.modaps.eosdis.nasa.gov/orders/501172795/"

# Test a single file first
if (FALSE) {
  download.file(
    url      = paste0(FTP, order_info$name[1]),
    destfile = paste0(giswd, "MOD13C2/", order_info$name[1]),
    method   = "wget",
    quiet    = TRUE,
    cacheOK  = TRUE,
    extra    = getOption("download.file.extra")
  )
  # Works!
}

# Actual download run
for (hdf_name in order_info$name) {
  download.file(
    url      = paste0(FTP, hdf_name),
    destfile = paste0(giswd, "MOD13C2/", hdf_name),
    method   = "wget",
    quiet    = TRUE,
    cacheOK  = TRUE,
    extra    = getOption("download.file.extra")
  )
}

# Convert HDF4s to GEOTIFFs -------------------------------------------------------
files <- list.files(
  path    = paste0(giswd, "MOD13C2/"),
  pattern = "\\.hdf$"
)
bash_gdalinfo(
  x     = files[1],
  x_dir = paste0(giswd, "MOD13C2/")
)
bash_gdalinfo_one(
  x     = files[1],
  # (extra single quote because of the space in the band name)
  band  = "':MOD_Grid_monthly_CMG_VI:CMG 0.05 Deg Monthly NDVI'",
  x_dir = paste0(giswd, "MOD13C2/")
)
# Translate every HDF in parallel
cluster <- makeCluster(detectCores() - 1, outfile = "")
parLapply(cluster, files, bash_gdaltranslate_one,
  band    = "':MOD_Grid_monthly_CMG_VI:CMG 0.05 Deg Monthly NDVI'",
  x_dir   = paste0(giswd, "MOD13C2/"),
  out_dir = paste0(giswd, "MOD13C2_GeoTiffs/")
)
stopCluster(cluster)

# Crop & mask to GCFR/SWAFR -------------------------------------------------------
files <- list.files(
  path    = paste0(giswd, "MOD13C2_GeoTiffs/"),
  pattern = "\\.tif$"
)

# Crop all monthly tiffs to one region's bounding box, after reprojecting the
# box to the tiffs' native NASA CRS. (Replaces the copy-pasted GCFR/SWAFR
# sections of the original.)
crop_region <- function(region, tif_names) {
  box <- readRDS(here::here(glue("data/derived-data/borders/{region}_box.rds")))
  NASA_CRS <- crs(raster(paste0(giswd, "MOD13C2_GeoTiffs/", tif_names[1])))
  box_NASA <- spTransform(box, CRSobj = NASA_CRS)
  cluster <- makeCluster(detectCores() - 1, outfile = "")
  on.exit(stopCluster(cluster), add = TRUE)
  parLapply(cluster, tif_names, crop_MOD13C2,
    box     = box_NASA,
    filedir = paste0(giswd, "MOD13C2_GeoTiffs/"),
    outdir  = paste0(giswd, "MOD13C2_GeoTiffs/", region, "/"),
    suffix  = paste0("_", region)
  )
}
crop_region("GCFR", files)
crop_region("SWAFR", files)

# Check CRS
# BUG FIX: the original called raster() on the *directory* path
# paste0(giswd, "MOD13C2_GeoTiffs/", "GCFR"), which errors; inspect the CRS
# of the first cropped tiff instead.
GCFR_tifs <- list.files(
  path    = paste0(giswd, "MOD13C2_GeoTiffs/", "GCFR/"),
  pattern = "\\.tif$"
)
crs(raster(paste0(giswd, "MOD13C2_GeoTiffs/", "GCFR/", GCFR_tifs[1])))

# Reproject to std_CRS -------------------------------------------------------------
reproject_region <- function(region) {
  tif_names <- list.files(
    path    = paste0(giswd, "MOD13C2_GeoTiffs/", region, "/"),
    pattern = "\\.tif$"
  )
  # Show the pre-reprojection CRS of the first file
  print(crs(raster(paste0(giswd, "MOD13C2_GeoTiffs/", region, "/", tif_names[1]))))
  cluster <- makeCluster(detectCores() - 1, outfile = "")
  on.exit(stopCluster(cluster), add = TRUE)
  parLapply(cluster, tif_names, project_MOD13C2,
    crs     = std_CRS,
    filedir = paste0(giswd, "MOD13C2_GeoTiffs/", region, "/"),
    outdir  = paste0(giswd, "MOD13C2_GeoTiffs/", region, "/"),
    suffix  = "_std_CRS"
  )
}
reproject_region("GCFR")
reproject_region("SWAFR")

# Compute mean annual NDVI ----------------------------------------------------------

month_names <- c("Jan", "Feb", "Mar", "Apr", "May", "Jun",
                 "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")

# Bucket the reprojected tiff names into a list by year, each element named by
# month. MOD13C2 starts in Feb 2000, so 2000 gets an NA January slot; the
# 2017 download only runs Jan-Apr.
organise_files_by_year <- function(tif_names, years = 2000:2017) {
  files_by_year <- vector("list", length = length(years))
  names(files_by_year) <- years
  for (i in seq_along(years)) {
    files_by_year[[i]] <- tif_names[str_detect(tif_names, pattern = glue("A{years[i]}"))]
  }
  files_by_year[["2000"]] <- c(NA, files_by_year[["2000"]])
  for (i in seq_along(files_by_year)) {
    names(files_by_year[[i]]) <- month_names[seq_along(files_by_year[[i]])]
  }
  files_by_year
}

# Stack one calendar month across the given year indices, then write the stack
# and its mean to disk (same output filenames as the original script).
stack_and_write_month <- function(files_by_year, region, month, year_idx) {
  month_stack <- stack()
  for (i in year_idx) {
    layer <- raster(paste0(
      giswd, "MOD13C2_GeoTiffs/", region, "/", files_by_year[[i]][month]
    ))
    month_stack <- stack(month_stack, layer)
  }
  region_dir <- paste0(giswd, "MOD13C2_GeoTiffs/", region, "/")
  writeRaster(month_stack, overwrite = TRUE,
              paste0(region_dir, "MOD13C2_", region, "_std_CRS_", month, ".tif"))
  writeRaster(mean(month_stack), overwrite = TRUE,
              paste0(region_dir, "MOD13C2_", region, "_std_CRS_", month, "_mean.tif"))
  invisible(month_stack)
}

# Monthly means plus the across-month ("annual") average for one region.
# (Replaces the copy-pasted GCFR/SWAFR sections; the original SWAFR section
# confusingly reused GCFR_* variable names.)
process_region_means <- function(region) {
  region_dir <- paste0(giswd, "MOD13C2_GeoTiffs/", region, "/")
  tif_names <- list.files(path = region_dir, pattern = "std_CRS\\.tif$")
  files_by_year <- organise_files_by_year(tif_names)
  # January: 2000 has no data (NA slot), so start at the second year.
  stack_and_write_month(files_by_year, region, "Jan", year_idx = 2:18)
  # Feb-Apr exist for every year 2000-2017.
  for (month in c("Feb", "Mar", "Apr")) {
    stack_and_write_month(files_by_year, region, month, year_idx = 1:18)
  }
  # May-Dec exist only through 2016 (the 2017 download stops in April).
  for (month in c("May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")) {
    stack_and_write_month(files_by_year, region, month, year_idx = 1:17)
  }
  # Finally, make the annual average from the 12 monthly means.
  mean_files <- list.files(path = region_dir, pattern = "std_CRS_.+_mean\\.tif$")
  all_months <- stack()
  for (i in 1:12) {
    all_months <- stack(all_months, raster(paste0(region_dir, mean_files[i])))
  }
  writeRaster(all_months, overwrite = TRUE,
              paste0(region_dir, "MOD13C2_", region, "_std_CRS_all_months.tif"))
  writeRaster(mean(all_months), overwrite = TRUE,
              paste0(region_dir, "MOD13C2_", region, "_std_CRS_annual_avg.tif"))
}
process_region_means("GCFR")
process_region_means("SWAFR")

# Crop-mask to "boxes" --------------------------------------------------------------
crop_mask_annual <- function(region) {
  box <- readOGR(here::here(glue("data/derived-data/borders/{region}_box/")))
  ndvi <- raster(paste0(
    giswd, "MOD13C2_GeoTiffs/", region, "/",
    "MOD13C2_", region, "_std_CRS_annual_avg.tif"
  ))
  ndvi <- mask(crop(ndvi, box), box)
  writeRaster(ndvi, here::here(glue("data/derived-data/NDVI/{region}_NDVI.tif")))
}
crop_mask_annual("GCFR")
crop_mask_annual("SWAFR")
/R/data-processing/02_process-environmental-data/04_NDVI.R
permissive
rvanmazijk/Cape-vs-SWA
R
false
false
13,812
r
# Processing environmental data: NDVI
# NDVI MODIS processing from NASA for my two regions
# Cape vs SWA publication
# Ruan van Mazijk
#
# Pipeline: download monthly MOD13C2 HDFs -> convert to GeoTIFF -> crop to
# each region's bounding box -> reproject to std_CRS -> average per month,
# then across months -> crop-mask the annual average to the region boxes.
#
# Relies on objects/functions defined elsewhere in the project: `giswd`,
# `std_CRS`, `bash_gdalinfo`, `bash_gdalinfo_one`, `bash_gdaltranslate_one`,
# `crop_MOD13C2`, `project_MOD13C2`.

# Import order-information-file to get URLs for MOD13C2 (NDVI) rasters ---------
# (renamed from `order` so base::order() is not shadowed)
order_info <- as_tibble(read.csv(
  here::here("data/raw-data/NASA-ladsweb-MOD13C2-order-501172795.csv")
))

# Download HDF4 files -----------------------------------------------------------
# e.g.
# `https://ladsweb.modaps.eosdis.nasa.gov/archive/orders/501172795/MOD13C2.A2000032.006.2015147122546.hdf`
# becomes ->
# `ftp://ladsweb.modaps.eosdis.nasa.gov/orders/501172795/MOD13C2.A2000032.006.2015147122546.hdf`
FTP <- "ftp://ladsweb.modaps.eosdis.nasa.gov/orders/501172795/"

# Test a single file first
if (FALSE) {
  download.file(
    url      = paste0(FTP, order_info$name[1]),
    destfile = paste0(giswd, "MOD13C2/", order_info$name[1]),
    method   = "wget",
    quiet    = TRUE,
    cacheOK  = TRUE,
    extra    = getOption("download.file.extra")
  )
  # Works!
}

# Actual download run
for (hdf_name in order_info$name) {
  download.file(
    url      = paste0(FTP, hdf_name),
    destfile = paste0(giswd, "MOD13C2/", hdf_name),
    method   = "wget",
    quiet    = TRUE,
    cacheOK  = TRUE,
    extra    = getOption("download.file.extra")
  )
}

# Convert HDF4s to GEOTIFFs -------------------------------------------------------
files <- list.files(
  path    = paste0(giswd, "MOD13C2/"),
  pattern = "\\.hdf$"
)
bash_gdalinfo(
  x     = files[1],
  x_dir = paste0(giswd, "MOD13C2/")
)
bash_gdalinfo_one(
  x     = files[1],
  # (extra single quote because of the space in the band name)
  band  = "':MOD_Grid_monthly_CMG_VI:CMG 0.05 Deg Monthly NDVI'",
  x_dir = paste0(giswd, "MOD13C2/")
)
# Translate every HDF in parallel
cluster <- makeCluster(detectCores() - 1, outfile = "")
parLapply(cluster, files, bash_gdaltranslate_one,
  band    = "':MOD_Grid_monthly_CMG_VI:CMG 0.05 Deg Monthly NDVI'",
  x_dir   = paste0(giswd, "MOD13C2/"),
  out_dir = paste0(giswd, "MOD13C2_GeoTiffs/")
)
stopCluster(cluster)

# Crop & mask to GCFR/SWAFR -------------------------------------------------------
files <- list.files(
  path    = paste0(giswd, "MOD13C2_GeoTiffs/"),
  pattern = "\\.tif$"
)

# Crop all monthly tiffs to one region's bounding box, after reprojecting the
# box to the tiffs' native NASA CRS. (Replaces the copy-pasted GCFR/SWAFR
# sections of the original.)
crop_region <- function(region, tif_names) {
  box <- readRDS(here::here(glue("data/derived-data/borders/{region}_box.rds")))
  NASA_CRS <- crs(raster(paste0(giswd, "MOD13C2_GeoTiffs/", tif_names[1])))
  box_NASA <- spTransform(box, CRSobj = NASA_CRS)
  cluster <- makeCluster(detectCores() - 1, outfile = "")
  on.exit(stopCluster(cluster), add = TRUE)
  parLapply(cluster, tif_names, crop_MOD13C2,
    box     = box_NASA,
    filedir = paste0(giswd, "MOD13C2_GeoTiffs/"),
    outdir  = paste0(giswd, "MOD13C2_GeoTiffs/", region, "/"),
    suffix  = paste0("_", region)
  )
}
crop_region("GCFR", files)
crop_region("SWAFR", files)

# Check CRS
# BUG FIX: the original called raster() on the *directory* path
# paste0(giswd, "MOD13C2_GeoTiffs/", "GCFR"), which errors; inspect the CRS
# of the first cropped tiff instead.
GCFR_tifs <- list.files(
  path    = paste0(giswd, "MOD13C2_GeoTiffs/", "GCFR/"),
  pattern = "\\.tif$"
)
crs(raster(paste0(giswd, "MOD13C2_GeoTiffs/", "GCFR/", GCFR_tifs[1])))

# Reproject to std_CRS -------------------------------------------------------------
reproject_region <- function(region) {
  tif_names <- list.files(
    path    = paste0(giswd, "MOD13C2_GeoTiffs/", region, "/"),
    pattern = "\\.tif$"
  )
  # Show the pre-reprojection CRS of the first file
  print(crs(raster(paste0(giswd, "MOD13C2_GeoTiffs/", region, "/", tif_names[1]))))
  cluster <- makeCluster(detectCores() - 1, outfile = "")
  on.exit(stopCluster(cluster), add = TRUE)
  parLapply(cluster, tif_names, project_MOD13C2,
    crs     = std_CRS,
    filedir = paste0(giswd, "MOD13C2_GeoTiffs/", region, "/"),
    outdir  = paste0(giswd, "MOD13C2_GeoTiffs/", region, "/"),
    suffix  = "_std_CRS"
  )
}
reproject_region("GCFR")
reproject_region("SWAFR")

# Compute mean annual NDVI ----------------------------------------------------------

month_names <- c("Jan", "Feb", "Mar", "Apr", "May", "Jun",
                 "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")

# Bucket the reprojected tiff names into a list by year, each element named by
# month. MOD13C2 starts in Feb 2000, so 2000 gets an NA January slot; the
# 2017 download only runs Jan-Apr.
organise_files_by_year <- function(tif_names, years = 2000:2017) {
  files_by_year <- vector("list", length = length(years))
  names(files_by_year) <- years
  for (i in seq_along(years)) {
    files_by_year[[i]] <- tif_names[str_detect(tif_names, pattern = glue("A{years[i]}"))]
  }
  files_by_year[["2000"]] <- c(NA, files_by_year[["2000"]])
  for (i in seq_along(files_by_year)) {
    names(files_by_year[[i]]) <- month_names[seq_along(files_by_year[[i]])]
  }
  files_by_year
}

# Stack one calendar month across the given year indices, then write the stack
# and its mean to disk (same output filenames as the original script).
stack_and_write_month <- function(files_by_year, region, month, year_idx) {
  month_stack <- stack()
  for (i in year_idx) {
    layer <- raster(paste0(
      giswd, "MOD13C2_GeoTiffs/", region, "/", files_by_year[[i]][month]
    ))
    month_stack <- stack(month_stack, layer)
  }
  region_dir <- paste0(giswd, "MOD13C2_GeoTiffs/", region, "/")
  writeRaster(month_stack, overwrite = TRUE,
              paste0(region_dir, "MOD13C2_", region, "_std_CRS_", month, ".tif"))
  writeRaster(mean(month_stack), overwrite = TRUE,
              paste0(region_dir, "MOD13C2_", region, "_std_CRS_", month, "_mean.tif"))
  invisible(month_stack)
}

# Monthly means plus the across-month ("annual") average for one region.
# (Replaces the copy-pasted GCFR/SWAFR sections; the original SWAFR section
# confusingly reused GCFR_* variable names.)
process_region_means <- function(region) {
  region_dir <- paste0(giswd, "MOD13C2_GeoTiffs/", region, "/")
  tif_names <- list.files(path = region_dir, pattern = "std_CRS\\.tif$")
  files_by_year <- organise_files_by_year(tif_names)
  # January: 2000 has no data (NA slot), so start at the second year.
  stack_and_write_month(files_by_year, region, "Jan", year_idx = 2:18)
  # Feb-Apr exist for every year 2000-2017.
  for (month in c("Feb", "Mar", "Apr")) {
    stack_and_write_month(files_by_year, region, month, year_idx = 1:18)
  }
  # May-Dec exist only through 2016 (the 2017 download stops in April).
  for (month in c("May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")) {
    stack_and_write_month(files_by_year, region, month, year_idx = 1:17)
  }
  # Finally, make the annual average from the 12 monthly means.
  mean_files <- list.files(path = region_dir, pattern = "std_CRS_.+_mean\\.tif$")
  all_months <- stack()
  for (i in 1:12) {
    all_months <- stack(all_months, raster(paste0(region_dir, mean_files[i])))
  }
  writeRaster(all_months, overwrite = TRUE,
              paste0(region_dir, "MOD13C2_", region, "_std_CRS_all_months.tif"))
  writeRaster(mean(all_months), overwrite = TRUE,
              paste0(region_dir, "MOD13C2_", region, "_std_CRS_annual_avg.tif"))
}
process_region_means("GCFR")
process_region_means("SWAFR")

# Crop-mask to "boxes" --------------------------------------------------------------
crop_mask_annual <- function(region) {
  box <- readOGR(here::here(glue("data/derived-data/borders/{region}_box/")))
  ndvi <- raster(paste0(
    giswd, "MOD13C2_GeoTiffs/", region, "/",
    "MOD13C2_", region, "_std_CRS_annual_avg.tif"
  ))
  ndvi <- mask(crop(ndvi, box), box)
  writeRaster(ndvi, here::here(glue("data/derived-data/NDVI/{region}_NDVI.tif")))
}
crop_mask_annual("GCFR")
crop_mask_annual("SWAFR")
library(maps) library(mapproj) library(dplyr) png("/Users/gregorymatthews/Dropbox/StatsInTheWild/greenParty.png",res=300,units="in",w=20,h=10) prez<-read.csv("/Users/gregorymatthews/pres16results.csv") prez$fips<-as.numeric(as.character(prez$fips)) prez$cand<-(as.character(prez$cand)) data(county.fips) #prez$cand[!prez$cand%in%c("Donald Trump","Hillary Clinton")]<-"Other" prez<-prez[prez$cand=="Jill Stein",] prez<-prez[!is.na(as.numeric(prez[,2])),] prez<-prez[order(prez$fips),] #tots<-aggregate(prez$votes, by=list(Category=prez$fips), FUN=sum) #totsVotes<-aggregate(prez$total_votes, by=list(Category=prez$fips), FUN=min) #prez<-as.data.frame(cbind(tots,totsVotes[,2])) #names(prez)<-c("fips","votes","total_votes") prez<-merge(prez,county.fips,by.x="fips",by.y="fips",all.x=TRUE) #prez<-merge(prez[prez$st=="WY",],county.fips,by.x="fips",by.y="fips",all.x=TRUE) nbin<-100 #prez$colorBuckets <- as.numeric(cut(prez$pct*100, seq(0,100,length=nbin))) prez$colorBuckets <- as.numeric(cut((prez$pct), seq(0,1,length=nbin))) # define color buckets #colors = c("#F1EEF6", "#D4B9DA", "#C994C7", "#DF65B0", "#DD1C77", "#980043") colors <- rgb(seq(0,1,length=nbin),0,seq(1,0,length=nbin)) leg.txt <- paste0(c(1:nbin)) # align data with map definitions by (partial) matching state,county # names, which include multiple polygons for some counties cnty.fips <- county.fips$fips[match(map("county", plot=FALSE)$names,prez$polyname)] cnty.fips<-cnty.fips[!is.na(cnty.fips)] for (i in 1:length(prez[prez$st=="WY","polyname"])-1){ temp<-prez[prez$st=="WY","polyname"] tempFIPS<-prez[prez$st=="WY","fips"] cnty.fips[map("county", plot=FALSE)$names==temp[i]]<-as.numeric(as.character(tempFIPS[i])) } for (i in 1:length(prez[prez$st=="WI","polyname"])-1){ temp<-prez[prez$st=="WI","polyname"] tempFIPS<-prez[prez$st=="WI","fips"] cnty.fips[map("county", plot=FALSE)$names==temp[i]]<-as.numeric(as.character(tempFIPS[i])) } for (i in 1:length(prez[prez$st=="NM","polyname"])-1){ 
temp<-prez[prez$st=="NM","polyname"] tempFIPS<-prez[prez$st=="NM","fips"] cnty.fips[map("county", plot=FALSE)$names==temp[i]]<-as.numeric(as.character(tempFIPS[i])) } for (i in 1:length(prez[prez$st=="AZ","polyname"])-1){ temp<-prez[prez$st=="AZ","polyname"] tempFIPS<-prez[prez$st=="AZ","fips"] cnty.fips[map("county", plot=FALSE)$names==temp[i]]<-as.numeric(as.character(tempFIPS[i])) } colorsmatched <- prez$colorBuckets[match(cnty.fips, prez$fips)] prez<-prez[prez$polyname%in%map("county", plot=FALSE)$names,] col<-rep(NA,length(map("county", plot=FALSE)$names)) prez$pct<-prez$votes/prez$total_votes prez<-prez[!is.na(prez$fips),] col <- unlist(lapply(map("county", plot=FALSE)$names,function(x){ temp<-prez[prez$polyname==x,] temp$pct[temp$pct>0.1]<-0.1 if (dim(temp)[1]>0){ out<-NA if (!is.na(temp$pct)){ out<-rgb(1-temp$pct/.1,1,1-temp$pct/.1,1) } } return(out) })) # draw map map("county", col = col, fill = TRUE, resolution = 0, lty = 0, projection = "polyconic") map("state", col = "black", fill = FALSE, add = TRUE, lty = 1, lwd = 0.2, projection="polyconic") title("Election 2016: Green Party") colleg<-rgb(1-seq(0,0.1,length=5)/0.1,1,1-seq(0,0.1,length=5)/0.1,1) leg.txt<-paste0(c(round(100*seq(0,0.1,length=5),2)),"%") legend("bottomright", leg.txt, horiz = FALSE, fill = colleg,cex=2) #colleg<-rgb(0.5,0,0.5,seq(0,50000,length=9)/50000) #leg.txt<-paste0(round(seq(0,50000,length=9),2)) #leg.txt[9]<-paste0(">",leg.txt[9]) #legend("bottomleft", leg.txt, horiz = FALSE, fill = colleg,cex=2) dev.off()
/GreenParty.R
no_license
gjm112/prez2016dataViz
R
false
false
3,563
r
library(maps) library(mapproj) library(dplyr) png("/Users/gregorymatthews/Dropbox/StatsInTheWild/greenParty.png",res=300,units="in",w=20,h=10) prez<-read.csv("/Users/gregorymatthews/pres16results.csv") prez$fips<-as.numeric(as.character(prez$fips)) prez$cand<-(as.character(prez$cand)) data(county.fips) #prez$cand[!prez$cand%in%c("Donald Trump","Hillary Clinton")]<-"Other" prez<-prez[prez$cand=="Jill Stein",] prez<-prez[!is.na(as.numeric(prez[,2])),] prez<-prez[order(prez$fips),] #tots<-aggregate(prez$votes, by=list(Category=prez$fips), FUN=sum) #totsVotes<-aggregate(prez$total_votes, by=list(Category=prez$fips), FUN=min) #prez<-as.data.frame(cbind(tots,totsVotes[,2])) #names(prez)<-c("fips","votes","total_votes") prez<-merge(prez,county.fips,by.x="fips",by.y="fips",all.x=TRUE) #prez<-merge(prez[prez$st=="WY",],county.fips,by.x="fips",by.y="fips",all.x=TRUE) nbin<-100 #prez$colorBuckets <- as.numeric(cut(prez$pct*100, seq(0,100,length=nbin))) prez$colorBuckets <- as.numeric(cut((prez$pct), seq(0,1,length=nbin))) # define color buckets #colors = c("#F1EEF6", "#D4B9DA", "#C994C7", "#DF65B0", "#DD1C77", "#980043") colors <- rgb(seq(0,1,length=nbin),0,seq(1,0,length=nbin)) leg.txt <- paste0(c(1:nbin)) # align data with map definitions by (partial) matching state,county # names, which include multiple polygons for some counties cnty.fips <- county.fips$fips[match(map("county", plot=FALSE)$names,prez$polyname)] cnty.fips<-cnty.fips[!is.na(cnty.fips)] for (i in 1:length(prez[prez$st=="WY","polyname"])-1){ temp<-prez[prez$st=="WY","polyname"] tempFIPS<-prez[prez$st=="WY","fips"] cnty.fips[map("county", plot=FALSE)$names==temp[i]]<-as.numeric(as.character(tempFIPS[i])) } for (i in 1:length(prez[prez$st=="WI","polyname"])-1){ temp<-prez[prez$st=="WI","polyname"] tempFIPS<-prez[prez$st=="WI","fips"] cnty.fips[map("county", plot=FALSE)$names==temp[i]]<-as.numeric(as.character(tempFIPS[i])) } for (i in 1:length(prez[prez$st=="NM","polyname"])-1){ 
temp<-prez[prez$st=="NM","polyname"] tempFIPS<-prez[prez$st=="NM","fips"] cnty.fips[map("county", plot=FALSE)$names==temp[i]]<-as.numeric(as.character(tempFIPS[i])) } for (i in 1:length(prez[prez$st=="AZ","polyname"])-1){ temp<-prez[prez$st=="AZ","polyname"] tempFIPS<-prez[prez$st=="AZ","fips"] cnty.fips[map("county", plot=FALSE)$names==temp[i]]<-as.numeric(as.character(tempFIPS[i])) } colorsmatched <- prez$colorBuckets[match(cnty.fips, prez$fips)] prez<-prez[prez$polyname%in%map("county", plot=FALSE)$names,] col<-rep(NA,length(map("county", plot=FALSE)$names)) prez$pct<-prez$votes/prez$total_votes prez<-prez[!is.na(prez$fips),] col <- unlist(lapply(map("county", plot=FALSE)$names,function(x){ temp<-prez[prez$polyname==x,] temp$pct[temp$pct>0.1]<-0.1 if (dim(temp)[1]>0){ out<-NA if (!is.na(temp$pct)){ out<-rgb(1-temp$pct/.1,1,1-temp$pct/.1,1) } } return(out) })) # draw map map("county", col = col, fill = TRUE, resolution = 0, lty = 0, projection = "polyconic") map("state", col = "black", fill = FALSE, add = TRUE, lty = 1, lwd = 0.2, projection="polyconic") title("Election 2016: Green Party") colleg<-rgb(1-seq(0,0.1,length=5)/0.1,1,1-seq(0,0.1,length=5)/0.1,1) leg.txt<-paste0(c(round(100*seq(0,0.1,length=5),2)),"%") legend("bottomright", leg.txt, horiz = FALSE, fill = colleg,cex=2) #colleg<-rgb(0.5,0,0.5,seq(0,50000,length=9)/50000) #leg.txt<-paste0(round(seq(0,50000,length=9),2)) #leg.txt[9]<-paste0(">",leg.txt[9]) #legend("bottomleft", leg.txt, horiz = FALSE, fill = colleg,cex=2) dev.off()
skip_on_cran() # Setup for testing ------------------------------------------------------- futile.logger::flog.threshold("FATAL") reported_cases <- EpiNow2::example_confirmed[1:50] generation_time <- get_generation_time(disease = "SARS-CoV-2", source = "ganyani", max_value = 10) incubation_period <- get_incubation_period(disease = "SARS-CoV-2", source = "lauer", max_value = 10) reporting_delay <- list( mean = convert_to_logmean(2, 1), mean_sd = 0.1, sd = convert_to_logsd(2, 1), sd_sd = 0.1, max = 10 ) library(data.table) out <- suppressWarnings(estimate_infections(reported_cases, generation_time = generation_time, delays = delay_opts(reporting_delay), gp = NULL, rt = rt_opts(rw = 14), stan = stan_opts( chains = 2, warmup = 100, samples = 100, control = list(adapt_delta = 0.9) ), verbose = FALSE )) test_that("simulate_infections works to simulate a passed in estimate_infections object", { sims <- simulate_infections(out) expect_equal(names(sims), c("samples", "summarised", "observations")) }) test_that("simulate_infections works to simulate a passed in estimate_infections object with an adjusted Rt", { R <- c(rep(NA_real_, 40), rep(0.5, 9)) sims <- simulate_infections(out, R) expect_equal(names(sims), c("samples", "summarised", "observations")) expect_equal(tail(sims$summarised[variable == "R"]$median, 9), rep(0.5, 9)) }) test_that("simulate_infections works to simulate a passed in estimate_infections object with a short adjusted Rt", { R <- c(rep(NA_real_, 40), rep(0.5, 10)) sims <- simulate_infections(out, R) expect_equal(names(sims), c("samples", "summarised", "observations")) expect_equal(tail(sims$summarised[variable == "R"]$median, 9), rep(0.5, 9)) }) test_that("simulate_infections works to simulate a passed in estimate_infections object with a long adjusted Rt", { R <- c(rep(NA_real_, 40), rep(1.2, 15), rep(0.8, 15)) sims <- simulate_infections(out, R) sims10 <- simulate_infections(out, R, samples = 10) expect_equal(names(sims), c("samples", "summarised", 
"observations")) expect_equal(tail(sims$summarised[variable == "R"]$median, 30), R[41:70]) }) test_that("simulate infections can be run with a limited number of samples", { R <- c(rep(NA_real_, 40), rep(1.2, 15), rep(0.8, 15)) sims <- simulate_infections(out, R, samples = 10) expect_equal(names(sims), c("samples", "summarised", "observations")) expect_equal(tail(sims$summarised[variable == "R"]$median, 30), R[41:70]) expect_equal(max(sims$samples$sample), 10) }) test_that("simulate infections fails as expected", { expect_error(simulate_infections()) expect_error(simulate_infections(out[-"fit"])) }) test_that("simulate_infections works to simulate a passed in estimate_infections object with an adjusted Rt in data frame", { R <- c(rep(1.4, 32), rep(0.5, 17)) R_dt <- data.frame(date = summary(out, type = "parameters", param = "R")$date, value = R) sims_dt <- simulate_infections(out, R_dt) expect_equal(names(sims_dt), c("samples", "summarised", "observations")) }) test_that("simulate_infections works to simulate a passed in estimate_infections object with samples of Rt in a data frame", { R_samples <- summary(out, type = "samples", param = "R") R_samples <- R_samples[, .(date, sample, value)][sample <= 1000] R_samples <- R_samples[date >= "2020-04-01", value := 1.1] sims_sample <- simulate_infections(out, R_samples) expect_equal(names(sims_sample), c("samples", "summarised", "observations")) })
/tests/testthat/test-simulate_infections.R
permissive
cran/EpiNow2
R
false
false
3,506
r
skip_on_cran() # Setup for testing ------------------------------------------------------- futile.logger::flog.threshold("FATAL") reported_cases <- EpiNow2::example_confirmed[1:50] generation_time <- get_generation_time(disease = "SARS-CoV-2", source = "ganyani", max_value = 10) incubation_period <- get_incubation_period(disease = "SARS-CoV-2", source = "lauer", max_value = 10) reporting_delay <- list( mean = convert_to_logmean(2, 1), mean_sd = 0.1, sd = convert_to_logsd(2, 1), sd_sd = 0.1, max = 10 ) library(data.table) out <- suppressWarnings(estimate_infections(reported_cases, generation_time = generation_time, delays = delay_opts(reporting_delay), gp = NULL, rt = rt_opts(rw = 14), stan = stan_opts( chains = 2, warmup = 100, samples = 100, control = list(adapt_delta = 0.9) ), verbose = FALSE )) test_that("simulate_infections works to simulate a passed in estimate_infections object", { sims <- simulate_infections(out) expect_equal(names(sims), c("samples", "summarised", "observations")) }) test_that("simulate_infections works to simulate a passed in estimate_infections object with an adjusted Rt", { R <- c(rep(NA_real_, 40), rep(0.5, 9)) sims <- simulate_infections(out, R) expect_equal(names(sims), c("samples", "summarised", "observations")) expect_equal(tail(sims$summarised[variable == "R"]$median, 9), rep(0.5, 9)) }) test_that("simulate_infections works to simulate a passed in estimate_infections object with a short adjusted Rt", { R <- c(rep(NA_real_, 40), rep(0.5, 10)) sims <- simulate_infections(out, R) expect_equal(names(sims), c("samples", "summarised", "observations")) expect_equal(tail(sims$summarised[variable == "R"]$median, 9), rep(0.5, 9)) }) test_that("simulate_infections works to simulate a passed in estimate_infections object with a long adjusted Rt", { R <- c(rep(NA_real_, 40), rep(1.2, 15), rep(0.8, 15)) sims <- simulate_infections(out, R) sims10 <- simulate_infections(out, R, samples = 10) expect_equal(names(sims), c("samples", "summarised", 
"observations")) expect_equal(tail(sims$summarised[variable == "R"]$median, 30), R[41:70]) }) test_that("simulate infections can be run with a limited number of samples", { R <- c(rep(NA_real_, 40), rep(1.2, 15), rep(0.8, 15)) sims <- simulate_infections(out, R, samples = 10) expect_equal(names(sims), c("samples", "summarised", "observations")) expect_equal(tail(sims$summarised[variable == "R"]$median, 30), R[41:70]) expect_equal(max(sims$samples$sample), 10) }) test_that("simulate infections fails as expected", { expect_error(simulate_infections()) expect_error(simulate_infections(out[-"fit"])) }) test_that("simulate_infections works to simulate a passed in estimate_infections object with an adjusted Rt in data frame", { R <- c(rep(1.4, 32), rep(0.5, 17)) R_dt <- data.frame(date = summary(out, type = "parameters", param = "R")$date, value = R) sims_dt <- simulate_infections(out, R_dt) expect_equal(names(sims_dt), c("samples", "summarised", "observations")) }) test_that("simulate_infections works to simulate a passed in estimate_infections object with samples of Rt in a data frame", { R_samples <- summary(out, type = "samples", param = "R") R_samples <- R_samples[, .(date, sample, value)][sample <= 1000] R_samples <- R_samples[date >= "2020-04-01", value := 1.1] sims_sample <- simulate_infections(out, R_samples) expect_equal(names(sims_sample), c("samples", "summarised", "observations")) })
###################################################################### # # read.cross.csv.R # # copyright (c) 2000-2010, Karl W Broman # last modified Apr, 2010 # first written Aug, 2000 # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License, # version 3, as published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but without any warranty; without even the implied warranty of # merchantability or fitness for a particular purpose. See the GNU # General Public License, version 3, for more details. # # A copy of the GNU General Public License, version 3, is available # at http://www.r-project.org/Licenses/GPL-3 # # Part of the R/qtl package # Contains: read.cross.csv # [See read.cross.R for the main read.cross function.] # ###################################################################### ###################################################################### # # read.cross.csv # # read data in comma-delimited format # ###################################################################### read.cross.csv <- function(dir, file, na.strings=c("-","NA"), genotypes=c("A","H","B","D","C"), estimate.map=TRUE, rotate=FALSE, ...) { # create file names if(missing(file)) file <- "data.csv" if(!missing(dir) && dir != "") { file <- file.path(dir, file) } args <- list(...) # if user wants to use comma for decimal point, we need if(length(args) > 0 && "dec" %in% names(args)) { dec <- args[["dec"]] } else dec <- "." # read the data file if(length(args) < 1 || !("sep" %in% names(args))) { # "sep" not in the "..." argument and so take sep="," if(length(args) < 1 || !("comment.char" %in% names(args))) data <- read.table(file, sep=",", na.strings=na.strings, colClasses="character", fill=TRUE, blank.lines.skip=TRUE, comment.char="", ...) else data <- read.table(file, sep=",", na.strings=na.strings, colClasses="character", fill=TRUE, blank.lines.skip=TRUE, ...) 
} else { if(length(args) < 1 || !("comment.char" %in% names(args))) data <- read.table(file, na.strings=na.strings, colClasses="character", fill=TRUE, blank.lines.skip=TRUE, comment.char="", ...) else data <- read.table(file, na.strings=na.strings, colClasses="character", fill=TRUE, blank.lines.skip=TRUE, ...) } if(rotate) data <- as.data.frame(t(data), stringsAsFactors=FALSE) # determine number of phenotypes based on initial blanks in row 2 if(length(grep("^\\s*$", data[2,1]))==0) stop("You must include at least one phenotype (e.g., an index).") empty <- grep("^\\s*$", data[2,]) n.phe <- min((1:ncol(data))[-empty])-1 # Is map included? yes if first n.phe columns in row 3 are all blank empty <- rep(FALSE, n.phe) empty[grep("^\\s*$", data[3,1:n.phe])] <- TRUE if(all(!is.na(data[3,1:n.phe]) & empty)) { map.included <- TRUE map <- asnumericwithdec(unlist(data[3,-(1:n.phe)]), dec=dec) if(any(is.na(map))) { temp <- unique(unlist(data[3,-(1:n.phe)])[is.na(map)]) stop(paste("There are missing marker positions.\n", " In particular, we see these values: ", paste("\"", temp, "\"", collapse=" ", sep=""))) } nondatrow <- 3 } else { map.included <- FALSE map <- rep(0,ncol(data)-n.phe) nondatrow <- 2 # last non-data row } pheno <- as.data.frame(data[-(1:nondatrow),1:n.phe,drop=FALSE]) colnames(pheno) <- data[1,1:n.phe] # replace empty cells with NA data <- sapply(data,function(a) { a[!is.na(a) & a==""] <- NA; a }) # pull apart phenotypes, genotypes and map mnames <- data[1,-(1:n.phe)] if(any(is.na(mnames))) stop("There are missing marker names.") chr <- data[2,-(1:n.phe)] if(any(is.na(chr))) stop("There are missing chromosome IDs.") if(length(genotypes) > 0) { # look for strange entries in the genotype data temp <- unique(as.character(data[-(1:nondatrow),-(1:n.phe),drop=FALSE])) temp <- temp[!is.na(temp)] wh <- !(temp %in% genotypes) if(any(wh)) { warn <- "The following unexpected genotype codes were treated as missing.\n " ge <- paste("|", 
paste(temp[wh],collapse="|"),"|",sep="") warn <- paste(warn,ge,"\n",sep="") warning(warn) } # convert genotype data allgeno <- matrix(match(data[-(1:nondatrow),-(1:n.phe)],genotypes), ncol=ncol(data)-n.phe) } else allgeno <- matrix(as.numeric(data[-(1:nondatrow),-(1:n.phe)]), ncol=ncol(data)-n.phe) # Fix up phenotypes sw2numeric <- function(x, dec) { wh1 <- is.na(x) n <- sum(!is.na(x)) y <- suppressWarnings(asnumericwithdec(as.character(x), dec)) wh2 <- is.na(y) m <- sum(!is.na(y)) if(n==m || (n-m) < 2 || (n-m) < n*0.05) { if(sum(!wh1 & wh2) > 0) { u <- unique(as.character(x[!wh1 & wh2])) if(length(u) > 1) { themessage <- paste("The phenotype values", paste("\"", u, "\"", sep="", collapse=" ")) themessage <- paste(themessage, " were", sep="") } else { themessage <- paste("The phenotype value \"", u, "\" ", sep="") themessage <- paste(themessage, " was", sep="") } themessage <- paste(themessage, "interpreted as missing.") warning(themessage) } return(y) } else return(x) } pheno <- data.frame(lapply(pheno, sw2numeric, dec=dec)) # re-order the markers by chr and position # try to figure out the chr labels if(all(chr %in% c(1:999,"X","x"))) { # 1...19 + X tempchr <- chr tempchr[chr=="X" | chr=="x"] <- 1000 tempchr <- as.numeric(tempchr) if(map.included) neworder <- order(tempchr, map) else neworder <- order(tempchr) chr <- chr[neworder] map <- map[neworder] allgeno <- allgeno[,neworder,drop=FALSE] mnames <- mnames[neworder] } # fix up dummy map if(!map.included) { map <- split(rep(0,length(chr)),chr)[unique(chr)] map <- unlist(lapply(map,function(a) seq(0,length=length(a),by=5))) names(map) <- NULL } # fix up map information # number of chromosomes uchr <- unique(chr) n.chr <- length(uchr) geno <- vector("list",n.chr) names(geno) <- uchr min.mar <- 1 allautogeno <- NULL for(i in 1:n.chr) { # loop over chromosomes # create map temp.map <- map[chr==uchr[i]] names(temp.map) <- mnames[chr==uchr[i]] # pull out appropriate portion of genotype data data <- 
allgeno[,min.mar:(length(temp.map)+min.mar-1),drop=FALSE] min.mar <- min.mar + length(temp.map) colnames(data) <- names(temp.map) geno[[i]] <- list(data=data,map=temp.map) if(uchr[i] == "X" || uchr[i] == "x") class(geno[[i]]) <- "X" else { class(geno[[i]]) <- "A" if(is.null(allautogeno)) allautogeno <- data else allautogeno <- cbind(allautogeno,data) } } if(is.null(allautogeno)) allautogeno <- allgeno # check that data dimensions match n.mar1 <- sapply(geno,function(a) ncol(a$data)) n.mar2 <- sapply(geno,function(a) length(a$map)) n.phe <- ncol(pheno) n.ind1 <- nrow(pheno) n.ind2 <- sapply(geno,function(a) nrow(a$data)) if(any(n.ind1 != n.ind2)) { cat(n.ind1,n.ind2,"\n") stop("Number of individuals in genotypes and phenotypes do not match."); } if(any(n.mar1 != n.mar2)) { cat(n.mar1,n.mar2,"\n") stop("Numbers of markers in genotypes and marker names files do not match."); } # print some information about the amount of data read cat(" --Read the following data:\n"); cat("\t", n.ind1, " individuals\n"); cat("\t", sum(n.mar1), " markers\n"); cat("\t", n.phe, " phenotypes\n"); if(all(is.na(allgeno))) warning("There is no genotype data!\n") # determine map type: f2 or bc or 4way? 
if(all(is.na(allautogeno)) || max(allautogeno,na.rm=TRUE)<=2) type <- "bc" else if(max(allautogeno,na.rm=TRUE)<=5) type <- "f2" else type <- "4way" cross <- list(geno=geno,pheno=pheno) class(cross) <- c(type,"cross") # check that nothing is strange in the genotype data cross.type <- class(cross)[1] if(cross.type=="f2") max.gen <- 5 else if(cross.type=="bc") max.gen <- 2 else max.gen <- 14 # check that markers are in proper order # if not, fix up the order for(i in 1:n.chr) { if(any(diff(cross$geno[[i]]$map)<0)) { o <- order(cross$geno[[i]]$map) cross$geno[[i]]$map <- cross$geno[[i]]$map[o] cross$geno[[i]]$data <- cross$geno[[i]]$data[,o,drop=FALSE] } } # if 4-way cross, make the maps matrices if(type=="4way") { for(i in 1:n.chr) cross$geno[[i]]$map <- rbind(cross$geno[[i]]$map, cross$geno[[i]]$map) } # estimate genetic map if(estimate.map && !map.included) estmap <- TRUE else estmap <- FALSE # return cross + indicator of whether to run est.map list(cross,estmap) } # end of read.cross.csv.R
/R/read.cross.csv.R
no_license
pjotrp/rqtl-mqm
R
false
false
9,287
r
###################################################################### # # read.cross.csv.R # # copyright (c) 2000-2010, Karl W Broman # last modified Apr, 2010 # first written Aug, 2000 # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License, # version 3, as published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but without any warranty; without even the implied warranty of # merchantability or fitness for a particular purpose. See the GNU # General Public License, version 3, for more details. # # A copy of the GNU General Public License, version 3, is available # at http://www.r-project.org/Licenses/GPL-3 # # Part of the R/qtl package # Contains: read.cross.csv # [See read.cross.R for the main read.cross function.] # ###################################################################### ###################################################################### # # read.cross.csv # # read data in comma-delimited format # ###################################################################### read.cross.csv <- function(dir, file, na.strings=c("-","NA"), genotypes=c("A","H","B","D","C"), estimate.map=TRUE, rotate=FALSE, ...) { # create file names if(missing(file)) file <- "data.csv" if(!missing(dir) && dir != "") { file <- file.path(dir, file) } args <- list(...) # if user wants to use comma for decimal point, we need if(length(args) > 0 && "dec" %in% names(args)) { dec <- args[["dec"]] } else dec <- "." # read the data file if(length(args) < 1 || !("sep" %in% names(args))) { # "sep" not in the "..." argument and so take sep="," if(length(args) < 1 || !("comment.char" %in% names(args))) data <- read.table(file, sep=",", na.strings=na.strings, colClasses="character", fill=TRUE, blank.lines.skip=TRUE, comment.char="", ...) else data <- read.table(file, sep=",", na.strings=na.strings, colClasses="character", fill=TRUE, blank.lines.skip=TRUE, ...) 
} else { if(length(args) < 1 || !("comment.char" %in% names(args))) data <- read.table(file, na.strings=na.strings, colClasses="character", fill=TRUE, blank.lines.skip=TRUE, comment.char="", ...) else data <- read.table(file, na.strings=na.strings, colClasses="character", fill=TRUE, blank.lines.skip=TRUE, ...) } if(rotate) data <- as.data.frame(t(data), stringsAsFactors=FALSE) # determine number of phenotypes based on initial blanks in row 2 if(length(grep("^\\s*$", data[2,1]))==0) stop("You must include at least one phenotype (e.g., an index).") empty <- grep("^\\s*$", data[2,]) n.phe <- min((1:ncol(data))[-empty])-1 # Is map included? yes if first n.phe columns in row 3 are all blank empty <- rep(FALSE, n.phe) empty[grep("^\\s*$", data[3,1:n.phe])] <- TRUE if(all(!is.na(data[3,1:n.phe]) & empty)) { map.included <- TRUE map <- asnumericwithdec(unlist(data[3,-(1:n.phe)]), dec=dec) if(any(is.na(map))) { temp <- unique(unlist(data[3,-(1:n.phe)])[is.na(map)]) stop(paste("There are missing marker positions.\n", " In particular, we see these values: ", paste("\"", temp, "\"", collapse=" ", sep=""))) } nondatrow <- 3 } else { map.included <- FALSE map <- rep(0,ncol(data)-n.phe) nondatrow <- 2 # last non-data row } pheno <- as.data.frame(data[-(1:nondatrow),1:n.phe,drop=FALSE]) colnames(pheno) <- data[1,1:n.phe] # replace empty cells with NA data <- sapply(data,function(a) { a[!is.na(a) & a==""] <- NA; a }) # pull apart phenotypes, genotypes and map mnames <- data[1,-(1:n.phe)] if(any(is.na(mnames))) stop("There are missing marker names.") chr <- data[2,-(1:n.phe)] if(any(is.na(chr))) stop("There are missing chromosome IDs.") if(length(genotypes) > 0) { # look for strange entries in the genotype data temp <- unique(as.character(data[-(1:nondatrow),-(1:n.phe),drop=FALSE])) temp <- temp[!is.na(temp)] wh <- !(temp %in% genotypes) if(any(wh)) { warn <- "The following unexpected genotype codes were treated as missing.\n " ge <- paste("|", 
paste(temp[wh],collapse="|"),"|",sep="") warn <- paste(warn,ge,"\n",sep="") warning(warn) } # convert genotype data allgeno <- matrix(match(data[-(1:nondatrow),-(1:n.phe)],genotypes), ncol=ncol(data)-n.phe) } else allgeno <- matrix(as.numeric(data[-(1:nondatrow),-(1:n.phe)]), ncol=ncol(data)-n.phe) # Fix up phenotypes sw2numeric <- function(x, dec) { wh1 <- is.na(x) n <- sum(!is.na(x)) y <- suppressWarnings(asnumericwithdec(as.character(x), dec)) wh2 <- is.na(y) m <- sum(!is.na(y)) if(n==m || (n-m) < 2 || (n-m) < n*0.05) { if(sum(!wh1 & wh2) > 0) { u <- unique(as.character(x[!wh1 & wh2])) if(length(u) > 1) { themessage <- paste("The phenotype values", paste("\"", u, "\"", sep="", collapse=" ")) themessage <- paste(themessage, " were", sep="") } else { themessage <- paste("The phenotype value \"", u, "\" ", sep="") themessage <- paste(themessage, " was", sep="") } themessage <- paste(themessage, "interpreted as missing.") warning(themessage) } return(y) } else return(x) } pheno <- data.frame(lapply(pheno, sw2numeric, dec=dec)) # re-order the markers by chr and position # try to figure out the chr labels if(all(chr %in% c(1:999,"X","x"))) { # 1...19 + X tempchr <- chr tempchr[chr=="X" | chr=="x"] <- 1000 tempchr <- as.numeric(tempchr) if(map.included) neworder <- order(tempchr, map) else neworder <- order(tempchr) chr <- chr[neworder] map <- map[neworder] allgeno <- allgeno[,neworder,drop=FALSE] mnames <- mnames[neworder] } # fix up dummy map if(!map.included) { map <- split(rep(0,length(chr)),chr)[unique(chr)] map <- unlist(lapply(map,function(a) seq(0,length=length(a),by=5))) names(map) <- NULL } # fix up map information # number of chromosomes uchr <- unique(chr) n.chr <- length(uchr) geno <- vector("list",n.chr) names(geno) <- uchr min.mar <- 1 allautogeno <- NULL for(i in 1:n.chr) { # loop over chromosomes # create map temp.map <- map[chr==uchr[i]] names(temp.map) <- mnames[chr==uchr[i]] # pull out appropriate portion of genotype data data <- 
allgeno[,min.mar:(length(temp.map)+min.mar-1),drop=FALSE] min.mar <- min.mar + length(temp.map) colnames(data) <- names(temp.map) geno[[i]] <- list(data=data,map=temp.map) if(uchr[i] == "X" || uchr[i] == "x") class(geno[[i]]) <- "X" else { class(geno[[i]]) <- "A" if(is.null(allautogeno)) allautogeno <- data else allautogeno <- cbind(allautogeno,data) } } if(is.null(allautogeno)) allautogeno <- allgeno # check that data dimensions match n.mar1 <- sapply(geno,function(a) ncol(a$data)) n.mar2 <- sapply(geno,function(a) length(a$map)) n.phe <- ncol(pheno) n.ind1 <- nrow(pheno) n.ind2 <- sapply(geno,function(a) nrow(a$data)) if(any(n.ind1 != n.ind2)) { cat(n.ind1,n.ind2,"\n") stop("Number of individuals in genotypes and phenotypes do not match."); } if(any(n.mar1 != n.mar2)) { cat(n.mar1,n.mar2,"\n") stop("Numbers of markers in genotypes and marker names files do not match."); } # print some information about the amount of data read cat(" --Read the following data:\n"); cat("\t", n.ind1, " individuals\n"); cat("\t", sum(n.mar1), " markers\n"); cat("\t", n.phe, " phenotypes\n"); if(all(is.na(allgeno))) warning("There is no genotype data!\n") # determine map type: f2 or bc or 4way? 
if(all(is.na(allautogeno)) || max(allautogeno,na.rm=TRUE)<=2) type <- "bc" else if(max(allautogeno,na.rm=TRUE)<=5) type <- "f2" else type <- "4way" cross <- list(geno=geno,pheno=pheno) class(cross) <- c(type,"cross") # check that nothing is strange in the genotype data cross.type <- class(cross)[1] if(cross.type=="f2") max.gen <- 5 else if(cross.type=="bc") max.gen <- 2 else max.gen <- 14 # check that markers are in proper order # if not, fix up the order for(i in 1:n.chr) { if(any(diff(cross$geno[[i]]$map)<0)) { o <- order(cross$geno[[i]]$map) cross$geno[[i]]$map <- cross$geno[[i]]$map[o] cross$geno[[i]]$data <- cross$geno[[i]]$data[,o,drop=FALSE] } } # if 4-way cross, make the maps matrices if(type=="4way") { for(i in 1:n.chr) cross$geno[[i]]$map <- rbind(cross$geno[[i]]$map, cross$geno[[i]]$map) } # estimate genetic map if(estimate.map && !map.included) estmap <- TRUE else estmap <- FALSE # return cross + indicator of whether to run est.map list(cross,estmap) } # end of read.cross.csv.R
library(data.table) options( java.parameters = "-Xmx50g" ) library(tm) library(qdap) library(openNLP) library(RWeka) ### ## Load all documents from the local repo US_English. ### en_US_path <- file.path ( "." , "en_US" ) en_US.corpus <- Corpus(DirSource(en_US_path, encoding="UTF-8"), readerControl=list(language="eng", reader=readPlain)) ## Check the lengths of each corpus. lengths <- sapply(1:3,function(i) length(en_US.corpus[[i]]$content));lengths ### ## Create the training corpus which consists of 80% of each document. ### en_US.corpus[[1]]$content <- en_US.corpus[[1]]$content[1:ceiling(.8*length(en_US.corpus[[1]]$content))] en_US.corpus[[2]]$content <- en_US.corpus[[2]]$content[1:ceiling(.8*length(en_US.corpus[[2]]$content))] en_US.corpus[[3]]$content <- en_US.corpus[[3]]$content[1:ceiling(.8*length(en_US.corpus[[3]]$content))] lengths <- sapply(1:3,function(i) length(en_US.corpus[[i]]$content));lengths ## Combine the three corpora. text <- c(en_US.corpus[[1]]$content, en_US.corpus[[2]]$content, en_US.corpus[[3]]$content) rm(en_US.corpus) ### ## Clean the corpus. ### corpus <- VCorpus(VectorSource(text)); ## Remove the text since it has becomes redundant and call the garbage collector. rm(text); gc(); save(corpus, file="rawCorpus.RData") ## Convert all character into utf8 encoding. corpus <- tm_map(corpus, content_transformer(function(x) iconv(enc2utf8(x), sub = "byte"))) ## Convert all character into lower cases. corpus <- tm_map(corpus, content_transformer(tolower)) ## Replace the specified symbols by a white space. corpus <- tm_map(corpus, content_transformer(function(x, pattern) gsub(pattern, " ", x)), "/|@|\\|~|_|\\*|#|%|\\^|&") ## Eliminate the extra white spaces. corpus <- tm_map(corpus, stripWhitespace) ## Remove the number in the corpus. corpus <- tm_map(corpus, removeNumbers) ## Break the corpus down into a vector having a single document as an entry. 
corpus <- as.vector(unlist(sapply(corpus, '[',"content"))) save(corpus, file ="corpus.RData") load("corpus.RData") ### ## Tokenize the corpus into 1-, 2-, 3- and 4-grams and tabulates the results ## accordingly. ## ## The corpus is broken down into subcorpora during the process due to its size. ## ## The n-gram table is updated on each iteration to minimize the memory space ## occupied. ### # Tokenization for unigram(1-gram). unigram <- data.table() n <- 5 k <- ceiling(length(corpus)/n) for(i in 1:n){ start <- (i-1)*k+1 end <- i*k if(i==n) end <- length(corpus) tempTokens <- NGramTokenizer(corpus[start:end], Weka_control(min=1,max=1)) tempTable <- data.table(word=tempTokens, freq=c(1)) unigram <- rbind(unigram, tempTable) setkey(unigram, word) unigram <- unigram[, sum(freq), by=word] setnames(unigram, names(unigram), c("word","freq")) } ## Check for NA and empty entries. which(unigram$word==""); which(unigram$word==NA) ## Remove the empty entry in the unigram model unigram <- unigram[-which(unigram$word==""), ] save(unigram, file="Grand_Unigram.RData") rm(tempTokens, tempTable, unigram); gc() ## Tokenization for bigram(2-gram). bigram <- data.table() n <- 10 k <- ceiling(length(corpus)/n) for(i in 1:n){ start <- (i-1)*k+1 end <- i*k if(i==n) end <- length(corpus) tempTokens <- NGramTokenizer(corpus[start:end], Weka_control(min=2,max=2)) tempTable <- data.table(word=tempTokens, freq=c(1)) bigram <- rbind(bigram, tempTable) setkey(bigram, word) bigram <- bigram[, sum(freq), by=word] setnames(bigram, names(bigram), c("word","freq")) } ## Check for NA and empty entries. which(bigram$word==""); which(bigram$word==NA) save(bigram, file="Grand_Bigram.RData") rm(tempTokens, tempTable, bigram); gc() ## Tokenization for trigram(3-gram). 
trigram <- data.table() n <- 20 k <- ceiling(length(corpus)/n) for(i in 1:n){ print(i) start <- (i-1)*k+1 end <- i*k if(i==n) end <- length(corpus) tempTokens <- NGramTokenizer(corpus[start:end], Weka_control(min=3,max=3)) tempTable <- data.table(word=tempTokens, freq=c(1)) trigram <- rbind(trigram, tempTable) setkey(trigram, word) trigram <- trigram[, sum(freq), by=word] setnames(trigram, names(trigram), c("word","freq")) } ## Check for NA and empty entries. which(trigram$word==""); which(trigram$word==NA) save(trigram, file="Grand_Trigram.RData") rm(tempTokens, tempTable, trigram); gc() ## Tokenization for quadgram(4-gram). quadgram <- data.table() n <- 25 k <- ceiling(length(corpus)/n) for(i in 1:n){ print(i) start <- (i-1)*k+1 end <- i*k if(i==n) end <- length(corpus) tempTokens <- NGramTokenizer(corpus[start:end], Weka_control(min=4,max=4)) tempTable <- data.table(word=tempTokens, freq=c(1)) quadgram <- rbind(quadgram, tempTable) setkey(quadgram, word) quadgram <- quadgram[, sum(freq), by=word] setnames(quadgram, names(quadgram), c("word","freq")) } ## Check for NA and empty entries. which(quadgram$word==""); which(quadgram$word==NA) save(quadgram, file="Grand_Quadgram.RData") rm(tempTokens, tempTable, quadgram, corpus); gc()
/preprocess.R
no_license
SamanthaLui/Next-Word-Prediction-App
R
false
false
5,367
r
library(data.table) options( java.parameters = "-Xmx50g" ) library(tm) library(qdap) library(openNLP) library(RWeka) ### ## Load all documents from the local repo US_English. ### en_US_path <- file.path ( "." , "en_US" ) en_US.corpus <- Corpus(DirSource(en_US_path, encoding="UTF-8"), readerControl=list(language="eng", reader=readPlain)) ## Check the lengths of each corpus. lengths <- sapply(1:3,function(i) length(en_US.corpus[[i]]$content));lengths ### ## Create the training corpus which consists of 80% of each document. ### en_US.corpus[[1]]$content <- en_US.corpus[[1]]$content[1:ceiling(.8*length(en_US.corpus[[1]]$content))] en_US.corpus[[2]]$content <- en_US.corpus[[2]]$content[1:ceiling(.8*length(en_US.corpus[[2]]$content))] en_US.corpus[[3]]$content <- en_US.corpus[[3]]$content[1:ceiling(.8*length(en_US.corpus[[3]]$content))] lengths <- sapply(1:3,function(i) length(en_US.corpus[[i]]$content));lengths ## Combine the three corpora. text <- c(en_US.corpus[[1]]$content, en_US.corpus[[2]]$content, en_US.corpus[[3]]$content) rm(en_US.corpus) ### ## Clean the corpus. ### corpus <- VCorpus(VectorSource(text)); ## Remove the text since it has becomes redundant and call the garbage collector. rm(text); gc(); save(corpus, file="rawCorpus.RData") ## Convert all character into utf8 encoding. corpus <- tm_map(corpus, content_transformer(function(x) iconv(enc2utf8(x), sub = "byte"))) ## Convert all character into lower cases. corpus <- tm_map(corpus, content_transformer(tolower)) ## Replace the specified symbols by a white space. corpus <- tm_map(corpus, content_transformer(function(x, pattern) gsub(pattern, " ", x)), "/|@|\\|~|_|\\*|#|%|\\^|&") ## Eliminate the extra white spaces. corpus <- tm_map(corpus, stripWhitespace) ## Remove the number in the corpus. corpus <- tm_map(corpus, removeNumbers) ## Break the corpus down into a vector having a single document as an entry. 
corpus <- as.vector(unlist(sapply(corpus, '[',"content"))) save(corpus, file ="corpus.RData") load("corpus.RData") ### ## Tokenize the corpus into 1-, 2-, 3- and 4-grams and tabulates the results ## accordingly. ## ## The corpus is broken down into subcorpora during the process due to its size. ## ## The n-gram table is updated on each iteration to minimize the memory space ## occupied. ### # Tokenization for unigram(1-gram). unigram <- data.table() n <- 5 k <- ceiling(length(corpus)/n) for(i in 1:n){ start <- (i-1)*k+1 end <- i*k if(i==n) end <- length(corpus) tempTokens <- NGramTokenizer(corpus[start:end], Weka_control(min=1,max=1)) tempTable <- data.table(word=tempTokens, freq=c(1)) unigram <- rbind(unigram, tempTable) setkey(unigram, word) unigram <- unigram[, sum(freq), by=word] setnames(unigram, names(unigram), c("word","freq")) } ## Check for NA and empty entries. which(unigram$word==""); which(unigram$word==NA) ## Remove the empty entry in the unigram model unigram <- unigram[-which(unigram$word==""), ] save(unigram, file="Grand_Unigram.RData") rm(tempTokens, tempTable, unigram); gc() ## Tokenization for bigram(2-gram). bigram <- data.table() n <- 10 k <- ceiling(length(corpus)/n) for(i in 1:n){ start <- (i-1)*k+1 end <- i*k if(i==n) end <- length(corpus) tempTokens <- NGramTokenizer(corpus[start:end], Weka_control(min=2,max=2)) tempTable <- data.table(word=tempTokens, freq=c(1)) bigram <- rbind(bigram, tempTable) setkey(bigram, word) bigram <- bigram[, sum(freq), by=word] setnames(bigram, names(bigram), c("word","freq")) } ## Check for NA and empty entries. which(bigram$word==""); which(bigram$word==NA) save(bigram, file="Grand_Bigram.RData") rm(tempTokens, tempTable, bigram); gc() ## Tokenization for trigram(3-gram). 
trigram <- data.table() n <- 20 k <- ceiling(length(corpus)/n) for(i in 1:n){ print(i) start <- (i-1)*k+1 end <- i*k if(i==n) end <- length(corpus) tempTokens <- NGramTokenizer(corpus[start:end], Weka_control(min=3,max=3)) tempTable <- data.table(word=tempTokens, freq=c(1)) trigram <- rbind(trigram, tempTable) setkey(trigram, word) trigram <- trigram[, sum(freq), by=word] setnames(trigram, names(trigram), c("word","freq")) } ## Check for NA and empty entries. which(trigram$word==""); which(trigram$word==NA) save(trigram, file="Grand_Trigram.RData") rm(tempTokens, tempTable, trigram); gc() ## Tokenization for quadgram(4-gram). quadgram <- data.table() n <- 25 k <- ceiling(length(corpus)/n) for(i in 1:n){ print(i) start <- (i-1)*k+1 end <- i*k if(i==n) end <- length(corpus) tempTokens <- NGramTokenizer(corpus[start:end], Weka_control(min=4,max=4)) tempTable <- data.table(word=tempTokens, freq=c(1)) quadgram <- rbind(quadgram, tempTable) setkey(quadgram, word) quadgram <- quadgram[, sum(freq), by=word] setnames(quadgram, names(quadgram), c("word","freq")) } ## Check for NA and empty entries. which(quadgram$word==""); which(quadgram$word==NA) save(quadgram, file="Grand_Quadgram.RData") rm(tempTokens, tempTable, quadgram, corpus); gc()
library(shiny) library(shinyjs) shinyUI( fluidPage(align="center", includeCSS("styles.css"), useShinyjs(), headerPanel("Text Prediction"), mainPanel(class="col-sm-offset-2", h3("Please type or paste text into the box below"), h5("This application will predict the next word in your input. The single prediction is in the blue box below. The additional predictions in the other boxes are for demonstration purposes only."), tags$textarea(id="text", class="form-control input-lg", label = "Input Text", value="enter text here", rows="4"), span('Please wait several seconds for the algorithm to run.', class="help-block text-left"), tags$hr(), actionButton("result2", textOutput('result2', inline = TRUE), class="btn btn-lg"), actionButton("result1", textOutput('result1', inline = TRUE), class="btn btn-lg btn-primary"), actionButton("result3", textOutput('result3', inline = TRUE), class="btn btn-lg") ) ) )
/ui.R
no_license
asadowns/DSSCapstone
R
false
false
1,147
r
library(shiny) library(shinyjs) shinyUI( fluidPage(align="center", includeCSS("styles.css"), useShinyjs(), headerPanel("Text Prediction"), mainPanel(class="col-sm-offset-2", h3("Please type or paste text into the box below"), h5("This application will predict the next word in your input. The single prediction is in the blue box below. The additional predictions in the other boxes are for demonstration purposes only."), tags$textarea(id="text", class="form-control input-lg", label = "Input Text", value="enter text here", rows="4"), span('Please wait several seconds for the algorithm to run.', class="help-block text-left"), tags$hr(), actionButton("result2", textOutput('result2', inline = TRUE), class="btn btn-lg"), actionButton("result1", textOutput('result1', inline = TRUE), class="btn btn-lg btn-primary"), actionButton("result3", textOutput('result3', inline = TRUE), class="btn btn-lg") ) ) )
install.packages("rjson") # Load the package required to read JSON files. library("rjson") #Initialize the files file1="pro_leave1.json" file2="pro_leave2.json" file3="pro_leave3.json" file4="pro_remain.json" file5="pro_remain2.json" file6="pro_remain3.json" # Give the input file name to the function. result <- fromJSON(file = file6) # Print the result. print(result) # # Convert JSON file to a data frame. names=result$data$attributes$signatures_by_constituency #do.call(rbind.data.frame, names) ## Compute maximum length max.length <- max(sapply(names, length)) ## Add NA values to list elements names1 <- lapply(names, function(v) { c(v, rep(NA, max.length-length(v)))}) ## Rbind my_data=do.call(rbind, names1) my_data=as.data.frame(my_data) my_data=my_data[,c("ons_code","signature_count")] #Write the data into text files #Initialize the text file path path1="C:/Users/Administrator/Desktop/FREELANCER_COM_WORK/BREXIT analysis/proleave1.txt" path2="C:/Users/Administrator/Desktop/FREELANCER_COM_WORK/BREXIT analysis/proleave2.txt" path3="C:/Users/Administrator/Desktop/FREELANCER_COM_WORK/BREXIT analysis/proleave3.txt" path4="C:/Users/Administrator/Desktop/FREELANCER_COM_WORK/BREXIT analysis/proremain1.txt" path5="C:/Users/Administrator/Desktop/FREELANCER_COM_WORK/BREXIT analysis/proremain2.txt" path6="C:/Users/Administrator/Desktop/FREELANCER_COM_WORK/BREXIT analysis/proremain3.txt" library(data.table) fwrite(x = my_data, file = path6, sep = ",", col.names=T, append=T)
/FREELANCER FILES/Freelancer_com projects/BREXIT analysis/programming/R_codes.R
no_license
watex95/R-FOR-DATA-SCIENCE
R
false
false
1,523
r
install.packages("rjson") # Load the package required to read JSON files. library("rjson") #Initialize the files file1="pro_leave1.json" file2="pro_leave2.json" file3="pro_leave3.json" file4="pro_remain.json" file5="pro_remain2.json" file6="pro_remain3.json" # Give the input file name to the function. result <- fromJSON(file = file6) # Print the result. print(result) # # Convert JSON file to a data frame. names=result$data$attributes$signatures_by_constituency #do.call(rbind.data.frame, names) ## Compute maximum length max.length <- max(sapply(names, length)) ## Add NA values to list elements names1 <- lapply(names, function(v) { c(v, rep(NA, max.length-length(v)))}) ## Rbind my_data=do.call(rbind, names1) my_data=as.data.frame(my_data) my_data=my_data[,c("ons_code","signature_count")] #Write the data into text files #Initialize the text file path path1="C:/Users/Administrator/Desktop/FREELANCER_COM_WORK/BREXIT analysis/proleave1.txt" path2="C:/Users/Administrator/Desktop/FREELANCER_COM_WORK/BREXIT analysis/proleave2.txt" path3="C:/Users/Administrator/Desktop/FREELANCER_COM_WORK/BREXIT analysis/proleave3.txt" path4="C:/Users/Administrator/Desktop/FREELANCER_COM_WORK/BREXIT analysis/proremain1.txt" path5="C:/Users/Administrator/Desktop/FREELANCER_COM_WORK/BREXIT analysis/proremain2.txt" path6="C:/Users/Administrator/Desktop/FREELANCER_COM_WORK/BREXIT analysis/proremain3.txt" library(data.table) fwrite(x = my_data, file = path6, sep = ",", col.names=T, append=T)
/techos_heatmap.R
no_license
danidlsa/datosdemiercoles
R
false
false
2,302
r
library(arf3DS4) ### Name: fmri.data-class ### Title: arf3DS4 "fmri.data" class ### Aliases: fmri.data-class .fmri.data.aux_file,fmri.data-method ### .fmri.data.aux_file<-,fmri.data-method ### .fmri.data.bitpix,fmri.data-method ### .fmri.data.bitpix<-,fmri.data-method ### .fmri.data.cal_max,fmri.data-method ### .fmri.data.cal_max<-,fmri.data-method ### .fmri.data.cal_min,fmri.data-method ### .fmri.data.cal_min<-,fmri.data-method ### .fmri.data.data_type,fmri.data-method ### .fmri.data.data_type<-,fmri.data-method ### .fmri.data.data.signed,fmri.data-method ### .fmri.data.data.signed<-,fmri.data-method ### .fmri.data.data.type,fmri.data-method ### .fmri.data.data.type<-,fmri.data-method ### .fmri.data.datatype,fmri.data-method ### .fmri.data.datatype<-,fmri.data-method ### .fmri.data.datavec,fmri.data-method ### .fmri.data.datavec<-,fmri.data-method ### .fmri.data.db_name,fmri.data-method ### .fmri.data.db_name<-,fmri.data-method ### .fmri.data.descrip,fmri.data-method ### .fmri.data.descrip<-,fmri.data-method ### .fmri.data.dim_info,fmri.data-method ### .fmri.data.dim_info<-,fmri.data-method ### .fmri.data.dims,fmri.data-method .fmri.data.dims<-,fmri.data-method ### .fmri.data.endian,fmri.data-method ### .fmri.data.endian<-,fmri.data-method ### .fmri.data.extension,fmri.data-method ### .fmri.data.extension<-,fmri.data-method ### .fmri.data.extents,fmri.data-method ### .fmri.data.extents<-,fmri.data-method ### .fmri.data.filename,fmri.data-method ### .fmri.data.filename<-,fmri.data-method ### .fmri.data.filetype,fmri.data-method ### .fmri.data.filetype<-,fmri.data-method ### .fmri.data.fullpath,fmri.data-method ### .fmri.data.fullpath<-,fmri.data-method ### .fmri.data.glmax,fmri.data-method .fmri.data.glmax<-,fmri.data-method ### .fmri.data.glmin,fmri.data-method .fmri.data.glmin<-,fmri.data-method ### .fmri.data.gzipped,fmri.data-method ### .fmri.data.gzipped<-,fmri.data-method ### .fmri.data.intent_code,fmri.data-method ### 
.fmri.data.intent_code<-,fmri.data-method ### .fmri.data.intent_name,fmri.data-method ### .fmri.data.intent_name<-,fmri.data-method ### .fmri.data.intent_p1,fmri.data-method ### .fmri.data.intent_p1<-,fmri.data-method ### .fmri.data.intent_p2,fmri.data-method ### .fmri.data.intent_p2<-,fmri.data-method ### .fmri.data.intent_p3,fmri.data-method ### .fmri.data.intent_p3<-,fmri.data-method ### .fmri.data.magic,fmri.data-method .fmri.data.magic<-,fmri.data-method ### .fmri.data.pixdim,fmri.data-method ### .fmri.data.pixdim<-,fmri.data-method ### .fmri.data.qform_code,fmri.data-method ### .fmri.data.qform_code<-,fmri.data-method ### .fmri.data.qoffset_x,fmri.data-method ### .fmri.data.qoffset_x<-,fmri.data-method ### .fmri.data.qoffset_y,fmri.data-method ### .fmri.data.qoffset_y<-,fmri.data-method ### .fmri.data.qoffset_z,fmri.data-method ### .fmri.data.qoffset_z<-,fmri.data-method ### .fmri.data.quatern_b,fmri.data-method ### .fmri.data.quatern_b<-,fmri.data-method ### .fmri.data.quatern_c,fmri.data-method ### .fmri.data.quatern_c<-,fmri.data-method ### .fmri.data.quatern_d,fmri.data-method ### .fmri.data.quatern_d<-,fmri.data-method ### .fmri.data.regular,fmri.data-method ### .fmri.data.regular<-,fmri.data-method ### .fmri.data.scl_inter,fmri.data-method ### .fmri.data.scl_inter<-,fmri.data-method ### .fmri.data.scl_slope,fmri.data-method ### .fmri.data.scl_slope<-,fmri.data-method ### .fmri.data.session_error,fmri.data-method ### .fmri.data.session_error<-,fmri.data-method ### .fmri.data.sform_code,fmri.data-method ### .fmri.data.sform_code<-,fmri.data-method ### .fmri.data.sizeof_hdr,fmri.data-method ### .fmri.data.sizeof_hdr<-,fmri.data-method ### .fmri.data.slice_code,fmri.data-method ### .fmri.data.slice_code<-,fmri.data-method ### .fmri.data.slice_duration,fmri.data-method ### .fmri.data.slice_duration<-,fmri.data-method ### .fmri.data.slice_end,fmri.data-method ### .fmri.data.slice_end<-,fmri.data-method ### .fmri.data.slice_start,fmri.data-method ### 
.fmri.data.slice_start<-,fmri.data-method ### .fmri.data.srow_x,fmri.data-method ### .fmri.data.srow_x<-,fmri.data-method ### .fmri.data.srow_y,fmri.data-method ### .fmri.data.srow_y<-,fmri.data-method ### .fmri.data.srow_z,fmri.data-method ### .fmri.data.srow_z<-,fmri.data-method ### .fmri.data.toffset,fmri.data-method ### .fmri.data.toffset<-,fmri.data-method ### .fmri.data.version,fmri.data-method ### .fmri.data.version<-,fmri.data-method ### .fmri.data.vox_offset,fmri.data-method ### .fmri.data.vox_offset<-,fmri.data-method ### .fmri.data.xyzt_units,fmri.data-method ### .fmri.data.xyzt_units<-,fmri.data-method ### plot,fmri.data,missing-method show,fmri.data-method ### summary,fmri.data-method ### Keywords: classes ### ** Examples showClass("fmri.data")
/data/genthat_extracted_code/arf3DS4/examples/fmri.data-class.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
4,930
r
library(arf3DS4) ### Name: fmri.data-class ### Title: arf3DS4 "fmri.data" class ### Aliases: fmri.data-class .fmri.data.aux_file,fmri.data-method ### .fmri.data.aux_file<-,fmri.data-method ### .fmri.data.bitpix,fmri.data-method ### .fmri.data.bitpix<-,fmri.data-method ### .fmri.data.cal_max,fmri.data-method ### .fmri.data.cal_max<-,fmri.data-method ### .fmri.data.cal_min,fmri.data-method ### .fmri.data.cal_min<-,fmri.data-method ### .fmri.data.data_type,fmri.data-method ### .fmri.data.data_type<-,fmri.data-method ### .fmri.data.data.signed,fmri.data-method ### .fmri.data.data.signed<-,fmri.data-method ### .fmri.data.data.type,fmri.data-method ### .fmri.data.data.type<-,fmri.data-method ### .fmri.data.datatype,fmri.data-method ### .fmri.data.datatype<-,fmri.data-method ### .fmri.data.datavec,fmri.data-method ### .fmri.data.datavec<-,fmri.data-method ### .fmri.data.db_name,fmri.data-method ### .fmri.data.db_name<-,fmri.data-method ### .fmri.data.descrip,fmri.data-method ### .fmri.data.descrip<-,fmri.data-method ### .fmri.data.dim_info,fmri.data-method ### .fmri.data.dim_info<-,fmri.data-method ### .fmri.data.dims,fmri.data-method .fmri.data.dims<-,fmri.data-method ### .fmri.data.endian,fmri.data-method ### .fmri.data.endian<-,fmri.data-method ### .fmri.data.extension,fmri.data-method ### .fmri.data.extension<-,fmri.data-method ### .fmri.data.extents,fmri.data-method ### .fmri.data.extents<-,fmri.data-method ### .fmri.data.filename,fmri.data-method ### .fmri.data.filename<-,fmri.data-method ### .fmri.data.filetype,fmri.data-method ### .fmri.data.filetype<-,fmri.data-method ### .fmri.data.fullpath,fmri.data-method ### .fmri.data.fullpath<-,fmri.data-method ### .fmri.data.glmax,fmri.data-method .fmri.data.glmax<-,fmri.data-method ### .fmri.data.glmin,fmri.data-method .fmri.data.glmin<-,fmri.data-method ### .fmri.data.gzipped,fmri.data-method ### .fmri.data.gzipped<-,fmri.data-method ### .fmri.data.intent_code,fmri.data-method ### 
.fmri.data.intent_code<-,fmri.data-method ### .fmri.data.intent_name,fmri.data-method ### .fmri.data.intent_name<-,fmri.data-method ### .fmri.data.intent_p1,fmri.data-method ### .fmri.data.intent_p1<-,fmri.data-method ### .fmri.data.intent_p2,fmri.data-method ### .fmri.data.intent_p2<-,fmri.data-method ### .fmri.data.intent_p3,fmri.data-method ### .fmri.data.intent_p3<-,fmri.data-method ### .fmri.data.magic,fmri.data-method .fmri.data.magic<-,fmri.data-method ### .fmri.data.pixdim,fmri.data-method ### .fmri.data.pixdim<-,fmri.data-method ### .fmri.data.qform_code,fmri.data-method ### .fmri.data.qform_code<-,fmri.data-method ### .fmri.data.qoffset_x,fmri.data-method ### .fmri.data.qoffset_x<-,fmri.data-method ### .fmri.data.qoffset_y,fmri.data-method ### .fmri.data.qoffset_y<-,fmri.data-method ### .fmri.data.qoffset_z,fmri.data-method ### .fmri.data.qoffset_z<-,fmri.data-method ### .fmri.data.quatern_b,fmri.data-method ### .fmri.data.quatern_b<-,fmri.data-method ### .fmri.data.quatern_c,fmri.data-method ### .fmri.data.quatern_c<-,fmri.data-method ### .fmri.data.quatern_d,fmri.data-method ### .fmri.data.quatern_d<-,fmri.data-method ### .fmri.data.regular,fmri.data-method ### .fmri.data.regular<-,fmri.data-method ### .fmri.data.scl_inter,fmri.data-method ### .fmri.data.scl_inter<-,fmri.data-method ### .fmri.data.scl_slope,fmri.data-method ### .fmri.data.scl_slope<-,fmri.data-method ### .fmri.data.session_error,fmri.data-method ### .fmri.data.session_error<-,fmri.data-method ### .fmri.data.sform_code,fmri.data-method ### .fmri.data.sform_code<-,fmri.data-method ### .fmri.data.sizeof_hdr,fmri.data-method ### .fmri.data.sizeof_hdr<-,fmri.data-method ### .fmri.data.slice_code,fmri.data-method ### .fmri.data.slice_code<-,fmri.data-method ### .fmri.data.slice_duration,fmri.data-method ### .fmri.data.slice_duration<-,fmri.data-method ### .fmri.data.slice_end,fmri.data-method ### .fmri.data.slice_end<-,fmri.data-method ### .fmri.data.slice_start,fmri.data-method ### 
.fmri.data.slice_start<-,fmri.data-method ### .fmri.data.srow_x,fmri.data-method ### .fmri.data.srow_x<-,fmri.data-method ### .fmri.data.srow_y,fmri.data-method ### .fmri.data.srow_y<-,fmri.data-method ### .fmri.data.srow_z,fmri.data-method ### .fmri.data.srow_z<-,fmri.data-method ### .fmri.data.toffset,fmri.data-method ### .fmri.data.toffset<-,fmri.data-method ### .fmri.data.version,fmri.data-method ### .fmri.data.version<-,fmri.data-method ### .fmri.data.vox_offset,fmri.data-method ### .fmri.data.vox_offset<-,fmri.data-method ### .fmri.data.xyzt_units,fmri.data-method ### .fmri.data.xyzt_units<-,fmri.data-method ### plot,fmri.data,missing-method show,fmri.data-method ### summary,fmri.data-method ### Keywords: classes ### ** Examples showClass("fmri.data")
# Converts countries.geojson to sf file and saves as RDS. This is # to spare people from needing to have rgdal installed just to # run the app. library(rgdal) library(sf) download.file("https://raw.githubusercontent.com/rstudio/leaflet/master/docs/json/countries.geojson", "countries.geojson") countries <- readOGR("countries.geojson", "OGRGeoJSON") saveRDS(st_as_sf(countries), "countries.rds") message("Saved countries.rds")
/144-colors/countries.R
permissive
mine-cetinkaya-rundel/shiny-examples
R
false
false
429
r
# Converts countries.geojson to sf file and saves as RDS. This is # to spare people from needing to have rgdal installed just to # run the app. library(rgdal) library(sf) download.file("https://raw.githubusercontent.com/rstudio/leaflet/master/docs/json/countries.geojson", "countries.geojson") countries <- readOGR("countries.geojson", "OGRGeoJSON") saveRDS(st_as_sf(countries), "countries.rds") message("Saved countries.rds")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Visualizations.R \name{ncp_from_d} \alias{ncp_from_d} \title{Compute non-centrality parameter for the non-central t distribution} \usage{ ncp_from_d(d, n1, n2) } \arguments{ \item{d}{Cohen's d} \item{n1}{Sample size in group 1} \item{n2}{Sample size in group 2} } \value{ The non-centrality parameter } \description{ Compute non-centrality parameter for the non-central t distribution } \details{ See page 7 in Erdfelder, Faul, & Buchner (1996) for the formula. } \examples{ ncp_from_d(0.3, 50, 50) } \references{ Erdfelder, E., Faul, F., & Buchner, A. (1996). GPOWER: A general power analysis program. Behavior research methods, instruments, & computers, 28(1), 1-11. }
/man/ncp_from_d.Rd
permissive
m-Py/bayesEd
R
false
true
758
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Visualizations.R \name{ncp_from_d} \alias{ncp_from_d} \title{Compute non-centrality parameter for the non-central t distribution} \usage{ ncp_from_d(d, n1, n2) } \arguments{ \item{d}{Cohen's d} \item{n1}{Sample size in group 1} \item{n2}{Sample size in group 2} } \value{ The non-centrality parameter } \description{ Compute non-centrality parameter for the non-central t distribution } \details{ See page 7 in Erdfelder, Faul, & Buchner (1996) for the formula. } \examples{ ncp_from_d(0.3, 50, 50) } \references{ Erdfelder, E., Faul, F., & Buchner, A. (1996). GPOWER: A general power analysis program. Behavior research methods, instruments, & computers, 28(1), 1-11. }
library(shiny) # Define UI for dataset viewer application shinyUI(pageWithSidebar( # Header title headerPanel("Module 9: Developing Data Products with Shiny App by taroAthirah"), # Sidebar with controls to select a dataset and specify the number # of observations to view sidebarPanel( h3("Now make a choice of car make and its feature", style = "color:red"), br(), img(src="https://pixabay.com/static/uploads/photo/2013/07/13/11/29/car-158239_960_720.png", width="60%"), selectInput("make", "Choose a car make >>>>", choices = c("Mazda", "Datsun", "Hornet","Valiant","Duster","Merc","Cadillac","Lincoln","Chrysler","Fiat","Honda","Toyota","Dodge","AMC","Camaro","Pontiac","Porsche","Lotus","Ford","Ferrari","Maserati","Volvo")), h3("Legend:-", style = "color:red"), p ("1.qsec = 1/4 mile time"), p ("2. mpg = Miles/(US) gallon"), p ("3. cyl = Number of cylinders") , p ("4. disp = Displacement (cu.in.)"), p ("5. hp = Gross Horsepower") , p ("6. drat = Rear axle ratio") , p ("7. wt = Weight (lb/1000)"), p ("8. vs = V/S"), p ("9. am = Transmission (0 is automatic, 1 is manual)") , p ("10. gear = Number of forward gears"), p ("11. carb = Number of carburetors"), selectInput("feature", "Choose a car feature >>>>", choices = c("qsec","mpg", "cyl", "disp","hp","drat","wt","vs","am","gear","carb")) ), # Show a summary of the dataset and HTML table with the requested # number of observations mainPanel( list(tags$head(tags$style("body {background-color: #ADD8E6; }"))), p("This Project Assignment will give you some information on car models and your desired car's feature based on mtcars data available in"), img(src="http://web.warwick.ac.uk/statsdept/useR-2011/pics/logo_rstudio.jpg", length="50", width="30%"), br(), p(""), verbatimTextOutput("make"), verbatimTextOutput("feature"), verbatimTextOutput("tabletext"), tableOutput("view"), verbatimTextOutput("mean") ) ))
/ui.R
no_license
taroathirah/Developing-Data-Products-Project
R
false
false
2,135
r
library(shiny) # Define UI for dataset viewer application shinyUI(pageWithSidebar( # Header title headerPanel("Module 9: Developing Data Products with Shiny App by taroAthirah"), # Sidebar with controls to select a dataset and specify the number # of observations to view sidebarPanel( h3("Now make a choice of car make and its feature", style = "color:red"), br(), img(src="https://pixabay.com/static/uploads/photo/2013/07/13/11/29/car-158239_960_720.png", width="60%"), selectInput("make", "Choose a car make >>>>", choices = c("Mazda", "Datsun", "Hornet","Valiant","Duster","Merc","Cadillac","Lincoln","Chrysler","Fiat","Honda","Toyota","Dodge","AMC","Camaro","Pontiac","Porsche","Lotus","Ford","Ferrari","Maserati","Volvo")), h3("Legend:-", style = "color:red"), p ("1.qsec = 1/4 mile time"), p ("2. mpg = Miles/(US) gallon"), p ("3. cyl = Number of cylinders") , p ("4. disp = Displacement (cu.in.)"), p ("5. hp = Gross Horsepower") , p ("6. drat = Rear axle ratio") , p ("7. wt = Weight (lb/1000)"), p ("8. vs = V/S"), p ("9. am = Transmission (0 is automatic, 1 is manual)") , p ("10. gear = Number of forward gears"), p ("11. carb = Number of carburetors"), selectInput("feature", "Choose a car feature >>>>", choices = c("qsec","mpg", "cyl", "disp","hp","drat","wt","vs","am","gear","carb")) ), # Show a summary of the dataset and HTML table with the requested # number of observations mainPanel( list(tags$head(tags$style("body {background-color: #ADD8E6; }"))), p("This Project Assignment will give you some information on car models and your desired car's feature based on mtcars data available in"), img(src="http://web.warwick.ac.uk/statsdept/useR-2011/pics/logo_rstudio.jpg", length="50", width="30%"), br(), p(""), verbatimTextOutput("make"), verbatimTextOutput("feature"), verbatimTextOutput("tabletext"), tableOutput("view"), verbatimTextOutput("mean") ) ))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/airport.R \docType{data} \name{airports} \alias{airports} \title{Airport metadata} \format{A data frame with columns: \describe{ \item{faa}{FAA airport code} \item{name}{Usual name of the aiport} \item{lat,lon}{Location of airport} \item{alt}{Altitude, in feet} \item{tz}{Timezone offset from GMT} \item{dst}{Daylight savings time zone. A = Standard US DST: starts on the second Sunday of March, ends on the first Sunday of November. U = unknown. N = no dst.} \item{tzone}{IANA time zone, as determined by GeoNames webservice} }} \source{ \url{http://openflights.org/data.html}, downloaded 2018-01-31 } \usage{ airports } \description{ Useful metadata about airports. } \examples{ if (require("dplyr")) { airports airports \%>\% mutate(dest = faa) \%>\% semi_join(flights) flights \%>\% anti_join(airports \%>\% mutate(dest = faa)) airports \%>\% mutate(origin = faa) \%>\% semi_join(flights) } } \keyword{datasets}
/man/airports.Rd
no_license
erikerhardt/flightsABQ17
R
false
true
1,011
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/airport.R \docType{data} \name{airports} \alias{airports} \title{Airport metadata} \format{A data frame with columns: \describe{ \item{faa}{FAA airport code} \item{name}{Usual name of the aiport} \item{lat,lon}{Location of airport} \item{alt}{Altitude, in feet} \item{tz}{Timezone offset from GMT} \item{dst}{Daylight savings time zone. A = Standard US DST: starts on the second Sunday of March, ends on the first Sunday of November. U = unknown. N = no dst.} \item{tzone}{IANA time zone, as determined by GeoNames webservice} }} \source{ \url{http://openflights.org/data.html}, downloaded 2018-01-31 } \usage{ airports } \description{ Useful metadata about airports. } \examples{ if (require("dplyr")) { airports airports \%>\% mutate(dest = faa) \%>\% semi_join(flights) flights \%>\% anti_join(airports \%>\% mutate(dest = faa)) airports \%>\% mutate(origin = faa) \%>\% semi_join(flights) } } \keyword{datasets}
#' cell counter
#' @description count the cells from a single frame tiff file and output xml for imageJ
#' @param file tiff file name
#' @param channels channel to be detected
#' @param formula the relationship of channels. Here '*'=='intersect' and '+'=='contain'
#' eg. blue*green+red means the cell must have blue and green, and red is in the cell.
#' blue*green-red means the cell must have blue and green, and red is NOT in the cell.
#' blue*green means the cell must have blue and green
#' blue+red means the cell must have blue, and red is in the cell.
#' @param maskValue the cutoff value for set the mask is TRUE.
#' @param offset the offset of color from background, (0, 1).
#' @param cellSizeRange cell size range
#' @param xmlfile filename of xml
#' @param imageFilename filename of the original czi file
#' @param adjustPipeline adjust pipeline before cell detection
#' @param detectFun the function used to detect the cell, see \link{detectObjects}, \link{detectObjects2}.
#' @param saveAdjustImage the file name for adjusted image. NULL to ignore saving.
#' @param silence output the message or not
#' @param ... parameters could be used in the pipeline
#' @import EBImage
#' @import scales
#' @import XML
#' @export
#' @author Jianhong Ou
#' @examples
#' library(EBImage)
#' library(scales)
#' library(XML)
#' cellCounterSingleFrame(system.file("extdata", "low.jpg", package="cellCounter"),
#'                        formula="blue+red", xmlfile="low.xml", imageFilename="low.jpg")
cellCounterSingleFrame <- function(file, channels=c("red", "green", "blue"),
                                   formula, maskValue=0.3,
                                   offset=0.1, cellSizeRange=c(10, 1000),
                                   xmlfile=sub("\\.(tiff|tif)$", ".cellCounter.xml",
                                               file, ignore.case = TRUE),
                                   imageFilename=sub("\\.(tiff|tif)$", ".czi",
                                                     basename(file), ignore.case = TRUE),
                                   adjustPipeline=c(ScurveAdjust),
                                   detectFun=detectObjects,
                                   saveAdjustImage=NULL,
                                   silence=FALSE, ...){
  channels <- match.arg(channels, choices = c("green", "red", "blue"),
                        several.ok = TRUE)
  # offset must be a proper fraction strictly between 0 and 1
  stopifnot(offset<1)
  stopifnot(offset>0)
  if(!silence) message("reading tiff file")
  img <- readFile(file)$img
  ## pre adjust: apply each user-supplied adjustment function in order
  if(!silence) message("adjust image file")
  for(fun in adjustPipeline){
    stopifnot(is.function(fun))
    img <- fun(img=img, ...)
  }
  # optionally persist the adjusted image before detection
  if(is.character(saveAdjustImage)){
    writeImage(img, files = saveAdjustImage, type="TIFF")
  }
  # zmap: logical matrix mapping channel combinations (rows) to channels (cols)
  zmap <- colorZmap(channels)
  zmap <- zmap[rowSums(zmap)>0, , drop=FALSE]
  if(!missing(formula)){
    # only the first formula is honoured; all whitespace is stripped
    formula <- gsub("\\s+", "", formula[1])
    # formula1: channel names per formula; operators: the '+', '-' or '*' between them
    formula1 <- strsplit(formula, "\\+|-|\\*")
    operators <- strsplit(formula, "[a-zA-Z]+")
    operators <- lapply(operators, function(.ele) .ele[.ele!=""])
    keep <- rep(FALSE, nrow(zmap))
    for(i in seq_along(formula1)){
      if(!all(formula1[[i]] %in% c("red", "green", "blue"))){
        stop("channels in formula is not in blue, green or red.")
      }
      if(any(table(formula1[[i]])>1)){
        stop("channels in formula can be appeared only once.")
      }
      if(length(operators[[i]])>2){
        stop("formula is too complicated!")
      }
      if(any(operators[[i]]=="*")){
        oid <- which(operators[[i]]=="*")
        if(length(oid)==1){
          # NOTE(review): indexes formula1[[1]] rather than formula1[[i]];
          # harmless while formula is truncated to formula[1] above, but
          # confirm before ever supporting multiple formulas.
          keep[zmap[, formula1[[1]][oid]] & zmap[, formula1[[1]][oid+1]] &
                 rowSums(zmap)!=3] <- TRUE
        }else{
          keep[rowSums(zmap)==3] <- TRUE
        }
      }
    }
    # keep the single-channel rows plus the combinations requested via '*'
    zmap <- zmap[rowSums(zmap)==1 | keep, , drop=FALSE]
  }else{
    formula1 <- NULL
    operators <- NULL
    zmap <- zmap[rowSums(zmap)==1, , drop=FALSE]
  }
  # split the image into per-channel frames, then build one masked frame per
  # zmap row by intersecting its member channels at the maskValue cutoff
  img <- lapply(channels, channel, x=img)
  names(img) <- channels
  img <- lapply(seq.int(nrow(zmap)), function(.ele){
    imgs <- img[colnames(zmap)[zmap[.ele, ]]]
    setImage(imgs, "intersect", maskValue)
  })
  img <- combine(img)
  ## detect the cells
  if(!silence) message("detecting cells")
  img <- detectFun(img, offset=offset, cellSizeRange=cellSizeRange, ...)
  ## remove object for different counter type
  # NOTE(review): formula has length 1 here (see formula[1] above), so this
  # loop runs at most once; seq_along(formula1) would express the intent.
  for(i in seq_along(formula)){
    if("+" %in% operators[[i]]){
      ## remove object by `+`: keep only labelled objects on the left-hand
      ## frame that overlap at least one positive pixel of the right-hand frame
      oid <- which(operators[[i]]=="+")
      newChannel <- NULL
      for(ioid in oid){
        if(length(newChannel)==0){
          plusLeft <- formula1[[i]][ioid]
          if(ioid>1){
            if(operators[[i]][ioid-1]=="*"){
              plusLeft <- formula1[[i]][c(ioid-1, ioid)]
            }
          }
          idLeft <- which(rowSums(zmap[, plusLeft, drop=FALSE])==length(plusLeft))
          frameLeft <- getFrame(img, idLeft, type="render")
        }else{
          frameLeft <- newChannel
        }
        plusRight <- formula1[[i]][ioid+1]
        if(ioid+1<length(operators[[i]])){
          if(operators[[i]][ioid+2]=="*"){
            plusRight <- formula1[[i]][c(ioid+1, ioid+2)]
          }
        }
        ## plusRight must be in plusLeft
        idRight <- which(rowSums(zmap[, plusRight, drop=FALSE])==length(plusRight))
        frameRight <- getFrame(img, idRight, type="render")
        # labels present in the left frame but absent from the overlap with
        # the right frame are removed, without renumbering the survivors
        tbl0 <- table(imageData(frameLeft))
        tbl <- imageData(frameLeft) * as.numeric(imageData(frameRight)>0)
        tbl <- table(tbl)
        tbl <- as.numeric(names(tbl0)[!names(tbl0) %in% names(tbl[tbl>0])])
        newChannel <- rmObjects(frameLeft, tbl, reenumerate=FALSE)
      }
      img <- combine(img, newChannel)
    }
  }
  ## output the xml
  if(!silence) message("output xml file")
  saveSingleFrameCountXML(img, xmlfile = xmlfile, file = imageFilename,
                          zmap=zmap, formula=formula, ...)
}
/R/cellCounterSingleFrame.R
no_license
jianhong/cellCounter
R
false
false
5,646
r
#' cell counter
#' @description count the cells from a single frame tiff file and output xml for imageJ
#' @param file tiff file name
#' @param channels channel to be detected
#' @param formula the relationship of channels. Here '*'=='intersect' and '+'=='contain'
#' eg. blue*green+red means the cell must have blue and green, and red is in the cell.
#' blue*green-red means the cell must have blue and green, and red is NOT in the cell.
#' blue*green means the cell must have blue and green
#' blue+red means the cell must have blue, and red is in the cell.
#' @param maskValue the cutoff value for set the mask is TRUE.
#' @param offset the offset of color from background, (0, 1).
#' @param cellSizeRange cell size range
#' @param xmlfile filename of xml
#' @param imageFilename filename of the original czi file
#' @param adjustPipeline adjust pipeline before cell detection
#' @param detectFun the function used to detect the cell, see \link{detectObjects}, \link{detectObjects2}.
#' @param saveAdjustImage the file name for adjusted image. NULL to ignore saving.
#' @param silence output the message or not
#' @param ... parameters could be used in the pipeline
#' @import EBImage
#' @import scales
#' @import XML
#' @export
#' @author Jianhong Ou
#' @examples
#' library(EBImage)
#' library(scales)
#' library(XML)
#' cellCounterSingleFrame(system.file("extdata", "low.jpg", package="cellCounter"),
#'                        formula="blue+red", xmlfile="low.xml", imageFilename="low.jpg")
cellCounterSingleFrame <- function(file, channels=c("red", "green", "blue"),
                                   formula, maskValue=0.3,
                                   offset=0.1, cellSizeRange=c(10, 1000),
                                   xmlfile=sub("\\.(tiff|tif)$", ".cellCounter.xml",
                                               file, ignore.case = TRUE),
                                   imageFilename=sub("\\.(tiff|tif)$", ".czi",
                                                     basename(file), ignore.case = TRUE),
                                   adjustPipeline=c(ScurveAdjust),
                                   detectFun=detectObjects,
                                   saveAdjustImage=NULL,
                                   silence=FALSE, ...){
  channels <- match.arg(channels, choices = c("green", "red", "blue"),
                        several.ok = TRUE)
  # offset must be a proper fraction strictly between 0 and 1
  stopifnot(offset<1)
  stopifnot(offset>0)
  if(!silence) message("reading tiff file")
  img <- readFile(file)$img
  ## pre adjust: apply each user-supplied adjustment function in order
  if(!silence) message("adjust image file")
  for(fun in adjustPipeline){
    stopifnot(is.function(fun))
    img <- fun(img=img, ...)
  }
  # optionally persist the adjusted image before detection
  if(is.character(saveAdjustImage)){
    writeImage(img, files = saveAdjustImage, type="TIFF")
  }
  # zmap: logical matrix mapping channel combinations (rows) to channels (cols)
  zmap <- colorZmap(channels)
  zmap <- zmap[rowSums(zmap)>0, , drop=FALSE]
  if(!missing(formula)){
    # only the first formula is honoured; all whitespace is stripped
    formula <- gsub("\\s+", "", formula[1])
    # formula1: channel names per formula; operators: the '+', '-' or '*' between them
    formula1 <- strsplit(formula, "\\+|-|\\*")
    operators <- strsplit(formula, "[a-zA-Z]+")
    operators <- lapply(operators, function(.ele) .ele[.ele!=""])
    keep <- rep(FALSE, nrow(zmap))
    for(i in seq_along(formula1)){
      if(!all(formula1[[i]] %in% c("red", "green", "blue"))){
        stop("channels in formula is not in blue, green or red.")
      }
      if(any(table(formula1[[i]])>1)){
        stop("channels in formula can be appeared only once.")
      }
      if(length(operators[[i]])>2){
        stop("formula is too complicated!")
      }
      if(any(operators[[i]]=="*")){
        oid <- which(operators[[i]]=="*")
        if(length(oid)==1){
          # NOTE(review): indexes formula1[[1]] rather than formula1[[i]];
          # harmless while formula is truncated to formula[1] above, but
          # confirm before ever supporting multiple formulas.
          keep[zmap[, formula1[[1]][oid]] & zmap[, formula1[[1]][oid+1]] &
                 rowSums(zmap)!=3] <- TRUE
        }else{
          keep[rowSums(zmap)==3] <- TRUE
        }
      }
    }
    # keep the single-channel rows plus the combinations requested via '*'
    zmap <- zmap[rowSums(zmap)==1 | keep, , drop=FALSE]
  }else{
    formula1 <- NULL
    operators <- NULL
    zmap <- zmap[rowSums(zmap)==1, , drop=FALSE]
  }
  # split the image into per-channel frames, then build one masked frame per
  # zmap row by intersecting its member channels at the maskValue cutoff
  img <- lapply(channels, channel, x=img)
  names(img) <- channels
  img <- lapply(seq.int(nrow(zmap)), function(.ele){
    imgs <- img[colnames(zmap)[zmap[.ele, ]]]
    setImage(imgs, "intersect", maskValue)
  })
  img <- combine(img)
  ## detect the cells
  if(!silence) message("detecting cells")
  img <- detectFun(img, offset=offset, cellSizeRange=cellSizeRange, ...)
  ## remove object for different counter type
  # NOTE(review): formula has length 1 here (see formula[1] above), so this
  # loop runs at most once; seq_along(formula1) would express the intent.
  for(i in seq_along(formula)){
    if("+" %in% operators[[i]]){
      ## remove object by `+`: keep only labelled objects on the left-hand
      ## frame that overlap at least one positive pixel of the right-hand frame
      oid <- which(operators[[i]]=="+")
      newChannel <- NULL
      for(ioid in oid){
        if(length(newChannel)==0){
          plusLeft <- formula1[[i]][ioid]
          if(ioid>1){
            if(operators[[i]][ioid-1]=="*"){
              plusLeft <- formula1[[i]][c(ioid-1, ioid)]
            }
          }
          idLeft <- which(rowSums(zmap[, plusLeft, drop=FALSE])==length(plusLeft))
          frameLeft <- getFrame(img, idLeft, type="render")
        }else{
          frameLeft <- newChannel
        }
        plusRight <- formula1[[i]][ioid+1]
        if(ioid+1<length(operators[[i]])){
          if(operators[[i]][ioid+2]=="*"){
            plusRight <- formula1[[i]][c(ioid+1, ioid+2)]
          }
        }
        ## plusRight must be in plusLeft
        idRight <- which(rowSums(zmap[, plusRight, drop=FALSE])==length(plusRight))
        frameRight <- getFrame(img, idRight, type="render")
        # labels present in the left frame but absent from the overlap with
        # the right frame are removed, without renumbering the survivors
        tbl0 <- table(imageData(frameLeft))
        tbl <- imageData(frameLeft) * as.numeric(imageData(frameRight)>0)
        tbl <- table(tbl)
        tbl <- as.numeric(names(tbl0)[!names(tbl0) %in% names(tbl[tbl>0])])
        newChannel <- rmObjects(frameLeft, tbl, reenumerate=FALSE)
      }
      img <- combine(img, newChannel)
    }
  }
  ## output the xml
  if(!silence) message("output xml file")
  saveSingleFrameCountXML(img, xmlfile = xmlfile, file = imageFilename,
                          zmap=zmap, formula=formula, ...)
}
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/geom.R
\name{geom_confint}
\alias{geom_confint}
\title{Connect observations by stairs.}
\usage{
geom_confint(mapping = NULL, data = NULL, stat = "identity",
  position = "identity", na.rm = FALSE, ...)
}
\arguments{
\item{mapping}{the aesthetic mapping}

\item{data}{a layer specific dataset}

\item{stat}{the statistical transformation to use on the data for this layer}

\item{position}{the position adjustment to use for overlapping points on this layer}

\item{na.rm}{logical flag indicating whether to silently remove missing values}

\item{...}{other arguments passed to methods}
}
\description{
Connect observations by stairs.
}
/man/geom_confint.Rd
no_license
LionelGeo/ggfortify
R
false
false
709
rd
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/geom.R
\name{geom_confint}
\alias{geom_confint}
\title{Connect observations by stairs.}
\usage{
geom_confint(mapping = NULL, data = NULL, stat = "identity",
  position = "identity", na.rm = FALSE, ...)
}
\arguments{
\item{mapping}{the aesthetic mapping}

\item{data}{a layer specific dataset}

\item{stat}{the statistical transformation to use on the data for this layer}

\item{position}{the position adjustment to use for overlapping points on this layer}

\item{na.rm}{logical flag indicating whether to silently remove missing values}

\item{...}{other arguments passed to methods}
}
\description{
Connect observations by stairs.
}
library(tropicalSparse)

### Name: tropicalsparse.axpyi
### Title: tropicalsparse.axpyi()
### Aliases: tropicalsparse.axpyi

### ** Examples

# Two tropical vectors; Inf acts as the additive identity of the
# min-plus semiring (an "empty" entry).
a <- c(2, Inf, 5, 0, Inf, Inf, Inf, 10, Inf)
b <- c(0, 5, Inf, Inf, 12, 2, Inf, Inf, 3)
alpha <- 5
# Tropical axpy in the 'minplus' semiring: elementwise min(a, alpha + b),
# consistent with the printed result below (e.g. min(2, 5+0) = 2,
# min(Inf, 5+12) = 17).
tropicalsparse.axpyi(a, alpha, b, 'minplus')
# [1]   2  10   5   0  17   7 Inf  10   8
/data/genthat_extracted_code/tropicalSparse/examples/tropicalsparse.axpyi.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
335
r
library(tropicalSparse)

### Name: tropicalsparse.axpyi
### Title: tropicalsparse.axpyi()
### Aliases: tropicalsparse.axpyi

### ** Examples

# Two tropical vectors; Inf acts as the additive identity of the
# min-plus semiring (an "empty" entry).
a <- c(2, Inf, 5, 0, Inf, Inf, Inf, 10, Inf)
b <- c(0, 5, Inf, Inf, 12, 2, Inf, Inf, 3)
alpha <- 5
# Tropical axpy in the 'minplus' semiring: elementwise min(a, alpha + b),
# consistent with the printed result below (e.g. min(2, 5+0) = 2,
# min(Inf, 5+12) = 17).
tropicalsparse.axpyi(a, alpha, b, 'minplus')
# [1]   2  10   5   0  17   7 Inf  10   8
#' MeanCoverage
#'
#' Computes the mean total allelic coverage (maternal + paternal counts) per
#' gene over the selected replicates.
#'
#' @param df Allele counts dataframe: with 2n+1 columns, "ID" and 2n columns with ref & alt counts (rep1_ref, rep1_alt, rep2_ref, rep2_alt, ...)
#' @param reps Optional (default=NA, all replicates), a vector of replicate numbers for which the analysis should be applied
#' @param thr Optional (default=NA), threshold on the overall number of counts for a gene to be considered in the analysis
#' @param thrUP Optional (default=NA), threshold for max gene coverage (default = NA)
#' @param thrType Optional (default = "each", also can be "average" for average coverage on replicates), threshold type
#'
#' @return A table with IDs and calculated mean allelic coverage for given set of replicates
#'
#' @export
#'
#' @examples MeanCoverage(allelicCountsTable, reps=c(3,4), thr=8)
#'
MeanCoverage <- function(df, reps=NA, thr=NA, thrUP=NA, thrType="each"){
  counts <- ThresholdingCounts(df, reps, thr, thrUP, thrType)
  # Each replicate contributes two count columns (ref + alt), so the average
  # per-gene total coverage across replicates is 2 * rowMeans of all counts.
  coverage <- 2 * rowMeans(counts[, -1])
  res <- data.frame(df[, 1], meanCOV = coverage, stringsAsFactors = FALSE)
  names(res)[1] <- names(df)[1]
  res
}
/R/MeanCoverage.R
permissive
gimelbrantlab/QCumber
R
false
false
1,152
r
#' MeanCoverage
#'
#' Computes the mean total allelic coverage (maternal + paternal counts) per
#' gene over the selected replicates.
#'
#' @param df Allele counts dataframe: with 2n+1 columns, "ID" and 2n columns with ref & alt counts (rep1_ref, rep1_alt, rep2_ref, rep2_alt, ...)
#' @param reps Optional (default=NA, all replicates), a vector of replicate numbers for which the analysis should be applied
#' @param thr Optional (default=NA), threshold on the overall number of counts for a gene to be considered in the analysis
#' @param thrUP Optional (default=NA), threshold for max gene coverage (default = NA)
#' @param thrType Optional (default = "each", also can be "average" for average coverage on replicates), threshold type
#'
#' @return A table with IDs and calculated mean allelic coverage for given set of replicates
#'
#' @export
#'
#' @examples MeanCoverage(allelicCountsTable, reps=c(3,4), thr=8)
#'
MeanCoverage <- function(df, reps=NA, thr=NA, thrUP=NA, thrType="each"){
  counts <- ThresholdingCounts(df, reps, thr, thrUP, thrType)
  # Each replicate contributes two count columns (ref + alt), so the average
  # per-gene total coverage across replicates is 2 * rowMeans of all counts.
  coverage <- 2 * rowMeans(counts[, -1])
  res <- data.frame(df[, 1], meanCOV = coverage, stringsAsFactors = FALSE)
  names(res)[1] <- names(df)[1]
  res
}
#' Set default inputs for quantile mapping #' #' \code{set.defaults} creates a list object that stores the default #' input parameters for the package \code{quantproj}, to be #' used as an input for most other functions. It also creates the #' directories set as defaults (and their subdirectories used in #' various bits of code in this package) if they do not yet exist. #' #' @section Directory structure: #' Generally, this package uses three main directories (though #' \code{aux.dir} may be the same as either of the data directories, #' the user is discouraged from setting the two data directories equal #' or using pre-existing directories, for file dependency reasons): #' \describe{ #' \item{\code{aux_dir}}{a directory containing two sub-directories, #' \code{bases} for saved spline basis functions (the #' predictors in the quantile regression), and \code{run_logs} #' for logging progress on the bigger scripts} #' \item{\code{base.data.dir}}{the directory in which to place the raw data to #' be projected. Contains a sub-directory, \code{output}, for the resultant #' projections.} #' \item{\code{mod.data.dir}}{the directory in which to place the model data #' whose distributional changes are used to project the data in #' \code{base.data.dir}. Contains a sub-directory, \code{params}, in which the #' quantile fit parameters are stored.} #' } #' #' @section Note on file structure: #' This package assumes all files follow CMIP5 standard file naming #' conventions; in other words, it assumes that variable, data frequency, model #' name, and file year range are all encoded in the filenames themselves (see #' the #' \href{https://cmip.llnl.gov/cmip5/docs/cmip5_data_reference_syntax.pdf}{CMIP5 #' syntax guide} for more information). 
The filename format is: #' \code{[variable]_[frequency]_[model]_[experiment]_[run/ensemble #' member]_[timeframe](_[suffix])} #' #' @param base.data.dir the directory with raw data to be projected #' @param mod.data.dir the directory with raw model ensemble data used to project #' @param aux.dir directory for auxiliary files - saved basis functions, etc. #' #' @param fn.suffix optional add-on to all output filenames #' #' @param lat.clip,lon.clip if desired, a \code{c(min,max)} vector giving bounds for a lon/lat box; only data within this box will be loaded and processed #' #' @param filevar variable shorthand (CMIP5 syntax, def: \code{"tas"} for near-surface air temperature) #' @param freq data frequency shorthand (CMIP5 syntax, def: \code{"day"} for daily) #' @param mod.name name of model ensemble (def: \code{"LENS"}) #' @param base.name name of data product to be projected (def: \code{"ERA-INTERIM"}) #' #' @param base.year.range desired time frame of the "base" time period to project (def: \code{c(1979,2010)}) #' @param mod.year.range desired time frame of the time frame to be processed (quantiles calculated) for the model ensmeble; make sure this range includes \code{base.year.range} (def: \code{c(1979,2099)}) #' @param proj.year.range desired time frame to project to (def: \code{c(2011,2099)}) #' #' @param q_norm normalizing quantiles (must be a \code{3 x 1} vector, i.e. \code{c(0.1,0.5,0.9)}) #' @param q_bulk bulk quantiles (must be a \code{[n_bulk_quantiles x 1]} vector, i.e. \code{c(0.25,0.3,0.5,0.6,0.75)}) #' @param q_tail tail quantiles (must be a \code{[n_bulk_quantiles x 1]} vector, i.e. \code{c(0.5,0.9,0.95)}). These are fractions of the difference between \code{max(q_bulk)} and \code{0}/\code{1}! 
#' @param norm.x.df,bulk.x.df,tail.x.df degrees of freedom for normalizing, bulk, and tail quantile regressions, in a \code{3 x 1} vector for \code{c([seasonal],[long-term],[interaction])}
#' @param get.volc include a volcanic predictor in the normalization (def: \code{FALSE})
#'
#' @param nboots number of bootstrap runs (def: \code{50})
#' @param block.size bootstrap block size (def: \code{40}) (ACROSS RUNS - SO 20 means each bootstrap block is across 20 randomly chosen runs)
#'
#' @param varnames a list with each element giving the possible names for the
#' variable with the same name as the list element. For example, the list
#' element \code{lat=c("lat","latitude","Latititude","latitude_1")} makes
#' it so that the latitude variable in the NetCDF files loaded through
#' \code{\link{get.ncdf}} can be named any of those names without the code
#' running into trouble.
#' @param search.str a search string used to look for the source NetCDF files
#' (def: "\code{[filevar]_day_.*nc}", with \code{[filevar]} set above)
#'
#'
#' @return a list object containing all of the parameters listed above, to be used
#' as an input in the other functions in this package.
set.defaults <- function(
  #----- FILE PATHS -----
  # The directory in which code is stored
  aux.dir=paste0(getwd(),"/aux/"),
  # The directory with weather data to be projected
  base.data.dir=paste0(getwd(),"/base_data/"),
  # The directory with raw model ensemble data
  mod.data.dir=paste0(getwd(),"/model_data/"),
  # A custom suffix for output files
  fn.suffix=character(),

  #----- VARIABLE -----
  # Variable shorthand (use CMIP5)
  filevar="tas",

  #----- MODEL NAMES AND IDENTIFIERS -----
  # Name of the model ensemble
  mod.name="LENS",
  # Name of the weather product
  base.name="ERA-INTERIM",

  #------ GEOGRAPHIC PARAMETERS -----
  # Set to only process a subset of locations
  lat.clip=numeric(),
  lon.clip=numeric(),

  #------ QUANTILE MAPPING PARAMETERS -----
  # Base data process year range (this is the desired
  # length of the processing - if the reanalysis files
  # spread farther than this range, only a subset will
  # be used)
  base.year.range=c(1979,2010),
  # Model process year range (this is the desired
  # length of the processing, as above. Make sure this
  # range includes the base.year.range above!)
  mod.year.range=c(1979,2099),
  # Years to project to
  proj.year.range=c(2011,2099),
  # Normalizing quantiles
  q_norm=c(0.1,0.5,0.9),
  # Normalizing degrees of freedom (model)
  norm.x.df=c(14,6,3),
  # Normalizing degrees of freedom (weather)
  base.norm.x.df=c(10,1,0),
  # Include a volcanic predictor in the normalization
  get.volc=FALSE,
  # Bulk quantiles
  q_bulk=c(0.10,0.18,0.25,0.35,0.42,0.50,0.58,0.65,0.75,0.82,0.90),
  # Bulk degrees of freedom
  bulk.x.df=c(14,6,3),
  # Tail quantiles
  q_tail=c(0.01,0.10,0.25,0.50,0.75),
  # Tail degrees of freedom
  tail.x.df=c(3,1,0),

  #------ PROJECTION OPTIONS -----
  # How to determine 'base' days for projection
  index.type="resampling_rep",
  resampling.timescale="year",

  #------ BOOTSTRAPPING -----
  bootstrapping=FALSE,
  nboots=50,
  block.size=40,

  #----- FILENAME/VARNAME ISSUES -----
  varnames=list(lon=c("lon","longitude","Longitude","longitude_1"),
                lat=c("lat","latitude","Latititude","latitude_1"),
                time=c("time"),
                run=c("run"),
                loc=c("global_loc","loc","location","lonlat")),
  search.str=character()
) {

  # Warn if the weather (base) years are not fully contained in the model years.
  if (min(base.year.range)<min(mod.year.range) || max(base.year.range)>max(mod.year.range)) {
    warning(paste0('The year range chosen for the model processing (',
                   paste0(mod.year.range,collapse='-'),') does not ',
                   'include the years chosen for the weather data processing (',
                   paste0(base.year.range,collapse='-'),'), please reconsider.'))
  }

  # Default filename search string, built from the variable shorthand.
  if (length(search.str)==0) {
    search.str <- paste0(filevar,"_day_.*nc")
  }

  # Throw a warning if the model / base names contain a '.' or a '_'
  # (the regex below checks '.' and '_', which break the underscore-delimited
  # CMIP5-style filename parsing; the messages now match the check).
  if (grepl("\\.|\\_",mod.name)) {
    warning(paste0("The name of the projecting model (ensemble), ",mod.name,
                   " contains a '.' or a '_'. Please avoid this (yes, rename your files if you have to), since it tends to mess up the file identification procedures."))
  }
  if (grepl("\\.|\\_",base.name)) {
    warning(paste0("The name of the base dataset/model, ",base.name,
                   " contains a '.' or a '_'. Please avoid this (yes, rename your files if you have to), since it tends to mess up the file identification procedures."))
  }

  # Create directories if they don't yet exist ---------
  # Auxiliary directory and subdirectories
  if (!dir.exists(aux.dir)) {dir.create(aux.dir); cat(paste0("\n",aux.dir," created for auxiliary files!"),fill=TRUE)} else {cat(paste0("\n",aux.dir," set as location of auxiliary files!"),fill=TRUE)}
  if (!dir.exists(paste0(aux.dir,"bases/"))) {dir.create(paste0(aux.dir,"bases/")); cat(paste0(aux.dir,"bases/ created for predictor basis files!"),fill=TRUE)} else {cat(paste0(aux.dir,"bases/ set as location of predictor basis files!"),fill=TRUE)}
  if (!dir.exists(paste0(aux.dir,"run_logs/"))) {dir.create(paste0(aux.dir,"run_logs/")); cat(paste0(aux.dir,"run_logs/ created for run logs!"),fill=TRUE)} else {cat(paste0(aux.dir,"run_logs/ set as location of run logs!"),fill=TRUE)}

  # Base data directory
  if (!dir.exists(base.data.dir)) {dir.create(base.data.dir); cat(paste0("\n",base.data.dir," created for data to be projected! Please make sure the only files in here that match the search string '",search.str,"' are the base data files you want to project."),fill=TRUE)
  } else {
    cat(paste0("\n",base.data.dir," set as location of the data to be projected! Please make sure the only files in here that match the search string '",search.str,"' are the base data files you want to project."),fill=TRUE)
  }
  if (!dir.exists(paste0(base.data.dir,"output/"))) {dir.create(paste0(base.data.dir,"output/")); cat(paste0(base.data.dir,"output/ created for the outputted, projected data!"),fill=TRUE)} else {cat(paste0(base.data.dir,"output/ set as location for the outputted, projected data!"),fill=TRUE)}

  # Model/projecting data directory
  if (!dir.exists(mod.data.dir)) {dir.create(mod.data.dir); cat(paste0("\n",mod.data.dir," created for projecting data! Please make sure the only files in here that match the search string '",search.str,"' are the model data files you want to use for projecting."),fill=TRUE)
  } else {
    cat(paste0("\n",mod.data.dir," set as location for projecting data! Please make sure the only files in here that match the search string '",search.str,"' are the model data files you want to use for projecting."),fill=TRUE)
  }
  if (!dir.exists(paste0(mod.data.dir,"params/"))) {dir.create(paste0(mod.data.dir,"params/")); cat(paste0(mod.data.dir,"params/ created for projecting data quantile fits!"),fill=TRUE)} else {cat(paste0(mod.data.dir,"params/ set as location for projecting data quantile fits!"),fill=TRUE)}

  # Save -----------------------------------------------
  # Collect every argument (plus the possibly-defaulted search.str) into a
  # named list. mget(ls()) forces and gathers all objects in this function's
  # environment in the same alphabetical order the previous ls() loop used,
  # without resorting to eval(parse(...)).
  defaults <- mget(ls())
  return(defaults)
}
/R/various_defaults_func.R
no_license
ks905383/quantproj
R
false
false
10,559
r
#' Set default inputs for quantile mapping #' #' \code{set.defaults} creates a list object that stores the default #' input parameters for the package \code{quantproj}, to be #' used as an input for most other functions. It also creates the #' directories set as defaults (and their subdirectories used in #' various bits of code in this package) if they do not yet exist. #' #' @section Directory structure: #' Generally, this package uses three main directories (though #' \code{aux.dir} may be the same as either of the data directories, #' the user is discouraged from setting the two data directories equal #' or using pre-existing directories, for file dependency reasons): #' \describe{ #' \item{\code{aux_dir}}{a directory containing two sub-directories, #' \code{bases} for saved spline basis functions (the #' predictors in the quantile regression), and \code{run_logs} #' for logging progress on the bigger scripts} #' \item{\code{base.data.dir}}{the directory in which to place the raw data to #' be projected. Contains a sub-directory, \code{output}, for the resultant #' projections.} #' \item{\code{mod.data.dir}}{the directory in which to place the model data #' whose distributional changes are used to project the data in #' \code{base.data.dir}. Contains a sub-directory, \code{params}, in which the #' quantile fit parameters are stored.} #' } #' #' @section Note on file structure: #' This package assumes all files follow CMIP5 standard file naming #' conventions; in other words, it assumes that variable, data frequency, model #' name, and file year range are all encoded in the filenames themselves (see #' the #' \href{https://cmip.llnl.gov/cmip5/docs/cmip5_data_reference_syntax.pdf}{CMIP5 #' syntax guide} for more information). 
The filename format is: #' \code{[variable]_[frequency]_[model]_[experiment]_[run/ensemble #' member]_[timeframe](_[suffix])} #' #' @param base.data.dir the directory with raw data to be projected #' @param mod.data.dir the directory with raw model ensemble data used to project #' @param aux.dir directory for auxiliary files - saved basis functions, etc. #' #' @param fn.suffix optional add-on to all output filenames #' #' @param lat.clip,lon.clip if desired, a \code{c(min,max)} vector giving bounds for a lon/lat box; only data within this box will be loaded and processed #' #' @param filevar variable shorthand (CMIP5 syntax, def: \code{"tas"} for near-surface air temperature) #' @param freq data frequency shorthand (CMIP5 syntax, def: \code{"day"} for daily) #' @param mod.name name of model ensemble (def: \code{"LENS"}) #' @param base.name name of data product to be projected (def: \code{"ERA-INTERIM"}) #' #' @param base.year.range desired time frame of the "base" time period to project (def: \code{c(1979,2010)}) #' @param mod.year.range desired time frame of the time frame to be processed (quantiles calculated) for the model ensmeble; make sure this range includes \code{base.year.range} (def: \code{c(1979,2099)}) #' @param proj.year.range desired time frame to project to (def: \code{c(2011,2099)}) #' #' @param q_norm normalizing quantiles (must be a \code{3 x 1} vector, i.e. \code{c(0.1,0.5,0.9)}) #' @param q_bulk bulk quantiles (must be a \code{[n_bulk_quantiles x 1]} vector, i.e. \code{c(0.25,0.3,0.5,0.6,0.75)}) #' @param q_tail tail quantiles (must be a \code{[n_bulk_quantiles x 1]} vector, i.e. \code{c(0.5,0.9,0.95)}). These are fractions of the difference between \code{max(q_bulk)} and \code{0}/\code{1}! 
#' @param norm.x.df,bulk.x.df,tail.x.df degrees of freedom for normalizing, bulk, and tail quantile regressions, in a \code{3 x 1} vector for \code{c([seasonal],[long-term],[interaction])}
#' @param get.volc include a volcanic predictor in the normalization (def: \code{FALSE})
#'
#' @param nboots number of bootstrap runs (def: \code{50})
#' @param block.size bootstrap block size (def: \code{40}) (ACROSS RUNS - SO 20 means each bootstrap block is across 20 randomly chosen runs)
#'
#' @param varnames a list with each element giving the possible names for the
#' variable with the same name as the list element. For example, the list
#' element \code{lat=c("lat","latitude","Latititude","latitude_1")} makes
#' it so that the latitude variable in the NetCDF files loaded through
#' \code{\link{get.ncdf}} can be named any of those names without the code
#' running into trouble.
#' @param search.str a search string used to look for the source NetCDF files
#' (def: "\code{[filevar]_day_.*nc}", with \code{[filevar]} set above)
#'
#'
#' @return a list object containing all of the parameters listed above, to be used
#' as an input in the other functions in this package.
set.defaults <- function(
  #----- FILE PATHS -----
  # The directory in which code is stored
  aux.dir=paste0(getwd(),"/aux/"),
  # The directory with weather data to be projected
  base.data.dir=paste0(getwd(),"/base_data/"),
  # The directory with raw model ensemble data
  mod.data.dir=paste0(getwd(),"/model_data/"),
  # A custom suffix for output files
  fn.suffix=character(),

  #----- VARIABLE -----
  # Variable shorthand (use CMIP5)
  filevar="tas",

  #----- MODEL NAMES AND IDENTIFIERS -----
  # Name of the model ensemble
  mod.name="LENS",
  # Name of the weather product
  base.name="ERA-INTERIM",

  #------ GEOGRAPHIC PARAMETERS -----
  # Set to only process a subset of locations
  lat.clip=numeric(),
  lon.clip=numeric(),

  #------ QUANTILE MAPPING PARAMETERS -----
  # Base data process year range (this is the desired
  # length of the processing - if the reanalysis files
  # spread farther than this range, only a subset will
  # be used)
  base.year.range=c(1979,2010),
  # Model process year range (this is the desired
  # length of the processing, as above. Make sure this
  # range includes the base.year.range above!)
  mod.year.range=c(1979,2099),
  # Years to project to
  proj.year.range=c(2011,2099),
  # Normalizing quantiles
  q_norm=c(0.1,0.5,0.9),
  # Normalizing degrees of freedom (model)
  norm.x.df=c(14,6,3),
  # Normalizing degrees of freedom (weather)
  base.norm.x.df=c(10,1,0),
  # Include a volcanic predictor in the normalization
  get.volc=FALSE,
  # Bulk quantiles
  q_bulk=c(0.10,0.18,0.25,0.35,0.42,0.50,0.58,0.65,0.75,0.82,0.90),
  # Bulk degrees of freedom
  bulk.x.df=c(14,6,3),
  # Tail quantiles
  q_tail=c(0.01,0.10,0.25,0.50,0.75),
  # Tail degrees of freedom
  tail.x.df=c(3,1,0),

  #------ PROJECTION OPTIONS -----
  # How to determine 'base' days for projection
  index.type="resampling_rep",
  resampling.timescale="year",

  #------ BOOTSTRAPPING -----
  bootstrapping=FALSE,
  nboots=50,
  block.size=40,

  #----- FILENAME/VARNAME ISSUES -----
  varnames=list(lon=c("lon","longitude","Longitude","longitude_1"),
                lat=c("lat","latitude","Latititude","latitude_1"),
                time=c("time"),
                run=c("run"),
                loc=c("global_loc","loc","location","lonlat")),
  search.str=character()
) {

  # Warn if the weather (base) years are not fully contained in the model years.
  if (min(base.year.range)<min(mod.year.range) || max(base.year.range)>max(mod.year.range)) {
    warning(paste0('The year range chosen for the model processing (',
                   paste0(mod.year.range,collapse='-'),') does not ',
                   'include the years chosen for the weather data processing (',
                   paste0(base.year.range,collapse='-'),'), please reconsider.'))
  }

  # Default filename search string, built from the variable shorthand.
  if (length(search.str)==0) {
    search.str <- paste0(filevar,"_day_.*nc")
  }

  # Throw a warning if the model / base names contain a '.' or a '_'
  # (the regex below checks '.' and '_', which break the underscore-delimited
  # CMIP5-style filename parsing; the messages now match the check).
  if (grepl("\\.|\\_",mod.name)) {
    warning(paste0("The name of the projecting model (ensemble), ",mod.name,
                   " contains a '.' or a '_'. Please avoid this (yes, rename your files if you have to), since it tends to mess up the file identification procedures."))
  }
  if (grepl("\\.|\\_",base.name)) {
    warning(paste0("The name of the base dataset/model, ",base.name,
                   " contains a '.' or a '_'. Please avoid this (yes, rename your files if you have to), since it tends to mess up the file identification procedures."))
  }

  # Create directories if they don't yet exist ---------
  # Auxiliary directory and subdirectories
  if (!dir.exists(aux.dir)) {dir.create(aux.dir); cat(paste0("\n",aux.dir," created for auxiliary files!"),fill=TRUE)} else {cat(paste0("\n",aux.dir," set as location of auxiliary files!"),fill=TRUE)}
  if (!dir.exists(paste0(aux.dir,"bases/"))) {dir.create(paste0(aux.dir,"bases/")); cat(paste0(aux.dir,"bases/ created for predictor basis files!"),fill=TRUE)} else {cat(paste0(aux.dir,"bases/ set as location of predictor basis files!"),fill=TRUE)}
  if (!dir.exists(paste0(aux.dir,"run_logs/"))) {dir.create(paste0(aux.dir,"run_logs/")); cat(paste0(aux.dir,"run_logs/ created for run logs!"),fill=TRUE)} else {cat(paste0(aux.dir,"run_logs/ set as location of run logs!"),fill=TRUE)}

  # Base data directory
  if (!dir.exists(base.data.dir)) {dir.create(base.data.dir); cat(paste0("\n",base.data.dir," created for data to be projected! Please make sure the only files in here that match the search string '",search.str,"' are the base data files you want to project."),fill=TRUE)
  } else {
    cat(paste0("\n",base.data.dir," set as location of the data to be projected! Please make sure the only files in here that match the search string '",search.str,"' are the base data files you want to project."),fill=TRUE)
  }
  if (!dir.exists(paste0(base.data.dir,"output/"))) {dir.create(paste0(base.data.dir,"output/")); cat(paste0(base.data.dir,"output/ created for the outputted, projected data!"),fill=TRUE)} else {cat(paste0(base.data.dir,"output/ set as location for the outputted, projected data!"),fill=TRUE)}

  # Model/projecting data directory
  if (!dir.exists(mod.data.dir)) {dir.create(mod.data.dir); cat(paste0("\n",mod.data.dir," created for projecting data! Please make sure the only files in here that match the search string '",search.str,"' are the model data files you want to use for projecting."),fill=TRUE)
  } else {
    cat(paste0("\n",mod.data.dir," set as location for projecting data! Please make sure the only files in here that match the search string '",search.str,"' are the model data files you want to use for projecting."),fill=TRUE)
  }
  if (!dir.exists(paste0(mod.data.dir,"params/"))) {dir.create(paste0(mod.data.dir,"params/")); cat(paste0(mod.data.dir,"params/ created for projecting data quantile fits!"),fill=TRUE)} else {cat(paste0(mod.data.dir,"params/ set as location for projecting data quantile fits!"),fill=TRUE)}

  # Save -----------------------------------------------
  # Collect every argument (plus the possibly-defaulted search.str) into a
  # named list. mget(ls()) forces and gathers all objects in this function's
  # environment in the same alphabetical order the previous ls() loop used,
  # without resorting to eval(parse(...)).
  defaults <- mget(ls())
  return(defaults)
}
# Copyright 2016 The Board of Trustees of the Leland Stanford Junior University.
# Direct inquiries to Sam Borgeson (sborgeson@stanford.edu)
# or professor Ram Rajagopal (ramr@stanford.edu)

#' @title Prepare data frame column names for export
#'
#' @description Removes punctuation from data frame column names, replacing all with underscores
#' and removing underscores that are repeated one after another
#'
#' @param df The data frame whose columns are to be renamed (a character
#'   vector or factor of names is also accepted)
#'
#' @param prefix An optional prefix to place in front of all column names
#'
#' @return A character vector of cleaned names (punctuation and spaces
#'   collapsed to single underscores, with no leading/trailing underscore).
#'
#' @export
fixNames = function(df,prefix='') {
  # Accept a data frame (use its names), a character vector (use as-is),
  # or a factor (use its underlying labels); anything else is an error.
  if('data.frame'%in% class(df) ) {
    nms = names(df)
  } else if ('character' %in% class(df) ) {
    nms = df
  } else if ('factor' %in% class(df) ) {
    nms = levels(df)[df]
  } else {
    stop(paste("Unrecognized class.", class(df),"Can't figure out names to be fixed"))
  }
  fixed = gsub('[[[:punct:] ]','_',nms)   # change ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~ and spaces to underscores
  fixed = gsub('__+','_',fixed)           # remove double or more underscores
  fixed = gsub('_$','',fixed)             # remove trailing underscores
  fixed = gsub('^_','',fixed)             # remove leading underscores
  fixed = paste(prefix,fixed,sep='')
  return(fixed)
}

#' @title Merge load shape features into feature data frame
#'
#' @description Pulls load shape features from a shape results object and appends them to
#' an existing feature data frame
#'
#' @param features Data frame of feature data
#'
#' @param shape.results Load shape clustering and assignment results object to pull features from
#'
#' @return A data frame identical to the one passed in, but with new load shape feature columns
#'
#' @export
mergeShapeFeatures = function(features,shape.results) {
  # Attach the per-id entropy column from the shape features
  newFeatures = merge(features,shape.results$shape.features[,c('id','entropy')],by.x='id',by.y='id',all.x=T)
  # Convert per-category counts into per-row fractions and rename
  # the columns from "count" to "pct" to reflect the new meaning
  catCounts = shape.results$shape.stats$category.counts
  catTotals = rowSums(catCounts[,-1])
  catCounts[,-1] = catCounts[,-1] / catTotals
  names(catCounts) = gsub(pattern='count',replacement='pct',names(catCounts))
  newFeatures = merge(newFeatures,catCounts,by.x='id',by.y='id',all.x=T)
  return(newFeatures)
}

#' @title Convert POSIXct data frame columns to integer seconds since the epoch
#'
#' @description Searches the data frame for columns with 'POSIXct' in their class values and
#' converts them to integers representing seconds since the epoch
#'
#' @param df Data frame of feature data
#'
#' @return A data frame identical to the one passed in, but with integer columns replacing POSIXct ones
#'
#' @export
datesToEpoch = function(df) {
  cls = lapply(df, class)
  # POSIXct columns carry class c("POSIXct", "POSIXt"), so test membership
  dateCols = which(unlist(lapply(cls, function(x) { 'POSIXct' %in% x })))
  for( col in dateCols){
    df[,col] <- as.integer(df[,col])
  }
  return(df)
}

#' @title Clean up feature data in preparation for saving
#'
#' @description This function renames data columns for export via fixNames(), converts factors
#' into characters, and checks for id and zip5 columns
#' #' @param features The data frame of feature to be cleaned up #' #' @param checkId boolean indicating whether to enforce a check for an id column with an error message. This should #' be true when exporting features or other id matched data and false otherwise. #' #' @param checkGeo boolean indicating whether to enforce a check zip5 columns with a warning message. This should #' be true when exporting features that will be mapped. #' #' @return A copy of the original data frame that is cleaned up #' #' @export cleanFeatureDF = function(features, checkId=TRUE, checkGeo=TRUE) { names(features) <- fixNames(features) # convert any factors to regular characters (otherwise the values are the factor indices) i <- sapply(features, is.factor) features[i] <- lapply(features[i], as.character) if(checkId) { if( ! c('id') %in% names(features)) { stop('id column required for exported data') } } if(checkGeo) { if( ! c('zip5') %in% names(features)) { print('[cleanFeatureDF] WARNING: VISDOM-web requires a zip5 geography column in features to produce maps') } } return(features) } #' @title Write feature data frame to an hdf5 file #' #' @description Write feature data frame to an hdf5 file #' #' @param data The feature data frame to be written #' #' @param fName The name of the hdf5 formatted file to write the data to #' #' @param label The name of the data table within the hdf5 file #' #' @param filePath optional path to the location where exported files should be written (if applicable). Default is \code{getwd()} #' #' @export writeH5Data = function(data,fName,label, filePath=NA) { if( ! 'rhdf5' %in% rownames(installed.packages()) ) { print('Package rhdf5 does not appear to be installed. This comes from outside of CRAN.') print('Run the following to install it') print('source("https://bioconductor.org/biocLite.R")') print('biocLite("rhdf5")') } #source("https://bioconductor.org/biocLite.R") #biocLite("rhdf5") require(rhdf5) fName = paste(fName,'h5',sep='.') if( ! 
is.na(filePath ) ){ dir.create(filePath, showWarnings = FALSE) fName = file.path( filePath, fName) } if(! file.exists(fName)) { rhdf5::h5createFile(fName) } rhdf5::h5write(data,fName,label) } # Internal function for parsing name=value configuration file parseConfig = function(config_path) { cfg = read.csv(config_path,sep="=",header=F) cfg[] <- lapply(cfg, as.character) cfg = as.list(setNames(cfg$V2,cfg$V1)) return(cfg) } #' @title Get the database id of a feature_set/run combination #' #' @description Load user-specified run config file and return a unique numeric #' id from the feature_runs metadata table. Create the feature_runs table if #' it does not exist. #' #' @param conn A database connection, usually obtained from \code{conf.dbCon} or \code{\link{DBI::dbConnect}} #' #' @param runConfig The run configuration file with key-value pairs of #' feature_set, feature_set_description, run_name and run_description. See #' 1inst/feature_set_run_conf/exampl_feature_set.conf1 for an example. #' #' @export getRunId = function(conn, runConfig) { cfg = parseConfig(runConfig) # Create the feature_runs table if it doesn't exist. table_name = "feature_runs" if( ! DBI::dbExistsTable(conn, table_name) ) { sql_dialect = getSQLdialect(conn) create_table_path = file.path(system.file(package='visdom'), "sql", paste("feature_runs.create.", sql_dialect, ".sql", sep="")) if( file.exists(create_table_path) ) { sql_create = readChar(create_table_path, file.info(create_table_path)$size) } else { stop(sprintf(paste( "The sql file to create the %s table does not exist for", "SQL dialect %s at path %s. You need to manually create the feature_run", "table in your database, or store the create statement(s) at that path."), table_name, sql_dialect, create_table_path)) } rows_affected = DBI::dbExecute(conn, sql_create) } # Validate incoming data. Start by determining length of varchar columns. The length column may or may not be available outside of the MySQL database driver. 
sql_schema_query = DBI::sqlInterpolate( conn, paste("SELECT *", "FROM", DBI::dbQuoteIdentifier(conn, table_name), "limit 0")) rs <- DBI::dbSendQuery(conn, sql_schema_query) column_info = DBI::dbColumnInfo(rs) DBI::dbClearResult(rs) # Now check length of data against length of columns. for(column in names(cfg)) { dat_length = nchar(cfg[column]) allowed_length = column_info[column_info$name == column, ]$length if(dat_length > allowed_length) { stop(sprintf(paste( "Data for %s from run config file %s is too long.", "Max character length is %d, and the data is %d characters long."), column, runConfig, allowed_length, dat_length)) } } # Look for an existing record in the table based on feature_set and run_name sql_query = DBI::sqlInterpolate( conn, paste("SELECT *", "FROM", DBI::dbQuoteIdentifier(conn, table_name), "WHERE feature_set = ?feature_set and run_name = ?run_name"), feature_set=cfg$feature_set, run_name=cfg$run_name) dat = DBI::dbGetQuery(conn, sql_query) # Insert a record for this run if one does not exist if( nrow(dat) == 0 ) { sql_insert = DBI::sqlInterpolate( conn, paste("INSERT INTO", DBI::dbQuoteIdentifier(conn, table_name), "(feature_set, feature_set_description, run_name, run_description,create_time)", "VALUES (?feature_set, ?feature_set_description, ?run_name, ?run_description, NOW())"), feature_set = cfg$feature_set, feature_set_description = cfg$feature_set_description, run_name = cfg$run_name, run_description = cfg$run_description) rows_affected = DBI::dbExecute(conn, sql_insert) if( rows_affected != 1) { stop(sprintf("Error inserting into %s table data from %s.", table_name, runConfig)) } # Look up the id now. 
dat = DBI::dbGetQuery(conn, sql_query) } return(dat$id) } #' @title Write feature data frame to a database #' #' @description Write feature data frame to a database using a \code{\link{DBI::dbWriteTable}} call #' #' @param data The feature data frame to be written #' #' @param name Unused, but present for compatibility with other write* fucntions #' #' @param label Unused, but present for compatibility with other write* fucntions #' #' @param conn A DBI dbConnection object to the database that will host the table #' #' @param overwrite Boolean indicator for whether the data written should overwrite any existing table or append it #' #' @param runConfig Path to a run configuration file with names and descriptions #' of the feature set and run. See #' `inst/feature_set_run_conf/exampl_feature_set.conf` for an example. #' #' @export writeDatabaseData = function(data, name=NULL, label=NULL, conn, overwrite=TRUE, runConfig) { # con <- dbConnect(SQLite(), dbname="filename.sqlite") # Use cbind so runId is the first column. 
data = cbind(runId=getRunId(conn, runConfig), data) tableName = parseConfig(runConfig)$feature_set DBI::dbWriteTable(conn=conn, name=tableName, value=data, row.names=F, overwrite=overwrite, append=!overwrite) # write data frame to table # Update the time in the metadata table table_name = "feature_runs" sql_update = DBI::sqlInterpolate( conn, paste("UPDATE", DBI::dbQuoteIdentifier(conn, table_name), "SET update_time=CURRENT_TIMESTAMP")) rows_affected = DBI::dbExecute(conn, sql_update) if( rows_affected != 1) { stop(sprintf("Error updating time in %s table.", table_name)) } # DBI::dbDisconnect(conn) #print('No SQLite support yet!') #if (require("RSQLite")) { # con <- dbConnect(RSQLite::SQLite(), ":memory:") # dbDisconnect(con) #} #dbWriteTable(conn=db, name="allBasics", value=basicsPlus, row.names=F, overwrite=T) # write data frame to table #dbListFields(db,"allBasics") # list of column names #db_allBasics = dbReadTable(conn=db, name="allBasics") # load data frame from table #dbDisconnect(db) # cleanup: close the file } #' @title Write feature data frame to a csv file #' #' @description Write feature data frame to a csv file #' #' @param data The feature data frame to be written #' #' @param fName The name of the csv file to write the data to #' #' @param label Unused, but present for compatibility with other write* fucntions #' #' @param filePath optional path to the location where exported files should be written (if applicable). Default is \code{getwd()} #' #' @export writeCSVData = function(data, fName, label=NA, filePath=NA) { if( ! is.na(label) ) { fName = paste(fName, label, sep='_') } fName = paste(fName, 'csv', sep='.') if( ! 
is.na(filePath ) ){ dir.create(filePath, showWarnings = FALSE) fName = file.path( filePath, fName) } write.csv(data, file=fName, row.names=F) } #' @title Save load shape results #' #' @description Exports standardized load shape clustering and assignment data into a #' corresponding set of exported data tables #' #' @param shape.results the shape feature results to export. These should be in the format returned by #' \code{visdomloadshape::shapeFeatures()}, as in #' \code{shapeFeatures(shapeCategoryEncoding(rawData=DATA_SOURCE$getAllData(), metaCols=1:4, encoding.dict=someDict))} #' #' @param prefix a prefix to apply to the feature column names #' #' @param format the data format for export. One of the values supported by the \code{format} paramater in \code{exportData()} #' #' @param filePath optional path to the location where exported files should be written (if applicable). Default is \code{getwd()} #' #' @export exportShapes = function(shape.results,prefix='',format='hdf5', filePath='.') { name = paste(prefix,'LoadShape',sep='') exportData(df=shape.results$shape.stats$cluster.counts, name=name, label='counts', format=format, checkId=TRUE, checkGeo=FALSE, filePath=filePath) exportData(df=shape.results$shape.stats$cluster.energy, name=name, label='sums', format=format, checkId=TRUE, checkGeo=FALSE, filePath=filePath) exportData(df=as.data.frame(shape.results$encoding.dict), name=name, label='centers', format=format, checkId=FALSE, checkGeo=FALSE, filePath=filePath) exportData(df=shape.results$encoding.dict.category.info, name=name, label='categoryMapping', format=format, checkId=FALSE, checkGeo=FALSE, filePath=filePath) } #' @title Export feature data into a selection of formats #' #' @description Runs the export function for a given data format on feature data #' #' @param df Data frame of feature data to export #' #' @param name Primary name of export, meaning file name or database table name #' #' @param label Optional data label for export formats. 
For example if not NA, this would be the name #' of the data table within an hdf5 file or a suffix to the csv file name, as in \code{paste(name, label, sep='_')} #' #' @param format One of the supported formats for data export, currently 'hdf5', 'csv', or 'database' #' #' @param checkId boolean control over whether to error out with a \code{stop()} if an id column is not present #' #' @param checkGeo boolean control over whether to warn if a geographic field, \code{zip5} in this case, is not present. #' #' @param ... Pass through parameters for specific export methods. For example, #' database export requires a conn object. #' #' @export exportData = function(df,name,label=NA,format='hdf5', checkId=TRUE, checkGeo=TRUE, ...) { if ('matrix' %in% class(df) ) { print('Warning. Converting matrix to data.frame') df = as.data.frame(df) } print(paste('Exporting name:', name, 'label:', label)) fn = list( hdf5=writeH5Data, hdf=writeH5Data, h5=writeH5Data, csv=writeCSVData, database=writeDatabaseData) df = cleanFeatureDF(df, checkId, checkGeo) fn[[format]](df, name, label, ... ) # call the format appropriate export function } #' @title Export feature run and load shape results #' #' @description Loads feature data and load shape clustering data from RData files and #' saves them into the selected export format #' #' @param feature.data File path to an RData file with feature data data frame or the data frame itself #' #' @param shape.results.data Optional file path to an RData file containing load shape clustering results #' or the results object itself. i.e. results from #' \code{visdomloadshape::shapeFeatures(visdomloadshape::shapeCategoryEncoding())} #' #' @param format Export data format - one of the ones supported by exportData() #' #' @param prefix Optional prefix to put n froun of all feature names #' #' @param filePath optional path to the directory where exported data should be written if the export type is a file. '.' by default. 
#' #' @export exportFeatureAndShapeResults = function(feature.data, shape.results.data=NULL, format='hdf5', prefix='', filePath='.') { if( 'character' %in% class(feature.data) ) { # if character, it must be a file path for loading feature data print(paste('Using feature data from file', feature.data)) load(feature.data) # should provide var named featureDF } else { print('Using passed feature data') featureDF = feature.data rm('feature.data') } if(! is.null(shape.results.data)) { if( 'character' %in% class(shape.results.data) ) { # if character, it must be a file path for loading shape data print(paste('Using shape data from file',shape.results.data)) load(shape.results.data) # should provide var named shape.results } else { shape.results = shape.results.data rm('shape.results.data') } print('Merging shape features into basic features') featureDF = mergeShapeFeatures(featureDF,shape.results) print(paste('Writing load shape data to',format)) exportShapes(shape.results,prefix,format, filePath) } print(paste('Writing feature data frame to',format)) exportData(df = featureDF, name = paste(prefix,'Basics',sep=''), label = 'basics', format = format, filePath=filePath) }
/R/util-export.R
permissive
ahmeduncc/visdom-1
R
false
false
18,271
r
# Copyright 2016 The Board of Trustees of the Leland Stanford Junior University. # Direct inquiries to Sam Borgeson (sborgeson@stanford.edu) # or professor Ram Rajagopal (ramr@stanford.edu) #' @title Prepare data frame column names for export #' #' @description Removes punctuation from data frame column names, replacing all with underscores #' and removing underscores that are repeated one after another #' #' @param df The data frame whose columns are to be renamed #' #' @param prefix An optional prefix to place in front of all column names #' #' @return A data frame identical to the one passed in, but with new column names. #' #' @export fixNames = function(df,prefix='') { if('data.frame'%in% class(df) ) { nms = names(df) } else if ('character' %in% class(df) ) { nms = df } else if ('factor' %in% class(df) ) { nms = levels(df)[df] } else { stop(paste("Unrecognized class.", class(df),"Can't figure out names to be fixed")) } fixed = gsub('[[[:punct:] ]','_',nms) # change ! " # $ % & ' ( ) * + , - . / : ; < = > ? 
@ [ \ ] ^ _ ` { | } ~ to underscores fixed = gsub('__+','_',fixed) # remove double or more underscores fixed = gsub('_$','',fixed) # remove trailing underscores fixed = gsub('^_','',fixed) # remove leading underscores fixed = paste(prefix,fixed,sep='') return(fixed) } #' @title Merge load shape features into feature data frame #' #' @description Pulls load shape features from a shape results object and appends them to #' an existing feature data frame #' #' @param features Data frame of feature data #' #' @param shape.results Load shape clustering and assignment results object to pull features from #' #' @return A data frame identical to the one passed in, but with new laod shape feature columns #' #' @export mergeShapeFeatures = function(features,shape.results) { newFeatures = merge(features,shape.results$shape.features[,c('id','entropy')],by.x='id',by.y='id',all.x=T) catCounts = shape.results$shape.stats$category.counts catTotals = rowSums(catCounts[,-1]) catCounts[,-1] = catCounts[,-1] / catTotals names(catCounts) = gsub(pattern='count',replacement='pct',names(catCounts)) newFeatures = merge(newFeatures,catCounts,by.x='id',by.y='id',all.x=T) return(newFeatures) } #' @title Convert POSIXct data frame columns to integer seconds since the epoch #' #' @description Searches the data frame for columns with 'POSIXct' in their class values and #' converts them to integers representing seconds since the epoch #' #' @param df Data frame of feature data #' #' @return A data frame identical to the one passed in, but with integer columns replacing POSIXct ones #' #' @export datesToEpoch = function(df) { cls = lapply(df, class) dateCols = which(unlist(lapply(cls, function(x) { 'POSIXct' %in% x }))) for( col in dateCols){ df[,col] <- as.integer(df[,col]) } return(df) } #' @title Clean up feature data in preparation for saving #' #' @description This function renames data columns for export via fixNames(), converts factors #' into characters, and checks for id and zip5 colums 
#' #' @param features The data frame of feature to be cleaned up #' #' @param checkId boolean indicating whether to enforce a check for an id column with an error message. This should #' be true when exporting features or other id matched data and false otherwise. #' #' @param checkGeo boolean indicating whether to enforce a check zip5 columns with a warning message. This should #' be true when exporting features that will be mapped. #' #' @return A copy of the original data frame that is cleaned up #' #' @export cleanFeatureDF = function(features, checkId=TRUE, checkGeo=TRUE) { names(features) <- fixNames(features) # convert any factors to regular characters (otherwise the values are the factor indices) i <- sapply(features, is.factor) features[i] <- lapply(features[i], as.character) if(checkId) { if( ! c('id') %in% names(features)) { stop('id column required for exported data') } } if(checkGeo) { if( ! c('zip5') %in% names(features)) { print('[cleanFeatureDF] WARNING: VISDOM-web requires a zip5 geography column in features to produce maps') } } return(features) } #' @title Write feature data frame to an hdf5 file #' #' @description Write feature data frame to an hdf5 file #' #' @param data The feature data frame to be written #' #' @param fName The name of the hdf5 formatted file to write the data to #' #' @param label The name of the data table within the hdf5 file #' #' @param filePath optional path to the location where exported files should be written (if applicable). Default is \code{getwd()} #' #' @export writeH5Data = function(data,fName,label, filePath=NA) { if( ! 'rhdf5' %in% rownames(installed.packages()) ) { print('Package rhdf5 does not appear to be installed. This comes from outside of CRAN.') print('Run the following to install it') print('source("https://bioconductor.org/biocLite.R")') print('biocLite("rhdf5")') } #source("https://bioconductor.org/biocLite.R") #biocLite("rhdf5") require(rhdf5) fName = paste(fName,'h5',sep='.') if( ! 
is.na(filePath ) ){ dir.create(filePath, showWarnings = FALSE) fName = file.path( filePath, fName) } if(! file.exists(fName)) { rhdf5::h5createFile(fName) } rhdf5::h5write(data,fName,label) } # Internal function for parsing name=value configuration file parseConfig = function(config_path) { cfg = read.csv(config_path,sep="=",header=F) cfg[] <- lapply(cfg, as.character) cfg = as.list(setNames(cfg$V2,cfg$V1)) return(cfg) } #' @title Get the database id of a feature_set/run combination #' #' @description Load user-specified run config file and return a unique numeric #' id from the feature_runs metadata table. Create the feature_runs table if #' it does not exist. #' #' @param conn A database connection, usually obtained from \code{conf.dbCon} or \code{\link{DBI::dbConnect}} #' #' @param runConfig The run configuration file with key-value pairs of #' feature_set, feature_set_description, run_name and run_description. See #' 1inst/feature_set_run_conf/exampl_feature_set.conf1 for an example. #' #' @export getRunId = function(conn, runConfig) { cfg = parseConfig(runConfig) # Create the feature_runs table if it doesn't exist. table_name = "feature_runs" if( ! DBI::dbExistsTable(conn, table_name) ) { sql_dialect = getSQLdialect(conn) create_table_path = file.path(system.file(package='visdom'), "sql", paste("feature_runs.create.", sql_dialect, ".sql", sep="")) if( file.exists(create_table_path) ) { sql_create = readChar(create_table_path, file.info(create_table_path)$size) } else { stop(sprintf(paste( "The sql file to create the %s table does not exist for", "SQL dialect %s at path %s. You need to manually create the feature_run", "table in your database, or store the create statement(s) at that path."), table_name, sql_dialect, create_table_path)) } rows_affected = DBI::dbExecute(conn, sql_create) } # Validate incoming data. Start by determining length of varchar columns. The length column may or may not be available outside of the MySQL database driver. 
sql_schema_query = DBI::sqlInterpolate( conn, paste("SELECT *", "FROM", DBI::dbQuoteIdentifier(conn, table_name), "limit 0")) rs <- DBI::dbSendQuery(conn, sql_schema_query) column_info = DBI::dbColumnInfo(rs) DBI::dbClearResult(rs) # Now check length of data against length of columns. for(column in names(cfg)) { dat_length = nchar(cfg[column]) allowed_length = column_info[column_info$name == column, ]$length if(dat_length > allowed_length) { stop(sprintf(paste( "Data for %s from run config file %s is too long.", "Max character length is %d, and the data is %d characters long."), column, runConfig, allowed_length, dat_length)) } } # Look for an existing record in the table based on feature_set and run_name sql_query = DBI::sqlInterpolate( conn, paste("SELECT *", "FROM", DBI::dbQuoteIdentifier(conn, table_name), "WHERE feature_set = ?feature_set and run_name = ?run_name"), feature_set=cfg$feature_set, run_name=cfg$run_name) dat = DBI::dbGetQuery(conn, sql_query) # Insert a record for this run if one does not exist if( nrow(dat) == 0 ) { sql_insert = DBI::sqlInterpolate( conn, paste("INSERT INTO", DBI::dbQuoteIdentifier(conn, table_name), "(feature_set, feature_set_description, run_name, run_description,create_time)", "VALUES (?feature_set, ?feature_set_description, ?run_name, ?run_description, NOW())"), feature_set = cfg$feature_set, feature_set_description = cfg$feature_set_description, run_name = cfg$run_name, run_description = cfg$run_description) rows_affected = DBI::dbExecute(conn, sql_insert) if( rows_affected != 1) { stop(sprintf("Error inserting into %s table data from %s.", table_name, runConfig)) } # Look up the id now. 
dat = DBI::dbGetQuery(conn, sql_query) } return(dat$id) } #' @title Write feature data frame to a database #' #' @description Write feature data frame to a database using a \code{\link{DBI::dbWriteTable}} call #' #' @param data The feature data frame to be written #' #' @param name Unused, but present for compatibility with other write* fucntions #' #' @param label Unused, but present for compatibility with other write* fucntions #' #' @param conn A DBI dbConnection object to the database that will host the table #' #' @param overwrite Boolean indicator for whether the data written should overwrite any existing table or append it #' #' @param runConfig Path to a run configuration file with names and descriptions #' of the feature set and run. See #' `inst/feature_set_run_conf/exampl_feature_set.conf` for an example. #' #' @export writeDatabaseData = function(data, name=NULL, label=NULL, conn, overwrite=TRUE, runConfig) { # con <- dbConnect(SQLite(), dbname="filename.sqlite") # Use cbind so runId is the first column. 
data = cbind(runId=getRunId(conn, runConfig), data) tableName = parseConfig(runConfig)$feature_set DBI::dbWriteTable(conn=conn, name=tableName, value=data, row.names=F, overwrite=overwrite, append=!overwrite) # write data frame to table # Update the time in the metadata table table_name = "feature_runs" sql_update = DBI::sqlInterpolate( conn, paste("UPDATE", DBI::dbQuoteIdentifier(conn, table_name), "SET update_time=CURRENT_TIMESTAMP")) rows_affected = DBI::dbExecute(conn, sql_update) if( rows_affected != 1) { stop(sprintf("Error updating time in %s table.", table_name)) } # DBI::dbDisconnect(conn) #print('No SQLite support yet!') #if (require("RSQLite")) { # con <- dbConnect(RSQLite::SQLite(), ":memory:") # dbDisconnect(con) #} #dbWriteTable(conn=db, name="allBasics", value=basicsPlus, row.names=F, overwrite=T) # write data frame to table #dbListFields(db,"allBasics") # list of column names #db_allBasics = dbReadTable(conn=db, name="allBasics") # load data frame from table #dbDisconnect(db) # cleanup: close the file } #' @title Write feature data frame to a csv file #' #' @description Write feature data frame to a csv file #' #' @param data The feature data frame to be written #' #' @param fName The name of the csv file to write the data to #' #' @param label Unused, but present for compatibility with other write* fucntions #' #' @param filePath optional path to the location where exported files should be written (if applicable). Default is \code{getwd()} #' #' @export writeCSVData = function(data, fName, label=NA, filePath=NA) { if( ! is.na(label) ) { fName = paste(fName, label, sep='_') } fName = paste(fName, 'csv', sep='.') if( ! 
is.na(filePath ) ){ dir.create(filePath, showWarnings = FALSE) fName = file.path( filePath, fName) } write.csv(data, file=fName, row.names=F) } #' @title Save load shape results #' #' @description Exports standardized load shape clustering and assignment data into a #' corresponding set of exported data tables #' #' @param shape.results the shape feature results to export. These should be in the format returned by #' \code{visdomloadshape::shapeFeatures()}, as in #' \code{shapeFeatures(shapeCategoryEncoding(rawData=DATA_SOURCE$getAllData(), metaCols=1:4, encoding.dict=someDict))} #' #' @param prefix a prefix to apply to the feature column names #' #' @param format the data format for export. One of the values supported by the \code{format} paramater in \code{exportData()} #' #' @param filePath optional path to the location where exported files should be written (if applicable). Default is \code{getwd()} #' #' @export exportShapes = function(shape.results,prefix='',format='hdf5', filePath='.') { name = paste(prefix,'LoadShape',sep='') exportData(df=shape.results$shape.stats$cluster.counts, name=name, label='counts', format=format, checkId=TRUE, checkGeo=FALSE, filePath=filePath) exportData(df=shape.results$shape.stats$cluster.energy, name=name, label='sums', format=format, checkId=TRUE, checkGeo=FALSE, filePath=filePath) exportData(df=as.data.frame(shape.results$encoding.dict), name=name, label='centers', format=format, checkId=FALSE, checkGeo=FALSE, filePath=filePath) exportData(df=shape.results$encoding.dict.category.info, name=name, label='categoryMapping', format=format, checkId=FALSE, checkGeo=FALSE, filePath=filePath) } #' @title Export feature data into a selection of formats #' #' @description Runs the export function for a given data format on feature data #' #' @param df Data frame of feature data to export #' #' @param name Primary name of export, meaning file name or database table name #' #' @param label Optional data label for export formats. 
For example if not NA, this would be the name #' of the data table within an hdf5 file or a suffix to the csv file name, as in \code{paste(name, label, sep='_')} #' #' @param format One of the supported formats for data export, currently 'hdf5', 'csv', or 'database' #' #' @param checkId boolean control over whether to error out with a \code{stop()} if an id column is not present #' #' @param checkGeo boolean control over whether to warn if a geographic field, \code{zip5} in this case, is not present. #' #' @param ... Pass through parameters for specific export methods. For example, #' database export requires a conn object. #' #' @export exportData = function(df,name,label=NA,format='hdf5', checkId=TRUE, checkGeo=TRUE, ...) { if ('matrix' %in% class(df) ) { print('Warning. Converting matrix to data.frame') df = as.data.frame(df) } print(paste('Exporting name:', name, 'label:', label)) fn = list( hdf5=writeH5Data, hdf=writeH5Data, h5=writeH5Data, csv=writeCSVData, database=writeDatabaseData) df = cleanFeatureDF(df, checkId, checkGeo) fn[[format]](df, name, label, ... ) # call the format appropriate export function } #' @title Export feature run and load shape results #' #' @description Loads feature data and load shape clustering data from RData files and #' saves them into the selected export format #' #' @param feature.data File path to an RData file with feature data data frame or the data frame itself #' #' @param shape.results.data Optional file path to an RData file containing load shape clustering results #' or the results object itself. i.e. results from #' \code{visdomloadshape::shapeFeatures(visdomloadshape::shapeCategoryEncoding())} #' #' @param format Export data format - one of the ones supported by exportData() #' #' @param prefix Optional prefix to put n froun of all feature names #' #' @param filePath optional path to the directory where exported data should be written if the export type is a file. '.' by default. 
#' #' @export exportFeatureAndShapeResults = function(feature.data, shape.results.data=NULL, format='hdf5', prefix='', filePath='.') { if( 'character' %in% class(feature.data) ) { # if character, it must be a file path for loading feature data print(paste('Using feature data from file', feature.data)) load(feature.data) # should provide var named featureDF } else { print('Using passed feature data') featureDF = feature.data rm('feature.data') } if(! is.null(shape.results.data)) { if( 'character' %in% class(shape.results.data) ) { # if character, it must be a file path for loading shape data print(paste('Using shape data from file',shape.results.data)) load(shape.results.data) # should provide var named shape.results } else { shape.results = shape.results.data rm('shape.results.data') } print('Merging shape features into basic features') featureDF = mergeShapeFeatures(featureDF,shape.results) print(paste('Writing load shape data to',format)) exportShapes(shape.results,prefix,format, filePath) } print(paste('Writing feature data frame to',format)) exportData(df = featureDF, name = paste(prefix,'Basics',sep=''), label = 'basics', format = format, filePath=filePath) }
tpi.bs <- function(gene_expr,ks_int,kd_int,delta_int,times,times2,time_step,noise,delay,NI) {
  # Bootstrap the tpi.index() score under three candidate two-target network
  # topologies, using NI independent random draws of the kinetic parameters.
  #
  # Args:
  #   gene_expr : expression profile of the putative regulator, sampled at `times`
  #   ks_int    : c(min, max) interval for sampling the synthesis rates
  #   kd_int    : c(min, max) interval for sampling the degradation rates
  #   delta_int : c(min, max) interval for sampling the delay parameters
  #   times     : observation time points of gene_expr
  #   times2    : extended time grid = pre-observation padding plus `times`
  #   time_step : spacing of the padding grid seq(-20, -1, time_step)
  #   noise     : noise level forwarded to the prediction_* simulators
  #   delay     : regulatory delay passed to tpi.index()
  #   NI        : number of bootstrap iterations
  #
  # Returns: a 3 x NI matrix of tpi.index() scores; row 1 = feed-forward loop,
  #   row 2 = cascade, row 3 = co-regulation.
  mat_tpi_part=matrix(0,3,NI)
  # Spline of the regulator profile, padded before the first observation with
  # its initial value so the interpolant is defined on the delayed window.
  hr=splinefun(times2,c(rep(gene_expr[1],length.out=length(seq(-20,-1,time_step))),gene_expr))
  for ( n in seq_len(NI)) {  # seq_len() is safe when NI == 0, unlike 1:NI
    # Draw one random kinetic parameter set per bootstrap iteration.
    ks=runif(3,min=ks_int[1],max=ks_int[2])
    kd=runif(2,min=kd_int[1],max=kd_int[2])
    delta=runif(2,min=delta_int[1],max=delta_int[2])

    # Feed-forward loop topology (both targets simulated jointly from the
    # regulator by prediction_multiL_ff_norm).
    y=prediction_multiL_ff_norm(times,gene_expr,ksb=100*ks_int[2],ks1=ks[1],ks21=ks[2],ks22=ks[3],kd1=kd[1],kd2=kd[2],delta1=delta[1],delta2=delta[2],noise=noise)
    targ1=y$tar1
    targ2=y$tar2
    ht1=splinefun(times2,c(rep(targ1[1],length.out=length(seq(-20,-1,time_step))),targ1))
    ht2=splinefun(times2,c(rep(targ2[1],length.out=length(seq(-20,-1,time_step))),targ2))
    mat_tpi_part[1,n]=tpi.index(hr,ht1,ht2,time_l=times[1]+delay,time_u=times[length(times)],time_step=1,delay=delay)

    # Cascade topology: target 2 is re-simulated from target 1's profile
    # (ht1 is reused from the feed-forward simulation above).
    targ2 = prediction_L_norm(times,targ1,100*ks_int[2],ks[2],kd[2],delta[2],noise=noise)
    ht2=splinefun(times2,c(rep(targ2[1],length.out=length(seq(-20,-1,time_step))),targ2))
    mat_tpi_part[2,n]=tpi.index(hr,ht1,ht2,time_l=times[1]+delay,time_u=times[length(times)],time_step=1,delay=delay)

    # Co-regulation topology: target 2 is re-simulated directly from the
    # regulator's own profile.
    targ2 = prediction_L_norm(times,gene_expr,100*ks_int[2],ks[2],kd[2],delta[1],noise=noise)
    ht2=splinefun(times2,c(rep(targ2[1],length.out=length(seq(-20,-1,time_step))),targ2))
    mat_tpi_part[3,n]=tpi.index(hr,ht1,ht2,time_l=times[1]+delay,time_u=times[length(times)],time_step=1,delay=delay)
  }
  return(mat_tpi_part)
}
/TDCor/R/tpi.bs.R
no_license
ingted/R-Examples
R
false
false
1,549
r
tpi.bs <- function(gene_expr,ks_int,kd_int,delta_int,times,times2,time_step,noise,delay,NI) {
  # Bootstrap the tpi.index() score under three candidate two-target network
  # topologies, using NI independent random draws of the kinetic parameters.
  #
  # Args:
  #   gene_expr : expression profile of the putative regulator, sampled at `times`
  #   ks_int    : c(min, max) interval for sampling the synthesis rates
  #   kd_int    : c(min, max) interval for sampling the degradation rates
  #   delta_int : c(min, max) interval for sampling the delay parameters
  #   times     : observation time points of gene_expr
  #   times2    : extended time grid = pre-observation padding plus `times`
  #   time_step : spacing of the padding grid seq(-20, -1, time_step)
  #   noise     : noise level forwarded to the prediction_* simulators
  #   delay     : regulatory delay passed to tpi.index()
  #   NI        : number of bootstrap iterations
  #
  # Returns: a 3 x NI matrix of tpi.index() scores; row 1 = feed-forward loop,
  #   row 2 = cascade, row 3 = co-regulation.
  mat_tpi_part=matrix(0,3,NI)
  # Spline of the regulator profile, padded before the first observation with
  # its initial value so the interpolant is defined on the delayed window.
  hr=splinefun(times2,c(rep(gene_expr[1],length.out=length(seq(-20,-1,time_step))),gene_expr))
  for ( n in seq_len(NI)) {  # seq_len() is safe when NI == 0, unlike 1:NI
    # Draw one random kinetic parameter set per bootstrap iteration.
    ks=runif(3,min=ks_int[1],max=ks_int[2])
    kd=runif(2,min=kd_int[1],max=kd_int[2])
    delta=runif(2,min=delta_int[1],max=delta_int[2])

    # Feed-forward loop topology (both targets simulated jointly from the
    # regulator by prediction_multiL_ff_norm).
    y=prediction_multiL_ff_norm(times,gene_expr,ksb=100*ks_int[2],ks1=ks[1],ks21=ks[2],ks22=ks[3],kd1=kd[1],kd2=kd[2],delta1=delta[1],delta2=delta[2],noise=noise)
    targ1=y$tar1
    targ2=y$tar2
    ht1=splinefun(times2,c(rep(targ1[1],length.out=length(seq(-20,-1,time_step))),targ1))
    ht2=splinefun(times2,c(rep(targ2[1],length.out=length(seq(-20,-1,time_step))),targ2))
    mat_tpi_part[1,n]=tpi.index(hr,ht1,ht2,time_l=times[1]+delay,time_u=times[length(times)],time_step=1,delay=delay)

    # Cascade topology: target 2 is re-simulated from target 1's profile
    # (ht1 is reused from the feed-forward simulation above).
    targ2 = prediction_L_norm(times,targ1,100*ks_int[2],ks[2],kd[2],delta[2],noise=noise)
    ht2=splinefun(times2,c(rep(targ2[1],length.out=length(seq(-20,-1,time_step))),targ2))
    mat_tpi_part[2,n]=tpi.index(hr,ht1,ht2,time_l=times[1]+delay,time_u=times[length(times)],time_step=1,delay=delay)

    # Co-regulation topology: target 2 is re-simulated directly from the
    # regulator's own profile.
    targ2 = prediction_L_norm(times,gene_expr,100*ks_int[2],ks[2],kd[2],delta[1],noise=noise)
    ht2=splinefun(times2,c(rep(targ2[1],length.out=length(seq(-20,-1,time_step))),targ2))
    mat_tpi_part[3,n]=tpi.index(hr,ht1,ht2,time_l=times[1]+delay,time_u=times[length(times)],time_step=1,delay=delay)
  }
  return(mat_tpi_part)
}
## Overview:
## makeCacheMatrix builds a caching wrapper around a matrix; cacheSolve
## returns the wrapped matrix's inverse, computing it only on the first
## call and serving the cached copy on every call after that.

## Construct the "special matrix": a list of four closures that share the
## matrix `x` and its cached inverse through their enclosing environment.
##   $set(y)        replace the matrix and invalidate the cached inverse
##   $get()         return the current matrix
##   $setmatrinv(i) store a computed inverse in the cache
##   $getmatrinv()  return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # old inverse no longer matches the new matrix
  }
  get <- function() x
  setmatrinv <- function(matrinv) inv <<- matrinv
  getmatrinv <- function() inv
  list(set = set,
       get = get,
       setmatrinv = setmatrinv,
       getmatrinv = getmatrinv)
}

## Return the inverse of the special matrix `x` made by makeCacheMatrix().
## Serves the cached inverse when one is available; otherwise computes it
## with solve(), stores it in the cache and returns it. Extra arguments in
## `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getmatrinv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  x$setmatrinv(fresh)
  fresh
}

## Sanity check:
## y <- cacheSolve(x); z <- x$get()
## z %*% y should yield the identity matrix
/cachematrix.R
no_license
mskunca/ProgrammingAssignment2
R
false
false
1,348
r
## Overview:
## makeCacheMatrix builds a caching wrapper around a matrix; cacheSolve
## returns the wrapped matrix's inverse, computing it only on the first
## call and serving the cached copy on every call after that.

## Construct the "special matrix": a list of four closures that share the
## matrix `x` and its cached inverse through their enclosing environment.
##   $set(y)        replace the matrix and invalidate the cached inverse
##   $get()         return the current matrix
##   $setmatrinv(i) store a computed inverse in the cache
##   $getmatrinv()  return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # old inverse no longer matches the new matrix
  }
  get <- function() x
  setmatrinv <- function(matrinv) inv <<- matrinv
  getmatrinv <- function() inv
  list(set = set,
       get = get,
       setmatrinv = setmatrinv,
       getmatrinv = getmatrinv)
}

## Return the inverse of the special matrix `x` made by makeCacheMatrix().
## Serves the cached inverse when one is available; otherwise computes it
## with solve(), stores it in the cache and returns it. Extra arguments in
## `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getmatrinv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  x$setmatrinv(fresh)
  fresh
}

## Sanity check:
## y <- cacheSolve(x); z <- x$get()
## z %*% y should yield the identity matrix
\name{sd-methods}
\docType{methods}
\alias{sd-methods}
\alias{sd,oligoSnpSet-method}
\alias{sd}
\alias{sd,CopyNumberSet-method}
%\alias{sd,oligoSnpSet-method}
\title{Methods for estimating copy number standard deviations.}
\description{Estimate the standard deviation for \code{CopyNumberSet} and
  \code{oligoSnpSet} objects.}
\usage{
sd(x, na.rm=FALSE)
}
\arguments{
  \item{x}{A \code{CopyNumberSet} or \code{oligoSnpSet}}
  \item{na.rm}{Logical. }
}
\value{
  A matrix.
}
\details{
  The sd method for \code{CopyNumberSet} and \code{oligoSnpSet} objects
  retrieves the copy number confidence scores from the \code{cnConfidence}
  assay data element. The confidence matrix is an R x C matrix for an object
  with R features and C samples. Valid confidence estimates must be positive
  and not missing (not \code{NA}). If any elements in the confidence matrix
  are invalid, a robust estimate of the standard deviation is computed
  (described below). If all elements are valid, the standard deviation matrix
  is returned as 1 / confidence.

  If any elements in the confidence matrix are invalid, the standard
  deviation for each marker and sample is calculated as follows. If autosomal
  markers are present, the standard deviation is estimated as the median
  absolute deviation across autosomal markers for each sample. This gives a
  vector of length C. The R x C standard deviation matrix is populated by row
  from the vector of length C (the standard deviation for each marker in a
  sample is given the same standard deviation). If autosomal markers are not
  present, the median absolute deviation across X-chromosome markers and
  Y-chromosome markers are estimated independently, providing two vectors of
  length C. The matrix of standard deviations for the X chromosome is
  populated by the C-length vector for the X-chromosome (by-row) and likewise
  for the Y chromosome.
}
\seealso{
  \code{\link{mad}}
}
\examples{
data(oligoSetExample)
sds <- sd(oligoSet)
}
\keyword{methods}
/man/sd-methods.Rd
no_license
lmireles/VanillaICE
R
false
false
2,029
rd
\name{sd-methods} \docType{methods} \alias{sd-methods} \alias{sd,oligoSnpSet-method} \alias{sd} \alias{sd,CopyNumberSet-method} %\alias{sd,oligoSnpSet-method} \title{Methods for estimating copy number standard deviations.} \description{Estimate the standard deviation for \code{CopyNumberSet} and \code{oligoSnpSet} objects.} \usage{ sd(x, na.rm=FALSE) } \arguments{ \item{x}{A \code{CopyNumberSet} or \code{oligoSnpSet}} \item{na.rm}{Logical. } } \value{ A matrix. } \details{ The sd method for \code{CopyNumberSet} and \code{oligoSnpSet} objects retrieves the copy number confidence scores from the \code{cnConfidence} assay data element. The confidence matrix is a R x C matrix for an object with R features and C samples. Valid confidence estimates must be positive and not missing (not \code{NA}). If any elements in the confidence matrix are invalid, a robust estimate of the standard deviation is computed (described below). If all elements are valid, the standard deviation matrix is returned as 1 / confidence. If any elements in the confidence matrix are invalid, the standard deviation for each marker and sample is calculated as follows. If autosomal markers are present, the standard deviation is estimated as the median absolute deviation across autosomal markers for each sample. This gives a vector of length C. The R x C standard deviation matrix is populated by row from the vector of length C (the standard deviation for each marker in a sample is given the same standard deviation). If autosomal markers are not present, the median absolute deviation across X-chromosome markers and Y-chromosome markers are estimated independently, providing to vectors of length C. The matrix of standard deviations for the X chromosome is populated by the C-length vector for the X-chromosome (by-row) and likewise for the Y chromosome. } \seealso{ \code{\link{mad}} } \examples{ data(oligoSetExample) sds <- sd(oligoSet) } \keyword{methods}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SSdiagsMCMC_old.R
\name{SSdiagsMCMC_old}
\alias{SSdiagsMCMC_old}
\title{SSdiagsMCMC_old()}
\usage{
SSdiagsMCMC_old(
  mcmcdir,
  Bref = c("MSY", "Btrg"),
  Fref = c("MSY", "Ftrg"),
  run = "MCMC",
  Fstarter = c("_abs_F", "(F)/(Fmsy)", "Fstd_Btgt"),
  forecast = FALSE,
  plot = TRUE,
  thin = 1,
  biomass = "SSB",
  refs = c("SSB_unfished", "SSB_MSY", "SSB_Btgt", "SSB_SPR", "SPR_MSY", "Fstd_MSY",
    "Fstd_SPR", "Fstd_Btgt", "Recr_unfished", "B_MSY.SSB_unfished", "Dead_Catch_MSY",
    "Ret_Catch_MSY")
)
}
\arguments{
\item{mcmcdir}{file path for folder with the derived_posteriors.sso file}

\item{Bref}{Choice of reference point for stock SSB/X c("MSY","Btrg")}

\item{Fref}{Choice of reference point for stock SSB/X Fref=c("MSY","Ftrg")}

\item{run}{qualifier for model run}

\item{Fstarter}{starter settings for c("_abs_F","(F)/(Fmsy)","Fstd_Btgt") see SSsettingsBratioF()}

\item{forecast}{option to include forecasts TRUE/FALSE}

\item{thin}{option to use additional thinning}

\item{biomass}{the function is only tested to run with default biomass = "SSB"}

\item{refs}{required reference quantities}

\item{plot}{option to plot results with SSplotEnsemble()}
}
\value{
list of (1) all quantities "sims" (2) object of SSplotEnsemble() "kb" and settings
}
\description{
function to read mcmc file outputs for Kobe and SSplotEnsemble() plotting
}
\author{
Henning Winker (JRC-EC), Massimiliano and Laurence Kell (Sea++)
}
/man/SSdiagsMCMC_old.Rd
no_license
realsmak88/ss3diags
R
false
true
1,509
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/SSdiagsMCMC_old.R \name{SSdiagsMCMC_old} \alias{SSdiagsMCMC_old} \title{SSdiagsMCMC_old()} \usage{ SSdiagsMCMC_old( mcmcdir, Bref = c("MSY", "Btrg"), Fref = c("MSY", "Ftrg"), run = "MCMC", Fstarter = c("_abs_F", "(F)/(Fmsy)", "Fstd_Btgt"), forecast = FALSE, plot = TRUE, thin = 1, biomass = "SSB", refs = c("SSB_unfished", "SSB_MSY", "SSB_Btgt", "SSB_SPR", "SPR_MSY", "Fstd_MSY", "Fstd_SPR", "Fstd_Btgt", "Recr_unfished", "B_MSY.SSB_unfished", "Dead_Catch_MSY", "Ret_Catch_MSY") ) } \arguments{ \item{mcmcdir}{file path for folder with the derived_posteriors.sso file} \item{Bref}{Choice of reference point for stock SSB/X c("MSY","Btrg")} \item{Fref}{Choice of reference point for stock SSB/XFref=c("MSY","Ftrg")} \item{run}{qualifier for model run} \item{Fstarter}{starter settings for c("_abs_F","(F)/(Fmsy)","Fstd_Btgt") see SSsettingsBratioF()} \item{forecast}{option to include forecasts TRUE/FALSE} \item{thin}{option to use additional thinning} \item{biomass}{the function is only tested to run with default biomass = "SSB"} \item{refs}{required reference quantaties} \item{Plot}{option to plot results with SSplotEnsemble()} } \value{ list of (1) all quataties "sims" (2) object of SSplotEnsemble() "kb" and settings } \description{ function to read mcmc file outputs for Kobe and SSplotEnsemble() plotting } \author{ Henning Winker (JRC-EC), Massimiliano and Laurence Kell (Sea++) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/add_bootstrap.R \name{add_bootstrap} \alias{add_bootstrap} \title{An extension of nodelabels to add symbols indicating the bootstrap support pass certain threshold} \usage{ add_bootstrap(phy, pch, threshold) } \arguments{ \item{phy}{A tree object with bootstrap support values in nodes.} \item{pch}{Shape to add} \item{threshold}{A threshold value. Only the nodes that have support higher than this number will be plotted.} } \value{ plots shapes indicating bootstrap support in the current plot window } \description{ An extension of nodelabels to add symbols indicating the bootstrap support pass certain threshold }
/man/add_bootstrap.Rd
no_license
mshakya/Phamer
R
false
true
699
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/add_bootstrap.R \name{add_bootstrap} \alias{add_bootstrap} \title{An extension of nodelabels to add symbols indicating the bootstrap support pass certain threshold} \usage{ add_bootstrap(phy, pch, threshold) } \arguments{ \item{phy}{A tree object with bootstrap support values in nodes.} \item{pch}{Shape to add} \item{threshold}{A threshold value. Only the nodes that have support higher than this number will be plotted.} } \value{ plots shapes indicating bootstrap support in the current plot window } \description{ An extension of nodelabels to add symbols indicating the bootstrap support pass certain threshold }
##########################################################
### Create commodity use list                   ##########
##########################################################
# One step of a gWidgets-based GUI workflow.  Reads the simulation file,
# derives the commodity list from the "*_pct" grade columns, loads
# per-commodity dollar values (CV_*) and mill metallurgical recovery rates
# (MRR_*), and prompts the user to fill in anything missing.
# State is shared with the rest of the application via global assignment
# (`<<-` / assign(..., env = .GlobalEnv)); SimFile, InputFolder1, MillChoice
# and MillCList are expected to already exist in the global environment.
dat1 <<- read.csv(SimFile , header = TRUE)
ColNames1 <<- colnames(dat1)
listGradesI <<- grep("_pct",ColNames1 )    # indices of grade columns
ListCNames<<- c()
for (xx in listGradesI) {
  # NOTE(review): "." in the pattern is a regex wildcard, so "._pct" also
  # consumes the character immediately before "_pct".  Correct only if the
  # column names contain a literal dot (e.g. "Cu._pct") -- confirm against
  # the SimFile column naming convention.
  CName <<- sub("._pct","",ColNames1[xx])
  ListCNames<<- c(ListCNames,CName )
}
x <<- 1

##########################################################
### Create CV variables /list                   ##########
##########################################################
# One global CV_<commodity> variable per commodity, initialised to the
# sentinel -999 meaning "no value loaded yet".
CVList <<- c()
for (cc in ListCNames) {
  CVa <<- paste("CV_",cc,sep="")
  # `env` partially matches assign()'s `envir` argument.
  assign(CVa , -999, env = .GlobalEnv)
  CVList <<- c(CVList, CVa)
}
CVListNum <<- length (CVList)

##########################################################
### Read CV Values                              ##########
##########################################################
FileinCV <<- paste(InputFolder1,"/AuxFiles/ValueTabs","/CValues.csv", sep="")
CVS <<- read.csv(FileinCV , header= FALSE) ## input table with commodity values
CVl <<- length (CVS)    # number of columns (one commodity per column)
CVl <<- as.numeric(CVl)
CVw <<- 1
# Row 1 of CValues.csv holds commodity names, row 2 the dollar values.
while (CVw < (CVl + 1)) {
  print (CVw)
  CVn1 <<- CVS[1,CVw]
  CVn <<- toString(CVn1)
  print (CVn)
  cg <<- 1
  while ( cg < (CVListNum + 1)) {
    if (CVn == ListCNames[cg]) {
      # NOTE(review): the value is read from column `cg` (the match position
      # in ListCNames) rather than column `CVw` -- only equivalent when both
      # lists are in the same order; confirm against CValues.csv.
      CV234<<- as.numeric( toString(CVS[2,cg]))
      assign(CVList[cg],CV234)
    }
    cg <<- cg + 1
  }
  CVw <<-( CVw +1 )
}

##########################################################
### Flag missing CV Values                      ##########
##########################################################
# For every commodity still at the -999 sentinel, open a small dialog that
# lets the user either type a value or ignore (zero out) the commodity.
for (CVObj in CVList) {
  print (CVObj)
  # NOTE(review): svalue() is applied to a character variable name here
  # (gWidgets generic on a plain string) -- presumably resolves to the value
  # of the named global; verify it does not simply return the string.
  print (svalue(CVObj))
  sw <<- 1
  if (svalue(CVObj) == -999) {
    #gmessage("There are missing commodity values, please correct them before continuing, by filling out the next dialogs", title="message",icon = "error")
    sw <<- 2
    print ( "TRUE")
    CVinput <- gwindow("",horizontal= FALSE)
    CVobj00 <<- sub("CV_","",CVObj )
    CVLabel0 <<- paste("The commodity", CVobj00 ,"is missing a value", sep=" ")
    CVLabel1 <<- paste("please input an updated value", sep=" ")
    CVLabel2 <<- paste("or click ignore the commodity", sep=" ")
    CVlabel <<- glabel(CVLabel0 , container = CVinput )
    CVlabel <<- glabel(CVLabel1 , container = CVinput )
    CVlabel <<- glabel(CVLabel2 , container = CVinput )
    CVe <<- gedit("Enter missing value ($/metric ton)",width = 30,container = CVinput )
    # Both handlers close over the loop variable CVObj.  This is safe only
    # because the busy-wait below keeps this iteration alive until the user
    # clicks, so CVObj still names the commodity being edited.
    obj <- gbutton( text = "Update value", container= CVinput ,
      handler = function(h,...) {
        assign(CVObj, as.double(svalue(CVe)), env = .GlobalEnv)
        sw <<- 1
        dispose(CVinput)
      })
    obj <- gbutton(text = "Ignore missing value",container= CVinput ,
      handler = function(h,...) {
        assign(CVObj,0, env = .GlobalEnv)
        sw <<- 1
        dispose(CVinput)
      })
    # NOTE(review): busy-wait until a button handler resets sw.  This spins
    # the CPU and relies on the GUI toolkit processing events while the loop
    # runs -- TODO confirm this blocks as intended on the target toolkit.
    while (sw ==2){ print ("waiting for user input")}
  }
}
# Echo the final CV_* values to the console log.
for (CVObj in CVList) {
  print (CVObj)
  print (svalue(CVObj))
}

# Edited 1/17/18: branch on whether the mill is customised; for a custom
# mill, try to set commodity-specific MRR values.
if (MillChoice != "Customize Mill Options") {
  ##########################################################
  ### Create MRR variables /list                  ##########
  ##########################################################
  # One global MRR_<commodity> variable per commodity, sentinel -999.
  MRRList <<- c()
  for (cc in ListCNames) {
    MRRa <<- paste("MRR_",cc,sep="")
    assign(MRRa , -999, env = .GlobalEnv)
    MRRList <<- c(MRRList, MRRa )
  }
  ##########################################################
  ### Set Mill numbers                            ##########
  ##########################################################
  # MillNum selects the column of MillR.csv holding this mill's recovery
  # rates.
  MillNum<<- 13 ## base number for mill number, temporary default
  if (MillChoice == "3-Product Flotation (Omit lowest value commodity)") { MillNum <<- 11 }
  if (MillChoice == "3 - Product Flotation") { MillNum <<- 11 }
  if (MillChoice == "None") { MillNum <<- 13 }
  # NOTE(review): assigns MillNum3, not MillNum -- likely a typo; confirm.
  if (MillChoice == "User Define") { MillNum3 <<- 13 }
  ##########################################################
  ### Read MRR Values                             ##########
  ##########################################################
  # Column 1 of MillR.csv is the commodity name; column MillNum the rate.
  FileinCV <<- paste(InputFolder1,"/AuxFiles/ValueTabs","/MillR.csv", sep="")
  MR <<- read.csv(FileinCV , header= FALSE) ## input table with commodity values
  MRl <<- nrow (MR ) ## number of rows in table
  MRl <<- as.numeric(MRl ) ## convert the number to a numeric value
  MRw <<- 2 ## start at row 2 (row 1 is a header row)
  while (MRw < (MRl + 1)) {
    print (MRw)
    MRn1 <<- MR[MRw,1]
    MRn <<- toString(MRn1)
    print (MRn)
    cg <<- 1
    while ( cg < (CVListNum + 1)) {
      if (MRn == ListCNames[cg]) {
        MR234<<- as.numeric( toString(MR[MRw,MillNum]))
        assign(MRRList[cg],MR234)
      }
      cg <<- cg + 1
    }
    MRw <<-( MRw +1 )
  }
} ## ends if mill choice is not custom

################################################################
### Mill choice is custom: per-commodity mill selection  #######
################################################################
if (MillChoice == "Customize Mill Options") {
  ##########################################################
  ### Create MRR variables /list                  ##########
  ##########################################################
  MRRList <<- c()
  for (cc in ListCNames) {
    MRRa <<- paste("MRR_",cc,sep="")
    assign(MRRa , -999, env = .GlobalEnv)
    MRRList <<- c(MRRList, MRRa )
  }
  ##########################################################
  ### Set Mill MRR                                ##########
  ##########################################################
  MillNum<<- 13 ## base number for mill number, temporary default
  # One widget per commodity in MillCList; its value selects the mill type
  # for the commodity named by the widget (widget name = "Mill<commodity>").
  for (hh in MillCList) {
    zz <<- svalue(hh)
    print (zz)
    cn0 <<- sub("Mill", '', hh)   # commodity name extracted from widget name
    print (cn0)
    print (hh)
    FileinCV <<- paste(InputFolder1,"/AuxFiles/ValueTabs","/MillR.csv", sep="")
    MR <<- read.csv(FileinCV , header= FALSE) ## input table with commodity values
    MRl <<- nrow (MR ) ## number of rows in table
    MRl <<- as.numeric(MRl ) ## convert the number to a numeric value
    MRw <<- 2 ## start at row 2
    if (zz == "3-Product") {
      print ("3 product true")
      MillNum <<- 11
      print (MillNum)
      cg <<- 1
      ### loop each row starting at 2
      # NOTE(review): cg advances once per CSV row, so MRRList[cg] lines up
      # with ListCNames only if MillR.csv rows follow the same commodity
      # order (offset by the header row) -- confirm.
      while (MRw < (MRl + 1))
      {
        MRn1 <<- MR[MRw,1]
        MRn <<- toString(MRn1)
        if (MRn == cn0) {
          MR234<<- as.numeric( toString(MR[MRw,MillNum]))
          assign(MRRList[cg],MR234)
          print ("3 product MRR set")
        }
        MRw <<-( MRw +1 )
        cg <<- cg + 1
      }
    }
    if (zz == "2-Product") {
      print ("2 product true")
      MillNum <<- 9
      print (MillNum)
      cg <<- 1
      while (MRw < (MRl + 1)) {
        MillNum <<- 9   # redundant reassignment, kept as-is
        MRn1 <<- MR[MRw,1]
        MRn <<- toString(MRn1)
        if (MRn == cn0) {
          MR234<<- as.numeric( toString(MR[MRw,MillNum]))
          assign(MRRList[cg],MR234)
          print ("2 product MRR set")
        }
        MRw <<-( MRw +1 )
        cg <<- cg + 1
      }
    }
    if (zz == "1-Product") {
      print ("1 product true")
      MillNum <<- 8
      print (MillNum)
      cg <<- 1
      while (MRw < (MRl + 1)) {
        MillNum <<- 8   # redundant reassignment, kept as-is
        MRn1 <<- MR[MRw,1]
        MRn <<- toString(MRn1)
        if (MRn == cn0) {
          MillNum <<- 8   # redundant reassignment, kept as-is
          MR234<<- as.numeric( toString(MR[MRw,MillNum]))
          assign(MRRList[cg],MR234)
          print (MRRList[cg])
          print (MR234)
          print ("1 product MRR set")
        }
        MRw <<-( MRw +1 )
        cg <<- cg + 1
      }
    }
    if (zz == "None") {
      # Commodity is not milled: zero out its recovery rate.
      print ("None is True")
      dp11 <<- sub("Mill", "", hh)
      YName <<- dp11
      cg <<- 1
      while (MRw < (MRl + 1)) {
        MRn1 <<- MR[MRw,1]
        MRn <<- toString(MRn1)
        if (MRn == cn0) {
          assign(MRRList[cg],0)
        }
        MRw <<-( MRw +1 )
        cg <<- cg + 1
      }
    }
    if (zz == "User Define") {
      # Dialog collecting the user-defined mill's recovery rate and the
      # capital / operating cost power-law parameters (cost = K1 * Cml^K2).
      print ("User Define TRUE")
      sw <<- 2
      MRinput <- gwindow("",horizontal= FALSE)
      MRLabel0 <<- paste("Enter user define mill parameters for: ", cn0 ,sep=" ")
      MRLabel1 <<- paste("Enter metalogical recovery rate (decimal fraction)", sep=" ")
      MRlabel <<- glabel(MRLabel0 , container = MRinput )
      MRlabel <<- glabel(MRLabel1 , container = MRinput )
      MRe <<- gedit("",width = 30,container = MRinput )
      UDLabel1 <<- glabel("Enter the capital cost equation parameters: Constant and the Power log. Example Floation 3 equation is: 83600 * Cml^0.708 ", container = MRinput )
      UDName1 <<- gedit("Name of user defined mill? ",width = 30,container = MRinput )
      KC1 <<- gedit("$ constant for captial Cost",width = 30,container = MRinput )
      KC2 <<- gedit("Power log for capital cost",width = 30,container = MRinput )
      UDLabel1 <<- glabel("Enter the operating cost equation parameters: Constant and the Power log. Example Floation 3 equation is: 153 * Cml^(-0.344)", container = MRinput )
      KO1 <<- gedit("$ constant for operating Cost",width = 30,container = MRinput )
      KO2 <<- gedit("Power log for operating cost",width = 30,container = MRinput )
      # NOTE(review): this handler assigns to MRObj, which is only defined
      # by the missing-value loop below (a loop that is skipped for
      # "User Define"), so it would use a stale MRObj from a previous
      # iteration of that loop -- confirm intent.
      obj <- gbutton( text = "Submit User Define Mill", container= MRinput ,
        handler = function(h,...) {
          assign(MRObj, as.double(svalue(MRe)), env = .GlobalEnv)
          sw <<- 1
          # The gedit widgets are replaced by their string values once read.
          UDName1 <<- svalue(UDName1)
          KC1 <<- svalue(KC1)
          KC2 <<- svalue(KC2)
          KO1 <<- svalue(KO1)
          KO2 <<- svalue(KO2)
          dispose(MRinput)
        })
      # Busy-wait until the submit handler resets sw (see note above).
      while (sw ==2) { print ("waiting for user input") }
    } ## ends if user define
    if (zz != "User Define") {
      # Re-check every MRR_* value and prompt for any that look unset.
      for (MRObj in MRRList) {
        print (MRObj)
        print (svalue(MRObj))
        mrsv1 <<- svalue(MRObj)
        sw <<- 1
        # NOTE(review): mrsv1 was just read from MRObj, so this condition is
        # always TRUE and the dialog is shown for every commodity -- the
        # intent was probably `== -999`; confirm.
        if (svalue(MRObj) == mrsv1) {
          sw <<- 2
          print ( "TRUE")
          MRinput <- gwindow("",horizontal= FALSE)
          MRobj00 <<- sub("MRR_","",MRObj )
          MRLabel0 <<- paste("The commodity", MRobj00 ,"is missing a Metalogical Recovery Rate", sep=" ")
          MRLabel1 <<- paste("please input an updated value", sep=" ")
          MRLabel2 <<- paste("or click ignore the commodity", sep=" ")
          MRlabel <<- glabel(MRLabel0 , container = MRinput )
          MRlabel <<- glabel(MRLabel1 , container = MRinput )
          MRlabel <<- glabel(MRLabel2 , container = MRinput )
          MRe <<- gedit("Enter missing value (decimal fraction)",width = 30,container = MRinput )
          obj <- gbutton( text = "Update value", container= MRinput ,
            handler = function(h,...) {
              assign(MRObj, as.double(svalue(MRe)), env = .GlobalEnv)
              sw <<- 1
              dispose(MRinput)
            })
          obj <- gbutton(text = "Ignore missing value",container= MRinput ,
            handler = function(h,...) {
              assign(MRObj,0, env = .GlobalEnv)
              sw <<- 1
              dispose(MRinput)
            })
          # Busy-wait until a button handler resets sw (see note above).
          while (sw ==2) { print ("waiting for user input") }
        } ## ends if mrobj == mrsv1
      } ## ends for each mrobj
    } ## ends if not user define
  } ## ends each hh in millclist
} ## ends if customize mill option
#gmessage("All data has been checked and confirmed", title="message",icon = "info")
/MapWizardi/Tools/scripts/RAEF/Package/AuxFiles/RScripts/CVMRtrial12518_236pm.R
permissive
Joonasha/MapWizard
R
false
false
11,119
r
##########################################################
### Create commodity use list                   ##########
##########################################################
# One step of a gWidgets-based GUI workflow.  Reads the simulation file,
# derives the commodity list from the "*_pct" grade columns, loads
# per-commodity dollar values (CV_*) and mill metallurgical recovery rates
# (MRR_*), and prompts the user to fill in anything missing.
# State is shared with the rest of the application via global assignment
# (`<<-` / assign(..., env = .GlobalEnv)); SimFile, InputFolder1, MillChoice
# and MillCList are expected to already exist in the global environment.
dat1 <<- read.csv(SimFile , header = TRUE)
ColNames1 <<- colnames(dat1)
listGradesI <<- grep("_pct",ColNames1 )    # indices of grade columns
ListCNames<<- c()
for (xx in listGradesI) {
  # NOTE(review): "." in the pattern is a regex wildcard, so "._pct" also
  # consumes the character immediately before "_pct".  Correct only if the
  # column names contain a literal dot (e.g. "Cu._pct") -- confirm against
  # the SimFile column naming convention.
  CName <<- sub("._pct","",ColNames1[xx])
  ListCNames<<- c(ListCNames,CName )
}
x <<- 1

##########################################################
### Create CV variables /list                   ##########
##########################################################
# One global CV_<commodity> variable per commodity, initialised to the
# sentinel -999 meaning "no value loaded yet".
CVList <<- c()
for (cc in ListCNames) {
  CVa <<- paste("CV_",cc,sep="")
  # `env` partially matches assign()'s `envir` argument.
  assign(CVa , -999, env = .GlobalEnv)
  CVList <<- c(CVList, CVa)
}
CVListNum <<- length (CVList)

##########################################################
### Read CV Values                              ##########
##########################################################
FileinCV <<- paste(InputFolder1,"/AuxFiles/ValueTabs","/CValues.csv", sep="")
CVS <<- read.csv(FileinCV , header= FALSE) ## input table with commodity values
CVl <<- length (CVS)    # number of columns (one commodity per column)
CVl <<- as.numeric(CVl)
CVw <<- 1
# Row 1 of CValues.csv holds commodity names, row 2 the dollar values.
while (CVw < (CVl + 1)) {
  print (CVw)
  CVn1 <<- CVS[1,CVw]
  CVn <<- toString(CVn1)
  print (CVn)
  cg <<- 1
  while ( cg < (CVListNum + 1)) {
    if (CVn == ListCNames[cg]) {
      # NOTE(review): the value is read from column `cg` (the match position
      # in ListCNames) rather than column `CVw` -- only equivalent when both
      # lists are in the same order; confirm against CValues.csv.
      CV234<<- as.numeric( toString(CVS[2,cg]))
      assign(CVList[cg],CV234)
    }
    cg <<- cg + 1
  }
  CVw <<-( CVw +1 )
}

##########################################################
### Flag missing CV Values                      ##########
##########################################################
# For every commodity still at the -999 sentinel, open a small dialog that
# lets the user either type a value or ignore (zero out) the commodity.
for (CVObj in CVList) {
  print (CVObj)
  # NOTE(review): svalue() is applied to a character variable name here
  # (gWidgets generic on a plain string) -- presumably resolves to the value
  # of the named global; verify it does not simply return the string.
  print (svalue(CVObj))
  sw <<- 1
  if (svalue(CVObj) == -999) {
    #gmessage("There are missing commodity values, please correct them before continuing, by filling out the next dialogs", title="message",icon = "error")
    sw <<- 2
    print ( "TRUE")
    CVinput <- gwindow("",horizontal= FALSE)
    CVobj00 <<- sub("CV_","",CVObj )
    CVLabel0 <<- paste("The commodity", CVobj00 ,"is missing a value", sep=" ")
    CVLabel1 <<- paste("please input an updated value", sep=" ")
    CVLabel2 <<- paste("or click ignore the commodity", sep=" ")
    CVlabel <<- glabel(CVLabel0 , container = CVinput )
    CVlabel <<- glabel(CVLabel1 , container = CVinput )
    CVlabel <<- glabel(CVLabel2 , container = CVinput )
    CVe <<- gedit("Enter missing value ($/metric ton)",width = 30,container = CVinput )
    # Both handlers close over the loop variable CVObj.  This is safe only
    # because the busy-wait below keeps this iteration alive until the user
    # clicks, so CVObj still names the commodity being edited.
    obj <- gbutton( text = "Update value", container= CVinput ,
      handler = function(h,...) {
        assign(CVObj, as.double(svalue(CVe)), env = .GlobalEnv)
        sw <<- 1
        dispose(CVinput)
      })
    obj <- gbutton(text = "Ignore missing value",container= CVinput ,
      handler = function(h,...) {
        assign(CVObj,0, env = .GlobalEnv)
        sw <<- 1
        dispose(CVinput)
      })
    # NOTE(review): busy-wait until a button handler resets sw.  This spins
    # the CPU and relies on the GUI toolkit processing events while the loop
    # runs -- TODO confirm this blocks as intended on the target toolkit.
    while (sw ==2){ print ("waiting for user input")}
  }
}
# Echo the final CV_* values to the console log.
for (CVObj in CVList) {
  print (CVObj)
  print (svalue(CVObj))
}

# Edited 1/17/18: branch on whether the mill is customised; for a custom
# mill, try to set commodity-specific MRR values.
if (MillChoice != "Customize Mill Options") {
  ##########################################################
  ### Create MRR variables /list                  ##########
  ##########################################################
  # One global MRR_<commodity> variable per commodity, sentinel -999.
  MRRList <<- c()
  for (cc in ListCNames) {
    MRRa <<- paste("MRR_",cc,sep="")
    assign(MRRa , -999, env = .GlobalEnv)
    MRRList <<- c(MRRList, MRRa )
  }
  ##########################################################
  ### Set Mill numbers                            ##########
  ##########################################################
  # MillNum selects the column of MillR.csv holding this mill's recovery
  # rates.
  MillNum<<- 13 ## base number for mill number, temporary default
  if (MillChoice == "3-Product Flotation (Omit lowest value commodity)") { MillNum <<- 11 }
  if (MillChoice == "3 - Product Flotation") { MillNum <<- 11 }
  if (MillChoice == "None") { MillNum <<- 13 }
  # NOTE(review): assigns MillNum3, not MillNum -- likely a typo; confirm.
  if (MillChoice == "User Define") { MillNum3 <<- 13 }
  ##########################################################
  ### Read MRR Values                             ##########
  ##########################################################
  # Column 1 of MillR.csv is the commodity name; column MillNum the rate.
  FileinCV <<- paste(InputFolder1,"/AuxFiles/ValueTabs","/MillR.csv", sep="")
  MR <<- read.csv(FileinCV , header= FALSE) ## input table with commodity values
  MRl <<- nrow (MR ) ## number of rows in table
  MRl <<- as.numeric(MRl ) ## convert the number to a numeric value
  MRw <<- 2 ## start at row 2 (row 1 is a header row)
  while (MRw < (MRl + 1)) {
    print (MRw)
    MRn1 <<- MR[MRw,1]
    MRn <<- toString(MRn1)
    print (MRn)
    cg <<- 1
    while ( cg < (CVListNum + 1)) {
      if (MRn == ListCNames[cg]) {
        MR234<<- as.numeric( toString(MR[MRw,MillNum]))
        assign(MRRList[cg],MR234)
      }
      cg <<- cg + 1
    }
    MRw <<-( MRw +1 )
  }
} ## ends if mill choice is not custom

################################################################
### Mill choice is custom: per-commodity mill selection  #######
################################################################
if (MillChoice == "Customize Mill Options") {
  ##########################################################
  ### Create MRR variables /list                  ##########
  ##########################################################
  MRRList <<- c()
  for (cc in ListCNames) {
    MRRa <<- paste("MRR_",cc,sep="")
    assign(MRRa , -999, env = .GlobalEnv)
    MRRList <<- c(MRRList, MRRa )
  }
  ##########################################################
  ### Set Mill MRR                                ##########
  ##########################################################
  MillNum<<- 13 ## base number for mill number, temporary default
  # One widget per commodity in MillCList; its value selects the mill type
  # for the commodity named by the widget (widget name = "Mill<commodity>").
  for (hh in MillCList) {
    zz <<- svalue(hh)
    print (zz)
    cn0 <<- sub("Mill", '', hh)   # commodity name extracted from widget name
    print (cn0)
    print (hh)
    FileinCV <<- paste(InputFolder1,"/AuxFiles/ValueTabs","/MillR.csv", sep="")
    MR <<- read.csv(FileinCV , header= FALSE) ## input table with commodity values
    MRl <<- nrow (MR ) ## number of rows in table
    MRl <<- as.numeric(MRl ) ## convert the number to a numeric value
    MRw <<- 2 ## start at row 2
    if (zz == "3-Product") {
      print ("3 product true")
      MillNum <<- 11
      print (MillNum)
      cg <<- 1
      ### loop each row starting at 2
      # NOTE(review): cg advances once per CSV row, so MRRList[cg] lines up
      # with ListCNames only if MillR.csv rows follow the same commodity
      # order (offset by the header row) -- confirm.
      while (MRw < (MRl + 1))
      {
        MRn1 <<- MR[MRw,1]
        MRn <<- toString(MRn1)
        if (MRn == cn0) {
          MR234<<- as.numeric( toString(MR[MRw,MillNum]))
          assign(MRRList[cg],MR234)
          print ("3 product MRR set")
        }
        MRw <<-( MRw +1 )
        cg <<- cg + 1
      }
    }
    if (zz == "2-Product") {
      print ("2 product true")
      MillNum <<- 9
      print (MillNum)
      cg <<- 1
      while (MRw < (MRl + 1)) {
        MillNum <<- 9   # redundant reassignment, kept as-is
        MRn1 <<- MR[MRw,1]
        MRn <<- toString(MRn1)
        if (MRn == cn0) {
          MR234<<- as.numeric( toString(MR[MRw,MillNum]))
          assign(MRRList[cg],MR234)
          print ("2 product MRR set")
        }
        MRw <<-( MRw +1 )
        cg <<- cg + 1
      }
    }
    if (zz == "1-Product") {
      print ("1 product true")
      MillNum <<- 8
      print (MillNum)
      cg <<- 1
      while (MRw < (MRl + 1)) {
        MillNum <<- 8   # redundant reassignment, kept as-is
        MRn1 <<- MR[MRw,1]
        MRn <<- toString(MRn1)
        if (MRn == cn0) {
          MillNum <<- 8   # redundant reassignment, kept as-is
          MR234<<- as.numeric( toString(MR[MRw,MillNum]))
          assign(MRRList[cg],MR234)
          print (MRRList[cg])
          print (MR234)
          print ("1 product MRR set")
        }
        MRw <<-( MRw +1 )
        cg <<- cg + 1
      }
    }
    if (zz == "None") {
      # Commodity is not milled: zero out its recovery rate.
      print ("None is True")
      dp11 <<- sub("Mill", "", hh)
      YName <<- dp11
      cg <<- 1
      while (MRw < (MRl + 1)) {
        MRn1 <<- MR[MRw,1]
        MRn <<- toString(MRn1)
        if (MRn == cn0) {
          assign(MRRList[cg],0)
        }
        MRw <<-( MRw +1 )
        cg <<- cg + 1
      }
    }
    if (zz == "User Define") {
      # Dialog collecting the user-defined mill's recovery rate and the
      # capital / operating cost power-law parameters (cost = K1 * Cml^K2).
      print ("User Define TRUE")
      sw <<- 2
      MRinput <- gwindow("",horizontal= FALSE)
      MRLabel0 <<- paste("Enter user define mill parameters for: ", cn0 ,sep=" ")
      MRLabel1 <<- paste("Enter metalogical recovery rate (decimal fraction)", sep=" ")
      MRlabel <<- glabel(MRLabel0 , container = MRinput )
      MRlabel <<- glabel(MRLabel1 , container = MRinput )
      MRe <<- gedit("",width = 30,container = MRinput )
      UDLabel1 <<- glabel("Enter the capital cost equation parameters: Constant and the Power log. Example Floation 3 equation is: 83600 * Cml^0.708 ", container = MRinput )
      UDName1 <<- gedit("Name of user defined mill? ",width = 30,container = MRinput )
      KC1 <<- gedit("$ constant for captial Cost",width = 30,container = MRinput )
      KC2 <<- gedit("Power log for capital cost",width = 30,container = MRinput )
      UDLabel1 <<- glabel("Enter the operating cost equation parameters: Constant and the Power log. Example Floation 3 equation is: 153 * Cml^(-0.344)", container = MRinput )
      KO1 <<- gedit("$ constant for operating Cost",width = 30,container = MRinput )
      KO2 <<- gedit("Power log for operating cost",width = 30,container = MRinput )
      # NOTE(review): this handler assigns to MRObj, which is only defined
      # by the missing-value loop below (a loop that is skipped for
      # "User Define"), so it would use a stale MRObj from a previous
      # iteration of that loop -- confirm intent.
      obj <- gbutton( text = "Submit User Define Mill", container= MRinput ,
        handler = function(h,...) {
          assign(MRObj, as.double(svalue(MRe)), env = .GlobalEnv)
          sw <<- 1
          # The gedit widgets are replaced by their string values once read.
          UDName1 <<- svalue(UDName1)
          KC1 <<- svalue(KC1)
          KC2 <<- svalue(KC2)
          KO1 <<- svalue(KO1)
          KO2 <<- svalue(KO2)
          dispose(MRinput)
        })
      # Busy-wait until the submit handler resets sw (see note above).
      while (sw ==2) { print ("waiting for user input") }
    } ## ends if user define
    if (zz != "User Define") {
      # Re-check every MRR_* value and prompt for any that look unset.
      for (MRObj in MRRList) {
        print (MRObj)
        print (svalue(MRObj))
        mrsv1 <<- svalue(MRObj)
        sw <<- 1
        # NOTE(review): mrsv1 was just read from MRObj, so this condition is
        # always TRUE and the dialog is shown for every commodity -- the
        # intent was probably `== -999`; confirm.
        if (svalue(MRObj) == mrsv1) {
          sw <<- 2
          print ( "TRUE")
          MRinput <- gwindow("",horizontal= FALSE)
          MRobj00 <<- sub("MRR_","",MRObj )
          MRLabel0 <<- paste("The commodity", MRobj00 ,"is missing a Metalogical Recovery Rate", sep=" ")
          MRLabel1 <<- paste("please input an updated value", sep=" ")
          MRLabel2 <<- paste("or click ignore the commodity", sep=" ")
          MRlabel <<- glabel(MRLabel0 , container = MRinput )
          MRlabel <<- glabel(MRLabel1 , container = MRinput )
          MRlabel <<- glabel(MRLabel2 , container = MRinput )
          MRe <<- gedit("Enter missing value (decimal fraction)",width = 30,container = MRinput )
          obj <- gbutton( text = "Update value", container= MRinput ,
            handler = function(h,...) {
              assign(MRObj, as.double(svalue(MRe)), env = .GlobalEnv)
              sw <<- 1
              dispose(MRinput)
            })
          obj <- gbutton(text = "Ignore missing value",container= MRinput ,
            handler = function(h,...) {
              assign(MRObj,0, env = .GlobalEnv)
              sw <<- 1
              dispose(MRinput)
            })
          # Busy-wait until a button handler resets sw (see note above).
          while (sw ==2) { print ("waiting for user input") }
        } ## ends if mrobj == mrsv1
      } ## ends for each mrobj
    } ## ends if not user define
  } ## ends each hh in millclist
} ## ends if customize mill option
#gmessage("All data has been checked and confirmed", title="message",icon = "info")
# UK Biobank diverse-populations PCA plotting ---------------------------------
# Interactive analysis script: summarises random-forest ancestry assignments
# against self-reported ethnicity, plots UKB samples projected onto a global
# HGDP/1kG reference PCA, draws reference-population maps, per-continent
# projections, and builds within-continent PC covariate tables.
# All file paths are hard-coded to the author's machine.
# Fixes applied in review: reassignable T/F replaced with TRUE/FALSE,
# `=` top-level assignments replaced with `<-`, and two script-halting
# scratch statements removed (see notes below).
library(tidyverse)
library(RColorBrewer)
library(maptools)
library(cowplot)

setwd('/Users/alicia/daly_lab/ukbb_diverse_pops/pca')

# Random-forest ancestry assignments: sample id `s` -> continental `pop`.
pop_assign <- read_delim(gzfile('globalref_ukbb_pca_pops_rf_50.txt.gz'), delim='\t') %>%
  select(s, pop) %>%
  mutate(pop=case_when(pop=='oth' ~ 'Other',
                       TRUE ~ pop))
pop_assign$s <- as.character(pop_assign$s)

# Self-reported ethnicity and country-of-birth fields.
ethnicity <- read_tsv('../data/ukb31063.ethnicity_birth.txt')

# Decode UKB ethnicity codes into human-readable labels.
# NOTE(review): code 3004 ('Any other Asian background') is never mapped here
# although a color/shape is defined for it below; such samples fall through
# to 'NA' and are dropped from the ethnicity plots -- confirm intent.
ethnicity_ancestry <- pop_assign %>%
  left_join(ethnicity, by=c('s'='userId')) %>%
  mutate(Ethnicity=case_when(ethnicity=='-3' ~ 'Prefer not to answer',
                             ethnicity=='-1' ~ 'Do not know',
                             ethnicity=='1' ~ 'White',
                             ethnicity=='1001' ~ 'British',
                             ethnicity=='1002' ~ 'Irish',
                             ethnicity=='1003' ~ 'Any other white background',
                             ethnicity=='2' ~ 'Mixed',
                             ethnicity=='2001' ~ 'White and Black Caribbean',
                             ethnicity=='2002' ~ 'White and Black African',
                             ethnicity=='2003' ~ 'White and Asian',
                             ethnicity=='2004' ~ 'Any other mixed background',
                             ethnicity=='3' ~ 'Asian or Asian British',
                             ethnicity=='3001' ~ 'Indian',
                             ethnicity=='3002' ~ 'Pakistani',
                             ethnicity=='3003' ~ 'Bangladeshi',
                             ethnicity=='4' ~ 'Black or Black British',
                             ethnicity=='4001' ~ 'Caribbean',
                             ethnicity=='4002' ~ 'African',
                             ethnicity=='4003' ~ 'Any other Black background',
                             ethnicity=='5' ~ 'Chinese',
                             ethnicity=='6' ~ 'Other ethnic group',
                             TRUE ~ 'NA'))

# Quick cross-tabulations for interactive inspection.
table(ethnicity_ancestry$ethnicity, ethnicity_ancestry$pop)
table(ethnicity_ancestry$continent, ethnicity_ancestry$pop)

conts <- unique(pop_assign$pop)

# For one continental assignment, tabulate countries of birth (descending
# count, zero-count countries dropped).  Prints and returns the table.
immigrant_country_ancestry <- function(cont) {
  a <- as.data.frame(table(ethnicity_ancestry$pop, ethnicity_ancestry$country)) %>%
    subset(Var1==cont) %>%
    arrange(desc(Freq)) %>%
    filter(Freq>0)
  print(a)
  return(a)
}
anc_country <- map_dfr(conts, immigrant_country_ancestry)
write.table(anc_country, 'country_immigration_ancestry.txt', quote=FALSE, row.names=FALSE, sep='\t')

# Plot global PCs ---------------------------------------------------------
# Reference (HGDP/1kG) scores plus metadata, then UKB scores projected into
# the same PC space.
ref <- read.table(gzfile('globalref_ukbb_scores.txt.bgz'), header=TRUE)
ref_info <- read.csv('/Users/alicia/Dropbox (Partners HealthCare)/martin_lab/projects/hgdp_tgp/tgp_hgdp.csv', header=TRUE)
ref <- ref %>%
  left_join(ref_info, by=c('s'='Sample.ID')) %>%
  mutate(Population=Genetic.region) %>%
  select(c(s, starts_with('PC'), Population))

ukbb <- read.table(gzfile('ukbb_globalref_scores.txt.bgz'), header=TRUE) %>%
  mutate(Population='UKBB')
ukbb$s <- as.character(ukbb$s)
ref_ukbb <- bind_rows(ukbb, ref)

# Continental color scheme; UKB samples drawn in black.
ukb_pop_colors <- c('AFR' = '#941494', 'AMR' = '#ED1E24', 'CSA' = '#FF9912',
                    'EAS' = '#108C44', 'EUR' = '#6AA5CD', 'MID' = '#EEA9B8',
                    'OCE' = '#a6761d', 'UKBB' = 'black', 'Other' = '#ABB9B9')
ref_ukbb$Population <- factor(ref_ukbb$Population, levels=as.character(names(ukb_pop_colors)))

# Scatter two PCs colored (and optionally shaped) by `legend_name`.
# Returns list(plot, x_range, y_range) so density plots can share axes.
# NOTE(review): `is.na(pop_shape)` is a vector condition when a shape vector
# is supplied; relies on the NA-default scalar case -- confirm on R >= 4.2.
plot_global_pca <- function(pcs, pop_color, pop_shape=NA, first_pc='PC1', second_pc='PC2', legend_name='Population') {
  if(is.na(pop_shape)) {
    pca_pop <- ggplot(pcs, aes_string(x=first_pc, y=second_pc, color=legend_name)) +
      geom_point(alpha=0.8)
  } else {
    pca_pop <- ggplot(pcs, aes_string(x=first_pc, y=second_pc, color=legend_name, shape=legend_name)) +
      geom_point(alpha=0.8)
  }
  pca_pop <- pca_pop +
    scale_color_manual(values=pop_color, name=legend_name) +
    scale_shape_manual(values=pop_shape, name=legend_name) +
    theme_classic() +
    theme(text = element_text(size=14),
          axis.text = element_text(color='black'),
          legend.text = element_text(size=10))
  x_lim <- ggplot_build(pca_pop)$layout$panel_scales_x[[1]]$range$range
  y_lim <- ggplot_build(pca_pop)$layout$panel_scales_y[[1]]$range$range
  return(list(pca_pop, x_lim, y_lim))
}

global_pcs_1_2 <- plot_global_pca(ref_ukbb, ukb_pop_colors)
global_pcs_3_4 <- plot_global_pca(ref_ukbb, ukb_pop_colors, first_pc = 'PC3', second_pc = 'PC4')
global_pcs_5_6 <- plot_global_pca(ref_ukbb, ukb_pop_colors, first_pc = 'PC5', second_pc = 'PC6')

# Hex-binned density of UKB samples on the same axis ranges as the scatter.
plot_pca_density <- function(pcs, x_lim, y_lim, first_pc='PC1', second_pc = 'PC2') {
  dens_pca <- ggplot(pcs, aes_string(x=first_pc, y=second_pc)) +
    geom_hex(bins=50) +
    scale_fill_gradientn(trans = "log", breaks=c(1,20,400,8000,163000),
                         name='Count', colours = rev(brewer.pal(5,'Spectral'))) +
    theme_classic() +
    lims(x=x_lim, y=y_lim) +
    theme(text = element_text(size=14),
          axis.text = element_text(color='black'),
          legend.text = element_text(size=10))
  return(dens_pca)
}

ukb_dens_pcs_1_2 <- plot_pca_density(ukbb, global_pcs_1_2[[2]], global_pcs_1_2[[3]])
ukb_dens_pcs_3_4 <- plot_pca_density(ukbb, global_pcs_3_4[[2]], global_pcs_3_4[[3]], first_pc='PC3', second_pc='PC4')
ukb_dens_pcs_5_6 <- plot_pca_density(ukbb, global_pcs_5_6[[2]], global_pcs_5_6[[3]], first_pc='PC5', second_pc='PC6')

# UKB samples colored by their RF-assigned continental ancestry.
ukbb_assign <- ukbb %>%
  left_join(pop_assign) %>%
  mutate(Population=pop) %>%
  select(-pop) %>%
  filter(!is.na(Population))
global_pcs_1_2_assign <- plot_global_pca(ukbb_assign, ukb_pop_colors)
global_pcs_3_4_assign <- plot_global_pca(ukbb_assign, ukb_pop_colors, first_pc = 'PC3', second_pc = 'PC4')
global_pcs_5_6_assign <- plot_global_pca(ukbb_assign, ukb_pop_colors, first_pc = 'PC5', second_pc = 'PC6')

# 3x3 panel: reference+UKB scatter / UKB density / UKB colored by assignment.
global_pcs <- plot_grid(global_pcs_1_2[[1]], global_pcs_3_4[[1]], global_pcs_5_6[[1]],
                        ukb_dens_pcs_1_2, ukb_dens_pcs_3_4, ukb_dens_pcs_5_6,
                        global_pcs_1_2_assign[[1]], global_pcs_3_4_assign[[1]], global_pcs_5_6_assign[[1]],
                        labels=LETTERS[1:9], nrow=3)
ggsave('ukbb_ref_agg_dens_rf_pca.png', global_pcs, width=12.5, height=10)

# Plot PCA x self-reported ethnicity info ---------------------------------
# Palette families group related ethnicity codes.
blues <- brewer.pal(5, 'Blues')[2:5]      # codes 1-1003 (White group)
reds <- brewer.pal(6, 'Reds')[2:6]        # codes 2-2004 (Mixed group)
oranges <- brewer.pal(6, 'Oranges')[2:6]  # codes 3-3004 (Asian group)
purples <- brewer.pal(5, 'Purples')[2:5]  # codes 4-4003 (Black group)
greys <- brewer.pal(4, 'Greys')[2:4]      # codes -3, -1, 6
ethnicity_colors <- c(greys[1:2], blues, reds, oranges, purples, '#108C44', greys[3])
names(ethnicity_colors) <- c('Prefer not to answer', 'Do not know', 'White', 'British', 'Irish',
                             'Any other white background', 'Mixed', 'White and Black Caribbean',
                             'White and Black African', 'White and Asian', 'Any other mixed background',
                             'Asian or Asian British', 'Indian', 'Pakistani', 'Bangladeshi',
                             'Any other Asian background', 'Black or Black British', 'Caribbean',
                             'African', 'Any other Black background', 'Chinese', 'Other ethnic group')
# Shapes mirror the palette families (x for unknown, one glyph per family).
ethnicity_shapes <- c(rep(4, 2), rep(15, 4), rep(16, 5), rep(17, 5), rep(18, 4), 3, 4)
names(ethnicity_shapes) <- names(ethnicity_colors)

ethnicity_ancestry$s <- as.character(ethnicity_ancestry$s)
ukbb_ethnicity <- ukbb %>%
  left_join(ethnicity_ancestry %>% filter(Ethnicity!='NA'))
ukbb_ethnicity$Ethnicity <- factor(ukbb_ethnicity$Ethnicity, levels = names(ethnicity_colors))

global_pcs_1_2_eth <- plot_global_pca(ukbb_ethnicity, ethnicity_colors, ethnicity_shapes, legend_name='Ethnicity')
global_pcs_3_4_eth <- plot_global_pca(ukbb_ethnicity, ethnicity_colors, ethnicity_shapes,
                                      first_pc = 'PC3', second_pc = 'PC4', legend_name='Ethnicity')
global_pcs_5_6_eth <- plot_global_pca(ukbb_ethnicity, ethnicity_colors, ethnicity_shapes,
                                      first_pc = 'PC5', second_pc = 'PC6', legend_name='Ethnicity')

# Shared bottom legend for the three ethnicity panels.
legend_b <- get_legend(global_pcs_1_2_eth[[1]] + theme(legend.position='bottom'))
anc_eth <- plot_grid(global_pcs_1_2_eth[[1]] + guides(color=FALSE, shape=FALSE),
                     global_pcs_3_4_eth[[1]] + guides(color=FALSE, shape=FALSE),
                     global_pcs_5_6_eth[[1]] + guides(color=FALSE, shape=FALSE),
                     labels=LETTERS[1:3], nrow=1)
anc_eth_legend <- plot_grid(anc_eth, legend_b, ncol=1, rel_heights=c(1, .4))
ggsave('ukbb_pca_eth.png', anc_eth_legend, width=12, height=7)

# (Removed in review: a commented-out scratch block merging GWAS-sample,
# pigmentation and 1000 Genomes metadata -- never executed.)

# Plot location maps ------------------------------------------------------
data(wrld_simpl)
world <- fortify(wrld_simpl)

# Latitude/longitude/plotting metadata for the reference populations.
pop_pos <- read.csv('/Users/alicia/Dropbox (Partners HealthCare)/daly_lab/UKBB-Diverse-Pops/data/pop_plot_info.csv', header=TRUE) %>%
  filter(Population !='ASW')

# Map of one continent with its reference populations as labeled points.
# rand_col=TRUE derives a spectral palette instead of using the Color column.
# Returns list(plot, color_vec, shape_vec); the vectors are reused by the
# per-continent projection plots below.
plot_cont_map <- function(cont_name, lon_lim, lat_lim, rand_col=FALSE) {
  pop_pos_plot <- subset(pop_pos, Continent == cont_name)
  pop_pos_plot$Population <- factor(pop_pos_plot$Population, levels=as.character(pop_pos_plot$Population))
  if(rand_col) {
    color_vec <- colorRampPalette(brewer.pal(4, 'Spectral'))(length(pop_pos_plot$Population))
  } else {
    color_vec <- as.character(pop_pos_plot$Color)
  }
  shape_vec <- rep_len(c(21:25), length.out = length(color_vec))
  names(color_vec) <- pop_pos_plot$Population
  names(shape_vec) <- pop_pos_plot$Population
  # World polygons clipped to the continent's bounding box, points on top.
  p_map <- ggplot() +
    geom_polygon(data = world, aes(long, lat, group=group), fill='lightyellow', color='lightgrey') +
    geom_point(data = pop_pos_plot, aes(Longitude, Latitude, color=Population, fill=Population, shape=Population), size=3) +
    coord_fixed(xlim = lon_lim, ylim = lat_lim) +
    labs(x='Longitude', y='Latitude') +
    theme_classic() +
    scale_fill_manual(name = "Population", values = color_vec) +
    scale_color_manual(name = "Population", values = color_vec) +
    scale_shape_manual(name = "Population", values = shape_vec) +
    theme(panel.background = element_rect(fill = "lightblue"),
          plot.background = element_rect(fill = "transparent", color = NA),
          #legend.position='bottom',
          text = element_text(size=14),
          axis.text = element_text(color='black'),
          legend.text = element_text(size=10))
  return(list(p_map, color_vec, shape_vec))
}

afr <- plot_cont_map('AFR', c(-20,50), c(-35,35))
amr <- plot_cont_map('AMR', c(-140,-35), c(-50,65), rand_col=TRUE)
csa <- plot_cont_map('CSA', c(60,95), c(5,45), rand_col=TRUE)
eas <- plot_cont_map('EAS', c(78,148), c(0,70), rand_col=TRUE)
eur <- plot_cont_map('EUR', c(-25,40), c(34,71), rand_col=TRUE)
mid <- plot_cont_map('MID', c(0,60), c(10,50), rand_col=TRUE)
ggsave('afr_ref_map.pdf', afr[[1]], width=10, height=10)
ggsave('csa_ref_map.pdf', csa[[1]])
ggsave('eas_ref_map.pdf', eas[[1]])
ggsave('mid_ref_map.pdf', mid[[1]])
ggsave('amr_ref_map.pdf', amr[[1]])
ggsave('eur_ref_map.pdf', eur[[1]])
# FIX(review): removed `p2 <- p1 + guides(fill=F, color=F, shape=F)` -- `p1`
# is never defined in this script, so the statement errored and halted the
# script here.

# Load population PCA and covariate info ----------------------------------
# Reference PC scores joined with .fam metadata; `pop` ordered to match the
# continent's color vector so plot legends line up with the map.
load_ref_pcs <- function(ref_pcs, ref_fam, pop_color) {
  ref_pcs <- read_delim(gzfile(ref_pcs), delim='\t')
  fam <- read.table(ref_fam, col.names=c('pop', 's', 'dad', 'mom', 'sex', 'pheno')) %>%
    select(pop, s, sex)
  ref_data <- merge(ref_pcs, fam, by='s')
  ref_data$pop <- factor(ref_data$pop, levels = names(pop_color))
  return(ref_data)
}
ref_afr <- load_ref_pcs('AFR_HGDP_1kG_AGVP_maf005_geno05_unrel_ukbb_scores.txt.bgz', 'AFR_HGDP_1kG_AGVP_maf005_geno05_unrel.fam', afr[[2]])
ref_amr <- load_ref_pcs('AMR_HGDP_1kG_maf005_geno05_unrel_ukbb_scores.txt.bgz', 'AMR_HGDP_1kG_maf005_geno05_unrel.fam', amr[[2]])
ref_csa <- load_ref_pcs('CSA_HGDP_1kG_maf005_geno05_unrel_ukbb_scores.txt.bgz', 'CSA_HGDP_1kG_maf005_geno05_unrel.fam', csa[[2]])
ref_eas <- load_ref_pcs('EAS_HGDP_1kG_maf005_geno05_unrel_ukbb_scores.txt.bgz', 'EAS_HGDP_1kG_maf005_geno05_unrel.fam', eas[[2]])
ref_eur <- load_ref_pcs('EUR_HGDP_1kG_maf005_geno05_unrel_ukbb_scores.txt.bgz', 'EUR_HGDP_1kG_maf005_geno05_unrel.fam', eur[[2]])
ref_mid <- load_ref_pcs('MID_HGDP_1kG_maf005_geno05_unrel_ukbb_scores.txt.bgz', 'MID_HGDP_1kG_maf005_geno05_unrel.fam', mid[[2]])

# Load UKB population data ------------------------------------------------
# UKB scores projected into one continent's PC space, restricted to samples
# RF-assigned to that continent, with ethnicity fields joined on.
# (Returned implicitly: the assignment's value is the last expression.)
load_ukb <- function(cont_name, filename) {
  ukb_pop <- read_delim(gzfile(filename), delim='\t') %>%
    left_join(pop_assign) %>%
    filter(pop==cont_name) %>%
    left_join(ethnicity, by=c('s'='userId'))
}
ukb_afr <- load_ukb('AFR', 'ukbb_AFR_HGDP_1kG_AGVP_maf005_geno05_unrel_scores.txt.bgz')
ukb_csa <- load_ukb('CSA', 'ukbb_CSA_HGDP_1kG_maf005_geno05_unrel_scores.txt.bgz')
ukb_eas <- load_ukb('EAS', 'ukbb_EAS_HGDP_1kG_maf005_geno05_unrel_scores.txt.bgz')
ukb_eur <- load_ukb('EUR', 'ukbb_EUR_HGDP_1kG_maf005_geno05_unrel_scores.txt.bgz')
ukb_mid <- load_ukb('MID', 'ukbb_MID_HGDP_1kG_maf005_geno05_unrel_scores.txt.bgz')
ukb_amr <- load_ukb('AMR', 'ukbb_AMR_HGDP_1kG_maf005_geno05_unrel_scores.txt.bgz')
#ukb_afr %>% dplyr::count(country) %>% arrange(desc(n)) %>% head(11)

# Plot PCA ----------------------------------------------------------------
# FIX(review): removed the scratch block
#   p_afr <- ggplot(afr, aes(x=PC1, y=PC2, color=pop)) + ... ; ggsave(...)
# `afr` is a list here (not a data frame) and `color_vec` is local to
# plot_cont_map, so the block errored and halted the script; it is
# superseded by save_pca_plot() below.

# Reference scatter (colored by pop) over a grey UKB cloud, plus a
# hex-binned UKB density on the same axis ranges.
plot_pca_ref_ukb <- function(ref_pop, ukb_pop, pop_color, pop_shape, first_pc='PC1', second_pc='PC2') {
  pca_pop <- ggplot(ref_pop, aes_string(x=first_pc, y=second_pc, color='pop', fill='pop', shape='pop')) +
    geom_point(data=ukb_pop, color='grey', fill='grey', shape=21) +
    geom_point() +
    scale_color_manual(values=pop_color, name='Population') +
    scale_fill_manual(values=pop_color, name='Population') +
    scale_shape_manual(values=pop_shape, name='Population') +
    guides(color=FALSE, fill=FALSE, shape=FALSE) +
    theme_classic() +
    theme(text = element_text(size=12))
  x_lim <- ggplot_build(pca_pop)$layout$panel_scales_x[[1]]$range$range
  y_lim <- ggplot_build(pca_pop)$layout$panel_scales_y[[1]]$range$range
  pca_density <- ggplot(ukb_pop, aes_string(x=first_pc, y=second_pc)) +
    geom_hex(bins=50) +
    scale_fill_gradientn(trans='sqrt', name='Count', colours = rev(brewer.pal(5,'Spectral'))) +
    lims(x=x_lim, y=y_lim) +
    theme_classic() +
    theme(text = element_text(size=12))
  return(list(pca_pop, pca_density))
}

# Compose map + three PC-pair rows (scatter|density) into one figure and
# save it as <pop_name>_cont_projection_1-6.png.
save_pca_plot <- function(pop, pop_name, ref_pop, ukb_pop, base_height, base_width) {
  p_pop_1_2 <- plot_pca_ref_ukb(ref_pop, ukb_pop, pop[[2]], pop[[3]], 'PC1', 'PC2')
  p_pop_3_4 <- plot_pca_ref_ukb(ref_pop, ukb_pop, pop[[2]], pop[[3]], 'PC3', 'PC4')
  p_pop_5_6 <- plot_pca_ref_ukb(ref_pop, ukb_pop, pop[[2]], pop[[3]], 'PC5', 'PC6')
  my_plot_1_2 <- plot_grid(p_pop_1_2[[1]], p_pop_1_2[[2]], rel_widths=c(1, 1.15))
  my_plot_3_4 <- plot_grid(p_pop_3_4[[1]], p_pop_3_4[[2]], rel_widths=c(1, 1.15))
  my_plot_5_6 <- plot_grid(p_pop_5_6[[1]], p_pop_5_6[[2]], rel_widths=c(1, 1.15))
  my_plot <- plot_grid(pop[[1]], my_plot_1_2, my_plot_3_4, my_plot_5_6,
                       ncol=1, labels=c('A', 'B', 'C', 'D'), rel_heights=c(1.5, 1, 1, 1))
  save_plot(paste0(pop_name, '_cont_projection_1-6.png'), my_plot,
            base_height = base_height, base_width = base_width)
}
save_pca_plot(afr, 'afr', ref_afr, ukb_afr, base_height=12, base_width=8)
save_pca_plot(amr, 'amr', ref_amr, ukb_amr, base_height=12, base_width=8)
save_pca_plot(csa, 'csa', ref_csa, ukb_csa, base_height=12, base_width=8)
save_pca_plot(eas, 'eas', ref_eas, ukb_eas, base_height=12, base_width=8)
save_pca_plot(mid, 'mid', ref_mid, ukb_mid, base_height=12, base_width=8)
save_pca_plot(eur, 'eur', ref_eur, ukb_eur, base_height=12, base_width=8)

# Within pop PCA (no ref) -------------------------------------------------
setwd('/Users/alicia/daly_lab/ukbb_diverse_pops/pca/ukb_within_continent')

# Read one population's within-continent PC scores, tagging each row with
# the population and relatedness ('rel' files have a different suffix).
read_pca <- function(pop_name, rel_unrel) {
  if(rel_unrel == 'rel') {
    pca <- read.table(gzfile(paste0(pop_name, '_rel_scores.txt.bgz')), header=TRUE) %>%
      mutate(pop=pop_name, rel=rel_unrel)
  } else {
    pca <- read.table(gzfile(paste0(pop_name, '_scores.txt.bgz')), header=TRUE) %>%
      mutate(pop=pop_name, rel=rel_unrel)
  }
  return(pca)
}

pops <- c('AFR', 'AMR', 'CSA', 'MID', 'EAS', 'EUR')
age_sex <- read.table(gzfile('uk_round2_allSamples_phenos_phesant.6148_5.tsv.gz'), header=TRUE, sep='\t') %>%
  select(userId, age, sex)

# Stack a population's related and unrelated score tables.
# (Returned implicitly: the assignment's value is the last expression.)
bind_rels <- function(pop) {
  pop_rel <- read_pca(pop, 'rel')
  pop_unrel <- read_pca(pop, 'unrel')
  pop_bind <- pop_rel %>% bind_rows(pop_unrel)
}
# NOTE(review): these shadow the plot_cont_map lists of the same names.
afr <- bind_rels('AFR')
amr <- bind_rels('AMR')
csa <- bind_rels('CSA')
mid <- bind_rels('MID')
eas <- bind_rels('EAS')
eur <- bind_rels('EUR')

# Combined covariate table: PCs + age, sex, and interaction terms.
bind_pops <- afr %>%
  bind_rows(amr) %>%
  bind_rows(csa) %>%
  bind_rows(mid) %>%
  bind_rows(eas) %>%
  bind_rows(eur) %>%
  left_join(age_sex, by=c('s'='userId')) %>%
  mutate(age2 = age^2, age_sex = age*sex, age2_sex = age^2 * sex)
write.table(bind_pops, 'within_pop_pc_covs.txt', quote=FALSE, row.names=FALSE, sep='\t')
plot_pca_density <- function(dataset, first_pc, second_pc) { pc_biplot <- ggplot(dataset, aes_string(x=first_pc, y=second_pc)) + geom_hex(bins=50) + scale_fill_gradientn(trans = "log", breaks=c(1,20,400,8000,163000), name='Count', colours = rev(brewer.pal(5,'Spectral'))) + theme_classic() + theme(text = element_text(size=16)) return(pc_biplot) } pop_ellipse <- function(df, num_ellipses) { # get mean and SD of each PC among each pop pc_nams <- paste("PC",1:10,sep="") mean_pcs <- colMeans(df[,pc_nams]) sd_pcs <- apply(df[,pc_nams],2,sd) # compute centroid distance for each individual centroid_dist <- rep(0,nrow(df)) for(i in 1:num_ellipses) { centroid_dist <- centroid_dist + (df[,pc_nams[i]]-mean_pcs[i])^2/(sd_pcs[i]^2) } pop_dist <- df %>% mutate(centroid_dist=centroid_dist) return(pop_dist) } pop_centroid <- function(ind_dist, cutpoint0, cutpoint1) { pop_cut <- subset(ind_dist, centroid_dist < cutpoint0) p_centroid <- ggplot(pop_cut, aes(x=centroid_dist)) + geom_histogram(bins=50) + labs(title=paste0('Sample size: ', nrow(subset(ind_dist, centroid_dist < cutpoint0)), ' -> ', nrow(subset(ind_dist, centroid_dist < cutpoint1)))) + xlab('Centroid distance') + ylab('Count') + geom_vline(xintercept=cutpoint1) + theme_bw() + theme(text = element_text(size=16)) return(list(p=p_centroid, pop_cut=pop_cut)) } save_filt_plots <- function(pop_name, pop_dist, cutpoint0, cutpoint1) { p_centroid0 = pop_centroid(pop_dist, cutpoint0, cutpoint1) ggsave(paste0(pop_name, '_within_pop_centroid_nofilt.pdf'), p_centroid0$p, height=7, width=7) p2 <- plot_pca_density(subset(pop_dist, centroid_dist < cutpoint0), 'PC1', 'PC2') ggsave(paste0(pop_name, '_within_pop_nofilt_pc1_2.png'), p2) p3 <- plot_pca_density(subset(pop_dist, centroid_dist < cutpoint0), 'PC3', 'PC4') ggsave(paste0(pop_name, '_within_pop_nofilt_pc3_4.png'), p3) p4 <- plot_pca_density(subset(pop_dist, centroid_dist < cutpoint0), 'PC5', 'PC6') ggsave(paste0(pop_name, '_within_pop_nofilt_pc5_6.png'), p4) p_centroid1 = 
pop_centroid(pop_dist, cutpoint1, cutpoint1) ggsave(paste0(pop_name, '_within_pop_centroid_filt.pdf'), p_centroid1$p, height=7, width=7) p6 <- plot_pca_density(subset(pop_dist, centroid_dist < cutpoint1), 'PC1', 'PC2') ggsave(paste0(pop_name, '_within_pop_filt_pc1_2.png'), p6) p7 <- plot_pca_density(subset(pop_dist, centroid_dist < cutpoint1), 'PC3', 'PC4') ggsave(paste0(pop_name, '_within_pop_filt_pc3_4.png'), p7) p8 <- plot_pca_density(subset(pop_dist, centroid_dist < cutpoint1), 'PC5', 'PC6') ggsave(paste0(pop_name, '_within_pop_filt_pc5_6.png'), p8) my_plot=plot_grid(p_centroid0$p, p2, p3, p4, p_centroid1$p, p6, p7, p8, nrow=2) save_plot(paste0(pop_name, '_within_pop.png'), my_plot, base_height=10, base_width = 18) return(p_centroid1$pop_cut) } csa_cut <- save_filt_plots('csa', csa_dist <- pop_ellipse(csa, 3), 1000, 3) #3, 3 afr_cut <- save_filt_plots('afr', afr_dist <- pop_ellipse(afr, 3), 1000, 2) eas_cut <- save_filt_plots('eas', eas_dist <- pop_ellipse(eas, 3), 1000, 7.5) amr_cut <- save_filt_plots('amr', amr_dist <- pop_ellipse(amr, 3), 1000, 4.8) mid_cut <- save_filt_plots('mid', mid_dist <- pop_ellipse(mid, 5), 1000, 15) eur_cut <- save_filt_plots('eur', eur_dist <- pop_ellipse(eur, 5), 1000, 10) pop_cuts <- csa_cut %>% bind_rows(afr_cut, eas_cut, amr_cut, mid_cut, eur_cut) %>% select(s, pop) write.table(pop_cuts, 'ukb_diverse_pops_pruned.tsv', row.names=F, sep='\t', quote=F)
/plot_ukbb_continental_pca.R
permissive
atgu/ukbb_pan_ancestry
R
false
false
22,964
r
library(tidyverse) library(RColorBrewer) library(maptools) library(cowplot) setwd('/Users/alicia/daly_lab/ukbb_diverse_pops/pca') pop_assign <- read_delim(gzfile('globalref_ukbb_pca_pops_rf_50.txt.gz'), delim='\t') %>% select(s, pop) %>% mutate(pop=case_when(pop=='oth' ~ 'Other', TRUE ~ pop)) pop_assign$s <- as.character(pop_assign$s) ethnicity <- read_tsv('../data/ukb31063.ethnicity_birth.txt') ethnicity_ancestry <- pop_assign %>% left_join(ethnicity, by=c('s'='userId')) %>% mutate(Ethnicity=case_when(ethnicity=='-3' ~ 'Prefer not to answer', ethnicity=='-1' ~ 'Do not know', ethnicity=='1' ~ 'White', ethnicity=='1001' ~ 'British', ethnicity=='1002' ~ 'Irish', ethnicity=='1003' ~ 'Any other white background', ethnicity=='2' ~ 'Mixed', ethnicity=='2001' ~ 'White and Black Caribbean', ethnicity=='2002' ~ 'White and Black African', ethnicity=='2003' ~ 'White and Asian', ethnicity=='2004' ~ 'Any other mixed background', ethnicity=='3' ~ 'Asian or Asian British', ethnicity=='3001' ~ 'Indian', ethnicity=='3002' ~ 'Pakistani', ethnicity=='3003' ~ 'Bangladeshi', ethnicity=='4' ~ 'Black or Black British', ethnicity=='4001' ~ 'Caribbean', ethnicity=='4002' ~ 'African', ethnicity=='4003' ~ 'Any other Black background', ethnicity=='5' ~ 'Chinese', ethnicity=='6' ~ 'Other ethnic group', TRUE ~ 'NA' )) table(ethnicity_ancestry$ethnicity, ethnicity_ancestry$pop) table(ethnicity_ancestry$continent, ethnicity_ancestry$pop) conts <- unique(pop_assign$pop) immigrant_country_ancestry <- function(cont) { a <- as.data.frame(table(ethnicity_ancestry$pop, ethnicity_ancestry$country)) %>% subset(Var1==cont) %>% arrange(desc(Freq)) %>% filter(Freq>0) print(a) return(a) } anc_country <- map_dfr(conts, immigrant_country_ancestry) write.table(anc_country, 'country_immigration_ancestry.txt', quote=F, row.names=F, sep='\t') # Plot global PCs --------------------------------------------------------- ref <- read.table(gzfile('globalref_ukbb_scores.txt.bgz'), header=T) ref_info <- 
read.csv('/Users/alicia/Dropbox (Partners HealthCare)/martin_lab/projects/hgdp_tgp/tgp_hgdp.csv', header=T) ref <- ref %>% left_join(ref_info, by=c('s'='Sample.ID')) %>% mutate(Population=Genetic.region) %>% select(c(s, starts_with('PC'), Population)) ukbb <- read.table(gzfile('ukbb_globalref_scores.txt.bgz'), header=T) %>% mutate(Population='UKBB') ukbb$s <- as.character(ukbb$s) ref_ukbb <- bind_rows(ukbb, ref) ukb_pop_colors = c('AFR' = '#941494', 'AMR' = '#ED1E24', 'CSA' = '#FF9912', 'EAS' = '#108C44', 'EUR' = '#6AA5CD', 'MID' = '#EEA9B8', 'OCE' = '#a6761d', 'UKBB' = 'black', 'Other' = '#ABB9B9') ref_ukbb$Population <- factor(ref_ukbb$Population, levels=as.character(names(ukb_pop_colors))) plot_global_pca <- function(pcs, pop_color, pop_shape=NA, first_pc='PC1', second_pc='PC2', legend_name='Population') { if(is.na(pop_shape)) { pca_pop <- ggplot(pcs, aes_string(x=first_pc, y=second_pc, color=legend_name)) + geom_point(alpha=0.8) } else { pca_pop <- ggplot(pcs, aes_string(x=first_pc, y=second_pc, color=legend_name, shape=legend_name)) + geom_point(alpha=0.8) } pca_pop <- pca_pop + scale_color_manual(values=pop_color, name=legend_name) + scale_shape_manual(values=pop_shape, name=legend_name) + theme_classic() + theme(text = element_text(size=14), axis.text = element_text(color='black'), legend.text = element_text(size=10)) x_lim = ggplot_build(pca_pop)$layout$panel_scales_x[[1]]$range$range y_lim = ggplot_build(pca_pop)$layout$panel_scales_y[[1]]$range$range return(list(pca_pop, x_lim, y_lim)) } global_pcs_1_2 <- plot_global_pca(ref_ukbb, ukb_pop_colors) global_pcs_3_4 <- plot_global_pca(ref_ukbb, ukb_pop_colors, first_pc = 'PC3', second_pc = 'PC4') global_pcs_5_6 <- plot_global_pca(ref_ukbb, ukb_pop_colors, first_pc = 'PC5', second_pc = 'PC6') plot_pca_density <- function(pcs, x_lim, y_lim, first_pc='PC1', second_pc = 'PC2') { dens_pca <- ggplot(pcs, aes_string(x=first_pc, y=second_pc)) + geom_hex(bins=50) + scale_fill_gradientn(trans = "log", 
breaks=c(1,20,400,8000,163000), name='Count', colours = rev(brewer.pal(5,'Spectral'))) + theme_classic() + lims(x=x_lim, y=y_lim) + theme(text = element_text(size=14), axis.text = element_text(color='black'), legend.text = element_text(size=10)) return(dens_pca) } ukb_dens_pcs_1_2 <- plot_pca_density(ukbb, global_pcs_1_2[[2]], global_pcs_1_2[[3]]) ukb_dens_pcs_3_4 <- plot_pca_density(ukbb, global_pcs_3_4[[2]], global_pcs_3_4[[3]], first_pc='PC3', second_pc='PC4') ukb_dens_pcs_5_6 <- plot_pca_density(ukbb, global_pcs_5_6[[2]], global_pcs_5_6[[3]], first_pc='PC5', second_pc='PC6') ukbb_assign <- ukbb %>% left_join(pop_assign) %>% mutate(Population=pop) %>% select(-pop) %>% filter(!is.na(Population)) global_pcs_1_2_assign <- plot_global_pca(ukbb_assign, ukb_pop_colors) global_pcs_3_4_assign <- plot_global_pca(ukbb_assign, ukb_pop_colors, first_pc = 'PC3', second_pc = 'PC4') global_pcs_5_6_assign <- plot_global_pca(ukbb_assign, ukb_pop_colors, first_pc = 'PC5', second_pc = 'PC6') global_pcs <- plot_grid(global_pcs_1_2[[1]], global_pcs_3_4[[1]], global_pcs_5_6[[1]], ukb_dens_pcs_1_2, ukb_dens_pcs_3_4, ukb_dens_pcs_5_6, global_pcs_1_2_assign[[1]], global_pcs_3_4_assign[[1]], global_pcs_5_6_assign[[1]], labels=LETTERS[1:9], nrow=3) ggsave('ukbb_ref_agg_dens_rf_pca.png', global_pcs, width=12.5, height=10) # Plot PCA x self-reported ethnicity info --------------------------------- blues <- brewer.pal(5, 'Blues')[2:5] #1-1003 reds <- brewer.pal(6, 'Reds')[2:6] #2-2004 oranges <- brewer.pal(6, 'Oranges')[2:6] #3-3004 purples <- brewer.pal(5, 'Purples')[2:5] #4-4003 greys <- brewer.pal(4, 'Greys')[2:4] #-3, -1, 6 ethnicity_colors = c(greys[1:2], blues, reds, oranges, purples, '#108C44', greys[3]) names(ethnicity_colors) <- c('Prefer not to answer', 'Do not know', 'White', 'British', 'Irish', 'Any other white background', 'Mixed', 'White and Black Caribbean', 'White and Black African', 'White and Asian', 'Any other mixed background', 'Asian or Asian British', 'Indian', 
'Pakistani', 'Bangladeshi', 'Any other Asian background', 'Black or Black British', 'Caribbean', 'African', 'Any other Black background', 'Chinese', 'Other ethnic group') ethnicity_shapes <- c(rep(4, 2), rep(15, 4), rep(16, 5), rep(17, 5), rep(18, 4), 3, 4) names(ethnicity_shapes) <- names(ethnicity_colors) ethnicity_ancestry$s <- as.character(ethnicity_ancestry$s) ukbb_ethnicity <- ukbb %>% left_join(ethnicity_ancestry %>% filter(Ethnicity!='NA')) ukbb_ethnicity$Ethnicity <- factor(ukbb_ethnicity$Ethnicity, levels = names(ethnicity_colors)) global_pcs_1_2_eth <- plot_global_pca(ukbb_ethnicity, ethnicity_colors, ethnicity_shapes, legend_name='Ethnicity') global_pcs_3_4_eth <- plot_global_pca(ukbb_ethnicity, ethnicity_colors, ethnicity_shapes, first_pc = 'PC3', second_pc = 'PC4', legend_name='Ethnicity') global_pcs_5_6_eth <- plot_global_pca(ukbb_ethnicity, ethnicity_colors, ethnicity_shapes, first_pc = 'PC5', second_pc = 'PC6', legend_name='Ethnicity') legend_b <- get_legend(global_pcs_1_2_eth[[1]] + theme(legend.position='bottom')) anc_eth <- plot_grid(global_pcs_1_2_eth[[1]] + guides(color=F, shape=F), global_pcs_3_4_eth[[1]] + guides(color=F, shape=F), global_pcs_5_6_eth[[1]] + guides(color=F, shape=F), labels=LETTERS[1:3], nrow=1) anc_eth_legend <- plot_grid(anc_eth, legend_b, ncol=1, rel_heights=c(1, .4)) ggsave('ukbb_pca_eth.png', anc_eth_legend, width=12, height=7) # # # ukbb$s <- as.character(ukbb$s) # rf <- read.table('ukbb_pca_pops_rf.txt.gz', header=T) %>% select(c('s', 'pop')) # rf$s <- as.character(rf$s) # in_gwas <- read.table('../ukb31063.gwas_samples.both_sexes.txt', header=T) # pigment <- read.table('../skin_color_tanning.txt.gz', header=T, sep='\t') # pigment$s <- as.character(pigment$s) # ukbb2 <- ukbb %>% # mutate(in_gwas=ifelse(s %in% in_gwas$s, TRUE, FALSE)) %>% # left_join(rf, by='s') %>% # left_join(pigment, by='s') # # tgp_pops <- read.table('../integrated_call_samples_v3.20130502.ALL.panel', header=T) # tgp <- merge(tgp, tgp_pops, 
by.x='s', by.y='sample', all=T) # # ukbb_tgp <- ukbb %>% # bind_rows(tgp) %>% # mutate(all_pop=ifelse(is.na(super_pop), 'UKBB', as.character(super_pop))) # # brewer_vec <- brewer.pal(7, 'Set1') # brewer_vec <- c(brewer_vec, 'black', 'black', brewer_vec[4]) # names(brewer_vec) <- c('EUR', 'EAS', 'AMR', 'SAS', 'AFR', 'MID', 'OCE', 'UKBB', 'oth', 'CSA') # Plot location maps ------------------------------------------------------ data(wrld_simpl) world <- fortify(wrld_simpl) # Read the latitude/longitdue/plotting data for reference populations pop_pos <- read.csv('/Users/alicia/Dropbox (Partners HealthCare)/daly_lab/UKBB-Diverse-Pops/data/pop_plot_info.csv', header=T) %>% filter(Population !='ASW') plot_cont_map <- function(cont_name, lon_lim, lat_lim, rand_col=FALSE) { pop_pos_plot <- subset(pop_pos, Continent == cont_name) pop_pos_plot$Population <- factor(pop_pos_plot$Population, levels=as.character(pop_pos_plot$Population)) if(rand_col) { color_vec <- colorRampPalette(brewer.pal(4, 'Spectral'))(length(pop_pos_plot$Population)) } else { color_vec <- as.character(pop_pos_plot$Color) } shape_vec <- rep_len(c(21:25), length.out = length(color_vec)) names(color_vec) <- pop_pos_plot$Population names(shape_vec) <- pop_pos_plot$Population # plot the map of Africa with data points labeled p_map <- ggplot() + geom_polygon(data = world, aes(long, lat, group=group), fill='lightyellow', color='lightgrey') + geom_point(data = pop_pos_plot, aes(Longitude, Latitude, color=Population, fill=Population, shape=Population), size=3) + coord_fixed(xlim = lon_lim, ylim = lat_lim) + labs(x='Longitude', y='Latitude') + theme_classic() + scale_fill_manual(name = "Population", values = color_vec) + scale_color_manual(name = "Population", values = color_vec) + scale_shape_manual(name = "Population", values = shape_vec) + theme(panel.background = element_rect(fill = "lightblue"), plot.background = element_rect(fill = "transparent", color = NA), #legend.position='bottom', text = 
element_text(size=14), axis.text = element_text(color='black'), legend.text = element_text(size=10)) return(list(p_map, color_vec, shape_vec)) } afr <- plot_cont_map('AFR', c(-20,50), c(-35,35)) amr <- plot_cont_map('AMR', c(-140,-35), c(-50,65), rand_col=TRUE) csa <- plot_cont_map('CSA', c(60,95), c(5,45), rand_col=TRUE) eas <- plot_cont_map('EAS', c(78,148), c(0,70), rand_col=TRUE) eur <- plot_cont_map('EUR', c(-25,40), c(34,71), rand_col=TRUE) mid <- plot_cont_map('MID', c(0,60), c(10,50), rand_col=TRUE) ggsave('afr_ref_map.pdf', afr[[1]], width=10, height=10) ggsave('csa_ref_map.pdf', csa[[1]]) ggsave('eas_ref_map.pdf', eas[[1]]) ggsave('mid_ref_map.pdf', mid[[1]]) ggsave('amr_ref_map.pdf', amr[[1]]) ggsave('eur_ref_map.pdf', eur[[1]]) p2 <- p1 + guides(fill=F, color=F, shape=F) # Load population PCA and covariate info ---------------------------------- # NOTE: change order here to correspond to order in color_vec load_ref_pcs <- function(ref_pcs, ref_fam, pop_color) { ref_pcs <- read_delim(gzfile(ref_pcs), delim='\t') fam <- read.table(ref_fam, col.names=c('pop', 's', 'dad', 'mom', 'sex', 'pheno')) %>% select(pop, s, sex) ref_data <- merge(ref_pcs, fam, by='s') ref_data$pop <- factor(ref_data$pop, levels = names(pop_color)) return(ref_data) } ref_afr <- load_ref_pcs('AFR_HGDP_1kG_AGVP_maf005_geno05_unrel_ukbb_scores.txt.bgz', 'AFR_HGDP_1kG_AGVP_maf005_geno05_unrel.fam', afr[[2]]) ref_amr <- load_ref_pcs('AMR_HGDP_1kG_maf005_geno05_unrel_ukbb_scores.txt.bgz', 'AMR_HGDP_1kG_maf005_geno05_unrel.fam', amr[[2]]) ref_csa <- load_ref_pcs('CSA_HGDP_1kG_maf005_geno05_unrel_ukbb_scores.txt.bgz', 'CSA_HGDP_1kG_maf005_geno05_unrel.fam', csa[[2]]) ref_eas <- load_ref_pcs('EAS_HGDP_1kG_maf005_geno05_unrel_ukbb_scores.txt.bgz', 'EAS_HGDP_1kG_maf005_geno05_unrel.fam', eas[[2]]) ref_eur <- load_ref_pcs('EUR_HGDP_1kG_maf005_geno05_unrel_ukbb_scores.txt.bgz', 'EUR_HGDP_1kG_maf005_geno05_unrel.fam', eur[[2]]) ref_mid <- 
load_ref_pcs('MID_HGDP_1kG_maf005_geno05_unrel_ukbb_scores.txt.bgz', 'MID_HGDP_1kG_maf005_geno05_unrel.fam', mid[[2]]) # Load UKB population data ------------------------------------------------ load_ukb <- function(cont_name, filename) { ukb_pop <- read_delim(gzfile(filename), delim='\t') %>% left_join(pop_assign) %>% filter(pop==cont_name) %>% left_join(ethnicity, by=c('s'='userId')) ##### } ukb_afr <- load_ukb('AFR', 'ukbb_AFR_HGDP_1kG_AGVP_maf005_geno05_unrel_scores.txt.bgz') ukb_csa <- load_ukb('CSA', 'ukbb_CSA_HGDP_1kG_maf005_geno05_unrel_scores.txt.bgz') ukb_eas <- load_ukb('EAS', 'ukbb_EAS_HGDP_1kG_maf005_geno05_unrel_scores.txt.bgz') ukb_eur <- load_ukb('EUR', 'ukbb_EUR_HGDP_1kG_maf005_geno05_unrel_scores.txt.bgz') ukb_mid <- load_ukb('MID', 'ukbb_MID_HGDP_1kG_maf005_geno05_unrel_scores.txt.bgz') ukb_amr <- load_ukb('AMR', 'ukbb_AMR_HGDP_1kG_maf005_geno05_unrel_scores.txt.bgz') #ukb_afr %>% dplyr::count(country) %>% arrange(desc(n)) %>% head(11) # Plot PCA ---------------------------------------------------------------- p_afr <- ggplot(afr, aes(x=PC1, y=PC2, color=pop)) + geom_point(data=ukb_afr, color='grey') + geom_point() + scale_color_manual(values=color_vec, name='Population') + theme_classic() + theme(text = element_text(size=16)) ggsave('afr_cont_projection.pdf', p_afr, width=8, height=6) plot_pca_ref_ukb <- function(ref_pop, ukb_pop, pop_color, pop_shape, first_pc='PC1', second_pc='PC2') { pca_pop <- ggplot(ref_pop, aes_string(x=first_pc, y=second_pc, color='pop', fill='pop', shape='pop')) + geom_point(data=ukb_pop, color='grey', fill='grey', shape=21) + geom_point() + scale_color_manual(values=pop_color, name='Population') + scale_fill_manual(values=pop_color, name='Population') + scale_shape_manual(values=pop_shape, name='Population') + guides(color=F, fill=F, shape=F) + theme_classic() + theme(text = element_text(size=12)) x_lim <- ggplot_build(pca_pop)$layout$panel_scales_x[[1]]$range$range y_lim <- 
ggplot_build(pca_pop)$layout$panel_scales_y[[1]]$range$range pca_density <- ggplot(ukb_pop, aes_string(x=first_pc, y=second_pc)) + geom_hex(bins=50) + scale_fill_gradientn(trans='sqrt', name='Count', colours = rev(brewer.pal(5,'Spectral'))) + lims(x=x_lim, y=y_lim) + theme_classic() + theme(text = element_text(size=12)) return(list(pca_pop, pca_density)) } save_pca_plot <- function(pop, pop_name, ref_pop, ukb_pop, base_height, base_width) { p_pop_1_2 <- plot_pca_ref_ukb(ref_pop, ukb_pop, pop[[2]], pop[[3]], 'PC1', 'PC2') p_pop_3_4 <- plot_pca_ref_ukb(ref_pop, ukb_pop, pop[[2]], pop[[3]], 'PC3', 'PC4') p_pop_5_6 <- plot_pca_ref_ukb(ref_pop, ukb_pop, pop[[2]], pop[[3]], 'PC5', 'PC6') my_plot_1_2=plot_grid(p_pop_1_2[[1]], p_pop_1_2[[2]], rel_widths=c(1, 1.15)) my_plot_3_4=plot_grid(p_pop_3_4[[1]], p_pop_3_4[[2]], rel_widths=c(1, 1.15)) my_plot_5_6=plot_grid(p_pop_5_6[[1]], p_pop_5_6[[2]], rel_widths=c(1, 1.15)) my_plot = plot_grid(pop[[1]], my_plot_1_2, my_plot_3_4, my_plot_5_6, ncol=1, labels=c('A', 'B', 'C', 'D'), rel_heights=c(1.5, 1, 1, 1)) save_plot(paste0(pop_name, '_cont_projection_1-6.png'), my_plot, base_height = base_height, base_width = base_width) # save_plot(filename=paste0(pop_name, '_cont_projection_1_2.png'), plot=my_plot, base_height = 5, base_width=10) # save_plot(paste0(pop_name, '_cont_projection_3_4.png'), plot_grid(p_pop_3_4[[1]], p_pop_3_4[[2]], rel_widths=c(1, 1.15)), base_height = 5, base_width=10) # save_plot(paste0(pop_name, '_cont_projection_5_6.png'), plot_grid(p_pop_5_6[[1]], p_pop_5_6[[2]], rel_widths=c(1, 1.15)), base_height = 5, base_width=10) } save_pca_plot(afr, 'afr', ref_afr, ukb_afr, base_height=12, base_width=8) save_pca_plot(amr, 'amr', ref_amr, ukb_amr, base_height=12, base_width=8) save_pca_plot(csa, 'csa', ref_csa, ukb_csa, base_height=12, base_width=8) save_pca_plot(eas, 'eas', ref_eas, ukb_eas, base_height=12, base_width=8) save_pca_plot(mid, 'mid', ref_mid, ukb_mid, base_height=12, base_width=8) save_pca_plot(eur, 'eur', 
ref_eur, ukb_eur, base_height=12, base_width=8) # p_afr_1_2 <- plot_pca_ref_ukb(ref_afr, ukb_afr, afr[[2]], afr[[3]], 'PC1', 'PC2') # p_afr_3_4 <- plot_pca_ref_ukb(ref_afr, ukb_afr, afr[[2]], afr[[3]], 'PC3', 'PC4') # p_afr_5_6 <- plot_pca_ref_ukb(ref_afr, ukb_afr, afr[[2]], afr[[3]], 'PC5', 'PC6') # my_plot <- plot_grid(p_afr_1_2[[1]], p_afr_1_2[[2]], rel_widths=c(1, 1.15)) # save_plot('afr_cont_projection_1_2.png', my_plot, base_height = 5, base_width=10) # save_plot('afr_cont_projection_3_4.png', plot_grid(p_afr_3_4[[1]], p_afr_3_4[[2]], rel_widths=c(1, 1.15)), base_height = 5, base_width=10) # save_plot('afr_cont_projection_5_6.png', plot_grid(p_afr_5_6[[1]], p_afr_5_6[[2]], rel_widths=c(1, 1.15)), base_height = 5, base_width=10) # Within pop PCA (no ref) ------------------------------------------------- setwd('/Users/alicia/daly_lab/ukbb_diverse_pops/pca/ukb_within_continent') read_pca <- function(pop_name, rel_unrel) { if(rel_unrel == 'rel') { pca <- read.table(gzfile(paste0(pop_name, '_rel_scores.txt.bgz')), header=T) %>% mutate(pop=pop_name, rel=rel_unrel) } else { pca <- read.table(gzfile(paste0(pop_name, '_scores.txt.bgz')), header=T) %>% mutate(pop=pop_name, rel=rel_unrel) } return(pca) } pops <- c('AFR', 'AMR', 'CSA', 'MID', 'EAS', 'EUR') age_sex <- read.table(gzfile('uk_round2_allSamples_phenos_phesant.6148_5.tsv.gz'), header=T, sep='\t') %>% select(userId, age, sex) bind_rels <- function(pop) { pop_rel <- read_pca(pop, 'rel') pop_unrel <- read_pca(pop, 'unrel') pop_bind <- pop_rel %>% bind_rows(pop_unrel) } afr <- bind_rels('AFR') amr <- bind_rels('AMR') csa <- bind_rels('CSA') mid <- bind_rels('MID') eas <- bind_rels('EAS') eur <- bind_rels('EUR') bind_pops <- afr %>% bind_rows(amr) %>% bind_rows(csa) %>% bind_rows(mid) %>% bind_rows(eas) %>% bind_rows(eur) %>% left_join(age_sex, by=c('s'='userId')) %>% mutate(age2 = age^2, age_sex = age*sex, age2_sex = age^2 * sex) write.table(bind_pops, 'within_pop_pc_covs.txt', quote=F, row.names=F, sep='\t') 
# Hex-bin density biplot of two principal components for one population
# (within-population PCA, no reference panel); counts on a log scale.
plot_pca_density <- function(dataset, first_pc, second_pc) {
  pc_biplot <- ggplot(dataset, aes_string(x = first_pc, y = second_pc)) +
    geom_hex(bins = 50) +
    scale_fill_gradientn(trans = "log",
                         breaks = c(1, 20, 400, 8000, 163000),
                         name = "Count",
                         colours = rev(brewer.pal(5, "Spectral"))) +
    theme_classic() +
    theme(text = element_text(size = 16))
  pc_biplot
}

# Squared standardized distance of each individual from the PC centroid,
# accumulated over the first `num_ellipses` PCs. Columns PC1..PC10 must
# be present; returns the input with a `centroid_dist` column appended.
pop_ellipse <- function(df, num_ellipses) {
  pc_nams <- paste0("PC", 1:10)
  use_pcs <- pc_nams[seq_len(num_ellipses)]
  # scale() centers each column by its mean and divides by its SD, so
  # row-summing the squares reproduces sum((x - mu)^2 / sd^2) per person.
  dist_vec <- rowSums(scale(df[, use_pcs])^2)
  df %>% mutate(centroid_dist = dist_vec)
}

# Histogram of centroid distances below `cutpoint0`, titled with the
# sample sizes before (< cutpoint0) and after (< cutpoint1) filtering,
# with a vertical line at the final cutoff. Returns the plot and the
# subset of individuals passing `cutpoint0`.
pop_centroid <- function(ind_dist, cutpoint0, cutpoint1) {
  pop_cut <- ind_dist %>% filter(centroid_dist < cutpoint0)
  n_before <- nrow(pop_cut)
  n_after <- sum(ind_dist$centroid_dist < cutpoint1, na.rm = TRUE)
  p_centroid <- ggplot(pop_cut, aes(x = centroid_dist)) +
    geom_histogram(bins = 50) +
    labs(title = paste0("Sample size: ", n_before, " -> ", n_after)) +
    xlab("Centroid distance") +
    ylab("Count") +
    geom_vline(xintercept = cutpoint1) +
    theme_bw() +
    theme(text = element_text(size = 16))
  list(p = p_centroid, pop_cut = pop_cut)
}

# Write per-population QC figures: centroid-distance histograms plus
# PC1/2, PC3/4, and PC5/6 density biplots before (< cutpoint0) and after
# (< cutpoint1) outlier filtering, then one combined panel figure.
# Returns the individuals passing the relaxed (cutpoint1-based) subset
# from the filtered histogram, i.e. those with centroid_dist < cutpoint1.
save_filt_plots <- function(pop_name, pop_dist, cutpoint0, cutpoint1) {
  pc_pairs <- list(c("PC1", "PC2"), c("PC3", "PC4"), c("PC5", "PC6"))
  suffixes <- c("pc1_2", "pc3_4", "pc5_6")

  # Unfiltered view (everything below the loose cutpoint0 threshold).
  centroid_nofilt <- pop_centroid(pop_dist, cutpoint0, cutpoint1)
  ggsave(paste0(pop_name, "_within_pop_centroid_nofilt.pdf"),
         centroid_nofilt$p, height = 7, width = 7)
  kept_nofilt <- pop_dist %>% filter(centroid_dist < cutpoint0)
  nofilt_plots <- lapply(pc_pairs, function(pcs) {
    plot_pca_density(kept_nofilt, pcs[1], pcs[2])
  })
  for (i in seq_along(nofilt_plots)) {
    ggsave(paste0(pop_name, "_within_pop_nofilt_", suffixes[i], ".png"),
           nofilt_plots[[i]])
  }

  # Filtered view (final cutpoint1 threshold).
  centroid_filt <- pop_centroid(pop_dist, cutpoint1, cutpoint1)
  ggsave(paste0(pop_name, "_within_pop_centroid_filt.pdf"),
         centroid_filt$p, height = 7, width = 7)
  kept_filt <- pop_dist %>% filter(centroid_dist < cutpoint1)
  filt_plots <- lapply(pc_pairs, function(pcs) {
    plot_pca_density(kept_filt, pcs[1], pcs[2])
  })
  for (i in seq_along(filt_plots)) {
    ggsave(paste0(pop_name, "_within_pop_filt_", suffixes[i], ".png"),
           filt_plots[[i]])
  }

  combined <- plot_grid(centroid_nofilt$p,
                        nofilt_plots[[1]], nofilt_plots[[2]], nofilt_plots[[3]],
                        centroid_filt$p,
                        filt_plots[[1]], filt_plots[[2]], filt_plots[[3]],
                        nrow = 2)
  save_plot(paste0(pop_name, "_within_pop.png"), combined,
            base_height = 10, base_width = 18)

  centroid_filt$pop_cut
}

# Apply per-population cutoffs; cutpoint0 = 1000 effectively keeps
# everyone for the "no filter" plots, cutpoint1 is the outlier threshold.
csa_cut <- save_filt_plots("csa", csa_dist <- pop_ellipse(csa, 3), 1000, 3)  # 3, 3
afr_cut <- save_filt_plots("afr", afr_dist <- pop_ellipse(afr, 3), 1000, 2)
eas_cut <- save_filt_plots("eas", eas_dist <- pop_ellipse(eas, 3), 1000, 7.5)
amr_cut <- save_filt_plots("amr", amr_dist <- pop_ellipse(amr, 3), 1000, 4.8)
mid_cut <- save_filt_plots("mid", mid_dist <- pop_ellipse(mid, 5), 1000, 15)
eur_cut <- save_filt_plots("eur", eur_dist <- pop_ellipse(eur, 5), 1000, 10)

pop_cuts <- csa_cut %>%
  bind_rows(afr_cut, eas_cut, amr_cut, mid_cut, eur_cut) %>%
  select(s, pop)
write.table(pop_cuts, "ukb_diverse_pops_pruned.tsv",
            row.names = F, sep = "\t", quote = F)
## Exploratory plots for the UCI "Individual household electric power
## consumption" dataset: draws a 2x2 panel of time-series plots for
## 2007-02-01 and 2007-02-02 into plot4.png.

## Read the raw dataset. The file is ';'-separated and encodes missing
## values as "?", so declare na.strings up front; otherwise the numeric
## columns come in as character and as.numeric() coerces "?" to NA with
## warnings.
powerData <- read.table(file = "household_power_consumption.txt",
                        header = TRUE, sep = ";", na.strings = "?",
                        stringsAsFactors = FALSE)

## Combine Date + Time into a single POSIXct timestamp for the x-axes.
powerData$Datetime <- as.POSIXct(paste(powerData$Date, powerData$Time),
                                 format = "%d/%m/%Y %H:%M:%S")

## Convert the Date column to a Date object so it can be compared below.
powerData$Date <- as.Date(powerData$Date, "%d/%m/%Y")

## Keep only the two days of interest: 2007-02-01 and 2007-02-02.
powerData <- subset(powerData, Date == "2007-02-01" | Date == "2007-02-02")

## Ensure measurement columns are numeric (these are no-ops when
## na.strings already produced numeric columns, kept for robustness).
powerData$Global_active_power   <- as.numeric(powerData$Global_active_power)
powerData$Voltage               <- as.numeric(powerData$Voltage)
powerData$Global_reactive_power <- as.numeric(powerData$Global_reactive_power)

## Open the PNG device and lay out a 2x2 grid of panels.
png(filename = "plot4.png")
par(mfrow = c(2, 2))

## Panel 1: global active power over time.
plot(powerData$Datetime, powerData$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power")

## Panel 2: voltage over time.
plot(powerData$Datetime, powerData$Voltage, type = "l",
     xlab = "datetime", ylab = "Voltage")

## Panel 3: the three sub-metering series overlaid on one set of axes.
plot(powerData$Datetime, powerData$Sub_metering_1, type = "l",
     xlab = "", ylab = "Energy sub metering")
lines(powerData$Datetime, powerData$Sub_metering_2, type = "l", col = "red")
lines(powerData$Datetime, powerData$Sub_metering_3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lwd = 1, bty = "n", cex = 0.7)

## Panel 4: global reactive power over time.
plot(powerData$Datetime, powerData$Global_reactive_power, type = "l",
     xlab = "datetime", ylab = "Global_reactive_power")

## Close the device to flush the file to disk.
dev.off()
/plot4.R
no_license
bnbalint/ExData_Plotting1
R
false
false
2,406
r
## read in the dataset powerData <- read.table(file = "household_power_consumption.txt", header = TRUE, sep = ";", stringsAsFactors = FALSE) ## create a datetime column powerData$Datetime <- as.POSIXct(paste(powerData$Date, powerData$Time), format = "%d/%m/%Y %H:%M:%S") ## convert the Date column to a date object powerData$Date <- as.Date(powerData$Date, "%d/%m/%Y") ## subset the data to get the dates ## 2007-02-01 and 2007-02-02 powerData <- subset(powerData, Date == "2007-02-01" | Date == "2007-02-02") ## convert numeric columns to numeric powerData$Global_active_power <- as.numeric(powerData$Global_active_power) powerData$Voltage <- as.numeric(powerData$Voltage) powerData$Global_reactive_power <- as.numeric(powerData$Global_reactive_power) ## create the png file png(filename = "plot4.png") ## create the multi-plot frame par(mfrow = c(2,2)) ## ## create the first plot ## Global_active_power by datetime ######################################################## plot(powerData$Datetime, powerData$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power") ## ## create the second plot ## Voltage by datetime ######################################################## plot(powerData$Datetime, powerData$Voltage, type = "l", xlab = "datetime", ylab = "Voltage") ## ## create the third plot ## Sub_metering by type ######################################################## ## create empty line graph for Sub_metering_1 plot(powerData$Datetime, powerData$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering") ## add the line graph for Sub_metering_2 lines(powerData$Datetime, powerData$Sub_metering_2, type = "l", col = "red") ## add the line graph for Sub_metering_3 lines(powerData$Datetime, powerData$Sub_metering_3, type = "l", col = "blue") ## create the legend for the graph legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lwd = 1, bty = "n", cex = 0.7) ## ## create the fourth plot ## 
Global_reactive_power by datetime ######################################################## plot(powerData$Datetime, powerData$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power") ## close the png file dev.off()
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pokemon_data_partial.R
\name{pokemon_data_partial}
\alias{pokemon_data_partial}
\title{Show chosen statistics of Pokemon}
\usage{
pokemon_data_partial(para = "Weight")
}
\arguments{
\item{para}{The specific characteristic of the Pokemon data to display, such as "weight", "height", or "HP". Defaults to "Weight".}
}
\value{
A dataset containing the statistics of Pokemon chosen by the user.
}
\description{
Show chosen statistics of Pokemon
}
/man/pokemon_data_partial.Rd
no_license
sunqihui1221/QihuiSunFinal
R
false
true
524
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pokemon_data_partial.R \name{pokemon_data_partial} \alias{pokemon_data_partial} \title{Show chosen statistics of pokemons} \usage{ pokemon_data_partial(para = "Weight") } \arguments{ \item{para}{specific characteristic you want to check about pokemon data, like "weight", "height", "HP"...,set default to "Weight"} } \value{ A dataset containing statistics of pokemons that chosen by the user } \description{ Show chosen statistics of pokemons }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/FunctionsForHasse.R \name{\%lte\%} \alias{\%lte\%} \title{Function to compare 2 structuples/profiles for 'less than or equal' relation} \usage{ x \%lte\% y } \arguments{ \item{x}{First structuple/profile} \item{y}{Second structuple/profile} } \description{ Function to compare 2 structuples/profiles for 'less than or equal' relation } \examples{ c(1,2)\%lte\%c(2,1) c(1,2,1,5)\%lte\%c(11,2,1,7) }
/man/grapes-lte-grapes.Rd
permissive
CambridgeAssessmentResearch/POSAC
R
false
true
497
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/FunctionsForHasse.R \name{\%lte\%} \alias{\%lte\%} \title{Function to compare 2 structuples/profiles for 'less than or equal' relation} \usage{ x \%lte\% y } \arguments{ \item{x}{First structuple/profile} \item{y}{Second structuple/profile} } \description{ Function to compare 2 structuples/profiles for 'less than or equal' relation } \examples{ c(1,2)\%lte\%c(2,1) c(1,2,1,5)\%lte\%c(11,2,1,7) }
## LTE GTM handset/usage analysis: merges Robi and Airtel 4G exports,
## cleans them, and writes regional summaries to disk.
## NOTE(review): the hard-coded setwd() ties this script to one machine;
## kept for compatibility, but relative paths would be more portable.
getwd()
setwd(dir = "D:/Shiny/Office/LTE_GTM/")
dir()

### Libraries
library(openxlsx)
library(dplyr)
library(ggplot2)

### Unzip the combined export
unzip("Both brand.zip")
dir("Both brand/")

### Read both brand sheets
df_Robi <- read.xlsx(xlsxFile = "Both brand/Robi single sheet.xlsx",
                     sheet = 1, startRow = 3)
str(df_Robi)
df_Airtel <- read.xlsx(xlsxFile = "Both brand/Airtel single sheet.xlsx",
                       sheet = 1, startRow = 2)
str(df_Airtel)

### Sanitizing
# Robi: tag brand, coerce key columns to factor, zero-fill missing usage.
df_Robi$Brand <- "Robi"
df_Robi[, c(1:6, 13)] <- lapply(df_Robi[, c(1:6, 13)], factor)
df_Robi[, 7:12][is.na(df_Robi[, 7:12])] <- 0
summary(df_Robi)

# Airtel: same treatment.
df_Airtel$Brand <- "Airtel"
df_Airtel[, c(1:6, 13)] <- lapply(df_Airtel[, c(1:6, 13)], factor)
df_Airtel[, 7:12][is.na(df_Airtel[, 7:12])] <- 0
summary(df_Airtel)
str(df_Airtel)

### Merge both brands (align Airtel column names to Robi's first)
names(df_Airtel) <- names(df_Robi)
str(df_Robi)
str(df_Airtel)
df_merged <- rbind(df_Robi, df_Airtel)
saveRDS(df_merged, file = "df_merged.rds")
write.xlsx(df_merged, "df_merged.xlsx")

df_merged <- readRDS("df_merged.rds")
str(df_merged)
class(df_merged)
summary(df_merged)

### Exploration: per-region usage summary, dropping unusable regions
df_regional_clean <- df_merged %>%
  filter(!REGION %in% c("Unclassified", "Unknown")) %>%
  filter(!is.na(REGION)) %>%
  group_by(Brand, REGION, FOURG_AREA, USIM_STATUS, Phone.type, Phone.make) %>%
  summarise(
    c1_Count = sum(C_1_USER_COUNT),
    c1_Usage = sum(C_1_TOTAL_DATA_USAGE_MB),
    c2_Count = sum(C_2_USER_COUNT),
    c2_Usage = sum(C_2_TOTAL_DATA_USAGE_MB),
    c3_Count = sum(C_3_USER_COUNT),
    c3_Usage = sum(C_3_TOTAL_DATA_USAGE_MB)
  )
df_regional_clean <- as.data.frame(df_regional_clean)
str(df_regional_clean)
class(df_regional_clean)
saveRDS(df_regional_clean, file = "df_regional_clean.rds")
write.xlsx(df_regional_clean, file = "df_regional_clean.xlsx")

#### New data drop
# Robi part.
# NOTE(review): read.csv2 defaults to dec="," even with sep=","; if the
# raw CSV uses "." as the decimal separator, numeric columns may be
# misparsed — verify against the source file (read.csv may be intended).
df_Robi_New <- read.csv2("data/Robi.csv", header = T, sep = ",")
head(df_Robi_New)
str(df_Robi_New)
df_Robi_New[, c(1:4)] <- lapply(df_Robi_New[, c(1:4)], factor)
df_Robi_New[, 5:27][is.na(df_Robi_New[, 5:27])] <- 0
summary(df_Robi_New)
df_Robi_New$Brand <- 'Robi'

# Airtel part.
df_AT_New <- read.csv2("data/Airtel.csv", header = T, sep = ",")
head(df_AT_New)
str(df_AT_New)
df_AT_New[, c(1:4)] <- lapply(df_AT_New[, c(1:4)], factor)
df_AT_New[, 5:27][is.na(df_AT_New[, 5:27])] <- 0
summary(df_AT_New)
df_AT_New$Brand <- 'AT'

# Merging
df_merged_New <- rbind(df_Robi_New, df_AT_New)
saveRDS(df_merged_New, file = "df_merged_New.rds")
write.xlsx(df_merged_New, "df_merged_New.xlsx")

df_merged_New <- readRDS("df_merged_New.rds")
str(df_merged_New)
class(df_merged_New)
summary(df_merged_New)

# Exploration
df_merged_New[, c(1:4)] <- lapply(df_merged_New[, c(1:4)], as.character)
str(df_merged_New)

## BUG FIX: this summary pipeline previously ended in View() and was never
## assigned (preceded by a bare `df_regional_clean_New` that errored on a
## missing object), so as.data.frame(df_regional_clean_New) below failed.
## Assign the pipeline result instead of viewing it.
df_regional_clean_New <- df_merged_New %>%
  filter(nchar(REGION_NAME) > 0) %>%
  mutate(COUNT = 1) %>%
  group_by(Brand, REGION_NAME, SITE_TYPE, FOURG_AREA) %>%
  summarise_at(
    # `.` here is the grouped data: sum the usage columns (5:27) plus
    # the COUNT flag (29) within each group.
    .vars = names(.)[c(5:27, 29)],
    .funs = c(sum = "sum")
  )
df_regional_clean_New <- as.data.frame(df_regional_clean_New)
str(df_regional_clean_New)
class(df_regional_clean_New)
saveRDS(df_regional_clean_New, file = "df_regional_clean_20171214.rds")
write.xlsx(df_regional_clean_New, file = "df_regional_clean_20171214.xlsx")
/LTE_GTM.R
no_license
SaifKA/Retailer_Analysis
R
false
false
3,248
r
## LTE_GTM.R -- build merged Robi/Airtel LTE handset datasets and
## regional aggregates for go-to-market analysis.
##
## Inputs : "Both brand.zip" (xlsx exports), data/Robi.csv, data/Airtel.csv
## Outputs: df_merged.(rds|xlsx), df_regional_clean.(rds|xlsx),
##          df_merged_New.(rds|xlsx), df_regional_clean_20171214.(rds|xlsx)

getwd()
# NOTE(review): hard-coded setwd() makes the script machine-specific;
# consider relative paths or here::here() instead.
setwd(dir = "D:/Shiny/Office/LTE_GTM/")
dir()

### Libraries ----
library(openxlsx)
library(dplyr)
library(ggplot2)

### Unzip ----
unzip("Both brand.zip")
dir("Both brand/")

### Read ----
df_Robi <- read.xlsx(xlsxFile = "Both brand/Robi single sheet.xlsx",
                     sheet = 1, startRow = 3)
str(df_Robi)
df_Airtel <- read.xlsx(xlsxFile = "Both brand/Airtel single sheet.xlsx",
                       sheet = 1, startRow = 2)
str(df_Airtel)

### Sanitizing ----
# Robi: tag the brand, coerce id columns (1:6, 13) to factor,
# zero-fill the usage columns (7:12).
df_Robi$Brand <- "Robi"
df_Robi[, c(1:6, 13)] <- lapply(df_Robi[, c(1:6, 13)], factor)
df_Robi[, 7:12][is.na(df_Robi[, 7:12])] <- 0
summary(df_Robi)

# Airtel: same treatment.
df_Airtel$Brand <- "Airtel"
df_Airtel[, c(1:6, 13)] <- lapply(df_Airtel[, c(1:6, 13)], factor)
df_Airtel[, 7:12][is.na(df_Airtel[, 7:12])] <- 0
summary(df_Airtel)
str(df_Airtel)

### Merging both together ----
# Align Airtel's column names to Robi's so rbind() lines the columns up.
names(df_Airtel) <- names(df_Robi)
str(df_Robi)
str(df_Airtel)
df_merged <- rbind(df_Robi, df_Airtel)
saveRDS(df_merged, file = "df_merged.rds")
write.xlsx(df_merged, "df_merged.xlsx")
df_merged <- readRDS("df_merged.rds")
str(df_merged)
class(df_merged)
summary(df_merged)

### Exploration ----
# Regional aggregates: drop unknown/missing regions, then sum user counts
# and data usage per brand/region/4G-area/USIM/phone combination.
df_regional_clean <- df_merged %>%
  filter(!REGION %in% c("Unclassified", "Unknown")) %>%
  filter(!is.na(REGION)) %>%
  group_by(Brand, REGION, FOURG_AREA, USIM_STATUS, Phone.type, Phone.make) %>%
  summarise(
    c1_Count = sum(C_1_USER_COUNT),
    c1_Usage = sum(C_1_TOTAL_DATA_USAGE_MB),
    c2_Count = sum(C_2_USER_COUNT),
    c2_Usage = sum(C_2_TOTAL_DATA_USAGE_MB),
    c3_Count = sum(C_3_USER_COUNT),
    c3_Usage = sum(C_3_TOTAL_DATA_USAGE_MB)
  )
df_regional_clean <- as.data.frame(df_regional_clean)
str(df_regional_clean)
class(df_regional_clean)
saveRDS(df_regional_clean, file = "df_regional_clean.rds")
write.xlsx(df_regional_clean, file = "df_regional_clean.xlsx")

#### New Data ----
# Robi Part
# NOTE(review): read.csv2() defaults to dec = "," ; with sep = "," the field
# and decimal separators collide -- confirm these CSVs have no comma
# decimals, otherwise plain read.csv() is the right call.
df_Robi_New <- read.csv2("data/Robi.csv", header = TRUE, sep = ",")
df_Robi_New %>% head()
str(df_Robi_New)
df_Robi_New[, c(1:4)] <- lapply(df_Robi_New[, c(1:4)], factor)
df_Robi_New[, 5:27][is.na(df_Robi_New[, 5:27])] <- 0
summary(df_Robi_New)
df_Robi_New$Brand <- 'Robi'

# Airtel Part
df_AT_New <- read.csv2("data/Airtel.csv", header = TRUE, sep = ",")
df_AT_New %>% head()
str(df_AT_New)
df_AT_New[, c(1:4)] <- lapply(df_AT_New[, c(1:4)], factor)
df_AT_New[, 5:27][is.na(df_AT_New[, 5:27])] <- 0
summary(df_AT_New)
df_AT_New$Brand <- 'AT'

# Merging
df_merged_New <- rbind(df_Robi_New, df_AT_New)
saveRDS(df_merged_New, file = "df_merged_New.rds")
write.xlsx(df_merged_New, "df_merged_New.xlsx")
df_merged_New <- readRDS("df_merged_New.rds")
str(df_merged_New)
class(df_merged_New)
summary(df_merged_New)

# Exploration
df_merged_New[, c(1:4)] <- lapply(df_merged_New[, c(1:4)], as.character)
str(df_merged_New)
# FIX: the aggregation below was piped into View() and never assigned,
# leaving df_regional_clean_New undefined for the save/convert steps
# (a bare `df_regional_clean_New` reference preceded it and would error).
# Assign the result instead of only viewing it.
df_regional_clean_New <- df_merged_New %>%
  filter(nchar(REGION_NAME) > 0) %>%
  mutate(COUNT = 1) %>%
  group_by(Brand, REGION_NAME, SITE_TYPE, FOURG_AREA) %>%
  summarise_at(
    .vars = names(.)[c(5:27, 29)],
    .funs = c(sum = "sum")
  )
df_regional_clean_New <- as.data.frame(df_regional_clean_New)
str(df_regional_clean_New)
class(df_regional_clean_New)
saveRDS(df_regional_clean_New, file = "df_regional_clean_20171214.rds")
write.xlsx(df_regional_clean_New, file = "df_regional_clean_20171214.xlsx")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/agTrend.R \name{updateTrend} \alias{updateTrend} \title{Recalculate posterior predictive trend with a different time scale.} \usage{ updateTrend(x, start, end, type = "pred", order = "lin") } \arguments{ \item{x}{An mcmc augmentation object produced by a call to \code{\link{mcmc.aggregate}} or an element of the list produced by a call to \code{\link{newAggregation}}.} \item{start}{A new start value for the time span} \item{end}{A new end value for the time span} \item{type}{The type of trend calculated. Use \code{"pred"} for posterior predictive trends and \code{"real"} to use the estimated, realized abundance aggregation.} \item{order}{The order of trend calculated. Can be one of \code{"lin"}, for linear trends, or \code{"const"}, for mean log-abundance.} } \description{ Given a fitted model object (fit with 'keep.site.abund=TRUE') the function recalculates the posterior sample for an alternate time scale from the one originally specified in the model-fitting MCMC sample. }
/man/updateTrend.Rd
no_license
NMML/agTrend
R
false
true
1,073
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/agTrend.R \name{updateTrend} \alias{updateTrend} \title{Recalculate posterior predictive trend with a different time scale.} \usage{ updateTrend(x, start, end, type = "pred", order = "lin") } \arguments{ \item{x}{An mcmc augmentation object produced by a call to \code{\link{mcmc.aggregate}} or an element of the list produced by a call to \code{\link{newAggregation}}.} \item{start}{A new start value for the time span} \item{end}{A new end value for the time span} \item{type}{The type of trend calculated. Use \code{"pred"} for posterior predictive trends and \code{"real"} to use the estimated, realized abundance aggregation.} \item{order}{The order of trend calculated. Can be one of \code{"lin"}, for linear trends, or \code{"const"}, for mean log-abundance.} } \description{ Given a fitted model object (fit with 'keep.site.abund=TRUE') the function recalculates the posterior sample for an alternate time scale from the one originally specified in the model-fitting MCMC sample. }
# Load the phylogenetic tree for locus 8235_0, strip its root, and
# write the unrooted topology back out in Newick format.
library(ape)

gene_tree <- read.tree("8235_0.txt")
write.tree(unroot(gene_tree), file = "8235_0_unrooted.txt")
/codeml_files/newick_trees_processed/8235_0/rinput.R
no_license
DaniBoo/cyanobacteria_project
R
false
false
135
r
# Load the phylogenetic tree for locus 8235_0, strip its root, and
# write the unrooted topology back out in Newick format.
library(ape)

gene_tree <- read.tree("8235_0.txt")
write.tree(unroot(gene_tree), file = "8235_0_unrooted.txt")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/envelopes2d.r \name{print.combined_global_envelope_2d} \alias{print.combined_global_envelope_2d} \title{Print method for the class 'combined_global_envelope_2d'} \usage{ \method{print}{combined_global_envelope_2d}(x, ...) } \arguments{ \item{x}{an 'combined_global_envelope_2d' object} \item{...}{Ignored.} } \description{ Print method for the class 'combined_global_envelope_2d' }
/man/print.combined_global_envelope_2d.Rd
no_license
jiro74/GET
R
false
true
461
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/envelopes2d.r \name{print.combined_global_envelope_2d} \alias{print.combined_global_envelope_2d} \title{Print method for the class 'combined_global_envelope_2d'} \usage{ \method{print}{combined_global_envelope_2d}(x, ...) } \arguments{ \item{x}{an 'combined_global_envelope_2d' object} \item{...}{Ignored.} } \description{ Print method for the class 'combined_global_envelope_2d' }
\name{caeRem2.xenoRefGene.LENGTH} \docType{data} \alias{caeRem2.xenoRefGene.LENGTH} \title{Transcript length data for the organism caeRem} \description{caeRem2.xenoRefGene.LENGTH is an R object which maps transcripts to the length (in bp) of their mature mRNA transcripts. Where available, it will also provide the mapping between a gene ID and its associated transcripts. The data is obtained from the UCSC table browser (http://genome.ucsc.edu/cgi-bin/hgTables) using the xenoRefGene table. The data file was made by calling downloadLengthFromUCSC(caeRem2, xenoRefGene) on the date on which the package was last updated.} \seealso{ \code{\link{downloadLengthFromUCSC}}} \examples{ data(caeRem2.xenoRefGene.LENGTH) head(caeRem2.xenoRefGene.LENGTH) } \keyword{datasets}
/man/caeRem2.xenoRefGene.LENGTH.Rd
no_license
nadiadavidson/geneLenDataBase
R
false
false
773
rd
\name{caeRem2.xenoRefGene.LENGTH} \docType{data} \alias{caeRem2.xenoRefGene.LENGTH} \title{Transcript length data for the organism caeRem} \description{caeRem2.xenoRefGene.LENGTH is an R object which maps transcripts to the length (in bp) of their mature mRNA transcripts. Where available, it will also provide the mapping between a gene ID and its associated transcripts. The data is obtained from the UCSC table browser (http://genome.ucsc.edu/cgi-bin/hgTables) using the xenoRefGene table. The data file was made by calling downloadLengthFromUCSC(caeRem2, xenoRefGene) on the date on which the package was last updated.} \seealso{ \code{\link{downloadLengthFromUCSC}}} \examples{ data(caeRem2.xenoRefGene.LENGTH) head(caeRem2.xenoRefGene.LENGTH) } \keyword{datasets}
############### #LIBS ############### library(lme4) library(reshape) library(foreign) library(ggplot2) library(plyr) library(data.table) library(reshape2) library(Hmisc) library(mgcv) library(gdata) library(car) ########### import datasets #import NDVI ndvid<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN007_Key_tables/ndviid_aodid.csv") ndvi<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN006_NDVI_yearly/ndvi.rds") #import PBL pbl <- fread("/media/NAS/Uni/Data/Europe/PBL_Europe/dailymeanpbl/fianlpblXY_2002.csv") allbestpredlist <- list() path.data<-"/media/NAS/Uni/Data/Europe/PBL_Europe/dailymeanpbl/" for(i in 2002:2013){ allbestpredlist[[paste0("year_", i)]] <- fread(paste0(path.data, "fianlpblXY_", i, ".csv")) print(i) } allbestpred <- rbindlist(allbestpredlist) rm(allbestpredlist) pbl <- allbestpred[ longitude > 32 & longitude < 37 & latitude < 34 & latitude > 29, ] pbl <- pbl [, day:=as.Date(strptime(date, "%m/%d/%Y"))] #import LU lu1<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/LU1.csv") #add Land cover to LU p_os<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/p_os.csv") p_dev<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/p_devHG.csv") p_dos<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/p_devOS.csv") p_farm<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/p_farming.csv") p_for<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/p_forest.csv") p_ind<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/p_industry.csv") lu1 <- merge(lu1, p_os[, list(aodid,MEAN)], all.x= T, by = c("aodid")) lu1[,p_os:=MEAN*100] lu1[,MEAN:=NULL] lu1 <- merge(lu1, p_dev[, list(aodid,MEAN)], all.x= T, by = c("aodid")) 
lu1[,p_dev:=MEAN*100] lu1[,MEAN:=NULL] lu1 <- merge(lu1, p_dos[, list(aodid,MEAN)], all.x= T, by = c("aodid")) lu1[,p_dos:=MEAN*100] lu1[,MEAN:=NULL] lu1 <- merge(lu1, p_farm[, list(aodid,MEAN)], all.x= T, by = c("aodid")) lu1[,p_farm:=MEAN*100] lu1[,MEAN:=NULL] lu1 <- merge(lu1, p_for[, list(aodid,MEAN)], all.x= T, by = c("aodid")) lu1[,p_for:=MEAN*100] lu1[,MEAN:=NULL] lu1 <- merge(lu1, p_ind[, list(aodid,MEAN)], all.x= T, by = c("aodid")) lu1[,p_ind:=MEAN*100] lu1[,MEAN:=NULL] #delete "palestine" wlu<-lu1[!is.na(p_for)] #Temp Temp <- fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/0.raw/temporal MOEP/Temp_D.csv") Temp$date<-paste(Temp$Day,Temp$Month,Temp$Year,sep="/") Temp[, day:=as.Date(strptime(date, "%d/%m/%Y"))] Temp[, c := as.numeric(format(day, "%Y")) ] Temp[,c("Year","Month","Day","date"):=NULL] Temp <- Temp[X != 'NaN'] Temp <- Temp[Temp != 'NaN'] #WD WD <- fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/0.raw/temporal MOEP/WD_D.csv") WD$date<-paste(WD$Day,WD$Month,WD$Year,sep="/") WD[, day:=as.Date(strptime(date, "%d/%m/%Y"))] WD[, c := as.numeric(format(day, "%Y")) ] WD[,c("Year","Month","Day","date"):=NULL] WS<- WD[X != 'NaN'] #WS WS <- fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/0.raw/temporal MOEP/WS_D.csv") WS$date<-paste(WS$Day,WS$Month,WS$Year,sep="/") WS[, day:=as.Date(strptime(date, "%d/%m/%Y"))] WS[, c := as.numeric(format(day, "%Y")) ] WS[,c("Year","Month","Day","date"):=NULL] WS <- WS[X != 'NA'] #remove missing WS[,length(na.omit(WS)),by=list(stn,c)] WS[, WS_miss := length(na.omit(WS)),by=list(stn,c)] WS<-WS[WS_miss > 300] #RH RH <- fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/0.raw/temporal MOEP/RH_D.csv") RH$date<-paste(RH$Day,RH$Month,RH$Year,sep="/") RH[, day:=as.Date(strptime(date, "%d/%m/%Y"))] RH[, c := as.numeric(format(day, "%Y")) ] RH[,c("Year","Month","Day","date"):=NULL] RH <- RH[X != 'NaN'] #remove missing RH[,length(na.omit(RH)),by=list(stn,c)] RH[, RH_miss := length(na.omit(RH)),by=list(stn,c)] RH<-RH[RH_miss > 300] 
#NO2 NO2 <- fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/0.raw/temporal MOEP/NO2_D.csv") NO2$date<-paste(NO2$Day,NO2$Month,NO2$Year,sep="/") NO2[, day:=as.Date(strptime(date, "%d/%m/%Y"))] NO2[, c := as.numeric(format(day, "%Y")) ] NO2[,c("Year","Month","Day","date"):=NULL] NO2 <- NO2[X != 'NaN'] #remove missing NO2[,length(na.omit(NO2)),by=list(stn,c)] NO2[, NO2_miss := length(na.omit(NO2)),by=list(stn,c)] NO2<-NO2[NO2_miss > 300] #Rain Rain <- fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/0.raw/temporal MOEP/Rain_D.csv") Rain$date<-paste(Rain$Day,Rain$Month,Rain$Year,sep="/") Rain[, day:=as.Date(strptime(date, "%d/%m/%Y"))] Rain[, c := as.numeric(format(day, "%Y")) ] Rain[,c("Year","Month","Day","date"):=NULL] Rain <- Rain[X != 'NaN'] #remove missing Rain[,length(na.omit(Rain)),by=list(stn,c)] Rain[, Rain_miss := length(na.omit(Rain)),by=list(stn,c)] Rain<-Rain[Rain_miss > 300] #load PA grid (points in "palestine authority") ilgreen <- fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN007_Key_tables/IL.green_grid.csv") # #################### # ###load Terra/Aqua # #################### # #load aod data # terra<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN003_AOd_allyears/AOD_TR_0014.RDS") # terra<- terra[yr >= "2003"] # #clear PA points # terra <- terra[terra$aodid %in% ilgreen$aodid, ] ###load Aqua #load aod data aqua<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN003_AOd_allyears/AOD_AQ_0014.RDS") aqua<- aqua[yr >= "2003"] aqua <- aqua[aqua$aodid %in% ilgreen$aodid, ] #create full LU TS days<-seq.Date(from = as.Date("2003-01-01"), to = as.Date("2013-12-31"), 1) #create date range fullaod <- data.table(expand.grid(aodid = aqua[, unique(aodid)], day = days)) setkey(fullaod,aodid) setkey(aqua,aodid) aodf<-left_join(fullaod,aqua) aodf<-aodf[, c(3:4,6:11) := NULL] #add land use setkey(aodf,aodid) setkey(wlu,aodid) aodf.lu<-left_join(aodf,wlu) 
source("/media/NAS/Uni/org/files/Uni/Projects/code/P031.MIAC_PM/code_snips/nearestbyday_MPM.r") #create PM matrix met.m <- makepointsmatrix(met2003, "X", "Y", "stn") #create aod matrix setkey(test4.se, aodid) lu.m <- makepointsmatrix(test4.se[test4.se[,unique(aodid)], list(x_aod_ITM, y_aod_ITM, aodid), mult = "first"], "x_aod_ITM", "y_aod_ITM", "aodid") closestaodse<- nearestbyday(lu.m ,met.m , test4.se, met2003[, list(day,Temp,stn)], "aodid", "stn", "meanT", "Temp", knearest = 1, maxdistance = NA) ##################################### ##################################### #TEST ##################################### # aodpm5k<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN007_Key_tables/aod_pm_5k.csv") # aqua.se <- aqua[aqua$aodid %in% aodpm5k$aodid, ] # aqua.se<-aqua.se[, c(8:24) := NULL] # # # setkey(aodpm5k,aodid) # setkey(aqua.se,aodid) # aqua.se <- merge(aqua.se,aodpm5k[,list(dist,aodid,stn)], all.x = T) # # setkey(PM25,day,stn) # setkey(aqua.se,day,stn) # m1.s1 <- merge(PM25,aqua.se,all.x = T) # # #to leave only THE 1 closest sat data point to station in each day # setkey(m1.s1,stn,day,dist) # #take first ocurance by day per STN (its sorted by dist so the shortest one) # x<-m1.s1[unique(m1.s1[, list(stn, day)]), mult = "first"] ################################ ##2003 ################################ ## subset AOD aod2003<-aodf[c==2003] #xtract year met met2003<- Temp[c==2003] #create full LU TS days_2003<-seq.Date(from = as.Date("2003-01-01"), to = as.Date("2003-12-31"), 1) #create date range test3.se <- data.table(expand.grid(aodid = wlu[, unique(aodid)], day = days_2003)) setkey(test3.se,aodid) setkey(wlu,aodid) test4.se<- merge(test3.se,wlu,all.x=TRUE) source("/media/NAS/Uni/org/files/Uni/Projects/code/P031.MIAC_PM/code_snips/nearestbyday_MPM.r") #create PM matrix met.m <- makepointsmatrix(met2003, "X", "Y", "stn") #create aod matrix setkey(test4.se, aodid) lu.m <- makepointsmatrix(test4.se[test4.se[,unique(aodid)], 
list(x_aod_ITM, y_aod_ITM, aodid), mult = "first"], "x_aod_ITM", "y_aod_ITM", "aodid") closestaodse<- nearestbyday(lu.m ,met.m , test4.se, met2003[, list(day,Temp,stn)], "aodid", "stn", "meanT", "Temp", knearest = 1, maxdistance = NA) setkey(test4.se,aodid,day) setkey(closestaodse,aodid,day) ot2003 <- merge(test4.se[,list(stn,day,aodid,elev,x_aod_ITM, y_aod_ITM,pblid)], closestaodse[,list(day,Temp,aodid)], all.x = T) #join AOD setkey(ot2003 ,aodid,day) setkey(aod2003,aodid,day) #note there will be missing in PA areas (gaza and east bank) x<-left_join(aod2003, ot2003) x1<-as.data.table(x) x2<- x1[!is.na(pblid)] #Join PBL setkey(pbl , day, pblid) setkey(x2, day, pblid) x3<-left_join(x2, pbl) ################################## #PM25 ################################## #PM25 PM25 <- fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/0.raw/PM/PM25_D.csv") PM25$date<-paste(PM25$Day,PM25$Month,PM25$Year,sep="/") PM25[, day:=as.Date(strptime(date, "%d/%m/%Y"))] PM25[, c := as.numeric(format(day, "%Y")) ] PM25[,c("Year","Month","Day","date"):=NULL] PM25 <- PM25[X != 'NaN'] #num. 
of obsv per year per stn PM25[,length(na.omit(PM25)),by=list(stn,c)] #PM25_m means avialble obs per year PM25[, PM25_n := length(na.omit(PM25)),by=list(stn,c)] #clear non PM25 days PM25<-PM25[!is.na(PM25)] #clear non continous stations PM25 <- PM25[PM25_n > 5 , ] setnames(PM25,"X","x_stn_ITM") setnames(PM25,"Y","y_stn_ITM") ######### #terra ######### # import monitor data and spatial merge with nearestbyday() source("/home/zeltak/org/files/Uni/Projects/code/P031.MIAC_PM/code_snips/nearestbyday.r") #create PM matrix pm.m <- makepointsmatrix(PM25, "x_stn_ITM", "y_stn_ITM", "stn") #create aod terra matrix aod.m <- makepointsmatrix(terra[terra[,unique(aodid)], list(x_aod_ITM, y_aod_ITM, aodid), mult = "first"], "x_aod_ITM", "y_aod_ITM", "aodid") ########### join Terra to PM25 closestaod <- nearestbyday(pm.m, aod.m, PM25, terra [, list(day, aodid, aod,UN,WV,QA)], "stn", "aodid", "closestaod", "aod", knearest = 5, maxdistance = 1500) setkey(PM25,stn,day) setkey(closestaod,stn,day) PM25.m1 <- merge(PM25, closestaod[,list(stn,day,aod,UN,WV,QA)], all.x = T) ##################### #to create a date range based on start and end points use days<-seq.Date(from = as.Date("2002-01-01"), to = as.Date("2012-12-31"), 1) #create date range mg <- data.table(expand.grid(stn = PM25.m1[,unique(stn)], day = days)) ##### start merges setkey(PM25.m1, day,stn) setkey(mg, day,stn) J1 <- merge(mg,PM25.m1, all.x = T) #readd year J1[, c := as.numeric(format(day, "%Y")) ] setkey(Temp, day,stn) setkey(J1, day,stn) J3 <- merge(J1,Temp[,list(day,stn,Temp)], all.x = T) setkey(RH, day,stn) setkey(J3, day,stn) J4 <- merge(J3,RH[,list(day,stn,RH)], all.x = T) setkey(WD, day,stn) setkey(J4, day,stn) J5 <- merge(J4,WD[,list(day,stn,WD)], all.x = T) setkey(WS, day,stn) setkey(J5, day,stn) J5 <- merge(J5,WS[,list(day,stn,WS)], all.x = T) setkey(Rain, day,stn) setkey(J5, day,stn) J5 <- merge(J5,Rain[,list(day,stn,Rain)], all.x = T) setkey(NO2, day,stn) setkey(J5, day,stn) J5 <- 
merge(J5,NO2[,list(day,stn,NO2)], all.x = T) #import stn keytable stnkey<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN007_Key_tables/ILstnXY_aodid.csv") setkey(stnkey,stn) setkey(J5,stn) J7 <- merge(J5,stnkey[,list(stn, aodid)], all.x = T) setkey(lu1,aodid) setkey(J7,aodid) J8 <- merge(J7,lu1, all.x = T) #delete with missing aodid J9 <- J8[!is.na(long_aod)] #add season J9$month <- as.numeric(format(J9$day, "%m")) #1-winter, 2-spring,3-summer,4-autum J9$season<-recode(J9$month,"1=1;2=1;3=2;4=2;5=2;6=3;7=3;8=3;9=4;10=4;11=4;12=1") #1-winter, 2-summer J9$seasonSW<-recode(J9$month,"1=1;2=1;3=1;4=2;5=2;6=2;7=2;8=2;9=2;10=1;11=1;12=1") #Join PBL setkey(pbl , day, pblid) setkey(J9, day, pblid) J11 <- merge(J9, pbl, all.x = T) J11[, c("date", "longitude.x", "latitude.x","latitude.y", "longitude.y") := NULL] #add month J11[, m := as.numeric(format(day, "%m")) ] #join NDVI to aod setkey(ndvid, aodid) setkey(J11, aodid) J12 <- merge(J11, ndvid, all.x = T) #readd year J12[, c := as.numeric(format(day, "%Y")) ] #join NDVI to aod setkey(ndvi, ndviid, c, m ) setkey(J12, ndviid, c, m) J13 <- merge(J12, ndvi, all.x = T) #add dust days dust2<-fread("/media/NAS/Uni/Data/Israel/Dust/DDAqTer28.5.2014.csv") dust2$date<-paste(dust2$Day,dust2$Month,dust2$Year,sep="/") dust2[, day:=as.Date(strptime(date, "%d/%m/%Y"))] dust2[,c("Year","Month","Day","Max","date"):=NULL] setnames(dust2,"StationID","stn") setkey(J13 , day, stn) setkey(dust2, day, stn) J14 <- merge(J13, dust2, all.x = T) J14<-J14[is.na(Dust), Dust:= 0] #add regions and flags reg<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN007_Key_tables/IL_reg_accurate.csv") #PM25 setkey(J14 , aodid) setkey(reg, aodid) jreg10 <- merge(J14, reg, all.x = T) PM25.terra<-jreg10 PM25.terra$A_T<-0 ######### #aqua ######### # import monitor data and spatial merge with nearestbyday() source("/home/zeltak/org/files/Uni/Projects/code/P031.MIAC_PM/code_snips/nearestbyday.r") #create PM matrix pm.m 
<- makepointsmatrix(PM25, "x_stn_ITM", "y_stn_ITM", "stn") #create aod aqua matrix aod.m <- makepointsmatrix(aqua[aqua[,unique(aodid)], list(x_aod_ITM, y_aod_ITM, aodid), mult = "first"], "x_aod_ITM", "y_aod_ITM", "aodid") ########### join Terra to PM25 closestaod <- nearestbyday(pm.m, aod.m, PM25, aqua [, list(day, aodid, aod,UN,WV,QA)], "stn", "aodid", "closestaod", "aod", knearest = 5, maxdistance = 1500) setkey(PM25,stn,day) setkey(closestaod,stn,day) PM25.m1 <- merge(PM25, closestaod[,list(stn,day,aod,UN,WV,QA)], all.x = T) ##################### #to create a date range based on start and end points use days<-seq.Date(from = as.Date("2002-01-01"), to = as.Date("2012-12-31"), 1) #create date range mg <- data.table(expand.grid(stn = PM25.m1[,unique(stn)], day = days)) ##### start merges setkey(PM25.m1, day,stn) setkey(mg, day,stn) J1 <- merge(mg,PM25.m1, all.x = T) #readd year J1[, c := as.numeric(format(day, "%Y")) ] setkey(Temp, day,stn) setkey(J1, day,stn) J3 <- merge(J1,Temp[,list(day,stn,Temp)], all.x = T) setkey(RH, day,stn) setkey(J3, day,stn) J4 <- merge(J3,RH[,list(day,stn,RH)], all.x = T) setkey(WD, day,stn) setkey(J4, day,stn) J5 <- merge(J4,WD[,list(day,stn,WD)], all.x = T) setkey(WS, day,stn) setkey(J5, day,stn) J5 <- merge(J5,WS[,list(day,stn,WS)], all.x = T) setkey(Rain, day,stn) setkey(J5, day,stn) J5 <- merge(J5,Rain[,list(day,stn,Rain)], all.x = T) setkey(NO2, day,stn) setkey(J5, day,stn) J5 <- merge(J5,NO2[,list(day,stn,NO2)], all.x = T) #import stn keytable stnkey<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN007_Key_tables/ILstnXY_aodid.csv") setkey(stnkey,stn) setkey(J5,stn) J7 <- merge(J5,stnkey[,list(stn, aodid)], all.x = T) setkey(lu1,aodid) setkey(J7,aodid) J8 <- merge(J7,lu1, all.x = T) #delete with missing aodid J9 <- J8[!is.na(long_aod)] #add season J9$month <- as.numeric(format(J9$day, "%m")) #1-winter, 2-spring,3-summer,4-autum 
J9$season<-recode(J9$month,"1=1;2=1;3=2;4=2;5=2;6=3;7=3;8=3;9=4;10=4;11=4;12=1") #1-winter, 2-summer J9$seasonSW<-recode(J9$month,"1=1;2=1;3=1;4=2;5=2;6=2;7=2;8=2;9=2;10=1;11=1;12=1") #Join PBL setkey(pbl , day, pblid) setkey(J9, day, pblid) J11 <- merge(J9, pbl, all.x = T) J11[, c("date", "longitude.x", "latitude.x","latitude.y", "longitude.y") := NULL] #add month J11[, m := as.numeric(format(day, "%m")) ] #join NDVI to aod setkey(ndvid, aodid) setkey(J11, aodid) J12 <- merge(J11, ndvid, all.x = T) #readd year J12[, c := as.numeric(format(day, "%Y")) ] #join NDVI to aod setkey(ndvi, ndviid, c, m ) setkey(J12, ndviid, c, m) J13 <- merge(J12, ndvi, all.x = T) #add dust days dust2<-fread("/media/NAS/Uni/Data/Israel/Dust/DDAqTer28.5.2014.csv") dust2$date<-paste(dust2$Day,dust2$Month,dust2$Year,sep="/") dust2[, day:=as.Date(strptime(date, "%d/%m/%Y"))] dust2[,c("Year","Month","Day","Max","date"):=NULL] setnames(dust2,"StationID","stn") setkey(J13 , day, stn) setkey(dust2, day, stn) J14 <- merge(J13, dust2, all.x = T) J14<-J14[is.na(Dust), Dust:= 0] #add regions and flags reg<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN007_Key_tables/IL_reg_accurate.csv") #PM25 setkey(J14 , aodid) setkey(reg, aodid) jreg10 <- merge(J14, reg, all.x = T) PM25.aqua<-jreg10 PM25.aqua$A_T<-1 ##SAVE PM25.AT<-rbindlist(list(PM25.aqua,PM25.terra)) saveRDS(PM25.AT,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN008_model_prep/mod1.PM25all_reg.RDS") ################################## #PM10 ################################## #PM10 PM10 <- fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/0.raw/PM/PM10_D.csv") PM10$date<-paste(PM10$Day,PM10$Month,PM10$Year,sep="/") PM10[, day:=as.Date(strptime(date, "%d/%m/%Y"))] PM10[, c := as.numeric(format(day, "%Y")) ] PM10[,c("Year","Month","Day","date"):=NULL] PM10 <- PM10[X != 'NaN'] #num. 
of obsv per year per stn PM10[,length(na.omit(PM10)),by=list(stn,c)] #pm10_m means avialble obs per year PM10[, PM10_n := length(na.omit(PM10)),by=list(stn,c)] #clear non PM10 days PM10<-PM10[!is.na(PM10)] #clear non continous stations PM10 <- PM10[PM10_n > 5 , ] setnames(PM10,"X","x_stn_ITM") setnames(PM10,"Y","y_stn_ITM") ######### #terra ######### # import monitor data and spatial merge with nearestbyday() source("/home/zeltak/org/files/Uni/Projects/code/P031.MIAC_PM/code_snips/nearestbyday.r") #create PM matrix pm.m <- makepointsmatrix(PM10, "x_stn_ITM", "y_stn_ITM", "stn") #create aod terra matrix aod.m <- makepointsmatrix(terra[terra[,unique(aodid)], list(x_aod_ITM, y_aod_ITM, aodid), mult = "first"], "x_aod_ITM", "y_aod_ITM", "aodid") ########### join Terra to PM10 closestaod <- nearestbyday(pm.m, aod.m, PM10, terra [, list(day, aodid, aod,UN,WV,QA)], "stn", "aodid", "closestaod", "aod", knearest = 5, maxdistance = 1500) ########### # meanaod <- nearestbyday(pm.m, aod.m, # PM10, terra [, list(day, aodid, aod,UN,WV,QA)], # "stn", "aodid", "closestaod", "aod", knearest = 9, maxdistance = 2200, nearestmean = T) setkey(PM10,stn,day) setkey(closestaod,stn,day) PM10.m1 <- merge(PM10, closestaod[,list(stn,day,aod,UN,WV,QA)], all.x = T) ##################### #to create a date range based on start and end points use days<-seq.Date(from = as.Date("2002-01-01"), to = as.Date("2012-12-31"), 1) #create date range mg <- data.table(expand.grid(stn = PM10.m1[,unique(stn)], day = days)) ##### start merges setkey(PM10.m1, day,stn) setkey(mg, day,stn) J1 <- merge(mg,PM10.m1, all.x = T) #readd year J1[, c := as.numeric(format(day, "%Y")) ] setkey(Temp, day,stn) setkey(J1, day,stn) J3 <- merge(J1,Temp[,list(day,stn,Temp)], all.x = T) setkey(RH, day,stn) setkey(J3, day,stn) J4 <- merge(J3,RH[,list(day,stn,RH)], all.x = T) setkey(WD, day,stn) setkey(J4, day,stn) J5 <- merge(J4,WD[,list(day,stn,WD)], all.x = T) setkey(WS, day,stn) setkey(J5, day,stn) J5 <- 
merge(J5,WS[,list(day,stn,WS)], all.x = T) setkey(Rain, day,stn) setkey(J5, day,stn) J5 <- merge(J5,Rain[,list(day,stn,Rain)], all.x = T) setkey(NO2, day,stn) setkey(J5, day,stn) J5 <- merge(J5,NO2[,list(day,stn,NO2)], all.x = T) #import stn keytable stnkey<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN007_Key_tables/ILstnXY_aodid.csv") setkey(stnkey,stn) setkey(J5,stn) J7 <- merge(J5,stnkey[,list(stn, aodid)], all.x = T) setkey(lu1,aodid) setkey(J7,aodid) J8 <- merge(J7,lu1, all.x = T) #delete with missing aodid J9 <- J8[!is.na(long_aod)] #add season J9$month <- as.numeric(format(J9$day, "%m")) #1-winter, 2-spring,3-summer,4-autum J9$season<-recode(J9$month,"1=1;2=1;3=2;4=2;5=2;6=3;7=3;8=3;9=4;10=4;11=4;12=1") #1-winter, 2-summer J9$seasonSW<-recode(J9$month,"1=1;2=1;3=1;4=2;5=2;6=2;7=2;8=2;9=2;10=1;11=1;12=1") #Join PBL setkey(pbl , day, pblid) setkey(J9, day, pblid) J11 <- merge(J9, pbl, all.x = T) J11[, c("date", "longitude.x", "latitude.x","latitude.y", "longitude.y") := NULL] #add month J11[, m := as.numeric(format(day, "%m")) ] #join NDVI to aod setkey(ndvid, aodid) setkey(J11, aodid) J12 <- merge(J11, ndvid, all.x = T) #readd year J12[, c := as.numeric(format(day, "%Y")) ] #join NDVI to aod setkey(ndvi, ndviid, c, m ) setkey(J12, ndviid, c, m) J13 <- merge(J12, ndvi, all.x = T) #add dust days dust2<-fread("/media/NAS/Uni/Data/Israel/Dust/DDAqTer28.5.2014.csv") dust2$date<-paste(dust2$Day,dust2$Month,dust2$Year,sep="/") dust2[, day:=as.Date(strptime(date, "%d/%m/%Y"))] dust2[,c("Year","Month","Day","Max","date"):=NULL] setnames(dust2,"StationID","stn") setkey(J13 , day, stn) setkey(dust2, day, stn) J14 <- merge(J13, dust2, all.x = T) J14<-J14[is.na(Dust), Dust:= 0] #add regions and flags reg<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN007_Key_tables/IL_reg_accurate.csv") #PM10 setkey(J14 , aodid) setkey(reg, aodid) jreg10 <- merge(J14, reg, all.x = T) PM10.terra<-jreg10 PM10.terra$A_T<-1 ######### #aqua 
######### # import monitor data and spatial merge with nearestbyday() source("/home/zeltak/org/files/Uni/Projects/code/P031.MIAC_PM/code_snips/nearestbyday.r") #create PM matrix pm.m <- makepointsmatrix(PM10, "x_stn_ITM", "y_stn_ITM", "stn") #create aod aqua matrix aod.m <- makepointsmatrix(aqua[aqua[,unique(aodid)], list(x_aod_ITM, y_aod_ITM, aodid), mult = "first"], "x_aod_ITM", "y_aod_ITM", "aodid") ########### join Terra to PM10 closestaod <- nearestbyday(pm.m, aod.m, PM10, aqua [, list(day, aodid, aod,UN,WV,QA)], "stn", "aodid", "closestaod", "aod", knearest = 5, maxdistance = 1500) setkey(PM10,stn,day) setkey(closestaod,stn,day) PM10.m1 <- merge(PM10, closestaod[,list(stn,day,aod,UN,WV,QA)], all.x = T) ##################### #to create a date range based on start and end points use days<-seq.Date(from = as.Date("2002-01-01"), to = as.Date("2012-12-31"), 1) #create date range mg <- data.table(expand.grid(stn = PM10.m1[,unique(stn)], day = days)) ##### start merges setkey(PM10.m1, day,stn) setkey(mg, day,stn) J1 <- merge(mg,PM10.m1, all.x = T) #readd year J1[, c := as.numeric(format(day, "%Y")) ] setkey(Temp, day,stn) setkey(J1, day,stn) J3 <- merge(J1,Temp[,list(day,stn,Temp)], all.x = T) setkey(RH, day,stn) setkey(J3, day,stn) J4 <- merge(J3,RH[,list(day,stn,RH)], all.x = T) setkey(WD, day,stn) setkey(J4, day,stn) J5 <- merge(J4,WD[,list(day,stn,WD)], all.x = T) setkey(WS, day,stn) setkey(J5, day,stn) J5 <- merge(J5,WS[,list(day,stn,WS)], all.x = T) setkey(Rain, day,stn) setkey(J5, day,stn) J5 <- merge(J5,Rain[,list(day,stn,Rain)], all.x = T) setkey(NO2, day,stn) setkey(J5, day,stn) J5 <- merge(J5,NO2[,list(day,stn,NO2)], all.x = T) #import stn keytable stnkey<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN007_Key_tables/ILstnXY_aodid.csv") setkey(stnkey,stn) setkey(J5,stn) J7 <- merge(J5,stnkey[,list(stn, aodid)], all.x = T) setkey(lu1,aodid) setkey(J7,aodid) J8 <- merge(J7,lu1, all.x = T) #delete with missing aodid J9 <- 
J8[!is.na(long_aod)] #add season J9$month <- as.numeric(format(J9$day, "%m")) #1-winter, 2-spring,3-summer,4-autum J9$season<-recode(J9$month,"1=1;2=1;3=2;4=2;5=2;6=3;7=3;8=3;9=4;10=4;11=4;12=1") #1-winter, 2-summer J9$seasonSW<-recode(J9$month,"1=1;2=1;3=1;4=2;5=2;6=2;7=2;8=2;9=2;10=1;11=1;12=1") #Join PBL setkey(pbl , day, pblid) setkey(J9, day, pblid) J11 <- merge(J9, pbl, all.x = T) J11[, c("date", "longitude.x", "latitude.x","latitude.y", "longitude.y") := NULL] #add month J11[, m := as.numeric(format(day, "%m")) ] #join NDVI to aod setkey(ndvid, aodid) setkey(J11, aodid) J12 <- merge(J11, ndvid, all.x = T) #readd year J12[, c := as.numeric(format(day, "%Y")) ] #join NDVI to aod setkey(ndvi, ndviid, c, m ) setkey(J12, ndviid, c, m) J13 <- merge(J12, ndvi, all.x = T) #add dust days dust2<-fread("/media/NAS/Uni/Data/Israel/Dust/DDAqTer28.5.2014.csv") dust2$date<-paste(dust2$Day,dust2$Month,dust2$Year,sep="/") dust2[, day:=as.Date(strptime(date, "%d/%m/%Y"))] dust2[,c("Year","Month","Day","Max","date"):=NULL] setnames(dust2,"StationID","stn") setkey(J13 , day, stn) setkey(dust2, day, stn) J14 <- merge(J13, dust2, all.x = T) J14<-J14[is.na(Dust), Dust:= 0] #add regions and flags reg<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN007_Key_tables/IL_reg_accurate.csv") #PM10 setkey(J14 , aodid) setkey(reg, aodid) jreg10 <- merge(J14, reg, all.x = T) PM10.aqua<-jreg10 PM10.aqua$A_T<-0 ##SAVE PM10.AT<-rbindlist(list(PM10.aqua,PM10.terra)) saveRDS(PM10.AT,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN008_model_prep/mod1.PM10all_reg.RDS")
/Uni/Projects/code/P046.Israel_MAIAC/archive/p004_stn_clean_V1.12.2014_V2.r
no_license
zeltak/org
R
false
false
24,923
r
############### #LIBS ############### library(lme4) library(reshape) library(foreign) library(ggplot2) library(plyr) library(data.table) library(reshape2) library(Hmisc) library(mgcv) library(gdata) library(car) ########### import datasets #import NDVI ndvid<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN007_Key_tables/ndviid_aodid.csv") ndvi<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN006_NDVI_yearly/ndvi.rds") #import PBL pbl <- fread("/media/NAS/Uni/Data/Europe/PBL_Europe/dailymeanpbl/fianlpblXY_2002.csv") allbestpredlist <- list() path.data<-"/media/NAS/Uni/Data/Europe/PBL_Europe/dailymeanpbl/" for(i in 2002:2013){ allbestpredlist[[paste0("year_", i)]] <- fread(paste0(path.data, "fianlpblXY_", i, ".csv")) print(i) } allbestpred <- rbindlist(allbestpredlist) rm(allbestpredlist) pbl <- allbestpred[ longitude > 32 & longitude < 37 & latitude < 34 & latitude > 29, ] pbl <- pbl [, day:=as.Date(strptime(date, "%m/%d/%Y"))] #import LU lu1<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/LU1.csv") #add Land cover to LU p_os<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/p_os.csv") p_dev<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/p_devHG.csv") p_dos<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/p_devOS.csv") p_farm<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/p_farming.csv") p_for<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/p_forest.csv") p_ind<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/p_industry.csv") lu1 <- merge(lu1, p_os[, list(aodid,MEAN)], all.x= T, by = c("aodid")) lu1[,p_os:=MEAN*100] lu1[,MEAN:=NULL] lu1 <- merge(lu1, p_dev[, list(aodid,MEAN)], all.x= T, by = c("aodid")) 
lu1[,p_dev:=MEAN*100] lu1[,MEAN:=NULL] lu1 <- merge(lu1, p_dos[, list(aodid,MEAN)], all.x= T, by = c("aodid")) lu1[,p_dos:=MEAN*100] lu1[,MEAN:=NULL] lu1 <- merge(lu1, p_farm[, list(aodid,MEAN)], all.x= T, by = c("aodid")) lu1[,p_farm:=MEAN*100] lu1[,MEAN:=NULL] lu1 <- merge(lu1, p_for[, list(aodid,MEAN)], all.x= T, by = c("aodid")) lu1[,p_for:=MEAN*100] lu1[,MEAN:=NULL] lu1 <- merge(lu1, p_ind[, list(aodid,MEAN)], all.x= T, by = c("aodid")) lu1[,p_ind:=MEAN*100] lu1[,MEAN:=NULL] #delete "palestine" wlu<-lu1[!is.na(p_for)] #Temp Temp <- fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/0.raw/temporal MOEP/Temp_D.csv") Temp$date<-paste(Temp$Day,Temp$Month,Temp$Year,sep="/") Temp[, day:=as.Date(strptime(date, "%d/%m/%Y"))] Temp[, c := as.numeric(format(day, "%Y")) ] Temp[,c("Year","Month","Day","date"):=NULL] Temp <- Temp[X != 'NaN'] Temp <- Temp[Temp != 'NaN'] #WD WD <- fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/0.raw/temporal MOEP/WD_D.csv") WD$date<-paste(WD$Day,WD$Month,WD$Year,sep="/") WD[, day:=as.Date(strptime(date, "%d/%m/%Y"))] WD[, c := as.numeric(format(day, "%Y")) ] WD[,c("Year","Month","Day","date"):=NULL] WS<- WD[X != 'NaN'] #WS WS <- fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/0.raw/temporal MOEP/WS_D.csv") WS$date<-paste(WS$Day,WS$Month,WS$Year,sep="/") WS[, day:=as.Date(strptime(date, "%d/%m/%Y"))] WS[, c := as.numeric(format(day, "%Y")) ] WS[,c("Year","Month","Day","date"):=NULL] WS <- WS[X != 'NA'] #remove missing WS[,length(na.omit(WS)),by=list(stn,c)] WS[, WS_miss := length(na.omit(WS)),by=list(stn,c)] WS<-WS[WS_miss > 300] #RH RH <- fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/0.raw/temporal MOEP/RH_D.csv") RH$date<-paste(RH$Day,RH$Month,RH$Year,sep="/") RH[, day:=as.Date(strptime(date, "%d/%m/%Y"))] RH[, c := as.numeric(format(day, "%Y")) ] RH[,c("Year","Month","Day","date"):=NULL] RH <- RH[X != 'NaN'] #remove missing RH[,length(na.omit(RH)),by=list(stn,c)] RH[, RH_miss := length(na.omit(RH)),by=list(stn,c)] RH<-RH[RH_miss > 300] 
#NO2 NO2 <- fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/0.raw/temporal MOEP/NO2_D.csv") NO2$date<-paste(NO2$Day,NO2$Month,NO2$Year,sep="/") NO2[, day:=as.Date(strptime(date, "%d/%m/%Y"))] NO2[, c := as.numeric(format(day, "%Y")) ] NO2[,c("Year","Month","Day","date"):=NULL] NO2 <- NO2[X != 'NaN'] #remove missing NO2[,length(na.omit(NO2)),by=list(stn,c)] NO2[, NO2_miss := length(na.omit(NO2)),by=list(stn,c)] NO2<-NO2[NO2_miss > 300] #Rain Rain <- fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/0.raw/temporal MOEP/Rain_D.csv") Rain$date<-paste(Rain$Day,Rain$Month,Rain$Year,sep="/") Rain[, day:=as.Date(strptime(date, "%d/%m/%Y"))] Rain[, c := as.numeric(format(day, "%Y")) ] Rain[,c("Year","Month","Day","date"):=NULL] Rain <- Rain[X != 'NaN'] #remove missing Rain[,length(na.omit(Rain)),by=list(stn,c)] Rain[, Rain_miss := length(na.omit(Rain)),by=list(stn,c)] Rain<-Rain[Rain_miss > 300] #load PA grid (points in "palestine authority") ilgreen <- fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN007_Key_tables/IL.green_grid.csv") # #################### # ###load Terra/Aqua # #################### # #load aod data # terra<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN003_AOd_allyears/AOD_TR_0014.RDS") # terra<- terra[yr >= "2003"] # #clear PA points # terra <- terra[terra$aodid %in% ilgreen$aodid, ] ###load Aqua #load aod data aqua<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN003_AOd_allyears/AOD_AQ_0014.RDS") aqua<- aqua[yr >= "2003"] aqua <- aqua[aqua$aodid %in% ilgreen$aodid, ] #create full LU TS days<-seq.Date(from = as.Date("2003-01-01"), to = as.Date("2013-12-31"), 1) #create date range fullaod <- data.table(expand.grid(aodid = aqua[, unique(aodid)], day = days)) setkey(fullaod,aodid) setkey(aqua,aodid) aodf<-left_join(fullaod,aqua) aodf<-aodf[, c(3:4,6:11) := NULL] #add land use setkey(aodf,aodid) setkey(wlu,aodid) aodf.lu<-left_join(aodf,wlu) 
source("/media/NAS/Uni/org/files/Uni/Projects/code/P031.MIAC_PM/code_snips/nearestbyday_MPM.r") #create PM matrix met.m <- makepointsmatrix(met2003, "X", "Y", "stn") #create aod matrix setkey(test4.se, aodid) lu.m <- makepointsmatrix(test4.se[test4.se[,unique(aodid)], list(x_aod_ITM, y_aod_ITM, aodid), mult = "first"], "x_aod_ITM", "y_aod_ITM", "aodid") closestaodse<- nearestbyday(lu.m ,met.m , test4.se, met2003[, list(day,Temp,stn)], "aodid", "stn", "meanT", "Temp", knearest = 1, maxdistance = NA) ##################################### ##################################### #TEST ##################################### # aodpm5k<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN007_Key_tables/aod_pm_5k.csv") # aqua.se <- aqua[aqua$aodid %in% aodpm5k$aodid, ] # aqua.se<-aqua.se[, c(8:24) := NULL] # # # setkey(aodpm5k,aodid) # setkey(aqua.se,aodid) # aqua.se <- merge(aqua.se,aodpm5k[,list(dist,aodid,stn)], all.x = T) # # setkey(PM25,day,stn) # setkey(aqua.se,day,stn) # m1.s1 <- merge(PM25,aqua.se,all.x = T) # # #to leave only THE 1 closest sat data point to station in each day # setkey(m1.s1,stn,day,dist) # #take first ocurance by day per STN (its sorted by dist so the shortest one) # x<-m1.s1[unique(m1.s1[, list(stn, day)]), mult = "first"] ################################ ##2003 ################################ ## subset AOD aod2003<-aodf[c==2003] #xtract year met met2003<- Temp[c==2003] #create full LU TS days_2003<-seq.Date(from = as.Date("2003-01-01"), to = as.Date("2003-12-31"), 1) #create date range test3.se <- data.table(expand.grid(aodid = wlu[, unique(aodid)], day = days_2003)) setkey(test3.se,aodid) setkey(wlu,aodid) test4.se<- merge(test3.se,wlu,all.x=TRUE) source("/media/NAS/Uni/org/files/Uni/Projects/code/P031.MIAC_PM/code_snips/nearestbyday_MPM.r") #create PM matrix met.m <- makepointsmatrix(met2003, "X", "Y", "stn") #create aod matrix setkey(test4.se, aodid) lu.m <- makepointsmatrix(test4.se[test4.se[,unique(aodid)], 
list(x_aod_ITM, y_aod_ITM, aodid), mult = "first"], "x_aod_ITM", "y_aod_ITM", "aodid") closestaodse<- nearestbyday(lu.m ,met.m , test4.se, met2003[, list(day,Temp,stn)], "aodid", "stn", "meanT", "Temp", knearest = 1, maxdistance = NA) setkey(test4.se,aodid,day) setkey(closestaodse,aodid,day) ot2003 <- merge(test4.se[,list(stn,day,aodid,elev,x_aod_ITM, y_aod_ITM,pblid)], closestaodse[,list(day,Temp,aodid)], all.x = T) #join AOD setkey(ot2003 ,aodid,day) setkey(aod2003,aodid,day) #note there will be missing in PA areas (gaza and east bank) x<-left_join(aod2003, ot2003) x1<-as.data.table(x) x2<- x1[!is.na(pblid)] #Join PBL setkey(pbl , day, pblid) setkey(x2, day, pblid) x3<-left_join(x2, pbl) ################################## #PM25 ################################## #PM25 PM25 <- fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/0.raw/PM/PM25_D.csv") PM25$date<-paste(PM25$Day,PM25$Month,PM25$Year,sep="/") PM25[, day:=as.Date(strptime(date, "%d/%m/%Y"))] PM25[, c := as.numeric(format(day, "%Y")) ] PM25[,c("Year","Month","Day","date"):=NULL] PM25 <- PM25[X != 'NaN'] #num. 
of obsv per year per stn PM25[,length(na.omit(PM25)),by=list(stn,c)] #PM25_m means avialble obs per year PM25[, PM25_n := length(na.omit(PM25)),by=list(stn,c)] #clear non PM25 days PM25<-PM25[!is.na(PM25)] #clear non continous stations PM25 <- PM25[PM25_n > 5 , ] setnames(PM25,"X","x_stn_ITM") setnames(PM25,"Y","y_stn_ITM") ######### #terra ######### # import monitor data and spatial merge with nearestbyday() source("/home/zeltak/org/files/Uni/Projects/code/P031.MIAC_PM/code_snips/nearestbyday.r") #create PM matrix pm.m <- makepointsmatrix(PM25, "x_stn_ITM", "y_stn_ITM", "stn") #create aod terra matrix aod.m <- makepointsmatrix(terra[terra[,unique(aodid)], list(x_aod_ITM, y_aod_ITM, aodid), mult = "first"], "x_aod_ITM", "y_aod_ITM", "aodid") ########### join Terra to PM25 closestaod <- nearestbyday(pm.m, aod.m, PM25, terra [, list(day, aodid, aod,UN,WV,QA)], "stn", "aodid", "closestaod", "aod", knearest = 5, maxdistance = 1500) setkey(PM25,stn,day) setkey(closestaod,stn,day) PM25.m1 <- merge(PM25, closestaod[,list(stn,day,aod,UN,WV,QA)], all.x = T) ##################### #to create a date range based on start and end points use days<-seq.Date(from = as.Date("2002-01-01"), to = as.Date("2012-12-31"), 1) #create date range mg <- data.table(expand.grid(stn = PM25.m1[,unique(stn)], day = days)) ##### start merges setkey(PM25.m1, day,stn) setkey(mg, day,stn) J1 <- merge(mg,PM25.m1, all.x = T) #readd year J1[, c := as.numeric(format(day, "%Y")) ] setkey(Temp, day,stn) setkey(J1, day,stn) J3 <- merge(J1,Temp[,list(day,stn,Temp)], all.x = T) setkey(RH, day,stn) setkey(J3, day,stn) J4 <- merge(J3,RH[,list(day,stn,RH)], all.x = T) setkey(WD, day,stn) setkey(J4, day,stn) J5 <- merge(J4,WD[,list(day,stn,WD)], all.x = T) setkey(WS, day,stn) setkey(J5, day,stn) J5 <- merge(J5,WS[,list(day,stn,WS)], all.x = T) setkey(Rain, day,stn) setkey(J5, day,stn) J5 <- merge(J5,Rain[,list(day,stn,Rain)], all.x = T) setkey(NO2, day,stn) setkey(J5, day,stn) J5 <- 
merge(J5,NO2[,list(day,stn,NO2)], all.x = T) #import stn keytable stnkey<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN007_Key_tables/ILstnXY_aodid.csv") setkey(stnkey,stn) setkey(J5,stn) J7 <- merge(J5,stnkey[,list(stn, aodid)], all.x = T) setkey(lu1,aodid) setkey(J7,aodid) J8 <- merge(J7,lu1, all.x = T) #delete with missing aodid J9 <- J8[!is.na(long_aod)] #add season J9$month <- as.numeric(format(J9$day, "%m")) #1-winter, 2-spring,3-summer,4-autum J9$season<-recode(J9$month,"1=1;2=1;3=2;4=2;5=2;6=3;7=3;8=3;9=4;10=4;11=4;12=1") #1-winter, 2-summer J9$seasonSW<-recode(J9$month,"1=1;2=1;3=1;4=2;5=2;6=2;7=2;8=2;9=2;10=1;11=1;12=1") #Join PBL setkey(pbl , day, pblid) setkey(J9, day, pblid) J11 <- merge(J9, pbl, all.x = T) J11[, c("date", "longitude.x", "latitude.x","latitude.y", "longitude.y") := NULL] #add month J11[, m := as.numeric(format(day, "%m")) ] #join NDVI to aod setkey(ndvid, aodid) setkey(J11, aodid) J12 <- merge(J11, ndvid, all.x = T) #readd year J12[, c := as.numeric(format(day, "%Y")) ] #join NDVI to aod setkey(ndvi, ndviid, c, m ) setkey(J12, ndviid, c, m) J13 <- merge(J12, ndvi, all.x = T) #add dust days dust2<-fread("/media/NAS/Uni/Data/Israel/Dust/DDAqTer28.5.2014.csv") dust2$date<-paste(dust2$Day,dust2$Month,dust2$Year,sep="/") dust2[, day:=as.Date(strptime(date, "%d/%m/%Y"))] dust2[,c("Year","Month","Day","Max","date"):=NULL] setnames(dust2,"StationID","stn") setkey(J13 , day, stn) setkey(dust2, day, stn) J14 <- merge(J13, dust2, all.x = T) J14<-J14[is.na(Dust), Dust:= 0] #add regions and flags reg<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN007_Key_tables/IL_reg_accurate.csv") #PM25 setkey(J14 , aodid) setkey(reg, aodid) jreg10 <- merge(J14, reg, all.x = T) PM25.terra<-jreg10 PM25.terra$A_T<-0 ######### #aqua ######### # import monitor data and spatial merge with nearestbyday() source("/home/zeltak/org/files/Uni/Projects/code/P031.MIAC_PM/code_snips/nearestbyday.r") #create PM matrix pm.m 
<- makepointsmatrix(PM25, "x_stn_ITM", "y_stn_ITM", "stn") #create aod aqua matrix aod.m <- makepointsmatrix(aqua[aqua[,unique(aodid)], list(x_aod_ITM, y_aod_ITM, aodid), mult = "first"], "x_aod_ITM", "y_aod_ITM", "aodid") ########### join Terra to PM25 closestaod <- nearestbyday(pm.m, aod.m, PM25, aqua [, list(day, aodid, aod,UN,WV,QA)], "stn", "aodid", "closestaod", "aod", knearest = 5, maxdistance = 1500) setkey(PM25,stn,day) setkey(closestaod,stn,day) PM25.m1 <- merge(PM25, closestaod[,list(stn,day,aod,UN,WV,QA)], all.x = T) ##################### #to create a date range based on start and end points use days<-seq.Date(from = as.Date("2002-01-01"), to = as.Date("2012-12-31"), 1) #create date range mg <- data.table(expand.grid(stn = PM25.m1[,unique(stn)], day = days)) ##### start merges setkey(PM25.m1, day,stn) setkey(mg, day,stn) J1 <- merge(mg,PM25.m1, all.x = T) #readd year J1[, c := as.numeric(format(day, "%Y")) ] setkey(Temp, day,stn) setkey(J1, day,stn) J3 <- merge(J1,Temp[,list(day,stn,Temp)], all.x = T) setkey(RH, day,stn) setkey(J3, day,stn) J4 <- merge(J3,RH[,list(day,stn,RH)], all.x = T) setkey(WD, day,stn) setkey(J4, day,stn) J5 <- merge(J4,WD[,list(day,stn,WD)], all.x = T) setkey(WS, day,stn) setkey(J5, day,stn) J5 <- merge(J5,WS[,list(day,stn,WS)], all.x = T) setkey(Rain, day,stn) setkey(J5, day,stn) J5 <- merge(J5,Rain[,list(day,stn,Rain)], all.x = T) setkey(NO2, day,stn) setkey(J5, day,stn) J5 <- merge(J5,NO2[,list(day,stn,NO2)], all.x = T) #import stn keytable stnkey<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN007_Key_tables/ILstnXY_aodid.csv") setkey(stnkey,stn) setkey(J5,stn) J7 <- merge(J5,stnkey[,list(stn, aodid)], all.x = T) setkey(lu1,aodid) setkey(J7,aodid) J8 <- merge(J7,lu1, all.x = T) #delete with missing aodid J9 <- J8[!is.na(long_aod)] #add season J9$month <- as.numeric(format(J9$day, "%m")) #1-winter, 2-spring,3-summer,4-autum 
J9$season<-recode(J9$month,"1=1;2=1;3=2;4=2;5=2;6=3;7=3;8=3;9=4;10=4;11=4;12=1") #1-winter, 2-summer J9$seasonSW<-recode(J9$month,"1=1;2=1;3=1;4=2;5=2;6=2;7=2;8=2;9=2;10=1;11=1;12=1") #Join PBL setkey(pbl , day, pblid) setkey(J9, day, pblid) J11 <- merge(J9, pbl, all.x = T) J11[, c("date", "longitude.x", "latitude.x","latitude.y", "longitude.y") := NULL] #add month J11[, m := as.numeric(format(day, "%m")) ] #join NDVI to aod setkey(ndvid, aodid) setkey(J11, aodid) J12 <- merge(J11, ndvid, all.x = T) #readd year J12[, c := as.numeric(format(day, "%Y")) ] #join NDVI to aod setkey(ndvi, ndviid, c, m ) setkey(J12, ndviid, c, m) J13 <- merge(J12, ndvi, all.x = T) #add dust days dust2<-fread("/media/NAS/Uni/Data/Israel/Dust/DDAqTer28.5.2014.csv") dust2$date<-paste(dust2$Day,dust2$Month,dust2$Year,sep="/") dust2[, day:=as.Date(strptime(date, "%d/%m/%Y"))] dust2[,c("Year","Month","Day","Max","date"):=NULL] setnames(dust2,"StationID","stn") setkey(J13 , day, stn) setkey(dust2, day, stn) J14 <- merge(J13, dust2, all.x = T) J14<-J14[is.na(Dust), Dust:= 0] #add regions and flags reg<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN007_Key_tables/IL_reg_accurate.csv") #PM25 setkey(J14 , aodid) setkey(reg, aodid) jreg10 <- merge(J14, reg, all.x = T) PM25.aqua<-jreg10 PM25.aqua$A_T<-1 ##SAVE PM25.AT<-rbindlist(list(PM25.aqua,PM25.terra)) saveRDS(PM25.AT,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN008_model_prep/mod1.PM25all_reg.RDS") ################################## #PM10 ################################## #PM10 PM10 <- fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/0.raw/PM/PM10_D.csv") PM10$date<-paste(PM10$Day,PM10$Month,PM10$Year,sep="/") PM10[, day:=as.Date(strptime(date, "%d/%m/%Y"))] PM10[, c := as.numeric(format(day, "%Y")) ] PM10[,c("Year","Month","Day","date"):=NULL] PM10 <- PM10[X != 'NaN'] #num. 
of obsv per year per stn PM10[,length(na.omit(PM10)),by=list(stn,c)] #pm10_m means avialble obs per year PM10[, PM10_n := length(na.omit(PM10)),by=list(stn,c)] #clear non PM10 days PM10<-PM10[!is.na(PM10)] #clear non continous stations PM10 <- PM10[PM10_n > 5 , ] setnames(PM10,"X","x_stn_ITM") setnames(PM10,"Y","y_stn_ITM") ######### #terra ######### # import monitor data and spatial merge with nearestbyday() source("/home/zeltak/org/files/Uni/Projects/code/P031.MIAC_PM/code_snips/nearestbyday.r") #create PM matrix pm.m <- makepointsmatrix(PM10, "x_stn_ITM", "y_stn_ITM", "stn") #create aod terra matrix aod.m <- makepointsmatrix(terra[terra[,unique(aodid)], list(x_aod_ITM, y_aod_ITM, aodid), mult = "first"], "x_aod_ITM", "y_aod_ITM", "aodid") ########### join Terra to PM10 closestaod <- nearestbyday(pm.m, aod.m, PM10, terra [, list(day, aodid, aod,UN,WV,QA)], "stn", "aodid", "closestaod", "aod", knearest = 5, maxdistance = 1500) ########### # meanaod <- nearestbyday(pm.m, aod.m, # PM10, terra [, list(day, aodid, aod,UN,WV,QA)], # "stn", "aodid", "closestaod", "aod", knearest = 9, maxdistance = 2200, nearestmean = T) setkey(PM10,stn,day) setkey(closestaod,stn,day) PM10.m1 <- merge(PM10, closestaod[,list(stn,day,aod,UN,WV,QA)], all.x = T) ##################### #to create a date range based on start and end points use days<-seq.Date(from = as.Date("2002-01-01"), to = as.Date("2012-12-31"), 1) #create date range mg <- data.table(expand.grid(stn = PM10.m1[,unique(stn)], day = days)) ##### start merges setkey(PM10.m1, day,stn) setkey(mg, day,stn) J1 <- merge(mg,PM10.m1, all.x = T) #readd year J1[, c := as.numeric(format(day, "%Y")) ] setkey(Temp, day,stn) setkey(J1, day,stn) J3 <- merge(J1,Temp[,list(day,stn,Temp)], all.x = T) setkey(RH, day,stn) setkey(J3, day,stn) J4 <- merge(J3,RH[,list(day,stn,RH)], all.x = T) setkey(WD, day,stn) setkey(J4, day,stn) J5 <- merge(J4,WD[,list(day,stn,WD)], all.x = T) setkey(WS, day,stn) setkey(J5, day,stn) J5 <- 
merge(J5,WS[,list(day,stn,WS)], all.x = T) setkey(Rain, day,stn) setkey(J5, day,stn) J5 <- merge(J5,Rain[,list(day,stn,Rain)], all.x = T) setkey(NO2, day,stn) setkey(J5, day,stn) J5 <- merge(J5,NO2[,list(day,stn,NO2)], all.x = T) #import stn keytable stnkey<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN007_Key_tables/ILstnXY_aodid.csv") setkey(stnkey,stn) setkey(J5,stn) J7 <- merge(J5,stnkey[,list(stn, aodid)], all.x = T) setkey(lu1,aodid) setkey(J7,aodid) J8 <- merge(J7,lu1, all.x = T) #delete with missing aodid J9 <- J8[!is.na(long_aod)] #add season J9$month <- as.numeric(format(J9$day, "%m")) #1-winter, 2-spring,3-summer,4-autum J9$season<-recode(J9$month,"1=1;2=1;3=2;4=2;5=2;6=3;7=3;8=3;9=4;10=4;11=4;12=1") #1-winter, 2-summer J9$seasonSW<-recode(J9$month,"1=1;2=1;3=1;4=2;5=2;6=2;7=2;8=2;9=2;10=1;11=1;12=1") #Join PBL setkey(pbl , day, pblid) setkey(J9, day, pblid) J11 <- merge(J9, pbl, all.x = T) J11[, c("date", "longitude.x", "latitude.x","latitude.y", "longitude.y") := NULL] #add month J11[, m := as.numeric(format(day, "%m")) ] #join NDVI to aod setkey(ndvid, aodid) setkey(J11, aodid) J12 <- merge(J11, ndvid, all.x = T) #readd year J12[, c := as.numeric(format(day, "%Y")) ] #join NDVI to aod setkey(ndvi, ndviid, c, m ) setkey(J12, ndviid, c, m) J13 <- merge(J12, ndvi, all.x = T) #add dust days dust2<-fread("/media/NAS/Uni/Data/Israel/Dust/DDAqTer28.5.2014.csv") dust2$date<-paste(dust2$Day,dust2$Month,dust2$Year,sep="/") dust2[, day:=as.Date(strptime(date, "%d/%m/%Y"))] dust2[,c("Year","Month","Day","Max","date"):=NULL] setnames(dust2,"StationID","stn") setkey(J13 , day, stn) setkey(dust2, day, stn) J14 <- merge(J13, dust2, all.x = T) J14<-J14[is.na(Dust), Dust:= 0] #add regions and flags reg<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN007_Key_tables/IL_reg_accurate.csv") #PM10 setkey(J14 , aodid) setkey(reg, aodid) jreg10 <- merge(J14, reg, all.x = T) PM10.terra<-jreg10 PM10.terra$A_T<-1 ######### #aqua 
######### # import monitor data and spatial merge with nearestbyday() source("/home/zeltak/org/files/Uni/Projects/code/P031.MIAC_PM/code_snips/nearestbyday.r") #create PM matrix pm.m <- makepointsmatrix(PM10, "x_stn_ITM", "y_stn_ITM", "stn") #create aod aqua matrix aod.m <- makepointsmatrix(aqua[aqua[,unique(aodid)], list(x_aod_ITM, y_aod_ITM, aodid), mult = "first"], "x_aod_ITM", "y_aod_ITM", "aodid") ########### join Terra to PM10 closestaod <- nearestbyday(pm.m, aod.m, PM10, aqua [, list(day, aodid, aod,UN,WV,QA)], "stn", "aodid", "closestaod", "aod", knearest = 5, maxdistance = 1500) setkey(PM10,stn,day) setkey(closestaod,stn,day) PM10.m1 <- merge(PM10, closestaod[,list(stn,day,aod,UN,WV,QA)], all.x = T) ##################### #to create a date range based on start and end points use days<-seq.Date(from = as.Date("2002-01-01"), to = as.Date("2012-12-31"), 1) #create date range mg <- data.table(expand.grid(stn = PM10.m1[,unique(stn)], day = days)) ##### start merges setkey(PM10.m1, day,stn) setkey(mg, day,stn) J1 <- merge(mg,PM10.m1, all.x = T) #readd year J1[, c := as.numeric(format(day, "%Y")) ] setkey(Temp, day,stn) setkey(J1, day,stn) J3 <- merge(J1,Temp[,list(day,stn,Temp)], all.x = T) setkey(RH, day,stn) setkey(J3, day,stn) J4 <- merge(J3,RH[,list(day,stn,RH)], all.x = T) setkey(WD, day,stn) setkey(J4, day,stn) J5 <- merge(J4,WD[,list(day,stn,WD)], all.x = T) setkey(WS, day,stn) setkey(J5, day,stn) J5 <- merge(J5,WS[,list(day,stn,WS)], all.x = T) setkey(Rain, day,stn) setkey(J5, day,stn) J5 <- merge(J5,Rain[,list(day,stn,Rain)], all.x = T) setkey(NO2, day,stn) setkey(J5, day,stn) J5 <- merge(J5,NO2[,list(day,stn,NO2)], all.x = T) #import stn keytable stnkey<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN007_Key_tables/ILstnXY_aodid.csv") setkey(stnkey,stn) setkey(J5,stn) J7 <- merge(J5,stnkey[,list(stn, aodid)], all.x = T) setkey(lu1,aodid) setkey(J7,aodid) J8 <- merge(J7,lu1, all.x = T) #delete with missing aodid J9 <- 
J8[!is.na(long_aod)] #add season J9$month <- as.numeric(format(J9$day, "%m")) #1-winter, 2-spring,3-summer,4-autum J9$season<-recode(J9$month,"1=1;2=1;3=2;4=2;5=2;6=3;7=3;8=3;9=4;10=4;11=4;12=1") #1-winter, 2-summer J9$seasonSW<-recode(J9$month,"1=1;2=1;3=1;4=2;5=2;6=2;7=2;8=2;9=2;10=1;11=1;12=1") #Join PBL setkey(pbl , day, pblid) setkey(J9, day, pblid) J11 <- merge(J9, pbl, all.x = T) J11[, c("date", "longitude.x", "latitude.x","latitude.y", "longitude.y") := NULL] #add month J11[, m := as.numeric(format(day, "%m")) ] #join NDVI to aod setkey(ndvid, aodid) setkey(J11, aodid) J12 <- merge(J11, ndvid, all.x = T) #readd year J12[, c := as.numeric(format(day, "%Y")) ] #join NDVI to aod setkey(ndvi, ndviid, c, m ) setkey(J12, ndviid, c, m) J13 <- merge(J12, ndvi, all.x = T) #add dust days dust2<-fread("/media/NAS/Uni/Data/Israel/Dust/DDAqTer28.5.2014.csv") dust2$date<-paste(dust2$Day,dust2$Month,dust2$Year,sep="/") dust2[, day:=as.Date(strptime(date, "%d/%m/%Y"))] dust2[,c("Year","Month","Day","Max","date"):=NULL] setnames(dust2,"StationID","stn") setkey(J13 , day, stn) setkey(dust2, day, stn) J14 <- merge(J13, dust2, all.x = T) J14<-J14[is.na(Dust), Dust:= 0] #add regions and flags reg<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN007_Key_tables/IL_reg_accurate.csv") #PM10 setkey(J14 , aodid) setkey(reg, aodid) jreg10 <- merge(J14, reg, all.x = T) PM10.aqua<-jreg10 PM10.aqua$A_T<-0 ##SAVE PM10.AT<-rbindlist(list(PM10.aqua,PM10.terra)) saveRDS(PM10.AT,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN008_model_prep/mod1.PM10all_reg.RDS")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/catboost.R \name{catboost.sum_models} \alias{catboost.sum_models} \title{Sum models.} \usage{ catboost.sum_models( models, weights = NULL, ctr_merge_policy = "IntersectingCountersAverage" ) } \arguments{ \item{models}{Models for the summation. Default value: Required argument} \item{weights}{The weights of the models. Default value: NULL (use weight 1 for every model)} \item{ctr_merge_policy}{The counters merging policy. Possible values: \itemize{ \item 'FailIfCtrIntersects' Ensure that the models have zero intersecting counters \item 'LeaveMostDiversifiedTable' Use the most diversified counters by the count of unique hash values \item 'IntersectingCountersAverage' Use the average ctr counter values in the intersecting bins } Default value: 'IntersectingCountersAverage'} } \value{ Model object. } \description{ Blend trees and counters of two or more trained CatBoost models into a new model. Leaf values can be individually weighted for each input model. For example, it may be useful to blend models trained on different validation datasets. }
/catboost/R-package/man/catboost.sum_models.Rd
permissive
catboost/catboost
R
false
true
1,190
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/catboost.R \name{catboost.sum_models} \alias{catboost.sum_models} \title{Sum models.} \usage{ catboost.sum_models( models, weights = NULL, ctr_merge_policy = "IntersectingCountersAverage" ) } \arguments{ \item{models}{Models for the summation. Default value: Required argument} \item{weights}{The weights of the models. Default value: NULL (use weight 1 for every model)} \item{ctr_merge_policy}{The counters merging policy. Possible values: \itemize{ \item 'FailIfCtrIntersects' Ensure that the models have zero intersecting counters \item 'LeaveMostDiversifiedTable' Use the most diversified counters by the count of unique hash values \item 'IntersectingCountersAverage' Use the average ctr counter values in the intersecting bins } Default value: 'IntersectingCountersAverage'} } \value{ Model object. } \description{ Blend trees and counters of two or more trained CatBoost models into a new model. Leaf values can be individually weighted for each input model. For example, it may be useful to blend models trained on different validation datasets. }
png(file="plot1.png",width=480,height=480) electric<-read.table("household_power_consumption.txt",header=TRUE,sep=";",na.strings="?",stringsAsFactors=FALSE) electric<-electric[electric$Date=="1/2/2007" | electric$Date=="2/2/2007", ] x<-strptime(paste(electric$Date,electric$Time,sep=" "), "%d/%m/%Y %H:%M:%S" ) par(mar=c(5,5,4,2)) hist(electric$Global_active_power,col="red",xlab="Global Active Power (kilowatts)",main="Global Active Power",ylab="Frequency") ##dev.copy(png,file="plot1.png",width=480,height=480) better graphics using png upfront dev.off()
/plot1.R
no_license
JohnRusnak100/ExData_Plotting1
R
false
false
580
r
png(file="plot1.png",width=480,height=480) electric<-read.table("household_power_consumption.txt",header=TRUE,sep=";",na.strings="?",stringsAsFactors=FALSE) electric<-electric[electric$Date=="1/2/2007" | electric$Date=="2/2/2007", ] x<-strptime(paste(electric$Date,electric$Time,sep=" "), "%d/%m/%Y %H:%M:%S" ) par(mar=c(5,5,4,2)) hist(electric$Global_active_power,col="red",xlab="Global Active Power (kilowatts)",main="Global Active Power",ylab="Frequency") ##dev.copy(png,file="plot1.png",width=480,height=480) better graphics using png upfront dev.off()
############################ ### Activation Functions ### # useful in many (most?) statistical models in some way or another # useful in neural networks, etc... # Wikipedia entry: https://en.wikipedia.org/wiki/Activation_function # Identity Function identity_function = function(x){stopifnot(is.numeric(x));return(x)} # Binary Step Function step_function = function(x){ stopifnot(is.numeric(x)) if(x < 0){return(0)}else{ return(1) #x >= 0 } } # Standard Logistic Function logistic_function = function(x){ stopifnot(is.numeric(x)) return(1 / (1 + exp(-x))) } # Hyperbolic Tangent Function hyperbolic_tangent_function = function(x){ # rewriting instead of using tanh() canned function for personal edification stopifnot(is.numeric(x)) return((exp(x) - exp(-x))/(exp(x) + exp(-x))) } # Softsign Activation Function softsign_function = function(x){ stopifnot(is.numeric(x)) return(x/(1 + abs(x))) } # Inverse Square Root Unit Activation Function # https://arxiv.org/abs/1710.09967 isru_function = function(x, alpha){ stopifnot(is.numeric(x)) stopifnot(alpha >= 0) return(x/sqrt(1 + alpha*x^2)) } # Inverse Square Root Linear Unit Activation Function # https://arxiv.org/abs/1710.09967 isrlu_function = Vectorize(function(x, alpha){ stopifnot(is.numeric(x)) stopifnot(alpha >= 0) if(x >= 0){return(x)}else{return(x/sqrt(1 + alpha*x^2))} }, vectorize.args = "x") # Rectified Linear Unit Activation Function relu_function = Vectorize(function(x){ stopifnot(is.numeric(x)) if(x > 0){return(x)}else{return(0)} }, vectorize.args = "x") # Leaky Rectified Linear Unit Activation Function leaky_relu_function = Vectorize(function(x){ stopifnot(is.numeric(x)) if(x >= 0){return(x)}else{return(0.01*x)} }, vectorize.args = "x") # Exponential Linear Unit Activation Function elu_function = Vectorize(function(x, alpha){ stopifnot(is.numeric(x)) stopifnot(is.numeric(alpha)) if(x > 0){return(x)}else{return(alpha*(exp(x) - 1))} }, vectorize.args = "x") # Softplus Activation Function softplus_function = function(x){ 
stopifnot(is.numeric(x)) return(log(1 + exp(x))) } # Bent Identity Activation Function bent_identity_function = function(x){ stopifnot(is.numeric(x)) return((sqrt(x^2 + 1) - 1)/2 + x) }
/activation_function.R
no_license
jacobmunson/PetProjects
R
false
false
2,256
r
############################ ### Activation Functions ### # useful in many (most?) statistical models in some way or another # useful in neural networks, etc... # Wikipedia entry: https://en.wikipedia.org/wiki/Activation_function # Identity Function identity_function = function(x){stopifnot(is.numeric(x));return(x)} # Binary Step Function step_function = function(x){ stopifnot(is.numeric(x)) if(x < 0){return(0)}else{ return(1) #x >= 0 } } # Standard Logistic Function logistic_function = function(x){ stopifnot(is.numeric(x)) return(1 / (1 + exp(-x))) } # Hyperbolic Tangent Function hyperbolic_tangent_function = function(x){ # rewriting instead of using tanh() canned function for personal edification stopifnot(is.numeric(x)) return((exp(x) - exp(-x))/(exp(x) + exp(-x))) } # Softsign Activation Function softsign_function = function(x){ stopifnot(is.numeric(x)) return(x/(1 + abs(x))) } # Inverse Square Root Unit Activation Function # https://arxiv.org/abs/1710.09967 isru_function = function(x, alpha){ stopifnot(is.numeric(x)) stopifnot(alpha >= 0) return(x/sqrt(1 + alpha*x^2)) } # Inverse Square Root Linear Unit Activation Function # https://arxiv.org/abs/1710.09967 isrlu_function = Vectorize(function(x, alpha){ stopifnot(is.numeric(x)) stopifnot(alpha >= 0) if(x >= 0){return(x)}else{return(x/sqrt(1 + alpha*x^2))} }, vectorize.args = "x") # Rectified Linear Unit Activation Function relu_function = Vectorize(function(x){ stopifnot(is.numeric(x)) if(x > 0){return(x)}else{return(0)} }, vectorize.args = "x") # Leaky Rectified Linear Unit Activation Function leaky_relu_function = Vectorize(function(x){ stopifnot(is.numeric(x)) if(x >= 0){return(x)}else{return(0.01*x)} }, vectorize.args = "x") # Exponential Linear Unit Activation Function elu_function = Vectorize(function(x, alpha){ stopifnot(is.numeric(x)) stopifnot(is.numeric(alpha)) if(x > 0){return(x)}else{return(alpha*(exp(x) - 1))} }, vectorize.args = "x") # Softplus Activation Function softplus_function = function(x){ 
stopifnot(is.numeric(x)) return(log(1 + exp(x))) } # Bent Identity Activation Function bent_identity_function = function(x){ stopifnot(is.numeric(x)) return((sqrt(x^2 + 1) - 1)/2 + x) }
# library(haven) library(survey) # -- a few helpful functions -- # expit<-function(x){ exp(x)/(1+exp(x)) } logit<-function(x){ log(x/(1-x)) } # function to extend dataset to binary form extendData <- function(clustDatRow, v001, divideWeight=TRUE, useNumChildrenDied=TRUE){ # add extra columns for ageMonth, ageGrpD, v001, v002 if(useNumChildrenDied) { clustDatRow$n = clustDatRow$numChildren clustDatRow$y = clustDatRow$died } n = clustDatRow$n # tmp = data.frame(clustDatRow[c(1, 6:16)]) tmp = data.frame(clustDatRow[c(1, c(4, 6:ncol(clustDatRow)))]) tmp$v001 = v001 ageMonth = rep(0, n) ageGrpD = rep("[0,1)", n) v001 = rep(v001, n) # there is only one child and one mother per household. # All 25 households are sampled v002 = 1:n y = c(rep(0,n-clustDatRow$y), rep(1, clustDatRow$y)) if(clustDatRow["urban"][1,1]){ urbanRural = rep("urban", n) } else { urbanRural = rep("rural", n) } # admin1 = rep(clustDatRow$admin1, n) res = merge(data.frame(y, ageMonth, ageGrpD, v001, v002, urbanRural), tmp, by="v001") # the below line was commented out since each cluster only has one type of admin and urban level. # The equivalent line has been added into the parent function # res$regionRural <- with(res, interaction(admin1, urbanRural), drop=TRUE) if(divideWeight) res$samplingWeight = res$samplingWeight / n return(res) } extendDataDat <- function(clustDatRow, v001, divideWeight=TRUE){ # add extra columns for ageMonth, ageGrpD, v001, v002 n = clustDatRow$n # the only things we need are admin1 and sampling weight, but we must get rid of # urban, y, and the number of women since those will be recalculated # tmp = data.frame(clustDatRow[c(1, 6:16)]) # tmp = data.frame(clustDatRow[c(1, c(4, 6:ncol(clustDatRow)))]) tmp = data.frame(clustDatRow[c(1, c(4, 6:(ncol(clustDatRow) - 2)))]) tmp$v001 = v001 ageMonth = rep(0, n) # ageGrpD = rep("[0,1)", n) v001 = rep(v001, n) # there is only one child and one mother per household. 
# All 25 households are sampled v002 = 1:n y = c(rep(0,n-clustDatRow$y), rep(1, clustDatRow$y)) if(clustDatRow["urban"][1,1]){ urbanRural = rep("urban", n) } else { urbanRural = rep("rural", n) } # admin1 = rep(clustDatRow$admin1, n) # res = merge(data.frame(y, ageMonth, ageGrpD, v001, v002, urbanRural), tmp, by="v001") res = merge(data.frame(y, ageMonth, v001, v002, urbanRural), tmp, by="v001") # the below line was commented out since each cluster only has one type of admin and urban level. # The equivalent line has been added into the parent function # res$regionRural <- with(res, interaction(admin1, urbanRural), drop=TRUE) if(divideWeight) res$samplingWeight = res$samplingWeight / n return(res) } # - a function that reads in a glm or svyglm - # # - object and returns the estimate and SE - # # - specifics in the supplementary materials - # ## This function takes care of the delta method ## to calculate the variance of u5m as a function ## of the age specific hazards, \beta_a . get.est<-function(glm.ob){ beta<-summary(glm.ob)$coef[,1] est <-expit(beta) var.est <- vcov(glm.ob)[1,1] # compute 80% CI intervals lower <- logit(est)+qnorm(c(0.9))*sqrt(var.est) upper <- logit(est)+qnorm(c(0.1))*sqrt(var.est) return(c(est,lower, upper,logit(est),var.est)) } # -- a function to subset the design based on a region and time period -- # # -- and then run the svyglm function and return the get.est() results -- # ## First line in function allows you to subset your data and ALSO the specified ## svydesign object into area (usually v024 variable in DHS) ## and time (per5 is a variable we construct for the 5-year periods in the Stata step) ## Second line fits the survey-weighted glm region.time.HT<-function(dataobj, svydesign, area){ tmp<-subset(svydesign, (admin1==area)) tt2 <- tryCatch(glmob<-svyglm(y.x~1, design=tmp,family=quasibinomial, maxit=50), error=function(e) e, warning=function(w) w) if(is(tt2, "warning")){ if(grepl("agegroups", tt2)){ res <- get.est(glmob) res = c(res, 
2) } else { res = c(rep(NA, 5), 3) } return(res) } if(is(tt2,"error")){ res = c(rep(NA, 5), 1) return(res) } else { res <- get.est(glmob) res = c(res, 0) return(res) } } region.time.HTDat<-function(dataobj, svydesign, area, nationalEstimate){ if(!nationalEstimate) { tmp<-subset(svydesign, (admin1==area)) tt2 <- tryCatch(glmob<-svyglm(y~1, design=tmp,family=quasibinomial, maxit=50), error=function(e) e, warning=function(w) w) } else { thisUrban = area == 1 tmp<-subset(svydesign, (urban==thisUrban)) tt2 <- tryCatch(glmob<-svyglm(y~1, design=tmp,family=quasibinomial, maxit=50), error=function(e) e, warning=function(w) w) } if(is(tt2, "warning")){ if(grepl("agegroups", tt2)){ res <- get.est(glmob) res = c(res, 2) } else { res = c(rep(NA, 5), 3) } return(res) } if(is(tt2,"error")){ res = c(rep(NA, 5), 1) return(res) } else { res <- get.est(glmob) res = c(res, 0) return(res) } } defineSurvey <- function(dat_obj, stratVar, useSamplingWeights=TRUE){ options(survey.lonely.psu="adjust") # --- setting up a place to store results --- # regions <- sort(unique(dat_obj$admin1)) regions_num <- 1:length(regions) results<-data.frame(admin1=rep(regions,each=1)) results$var.est<-results$logit.est<-results$upper<-results$lower<-results$est<-NA results$converge <- NA if(useSamplingWeights){ dat_obj$wt <- dat_obj$samplingWeight } else { dat_obj$wt <- NULL } if(is.null(stratVar)){ # --- setting up the design object --- # ## NOTE: -the v001 denote ## one stage cluster design (v001 is cluster) ## -This call below specifies our survey design ## nest = T argument nests clusters within strata my.svydesign <- svydesign(id= ~v001, strata =NULL, weights=NULL, data=dat_obj) } else { ## not in all surveys does v022 contain the correct sampling strata ## Thus, the correct vector has to be provided externally dat_obj$strat <- stratVar # --- setting up the design object --- # ## NOTE: -the v001 denote ## one stage cluster design (v001 is cluster) ## -This call below specifies our survey design ## nest 
= T argument nests clusters within strata my.svydesign <- svydesign(id= ~v001, strata=~strat, nest=T, weights=~wt, data=dat_obj) } for(i in 1:nrow(results)){ results[i, 2:7] <- region.time.HT(dataobj=dat_obj, svydesign=my.svydesign, area=results$admin1[i]) } return(results) } defineSurveyDat <- function(dat_obj, stratVar, useSamplingWeights=TRUE, nationalEstimate=FALSE, getContrast=nationalEstimate){ options(survey.lonely.psu="adjust") # --- setting up a place to store results --- # regions <- sort(unique(dat_obj$admin1)) regions_num <- 1:length(regions) if(!nationalEstimate) { results<-data.frame(admin1=rep(regions,each=1)) results$var.est<-results$logit.est<-results$upper<-results$lower<-results$est<-NA results$converge <- NA } else { results<-data.frame(urban=c(TRUE, FALSE)) results$var.est<-results$logit.est<-results$upper<-results$lower<-results$est<-NA results$converge <- NA } if(useSamplingWeights){ dat_obj$wt <- dat_obj$samplingWeight } else { dat_obj$wt <- NULL } if(is.null(stratVar)){ # --- setting up the design object --- # ## NOTE: -the v001 denote ## one stage cluster design (v001 is cluster) ## -This call below specifies our survey design ## nest = T argument nests clusters within strata my.svydesign <- svydesign(id= ~v001, strata =NULL, weights=NULL, data=dat_obj) } else { ## not in all surveys does v022 contain the correct sampling strata ## Thus, the correct vector has to be provided externally dat_obj$strat <- stratVar # --- setting up the design object --- # ## NOTE: -the v001 denote ## one stage cluster design (v001 is cluster) ## -This call below specifies our survey design ## nest = T argument nests clusters within strata my.svydesign <- svydesign(id= ~v001, strata=~strat, nest=T, weights=~wt, data=dat_obj) } for(i in 1:nrow(results)){ if(!nationalEstimate) { results[i, 2:7] <- region.time.HTDat(dataobj=dat_obj, svydesign=my.svydesign, area=results$admin1[i], nationalEstimate=nationalEstimate) } else { results[i, 2:7] <- 
region.time.HTDat(dataobj=dat_obj, svydesign=my.svydesign, area=i, nationalEstimate=nationalEstimate) } } if(getContrast) { # out = svyby(~y, by = ~urban, design = svydesign, svymean) glmob<-svyglm(y~urban, design=my.svydesign,family=quasibinomial, maxit=50) # get contrast mean and variance est = glmob$coefficients[2] urbanVar = vcov(glmob)[2,2] # get confidence interval lower = est + qnorm(0.025, sd=sqrt(urbanVar)) upper = est + qnorm(0.975, sd=sqrt(urbanVar)) contrastStats = list(est=est, sd=sqrt(urbanVar), lower95=lower, upper95=upper) return(list(results=results, contrastStats=contrastStats)) } else { return(results) } } # Set dat_obj$admin1 to be something else for different kinds of aggregations run_naive <- function(dat_obj){ regions <- sort(unique(dat_obj$admin1)) regions_num <- 1:length(regions) results<-data.frame(admin1=rep(regions,each=1)) results$var.est<-results$logit.est<-results$upper<-results$lower<-results$est<-NA results$converge <- NA for(i in 1:nrow(results)){ my.glm <- glm(y.x~1, family=binomial, data=dat_obj, subset = admin1 == results$admin1[i] ) # newdat = dat_obj[dat_obj$admin1==results$admin1[i], ] # my.glm2 <- glm(y.x~1, family=binomial, # data=newdat) results[i, 2:7] <- c(get.est(my.glm),0) } return(results) } # running the analysis for the actual mortality dataset is slightly different run_naiveDat <- function(dat_obj){ regions <- sort(unique(dat_obj$admin1)) regions_num <- 1:length(regions) results<-data.frame(admin1=rep(regions,each=1)) results$var.est<-results$logit.est<-results$upper<-results$lower<-results$est<-NA results$converge <- NA for(i in 1:nrow(results)){ my.glm <- glm(y~1, family=binomial, data=dat_obj, subset = admin1 == results$admin1[i] ) # newdat = dat_obj[dat_obj$admin1==results$admin1[i], ] # my.glm2 <- glm(y.x~1, family=binomial, # data=newdat) results[i, 2:7] <- c(get.est(my.glm),0) } return(results) }
/neonatalSimStudyWeighted.R
no_license
paigejo/NMRmanuscript
R
false
false
11,415
r
# library(haven) library(survey) # -- a few helpful functions -- # expit<-function(x){ exp(x)/(1+exp(x)) } logit<-function(x){ log(x/(1-x)) } # function to extend dataset to binary form extendData <- function(clustDatRow, v001, divideWeight=TRUE, useNumChildrenDied=TRUE){ # add extra columns for ageMonth, ageGrpD, v001, v002 if(useNumChildrenDied) { clustDatRow$n = clustDatRow$numChildren clustDatRow$y = clustDatRow$died } n = clustDatRow$n # tmp = data.frame(clustDatRow[c(1, 6:16)]) tmp = data.frame(clustDatRow[c(1, c(4, 6:ncol(clustDatRow)))]) tmp$v001 = v001 ageMonth = rep(0, n) ageGrpD = rep("[0,1)", n) v001 = rep(v001, n) # there is only one child and one mother per household. # All 25 households are sampled v002 = 1:n y = c(rep(0,n-clustDatRow$y), rep(1, clustDatRow$y)) if(clustDatRow["urban"][1,1]){ urbanRural = rep("urban", n) } else { urbanRural = rep("rural", n) } # admin1 = rep(clustDatRow$admin1, n) res = merge(data.frame(y, ageMonth, ageGrpD, v001, v002, urbanRural), tmp, by="v001") # the below line was commented out since each cluster only has one type of admin and urban level. # The equivalent line has been added into the parent function # res$regionRural <- with(res, interaction(admin1, urbanRural), drop=TRUE) if(divideWeight) res$samplingWeight = res$samplingWeight / n return(res) } extendDataDat <- function(clustDatRow, v001, divideWeight=TRUE){ # add extra columns for ageMonth, ageGrpD, v001, v002 n = clustDatRow$n # the only things we need are admin1 and sampling weight, but we must get rid of # urban, y, and the number of women since those will be recalculated # tmp = data.frame(clustDatRow[c(1, 6:16)]) # tmp = data.frame(clustDatRow[c(1, c(4, 6:ncol(clustDatRow)))]) tmp = data.frame(clustDatRow[c(1, c(4, 6:(ncol(clustDatRow) - 2)))]) tmp$v001 = v001 ageMonth = rep(0, n) # ageGrpD = rep("[0,1)", n) v001 = rep(v001, n) # there is only one child and one mother per household. 
# All 25 households are sampled v002 = 1:n y = c(rep(0,n-clustDatRow$y), rep(1, clustDatRow$y)) if(clustDatRow["urban"][1,1]){ urbanRural = rep("urban", n) } else { urbanRural = rep("rural", n) } # admin1 = rep(clustDatRow$admin1, n) # res = merge(data.frame(y, ageMonth, ageGrpD, v001, v002, urbanRural), tmp, by="v001") res = merge(data.frame(y, ageMonth, v001, v002, urbanRural), tmp, by="v001") # the below line was commented out since each cluster only has one type of admin and urban level. # The equivalent line has been added into the parent function # res$regionRural <- with(res, interaction(admin1, urbanRural), drop=TRUE) if(divideWeight) res$samplingWeight = res$samplingWeight / n return(res) } # - a function that reads in a glm or svyglm - # # - object and returns the estimate and SE - # # - specifics in the supplementary materials - # ## This function takes care of the delta method ## to calculate the variance of u5m as a function ## of the age specific hazards, \beta_a . get.est<-function(glm.ob){ beta<-summary(glm.ob)$coef[,1] est <-expit(beta) var.est <- vcov(glm.ob)[1,1] # compute 80% CI intervals lower <- logit(est)+qnorm(c(0.9))*sqrt(var.est) upper <- logit(est)+qnorm(c(0.1))*sqrt(var.est) return(c(est,lower, upper,logit(est),var.est)) } # -- a function to subset the design based on a region and time period -- # # -- and then run the svyglm function and return the get.est() results -- # ## First line in function allows you to subset your data and ALSO the specified ## svydesign object into area (usually v024 variable in DHS) ## and time (per5 is a variable we construct for the 5-year periods in the Stata step) ## Second line fits the survey-weighted glm region.time.HT<-function(dataobj, svydesign, area){ tmp<-subset(svydesign, (admin1==area)) tt2 <- tryCatch(glmob<-svyglm(y.x~1, design=tmp,family=quasibinomial, maxit=50), error=function(e) e, warning=function(w) w) if(is(tt2, "warning")){ if(grepl("agegroups", tt2)){ res <- get.est(glmob) res = c(res, 
2) } else { res = c(rep(NA, 5), 3) } return(res) } if(is(tt2,"error")){ res = c(rep(NA, 5), 1) return(res) } else { res <- get.est(glmob) res = c(res, 0) return(res) } } region.time.HTDat<-function(dataobj, svydesign, area, nationalEstimate){ if(!nationalEstimate) { tmp<-subset(svydesign, (admin1==area)) tt2 <- tryCatch(glmob<-svyglm(y~1, design=tmp,family=quasibinomial, maxit=50), error=function(e) e, warning=function(w) w) } else { thisUrban = area == 1 tmp<-subset(svydesign, (urban==thisUrban)) tt2 <- tryCatch(glmob<-svyglm(y~1, design=tmp,family=quasibinomial, maxit=50), error=function(e) e, warning=function(w) w) } if(is(tt2, "warning")){ if(grepl("agegroups", tt2)){ res <- get.est(glmob) res = c(res, 2) } else { res = c(rep(NA, 5), 3) } return(res) } if(is(tt2,"error")){ res = c(rep(NA, 5), 1) return(res) } else { res <- get.est(glmob) res = c(res, 0) return(res) } } defineSurvey <- function(dat_obj, stratVar, useSamplingWeights=TRUE){ options(survey.lonely.psu="adjust") # --- setting up a place to store results --- # regions <- sort(unique(dat_obj$admin1)) regions_num <- 1:length(regions) results<-data.frame(admin1=rep(regions,each=1)) results$var.est<-results$logit.est<-results$upper<-results$lower<-results$est<-NA results$converge <- NA if(useSamplingWeights){ dat_obj$wt <- dat_obj$samplingWeight } else { dat_obj$wt <- NULL } if(is.null(stratVar)){ # --- setting up the design object --- # ## NOTE: -the v001 denote ## one stage cluster design (v001 is cluster) ## -This call below specifies our survey design ## nest = T argument nests clusters within strata my.svydesign <- svydesign(id= ~v001, strata =NULL, weights=NULL, data=dat_obj) } else { ## not in all surveys does v022 contain the correct sampling strata ## Thus, the correct vector has to be provided externally dat_obj$strat <- stratVar # --- setting up the design object --- # ## NOTE: -the v001 denote ## one stage cluster design (v001 is cluster) ## -This call below specifies our survey design ## nest 
= T argument nests clusters within strata my.svydesign <- svydesign(id= ~v001, strata=~strat, nest=T, weights=~wt, data=dat_obj) } for(i in 1:nrow(results)){ results[i, 2:7] <- region.time.HT(dataobj=dat_obj, svydesign=my.svydesign, area=results$admin1[i]) } return(results) } defineSurveyDat <- function(dat_obj, stratVar, useSamplingWeights=TRUE, nationalEstimate=FALSE, getContrast=nationalEstimate){ options(survey.lonely.psu="adjust") # --- setting up a place to store results --- # regions <- sort(unique(dat_obj$admin1)) regions_num <- 1:length(regions) if(!nationalEstimate) { results<-data.frame(admin1=rep(regions,each=1)) results$var.est<-results$logit.est<-results$upper<-results$lower<-results$est<-NA results$converge <- NA } else { results<-data.frame(urban=c(TRUE, FALSE)) results$var.est<-results$logit.est<-results$upper<-results$lower<-results$est<-NA results$converge <- NA } if(useSamplingWeights){ dat_obj$wt <- dat_obj$samplingWeight } else { dat_obj$wt <- NULL } if(is.null(stratVar)){ # --- setting up the design object --- # ## NOTE: -the v001 denote ## one stage cluster design (v001 is cluster) ## -This call below specifies our survey design ## nest = T argument nests clusters within strata my.svydesign <- svydesign(id= ~v001, strata =NULL, weights=NULL, data=dat_obj) } else { ## not in all surveys does v022 contain the correct sampling strata ## Thus, the correct vector has to be provided externally dat_obj$strat <- stratVar # --- setting up the design object --- # ## NOTE: -the v001 denote ## one stage cluster design (v001 is cluster) ## -This call below specifies our survey design ## nest = T argument nests clusters within strata my.svydesign <- svydesign(id= ~v001, strata=~strat, nest=T, weights=~wt, data=dat_obj) } for(i in 1:nrow(results)){ if(!nationalEstimate) { results[i, 2:7] <- region.time.HTDat(dataobj=dat_obj, svydesign=my.svydesign, area=results$admin1[i], nationalEstimate=nationalEstimate) } else { results[i, 2:7] <- 
region.time.HTDat(dataobj=dat_obj, svydesign=my.svydesign, area=i, nationalEstimate=nationalEstimate) } } if(getContrast) { # out = svyby(~y, by = ~urban, design = svydesign, svymean) glmob<-svyglm(y~urban, design=my.svydesign,family=quasibinomial, maxit=50) # get contrast mean and variance est = glmob$coefficients[2] urbanVar = vcov(glmob)[2,2] # get confidence interval lower = est + qnorm(0.025, sd=sqrt(urbanVar)) upper = est + qnorm(0.975, sd=sqrt(urbanVar)) contrastStats = list(est=est, sd=sqrt(urbanVar), lower95=lower, upper95=upper) return(list(results=results, contrastStats=contrastStats)) } else { return(results) } } # Set dat_obj$admin1 to be something else for different kinds of aggregations run_naive <- function(dat_obj){ regions <- sort(unique(dat_obj$admin1)) regions_num <- 1:length(regions) results<-data.frame(admin1=rep(regions,each=1)) results$var.est<-results$logit.est<-results$upper<-results$lower<-results$est<-NA results$converge <- NA for(i in 1:nrow(results)){ my.glm <- glm(y.x~1, family=binomial, data=dat_obj, subset = admin1 == results$admin1[i] ) # newdat = dat_obj[dat_obj$admin1==results$admin1[i], ] # my.glm2 <- glm(y.x~1, family=binomial, # data=newdat) results[i, 2:7] <- c(get.est(my.glm),0) } return(results) } # running the analysis for the actual mortality dataset is slightly different run_naiveDat <- function(dat_obj){ regions <- sort(unique(dat_obj$admin1)) regions_num <- 1:length(regions) results<-data.frame(admin1=rep(regions,each=1)) results$var.est<-results$logit.est<-results$upper<-results$lower<-results$est<-NA results$converge <- NA for(i in 1:nrow(results)){ my.glm <- glm(y~1, family=binomial, data=dat_obj, subset = admin1 == results$admin1[i] ) # newdat = dat_obj[dat_obj$admin1==results$admin1[i], ] # my.glm2 <- glm(y.x~1, family=binomial, # data=newdat) results[i, 2:7] <- c(get.est(my.glm),0) } return(results) }
#' RemoveType #' @return module #' @param ... parameters to pass #' @export RemoveType <- NULL #' @title Remove Silence #' #' @description Split signal at points of silence greater than 2*pad_ms #' #' #' @param remove_type remove type from RemoveType module #' @param threshold threshold point #' @param pad_ms pad milliseconds #' @return None #' @export RemoveSilence <- function(remove_type = RemoveType$Trim$value, threshold = 20, pad_ms = 20) { fastaudio$augment$preprocess$RemoveSilence( remove_type = remove_type, threshold = as.integer(threshold), pad_ms = as.integer(pad_ms) ) } #' @title Resample #' #' @description Resample using faster polyphase technique and avoiding FFT computation #' #' #' @param sr_new input #' @return None #' @export Resample <- function(sr_new) { fastaudio$augment$preprocess$Resample( sr_new = sr_new ) }
/R/audio_aug_preprocess.R
permissive
han-tun/fastai
R
false
false
883
r
#' RemoveType #' @return module #' @param ... parameters to pass #' @export RemoveType <- NULL #' @title Remove Silence #' #' @description Split signal at points of silence greater than 2*pad_ms #' #' #' @param remove_type remove type from RemoveType module #' @param threshold threshold point #' @param pad_ms pad milliseconds #' @return None #' @export RemoveSilence <- function(remove_type = RemoveType$Trim$value, threshold = 20, pad_ms = 20) { fastaudio$augment$preprocess$RemoveSilence( remove_type = remove_type, threshold = as.integer(threshold), pad_ms = as.integer(pad_ms) ) } #' @title Resample #' #' @description Resample using faster polyphase technique and avoiding FFT computation #' #' #' @param sr_new input #' @return None #' @export Resample <- function(sr_new) { fastaudio$augment$preprocess$Resample( sr_new = sr_new ) }
#' @rdname TxDbLite-class #' @export setMethod("dbconn", "TxDbLite", function(x) return(x@con)) #' @rdname TxDbLite-class #' @param object TxDbLite SQL-lite annotation database #' @import ensembldb setMethod("show", "TxDbLite", function(object) { # {{{ if(is.null(object@con)) stop(paste("Invalid", class(object), "instance!")) info <- metadata(object) cat(class(object), ":\n") catrow <- function(x) cat(paste0("|", x["name"], ": ", x["value"], "\n")) for(i in 1:nrow(info)) catrow(info[i,]) }) # }}} #' @rdname TxDbLite-class #' @param x TxDblite SQL-lite database instance #' @param ... additional parameters releated to annotation database setMethod("metadata", "TxDbLite", function(x, ...) { # {{{ md <- dbGetQuery(dbconn(x), "select * from metadata") rownames(md) <- md$name return(md) }) # }}} #' @importFrom GenomeInfoDb genome setMethod("transcripts", "TxDbLite", function(x) { # {{{ sql <- paste("select gene.seqnames, tx.start, tx.end, gene.strand,", " tx_length, gc_content, tx_id, gene_id, gene_name,", " entrezid, tx_biotype, gene_biotype,", " class as biotype_class", " from gene, tx, gene_biotype, tx_biotype, biotype_class", " where gene.gene = tx.gene", " and tx.tx_biotype_id = tx_biotype.id", " and gene.gene_biotype_id = gene_biotype.id", " and gene_biotype.gene_biotype = biotype_class.biotype", " order by tx_id asc") res <- makeGRangesFromDataFrame(dbGetQuery(dbconn(x), sql), keep.extra.columns=TRUE) genome(res) <- metadata(x)["genome_build","value"] names(res) <- res$tx_id return(res) }) # }}} setMethod("promoters", "TxDbLite", function(x,upstream=2000,downstream=200,...){ # {{{ trim(suppressWarnings(promoters(transcripts(x, ...), upstream=upstream, downstream=downstream))) }) # }}} #' #' #' get transcripts by gene, promoter, tx_biotype, gene_biotype, or biotype_class #' #' #' #' #' @return a GRangesList #' @rdname TxDbLite-class #' @importFrom GenomeInfoDb seqlevelsStyle #' @export setMethod("transcriptsBy", "TxDbLite", function(x, # {{{ by=c("gene", 
"promoter", "tx_biotype", "gene_biotype", "biotype_class")) { by <- match.arg(by) txs <- transcripts(x) name <- function(x) paste0(seqnames(x),":",start(x),"-",end(x),":",strand(x)) proms <- promoters(x) seqlevelsStyle(proms) <- "UCSC" # else names can potentially break downstream promoterNames <- name(proms) switch(by, gene=split(txs, txs$gene_id), promoter=split(txs, promoterNames), tx_biotype=split(txs, txs$tx_biotype), gene_biotype=split(txs, txs$gene_biotype), biotype_class=split(txs, txs$biotype_class)) }) # }}} #' @importFrom GenomeInfoDb genome setMethod("genes", "TxDbLite", function(x) { # {{{ sql <- paste("select seqnames, start, end, strand, ", " median_length as tx_length, 'NA' as gc_content, copyNumber,", " 'NA' as tx_id, gene_id, gene_name, entrezid,", " 'NA' as tx_biotype, gene_biotype,", " class as biotype_class", " from gene, gene_biotype, biotype_class", " where gene.gene_biotype_id = gene_biotype.id", " and gene_biotype.gene_biotype = biotype_class.biotype", " order by gene_id asc") res <- makeGRangesFromDataFrame(dbGetQuery(dbconn(x), sql), keep.extra.columns=TRUE) genome(res) <- metadata(x)["genome_build","value"] names(res) <- res$gene_id return(res) }) # }}} #' Generic for querying genes #' @name genesBy #' @rdname TxDbLite-class #' @export setGeneric("genesBy", function(x,by=c("gene_biotype","biotype_class"), ...){#{{{ standardGeneric("genesBy") }) # }}} #' #' #' get genes by gene_biotype or biotype_class #' #' #' @param by how to split the genes #' @aliases genesBy TxDbLite-method #' @return a GRangesList #' @rdname TxDbLite-class #' @export setMethod("genesBy", "TxDbLite", function(x, by=c("gene_biotype","biotype_class")) { # {{{ by <- match.arg(by) gxs <- genes(x) switch(by, gene_biotype=split(gxs, gxs$gene_biotype), biotype_class=split(gxs, gxs$biotype_class)) }) # }}} ## EnsDbLite methods #' @import ensembldb #' @importFrom GenomeInfoDb genome setMethod("genes", "EnsDbLite", function(x) { # {{{ sql <- paste("select seqnames, start, end, 
strand, ", " median_length as tx_length, 'NA' as gc_content, copyNumber,", " 'NA' as tx_id, gene_id, gene_name, entrezid,", " 'NA' as tx_biotype, gene_biotype,", " class as biotype_class", " from gene, gene_biotype, biotype_class", " where gene.gene_biotype_id = gene_biotype.id", " and gene_biotype.gene_biotype = biotype_class.biotype", " order by gene_id asc") res <- makeGRangesFromDataFrame(dbGetQuery(dbconn(x), sql), keep.extra.columns=TRUE) genome(res) <- metadata(x)["genome_build","value"] names(res) <- res$gene_id return(res) }) # }}} #' @importFrom GenomeInfoDb genome setMethod("transcripts", "EnsDbLite", function(x) { # {{{ sql <- paste("select gene.seqnames, tx.start, tx.end, gene.strand,", " tx_length, gc_content, tx_id, gene_id, gene_name,", " entrezid, tx_biotype, gene_biotype,", " class as biotype_class", " from gene, tx, gene_biotype, tx_biotype, biotype_class", " where gene.gene = tx.gene", " and tx.tx_biotype_id = tx_biotype.id", " and gene.gene_biotype_id = gene_biotype.id", " and gene_biotype.gene_biotype = biotype_class.biotype", " order by tx_id asc") res <- makeGRangesFromDataFrame(dbGetQuery(dbconn(x), sql), keep.extra.columns=TRUE) genome(res) <- metadata(x)["genome_build","value"] names(res) <- res$tx_id return(res) }) # }}} setMethod("listGenebiotypes", "EnsDbLite", function(x, ...){ # {{{ return(dbGetQuery(dbconn(x), "select * from gene_biotype")[,"gene_biotype"]) }) # }}} setMethod("listTxbiotypes", "EnsDbLite", function(x, ...){ # {{{ return(dbGetQuery(dbconn(x), "select * from tx_biotype")[,"tx_biotype"]) }) # }}} setMethod("show", "EnsDbLite", function(object) { # {{{ callNextMethod() # TxDbLite show method -- basic information on the db genesql <- "select count(distinct gene) from gene" g <- dbGetQuery(dbconn(object), genesql)[1,1] txsql <- "select count(distinct tx_id) from tx" tx <- dbGetQuery(dbconn(object), txsql)[1,1] cat(paste0("| ", tx, " transcripts from ", g, " bundles (genes).\n")) }) # }}} ## RepDbLite show method 
setMethod("show", "RepDbLite", function(object) { # {{{ callNextMethod() # TxDbLite show method -- basic information on the db repsql <- "select count(distinct tx_id) from tx" famsql <- "select count(distinct tx_biotype) from tx_biotype" rpts <- dbGetQuery(dbconn(object), repsql)[1,1] fam <- dbGetQuery(dbconn(object), famsql)[1,1] cat(paste0("| ", rpts, " repeat exemplars from ", fam, " repeat families (no known genes).\n")) }) # }}} ## RepDbLite objects have no genes in them setMethod("transcripts","RepDbLite",function(x) { sql <- paste("select gene.seqnames, tx.start, tx.end, gene.strand,", " tx_length, gc_content, tx_id, gene_id, gene_name,", " entrezid, tx_biotype, gene_biotype,", " class as biotype_class", " from gene, tx, gene_biotype, tx_biotype, biotype_class", " where gene.gene = tx.gene", " and tx.tx_biotype_id = tx_biotype.id", " and gene.gene_biotype_id = gene_biotype.id", " and gene_biotype.gene_biotype = biotype_class.biotype", " order by tx_id asc") res <- makeGRangesFromDataFrame(dbGetQuery(dbconn(x), sql), keep.extra.columns=TRUE) genome(res) <- metadata(x)["genome_build","value"] names(res) <- res$tx_id return(res) }) setMethod("genes", "RepDbLite", function(x) callNextMethod()[0] ) ## no genes setMethod("promoters", "RepDbLite", function(x) callNextMethod()[0] ) ## none ## ErccDbLite show method setMethod("show", "ErccDbLite", function(object) { # {{{ callNextMethod() # TxDbLite show method -- basic information on the db ctlsql <- "select count(distinct tx_id) from tx" grpsql <- "select count(distinct tx_biotype) from tx_biotype" ctl <- dbGetQuery(dbconn(object), ctlsql)[1,1] grp <- dbGetQuery(dbconn(object), grpsql)[1,1] ## subtract 1 from the number of subgroups as "unannotated" is in there cat(paste0("| ", ctl, " spike-in controls from ", grp - 1, " subgroups (no known genes).\n")) }) # }}} ## ErccDbLite show method setMethod("show", "ArrayControlDbLite", function(object) { # {{{ callNextMethod() # TxDbLite show method -- basic information on 
the db ctlsql <- "select count(distinct tx_id) from tx" grpsql <- "select count(distinct tx_biotype) from tx_biotype" ctl <- dbGetQuery(dbconn(object), ctlsql)[1,1] grp <- dbGetQuery(dbconn(object), grpsql)[1,1] ## subtract 1 from the number of subgroups as "unannotated" is in there cat(paste0("| ", ctl, " spike-in controls from ", grp - 1, " subgroups (no known genes).\n")) }) # }}} ## ErccDbLite objects have no genes in them setMethod("genes", "ErccDbLite", function(x) callNextMethod()[0] ) ## no genes setMethod("promoters", "ErccDbLite", function(x) callNextMethod()[0] ) ## none # ArrayControlDbLite objects have no genes in them setMethod("genes", "ArrayControlDbLite", function(x) callNextMethod()[0] ) ## no genes setMethod("promoters", "ArrayControlDbLite", function(x) callNextMethod()[0] ) ## none
/R/TxDbLite-methods.R
no_license
arcolombo/TxDbLite
R
false
false
10,406
r
#' @rdname TxDbLite-class #' @export setMethod("dbconn", "TxDbLite", function(x) return(x@con)) #' @rdname TxDbLite-class #' @param object TxDbLite SQL-lite annotation database #' @import ensembldb setMethod("show", "TxDbLite", function(object) { # {{{ if(is.null(object@con)) stop(paste("Invalid", class(object), "instance!")) info <- metadata(object) cat(class(object), ":\n") catrow <- function(x) cat(paste0("|", x["name"], ": ", x["value"], "\n")) for(i in 1:nrow(info)) catrow(info[i,]) }) # }}} #' @rdname TxDbLite-class #' @param x TxDblite SQL-lite database instance #' @param ... additional parameters releated to annotation database setMethod("metadata", "TxDbLite", function(x, ...) { # {{{ md <- dbGetQuery(dbconn(x), "select * from metadata") rownames(md) <- md$name return(md) }) # }}} #' @importFrom GenomeInfoDb genome setMethod("transcripts", "TxDbLite", function(x) { # {{{ sql <- paste("select gene.seqnames, tx.start, tx.end, gene.strand,", " tx_length, gc_content, tx_id, gene_id, gene_name,", " entrezid, tx_biotype, gene_biotype,", " class as biotype_class", " from gene, tx, gene_biotype, tx_biotype, biotype_class", " where gene.gene = tx.gene", " and tx.tx_biotype_id = tx_biotype.id", " and gene.gene_biotype_id = gene_biotype.id", " and gene_biotype.gene_biotype = biotype_class.biotype", " order by tx_id asc") res <- makeGRangesFromDataFrame(dbGetQuery(dbconn(x), sql), keep.extra.columns=TRUE) genome(res) <- metadata(x)["genome_build","value"] names(res) <- res$tx_id return(res) }) # }}} setMethod("promoters", "TxDbLite", function(x,upstream=2000,downstream=200,...){ # {{{ trim(suppressWarnings(promoters(transcripts(x, ...), upstream=upstream, downstream=downstream))) }) # }}} #' #' #' get transcripts by gene, promoter, tx_biotype, gene_biotype, or biotype_class #' #' #' #' #' @return a GRangesList #' @rdname TxDbLite-class #' @importFrom GenomeInfoDb seqlevelsStyle #' @export setMethod("transcriptsBy", "TxDbLite", function(x, # {{{ by=c("gene", 
"promoter", "tx_biotype", "gene_biotype", "biotype_class")) { by <- match.arg(by) txs <- transcripts(x) name <- function(x) paste0(seqnames(x),":",start(x),"-",end(x),":",strand(x)) proms <- promoters(x) seqlevelsStyle(proms) <- "UCSC" # else names can potentially break downstream promoterNames <- name(proms) switch(by, gene=split(txs, txs$gene_id), promoter=split(txs, promoterNames), tx_biotype=split(txs, txs$tx_biotype), gene_biotype=split(txs, txs$gene_biotype), biotype_class=split(txs, txs$biotype_class)) }) # }}} #' @importFrom GenomeInfoDb genome setMethod("genes", "TxDbLite", function(x) { # {{{ sql <- paste("select seqnames, start, end, strand, ", " median_length as tx_length, 'NA' as gc_content, copyNumber,", " 'NA' as tx_id, gene_id, gene_name, entrezid,", " 'NA' as tx_biotype, gene_biotype,", " class as biotype_class", " from gene, gene_biotype, biotype_class", " where gene.gene_biotype_id = gene_biotype.id", " and gene_biotype.gene_biotype = biotype_class.biotype", " order by gene_id asc") res <- makeGRangesFromDataFrame(dbGetQuery(dbconn(x), sql), keep.extra.columns=TRUE) genome(res) <- metadata(x)["genome_build","value"] names(res) <- res$gene_id return(res) }) # }}} #' Generic for querying genes #' @name genesBy #' @rdname TxDbLite-class #' @export setGeneric("genesBy", function(x,by=c("gene_biotype","biotype_class"), ...){#{{{ standardGeneric("genesBy") }) # }}} #' #' #' get genes by gene_biotype or biotype_class #' #' #' @param by how to split the genes #' @aliases genesBy TxDbLite-method #' @return a GRangesList #' @rdname TxDbLite-class #' @export setMethod("genesBy", "TxDbLite", function(x, by=c("gene_biotype","biotype_class")) { # {{{ by <- match.arg(by) gxs <- genes(x) switch(by, gene_biotype=split(gxs, gxs$gene_biotype), biotype_class=split(gxs, gxs$biotype_class)) }) # }}} ## EnsDbLite methods #' @import ensembldb #' @importFrom GenomeInfoDb genome setMethod("genes", "EnsDbLite", function(x) { # {{{ sql <- paste("select seqnames, start, end, 
strand, ", " median_length as tx_length, 'NA' as gc_content, copyNumber,", " 'NA' as tx_id, gene_id, gene_name, entrezid,", " 'NA' as tx_biotype, gene_biotype,", " class as biotype_class", " from gene, gene_biotype, biotype_class", " where gene.gene_biotype_id = gene_biotype.id", " and gene_biotype.gene_biotype = biotype_class.biotype", " order by gene_id asc") res <- makeGRangesFromDataFrame(dbGetQuery(dbconn(x), sql), keep.extra.columns=TRUE) genome(res) <- metadata(x)["genome_build","value"] names(res) <- res$gene_id return(res) }) # }}} #' @importFrom GenomeInfoDb genome setMethod("transcripts", "EnsDbLite", function(x) { # {{{ sql <- paste("select gene.seqnames, tx.start, tx.end, gene.strand,", " tx_length, gc_content, tx_id, gene_id, gene_name,", " entrezid, tx_biotype, gene_biotype,", " class as biotype_class", " from gene, tx, gene_biotype, tx_biotype, biotype_class", " where gene.gene = tx.gene", " and tx.tx_biotype_id = tx_biotype.id", " and gene.gene_biotype_id = gene_biotype.id", " and gene_biotype.gene_biotype = biotype_class.biotype", " order by tx_id asc") res <- makeGRangesFromDataFrame(dbGetQuery(dbconn(x), sql), keep.extra.columns=TRUE) genome(res) <- metadata(x)["genome_build","value"] names(res) <- res$tx_id return(res) }) # }}} setMethod("listGenebiotypes", "EnsDbLite", function(x, ...){ # {{{ return(dbGetQuery(dbconn(x), "select * from gene_biotype")[,"gene_biotype"]) }) # }}} setMethod("listTxbiotypes", "EnsDbLite", function(x, ...){ # {{{ return(dbGetQuery(dbconn(x), "select * from tx_biotype")[,"tx_biotype"]) }) # }}} setMethod("show", "EnsDbLite", function(object) { # {{{ callNextMethod() # TxDbLite show method -- basic information on the db genesql <- "select count(distinct gene) from gene" g <- dbGetQuery(dbconn(object), genesql)[1,1] txsql <- "select count(distinct tx_id) from tx" tx <- dbGetQuery(dbconn(object), txsql)[1,1] cat(paste0("| ", tx, " transcripts from ", g, " bundles (genes).\n")) }) # }}} ## RepDbLite show method 
setMethod("show", "RepDbLite", function(object) { # {{{ callNextMethod() # TxDbLite show method -- basic information on the db repsql <- "select count(distinct tx_id) from tx" famsql <- "select count(distinct tx_biotype) from tx_biotype" rpts <- dbGetQuery(dbconn(object), repsql)[1,1] fam <- dbGetQuery(dbconn(object), famsql)[1,1] cat(paste0("| ", rpts, " repeat exemplars from ", fam, " repeat families (no known genes).\n")) }) # }}} ## RepDbLite objects have no genes in them setMethod("transcripts","RepDbLite",function(x) { sql <- paste("select gene.seqnames, tx.start, tx.end, gene.strand,", " tx_length, gc_content, tx_id, gene_id, gene_name,", " entrezid, tx_biotype, gene_biotype,", " class as biotype_class", " from gene, tx, gene_biotype, tx_biotype, biotype_class", " where gene.gene = tx.gene", " and tx.tx_biotype_id = tx_biotype.id", " and gene.gene_biotype_id = gene_biotype.id", " and gene_biotype.gene_biotype = biotype_class.biotype", " order by tx_id asc") res <- makeGRangesFromDataFrame(dbGetQuery(dbconn(x), sql), keep.extra.columns=TRUE) genome(res) <- metadata(x)["genome_build","value"] names(res) <- res$tx_id return(res) }) setMethod("genes", "RepDbLite", function(x) callNextMethod()[0] ) ## no genes setMethod("promoters", "RepDbLite", function(x) callNextMethod()[0] ) ## none ## ErccDbLite show method setMethod("show", "ErccDbLite", function(object) { # {{{ callNextMethod() # TxDbLite show method -- basic information on the db ctlsql <- "select count(distinct tx_id) from tx" grpsql <- "select count(distinct tx_biotype) from tx_biotype" ctl <- dbGetQuery(dbconn(object), ctlsql)[1,1] grp <- dbGetQuery(dbconn(object), grpsql)[1,1] ## subtract 1 from the number of subgroups as "unannotated" is in there cat(paste0("| ", ctl, " spike-in controls from ", grp - 1, " subgroups (no known genes).\n")) }) # }}} ## ErccDbLite show method setMethod("show", "ArrayControlDbLite", function(object) { # {{{ callNextMethod() # TxDbLite show method -- basic information on 
the db ctlsql <- "select count(distinct tx_id) from tx" grpsql <- "select count(distinct tx_biotype) from tx_biotype" ctl <- dbGetQuery(dbconn(object), ctlsql)[1,1] grp <- dbGetQuery(dbconn(object), grpsql)[1,1] ## subtract 1 from the number of subgroups as "unannotated" is in there cat(paste0("| ", ctl, " spike-in controls from ", grp - 1, " subgroups (no known genes).\n")) }) # }}} ## ErccDbLite objects have no genes in them setMethod("genes", "ErccDbLite", function(x) callNextMethod()[0] ) ## no genes setMethod("promoters", "ErccDbLite", function(x) callNextMethod()[0] ) ## none # ArrayControlDbLite objects have no genes in them setMethod("genes", "ArrayControlDbLite", function(x) callNextMethod()[0] ) ## no genes setMethod("promoters", "ArrayControlDbLite", function(x) callNextMethod()[0] ) ## none
## ---------------------------------------------------------------
## Assignment 1: Data Science Course
## Kyle Ott & Cornelius Schneider -- 26 September 2014
##
## Average heights (in) and weights (lbs) for American women aged
## 30-39, from the built-in `women` dataset.
## ---------------------------------------------------------------

## Load the dataset and open its documentation page.
data(women)
?women

## Which variables are available? (height and weight)
names(women)

## Convert to metric units: centimeters and kilograms.
women$height_cm <- women$height * 2.54
women$weight_kg <- women$weight * 0.45359237

## Descriptive statistics -----------------------------------------
## Height ranges from 147.3 to 182.9 cm, and the median equals the
## mean at 165.1 cm. Weight ranges from 52.15 to 74.39 kg, with a
## median of 61.23 kg and a mean of 62.02 kg.
summary(women)

## Histogram of height: most women (8 of 15) fall between 160 and
## 180 cm; only two fall in 140-150 cm and two in 180-190 cm. At
## first glance this looks like a slightly right-skewed normal
## distribution.
hist(women$height_cm,
     main = 'Average Height for American Women',
     xlab = 'Height in Centimeters')

## Histogram of weight: the most common bin (4 of 15) is 55-60 kg
## and the least common (2 of 15) is 70-75 kg; the remaining bins
## are fairly evenly populated.
hist(women$weight_kg,
     main = 'Average Weight for American Women',
     xlab = 'Weight in Kilograms')

## Scatterplot of the joint distribution: a clear positive
## relationship between height and weight, with no visible
## outliers in this sample.
plot(women$height_cm, women$weight_kg,
     main = 'Joint Distribution',
     xlab = 'Height in Centimeters',
     ylab = 'Weight in Kilograms')

## Spread of the converted variables ------------------------------
var(women$height_cm)  # 129.032
var(women$weight_kg)  # 49.42216
sd(women$height_cm)   # 11.35923
sd(women$weight_kg)   # 7.030089

## Software citation ----------------------------------------------
## R Core Team (2014). R: A language and environment for
## statistical computing. R Foundation for Statistical Computing,
## Vienna, Austria. URL http://www.R-project.org/.
citation()
/women.R
no_license
cjfschneider/Assignment1_Ott_Schneider
R
false
false
2,417
r
## ---------------------------------------------------------------
## Assignment 1: Data Science Course
## Kyle Ott & Cornelius Schneider -- 26 September 2014
##
## Average heights (in) and weights (lbs) for American women aged
## 30-39, from the built-in `women` dataset.
## ---------------------------------------------------------------

## Load the dataset and open its documentation page.
data(women)
?women

## Which variables are available? (height and weight)
names(women)

## Convert to metric units: centimeters and kilograms.
women$height_cm <- women$height * 2.54
women$weight_kg <- women$weight * 0.45359237

## Descriptive statistics -----------------------------------------
## Height ranges from 147.3 to 182.9 cm, and the median equals the
## mean at 165.1 cm. Weight ranges from 52.15 to 74.39 kg, with a
## median of 61.23 kg and a mean of 62.02 kg.
summary(women)

## Histogram of height: most women (8 of 15) fall between 160 and
## 180 cm; only two fall in 140-150 cm and two in 180-190 cm. At
## first glance this looks like a slightly right-skewed normal
## distribution.
hist(women$height_cm,
     main = 'Average Height for American Women',
     xlab = 'Height in Centimeters')

## Histogram of weight: the most common bin (4 of 15) is 55-60 kg
## and the least common (2 of 15) is 70-75 kg; the remaining bins
## are fairly evenly populated.
hist(women$weight_kg,
     main = 'Average Weight for American Women',
     xlab = 'Weight in Kilograms')

## Scatterplot of the joint distribution: a clear positive
## relationship between height and weight, with no visible
## outliers in this sample.
plot(women$height_cm, women$weight_kg,
     main = 'Joint Distribution',
     xlab = 'Height in Centimeters',
     ylab = 'Weight in Kilograms')

## Spread of the converted variables ------------------------------
var(women$height_cm)  # 129.032
var(women$weight_kg)  # 49.42216
sd(women$height_cm)   # 11.35923
sd(women$weight_kg)   # 7.030089

## Software citation ----------------------------------------------
## R Core Team (2014). R: A language and environment for
## statistical computing. R Foundation for Statistical Computing,
## Vienna, Austria. URL http://www.R-project.org/.
citation()
# ============================================================= # Import data # ============================================================= # load library(plyr) library(ggplot2) toydata = read.csv( "/Users/Gaston/Documents/Insight/data/good_guide/care_products.csv", header = TRUE, stringsAsFactors = FALSE ) # ============================================================= # Numer of Ingredients # ============================================================= # number of ingredients summary(toydata$num_ings) # examine distribution of number of ingredients hist(toydata$num_ings) # ============================================================= # Numer of Ingredients -vs- Health Score # ============================================================= with(toydata, plot(num_ings, health)) abline(lm(toydata$health ~ toydata$num_ings), col = "orange", lwd = 2) # correlation (without missing num_ings) complete <- complete.cases( cbind(toydata[,c("health", "num_ings")]) ) cor(toydata$num_ings[complete], toydata$health[complete]) # number of ingedients and presence of high level table(toydata$high) summary(toydata$num_ings[toydata$health <= 2]) summary(toydata$num_ings[toydata$health > 2 & toydata$health <= 6]) summary(toydata$num_ings[toydata$health > 6]) # ============================================================= # Summary Statistics by product type (i.e. 
group) # ============================================================= # average health score by category ddply( toydata, .(category), summarize, num = length(category), avg_score = mean(health, na.rm=TRUE), avg_ings = mean(num_ings, na.rm = TRUE), has_high = round(100 * sum(num_high, na.rm=TRUE) / length(category), 2), has_medium = round(100 * sum(num_medium, na.rm=TRUE) / length(category), 2), has_low = round(100 * sum(num_low, na.rm=TRUE) / length(category), 2) ) # ============================================================= # Questions to answer # ============================================================= # How many ingredients of high level of concern # Top 10 common ingredients of high level of concern (in general) # Top 5 ingredients of high level of concern (by category) break_ingredients <- function(concern) { # split ingredients tmp = strsplit(concern, split = ",") # remove ingredient id lapply(tmp, function(x) gsub("\\d+-", "", x)) } # Ingredients of high level of concern (in general) x = lapply(toydata$high, break_ingredients) high_ingredients = sort(table(unlist(x)), decreasing = TRUE) high_ingredients round(100 * high_ingredients / nrow(toydata), 3) # Ingredients of high level of concern (by group) groups = unique(toydata$group) group_size = as.vector(table(toydata$group)) num_groups = length(group_size) group_high_ings <- vector("list", num_groups) group_high_ings_prop <- vector("list", num_groups) for (g in 1L:num_groups) { aux_group_ings = toydata$high[toydata$group == groups[g]] break_aux_group_ings = unlist(break_ingredients(aux_group_ings)) group_high_ings_table = table(break_aux_group_ings) group_high_ings[[g]] = sort(group_high_ings_table, decreasing = TRUE) group_high_ings_prop[[g]] = round( 100 * table(break_aux_group_ings) / group_size[g], 3) } names(group_high_ings) = groups names(group_high_ings_prop) = groups group_high_ings # in proportion group_high_ings group_high_ings_prop # What about 'fragrance'? 
round(100 * with(toydata, table(fragrance, health)) / nrow(toydata), 2) with(toydata, boxplot(health ~ fragrance)) ggplot(toydata, aes(x = factor(fragrance), y = health)) + + geom_boxplot() ggplot(toydata, aes(x = health)) + geom_histogram() # Examine distribution of score by group op = par(mar = c(10,3,2,2)) boxplot( formula = health ~ group, data = toydata, col = "gray90", las = 2) par(op) ggplot(toydata, aes(x = health)) + geom_histogram(fill = "orange") ggplot(toydata, aes(x = group, y = health, fill = group)) + geom_boxplot() + theme(axis.text.x = element_text(angle = 70, hjust = 1)) ggplot(toydata, aes(x = group, y = health, fill = group)) + geom_boxplot() + geom_jitter(colour = "gray20", alpha = 0.3) + theme(axis.text.x = element_text(angle = 70, hjust = 1)) # Examine distribution of num ings by group ggplot(toydata, aes(x = num_ings)) + geom_histogram(fill = "orange") ggplot(toydata, aes(x = group, y = num_ings, fill = group)) + geom_boxplot() + geom_jitter(colour = "gray20", alpha = 0.3) + theme(axis.text.x = element_text(angle = 70, hjust = 1)) # average health score by group ddply( toydata, .(group), summarize, num = length(group), avg_score = mean(health, na.rm=TRUE), avg_ings = mean(num_ings, na.rm = TRUE), has_high = round(100 * sum(high_concern, na.rm=TRUE) / length(group), 2), has_medium = round(100 * sum(medium_concern, na.rm=TRUE)/ length(group), 2), has_low = round(100 * sum(low_concern, na.rm=TRUE) / length(group), 2), avg_high = mean(high_num), avg_medium = mean(medium_num), avg_low = mean(low_num) ) # ============================================================= # Regression Tree # ============================================================= library(rpart) library(rpart.plot) summary(toydata$num_ings) table(toydata$group) summary(toydata$fragrance) table(toydata$has_high) # 70% training set.seed = 22222 training = sample( x = 1:nrow(toydata), size = ceiling(0.70 * nrow(toydata))) # 30% testing testing = setdiff(1:nrow(toydata), training) # 
regression tree model my_tree = rpart( health ~ num_ings + fragrance + has_high + group, data = toydata[testing,], method = "class" ) plot(my_tree) text(my_tree, use.n = TRUE) # regression tree model my_model = rpart( health ~ num_ings + fragrance + has_high + group, data = toydata[testing,] ) plot(my_model) text(my_model, use.n = TRUE) prp(my_model) rsq.rpart(my_model) health_pred = predict(my_model, toydata[testing,]) table(health_pred, toydata$health[testing]) [,1] [,2] [1,] "baby-lotion" "a" [2,] "baby-shampoo" "b" [3,] "baby-soap-bath" "c" [4,] "baby-sunscreen" "d" [5,] "baby-wipes" "e" [6,] "bubble-bath" "f" [7,] "dental-floss" "g" [8,] "deodorants-antiperspirants-mens" "h" [9,] "deodorants-antiperspirants-womens" "i" [10,] "feminine-moisturizer" "j" [11,] "feminine-powder_deodorant" "k" [12,] "fragrance-for-men" "l" [13,] "fragrance-for-women" "m" [14,] "mouthwash" "n" [15,] "personal-cleansing" "o" [16,] "shampoo" "p" [17,] "soap" "q" [18,] "toothpaste" "r" # ============================================================= # Random Forest # ============================================================= install.packages("randomForest") library(randomForest) toydata$group = factor(toydata$group) my_forest = randomForest( x = toydata[,c(3,5,6,7)], y = toydata$health) importance(my_forest) # ============================================================= # Levels of concern # ============================================================= b = get_ingredients(tooth$high) # which contan triclosan which_triclosan = unlist(lapply(b, function(x) x == "triclosan")) tooth$name[which_triclosan] num_ings[which_triclosan] mean(num_ings[which_triclosan]) count_level(toydata$high) count_level(toydata$medium) count_level(toydata$low) # proportion of ingredients in toothpastes based on level of concern 100 * count_level(tooth$high) / nrow(tooth) 100 * count_level(tooth$medium) / nrow(tooth) 100 * count_level(tooth$low) / nrow(tooth) df1$num_ings[] hist(df1$num_ings) names(tooth) 
mean(tooth$health, na.rm = TRUE) median(tooth$health, na.rm = TRUE) table(tooth$health) summary(df1$num_ings[df1$hscore <= 2]) summary(df1$num_ings[df1$hscore > 2 & df1$hscore <= 6]) summary(df1$num_ings[df1$hscore > 6])
/scripts/rpart_analysis.r
no_license
gastonstat/skincareprods
R
false
false
8,123
r
# ============================================================= # Import data # ============================================================= # load library(plyr) library(ggplot2) toydata = read.csv( "/Users/Gaston/Documents/Insight/data/good_guide/care_products.csv", header = TRUE, stringsAsFactors = FALSE ) # ============================================================= # Numer of Ingredients # ============================================================= # number of ingredients summary(toydata$num_ings) # examine distribution of number of ingredients hist(toydata$num_ings) # ============================================================= # Numer of Ingredients -vs- Health Score # ============================================================= with(toydata, plot(num_ings, health)) abline(lm(toydata$health ~ toydata$num_ings), col = "orange", lwd = 2) # correlation (without missing num_ings) complete <- complete.cases( cbind(toydata[,c("health", "num_ings")]) ) cor(toydata$num_ings[complete], toydata$health[complete]) # number of ingedients and presence of high level table(toydata$high) summary(toydata$num_ings[toydata$health <= 2]) summary(toydata$num_ings[toydata$health > 2 & toydata$health <= 6]) summary(toydata$num_ings[toydata$health > 6]) # ============================================================= # Summary Statistics by product type (i.e. 
group) # ============================================================= # average health score by category ddply( toydata, .(category), summarize, num = length(category), avg_score = mean(health, na.rm=TRUE), avg_ings = mean(num_ings, na.rm = TRUE), has_high = round(100 * sum(num_high, na.rm=TRUE) / length(category), 2), has_medium = round(100 * sum(num_medium, na.rm=TRUE) / length(category), 2), has_low = round(100 * sum(num_low, na.rm=TRUE) / length(category), 2) ) # ============================================================= # Questions to answer # ============================================================= # How many ingredients of high level of concern # Top 10 common ingredients of high level of concern (in general) # Top 5 ingredients of high level of concern (by category) break_ingredients <- function(concern) { # split ingredients tmp = strsplit(concern, split = ",") # remove ingredient id lapply(tmp, function(x) gsub("\\d+-", "", x)) } # Ingredients of high level of concern (in general) x = lapply(toydata$high, break_ingredients) high_ingredients = sort(table(unlist(x)), decreasing = TRUE) high_ingredients round(100 * high_ingredients / nrow(toydata), 3) # Ingredients of high level of concern (by group) groups = unique(toydata$group) group_size = as.vector(table(toydata$group)) num_groups = length(group_size) group_high_ings <- vector("list", num_groups) group_high_ings_prop <- vector("list", num_groups) for (g in 1L:num_groups) { aux_group_ings = toydata$high[toydata$group == groups[g]] break_aux_group_ings = unlist(break_ingredients(aux_group_ings)) group_high_ings_table = table(break_aux_group_ings) group_high_ings[[g]] = sort(group_high_ings_table, decreasing = TRUE) group_high_ings_prop[[g]] = round( 100 * table(break_aux_group_ings) / group_size[g], 3) } names(group_high_ings) = groups names(group_high_ings_prop) = groups group_high_ings # in proportion group_high_ings group_high_ings_prop # What about 'fragrance'? 
round(100 * with(toydata, table(fragrance, health)) / nrow(toydata), 2) with(toydata, boxplot(health ~ fragrance)) ggplot(toydata, aes(x = factor(fragrance), y = health)) + + geom_boxplot() ggplot(toydata, aes(x = health)) + geom_histogram() # Examine distribution of score by group op = par(mar = c(10,3,2,2)) boxplot( formula = health ~ group, data = toydata, col = "gray90", las = 2) par(op) ggplot(toydata, aes(x = health)) + geom_histogram(fill = "orange") ggplot(toydata, aes(x = group, y = health, fill = group)) + geom_boxplot() + theme(axis.text.x = element_text(angle = 70, hjust = 1)) ggplot(toydata, aes(x = group, y = health, fill = group)) + geom_boxplot() + geom_jitter(colour = "gray20", alpha = 0.3) + theme(axis.text.x = element_text(angle = 70, hjust = 1)) # Examine distribution of num ings by group ggplot(toydata, aes(x = num_ings)) + geom_histogram(fill = "orange") ggplot(toydata, aes(x = group, y = num_ings, fill = group)) + geom_boxplot() + geom_jitter(colour = "gray20", alpha = 0.3) + theme(axis.text.x = element_text(angle = 70, hjust = 1)) # average health score by group ddply( toydata, .(group), summarize, num = length(group), avg_score = mean(health, na.rm=TRUE), avg_ings = mean(num_ings, na.rm = TRUE), has_high = round(100 * sum(high_concern, na.rm=TRUE) / length(group), 2), has_medium = round(100 * sum(medium_concern, na.rm=TRUE)/ length(group), 2), has_low = round(100 * sum(low_concern, na.rm=TRUE) / length(group), 2), avg_high = mean(high_num), avg_medium = mean(medium_num), avg_low = mean(low_num) ) # ============================================================= # Regression Tree # ============================================================= library(rpart) library(rpart.plot) summary(toydata$num_ings) table(toydata$group) summary(toydata$fragrance) table(toydata$has_high) # 70% training set.seed = 22222 training = sample( x = 1:nrow(toydata), size = ceiling(0.70 * nrow(toydata))) # 30% testing testing = setdiff(1:nrow(toydata), training) # 
regression tree model my_tree = rpart( health ~ num_ings + fragrance + has_high + group, data = toydata[testing,], method = "class" ) plot(my_tree) text(my_tree, use.n = TRUE) # regression tree model my_model = rpart( health ~ num_ings + fragrance + has_high + group, data = toydata[testing,] ) plot(my_model) text(my_model, use.n = TRUE) prp(my_model) rsq.rpart(my_model) health_pred = predict(my_model, toydata[testing,]) table(health_pred, toydata$health[testing]) [,1] [,2] [1,] "baby-lotion" "a" [2,] "baby-shampoo" "b" [3,] "baby-soap-bath" "c" [4,] "baby-sunscreen" "d" [5,] "baby-wipes" "e" [6,] "bubble-bath" "f" [7,] "dental-floss" "g" [8,] "deodorants-antiperspirants-mens" "h" [9,] "deodorants-antiperspirants-womens" "i" [10,] "feminine-moisturizer" "j" [11,] "feminine-powder_deodorant" "k" [12,] "fragrance-for-men" "l" [13,] "fragrance-for-women" "m" [14,] "mouthwash" "n" [15,] "personal-cleansing" "o" [16,] "shampoo" "p" [17,] "soap" "q" [18,] "toothpaste" "r" # ============================================================= # Random Forest # ============================================================= install.packages("randomForest") library(randomForest) toydata$group = factor(toydata$group) my_forest = randomForest( x = toydata[,c(3,5,6,7)], y = toydata$health) importance(my_forest) # ============================================================= # Levels of concern # ============================================================= b = get_ingredients(tooth$high) # which contan triclosan which_triclosan = unlist(lapply(b, function(x) x == "triclosan")) tooth$name[which_triclosan] num_ings[which_triclosan] mean(num_ings[which_triclosan]) count_level(toydata$high) count_level(toydata$medium) count_level(toydata$low) # proportion of ingredients in toothpastes based on level of concern 100 * count_level(tooth$high) / nrow(tooth) 100 * count_level(tooth$medium) / nrow(tooth) 100 * count_level(tooth$low) / nrow(tooth) df1$num_ings[] hist(df1$num_ings) names(tooth) 
mean(tooth$health, na.rm = TRUE) median(tooth$health, na.rm = TRUE) table(tooth$health) summary(df1$num_ings[df1$hscore <= 2]) summary(df1$num_ings[df1$hscore > 2 & df1$hscore <= 6]) summary(df1$num_ings[df1$hscore > 6])
library(shiny) shinyServer(function(input, output) { projectRetirement <- reactive({ yearsObserving = input$n.obs # what period of time do we want to look at? monthsObserving = 12 * yearsObserving ageNow = input$age.now delayYears = input$years.wait retireAgeYears = ageNow + delayYears ageMonths= ageNow * 12 retireAgeMonths = retireAgeYears * 12 numSims = input$n.sim # how many simulations do we want to perform? liquidN401Ks = input$liquid.n401Ks totalPension = input$total.pension numPensionPayouts = input$number.increments pensionPayout = totalPension/numPensionPayouts # PENSION MATRIX if ( (numPensionPayouts > 0) & (totalPension > 0) ) { pensionMatrix = matrix(0,1,numSims) #matrix w bogus row1 for (j in 1:numPensionPayouts) { pensionMatrixTmp = matrix(j*pensionPayout, monthsObserving/numPensionPayouts, numSims) pensionMatrix = rbind(pensionMatrix,pensionMatrixTmp) } if (monthsObserving %% numPensionPayouts != 0) { #to avoid having pension drop out at end due to modulo pensionMatrixTmp = matrix(numPensionPayouts*pensionPayout, monthsObserving %% numPensionPayouts, numSims) pensionMatrix = rbind(pensionMatrix, pensionMatrixTmp) } pensionMatrix = pensionMatrix[-(1),] #remove bogus row1 } else { # no pension pensionMatrix = matrix(0,monthsObserving, numSims) } monthlyWithdrawals = input$monthly.withdrawals ageSeq = seq(from=ageMonths, by=1, length.out=monthsObserving) ageVec = matrix(ageSeq) ageVecYears = ageVec/12 ssAmount = input$social.security ssStartYear = input$social.security.start ssStartMonth = ssStartYear * 12 ssStartDelta = ssStartMonth - ageMonths if (ssStartDelta < 0 ) { ssStartDelta = 0 } # not dealing with negative time ssMatrixA = matrix(0, ssStartDelta, numSims) # two matrices - one before SS starts ssMatrixB = matrix(ssAmount, (monthsObserving-ssStartDelta), numSims) # one matrix for social security time ssMatrix = rbind(ssMatrixA, ssMatrixB) yearlyCapitalContribs = input$capital.contribs yearsContributing2capital = input$years.contributing if ( 
(yearlyCapitalContribs > 0) & (yearsContributing2capital > 0) ) { #assuming that capital contribution time finite monthlyCapitalContribs = yearlyCapitalContribs / 12 monthsContributing2capital = yearsContributing2capital * 12 capitalContribMatrixA = matrix(monthlyCapitalContribs, monthsContributing2capital, numSims) capitalContribMatrixB = matrix(0, (monthsObserving-monthsContributing2capital), numSims) capitalContribMatrix = rbind(capitalContribMatrixA, capitalContribMatrixB) } else { capitalContribMatrix = matrix(0, monthsObserving, numSims) } startCapital = pensionMatrix + liquidN401Ks + ssMatrix + capitalContribMatrix # monthly Investment and Inflation assumptions annualMeanReturn = input$annual.mean.return/100 monthlyReturnMean = annualMeanReturn / 12 annualReturnStdDev = input$annual.ret.std.dev/100 monthlyReturnStdDev = annualReturnStdDev / sqrt(12) # simulate Returns investReturnsMatrix = matrix(0, monthsObserving, numSims) investReturnsMatrix[] = rnorm(monthsObserving * numSims, mean = monthlyReturnMean, sd = monthlyReturnStdDev) annualInflation = input$annual.inflation/100 monthlyInflation = annualInflation / 12 annualInflationStdDev = input$annual.inf.std.dev/100 monthlyInflationStdDev = annualInflationStdDev / sqrt(12) # simulate effect of inflation inflationMatrix = matrix(0, monthsObserving, numSims) inflationMatrix[] = rnorm(monthsObserving * numSims, mean = monthlyInflation, sd = monthlyInflationStdDev) nav = startCapital for (j in 1:(monthsObserving-1)) { startCapital[j + 1, ] = startCapital[j, ] * (1 + investReturnsMatrix[j, ] - inflationMatrix[j, ]) - monthlyWithdrawals #nav[j , ] = nav[j , ] + startCapital/input$number.increments } #nav = nav[-(monthsObserving+1) , ] # remove that last row we added in #for (j in 1:input$number.increments*12) { #if (j %% 12 == 0) { #} #} startCapital[ startCapital < 0 ] = NA # once nav is below 0 => run out of money Retirement = startCapital / 1000000 # convert to millions Retirement=cbind(ageVecYears,Retirement) 
#output$documentationText = renderText({"Adjust the slider bars to reflect the retirement scenario you wish to simulate."}) output$documentationText = renderText({'... projecting retirement assets over time ...\n'}) output$sourceText = renderText({"Idea and original code by Pierre Chretien, updated by Michael Kapler, and then Lin Crampton. Source at https://github.com/lincrampton/time2retire. Comments/complaints to lin.crampton@gmail.com"}) return(Retirement) }) output$distPlot <- renderPlot({ Retirement = projectRetirement() layout(matrix(c(1,2,1,3),2,2)) matplot(Retirement[ , 1], Retirement[ , -1 ], type = 'l', las = 1, ylab='Millions', xlab='Age') }) })
/server.r
no_license
dmoliveira/devdataprod
R
false
false
4,820
r
library(shiny) shinyServer(function(input, output) { projectRetirement <- reactive({ yearsObserving = input$n.obs # what period of time do we want to look at? monthsObserving = 12 * yearsObserving ageNow = input$age.now delayYears = input$years.wait retireAgeYears = ageNow + delayYears ageMonths= ageNow * 12 retireAgeMonths = retireAgeYears * 12 numSims = input$n.sim # how many simulations do we want to perform? liquidN401Ks = input$liquid.n401Ks totalPension = input$total.pension numPensionPayouts = input$number.increments pensionPayout = totalPension/numPensionPayouts # PENSION MATRIX if ( (numPensionPayouts > 0) & (totalPension > 0) ) { pensionMatrix = matrix(0,1,numSims) #matrix w bogus row1 for (j in 1:numPensionPayouts) { pensionMatrixTmp = matrix(j*pensionPayout, monthsObserving/numPensionPayouts, numSims) pensionMatrix = rbind(pensionMatrix,pensionMatrixTmp) } if (monthsObserving %% numPensionPayouts != 0) { #to avoid having pension drop out at end due to modulo pensionMatrixTmp = matrix(numPensionPayouts*pensionPayout, monthsObserving %% numPensionPayouts, numSims) pensionMatrix = rbind(pensionMatrix, pensionMatrixTmp) } pensionMatrix = pensionMatrix[-(1),] #remove bogus row1 } else { # no pension pensionMatrix = matrix(0,monthsObserving, numSims) } monthlyWithdrawals = input$monthly.withdrawals ageSeq = seq(from=ageMonths, by=1, length.out=monthsObserving) ageVec = matrix(ageSeq) ageVecYears = ageVec/12 ssAmount = input$social.security ssStartYear = input$social.security.start ssStartMonth = ssStartYear * 12 ssStartDelta = ssStartMonth - ageMonths if (ssStartDelta < 0 ) { ssStartDelta = 0 } # not dealing with negative time ssMatrixA = matrix(0, ssStartDelta, numSims) # two matrices - one before SS starts ssMatrixB = matrix(ssAmount, (monthsObserving-ssStartDelta), numSims) # one matrix for social security time ssMatrix = rbind(ssMatrixA, ssMatrixB) yearlyCapitalContribs = input$capital.contribs yearsContributing2capital = input$years.contributing if ( 
(yearlyCapitalContribs > 0) & (yearsContributing2capital > 0) ) { #assuming that capital contribution time finite monthlyCapitalContribs = yearlyCapitalContribs / 12 monthsContributing2capital = yearsContributing2capital * 12 capitalContribMatrixA = matrix(monthlyCapitalContribs, monthsContributing2capital, numSims) capitalContribMatrixB = matrix(0, (monthsObserving-monthsContributing2capital), numSims) capitalContribMatrix = rbind(capitalContribMatrixA, capitalContribMatrixB) } else { capitalContribMatrix = matrix(0, monthsObserving, numSims) } startCapital = pensionMatrix + liquidN401Ks + ssMatrix + capitalContribMatrix # monthly Investment and Inflation assumptions annualMeanReturn = input$annual.mean.return/100 monthlyReturnMean = annualMeanReturn / 12 annualReturnStdDev = input$annual.ret.std.dev/100 monthlyReturnStdDev = annualReturnStdDev / sqrt(12) # simulate Returns investReturnsMatrix = matrix(0, monthsObserving, numSims) investReturnsMatrix[] = rnorm(monthsObserving * numSims, mean = monthlyReturnMean, sd = monthlyReturnStdDev) annualInflation = input$annual.inflation/100 monthlyInflation = annualInflation / 12 annualInflationStdDev = input$annual.inf.std.dev/100 monthlyInflationStdDev = annualInflationStdDev / sqrt(12) # simulate effect of inflation inflationMatrix = matrix(0, monthsObserving, numSims) inflationMatrix[] = rnorm(monthsObserving * numSims, mean = monthlyInflation, sd = monthlyInflationStdDev) nav = startCapital for (j in 1:(monthsObserving-1)) { startCapital[j + 1, ] = startCapital[j, ] * (1 + investReturnsMatrix[j, ] - inflationMatrix[j, ]) - monthlyWithdrawals #nav[j , ] = nav[j , ] + startCapital/input$number.increments } #nav = nav[-(monthsObserving+1) , ] # remove that last row we added in #for (j in 1:input$number.increments*12) { #if (j %% 12 == 0) { #} #} startCapital[ startCapital < 0 ] = NA # once nav is below 0 => run out of money Retirement = startCapital / 1000000 # convert to millions Retirement=cbind(ageVecYears,Retirement) 
#output$documentationText = renderText({"Adjust the slider bars to reflect the retirement scenario you wish to simulate."}) output$documentationText = renderText({'... projecting retirement assets over time ...\n'}) output$sourceText = renderText({"Idea and original code by Pierre Chretien, updated by Michael Kapler, and then Lin Crampton. Source at https://github.com/lincrampton/time2retire. Comments/complaints to lin.crampton@gmail.com"}) return(Retirement) }) output$distPlot <- renderPlot({ Retirement = projectRetirement() layout(matrix(c(1,2,1,3),2,2)) matplot(Retirement[ , 1], Retirement[ , -1 ], type = 'l', las = 1, ylab='Millions', xlab='Age') }) })
hash_image <- function(path) { if(i %% 1000 == 0) print(sprintf("%s: hashed %s images", Sys.time(), i)) i <<- i + 1 hash <- tryCatch({ OpenImageR::phash(OpenImageR::rgb_2gray(jpeg::readJPEG(path)), hash_size = 8, highfreq_factor = 4, MODE = 'hash', resize = "bilinear") }, error = function(e) { NA_character_ }) tibble::tibble( image = basename(path), hash = hash ) }
/shiny_explorer/functions/hash_image.R
no_license
G3rtjan/capstone_iphone_fraud_marktplaats
R
false
false
466
r
hash_image <- function(path) { if(i %% 1000 == 0) print(sprintf("%s: hashed %s images", Sys.time(), i)) i <<- i + 1 hash <- tryCatch({ OpenImageR::phash(OpenImageR::rgb_2gray(jpeg::readJPEG(path)), hash_size = 8, highfreq_factor = 4, MODE = 'hash', resize = "bilinear") }, error = function(e) { NA_character_ }) tibble::tibble( image = basename(path), hash = hash ) }
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/doe.R \name{load_doe_um} \alias{load_doe_um} \title{Parser DOE} \usage{ load_doe_um(a) } \description{ Parser DOE }
/man/load_doe_um.Rd
permissive
abjur/tjspCrim
R
false
false
203
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/doe.R \name{load_doe_um} \alias{load_doe_um} \title{Parser DOE} \usage{ load_doe_um(a) } \description{ Parser DOE }
#--noma2.r #--Elliot Noma on face tube #--http://elliotnoma.wordpress.com/2013/01/22/construct-a-stock-portfolio-using-r/ library (timeSeries) library (quantmod) library (slam) library (fPortfolio) library (plotrix) library (corrplot) source ("c://python_projects//compfi//plot.drawdowns.ggplot2.r") source ("c://python_projects//compfi//multiplot.r") from = "2014-01-01" dir = "folio" get.data = function (symbols, dir = "test", from = "2009-01-01") { ###--return data frame with adjusted close and time series for daily returns ###--returns only symbols with data for all dates ###--one warning per symbol is normal symbols =sort(unique(symbols)) prices = NULL for (symbol in symbols) { print (paste("fetching", symbol)) adj = getSymbols.yahoo(symbol, from = from, verbose=F, auto.assign=F)[,6] colnames(adj) = tolower(symbol) prices = cbind(prices, adj) } symbols = colnames(prices) complete.symbols = NULL dim(prices) head(prices) for (symbol in symbols) { sigma = sum(prices[,symbol]) print (paste(symbol, sigma)) if (! 
is.na(sigma)) complete.symbols = c(complete.symbols, symbol) } print (complete.symbols) print (length(complete.symbols)) prices = prices[complete.cases(prices),] dates = as.Date(row.names(as.data.frame(prices))) summary(dates) summary(prices) prices = interpNA(prices, method = "linear") summary(prices) period = paste(min(rownames(prices)), "through", max(rownames(prices))) x1 = as.timeSeries(tail(prices,-1)) x2 = as.numeric(head(prices, -1)) returns = x1 / x2 - 1 summary(returns) main = paste(dir, min(dates), "-", max(dates)) data = list(prices = prices, returns = returns, dates = dates, symbols = complete.symbols, main = main) } symbol = "fxsix" show.relative.prices = function(data, ylim = c(.75, 1.25)) { #-- can work on finding ylim dynamically prices = data$prices symbols = data$symbols dates = time(prices) par (mfrow = c(2, 3)) for (symbol in symbols) { local = as.numeric(prices[, symbol]) local = local / local[1] plot (dates,local, main = symbol, ylab = "Relative Adj Close", col="blue", type = "s", ylim = ylim) abline (h=1, col = "gray") } } show.return.distributions = function (data, xmax = .04, ymax = 200, precision = 4) { #-- mshould put period in legend returns = data$returns symbols = colnames(as.data.frame(returns)) par (mfrow = c(2,3)) par (oma = c(1,1,3,1)) for (symbol in symbols) { symbol.returns = returns[,symbol] z = density(symbol.returns, na.rm=T) plot (z, main = symbol, xlim = c(-xmax, xmax), ylim = c(0, ymax), col = "blue", lwd = 2, xlab = "") msg.1 = paste("mean =", round(mean(symbol.returns), precision)) msg.2 = paste("sdev =", round(sd(symbol.returns), precision)) legend ("topleft", c(msg.1, msg.2), bty = "n") segments(0, 0, 0, max(z$y), col = "red", lty = 2) } mtext(outer=T, side=3, "Daily Return Densities", cex = 1.5, col = "blue") } crunch = function (data) { prices = data$prices returns = data$returns main = data$main symbols = data$symbols dates = data$dates msg.1 = main msg.2 = paste(min(dates), "through", max(dates)) returns = 
returns[complete.cases(returns),] ###--now redundant dim(returns) frontier = portfolioFrontier(data=returns) returns = as.data.frame(data$returns) plot(frontier, 1, main = main) legend ("bottomright", c(msg.1, msg.2)) getStatistics(frontier) cor(returns) points = frontierPoints(frontier) annualized.points = data.frame( target.risk = points[,"targetRisk"] * sqrt(252), target.return = points[,"targetReturn"] * 252) plot (annualized.points, main = "Annual Return vs Risk") legend ("bottomright", msg.1) annualized.points risk.free.rate = 0 sharpe = (annualized.points[,"target.return"] - risk.free.rate) / annualized.points[,"target.risk"] max.sharpe = max(sharpe) max.sharpe plot ((annualized.points[,"target.return"] - risk.free.rate) / annualized.points[,"target.risk"], main = "Sharpe Ratios", xlab = "Point on Efficient Frontier", ylab = "Sharpe Ratio") legend ("bottomright", msg.1) allocations = getWeights(frontier@portfolio) colnames(allocations) = tolower(symbols) barplot.main = paste(main, "Optimal Allocations") epsilon = 0.005 #--half a percent z = t(allocations) z = z[rowSums(z) > epsilon,] z names = rep("", 49) at = c(1, 10, 20, 30, 40) for (k in at) names[k] = as.character(k) n.symbols = nrow(z) barplot(z, col= rainbow(n.symbols), legend = rownames(z), main = barplot.main, xlab = "Portfolio Weights vs Index") getMu (frontier) export = cbind(allocations, annualized.points, sharpe) export } barp.allocation = function(allocations, index) { ###--show a Pareto bar plot of recommended weights for a portfolio selected by index main = paste("Index =", index) allocation = allocations[index,] target.return = allocation$target.return sharpe = allocation$sharpe allocation = as.data.frame( t (allocation[,tolower(symbols)])) msg.sharpe = paste("Sharpe =", round(sharpe,2)) allocation$symbol = rownames(allocation) colnames(allocation)[1] = "weight" allocation = allocation[order(- allocation$weight),] allocation = allocation[allocation$weight > 0,] allocation$weight = 100 * 
allocation$weight msg.0 = paste("index =", index) msg.1 = paste("exp rtn =", round(100*target.return), "%") barp(allocation$weight, names.arg = allocation$symbol, main = main, ylab = "Weight (%)") legend("topright", legend = c(msg.0, msg.1, msg.sharpe), col = "blue", cex = .9) text (1:nrow(allocation), 10, paste (round(allocation$weight), "%"), col = "blue") } plot.composite = function (data, allocations, index, ylim = NA) { ###--plot value of optimal portfolio[index] symbol = colnames(allocations)[1] symbols = colnames(allocations)[1:(ncol(allocations)-3)] returns = data$returns dates = as.Date(rownames(data$returns[,symbol])) df = data.frame(j = 1:length(dates), date = dates) principal = 1000 for (symbol in symbols) df[1,symbol] = allocations[index,symbol] * 1000 for (j in 2:nrow(df)) { date = dates[j] for (symbol in symbols) { #if (j <= 5) print (paste(j, date, symbol, "return =", returns[,symbol][j])) old = df[,symbol][df$date == dates[j-1]] new = old * (1 + returns[,symbol][j]) df[,symbol][df$date==date] = new #if (j <= 5) print (paste(j, date, symbol, "new =", new)) } } df$total = 0 for (symbol in symbols) df$total = df$total + df[,symbol] if (! 
complete.cases(ylim)[1]) ylim = range(df$total) plot (df$date, df$total, main = paste("Index =", index), type = "s", xlab = "", ylab = "NAV", ylim = ylim) segments(min(dates), 1000, max(dates), 1000, col = "gray") msg.1 = paste("target annual return =", round(100 * allocations$target.return[index],1), "%") msg.2 = paste("Sharpe ratio =", round(allocations$sharpe[index], 2)) legend ("topleft", c(msg.1, msg.2), col = "blue") } plot.all.relative = function(data) { prices = data$prices dates = data$dates symbols = data$symbols relative = prices for (symbol in symbols) relative[,symbol] = relative[,symbol] / relative[1, symbol] head(prices) head(relative) par(mfrow = c(1,1)) palette = rainbow(length(symbols)) plot (dates, relative[,1], main = data$main, ylim = c(.8, 1.3), col = palette[1]) for (k in 2:length(symbols)) lines (dates, relative[,k], col =palette[k]) legend ("topleft", symbols, lty=1, col = palette) dates = data$dates } barp.returns = function(data) { #--pareto chart of returns over period #--may need to drop columns in prices df = data.frame(symbols = data$symbols) df$symbols = as.character(df$symbols) symbols = data$symbols str(df) df$return = t((tail(data$prices,1) - head(data$prices,1)) / head(data$prices,1)) df$return = as.numeric(df$return) str(df) df df = df[order(-df$return),] lo = round(min(df$return),2) - .01 hi = round(max(df$return),2) + .01 ylim = c(lo, hi) ylim symbols = as.character(df$symbols) barp (df$return, main = data$main, ylim = ylim, names.arg = symbols) abline (h = seq (-2, 2, by = .05), col = "gray") } #----------------------------------------------------------------------------------------------------- index = 25 get.symbols = function(dir) { dir = tolower(dir) setwd ("c://python_projects//compfi") setwd (dir) if (dir == "jeff") symbols = c("ABEMX", "OAKIX", "OAKMX") if (dir == "folio") symbols = c("ANN", "ADKSX", "ARMH", "BMO", "BOH", "DVY", "EAT", "ENB", "EWC", "EWS", "HE", "IYR", "JWN", "LQD", "POT", "THI", "TNH", "TRP", "TU") if 
(dir == "mitre") symbols = c("FCNKX", "AMANX", "FAGIX", "FICDX", "FXSIX", "FDIKX") if (dir == "tsp") symbols = c("VFINX", "VEXMX","VDMIX","VBMFX") if (dir == "cyndi") symbols = c("VWINX", "VWELX", "VFINX", "ADKSX", "MQIFX") if (dir == "smm_jpm_agg") symbols = c("ECON","XLV","TLT","IEI","LQD","IEMG","MBB","EWC","EZU", "EPP","EWD","EWL","EWU","IBB","TIP","EMLC","PCY","PGX","XLF","JNK","BWX","VCR", "VDC","VDE","VIS","VGT","BIV","VAW","BSV","VOX","DXJ") if (dir == "cyndi") { symbols = c("adksx", "gsra", "hyls", "vlu", "tsla") symbols = unique(symbols) length(symbols) } symbols } show.composites = function() { ###--slow. Mya need to run twice to get ylims reasonable par (mfrow = c(2,2)) for (index in seq(15, 45, by = 10)) { print (index) plot.composite (data=data, allocations=allocations, index=index, ylim = c(0, 7500)) } mtext (side=3, outer=T, "Composite Reconstructions", cex=1.5, col = "blue") } get.smm.weights = function() { weights = read.csv("weights_20140321.csv") colnames(weights)[3] = "symbol" weights$weight = weights$Allocation weights$symbol = tolower(weights$symbol) weights$Allocation = NULL weights = weights[complete.cases(weights),] head(weights) row = data.frame(index=1) for (symbol in weights$symbol) { w = weights$weight[weights$symbol == symbol] row[,symbol] = w } row } } score.custom.weights = function(data, weights) { dates = tail(data$dates, -1) #--there are no returns on day 1 returns = as.data.frame(data$returns) returns$date = dates composite = data.frame(date = dates, return = NA) for (date in dates) { date = as.Date(date) daily.return = 0 for (symbol in symbols) { w = weights[,symbol] r = returns[,symbol][returns$date == date] this.return = w * r daily.return = daily.return + this.return } composite$return[composite$date == date] = daily.return } m = mean(composite$return) s = sd(composite$return) sharpe = sqrt(252) * m / s portfolio.period.return = 1 for (i in composite$return) portfolio.period.return = portfolio.period.return * ( 1 + i) 
interval = max(dates) - min(dates) + 1 years = as.numeric(interval) / 365.25 arr = ( portfolio.period.return) ^ (1 / years) - 1 stat = data.frame (n = length(composite$return), mean = m, sd = s, sharpe = sharpe, arr = arr) print (stat) stat } custom.report = function (allocations) { smm.weights = get.smm.weights() weights = smm.weights row = score.custom.weights(data, smm.weights) results = row n = dim(allocations)[1] for (index in 1:n) { allocation = allocations[index,] row = score.custom.weights(data, allocation) results = rbind(results, row) } results par(mfrow = c(2,2)) plot (results$sd, main = "risk") points (1, results$sd[1], col = "red", pch = 19) plot (results$arr, main = "arr") points (1, results$arr[1], col = "red", pch = 19) plot (results$sd, results$arr, main = "arr vs risk") points (results$sd[1], results$arr[1], pch = 19, col = "red", xlab = "risk = sd of daily returns", ylab = "ARR") plot (results$sharpe, main = "Sharpe Ratios", xlab = "index") points(1, results$sharpe[1], pch = 19, col = "red") } show.multiplot.drawdown = function (data, plots.per.page = 8, ylim = c(.8, 1.2)) { #--plot drawdowns for each symbol, multiple plots per pag source ("c://python_projects//compfi//plot.drawdowns.ggplot2.r") source ("c://python_projects//compfi//multiplot.r") plots = list() index = 0 for (symbol in data$symbols) { index = index + 1 print (symbol) p = plot.drawdowns(data, symbol, ylim = ylim) plots [[symbol]] = p if ((index %% plots.per.page == 0) | (index == length(symbols))) { multiplot (plotlist = plots, cols = 2) plots = list() } } } #-------------------------------------------symbols---------------------------------------------------------- #--need to make symbols available: maybe always lower case with a toupper in get.data main = "your main here" #symbols = c(symbols, "SPY") setwd ("c://python_projects//compfi") dir = "cyndi" symbols = get.symbols(dir) data = get.data(symbols, dir, from = "2014-03-31") symbols data$main prices = data$prices 
range(data$dates) symbols = data$symbols #--prices have no NAs symbols show.relative.prices (data, ylim = c(.90, 1.15)) show.return.distributions(data, ymax = 200, xmax = .03) par(mfrow = c(1,1)) barp.returns(data) par (mfrow = c(2,2)) #--maybe worry if all returns are neagtive??? allocations = crunch (data) par (mar = c(4,3,2,2)) par (mfrow = c(1,1)) plot (1:dim(allocations)[1], allocations$sharpe, main = "Sharpe vs Index", xlab = "Index", ylab = "Sharpe Ratio") par (mfrow = c(2,2)) par (oma = c(1,1,3,1)) for (index in c(1, 2, 5, 10, 15,20, 25,30, 35,40, 45, 49)) barp.allocation (allocations = allocations, index=index) mtext(outer=T, side=3, main, cex =1.75) #--show.composites() dates = data$dates msg.2 = paste(min(dates), "through", max(dates)) fname = paste(data$main, "csv", sep = ".") fname write.csv (allocations, fname) fname
/compfi-master/noma2.r
no_license
localperf/compfi
R
false
false
14,478
r
#--noma2.r #--Elliot Noma on face tube #--http://elliotnoma.wordpress.com/2013/01/22/construct-a-stock-portfolio-using-r/ library (timeSeries) library (quantmod) library (slam) library (fPortfolio) library (plotrix) library (corrplot) source ("c://python_projects//compfi//plot.drawdowns.ggplot2.r") source ("c://python_projects//compfi//multiplot.r") from = "2014-01-01" dir = "folio" get.data = function (symbols, dir = "test", from = "2009-01-01") { ###--return data frame with adjusted close and time series for daily returns ###--returns only symbols with data for all dates ###--one warning per symbol is normal symbols =sort(unique(symbols)) prices = NULL for (symbol in symbols) { print (paste("fetching", symbol)) adj = getSymbols.yahoo(symbol, from = from, verbose=F, auto.assign=F)[,6] colnames(adj) = tolower(symbol) prices = cbind(prices, adj) } symbols = colnames(prices) complete.symbols = NULL dim(prices) head(prices) for (symbol in symbols) { sigma = sum(prices[,symbol]) print (paste(symbol, sigma)) if (! 
is.na(sigma)) complete.symbols = c(complete.symbols, symbol) } print (complete.symbols) print (length(complete.symbols)) prices = prices[complete.cases(prices),] dates = as.Date(row.names(as.data.frame(prices))) summary(dates) summary(prices) prices = interpNA(prices, method = "linear") summary(prices) period = paste(min(rownames(prices)), "through", max(rownames(prices))) x1 = as.timeSeries(tail(prices,-1)) x2 = as.numeric(head(prices, -1)) returns = x1 / x2 - 1 summary(returns) main = paste(dir, min(dates), "-", max(dates)) data = list(prices = prices, returns = returns, dates = dates, symbols = complete.symbols, main = main) } symbol = "fxsix" show.relative.prices = function(data, ylim = c(.75, 1.25)) { #-- can work on finding ylim dynamically prices = data$prices symbols = data$symbols dates = time(prices) par (mfrow = c(2, 3)) for (symbol in symbols) { local = as.numeric(prices[, symbol]) local = local / local[1] plot (dates,local, main = symbol, ylab = "Relative Adj Close", col="blue", type = "s", ylim = ylim) abline (h=1, col = "gray") } } show.return.distributions = function (data, xmax = .04, ymax = 200, precision = 4) { #-- mshould put period in legend returns = data$returns symbols = colnames(as.data.frame(returns)) par (mfrow = c(2,3)) par (oma = c(1,1,3,1)) for (symbol in symbols) { symbol.returns = returns[,symbol] z = density(symbol.returns, na.rm=T) plot (z, main = symbol, xlim = c(-xmax, xmax), ylim = c(0, ymax), col = "blue", lwd = 2, xlab = "") msg.1 = paste("mean =", round(mean(symbol.returns), precision)) msg.2 = paste("sdev =", round(sd(symbol.returns), precision)) legend ("topleft", c(msg.1, msg.2), bty = "n") segments(0, 0, 0, max(z$y), col = "red", lty = 2) } mtext(outer=T, side=3, "Daily Return Densities", cex = 1.5, col = "blue") } crunch = function (data) { prices = data$prices returns = data$returns main = data$main symbols = data$symbols dates = data$dates msg.1 = main msg.2 = paste(min(dates), "through", max(dates)) returns = 
returns[complete.cases(returns),] ###--now redundant dim(returns) frontier = portfolioFrontier(data=returns) returns = as.data.frame(data$returns) plot(frontier, 1, main = main) legend ("bottomright", c(msg.1, msg.2)) getStatistics(frontier) cor(returns) points = frontierPoints(frontier) annualized.points = data.frame( target.risk = points[,"targetRisk"] * sqrt(252), target.return = points[,"targetReturn"] * 252) plot (annualized.points, main = "Annual Return vs Risk") legend ("bottomright", msg.1) annualized.points risk.free.rate = 0 sharpe = (annualized.points[,"target.return"] - risk.free.rate) / annualized.points[,"target.risk"] max.sharpe = max(sharpe) max.sharpe plot ((annualized.points[,"target.return"] - risk.free.rate) / annualized.points[,"target.risk"], main = "Sharpe Ratios", xlab = "Point on Efficient Frontier", ylab = "Sharpe Ratio") legend ("bottomright", msg.1) allocations = getWeights(frontier@portfolio) colnames(allocations) = tolower(symbols) barplot.main = paste(main, "Optimal Allocations") epsilon = 0.005 #--half a percent z = t(allocations) z = z[rowSums(z) > epsilon,] z names = rep("", 49) at = c(1, 10, 20, 30, 40) for (k in at) names[k] = as.character(k) n.symbols = nrow(z) barplot(z, col= rainbow(n.symbols), legend = rownames(z), main = barplot.main, xlab = "Portfolio Weights vs Index") getMu (frontier) export = cbind(allocations, annualized.points, sharpe) export } barp.allocation = function(allocations, index) { ###--show a Pareto bar plot of recommended weights for a portfolio selected by index main = paste("Index =", index) allocation = allocations[index,] target.return = allocation$target.return sharpe = allocation$sharpe allocation = as.data.frame( t (allocation[,tolower(symbols)])) msg.sharpe = paste("Sharpe =", round(sharpe,2)) allocation$symbol = rownames(allocation) colnames(allocation)[1] = "weight" allocation = allocation[order(- allocation$weight),] allocation = allocation[allocation$weight > 0,] allocation$weight = 100 * 
allocation$weight msg.0 = paste("index =", index) msg.1 = paste("exp rtn =", round(100*target.return), "%") barp(allocation$weight, names.arg = allocation$symbol, main = main, ylab = "Weight (%)") legend("topright", legend = c(msg.0, msg.1, msg.sharpe), col = "blue", cex = .9) text (1:nrow(allocation), 10, paste (round(allocation$weight), "%"), col = "blue") } plot.composite = function (data, allocations, index, ylim = NA) { ###--plot value of optimal portfolio[index] symbol = colnames(allocations)[1] symbols = colnames(allocations)[1:(ncol(allocations)-3)] returns = data$returns dates = as.Date(rownames(data$returns[,symbol])) df = data.frame(j = 1:length(dates), date = dates) principal = 1000 for (symbol in symbols) df[1,symbol] = allocations[index,symbol] * 1000 for (j in 2:nrow(df)) { date = dates[j] for (symbol in symbols) { #if (j <= 5) print (paste(j, date, symbol, "return =", returns[,symbol][j])) old = df[,symbol][df$date == dates[j-1]] new = old * (1 + returns[,symbol][j]) df[,symbol][df$date==date] = new #if (j <= 5) print (paste(j, date, symbol, "new =", new)) } } df$total = 0 for (symbol in symbols) df$total = df$total + df[,symbol] if (! 
complete.cases(ylim)[1]) ylim = range(df$total) plot (df$date, df$total, main = paste("Index =", index), type = "s", xlab = "", ylab = "NAV", ylim = ylim) segments(min(dates), 1000, max(dates), 1000, col = "gray") msg.1 = paste("target annual return =", round(100 * allocations$target.return[index],1), "%") msg.2 = paste("Sharpe ratio =", round(allocations$sharpe[index], 2)) legend ("topleft", c(msg.1, msg.2), col = "blue") } plot.all.relative = function(data) { prices = data$prices dates = data$dates symbols = data$symbols relative = prices for (symbol in symbols) relative[,symbol] = relative[,symbol] / relative[1, symbol] head(prices) head(relative) par(mfrow = c(1,1)) palette = rainbow(length(symbols)) plot (dates, relative[,1], main = data$main, ylim = c(.8, 1.3), col = palette[1]) for (k in 2:length(symbols)) lines (dates, relative[,k], col =palette[k]) legend ("topleft", symbols, lty=1, col = palette) dates = data$dates } barp.returns = function(data) { #--pareto chart of returns over period #--may need to drop columns in prices df = data.frame(symbols = data$symbols) df$symbols = as.character(df$symbols) symbols = data$symbols str(df) df$return = t((tail(data$prices,1) - head(data$prices,1)) / head(data$prices,1)) df$return = as.numeric(df$return) str(df) df df = df[order(-df$return),] lo = round(min(df$return),2) - .01 hi = round(max(df$return),2) + .01 ylim = c(lo, hi) ylim symbols = as.character(df$symbols) barp (df$return, main = data$main, ylim = ylim, names.arg = symbols) abline (h = seq (-2, 2, by = .05), col = "gray") } #----------------------------------------------------------------------------------------------------- index = 25 get.symbols = function(dir) { dir = tolower(dir) setwd ("c://python_projects//compfi") setwd (dir) if (dir == "jeff") symbols = c("ABEMX", "OAKIX", "OAKMX") if (dir == "folio") symbols = c("ANN", "ADKSX", "ARMH", "BMO", "BOH", "DVY", "EAT", "ENB", "EWC", "EWS", "HE", "IYR", "JWN", "LQD", "POT", "THI", "TNH", "TRP", "TU") if 
(dir == "mitre") symbols = c("FCNKX", "AMANX", "FAGIX", "FICDX", "FXSIX", "FDIKX") if (dir == "tsp") symbols = c("VFINX", "VEXMX","VDMIX","VBMFX") if (dir == "cyndi") symbols = c("VWINX", "VWELX", "VFINX", "ADKSX", "MQIFX") if (dir == "smm_jpm_agg") symbols = c("ECON","XLV","TLT","IEI","LQD","IEMG","MBB","EWC","EZU", "EPP","EWD","EWL","EWU","IBB","TIP","EMLC","PCY","PGX","XLF","JNK","BWX","VCR", "VDC","VDE","VIS","VGT","BIV","VAW","BSV","VOX","DXJ") if (dir == "cyndi") { symbols = c("adksx", "gsra", "hyls", "vlu", "tsla") symbols = unique(symbols) length(symbols) } symbols } show.composites = function() { ###--slow. Mya need to run twice to get ylims reasonable par (mfrow = c(2,2)) for (index in seq(15, 45, by = 10)) { print (index) plot.composite (data=data, allocations=allocations, index=index, ylim = c(0, 7500)) } mtext (side=3, outer=T, "Composite Reconstructions", cex=1.5, col = "blue") } get.smm.weights = function() { weights = read.csv("weights_20140321.csv") colnames(weights)[3] = "symbol" weights$weight = weights$Allocation weights$symbol = tolower(weights$symbol) weights$Allocation = NULL weights = weights[complete.cases(weights),] head(weights) row = data.frame(index=1) for (symbol in weights$symbol) { w = weights$weight[weights$symbol == symbol] row[,symbol] = w } row } } score.custom.weights = function(data, weights) { dates = tail(data$dates, -1) #--there are no returns on day 1 returns = as.data.frame(data$returns) returns$date = dates composite = data.frame(date = dates, return = NA) for (date in dates) { date = as.Date(date) daily.return = 0 for (symbol in symbols) { w = weights[,symbol] r = returns[,symbol][returns$date == date] this.return = w * r daily.return = daily.return + this.return } composite$return[composite$date == date] = daily.return } m = mean(composite$return) s = sd(composite$return) sharpe = sqrt(252) * m / s portfolio.period.return = 1 for (i in composite$return) portfolio.period.return = portfolio.period.return * ( 1 + i) 
interval = max(dates) - min(dates) + 1 years = as.numeric(interval) / 365.25 arr = ( portfolio.period.return) ^ (1 / years) - 1 stat = data.frame (n = length(composite$return), mean = m, sd = s, sharpe = sharpe, arr = arr) print (stat) stat } custom.report = function (allocations) { smm.weights = get.smm.weights() weights = smm.weights row = score.custom.weights(data, smm.weights) results = row n = dim(allocations)[1] for (index in 1:n) { allocation = allocations[index,] row = score.custom.weights(data, allocation) results = rbind(results, row) } results par(mfrow = c(2,2)) plot (results$sd, main = "risk") points (1, results$sd[1], col = "red", pch = 19) plot (results$arr, main = "arr") points (1, results$arr[1], col = "red", pch = 19) plot (results$sd, results$arr, main = "arr vs risk") points (results$sd[1], results$arr[1], pch = 19, col = "red", xlab = "risk = sd of daily returns", ylab = "ARR") plot (results$sharpe, main = "Sharpe Ratios", xlab = "index") points(1, results$sharpe[1], pch = 19, col = "red") } show.multiplot.drawdown = function (data, plots.per.page = 8, ylim = c(.8, 1.2)) { #--plot drawdowns for each symbol, multiple plots per pag source ("c://python_projects//compfi//plot.drawdowns.ggplot2.r") source ("c://python_projects//compfi//multiplot.r") plots = list() index = 0 for (symbol in data$symbols) { index = index + 1 print (symbol) p = plot.drawdowns(data, symbol, ylim = ylim) plots [[symbol]] = p if ((index %% plots.per.page == 0) | (index == length(symbols))) { multiplot (plotlist = plots, cols = 2) plots = list() } } } #-------------------------------------------symbols---------------------------------------------------------- #--need to make symbols available: maybe always lower case with a toupper in get.data main = "your main here" #symbols = c(symbols, "SPY") setwd ("c://python_projects//compfi") dir = "cyndi" symbols = get.symbols(dir) data = get.data(symbols, dir, from = "2014-03-31") symbols data$main prices = data$prices 
range(data$dates) symbols = data$symbols #--prices have no NAs symbols show.relative.prices (data, ylim = c(.90, 1.15)) show.return.distributions(data, ymax = 200, xmax = .03) par(mfrow = c(1,1)) barp.returns(data) par (mfrow = c(2,2)) #--maybe worry if all returns are neagtive??? allocations = crunch (data) par (mar = c(4,3,2,2)) par (mfrow = c(1,1)) plot (1:dim(allocations)[1], allocations$sharpe, main = "Sharpe vs Index", xlab = "Index", ylab = "Sharpe Ratio") par (mfrow = c(2,2)) par (oma = c(1,1,3,1)) for (index in c(1, 2, 5, 10, 15,20, 25,30, 35,40, 45, 49)) barp.allocation (allocations = allocations, index=index) mtext(outer=T, side=3, main, cex =1.75) #--show.composites() dates = data$dates msg.2 = paste(min(dates), "through", max(dates)) fname = paste(data$main, "csv", sep = ".") fname write.csv (allocations, fname) fname
#' Stacks two or more gtsummary objects #' #' Assists in patching together more complex tables. `tbl_stack()` appends two #' or more `tbl_regression`, `tbl_summary`, `tbl_svysummary`, or `tbl_merge` objects. #' Column attributes, including number formatting and column footnotes, are #' retained from the first passed gtsummary object. #' #' @param tbls List of gtsummary objects #' @param group_header Character vector with table headers where length matches #' the length of `tbls=` #' @inheritParams add_global_p #' @family tbl_summary tools #' @family tbl_svysummary tools #' @family tbl_regression tools #' @family tbl_uvregression tools #' @family tbl_survfit tools #' @seealso [tbl_merge] #' @author Daniel D. Sjoberg #' @export #' @return A `tbl_stack` object #' @examples #' # Example 1 ---------------------------------- #' # stacking two tbl_regression objects #' t1 <- #' glm(response ~ trt, trial, family = binomial) %>% #' tbl_regression( #' exponentiate = TRUE, #' label = list(trt ~ "Treatment (unadjusted)") #' ) #' #' t2 <- #' glm(response ~ trt + grade + stage + marker, trial, family = binomial) %>% #' tbl_regression( #' include = "trt", #' exponentiate = TRUE, #' label = list(trt ~ "Treatment (adjusted)") #' ) #' #' tbl_stack_ex1 <- tbl_stack(list(t1, t2)) #' #' # Example 2 ---------------------------------- #' # stacking two tbl_merge objects #' library(survival) #' t3 <- #' coxph(Surv(ttdeath, death) ~ trt, trial) %>% #' tbl_regression( #' exponentiate = TRUE, #' label = list(trt ~ "Treatment (unadjusted)") #' ) #' #' t4 <- #' coxph(Surv(ttdeath, death) ~ trt + grade + stage + marker, trial) %>% #' tbl_regression( #' include = "trt", #' exponentiate = TRUE, #' label = list(trt ~ "Treatment (adjusted)") #' ) #' #' #' # first merging, then stacking #' row1 <- tbl_merge(list(t1, t3), tab_spanner = c("Tumor Response", "Death")) #' row2 <- tbl_merge(list(t2, t4)) #' tbl_stack_ex2 <- #' tbl_stack(list(row1, row2), group_header = c("Unadjusted Analysis", "Adjusted 
Analysis")) #' @section Example Output: #' \if{html}{Example 1} #' #' \if{html}{\figure{tbl_stack_ex1.png}{options: width=50\%}} #' #' \if{html}{Example 2} #' #' \if{html}{\figure{tbl_stack_ex2.png}{options: width=80\%}} tbl_stack <- function(tbls, group_header = NULL, quiet = NULL) { # setting defaults ----------------------------------------------------------- quiet <- quiet %||% get_theme_element("pkgwide-lgl:quiet") %||% FALSE # input checks --------------------------------------------------------------- # class of tbls if (!inherits(tbls, "list")) { stop("Expecting 'tbls' to be a list, e.g. 'tbls = list(tbl1, tbl2)'") } # checking all inputs are class gtsummary if (!purrr::every(tbls, ~ inherits(.x, "gtsummary"))) { stop("All objects in 'tbls' must be class 'gtsummary'", call. = FALSE) } # if group_header specified, then it must be a vector of same length tbls ---- if (!is.null(group_header) && length(tbls) != length(group_header)) { stop("The length of `tbls=` and `group_header=` must match.", call. = FALSE) } # will return call, and all arguments passed to tbl_stack func_inputs <- as.list(environment()) # stacking tables ------------------------------------------------------------ # the table_body and call_list will be updated with the tbl_stack values results <- list() results$table_body <- purrr::map2_dfr( tbls, seq_along(tbls), function(tbl, id) { # adding a table ID and group header table_body <- pluck(tbl, "table_body") %>% mutate(tbl_id = id) if (!is.null(group_header)) { table_body <- table_body %>% mutate(groupname_col = group_header[id]) } table_body %>% select(any_of(c("groupname_col", "tbl_id")), everything()) } ) # creating table styling ----------------------------------------------------- # print message if column headers, footnotes, etc. 
are different among tbls if (identical(quiet, FALSE)) print_stack_differences(tbls) results$table_styling$header <- map_dfr(tbls, ~ pluck(.x, "table_styling", "header")) %>% group_by(.data$column) %>% filter(dplyr::row_number() == 1) %>% ungroup() # cycle over each of the styling tibbles and stack them in reverse order ----- for (style_type in c("footnote", "footnote_abbrev", "fmt_fun", "text_format", "fmt_missing", "cols_merge")) { results$table_styling[[style_type]] <- map_dfr( rev(seq_along(tbls)), function(i) { df <- tbls[[i]]$table_styling[[style_type]] if ("rows" %in% names(df) && nrow(df) > 0) { # adding tbl_id to the rows specifications, # e.g. data$tbl_id == 1L & .data$row_type != "label" df$rows <- map(df$rows, ~ add_tbl_id_to_quo(.x, tbls[[i]]$table_body, i)) } df %>% mutate_at(vars(any_of(c( "column", "text_interpret", "footnote", "format_type", "symbol" ))), as.character) } ) } # combining rows spec for same column if (nrow(results$table_styling$cols_merge) > 0) { results$table_styling$cols_merge <- results$table_styling$cols_merge %>% tidyr::nest(rows = .data$rows) %>% mutate(rows = map(.data$rows, ~ .x$rows %>% unlist())) results$table_styling$cols_merge$rows <- map( results$table_styling$cols_merge$rows, ~ .x %>% purrr::reduce(function(.x1, .y1) expr(!!.x1 | !!.y1)) ) } # take the first non-NULL element from tbls[[.]] for (style_type in c("caption", "source_note", "horizontal_line_above")) { results$table_styling[[style_type]] <- map(seq_along(tbls), ~ pluck(tbls, .x, "table_styling", style_type)) %>% purrr::reduce(.f = `%||%`) } # adding label for grouping variable, if present ----------------------------- class(results) <- c("tbl_stack", "gtsummary") results <- modify_table_styling( results, any_of("groupname_col"), label = get_theme_element("tbl_stack-str:group_header", default = "**Group**"), align = "left", hide = FALSE ) # returning results ---------------------------------------------------------- results$call_list <- list(tbl_stack = 
match.call()) results$tbls <- tbls results } # function prints changes to column labels and spanning headers print_stack_differences <- function(tbls) { tbl_differences <- purrr::map2_dfr( tbls, seq_len(length(tbls)), ~ pluck(.x, "table_styling", "header") %>% mutate(..tbl_id.. = .y) ) %>% select(.data$..tbl_id.., .data$column, .data$label, .data$spanning_header) %>% tidyr::pivot_longer(cols = c(.data$label, .data$spanning_header)) %>% group_by(.data$column, .data$name) %>% mutate( new_value = .data$value[1], name_fmt = case_when( name == "label" ~ "Column header", name == "spanning_header" ~ "Spanning column header" ) ) %>% filter(.data$new_value != .data$value) %>% ungroup() %>% arrange(.data$name != "label", .data$name_fmt, .data$..tbl_id..) if (nrow(tbl_differences) > 0) { paste( "Column headers among stacked tables differ. Headers from the first table are used.", "Use {.code quiet = TRUE} to supress this message." ) %>% stringr::str_wrap() %>% cli_alert_info() # purrr::pwalk( # list(tbl_differences$name_fmt, tbl_differences$..tbl_id.., # tbl_differences$column, tbl_differences$value, tbl_differences$new_value), # function(name_fmt, ..tbl_id.., column, value, new_value) # cli_alert_success("{name_fmt}, table {..tbl_id..} ({column}): {.field {value}} ---> {.field {new_value}}") # ) } return(invisible()) } add_tbl_id_to_quo <- function(x, table_body, tbl_id) { if (eval_tidy(x, data = table_body) %>% is.null()) { return(x) } # if quosure, add tbl_id if (inherits(x, "quosure")) { return( rlang::quo(.data$tbl_id == !!tbl_id & (!!rlang::f_rhs(x))) %>% structure(.Environment = attr(x, ".Environment")) ) } # if expression, add tbl_id expr(.data$tbl_id == !!tbl_id & (!!x)) }
/R/tbl_stack.R
permissive
eliascrapa/gtsummary
R
false
false
8,274
r
#' Stacks two or more gtsummary objects #' #' Assists in patching together more complex tables. `tbl_stack()` appends two #' or more `tbl_regression`, `tbl_summary`, `tbl_svysummary`, or `tbl_merge` objects. #' Column attributes, including number formatting and column footnotes, are #' retained from the first passed gtsummary object. #' #' @param tbls List of gtsummary objects #' @param group_header Character vector with table headers where length matches #' the length of `tbls=` #' @inheritParams add_global_p #' @family tbl_summary tools #' @family tbl_svysummary tools #' @family tbl_regression tools #' @family tbl_uvregression tools #' @family tbl_survfit tools #' @seealso [tbl_merge] #' @author Daniel D. Sjoberg #' @export #' @return A `tbl_stack` object #' @examples #' # Example 1 ---------------------------------- #' # stacking two tbl_regression objects #' t1 <- #' glm(response ~ trt, trial, family = binomial) %>% #' tbl_regression( #' exponentiate = TRUE, #' label = list(trt ~ "Treatment (unadjusted)") #' ) #' #' t2 <- #' glm(response ~ trt + grade + stage + marker, trial, family = binomial) %>% #' tbl_regression( #' include = "trt", #' exponentiate = TRUE, #' label = list(trt ~ "Treatment (adjusted)") #' ) #' #' tbl_stack_ex1 <- tbl_stack(list(t1, t2)) #' #' # Example 2 ---------------------------------- #' # stacking two tbl_merge objects #' library(survival) #' t3 <- #' coxph(Surv(ttdeath, death) ~ trt, trial) %>% #' tbl_regression( #' exponentiate = TRUE, #' label = list(trt ~ "Treatment (unadjusted)") #' ) #' #' t4 <- #' coxph(Surv(ttdeath, death) ~ trt + grade + stage + marker, trial) %>% #' tbl_regression( #' include = "trt", #' exponentiate = TRUE, #' label = list(trt ~ "Treatment (adjusted)") #' ) #' #' #' # first merging, then stacking #' row1 <- tbl_merge(list(t1, t3), tab_spanner = c("Tumor Response", "Death")) #' row2 <- tbl_merge(list(t2, t4)) #' tbl_stack_ex2 <- #' tbl_stack(list(row1, row2), group_header = c("Unadjusted Analysis", "Adjusted 
Analysis")) #' @section Example Output: #' \if{html}{Example 1} #' #' \if{html}{\figure{tbl_stack_ex1.png}{options: width=50\%}} #' #' \if{html}{Example 2} #' #' \if{html}{\figure{tbl_stack_ex2.png}{options: width=80\%}} tbl_stack <- function(tbls, group_header = NULL, quiet = NULL) { # setting defaults ----------------------------------------------------------- quiet <- quiet %||% get_theme_element("pkgwide-lgl:quiet") %||% FALSE # input checks --------------------------------------------------------------- # class of tbls if (!inherits(tbls, "list")) { stop("Expecting 'tbls' to be a list, e.g. 'tbls = list(tbl1, tbl2)'") } # checking all inputs are class gtsummary if (!purrr::every(tbls, ~ inherits(.x, "gtsummary"))) { stop("All objects in 'tbls' must be class 'gtsummary'", call. = FALSE) } # if group_header specified, then it must be a vector of same length tbls ---- if (!is.null(group_header) && length(tbls) != length(group_header)) { stop("The length of `tbls=` and `group_header=` must match.", call. = FALSE) } # will return call, and all arguments passed to tbl_stack func_inputs <- as.list(environment()) # stacking tables ------------------------------------------------------------ # the table_body and call_list will be updated with the tbl_stack values results <- list() results$table_body <- purrr::map2_dfr( tbls, seq_along(tbls), function(tbl, id) { # adding a table ID and group header table_body <- pluck(tbl, "table_body") %>% mutate(tbl_id = id) if (!is.null(group_header)) { table_body <- table_body %>% mutate(groupname_col = group_header[id]) } table_body %>% select(any_of(c("groupname_col", "tbl_id")), everything()) } ) # creating table styling ----------------------------------------------------- # print message if column headers, footnotes, etc. 
are different among tbls if (identical(quiet, FALSE)) print_stack_differences(tbls) results$table_styling$header <- map_dfr(tbls, ~ pluck(.x, "table_styling", "header")) %>% group_by(.data$column) %>% filter(dplyr::row_number() == 1) %>% ungroup() # cycle over each of the styling tibbles and stack them in reverse order ----- for (style_type in c("footnote", "footnote_abbrev", "fmt_fun", "text_format", "fmt_missing", "cols_merge")) { results$table_styling[[style_type]] <- map_dfr( rev(seq_along(tbls)), function(i) { df <- tbls[[i]]$table_styling[[style_type]] if ("rows" %in% names(df) && nrow(df) > 0) { # adding tbl_id to the rows specifications, # e.g. data$tbl_id == 1L & .data$row_type != "label" df$rows <- map(df$rows, ~ add_tbl_id_to_quo(.x, tbls[[i]]$table_body, i)) } df %>% mutate_at(vars(any_of(c( "column", "text_interpret", "footnote", "format_type", "symbol" ))), as.character) } ) } # combining rows spec for same column if (nrow(results$table_styling$cols_merge) > 0) { results$table_styling$cols_merge <- results$table_styling$cols_merge %>% tidyr::nest(rows = .data$rows) %>% mutate(rows = map(.data$rows, ~ .x$rows %>% unlist())) results$table_styling$cols_merge$rows <- map( results$table_styling$cols_merge$rows, ~ .x %>% purrr::reduce(function(.x1, .y1) expr(!!.x1 | !!.y1)) ) } # take the first non-NULL element from tbls[[.]] for (style_type in c("caption", "source_note", "horizontal_line_above")) { results$table_styling[[style_type]] <- map(seq_along(tbls), ~ pluck(tbls, .x, "table_styling", style_type)) %>% purrr::reduce(.f = `%||%`) } # adding label for grouping variable, if present ----------------------------- class(results) <- c("tbl_stack", "gtsummary") results <- modify_table_styling( results, any_of("groupname_col"), label = get_theme_element("tbl_stack-str:group_header", default = "**Group**"), align = "left", hide = FALSE ) # returning results ---------------------------------------------------------- results$call_list <- list(tbl_stack = 
match.call()) results$tbls <- tbls results } # function prints changes to column labels and spanning headers print_stack_differences <- function(tbls) { tbl_differences <- purrr::map2_dfr( tbls, seq_len(length(tbls)), ~ pluck(.x, "table_styling", "header") %>% mutate(..tbl_id.. = .y) ) %>% select(.data$..tbl_id.., .data$column, .data$label, .data$spanning_header) %>% tidyr::pivot_longer(cols = c(.data$label, .data$spanning_header)) %>% group_by(.data$column, .data$name) %>% mutate( new_value = .data$value[1], name_fmt = case_when( name == "label" ~ "Column header", name == "spanning_header" ~ "Spanning column header" ) ) %>% filter(.data$new_value != .data$value) %>% ungroup() %>% arrange(.data$name != "label", .data$name_fmt, .data$..tbl_id..) if (nrow(tbl_differences) > 0) { paste( "Column headers among stacked tables differ. Headers from the first table are used.", "Use {.code quiet = TRUE} to supress this message." ) %>% stringr::str_wrap() %>% cli_alert_info() # purrr::pwalk( # list(tbl_differences$name_fmt, tbl_differences$..tbl_id.., # tbl_differences$column, tbl_differences$value, tbl_differences$new_value), # function(name_fmt, ..tbl_id.., column, value, new_value) # cli_alert_success("{name_fmt}, table {..tbl_id..} ({column}): {.field {value}} ---> {.field {new_value}}") # ) } return(invisible()) } add_tbl_id_to_quo <- function(x, table_body, tbl_id) { if (eval_tidy(x, data = table_body) %>% is.null()) { return(x) } # if quosure, add tbl_id if (inherits(x, "quosure")) { return( rlang::quo(.data$tbl_id == !!tbl_id & (!!rlang::f_rhs(x))) %>% structure(.Environment = attr(x, ".Environment")) ) } # if expression, add tbl_id expr(.data$tbl_id == !!tbl_id & (!!x)) }
library(shiny) require(rCharts) # Load data processing file source("processData.R") shinyServer( function(input, output) { output$myChart <- renderChart({ plotResults(rankCountriesByYearRange(dataT,input$yearRange[1],input$yearRange[2])) }) output$chartTitle <- renderUI({ if(input$yearRange[1] != input$yearRange[2]) { title <- paste("Top 10 Airport Arrival Countries between ",input$yearRange[1], " and ", input$yearRange[2]) } else { title <- paste("Top 10 Airport Arrival Countries for ",input$yearRange[1]) } h4(title) }) } )
/ShinyApp/server.R
no_license
crusainte/Data-Products
R
false
false
726
r
library(shiny) require(rCharts) # Load data processing file source("processData.R") shinyServer( function(input, output) { output$myChart <- renderChart({ plotResults(rankCountriesByYearRange(dataT,input$yearRange[1],input$yearRange[2])) }) output$chartTitle <- renderUI({ if(input$yearRange[1] != input$yearRange[2]) { title <- paste("Top 10 Airport Arrival Countries between ",input$yearRange[1], " and ", input$yearRange[2]) } else { title <- paste("Top 10 Airport Arrival Countries for ",input$yearRange[1]) } h4(title) }) } )
Ti_lander <- function(a) { # Given thrust a, find the optimal time Ti to fire the rocket # We assume the optimal time is less than Tmax Tmax <- 100 g <- function(Ti) { l.out <- lander(Ti, a) return(100*min(l.out[1], 0) - max(l.out[2], 0)) } Ti <- optimize(g, c(0, Tmax), maximum=T)$maximum return(Ti) }
/inst/resources/scripts/Ti_lander.r
no_license
cran/spuRs
R
false
false
325
r
Ti_lander <- function(a) { # Given thrust a, find the optimal time Ti to fire the rocket # We assume the optimal time is less than Tmax Tmax <- 100 g <- function(Ti) { l.out <- lander(Ti, a) return(100*min(l.out[1], 0) - max(l.out[2], 0)) } Ti <- optimize(g, c(0, Tmax), maximum=T)$maximum return(Ti) }
#twitter ness #libraries library(twitteR) library(RCurl) library(bitops) library(RJSONIO) setwd("C:\\Documents and Settings\\Ty\\My Documents\\twitter test") #step 1 cadillac.tweets = searchTwitter('@Cadillac', n=5000) #step 2 length(cadillac.tweets) class(cadillac.tweets) tweet = cadillac.tweets[[1]] class(tweet) tweet$getScreenName() tweet$getText() #Extracting text cadillac.text = lapply(cadillac.tweets, function(t) t$getText() ) length(cadillac.text) head(cadillac.text, 5) class(cadillac.text) #issue with scoring, It won't read the lexicon file... you have to read that from #the website, they haven't made a package for it yet #load file opin<-read.csv("C:\\Documents and Settings\\Ty\\My Documents\\twitter test\\opinion lexicon.csv") #scoring hu.liu.pos = opin$positive.words hu.liu.neg = opin$negative.words pos.words = hu.liu.pos neg.words = hu.liu.neg #sentiment scoring algorithm score.sentiment = function(sentences, pos.words, neg.words, .progress='none') { require(plyr) require(stringr) # we got a vector of sentences. plyr will handle a list # or a vector as an "l" for us # we want a simple array of scores back, so we use # "l" + "a" + "ply" = "laply": scores = laply(sentences, function(sentence, pos.words, neg.words) { # clean up sentences with R's regex-driven global substitute, gsub(): sentence = gsub('[[:punct:]]', '', sentence) sentence = gsub('[[:cntrl:]]', '', sentence) sentence = gsub('\\d+', '', sentence) # and convert to lower case: sentence = tolower(sentence) # split into words. 
str_split is in the stringr package word.list = str_split(sentence, '\\s+') # sometimes a list() is one level of hierarchy too much words = unlist(word.list) # compare our words to the dictionaries of positive & negative terms pos.matches = match(words, pos.words) neg.matches = match(words, neg.words) # match() returns the position of the matched term or NA # we just want a TRUE/FALSE: pos.matches = !is.na(pos.matches) neg.matches = !is.na(neg.matches) # and conveniently enough, TRUE/FALSE will be treated as 1/0 by sum(): score = sum(pos.matches) - sum(neg.matches) return(score) }, pos.words, neg.words, .progress=.progress ) scores.df = data.frame(score=scores, text=sentences) return(scores.df) } cadillac.scores = score.sentiment(cadillac.text, pos.words, neg.words, .progress='text') cadillac.scores$airline = 'cadillac' hist(cadillac.scores$score) #check
/Company Specific Scripts/cadillac.R
no_license
sathishcodes/Twitter-Test
R
false
false
2,573
r
#twitter ness #libraries library(twitteR) library(RCurl) library(bitops) library(RJSONIO) setwd("C:\\Documents and Settings\\Ty\\My Documents\\twitter test") #step 1 cadillac.tweets = searchTwitter('@Cadillac', n=5000) #step 2 length(cadillac.tweets) class(cadillac.tweets) tweet = cadillac.tweets[[1]] class(tweet) tweet$getScreenName() tweet$getText() #Extracting text cadillac.text = lapply(cadillac.tweets, function(t) t$getText() ) length(cadillac.text) head(cadillac.text, 5) class(cadillac.text) #issue with scoring, It won't read the lexicon file... you have to read that from #the website, they haven't made a package for it yet #load file opin<-read.csv("C:\\Documents and Settings\\Ty\\My Documents\\twitter test\\opinion lexicon.csv") #scoring hu.liu.pos = opin$positive.words hu.liu.neg = opin$negative.words pos.words = hu.liu.pos neg.words = hu.liu.neg #sentiment scoring algorithm score.sentiment = function(sentences, pos.words, neg.words, .progress='none') { require(plyr) require(stringr) # we got a vector of sentences. plyr will handle a list # or a vector as an "l" for us # we want a simple array of scores back, so we use # "l" + "a" + "ply" = "laply": scores = laply(sentences, function(sentence, pos.words, neg.words) { # clean up sentences with R's regex-driven global substitute, gsub(): sentence = gsub('[[:punct:]]', '', sentence) sentence = gsub('[[:cntrl:]]', '', sentence) sentence = gsub('\\d+', '', sentence) # and convert to lower case: sentence = tolower(sentence) # split into words. 
str_split is in the stringr package word.list = str_split(sentence, '\\s+') # sometimes a list() is one level of hierarchy too much words = unlist(word.list) # compare our words to the dictionaries of positive & negative terms pos.matches = match(words, pos.words) neg.matches = match(words, neg.words) # match() returns the position of the matched term or NA # we just want a TRUE/FALSE: pos.matches = !is.na(pos.matches) neg.matches = !is.na(neg.matches) # and conveniently enough, TRUE/FALSE will be treated as 1/0 by sum(): score = sum(pos.matches) - sum(neg.matches) return(score) }, pos.words, neg.words, .progress=.progress ) scores.df = data.frame(score=scores, text=sentences) return(scores.df) } cadillac.scores = score.sentiment(cadillac.text, pos.words, neg.words, .progress='text') cadillac.scores$airline = 'cadillac' hist(cadillac.scores$score) #check
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simple_logging.R \name{simple_logging} \alias{simple_logging} \alias{log_exception} \alias{threshold} \alias{console_threshold} \alias{add_appender} \alias{remove_appender} \alias{show_log} \alias{show_dt} \alias{show_data} \title{Simple Logging} \usage{ log_exception(code, logfun = lgr$fatal, caller = get_caller(-3)) threshold(level, target = lgr::lgr) console_threshold(level, target = lgr::lgr$appenders$console) add_appender(appender, name = NULL, target = lgr::lgr) remove_appender(pos, target = lgr::lgr) show_log(threshold = NA_integer_, n = 20L, target = lgr::lgr) show_dt(target = lgr::lgr) show_data(target = lgr::lgr) } \arguments{ \item{code}{Any \R code} \item{logfun}{a \code{function} for processing the log request, usually \code{lgr$info()}, \code{lgr$debug()}, etc... .} \item{caller}{a \code{character} scalar. The name of the calling function} \item{level}{\code{integer} or \code{character} scalar: the desired log level} \item{target}{a \link{Logger} or \link{Appender} or the name of a Logger as \code{character} scalar} \item{appender}{an \code{Appender}} \item{name}{\code{character} scalar. An optional name for the new Appender.} \item{pos}{\code{integer} index or \code{character} names of the appenders to remove} \item{threshold}{\code{character} or \code{integer} scalar. The minimum \link[=log_levels]{log level} that should be processed by the root logger.} \item{n}{\code{integer} scalar. Show only the last \code{n} log entries that match \code{threshold}} } \value{ \code{threshold()} and \code{console_threshold()} return the \link{log_level} of \code{target} as \code{integer} (invisibly) \code{add_appender()} and \code{remove_appender()} return \code{target}. \code{show_log()} prints to the console and returns whatever the target Appender's \verb{$show()} method returns, usually a \code{character} vector, \code{data.frame} or \code{data.table} (invisibly). 
\code{show_data()} always returns a \code{data.frame} and \code{show_dt()} always returns a \code{data.table}. } \description{ lgr provides convenience functions managing the root Logger. These are designed chiefly for interactive use and are less verbose than their R6 method counterparts. \code{threshold()} sets or retrieves the threshold for an \link{Appender} or \link{Logger} (the minimum level of log messages it processes). It's \code{target} defaults to the root logger. (equivalent to \code{lgr::lgr$threshold} and \code{lgr::lgr$set_threshold}) \code{console_threshold()} is a shortcut to set the threshold of the root loggers \link{AppenderConsole}, which is usually the only Appender that manages console output for a given \R session. (equivalent to \code{lgr::lgr$appenders$console$threshold} and \code{lgr::lgr$appenders$console$set_threshold}) \code{add_appender()} and \code{remove_appender()} add Appenders to Loggers and other Appenders. (equivalent to \code{lgr::lgr$add_appender} and \code{lgr::lgr$remove_appender}) \code{show_log()} displays the last \code{n} log entries of an Appender (or a Logger with such an Appender attached) with a \verb{$show()} method. Most, but not all Appenders support this function (try \link{AppenderFile} or \link{AppenderBuffer}). \code{show_data()} and \code{show_dt()} work similar to \code{show_log()}, except that they return the log as \code{data.frame} or \code{data.table} respectively. Only Appenders that log to formats that can easily be converted to \code{data.frames} are supported (try \link{AppenderJson} or \link{AppenderBuffer}). The easiest way to try out this features is by adding an AppenderBuffer to the root logger with \code{\link[=basic_config]{basic_config(memory = TRUE)}}. 
} \examples{ # Get and set the threshold of the root logger threshold("error") threshold() lgr$info("this will be supressed") lgr$error("an important error message") # you can also specify a target to modify other loggers lg <- get_logger("test") threshold("fatal", target = lg) threshold(target = lg) # If a Logger's threshold is not set, the threshold is inherited from # its parent, in this case the root logger (that we set to error/200 before) threshold(NULL, target = lg) threshold(target = lg) # Alternative R6 API for getting/setting thresholds lg$set_threshold("info") lg$threshold lg$set_threshold(300) lg$threshold lg$set_threshold(NULL) lg$threshold # cleanup lgr$config(NULL) lg$config(NULL) # add Appenders to a Logger add_appender(AppenderConsole$new(), "second_console_appender") lgr$fatal("Multiple console appenders are a bad idea") remove_appender("second_console_appender") lgr$info("Good that we defined an appender name, so it's easy to remove") # Reconfigure the root logger basic_config(memory = TRUE) # log some messages lgr$info("a log message") lgr$info("another message with data", data = 1:3) show_log() show_data() # cleanup lgr$config(NULL) }
/man/simple_logging.Rd
permissive
s-fleck/lgr
R
false
true
4,947
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simple_logging.R \name{simple_logging} \alias{simple_logging} \alias{log_exception} \alias{threshold} \alias{console_threshold} \alias{add_appender} \alias{remove_appender} \alias{show_log} \alias{show_dt} \alias{show_data} \title{Simple Logging} \usage{ log_exception(code, logfun = lgr$fatal, caller = get_caller(-3)) threshold(level, target = lgr::lgr) console_threshold(level, target = lgr::lgr$appenders$console) add_appender(appender, name = NULL, target = lgr::lgr) remove_appender(pos, target = lgr::lgr) show_log(threshold = NA_integer_, n = 20L, target = lgr::lgr) show_dt(target = lgr::lgr) show_data(target = lgr::lgr) } \arguments{ \item{code}{Any \R code} \item{logfun}{a \code{function} for processing the log request, usually \code{lgr$info()}, \code{lgr$debug()}, etc... .} \item{caller}{a \code{character} scalar. The name of the calling function} \item{level}{\code{integer} or \code{character} scalar: the desired log level} \item{target}{a \link{Logger} or \link{Appender} or the name of a Logger as \code{character} scalar} \item{appender}{an \code{Appender}} \item{name}{\code{character} scalar. An optional name for the new Appender.} \item{pos}{\code{integer} index or \code{character} names of the appenders to remove} \item{threshold}{\code{character} or \code{integer} scalar. The minimum \link[=log_levels]{log level} that should be processed by the root logger.} \item{n}{\code{integer} scalar. Show only the last \code{n} log entries that match \code{threshold}} } \value{ \code{threshold()} and \code{console_threshold()} return the \link{log_level} of \code{target} as \code{integer} (invisibly) \code{add_appender()} and \code{remove_appender()} return \code{target}. \code{show_log()} prints to the console and returns whatever the target Appender's \verb{$show()} method returns, usually a \code{character} vector, \code{data.frame} or \code{data.table} (invisibly). 
\code{show_data()} always returns a \code{data.frame} and \code{show_dt()} always returns a \code{data.table}. } \description{ lgr provides convenience functions managing the root Logger. These are designed chiefly for interactive use and are less verbose than their R6 method counterparts. \code{threshold()} sets or retrieves the threshold for an \link{Appender} or \link{Logger} (the minimum level of log messages it processes). It's \code{target} defaults to the root logger. (equivalent to \code{lgr::lgr$threshold} and \code{lgr::lgr$set_threshold}) \code{console_threshold()} is a shortcut to set the threshold of the root loggers \link{AppenderConsole}, which is usually the only Appender that manages console output for a given \R session. (equivalent to \code{lgr::lgr$appenders$console$threshold} and \code{lgr::lgr$appenders$console$set_threshold}) \code{add_appender()} and \code{remove_appender()} add Appenders to Loggers and other Appenders. (equivalent to \code{lgr::lgr$add_appender} and \code{lgr::lgr$remove_appender}) \code{show_log()} displays the last \code{n} log entries of an Appender (or a Logger with such an Appender attached) with a \verb{$show()} method. Most, but not all Appenders support this function (try \link{AppenderFile} or \link{AppenderBuffer}). \code{show_data()} and \code{show_dt()} work similar to \code{show_log()}, except that they return the log as \code{data.frame} or \code{data.table} respectively. Only Appenders that log to formats that can easily be converted to \code{data.frames} are supported (try \link{AppenderJson} or \link{AppenderBuffer}). The easiest way to try out this features is by adding an AppenderBuffer to the root logger with \code{\link[=basic_config]{basic_config(memory = TRUE)}}. 
} \examples{ # Get and set the threshold of the root logger threshold("error") threshold() lgr$info("this will be supressed") lgr$error("an important error message") # you can also specify a target to modify other loggers lg <- get_logger("test") threshold("fatal", target = lg) threshold(target = lg) # If a Logger's threshold is not set, the threshold is inherited from # its parent, in this case the root logger (that we set to error/200 before) threshold(NULL, target = lg) threshold(target = lg) # Alternative R6 API for getting/setting thresholds lg$set_threshold("info") lg$threshold lg$set_threshold(300) lg$threshold lg$set_threshold(NULL) lg$threshold # cleanup lgr$config(NULL) lg$config(NULL) # add Appenders to a Logger add_appender(AppenderConsole$new(), "second_console_appender") lgr$fatal("Multiple console appenders are a bad idea") remove_appender("second_console_appender") lgr$info("Good that we defined an appender name, so it's easy to remove") # Reconfigure the root logger basic_config(memory = TRUE) # log some messages lgr$info("a log message") lgr$info("another message with data", data = 1:3) show_log() show_data() # cleanup lgr$config(NULL) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/check_fields.R \name{check_deliv_PhysicalChemical} \alias{check_deliv_PhysicalChemical} \title{Check if the required and recommended datatype-specific SHARK system fields (different between different datatypes) are present.} \usage{ check_deliv_PhysicalChemical(data, level = "error") } \arguments{ \item{data}{The data frame.} \item{level}{The level of error reporting, i.e. "error" or "warning". Recommended fields are only checked in case of "warning".} } \value{ Any warnings or errors. } \description{ Missing or empty required fields are reported as errors, missing or empty recommended fields are reported as warnings. }
/man/check_deliv_PhysicalChemical.Rd
permissive
sharksmhi/SHARK4R
R
false
true
727
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/check_fields.R \name{check_deliv_PhysicalChemical} \alias{check_deliv_PhysicalChemical} \title{Check if the required and recommended datatype-specific SHARK system fields (different between different datatypes) are present.} \usage{ check_deliv_PhysicalChemical(data, level = "error") } \arguments{ \item{data}{The data frame.} \item{level}{The level of error reporting, i.e. "error" or "warning". Recommended fields are only checked in case of "warning".} } \value{ Any warnings or errors. } \description{ Missing or empty required fields are reported as errors, missing or empty recommended fields are reported as warnings. }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/segs_direction.R \name{sequenceverts} \alias{sequenceverts} \title{Store Vertices in Ascending Sequence} \usage{ sequenceverts(rivers) } \arguments{ \item{rivers}{The river network object to use} } \value{ A new river network object (see \link{rivernetwork}) } \description{ Rearranges the vertices of a river network object so that vertices are stored sequentially moving up river for all segments (coordinates [1,] are the bottom of each segment). } \note{ Even without calling \code{sequenceverts}, the vertices will be stored sequentially - either moving up river or down for a given segment. What \code{sequenceverts()} adds is a standardized direction. Currently, no function in package 'riverdist' requires the vertices to be stored sequentially. } \examples{ data(Gulk) Gulk <- setmouth(seg=1, vert=1, rivers=Gulk) str(Gulk) Gulk.dir <- sequenceverts(rivers=Gulk) str(Gulk.dir) } \author{ Matt Tyers } \seealso{ \link{line2network} }
/man/sequenceverts.Rd
no_license
jcmartinmu/riverdist
R
false
true
1,085
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/segs_direction.R \name{sequenceverts} \alias{sequenceverts} \title{Store Vertices in Ascending Sequence} \usage{ sequenceverts(rivers) } \arguments{ \item{rivers}{The river network object to use} } \value{ A new river network object (see \link{rivernetwork}) } \description{ Rearranges the vertices of a river network object so that vertices are stored sequentially moving up river for all segments (coordinates [1,] are the bottom of each segment). } \note{ Even without calling \code{sequenceverts}, the vertices will be stored sequentially - either moving up river or down for a given segment. What \code{sequenceverts()} adds is a standardized direction. Currently, no function in package 'riverdist' requires the vertices to be stored sequentially. } \examples{ data(Gulk) Gulk <- setmouth(seg=1, vert=1, rivers=Gulk) str(Gulk) Gulk.dir <- sequenceverts(rivers=Gulk) str(Gulk.dir) } \author{ Matt Tyers } \seealso{ \link{line2network} }