content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
\name{NEWS} \title{News for Package 'biwavelet'} \encoding{UTF-8} \section{Changes in biwavelet version 0.20.19 (2019-08-01)}{ \subsection{fixed}{ \itemize{ \item Updated \code{rcpp_wt_bases_paul.cpp} to fix array out-of-bounds reading issue } } } \section{Changes in biwavelet version 0.20.18 (2018-08-29)}{ \subsection{fixed}{ \itemize{ \item Updated \code{plot.biwavelet} help file with correct phase arrow interpretation } } } \section{Changes in biwavelet version 0.20.17 (2018-05-19)}{ \subsection{fixed}{ \itemize{ \item Updated package by removing benchmarks in vignettes for CRAN submission \item yaxis tickmarks should now be accurate (no more rounding issues) } } } \section{Changes in biwavelet version 0.20.16 (2018-03-26)}{ \subsection{fixed}{ \itemize{ \item Fixed documentation for wtc function } } } \section{Changes in biwavelet version 0.20.15 (2017-03-01)}{ \subsection{fixed}{ \itemize{ \item Fixed return NULL issues with Windows platforms } } } \section{Changes in biwavelet version 0.20.14 (2017-02-24)}{ \subsection{fixed}{ \itemize{ \item Fixed \code{plot.biwavelet} so that the COI extends all the way to the bottom of the plot (max of periods) } } } \section{Changes in biwavelet version 0.20.13 (2016-12-27)}{ \subsection{fixed}{ \itemize{ \item Fixed \code{plot.biwavelet} so that the \code{arrow.cutoff} argument applies to rsq values for wtc and pwtc objects } } } \section{Changes in biwavelet version 0.20.12 (2016-12-27)}{ \subsection{fixed}{ \itemize{ \item Fixed \code{plot.biwavelet} so that the \code{arrow.cutoff} argument applies to rsq values } } } \section{Changes in biwavelet version 0.20.11 (2016-08-31)}{ \subsection{added}{ \itemize{ \item Build vignettes } } } \section{Changes in biwavelet version 0.20.10 (2016-08-10)}{ \subsection{fixed}{ \itemize{ \item Function \code{phase.biwavelet} now plots the regions whose significance exceeds \code{arrow.cutoff}. 
If the object being plotted does not have a significance field, regions whose zvalues exceed the \code{arrow.cutoff} quantile will be plotted. \item Fixed C++ warning about unsupported dynamically sized arrays } } } \section{Changes in biwavelet version 0.20.9 (2016-07-12)}{ \subsection{fixed}{ \itemize{ \item Fixed handling of \code{lag1} coefficients in \code{wtc}. } } } \section{Changes in biwavelet version 0.20.8 (2016-06-25)}{ \subsection{fixed}{ \itemize{ \item Fixed handling of axis preferences in \code{plot.biwavelet}. } } } \section{Changes in biwavelet version 0.20.7 (2016-06-01)}{ \subsection{fixed}{ \itemize{ \item Fixed handling of time in \code{check.data}. } } } \section{Changes in biwavelet version 0.20.6 (2016-06-01)}{ \subsection{fixed}{ \itemize{ \item Fixed x-axis in \code{plot.biwavelet}. } } } \section{Changes in biwavelet version 0.20.3 (2016-05-08)}{ \subsection{fixed}{ \itemize{ \item Fixed displacement of COI, contours and phase arrows in \code{plot.biwavelet} when adding a color bar. } } } \section{Changes in biwavelet version 0.20.2 (2016-05-06)}{ \subsection{fixed}{ \itemize{ \item Fixed \code{check.datum}; hopefully for the last time. \item Faster wt bases and row quantile (Rcpp implementations): \itemize{ \item The \code{param} parameter for all \code{rcpp_wt_bases_*} must be within interval (0..10). \item The \code{rcpp_row_quantile} function requires a matrix as a parameter (use \code{as.matrix()} for vectors). } } } } \section{Changes in biwavelet version 0.19.2 (2016-05-03)}{ \subsection{fixed}{ \itemize{ \item Fixed Rcpp implementation of the \code{wt.bases} functions, i.e., \code{rcpp_wt_bases_*.cpp}. Replacing \code{int} type with \code{double} type for \code{scale} parameter which caused small scales to be rendered incorrectly. 
} } } \section{Changes in biwavelet version 0.19.1 (2016-04-29)}{ \subsection{fixed}{ \itemize{ \item Fixed interpretation of phase differences in \code{plot.biwavelet} help file } } \subsection{changed}{ \itemize{ \item Added unit tests for approx 78\% of the code. \item Implemented a parallelized Monte Carlo simulation function \code{wtc_sig_parallel} which is 2 to 4 times faster on a 4-core CPU than the original \code{wtc.sig}. The speedup is noticeable on: \enumerate{ \item large simulations \code{nrads >= 800}, \item multiple simulations, \item multi-core systems with 4+ cores. } However, parallelization involves a significant heat-up phase because all the workers need to be started and they need to load all the required packages. This will be addresses in future versions of biwavelet. \item Added a speed-optimized version of \code{convolve2D}. \item Replaced standard \code{arima.sim} function with a pair of functions \code{get_minroots} and \code{ar1_ma0_sim}. These functions are still implemented in R. We can reimplement them later in C. \item Reimplemented \code{wt.bases} morlet, paul and dog in C. \item Removed unused function \code{meshgrid}. } } } \section{Changes in biwavelet version 0.17.11 (2015-10-09)}{ \subsection{fixed}{ \itemize{ \item close all progress bars after use \item Function \code{wtc} can now handle non-finite values when computing the quantiles of the rsq values from the Monte Carlo simulations } } } \section{Changes in biwavelet version 0.17.10 (2015-04-29)}{ \subsection{fixed}{ \itemize{ \item Added ability to handle custom color palettes in \code{plot.biwavelet}. Users can now specify any color scheme using the \code{fill.cols} argument. } } } \section{Changes in biwavelet version 0.17.9 (2015-04-29)}{ \subsection{fixed}{ \itemize{ \item Fixed limited padding issue, which could lead to weird edge effects. Current padding level is identical to that of Torrence & Compo (1998). 
\item Changed the default \code{tol} value from 0.95 to 1 in the \code{plot} function.} } } \section{Changes in biwavelet version 0.17.8 (2015-04-28)}{ \subsection{fixed}{ \itemize{ \item Added semi-transparent COI.} } } \section{Changes in biwavelet version 0.17.7 (2015-04-13)}{ \subsection{fixed}{ \itemize{ \item Fixed \code{check.datum} function so that it does not assume a sampling frequency of 1.} } } \section{Changes in biwavelet version 0.17.6 (2015-04-05)}{ \subsection{fixed}{ \itemize{ \item Added ability to set \code{zlim} in \code{plot.biwavelet}.} } } \section{Changes in biwavelet version 0.17.5 (2014-11-05)}{ \subsection{fixed}{ \itemize{ \item Improved the implementation of \code{phase.plot} to allow for much better looking phase arrows (thanks Huidong Tang).} } } \section{Changes in biwavelet version 0.17.4 (2014-11-04)}{ \subsection{fixed}{ \itemize{ \item Made function \code{wt} faster by avoiding excessive padding (thanks Huidong Tang).} } } \section{Changes in biwavelet version 0.17.3 (2014-04-27)}{ \subsection{fixed}{ \itemize{ \item Made \code{check.datum} tolerate slight inconsistencies in the size of timesteps.} } } \section{Changes in biwavelet version 0.17.2 (2014-04-11)}{ \subsection{fixed}{ \itemize{ \item Added arguments in \code{plot.biwavelet} and \code{phase.plot} to control the length of the phase arrows and the size of the arrow heads independently.} } } \section{Changes in biwavelet version 0.17.1 (2013-07-15)}{ \subsection{fixed}{ \itemize{ \item Fixed code in \code{check.data} to test for constant step size in the data.} } } \section{Changes in biwavelet version 0.17 (2013-06-05)}{ \subsection{added}{ \itemize{ \item Function \code{pwtc} can be used to perform partial wavelet coherence between two time series \code{y} and \code{x1} by controlling for (or partialling-out) a third time series \code{x2}. 
} } } \section{Changes in biwavelet version 0.16 (2013-05-07)}{ \subsection{added}{ \itemize{ \item Users can now specify the density of the phase arrows using the \code{plot} function. } } \subsection{fixed}{ \itemize{ \item Fixed bug in \code{wt} affecting the significance region (thanks Patrick Kilduff and Flora Cordoleani). } } } \section{Changes in biwavelet version 0.15 (2013-04-08)}{ \subsection{added}{ \itemize{ \item Users can now specify the color, line width and line type for the COI, significance contours and phase arrows using the \code{plot} function. } } \subsection{fixed}{ \itemize{ \item Removed misleading examples showing how to compute the 'bias-corrected' wavelet coherence. There is no bias for the wavelet coherence function, so using the default \code{type} argument in the \code{plot} function is recommended. \item Fixed typos in the documentation of plot.biwavelet and xwt (thanks Lei Cheng). } } } \section{Changes in biwavelet version 0.14 (2013-03-06)}{ \subsection{added}{ \itemize{ \item As of biwavelet version 0.14, the bias-corrected wavelet and cross-wavelet spectra are automatically computed and plotted by default using the methods described by Liu et al. (2007) and Veleda et al. (2012). This correction is needed because the traditional approach for computing the power spectrum (e.g., Torrence and Compo 1998) leads to an artificial and systematic reduction in power at lower periods. } } \subsection{fixed}{ \itemize{ \item Plotting function now accepts traditional plotting flags such as xaxt and yaxt to control x and y tickmarks. } } }
/biwavelet/inst/NEWS.Rd
no_license
akhikolla/ClusterTests
R
false
false
10,164
rd
\name{NEWS} \title{News for Package 'biwavelet'} \encoding{UTF-8} \section{Changes in biwavelet version 0.20.19 (2019-08-01)}{ \subsection{fixed}{ \itemize{ \item Updated \code{rcpp_wt_bases_paul.cpp} to fix array out-of-bounds reading issue } } } \section{Changes in biwavelet version 0.20.18 (2018-08-29)}{ \subsection{fixed}{ \itemize{ \item Updated \code{plot.biwavelet} help file with correct phase arrow interpretation } } } \section{Changes in biwavelet version 0.20.17 (2018-05-19)}{ \subsection{fixed}{ \itemize{ \item Updated package by removing benchmarks in vignettes for CRAN submission \item yaxis tickmarks should now be accurate (no more rounding issues) } } } \section{Changes in biwavelet version 0.20.16 (2018-03-26)}{ \subsection{fixed}{ \itemize{ \item Fixed documentation for wtc function } } } \section{Changes in biwavelet version 0.20.15 (2017-03-01)}{ \subsection{fixed}{ \itemize{ \item Fixed return NULL issues with Windows platforms } } } \section{Changes in biwavelet version 0.20.14 (2017-02-24)}{ \subsection{fixed}{ \itemize{ \item Fixed \code{plot.biwavelet} so that the COI extends all the way to the bottom of the plot (max of periods) } } } \section{Changes in biwavelet version 0.20.13 (2016-12-27)}{ \subsection{fixed}{ \itemize{ \item Fixed \code{plot.biwavelet} so that the \code{arrow.cutoff} argument applies to rsq values for wtc and pwtc objects } } } \section{Changes in biwavelet version 0.20.12 (2016-12-27)}{ \subsection{fixed}{ \itemize{ \item Fixed \code{plot.biwavelet} so that the \code{arrow.cutoff} argument applies to rsq values } } } \section{Changes in biwavelet version 0.20.11 (2016-08-31)}{ \subsection{added}{ \itemize{ \item Build vignettes } } } \section{Changes in biwavelet version 0.20.10 (2016-08-10)}{ \subsection{fixed}{ \itemize{ \item Function \code{phase.biwavelet} now plots the regions whose significance exceeds \code{arrow.cutoff}. 
If the object being plotted does not have a significance field, regions whose zvalues exceed the \code{arrow.cutoff} quantile will be plotted. \item Fixed C++ warning about unsupported dynamically sized arrays } } } \section{Changes in biwavelet version 0.20.9 (2016-07-12)}{ \subsection{fixed}{ \itemize{ \item Fixed handling of \code{lag1} coefficients in \code{wtc}. } } } \section{Changes in biwavelet version 0.20.8 (2016-06-25)}{ \subsection{fixed}{ \itemize{ \item Fixed handling of axis preferences in \code{plot.biwavelet}. } } } \section{Changes in biwavelet version 0.20.7 (2016-06-01)}{ \subsection{fixed}{ \itemize{ \item Fixed handling of time in \code{check.data}. } } } \section{Changes in biwavelet version 0.20.6 (2016-06-01)}{ \subsection{fixed}{ \itemize{ \item Fixed x-axis in \code{plot.biwavelet}. } } } \section{Changes in biwavelet version 0.20.3 (2016-05-08)}{ \subsection{fixed}{ \itemize{ \item Fixed displacement of COI, contours and phase arrows in \code{plot.biwavelet} when adding a color bar. } } } \section{Changes in biwavelet version 0.20.2 (2016-05-06)}{ \subsection{fixed}{ \itemize{ \item Fixed \code{check.datum}; hopefully for the last time. \item Faster wt bases and row quantile (Rcpp implementations): \itemize{ \item The \code{param} parameter for all \code{rcpp_wt_bases_*} must be within interval (0..10). \item The \code{rcpp_row_quantile} function requires a matrix as a parameter (use \code{as.matrix()} for vectors). } } } } \section{Changes in biwavelet version 0.19.2 (2016-05-03)}{ \subsection{fixed}{ \itemize{ \item Fixed Rcpp implementation of the \code{wt.bases} functions, i.e., \code{rcpp_wt_bases_*.cpp}. Replacing \code{int} type with \code{double} type for \code{scale} parameter which caused small scales to be rendered incorrectly. 
} } } \section{Changes in biwavelet version 0.19.1 (2016-04-29)}{ \subsection{fixed}{ \itemize{ \item Fixed interpretation of phase differences in \code{plot.biwavelet} help file } } \subsection{changed}{ \itemize{ \item Added unit tests for approx 78\% of the code. \item Implemented a parallelized Monte Carlo simulation function \code{wtc_sig_parallel} which is 2 to 4 times faster on a 4-core CPU than the original \code{wtc.sig}. The speedup is noticeable on: \enumerate{ \item large simulations \code{nrads >= 800}, \item multiple simulations, \item multi-core systems with 4+ cores. } However, parallelization involves a significant heat-up phase because all the workers need to be started and they need to load all the required packages. This will be addresses in future versions of biwavelet. \item Added a speed-optimized version of \code{convolve2D}. \item Replaced standard \code{arima.sim} function with a pair of functions \code{get_minroots} and \code{ar1_ma0_sim}. These functions are still implemented in R. We can reimplement them later in C. \item Reimplemented \code{wt.bases} morlet, paul and dog in C. \item Removed unused function \code{meshgrid}. } } } \section{Changes in biwavelet version 0.17.11 (2015-10-09)}{ \subsection{fixed}{ \itemize{ \item close all progress bars after use \item Function \code{wtc} can now handle non-finite values when computing the quantiles of the rsq values from the Monte Carlo simulations } } } \section{Changes in biwavelet version 0.17.10 (2015-04-29)}{ \subsection{fixed}{ \itemize{ \item Added ability to handle custom color palettes in \code{plot.biwavelet}. Users can now specify any color scheme using the \code{fill.cols} argument. } } } \section{Changes in biwavelet version 0.17.9 (2015-04-29)}{ \subsection{fixed}{ \itemize{ \item Fixed limited padding issue, which could lead to weird edge effects. Current padding level is identical to that of Torrence & Compo (1998). 
\item Changed the default \code{tol} value from 0.95 to 1 in the \code{plot} function.} } } \section{Changes in biwavelet version 0.17.8 (2015-04-28)}{ \subsection{fixed}{ \itemize{ \item Added semi-transparent COI.} } } \section{Changes in biwavelet version 0.17.7 (2015-04-13)}{ \subsection{fixed}{ \itemize{ \item Fixed \code{check.datum} function so that it does not assume a sampling frequency of 1.} } } \section{Changes in biwavelet version 0.17.6 (2015-04-05)}{ \subsection{fixed}{ \itemize{ \item Added ability to set \code{zlim} in \code{plot.biwavelet}.} } } \section{Changes in biwavelet version 0.17.5 (2014-11-05)}{ \subsection{fixed}{ \itemize{ \item Improved the implementation of \code{phase.plot} to allow for much better looking phase arrows (thanks Huidong Tang).} } } \section{Changes in biwavelet version 0.17.4 (2014-11-04)}{ \subsection{fixed}{ \itemize{ \item Made function \code{wt} faster by avoiding excessive padding (thanks Huidong Tang).} } } \section{Changes in biwavelet version 0.17.3 (2014-04-27)}{ \subsection{fixed}{ \itemize{ \item Made \code{check.datum} tolerate slight inconsistencies in the size of timesteps.} } } \section{Changes in biwavelet version 0.17.2 (2014-04-11)}{ \subsection{fixed}{ \itemize{ \item Added arguments in \code{plot.biwavelet} and \code{phase.plot} to control the length of the phase arrows and the size of the arrow heads independently.} } } \section{Changes in biwavelet version 0.17.1 (2013-07-15)}{ \subsection{fixed}{ \itemize{ \item Fixed code in \code{check.data} to test for constant step size in the data.} } } \section{Changes in biwavelet version 0.17 (2013-06-05)}{ \subsection{added}{ \itemize{ \item Function \code{pwtc} can be used to perform partial wavelet coherence between two time series \code{y} and \code{x1} by controlling for (or partialling-out) a third time series \code{x2}. 
} } } \section{Changes in biwavelet version 0.16 (2013-05-07)}{ \subsection{added}{ \itemize{ \item Users can now specify the density of the phase arrows using the \code{plot} function. } } \subsection{fixed}{ \itemize{ \item Fixed bug in \code{wt} affecting the significance region (thanks Patrick Kilduff and Flora Cordoleani). } } } \section{Changes in biwavelet version 0.15 (2013-04-08)}{ \subsection{added}{ \itemize{ \item Users can now specify the color, line width and line type for the COI, significance contours and phase arrows using the \code{plot} function. } } \subsection{fixed}{ \itemize{ \item Removed misleading examples showing how to compute the 'bias-corrected' wavelet coherence. There is no bias for the wavelet coherence function, so using the default \code{type} argument in the \code{plot} function is recommended. \item Fixed typos in the documentation of plot.biwavelet and xwt (thanks Lei Cheng). } } } \section{Changes in biwavelet version 0.14 (2013-03-06)}{ \subsection{added}{ \itemize{ \item As of biwavelet version 0.14, the bias-corrected wavelet and cross-wavelet spectra are automatically computed and plotted by default using the methods described by Liu et al. (2007) and Veleda et al. (2012). This correction is needed because the traditional approach for computing the power spectrum (e.g., Torrence and Compo 1998) leads to an artificial and systematic reduction in power at lower periods. } } \subsection{fixed}{ \itemize{ \item Plotting function now accepts traditional plotting flags such as xaxt and yaxt to control x and y tickmarks. } } }
# Exercise 3: writing and executing functions # Define a function `add_three` that takes a single argument and # returns a value 3 greater than the input add_three <- function(input) { return(input+3) } # Create a variable `ten` that is the result of passing 7 to your `add_three` # function ten <- add_three(7) print(ten) # Define a function `imperial_to_metric` that takes in two arguments: a number # of feet and a number of inches # The function should return the equivalent length in meters imperial_to_metric <- function(feets,inches){ meters <- feets*0.3048+inches*0.0254 return(meters) } # Create a variable `height_in_meters` by passing your height in imperial to the # `imperial_to_metric` function height_in_meters <- imperial_to_metric(5,5)
/chapter-06-exercises/exercise-3/exercise.R
permissive
jqin10/book-exercises
R
false
false
764
r
# Exercise 3: writing and executing functions # Define a function `add_three` that takes a single argument and # returns a value 3 greater than the input add_three <- function(input) { return(input+3) } # Create a variable `ten` that is the result of passing 7 to your `add_three` # function ten <- add_three(7) print(ten) # Define a function `imperial_to_metric` that takes in two arguments: a number # of feet and a number of inches # The function should return the equivalent length in meters imperial_to_metric <- function(feets,inches){ meters <- feets*0.3048+inches*0.0254 return(meters) } # Create a variable `height_in_meters` by passing your height in imperial to the # `imperial_to_metric` function height_in_meters <- imperial_to_metric(5,5)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/iaibase.R \name{predict_outcomes.policy_learner} \alias{predict_outcomes.policy_learner} \title{Return the predicted outcome for each treatment made by a policy learner for each point in the features} \usage{ \method{predict_outcomes}{policy_learner}(obj, X, rewards, ...) } \arguments{ \item{obj}{The learner or grid to use for prediction.} \item{X}{The features of the data.} \item{rewards}{The estimated reward matrix for the data.} \item{...}{Additional arguments (unused)} } \description{ Julia Equivalent: \href{https://docs.interpretable.ai/v3.1.1/IAIBase/reference/#IAI.predict_outcomes-Tuple\%7BPolicyLearner\%2C\%20Union\%7BDataFrames.AbstractDataFrame\%2C\%20AbstractMatrix\%7B\%3C\%3AReal\%7D\%7D\%2C\%20Union\%7BDataFrames.AbstractDataFrame\%2C\%20AbstractMatrix\%7B\%3C\%3AReal\%7D\%7D\%7D}{\code{IAI.predict_outcomes}} } \section{IAI Compatibility}{ Requires IAI version 2.0 or higher } \examples{ \dontrun{iai::predict_outcomes(lnr, X, rewards)} }
/man/predict_outcomes.policy_learner.Rd
no_license
cran/iai
R
false
true
1,048
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/iaibase.R \name{predict_outcomes.policy_learner} \alias{predict_outcomes.policy_learner} \title{Return the predicted outcome for each treatment made by a policy learner for each point in the features} \usage{ \method{predict_outcomes}{policy_learner}(obj, X, rewards, ...) } \arguments{ \item{obj}{The learner or grid to use for prediction.} \item{X}{The features of the data.} \item{rewards}{The estimated reward matrix for the data.} \item{...}{Additional arguments (unused)} } \description{ Julia Equivalent: \href{https://docs.interpretable.ai/v3.1.1/IAIBase/reference/#IAI.predict_outcomes-Tuple\%7BPolicyLearner\%2C\%20Union\%7BDataFrames.AbstractDataFrame\%2C\%20AbstractMatrix\%7B\%3C\%3AReal\%7D\%7D\%2C\%20Union\%7BDataFrames.AbstractDataFrame\%2C\%20AbstractMatrix\%7B\%3C\%3AReal\%7D\%7D\%7D}{\code{IAI.predict_outcomes}} } \section{IAI Compatibility}{ Requires IAI version 2.0 or higher } \examples{ \dontrun{iai::predict_outcomes(lnr, X, rewards)} }
# Module UI #' @title mod_news_ui and mod_news_server #' @description A shiny Module. #' #' @param id shiny id #' @param input internal #' @param output internal #' @param session internal #' #' @rdname mod_news #' @keywords internal #' @importFrom shiny NS tagList #' @import tidytext #' @import tidyverse #' @import dplyr #' @import ggplot2 #' @import wordcloud2 #' @importFrom lubridate as_date round_date #' @export mod_news_ui <- function(id, dest){ ns <- NS(id) # This is just an example UI to be modified # Please change for your purpose # Do not forget to put ns() around all input ids!!! tagList( tags$h1(paste(dest, "Twitter News", sep = "-")), fluidRow( # box( title='Source',selectInput(inputId='source', label = 'Select the tweeet source' , # choices=c('All') # ) # ), box(title="Sentiment of the tweets from this destination" ,plotOutput(ns("plot1"))), box(title="Hot or Not - destination trending based on #tweets" ,plotOutput(ns("plot2"))) ), fluidRow( box(title="The most recent Twitter picture from this destination" ,imageOutput(ns("image"))) , box(title="Cloud with most important words", plotOutput(ns("wordcloud"))) ), fluidRow(h1(paste('Most Recent News from',dest))), fluidRow( column( width = 12, title = paste('Most Recent News from',dest),DT::dataTableOutput(ns("newstable") ) ) ) ) } # Module Server #' @rdname mod_news #' @keywords internal #' @importFrom stats rnorm reorder #' @importFrom graphics hist plot lines legend #' @importFrom rlang .data #' @importFrom utils download.file #' @export mod_news_server <- function(input, output, session, dest){ ns <- session$ns # This is just an example Server to be modified # Please change for your purpose histdata <- rnorm(500) output$plot1 <- renderPlot({ data <- histdata[seq_len(input$slider)] hist(data, main = dest()) }) # Select the right NY Times file based on the destination selected from the user---------- dest_nytimes <- reactive({ readRDS( paste0(get_prefix(), '/data/twitter/',tolower(dest()), 
'_nytimes.rds') ) }) # Select the right data file based on the destination selected from the user---------- dest_data <- reactive({ readRDS( paste0(get_prefix(), '/data/twitter/',tolower(dest()), '.rds') ) }) # observe({ # d <- dest_data() # updateSelectInput(session,"source",choices=dplyr::distinct(d[, c("source")],source)) # }) ## Plot for the most recent News from NYT # output$newstable <- DT::renderDataTable({ nytimes = dest_nytimes()$data[, c("headline.main",'pub_date','abstract','web_url'), drop = FALSE] arranged_data <- dplyr::arrange(nytimes,desc(.data$pub_date)) DT::datatable(data = arranged_data, options = list(pageLength = 10), rownames = FALSE) }) output$image <- renderImage({ recent<- dest_data() %>% as.data.frame %>% dplyr::filter(!is.na(dest_data()$media_url)) %>% arrange(desc(.data$created_at)) %>% slice(1) pic_url<- as.character(recent$media_url) fname <- tempfile(fileext = '.jpg') download.file(pic_url, fname, mode = 'wb') # Return a list containing the filename list(src = fname, contentType = 'image/jpg', width = '100%', height = '100%' ) }) output$plot1 <- renderPlot({ bing_lex <- get_sentiments("bing") sentiment_words <- dest_data() %>% select(.data$status_id, .data$screen_name, .data$created_at, .data$text) %>% unnest_tokens(word, .data$text) %>% inner_join(bing_lex, by = "word") sentiment_words %>% mutate(created_at_day = as_date(round_date(.data$created_at, "day")), sentiment_num = ifelse(.data$sentiment == "positive", 1, -1), count = n()) %>% ggplot() + geom_bar(aes(.data$created_at_day, fill = .data$sentiment), stat = "count") + facet_wrap(~.data$sentiment, ncol = 1)+ labs(x='Date',y='# of tweets') }) files <- list.files(path = file.path(get_prefix(), "data/twitter/"), pattern = "\\.rds$", full.names = TRUE) files <- files[regexpr("_",files)<0] coln <- gsub(".rds","",files) coln <- gsub(".*/","",coln) #r <- lapply(files, read_rds) r <- lapply(files, readRDS) formattwr <- function(x,i){ rt <- x[[i]]$created_at res <- 
as.data.frame(table(as.Date(rt))) res[,1] <- as.Date(res[,1]) res } k <- reactive({ location <- tolower(dest()) locations <- c("crete","lisbon","mallorca","rome","vienna") which(locations == location) }) #k <- 3 output$plot2 <- renderPlot({ plot(formattwr(r,1), type="n", ylim = c(0,2000),xlab="",ylab="# tweets") colvec <- paste("grey",10*1:length(r), sep="") colvec[k()] <- "red" for (i in 1:length(r)) { lines(formattwr(r,i), pch = 18, lty= 3, col = colvec[i], cex = 0.8) } # Add a line lines(formattwr(r,k()), pch=18, col="red", type="b", lty=2, lwd = 3) # Add a legend legend(formattwr(r,1)[1,1], 2000, legend=coln, col=colvec, lty=3, cex=0.8) }) output$wordcloud <- renderPlot({ ## Most important words tweet_words <- dest_data() %>% select(.data$screen_name, .data$created_at, .data$text) %>% unnest_tokens(word, .data$text) %>% count(word, sort=T) my_stop_words <- tidytext::stop_words %>% select(-.data$lexicon) %>% bind_rows(data.frame(word = c("https", "t.co", "rt", "amp","4yig9gzh5t","fyy2ceydhi","78","fakenews"))) tweet_words_interesting <- tweet_words %>% anti_join(my_stop_words, by = "word") top_50 <- tweet_words_interesting %>% group_by(word) %>% tally(sort=TRUE) %>% slice(1:50) top_50$n<-floor((top_50$n)^(1/3)) top_50 %>% mutate(word = reorder(word, n, function(n) -n)) %>% ggplot() + geom_bar(aes(word, n), stat = "identity") + theme(axis.text.x = element_text(angle = 60, hjust = 1)) + xlab("") ## Create word cloud wordcloud::wordcloud(top_50[, 1, drop=TRUE], top_50[, 2, drop=TRUE])#,color="random-light", shape="cardioide", size = .4, shuffle=T, rotateRatio = sample(c(1:100) / 100)) }) } ## To be copied in the UI # mod_news_ui("news_ui_1") ## To be copied in the server # callModule(mod_news_server, "news_ui_1")
/R/mod_news.R
permissive
marinapopovic11/travelboard
R
false
false
6,630
r
# Module UI #' @title mod_news_ui and mod_news_server #' @description A shiny Module. #' #' @param id shiny id #' @param input internal #' @param output internal #' @param session internal #' #' @rdname mod_news #' @keywords internal #' @importFrom shiny NS tagList #' @import tidytext #' @import tidyverse #' @import dplyr #' @import ggplot2 #' @import wordcloud2 #' @importFrom lubridate as_date round_date #' @export mod_news_ui <- function(id, dest){ ns <- NS(id) # This is just an example UI to be modified # Please change for your purpose # Do not forget to put ns() around all input ids!!! tagList( tags$h1(paste(dest, "Twitter News", sep = "-")), fluidRow( # box( title='Source',selectInput(inputId='source', label = 'Select the tweeet source' , # choices=c('All') # ) # ), box(title="Sentiment of the tweets from this destination" ,plotOutput(ns("plot1"))), box(title="Hot or Not - destination trending based on #tweets" ,plotOutput(ns("plot2"))) ), fluidRow( box(title="The most recent Twitter picture from this destination" ,imageOutput(ns("image"))) , box(title="Cloud with most important words", plotOutput(ns("wordcloud"))) ), fluidRow(h1(paste('Most Recent News from',dest))), fluidRow( column( width = 12, title = paste('Most Recent News from',dest),DT::dataTableOutput(ns("newstable") ) ) ) ) } # Module Server #' @rdname mod_news #' @keywords internal #' @importFrom stats rnorm reorder #' @importFrom graphics hist plot lines legend #' @importFrom rlang .data #' @importFrom utils download.file #' @export mod_news_server <- function(input, output, session, dest){ ns <- session$ns # This is just an example Server to be modified # Please change for your purpose histdata <- rnorm(500) output$plot1 <- renderPlot({ data <- histdata[seq_len(input$slider)] hist(data, main = dest()) }) # Select the right NY Times file based on the destination selected from the user---------- dest_nytimes <- reactive({ readRDS( paste0(get_prefix(), '/data/twitter/',tolower(dest()), 
'_nytimes.rds') ) }) # Select the right data file based on the destination selected from the user---------- dest_data <- reactive({ readRDS( paste0(get_prefix(), '/data/twitter/',tolower(dest()), '.rds') ) }) # observe({ # d <- dest_data() # updateSelectInput(session,"source",choices=dplyr::distinct(d[, c("source")],source)) # }) ## Plot for the most recent News from NYT # output$newstable <- DT::renderDataTable({ nytimes = dest_nytimes()$data[, c("headline.main",'pub_date','abstract','web_url'), drop = FALSE] arranged_data <- dplyr::arrange(nytimes,desc(.data$pub_date)) DT::datatable(data = arranged_data, options = list(pageLength = 10), rownames = FALSE) }) output$image <- renderImage({ recent<- dest_data() %>% as.data.frame %>% dplyr::filter(!is.na(dest_data()$media_url)) %>% arrange(desc(.data$created_at)) %>% slice(1) pic_url<- as.character(recent$media_url) fname <- tempfile(fileext = '.jpg') download.file(pic_url, fname, mode = 'wb') # Return a list containing the filename list(src = fname, contentType = 'image/jpg', width = '100%', height = '100%' ) }) output$plot1 <- renderPlot({ bing_lex <- get_sentiments("bing") sentiment_words <- dest_data() %>% select(.data$status_id, .data$screen_name, .data$created_at, .data$text) %>% unnest_tokens(word, .data$text) %>% inner_join(bing_lex, by = "word") sentiment_words %>% mutate(created_at_day = as_date(round_date(.data$created_at, "day")), sentiment_num = ifelse(.data$sentiment == "positive", 1, -1), count = n()) %>% ggplot() + geom_bar(aes(.data$created_at_day, fill = .data$sentiment), stat = "count") + facet_wrap(~.data$sentiment, ncol = 1)+ labs(x='Date',y='# of tweets') }) files <- list.files(path = file.path(get_prefix(), "data/twitter/"), pattern = "\\.rds$", full.names = TRUE) files <- files[regexpr("_",files)<0] coln <- gsub(".rds","",files) coln <- gsub(".*/","",coln) #r <- lapply(files, read_rds) r <- lapply(files, readRDS) formattwr <- function(x,i){ rt <- x[[i]]$created_at res <- 
as.data.frame(table(as.Date(rt))) res[,1] <- as.Date(res[,1]) res } k <- reactive({ location <- tolower(dest()) locations <- c("crete","lisbon","mallorca","rome","vienna") which(locations == location) }) #k <- 3 output$plot2 <- renderPlot({ plot(formattwr(r,1), type="n", ylim = c(0,2000),xlab="",ylab="# tweets") colvec <- paste("grey",10*1:length(r), sep="") colvec[k()] <- "red" for (i in 1:length(r)) { lines(formattwr(r,i), pch = 18, lty= 3, col = colvec[i], cex = 0.8) } # Add a line lines(formattwr(r,k()), pch=18, col="red", type="b", lty=2, lwd = 3) # Add a legend legend(formattwr(r,1)[1,1], 2000, legend=coln, col=colvec, lty=3, cex=0.8) }) output$wordcloud <- renderPlot({ ## Most important words tweet_words <- dest_data() %>% select(.data$screen_name, .data$created_at, .data$text) %>% unnest_tokens(word, .data$text) %>% count(word, sort=T) my_stop_words <- tidytext::stop_words %>% select(-.data$lexicon) %>% bind_rows(data.frame(word = c("https", "t.co", "rt", "amp","4yig9gzh5t","fyy2ceydhi","78","fakenews"))) tweet_words_interesting <- tweet_words %>% anti_join(my_stop_words, by = "word") top_50 <- tweet_words_interesting %>% group_by(word) %>% tally(sort=TRUE) %>% slice(1:50) top_50$n<-floor((top_50$n)^(1/3)) top_50 %>% mutate(word = reorder(word, n, function(n) -n)) %>% ggplot() + geom_bar(aes(word, n), stat = "identity") + theme(axis.text.x = element_text(angle = 60, hjust = 1)) + xlab("") ## Create word cloud wordcloud::wordcloud(top_50[, 1, drop=TRUE], top_50[, 2, drop=TRUE])#,color="random-light", shape="cardioide", size = .4, shuffle=T, rotateRatio = sample(c(1:100) / 100)) }) } ## To be copied in the UI # mod_news_ui("news_ui_1") ## To be copied in the server # callModule(mod_news_server, "news_ui_1")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/DTMC.R \name{dtmcfpt} \alias{dtmcfpt} \title{First Passage Times} \usage{ dtmcfpt(Tr, P) } \arguments{ \item{P}{a stochastic matrix of size N by N} \item{T}{row vector representing the set of target states} } \value{ y=[y0 y1 y2], where y0, y1, and y2 are column vectors. [y1(i) y2(i)] is the [mean, second moment] of the first passage time to visit any of the states in the target set of states T, starting in a non-target state y0(i) for a DTMC X_n, n >= 0 with a transition probability matrix P. } \description{ First Passage Times } \examples{ dtmcfpt(c(1,2),matrix(c(rep(1/3,9)),nrow = 3)) }
/man/dtmcfpt.Rd
no_license
suhasghorp/MAXIM-R
R
false
true
678
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/DTMC.R \name{dtmcfpt} \alias{dtmcfpt} \title{First Passage Times} \usage{ dtmcfpt(Tr, P) } \arguments{ \item{P}{a stochastic matrix of size N by N} \item{T}{row vector representing the set of target states} } \value{ y=[y0 y1 y2], where y0, y1, and y2 are column vectors. [y1(i) y2(i)] is the [mean, second moment] of the first passage time to visit any of the states in the target set of states T, starting in a non-target state y0(i) for a DTMC X_n, n >= 0 with a transition probability matrix P. } \description{ First Passage Times } \examples{ dtmcfpt(c(1,2),matrix(c(rep(1/3,9)),nrow = 3)) }
#' Add a prism of nodes to the graph #' @description With a graph object of class #' \code{dgr_graph}, add a node prism to the graph. #' @param graph a graph object of class #' \code{dgr_graph}. #' @param n the number of nodes describing the shape #' of the prism. For example, the triangonal prism has #' \code{n} equal to 3 and it is composed of 6 nodes #' and 9 edges. For any n-gonal prism, the graph will #' be generated with 2\code{n} nodes and 3\code{n} #' edges. #' @param type an optional string that describes the #' entity type for the nodes to be added. #' @param label either a vector object of length #' \code{n} that provides optional labels for the new #' nodes, or, a boolean value where setting to #' \code{TRUE} ascribes node IDs to the label and #' \code{FALSE} yields a blank label. #' @param rel an optional string for providing a #' relationship label to all new edges created in the #' node prism. #' @return a graph object of class \code{dgr_graph}. #' @examples #' # Create a new graph and add 2 prisms #' graph <- #' create_graph() %>% #' add_prism(3, "prism", "a") %>% #' add_prism(3, "prism", "b") #' #' # Get node information from this graph #' node_info(graph) #' #> id type label deg indeg outdeg loops #' #> 1 1 prism a 3 1 2 0 #' #> 2 2 prism a 3 1 2 0 #' #> 3 3 prism a 3 1 2 0 #' #> 4 4 prism a 3 2 1 0 #' #> 5 5 prism a 3 2 1 0 #' #> 6 6 prism a 3 2 1 0 #' #> 7 7 prism b 3 1 2 0 #' #> 8 8 prism b 3 1 2 0 #' #> 9 9 prism b 3 1 2 0 #' #> 10 10 prism b 3 2 1 0 #' #> 11 11 prism b 3 2 1 0 #' #> 12 12 prism b 3 2 1 0 #' @export add_prism add_prism <- function(graph, n, type = NULL, label = TRUE, rel = NULL) { # Get the time of function start time_function_start <- Sys.time() # Validation: Graph object is valid if (graph_object_valid(graph) == FALSE) { stop("The graph object is not valid.") } # Stop if n is too small if (n <= 2) { stop("The value for n must be at least 3.") } # Get the number of nodes ever created for # this graph nodes_created <- 
graph$last_node # Get the number of edges ever created for # this graph edges_created <- graph$last_edge # Get the graph's log graph_log <- graph$graph_log # Get the graph's info graph_info <- graph$graph_info # Get the sequence of nodes required nodes <- seq(1, 2 * n) # Create a node data frame for the prism graph prism_nodes <- create_node_df( n = length(nodes), type = type, label = label) # Create an edge data frame for the prism graph prism_edges <- create_edge_df( from = c(nodes[1:(length(nodes)/2)], nodes[((length(nodes)/2) + 1):length(nodes)], nodes[1:(length(nodes)/2)]), to = c(nodes[2:(length(nodes)/2)], nodes[1], nodes[((length(nodes)/2) + 2):length(nodes)], nodes[((length(nodes)/2) + 1)], nodes[1:(length(nodes)/2)] + n), rel = rel) # Create the prism graph prism_graph <- create_graph(prism_nodes, prism_edges) # If the input graph is not empty, combine graphs # using the `combine_graphs()` function if (!is_graph_empty(graph)) { combined_graph <- combine_graphs(graph, prism_graph) # Update the `last_node` counter combined_graph$last_node <- nodes_created + nrow(prism_nodes) # Update the `last_edge` counter combined_graph$last_edge <- edges_created + nrow(prism_edges) # Update the `graph_log` df with an action graph_log <- add_action_to_log( graph_log = graph_log, version_id = nrow(graph_log) + 1, function_used = "add_balanced_tree", time_modified = time_function_start, duration = graph_function_duration(time_function_start), nodes = nrow(combined_graph$nodes_df), edges = nrow(combined_graph$edges_df)) combined_graph$graph_log <- graph_log combined_graph$graph_info <- graph_info # Write graph backup if the option is set if (combined_graph$graph_info$write_backups) { save_graph_as_rds(graph = combined_graph) } return(combined_graph) } else { # Update the `graph_log` df with an action graph_log <- add_action_to_log( graph_log = graph_log, version_id = nrow(graph_log) + 1, function_used = "add_prism", time_modified = time_function_start, duration = 
graph_function_duration(time_function_start), nodes = nrow(prism_graph$nodes_df), edges = nrow(prism_graph$edges_df)) prism_graph$graph_log <- graph_log prism_graph$graph_info <- graph_info # Write graph backup if the option is set if (prism_graph$graph_info$write_backups) { save_graph_as_rds(graph = prism_graph) } return(prism_graph) } }
/R/add_prism.R
no_license
mogaio/DiagrammeR
R
false
false
5,105
r
#' Add a prism of nodes to the graph #' @description With a graph object of class #' \code{dgr_graph}, add a node prism to the graph. #' @param graph a graph object of class #' \code{dgr_graph}. #' @param n the number of nodes describing the shape #' of the prism. For example, the triangonal prism has #' \code{n} equal to 3 and it is composed of 6 nodes #' and 9 edges. For any n-gonal prism, the graph will #' be generated with 2\code{n} nodes and 3\code{n} #' edges. #' @param type an optional string that describes the #' entity type for the nodes to be added. #' @param label either a vector object of length #' \code{n} that provides optional labels for the new #' nodes, or, a boolean value where setting to #' \code{TRUE} ascribes node IDs to the label and #' \code{FALSE} yields a blank label. #' @param rel an optional string for providing a #' relationship label to all new edges created in the #' node prism. #' @return a graph object of class \code{dgr_graph}. #' @examples #' # Create a new graph and add 2 prisms #' graph <- #' create_graph() %>% #' add_prism(3, "prism", "a") %>% #' add_prism(3, "prism", "b") #' #' # Get node information from this graph #' node_info(graph) #' #> id type label deg indeg outdeg loops #' #> 1 1 prism a 3 1 2 0 #' #> 2 2 prism a 3 1 2 0 #' #> 3 3 prism a 3 1 2 0 #' #> 4 4 prism a 3 2 1 0 #' #> 5 5 prism a 3 2 1 0 #' #> 6 6 prism a 3 2 1 0 #' #> 7 7 prism b 3 1 2 0 #' #> 8 8 prism b 3 1 2 0 #' #> 9 9 prism b 3 1 2 0 #' #> 10 10 prism b 3 2 1 0 #' #> 11 11 prism b 3 2 1 0 #' #> 12 12 prism b 3 2 1 0 #' @export add_prism add_prism <- function(graph, n, type = NULL, label = TRUE, rel = NULL) { # Get the time of function start time_function_start <- Sys.time() # Validation: Graph object is valid if (graph_object_valid(graph) == FALSE) { stop("The graph object is not valid.") } # Stop if n is too small if (n <= 2) { stop("The value for n must be at least 3.") } # Get the number of nodes ever created for # this graph nodes_created <- 
graph$last_node # Get the number of edges ever created for # this graph edges_created <- graph$last_edge # Get the graph's log graph_log <- graph$graph_log # Get the graph's info graph_info <- graph$graph_info # Get the sequence of nodes required nodes <- seq(1, 2 * n) # Create a node data frame for the prism graph prism_nodes <- create_node_df( n = length(nodes), type = type, label = label) # Create an edge data frame for the prism graph prism_edges <- create_edge_df( from = c(nodes[1:(length(nodes)/2)], nodes[((length(nodes)/2) + 1):length(nodes)], nodes[1:(length(nodes)/2)]), to = c(nodes[2:(length(nodes)/2)], nodes[1], nodes[((length(nodes)/2) + 2):length(nodes)], nodes[((length(nodes)/2) + 1)], nodes[1:(length(nodes)/2)] + n), rel = rel) # Create the prism graph prism_graph <- create_graph(prism_nodes, prism_edges) # If the input graph is not empty, combine graphs # using the `combine_graphs()` function if (!is_graph_empty(graph)) { combined_graph <- combine_graphs(graph, prism_graph) # Update the `last_node` counter combined_graph$last_node <- nodes_created + nrow(prism_nodes) # Update the `last_edge` counter combined_graph$last_edge <- edges_created + nrow(prism_edges) # Update the `graph_log` df with an action graph_log <- add_action_to_log( graph_log = graph_log, version_id = nrow(graph_log) + 1, function_used = "add_balanced_tree", time_modified = time_function_start, duration = graph_function_duration(time_function_start), nodes = nrow(combined_graph$nodes_df), edges = nrow(combined_graph$edges_df)) combined_graph$graph_log <- graph_log combined_graph$graph_info <- graph_info # Write graph backup if the option is set if (combined_graph$graph_info$write_backups) { save_graph_as_rds(graph = combined_graph) } return(combined_graph) } else { # Update the `graph_log` df with an action graph_log <- add_action_to_log( graph_log = graph_log, version_id = nrow(graph_log) + 1, function_used = "add_prism", time_modified = time_function_start, duration = 
graph_function_duration(time_function_start), nodes = nrow(prism_graph$nodes_df), edges = nrow(prism_graph$edges_df)) prism_graph$graph_log <- graph_log prism_graph$graph_info <- graph_info # Write graph backup if the option is set if (prism_graph$graph_info$write_backups) { save_graph_as_rds(graph = prism_graph) } return(prism_graph) } }
#' @title Fast value replacement in data frame #' @description While \code{replace_na_dt} could replace all NAs to another #' value, \code{replace_dt} could replace any value(s) to another specific #' value. #' @param .data A data.frame #' @param ... Colunms to be replaced. If not specified, use all columns. #' @param from A value, a vector of values or a function returns a logical value. #' Defaults to \code{is.nan}. #' @param to A value. Defaults to \code{NA}. #' @return A data.table. #' @seealso \code{\link[tidyfst]{replace_na_dt}} #' @examples #' iris %>% mutate_vars(is.factor,as.character) -> new_iris #' #' new_iris %>% #' replace_dt(Species, from = "setosa",to = "SS") #' new_iris %>% #' replace_dt(Species,from = c("setosa","virginica"),to = "sv") #' new_iris %>% #' replace_dt(Petal.Width, from = .2,to = 2) #' new_iris %>% #' replace_dt(from = .2,to = NA) #' new_iris %>% #' replace_dt(is.numeric, from = function(x) x > 3, to = 9999 ) #' @export replace_dt = function (.data, ..., from = is.nan,to = NA) { dt = as.data.table(.data) #dt = as_dt(.data) if(!is.function(from)) { if (setequal(from,to)) return(.data) if(length(from) == 1) .func = function(x) x == from else if(is.character(from)) .func = function(x) x %chin% from else .func = function(x) x %in% from } else .func = from if (substitute(list(...)) %>% deparse() == "list()") dot_string <- NULL else dot_string <- dt[0] %>% select_dt(...) %>% names() if (is.null(dot_string)) { for (j in seq_len(ncol(dt))) set(dt, which(.func(dt[[j]])), j, to) } else { for (j in dot_string) set(dt, which(.func(dt[[j]])), j, to) } dt }
/R/replace_dt.R
permissive
hope-data-science/tidyfst
R
false
false
1,784
r
#' @title Fast value replacement in data frame #' @description While \code{replace_na_dt} could replace all NAs to another #' value, \code{replace_dt} could replace any value(s) to another specific #' value. #' @param .data A data.frame #' @param ... Colunms to be replaced. If not specified, use all columns. #' @param from A value, a vector of values or a function returns a logical value. #' Defaults to \code{is.nan}. #' @param to A value. Defaults to \code{NA}. #' @return A data.table. #' @seealso \code{\link[tidyfst]{replace_na_dt}} #' @examples #' iris %>% mutate_vars(is.factor,as.character) -> new_iris #' #' new_iris %>% #' replace_dt(Species, from = "setosa",to = "SS") #' new_iris %>% #' replace_dt(Species,from = c("setosa","virginica"),to = "sv") #' new_iris %>% #' replace_dt(Petal.Width, from = .2,to = 2) #' new_iris %>% #' replace_dt(from = .2,to = NA) #' new_iris %>% #' replace_dt(is.numeric, from = function(x) x > 3, to = 9999 ) #' @export replace_dt = function (.data, ..., from = is.nan,to = NA) { dt = as.data.table(.data) #dt = as_dt(.data) if(!is.function(from)) { if (setequal(from,to)) return(.data) if(length(from) == 1) .func = function(x) x == from else if(is.character(from)) .func = function(x) x %chin% from else .func = function(x) x %in% from } else .func = from if (substitute(list(...)) %>% deparse() == "list()") dot_string <- NULL else dot_string <- dt[0] %>% select_dt(...) %>% names() if (is.null(dot_string)) { for (j in seq_len(ncol(dt))) set(dt, which(.func(dt[[j]])), j, to) } else { for (j in dot_string) set(dt, which(.func(dt[[j]])), j, to) } dt }
library(shinydashboard) library(leaflet) library(shinyBS) load("./data/Criteria_per_Cell.RData") # Define UI for application that draws a histogram dashboardPage( dashboardHeader( title = "Dive into Zurich", tags$li(a(h4("Thanks to:"), style = "padding-top:5px; padding-bottom:5px;"), class = "dropdown"), tags$li(a(href = "https://hack.twist2018.ch/project/2", img(src = 'logo.png', title = "Data Source", height = "30px"), style = "padding-top:10px; padding-bottom:10px;"), class = "dropdown") ), dashboardSidebar(disable = TRUE), dashboardBody( h4("Where should you live in canton Zurich?"), h4("Do you want to live in an area where the inhabitants are similar (or maybe dissimilar) to you? With this app, you can find these areas!"), fluidRow( column(width = 4, numericInput("var_age", "Age", min = 1, max = 100, value=40, step = 1), checkboxInput("var_child", "Do you have children?", value = FALSE), selectInput("var_gen", label = "Gender", choices = c("","Female","Male"), selected = ""), selectInput("var_nat", label = "Nationality", choices = c("","Swiss","Foreign"), selected = ""), selectInput("var_work", label = "Local Working Sector", choices = c("",colnames(dat)[15:25]), selected = ""), tipify(checkboxInput("add_varos","Add points of interest", value = FALSE), title = "Data from OpenStreetMap", placement = "top"), uiOutput("out_var_os"), actionButton("contact"," Contact Us", icon = icon("address-card"), class = "btn-primary",style = "color: white;", onclick ="window.open('https://hack.twist2018.ch/project/18', '_blank')"), actionButton("help"," Help", icon = icon("info-circle"), class = "btn-primary",style = "color: white;"), div(style="display:inline-block;width:32%;text-align: center;", actionButton("reset"," Reset", icon = icon("power-off"), class = "btn-warning",style = "color: white;")), br(),br(), img(src = 'group.JPG',title = "Group Photo", height = "180px") ), column(width = 8, leafletOutput("visplot", width = "80%", height = 600) ) ) ) )
/shiny/ui.R
no_license
Geostatistic/TWIST_SpatialStatistics_Zurich
R
false
false
2,793
r
library(shinydashboard) library(leaflet) library(shinyBS) load("./data/Criteria_per_Cell.RData") # Define UI for application that draws a histogram dashboardPage( dashboardHeader( title = "Dive into Zurich", tags$li(a(h4("Thanks to:"), style = "padding-top:5px; padding-bottom:5px;"), class = "dropdown"), tags$li(a(href = "https://hack.twist2018.ch/project/2", img(src = 'logo.png', title = "Data Source", height = "30px"), style = "padding-top:10px; padding-bottom:10px;"), class = "dropdown") ), dashboardSidebar(disable = TRUE), dashboardBody( h4("Where should you live in canton Zurich?"), h4("Do you want to live in an area where the inhabitants are similar (or maybe dissimilar) to you? With this app, you can find these areas!"), fluidRow( column(width = 4, numericInput("var_age", "Age", min = 1, max = 100, value=40, step = 1), checkboxInput("var_child", "Do you have children?", value = FALSE), selectInput("var_gen", label = "Gender", choices = c("","Female","Male"), selected = ""), selectInput("var_nat", label = "Nationality", choices = c("","Swiss","Foreign"), selected = ""), selectInput("var_work", label = "Local Working Sector", choices = c("",colnames(dat)[15:25]), selected = ""), tipify(checkboxInput("add_varos","Add points of interest", value = FALSE), title = "Data from OpenStreetMap", placement = "top"), uiOutput("out_var_os"), actionButton("contact"," Contact Us", icon = icon("address-card"), class = "btn-primary",style = "color: white;", onclick ="window.open('https://hack.twist2018.ch/project/18', '_blank')"), actionButton("help"," Help", icon = icon("info-circle"), class = "btn-primary",style = "color: white;"), div(style="display:inline-block;width:32%;text-align: center;", actionButton("reset"," Reset", icon = icon("power-off"), class = "btn-warning",style = "color: white;")), br(),br(), img(src = 'group.JPG',title = "Group Photo", height = "180px") ), column(width = 8, leafletOutput("visplot", width = "80%", height = 600) ) ) ) )
% Please see file R/netCDF.R \name{ncdf4_convert} \alias{ncdf4_convert} \title{Convert from a NetCDF file format 3 to format 4} \usage{ ncdf4_convert(cdfFile, outFile = NULL, force = FALSE, baseline = FALSE, ...) } \arguments{ \item{cdfFile}{The NetCDF file to be converted} \item{outFile}{The new output file. If \code{NULL}, it replaces the \code{cdfFile}'s file extension (which should be \code{.cdf}) by \code{.nc4}. If the file extension is not \code{.cdf}, then \code{.nc4} is just appended. If the path to the file does not exist, it will be created automatically.} \item{force}{Logical. Set to \code{TRUE} to allow file overwrites, for example if the destination file still exists, in which case a warning is thrown. Default to \code{FALSE}.} \item{baseline}{Logical. Whether or not baseline correct the input file.} \item{\dots}{extra options passed to \code{\link[=baseline]{baseline()}}.} } \value{ A string. The path to the converted file or invisible. } \description{ Convert from NetCDF format 3 into a custom TargetSearch NetCDF format 4. The new NetCDF just contains the raw data in a matrix format in order to allow easier and faster data manipulation. } \details{ Starting from version 1.42.0, TargetSearch introduces a custom NetCDF file which is used for faster and easier data manipulation. This means, ion traces within a retention time can be quickly extracted, which if often required before plotting. Formerly, this process required parsing the whole file before the data could be extracted. Note that this function only takes one file at the time. To convert many files at the same time, see the function \code{\link[=ncdf4_convert_from_path]{ncdf4_convert_from_path()}} or the high level method \code{\link[=ncdf4Convert]{ncdf4Convert()}}. Alternatively, you can call this function in a loop or using the \code{\link[base:lapply]{lapply}} family of functions. Keep in mind this function is intended for internal use (or advanced users); it is exported for convenience. 
Using the method \code{\link[=ncdf4Convert]{ncdf4Convert()}} is recommended. } \note{ Currently, it is not possible to reconstruct the original NetCDF file from the converted file, especially if nominal mass or baseline correction was applied. On the other hand, if the NetCDF files are exported from custom chromatogram files (such as thermo raw files or LECO peg files), then the NetCDF 3 files can be deleted safely as there is always a way to recover them. } \section{File structure}{ The structure of the NetCDF format 4 is straightforward and the variables and attributes are self-evident. The following variables are defined. \itemize{ \item \code{retention_time} is a vector representing the retention time in seconds (double). \item \code{retention_index} is a vector representing the retention time indices (double). If missing, then the variable contains zeros. Its length is equal to the length of \code{retention_time}. \item \code{mass_range} is vector of length two containing the minimum and maximum m/z values (integer). \item \code{intensity} is matrix of intensity values (integer) where columns represent ion traces and rows are scans. The dimensions are length of "retention time" times the number of ions, ie, mass max - mass min + 1. } In addition, the following attributes are defined. Note that only \code{creator} and \code{version} are mandatory. \itemize{ \item \code{creator} a string equal to "TargetSearch" (for indentification purposes). \item \code{version} file format version (string). Currently "1.1". \item \code{time_corrected} (optional) a flag (short integer) to indicate RI correction. If missing it defaults to \code{false}. \item \code{baseline_corrected} (optional) a flag (short integer) to indicate that the file was baseline corrected by TargetSearch. If missing it defaults to \code{false}. 
} } \examples{ require(TargetSearchData) # get files from package TargetSearchData cdfpath <- tsd_data_path() # choose any file cdf <- file.path(cdfpath, '7235eg04.cdf') nc4 <- '7235eg04.nc4' # save file in current path # run the function ret <- ncdf4_convert(cdf, nc4) # the output should match the output file stopifnot(ret == nc4) # Use mapply to convert many files at the same time. cdf <- paste0('7235eg0', 6:8, '.cdf') nc4 <- paste0('7235eg0', 6:8, '.nc4') ret <- mapply(ncdf4_convert, file.path(cdfpath, cdf), nc4) stopifnot(ret == nc4) } \seealso{ \code{\link[=ncdf4Convert]{ncdf4Convert()}}, \code{\link[=ncdf4_convert_from_path]{ncdf4_convert_from_path()}}, \code{\link[=baseline]{baseline()}} } \author{ Alvaro Cuadros-Inostroza }
/man/ncdf4_convert.Rd
no_license
acinostroza/TargetSearch
R
false
false
4,606
rd
% Please see file R/netCDF.R \name{ncdf4_convert} \alias{ncdf4_convert} \title{Convert from a NetCDF file format 3 to format 4} \usage{ ncdf4_convert(cdfFile, outFile = NULL, force = FALSE, baseline = FALSE, ...) } \arguments{ \item{cdfFile}{The NetCDF file to be converted} \item{outFile}{The new output file. If \code{NULL}, it replaces the \code{cdfFile}'s file extension (which should be \code{.cdf}) by \code{.nc4}. If the file extension is not \code{.cdf}, then \code{.nc4} is just appended. If the path to the file does not exist, it will be created automatically.} \item{force}{Logical. Set to \code{TRUE} to allow file overwrites, for example if the destination file still exists, in which case a warning is thrown. Default to \code{FALSE}.} \item{baseline}{Logical. Whether or not baseline correct the input file.} \item{\dots}{extra options passed to \code{\link[=baseline]{baseline()}}.} } \value{ A string. The path to the converted file or invisible. } \description{ Convert from NetCDF format 3 into a custom TargetSearch NetCDF format 4. The new NetCDF just contains the raw data in a matrix format in order to allow easier and faster data manipulation. } \details{ Starting from version 1.42.0, TargetSearch introduces a custom NetCDF file which is used for faster and easier data manipulation. This means, ion traces within a retention time can be quickly extracted, which if often required before plotting. Formerly, this process required parsing the whole file before the data could be extracted. Note that this function only takes one file at the time. To convert many files at the same time, see the function \code{\link[=ncdf4_convert_from_path]{ncdf4_convert_from_path()}} or the high level method \code{\link[=ncdf4Convert]{ncdf4Convert()}}. Alternatively, you can call this function in a loop or using the \code{\link[base:lapply]{lapply}} family of functions. Keep in mind this function is intended for internal use (or advanced users); it is exported for convenience. 
Using the method \code{\link[=ncdf4Convert]{ncdf4Convert()}} is recommended. } \note{ Currently, it is not possible to reconstruct the original NetCDF file from the converted file, especially if nominal mass or baseline correction was applied. On the other hand, if the NetCDF files are exported from custom chromatogram files (such as thermo raw files or LECO peg files), then the NetCDF 3 files can be deleted safely as there is always a way to recover them. } \section{File structure}{ The structure of the NetCDF format 4 is straightforward and the variables and attributes are self-evident. The following variables are defined. \itemize{ \item \code{retention_time} is a vector representing the retention time in seconds (double). \item \code{retention_index} is a vector representing the retention time indices (double). If missing, then the variable contains zeros. Its length is equal to the length of \code{retention_time}. \item \code{mass_range} is vector of length two containing the minimum and maximum m/z values (integer). \item \code{intensity} is matrix of intensity values (integer) where columns represent ion traces and rows are scans. The dimensions are length of "retention time" times the number of ions, ie, mass max - mass min + 1. } In addition, the following attributes are defined. Note that only \code{creator} and \code{version} are mandatory. \itemize{ \item \code{creator} a string equal to "TargetSearch" (for indentification purposes). \item \code{version} file format version (string). Currently "1.1". \item \code{time_corrected} (optional) a flag (short integer) to indicate RI correction. If missing it defaults to \code{false}. \item \code{baseline_corrected} (optional) a flag (short integer) to indicate that the file was baseline corrected by TargetSearch. If missing it defaults to \code{false}. 
} } \examples{ require(TargetSearchData) # get files from package TargetSearchData cdfpath <- tsd_data_path() # choose any file cdf <- file.path(cdfpath, '7235eg04.cdf') nc4 <- '7235eg04.nc4' # save file in current path # run the function ret <- ncdf4_convert(cdf, nc4) # the output should match the output file stopifnot(ret == nc4) # Use mapply to convert many files at the same time. cdf <- paste0('7235eg0', 6:8, '.cdf') nc4 <- paste0('7235eg0', 6:8, '.nc4') ret <- mapply(ncdf4_convert, file.path(cdfpath, cdf), nc4) stopifnot(ret == nc4) } \seealso{ \code{\link[=ncdf4Convert]{ncdf4Convert()}}, \code{\link[=ncdf4_convert_from_path]{ncdf4_convert_from_path()}}, \code{\link[=baseline]{baseline()}} } \author{ Alvaro Cuadros-Inostroza }
\name{giveseats} \alias{giveseats} \alias{adams} \alias{agnew} \alias{customdivisor} \alias{danish} \alias{dean} \alias{dh} \alias{divisormethod} \alias{hh} \alias{hungarian} \alias{ichimori} \alias{ichimori13} \alias{imperiali} \alias{lr} \alias{msl} \alias{nepalese} \alias{nohlen} \alias{plurality} \alias{sl} \alias{steady} \alias{swedish} \alias{theil} \alias{theil-schrage} \alias{theilschrage} \title{Allocate indivisibilities} \description{ Function for proportional allocation of indivisibilities such as parliamentary seats } \usage{ giveseats(v, ns, method, thresh = 0, quota = NA, divs = NULL) } \arguments{ \item{v}{numeric, vector of votes (claims)} \item{ns}{numeric, number of seats (indivisibilities) to allocate} \item{method}{character, name of the allocation algorithm to use (see Details)} \item{thresh}{numeric, threshold of exclusion; if in [0,1], treated as a fraction; if in (1, 100), treated as a percent; if larger than 100, treated as a vote coun} \item{quota}{character, quota for \code{method="largest remainders"}; see Details} \item{divs}{numeric, divisors for \code{method="custom"}, must be non-negative} } \details{ Argument \code{method} takes the following values Divisor methods: \code{"dh"} for the D'Hondt method, for which the \eqn{x}th divisor value is \eqn{x} \code{"je"} for the Jefferson method which is equivalent to the D'Hondt method \code{"hb"} for the Hagenbach-Bischoff method which is equivalent to the D'Hondt method \code{"ad"} for the Adams method, for which the \eqn{x}th divisor equals \eqn{x-1} \code{"sd"} for the Smallest Divisors method, an alias of the Adams method \code{"no"} for the Nohlen method, for which the \eqn{x}th divisor is \eqn{x+1} \code{"im"} for the Imperiali method, for which the \eqn{x}th divisor is \eqn{(x+1)/2} \code{"sl"} for the Sainte-Lague method, for which the \eqn{x}th divsor is \eqn{2x-1} \code{"we"} for the Webster method which is equivalent to the Sainte-Lague method \code{"sw"} for the (new) Swedish 
Sainte-Lague method, which is identical to the Sainte-Lague method with the exception of the 1st divisor which equals to 1.2 \code{"ne"} for the Nepalese Sainte-Lague method, which is identical to the Sainte-Lague method with the exception of the 1st divisor which equals to 1.4 \code{"nor"} for the Norwegian Sainte-Lague method, which is identical to the Sainte-Lague method with the exception of the 1st divisor which equals to 1.4 \code{"hu"} for the Hungarian Sainte-Lague method, which is identical to the Sainte-Lague method with the exception of the 1st divisor which equals to 1.5 \code{"msl"} for the Modified Sainte-Lague method for which the 1st divisor is 1 and all the subsequent divisors are \eqn{(2x-1)5/7} \code{"da"} for the Danish method, for which the \eqn{x}th divisor is \eqn{3x-2} \code{"hh"} for the Huntington-Hill method for which the \eqn{x}th divisor is \eqn{\sqrt{x(x-1)}} \code{"ep"} for the Equal Proportions method, an alias of the Huntington-Hill method \code{"pl"} for the Plurality (a.k.a. Steady) method (identic divisors) where the \eqn{x}th divisor is a constant (\eqn{x^0}) \code{"de"} for the Dean method; the \eqn{x}th divisor is \eqn{x(x-1)/(x-0.5)} \code{"ts"} for the Theil-Schrage method (logarithmic mean divisors); the \eqn{x}th divisor is \deqn{\frac{1}{\ln{\frac{x}{x-1}}}} \code{"ag"} for the Agnew method (identric mean divisors); a.k.a. 
Theil, Ossipoff, Entropic; the \eqn{x}th divisor is \deqn{\frac{1}{e} \frac{x^x}{(x-1)^{x-1}}} \code{"ich"} for the Ichimori 1/3 method; the \eqn{x}th divisor is \eqn{\sqrt{x^2 + x + 1/3}} \code{"custom"} for user-supplied divisors (in argument \code{divs}) Largest remainders method can be called with \code{method="lr"} but requires to set the \code{quota} argument to one of \code{"ha"} for the Hare quota e/l where e is the size of the number of votes and l the number of seats \code{"dr"} for the Droop quota \deqn{\left \lfloor 1 + \frac{e}{l+1} \right \rfloor} \code{"hb"} for the Hagenbach-Bischoff quota e/(l+1) \code{"im"} for the Imperiali quota e/(l+2) \code{"rei"} for the Reinforced Imperiali quota e/(l+3) Under the largest remainder method it is possible that more than the available number of seats will be assigned in the first round (under the Imperiali and Reinforced Imperiali quotas) in which case the funtion terminates with an error message. } \value{ A named list of two items: \item{method}{character, the name of the apportionment method used} \item{seats}{numeric vector with seats} } \references{ Agnew, Robert A. 2008. Optimal Congressional Apportionment. The American Mathematical Monthly 115 (4). Grilli di Cortona, Pietro, et al. 1999. Evaluation and Optimization of Electoral Systems. SIAM. Ichimori, T., 2010. New apportionment methods and their quota property. JSIAM Letters. Marcelino, Daniel. 2016. SciencesPo: A tool set for analyzing political behavior data. R package version 1.4.1. http://CRAN.R-project.org/package=SciencesPo. Wada, Junichiro. 2016. "Apportionment behind the veil of uncertainty". The Japanese Economic Review 67 (3): 348–360 } \author{Juraj Medzihorsky} %% \note{} %% \seealso{} \examples{ seatdist::giveseats(v=c(A=60, B=28, C=12)*1e3, ns=1e1, method="lr", quota="hb", thresh=5e-2) # thresh treated as a fraction # $method # "Largest Remainders with Hagenbach-Bischoff quota" # $seats # A B C # 6 3 1 } %% \keyword{ apportionment }
/man/giveseats.Rd
no_license
jmedzihorsky/seatdist
R
false
false
5,775
rd
\name{giveseats} \alias{giveseats} \alias{adams} \alias{agnew} \alias{customdivisor} \alias{danish} \alias{dean} \alias{dh} \alias{divisormethod} \alias{hh} \alias{hungarian} \alias{ichimori} \alias{ichimori13} \alias{imperiali} \alias{lr} \alias{msl} \alias{nepalese} \alias{nohlen} \alias{plurality} \alias{sl} \alias{steady} \alias{swedish} \alias{theil} \alias{theil-schrage} \alias{theilschrage} \title{Allocate indivisibilities} \description{ Function for proportional allocation of indivisibilities such as parliamentary seats } \usage{ giveseats(v, ns, method, thresh = 0, quota = NA, divs = NULL) } \arguments{ \item{v}{numeric, vector of votes (claims)} \item{ns}{numeric, number of seats (indivisibilities) to allocate} \item{method}{character, name of the allocation algorithm to use (see Details)} \item{thresh}{numeric, threshold of exclusion; if in [0,1], treated as a fraction; if in (1, 100), treated as a percent; if larger than 100, treated as a vote coun} \item{quota}{character, quota for \code{method="largest remainders"}; see Details} \item{divs}{numeric, divisors for \code{method="custom"}, must be non-negative} } \details{ Argument \code{method} takes the following values Divisor methods: \code{"dh"} for the D'Hondt method, for which the \eqn{x}th divisor value is \eqn{x} \code{"je"} for the Jefferson method which is equivalent to the D'Hondt method \code{"hb"} for the Hagenbach-Bischoff method which is equivalent to the D'Hondt method \code{"ad"} for the Adams method, for which the \eqn{x}th divisor equals \eqn{x-1} \code{"sd"} for the Smallest Divisors method, an alias of the Adams method \code{"no"} for the Nohlen method, for which the \eqn{x}th divisor is \eqn{x+1} \code{"im"} for the Imperiali method, for which the \eqn{x}th divisor is \eqn{(x+1)/2} \code{"sl"} for the Sainte-Lague method, for which the \eqn{x}th divsor is \eqn{2x-1} \code{"we"} for the Webster method which is equivalent to the Sainte-Lague method \code{"sw"} for the (new) Swedish 
Sainte-Lague method, which is identical to the Sainte-Lague method with the exception of the 1st divisor which equals to 1.2 \code{"ne"} for the Nepalese Sainte-Lague method, which is identical to the Sainte-Lague method with the exception of the 1st divisor which equals to 1.4 \code{"nor"} for the Norwegian Sainte-Lague method, which is identical to the Sainte-Lague method with the exception of the 1st divisor which equals to 1.4 \code{"hu"} for the Hungarian Sainte-Lague method, which is identical to the Sainte-Lague method with the exception of the 1st divisor which equals to 1.5 \code{"msl"} for the Modified Sainte-Lague method for which the 1st divisor is 1 and all the subsequent divisors are \eqn{(2x-1)5/7} \code{"da"} for the Danish method, for which the \eqn{x}th divisor is \eqn{3x-2} \code{"hh"} for the Huntington-Hill method for which the \eqn{x}th divisor is \eqn{\sqrt{x(x-1)}} \code{"ep"} for the Equal Proportions method, an alias of the Huntington-Hill method \code{"pl"} for the Plurality (a.k.a. Steady) method (identic divisors) where the \eqn{x}th divisor is a constant (\eqn{x^0}) \code{"de"} for the Dean method; the \eqn{x}th divisor is \eqn{x(x-1)/(x-0.5)} \code{"ts"} for the Theil-Schrage method (logarithmic mean divisors); the \eqn{x}th divisor is \deqn{\frac{1}{\ln{\frac{x}{x-1}}}} \code{"ag"} for the Agnew method (identric mean divisors); a.k.a. 
Theil, Ossipoff, Entropic; the \eqn{x}th divisor is \deqn{\frac{1}{e} \frac{x^x}{(x-1)^{x-1}}} \code{"ich"} for the Ichimori 1/3 method; the \eqn{x}th divisor is \eqn{\sqrt{x^2 + x + 1/3}} \code{"custom"} for user-supplied divisors (in argument \code{divs}) Largest remainders method can be called with \code{method="lr"} but requires to set the \code{quota} argument to one of \code{"ha"} for the Hare quota e/l where e is the size of the number of votes and l the number of seats \code{"dr"} for the Droop quota \deqn{\left \lfloor 1 + \frac{e}{l+1} \right \rfloor} \code{"hb"} for the Hagenbach-Bischoff quota e/(l+1) \code{"im"} for the Imperiali quota e/(l+2) \code{"rei"} for the Reinforced Imperiali quota e/(l+3) Under the largest remainder method it is possible that more than the available number of seats will be assigned in the first round (under the Imperiali and Reinforced Imperiali quotas) in which case the funtion terminates with an error message. } \value{ A named list of two items: \item{method}{character, the name of the apportionment method used} \item{seats}{numeric vector with seats} } \references{ Agnew, Robert A. 2008. Optimal Congressional Apportionment. The American Mathematical Monthly 115 (4). Grilli di Cortona, Pietro, et al. 1999. Evaluation and Optimization of Electoral Systems. SIAM. Ichimori, T., 2010. New apportionment methods and their quota property. JSIAM Letters. Marcelino, Daniel. 2016. SciencesPo: A tool set for analyzing political behavior data. R package version 1.4.1. http://CRAN.R-project.org/package=SciencesPo. Wada, Junichiro. 2016. "Apportionment behind the veil of uncertainty". The Japanese Economic Review 67 (3): 348–360 } \author{Juraj Medzihorsky} %% \note{} %% \seealso{} \examples{ seatdist::giveseats(v=c(A=60, B=28, C=12)*1e3, ns=1e1, method="lr", quota="hb", thresh=5e-2) # thresh treated as a fraction # $method # "Largest Remainders with Hagenbach-Bischoff quota" # $seats # A B C # 6 3 1 } %% \keyword{ apportionment }
source('../library/GHI.R') source('../library/sanitise_data.R') # uncomment to select agglomeration method # '' = holistic by default agglomeration <- '' # agglomeration <- 'uniq' # agglomeration <- 'excl' # corresponding input and output file names input_file <- paste('../data/species_', agglomeration, 'pref.csv', sep='') output_file <- paste("../data/FOJHori", agglomeration, "_spnos.csv", sep="") # load data # hori <- read.csv('../data/species_pref.csv', row.names = NULL) hori <- read.csv(input_file, row.names = NULL) foj <- read.csv('../data/species_pref_foj.csv', row.names = NULL) # select and rename relevant star columns foj <- rename_star_to_xstar(foj) foj <- rename_starinfs_to_star(foj) # neaten up raw data frames hori <- sanitise(hori) foj <- sanitise(foj) # number of species by star and prefecture hori_table <- table(hori[,c('star', 'pref')]) foj_table <- table(foj[,c('star', 'pref')]) # number of species by prefecture hori_spno <- table(hori[,c('pref')]) foj_spno <- table(foj[,c('pref')]) # merge species number into one table spnos <- cbind(foj_spno,hori_spno) # remove prefectures with no data spnos <- spnos[hori_spno != 0 & foj_spno != 0,] # convert scores to dataframe spnos_frame <- data.frame(preflist=row.names(spnos), foj_spno=spnos[,1], hori_spno=spnos[,2], row.names=NULL) # write results (scores) to file write.csv(spnos_frame, file=output_file, #file="../data/FOJHori_spnos.csv", row.names=FALSE, fileEncoding="UTF-8") # paired-t test between the number of species per prefecture (or rather proportions relative to the total species pool) between hori and foj? check histogram if normally distributed. also think how to treat data = 0 # rank correlation analysis for the sum number of species cor(foj_spno,hori_spno, method = "spearman") # fit a linear model (linear regression analysis) compare_spnos <- lm(rank(t(hori_spno)) ~ rank(t(foj_spno))) # graphs to be considered.
/bin/compare_species_proportions_by_prefecture.R
permissive
Nodoka/Bioquality
R
false
false
2,003
r
source('../library/GHI.R') source('../library/sanitise_data.R') # uncomment to select agglomeration method # '' = holistic by default agglomeration <- '' # agglomeration <- 'uniq' # agglomeration <- 'excl' # corresponding input and output file names input_file <- paste('../data/species_', agglomeration, 'pref.csv', sep='') output_file <- paste("../data/FOJHori", agglomeration, "_spnos.csv", sep="") # load data # hori <- read.csv('../data/species_pref.csv', row.names = NULL) hori <- read.csv(input_file, row.names = NULL) foj <- read.csv('../data/species_pref_foj.csv', row.names = NULL) # select and rename relevant star columns foj <- rename_star_to_xstar(foj) foj <- rename_starinfs_to_star(foj) # neaten up raw data frames hori <- sanitise(hori) foj <- sanitise(foj) # number of species by star and prefecture hori_table <- table(hori[,c('star', 'pref')]) foj_table <- table(foj[,c('star', 'pref')]) # number of species by prefecture hori_spno <- table(hori[,c('pref')]) foj_spno <- table(foj[,c('pref')]) # merge species number into one table spnos <- cbind(foj_spno,hori_spno) # remove prefectures with no data spnos <- spnos[hori_spno != 0 & foj_spno != 0,] # convert scores to dataframe spnos_frame <- data.frame(preflist=row.names(spnos), foj_spno=spnos[,1], hori_spno=spnos[,2], row.names=NULL) # write results (scores) to file write.csv(spnos_frame, file=output_file, #file="../data/FOJHori_spnos.csv", row.names=FALSE, fileEncoding="UTF-8") # paired-t test between the number of species per prefecture (or rather proportions relative to the total species pool) between hori and foj? check histogram if normally distributed. also think how to treat data = 0 # rank correlation analysis for the sum number of species cor(foj_spno,hori_spno, method = "spearman") # fit a linear model (linear regression analysis) compare_spnos <- lm(rank(t(hori_spno)) ~ rank(t(foj_spno))) # graphs to be considered.
#!/usr/bin/Rscript #library(mrgsolve) #library(metrumrg) filename <- function (dir, run = NULL, ext = NULL) file.path(dir, paste0(run, ext)) ls <- list.files("rdev/inst/project",pattern="\\.cpp$") ls <- ls[!grepl("\\.cpp\\.cpp", ls)] out <- lapply(ls, function(file) { ## Model is being saved as "x" stem <- gsub("\\.cpp$", "", file) message(paste0("Building: ", stem)) x <- mread(stem, "rdev/inst/project",udll=FALSE,compile=FALSE) x <- new("packmod", x, package="modmrg", model=stem) x <- mrgsolve:::relocate_funs(x, "modmrg") x <- mrgsolve:::compiled(x,TRUE) x@shlib$par <- pars(x) x@shlib$cmt <- cmt(x) save(file=filename(file.path("rdev","inst", "project"),stem, ".save"), x) x }) grab <- list.files("rdev/inst/project", pattern="*.\\.(cpp\\.cpp|h)") foo <- file.copy(file.path("rdev/inst/project",grab),file.path("rdev/src",grab), overwrite=TRUE)
/makescripts/buildmodels.R
no_license
dpastoor/modmrg
R
false
false
905
r
#!/usr/bin/Rscript #library(mrgsolve) #library(metrumrg) filename <- function (dir, run = NULL, ext = NULL) file.path(dir, paste0(run, ext)) ls <- list.files("rdev/inst/project",pattern="\\.cpp$") ls <- ls[!grepl("\\.cpp\\.cpp", ls)] out <- lapply(ls, function(file) { ## Model is being saved as "x" stem <- gsub("\\.cpp$", "", file) message(paste0("Building: ", stem)) x <- mread(stem, "rdev/inst/project",udll=FALSE,compile=FALSE) x <- new("packmod", x, package="modmrg", model=stem) x <- mrgsolve:::relocate_funs(x, "modmrg") x <- mrgsolve:::compiled(x,TRUE) x@shlib$par <- pars(x) x@shlib$cmt <- cmt(x) save(file=filename(file.path("rdev","inst", "project"),stem, ".save"), x) x }) grab <- list.files("rdev/inst/project", pattern="*.\\.(cpp\\.cpp|h)") foo <- file.copy(file.path("rdev/inst/project",grab),file.path("rdev/src",grab), overwrite=TRUE)
\name{MAC_lags} \alias{MAC_lags} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Internal function used by MAC_counter and MAC_perm } \description{ Performs the lag-based correlation analysis } \usage{ MAC_lags(data, max_lag_prop = 1/3, symmetric = F) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{data}{ Data being passed to the function } \item{max_lag_prop}{ The largest proportion of your experiments that you want the size of the lag to be. Recommended not to go beyond 1/3. Default value is 1/3. } \item{symmetric}{ Logical. TRUE indicates that a symmetric matrix is required for output MAC .csv file. Results in the absolute maximum value for each pair (i,j) and (j,i). Default value is FALSE. } } \details{ See vignette for more details. } \value{ Returns MAC and associated lag matrices. } \examples{ x <- matrix(rnorm(6),2,3) y <- MAC_lags(x) \dontrun{MAC_results = MAC_lags(data=example_data, max_lag_prop=1/3, symmetric=F)}} % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ ~kwd1 } \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
/man/MAC_lags.Rd
no_license
cran/LEAP
R
false
false
1,199
rd
\name{MAC_lags} \alias{MAC_lags} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Internal function used by MAC_counter and MAC_perm } \description{ Performs the lag-based correlation analysis } \usage{ MAC_lags(data, max_lag_prop = 1/3, symmetric = F) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{data}{ Data being passed to the function } \item{max_lag_prop}{ The largest proportion of your experiments that you want the size of the lag to be. Recommended not to go beyond 1/3. Default value is 1/3. } \item{symmetric}{ Logical. TRUE indicates that a symmetric matrix is required for output MAC .csv file. Results in the absolute maximum value for each pair (i,j) and (j,i). Default value is FALSE. } } \details{ See vignette for more details. } \value{ Returns MAC and associated lag matrices. } \examples{ x <- matrix(rnorm(6),2,3) y <- MAC_lags(x) \dontrun{MAC_results = MAC_lags(data=example_data, max_lag_prop=1/3, symmetric=F)}} % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ ~kwd1 } \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rekognition_operations.R \name{rekognition_index_faces} \alias{rekognition_index_faces} \title{Detects faces in the input image and adds them to the specified collection} \usage{ rekognition_index_faces(CollectionId, Image, ExternalImageId, DetectionAttributes, MaxFaces, QualityFilter) } \arguments{ \item{CollectionId}{[required] The ID of an existing collection to which you want to add the faces that are detected in the input images.} \item{Image}{[required] The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes isn\'t supported. If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the \code{Bytes} field. For more information, see Images in the Amazon Rekognition developer guide.} \item{ExternalImageId}{The ID you want to assign to all the faces detected in the image.} \item{DetectionAttributes}{An array of facial attributes that you want to be returned. This can be the default list of attributes or all attributes. If you don\'t specify a value for \code{Attributes} or if you specify \verb{\\\["DEFAULT"\\\]}, the API returns the following subset of facial attributes: \code{BoundingBox}, \code{Confidence}, \code{Pose}, \code{Quality}, and \code{Landmarks}. If you provide \verb{\\\["ALL"\\\]}, all facial attributes are returned, but the operation takes longer to complete. If you provide both, \verb{\\\["ALL", "DEFAULT"\\\]}, the service uses a logical AND operator to determine which attributes to return (in this case, all attributes).} \item{MaxFaces}{The maximum number of faces to index. The value of \code{MaxFaces} must be greater than or equal to 1. \code{IndexFaces} returns no more than 100 detected faces in an image, even if you specify a larger value for \code{MaxFaces}. 
If \code{IndexFaces} detects more faces than the value of \code{MaxFaces}, the faces with the lowest quality are filtered out first. If there are still more faces than the value of \code{MaxFaces}, the faces with the smallest bounding boxes are filtered out (up to the number that\'s needed to satisfy the value of \code{MaxFaces}). Information about the unindexed faces is available in the \code{UnindexedFaces} array. The faces that are returned by \code{IndexFaces} are sorted by the largest face bounding box size to the smallest size, in descending order. \code{MaxFaces} can be used with a collection associated with any version of the face model.} \item{QualityFilter}{A filter that specifies a quality bar for how much filtering is done to identify faces. Filtered faces aren\'t indexed. If you specify \code{AUTO}, Amazon Rekognition chooses the quality bar. If you specify \code{LOW}, \code{MEDIUM}, or \code{HIGH}, filtering removes all faces that don't meet the chosen quality bar. The default value is \code{AUTO}. The quality bar is based on a variety of common use cases. Low-quality detections can occur for a number of reasons. Some examples are an object that\'s misidentified as a face, a face that\'s too blurry, or a face with a pose that\'s too extreme to use. If you specify \code{NONE}, no filtering is performed. To use quality filtering, the collection you are using must be associated with version 3 of the face model or higher.} } \description{ Detects faces in the input image and adds them to the specified collection. } \details{ Amazon Rekognition doesn\'t save the actual faces that are detected. Instead, the underlying detection algorithm first detects the faces in the input image. For each face, the algorithm extracts facial features into a feature vector, and stores it in the backend database. Amazon Rekognition uses feature vectors when it performs face match and search operations using the SearchFaces and SearchFacesByImage operations. 
For more information, see Adding Faces to a Collection in the Amazon Rekognition Developer Guide. To get the number of faces in a collection, call DescribeCollection. If you\'re using version 1.0 of the face detection model, \code{IndexFaces} indexes the 15 largest faces in the input image. Later versions of the face detection model index the 100 largest faces in the input image. If you\'re using version 4 or later of the face model, image orientation information is not returned in the \code{OrientationCorrection} field. To determine which version of the model you\'re using, call DescribeCollection and supply the collection ID. You can also get the model version from the value of \code{FaceModelVersion} in the response from \code{IndexFaces} For more information, see Model Versioning in the Amazon Rekognition Developer Guide. If you provide the optional \code{ExternalImageId} for the input image you provided, Amazon Rekognition associates this ID with all faces that it detects. When you call the ListFaces operation, the response returns the external ID. You can use this external image ID to create a client-side index to associate the faces with each image. You can then use the index to find all faces in an image. You can specify the maximum number of faces to index with the \code{MaxFaces} input parameter. This is useful when you want to index the largest faces in an image and don\'t want to index smaller faces, such as those belonging to people standing in the background. The \code{QualityFilter} input parameter allows you to filter out detected faces that don't meet a required quality bar. The quality bar is based on a variety of common use cases. By default, \code{IndexFaces} chooses the quality bar that\'s used to filter faces. You can also explicitly choose the quality bar. Use \code{QualityFilter}, to set the quality bar by specifying \code{LOW}, \code{MEDIUM}, or \code{HIGH}. If you do not want to filter detected faces, specify \code{NONE}. 
To use quality filtering, you need a collection associated with version 3 of the face model or higher. To get the version of the face model associated with a collection, call DescribeCollection. Information about faces detected in an image, but not indexed, is returned in an array of UnindexedFace objects, \code{UnindexedFaces}. Faces aren\'t indexed for reasons such as: \itemize{ \item The number of faces detected exceeds the value of the \code{MaxFaces} request parameter. \item The face is too small compared to the image dimensions. \item The face is too blurry. \item The image is too dark. \item The face has an extreme pose. \item The face doesn't have enough detail to be suitable for face search. } In response, the \code{IndexFaces} operation returns an array of metadata for all detected faces, \code{FaceRecords}. This includes: \itemize{ \item The bounding box, \code{BoundingBox}, of the detected face. \item A confidence value, \code{Confidence}, which indicates the confidence that the bounding box contains a face. \item A face ID, \code{FaceId}, assigned by the service for each face that\'s detected and stored. \item An image ID, \code{ImageId}, assigned by the service for the input image. } If you request all facial attributes (by using the \code{detectionAttributes} parameter), Amazon Rekognition returns detailed facial attributes, such as facial landmarks (for example, location of eye and mouth) and other facial attributes. If you provide the same image, specify the same collection, and use the same external ID in the \code{IndexFaces} operation, Amazon Rekognition doesn\'t save duplicate face metadata. The input image is passed either as base64-encoded image bytes, or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes isn\'t supported. The image must be formatted as a PNG or JPEG file. This operation requires permissions to perform the \code{rekognition:IndexFaces} action. 
} \section{Request syntax}{ \preformatted{svc$index_faces( CollectionId = "string", Image = list( Bytes = raw, S3Object = list( Bucket = "string", Name = "string", Version = "string" ) ), ExternalImageId = "string", DetectionAttributes = list( "DEFAULT"|"ALL" ), MaxFaces = 123, QualityFilter = "NONE"|"AUTO"|"LOW"|"MEDIUM"|"HIGH" ) } } \examples{ \dontrun{ # This operation detects faces in an image and adds them to the specified # Rekognition collection. svc$index_faces( CollectionId = "myphotos", DetectionAttributes = list(), ExternalImageId = "myphotoid", Image = list( S3Object = list( Bucket = "mybucket", Name = "myphoto" ) ) ) } } \keyword{internal}
/cran/paws.machine.learning/man/rekognition_index_faces.Rd
permissive
jcheng5/paws
R
false
true
8,678
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rekognition_operations.R \name{rekognition_index_faces} \alias{rekognition_index_faces} \title{Detects faces in the input image and adds them to the specified collection} \usage{ rekognition_index_faces(CollectionId, Image, ExternalImageId, DetectionAttributes, MaxFaces, QualityFilter) } \arguments{ \item{CollectionId}{[required] The ID of an existing collection to which you want to add the faces that are detected in the input images.} \item{Image}{[required] The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes isn\'t supported. If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the \code{Bytes} field. For more information, see Images in the Amazon Rekognition developer guide.} \item{ExternalImageId}{The ID you want to assign to all the faces detected in the image.} \item{DetectionAttributes}{An array of facial attributes that you want to be returned. This can be the default list of attributes or all attributes. If you don\'t specify a value for \code{Attributes} or if you specify \verb{\\\["DEFAULT"\\\]}, the API returns the following subset of facial attributes: \code{BoundingBox}, \code{Confidence}, \code{Pose}, \code{Quality}, and \code{Landmarks}. If you provide \verb{\\\["ALL"\\\]}, all facial attributes are returned, but the operation takes longer to complete. If you provide both, \verb{\\\["ALL", "DEFAULT"\\\]}, the service uses a logical AND operator to determine which attributes to return (in this case, all attributes).} \item{MaxFaces}{The maximum number of faces to index. The value of \code{MaxFaces} must be greater than or equal to 1. \code{IndexFaces} returns no more than 100 detected faces in an image, even if you specify a larger value for \code{MaxFaces}. 
If \code{IndexFaces} detects more faces than the value of \code{MaxFaces}, the faces with the lowest quality are filtered out first. If there are still more faces than the value of \code{MaxFaces}, the faces with the smallest bounding boxes are filtered out (up to the number that\'s needed to satisfy the value of \code{MaxFaces}). Information about the unindexed faces is available in the \code{UnindexedFaces} array. The faces that are returned by \code{IndexFaces} are sorted by the largest face bounding box size to the smallest size, in descending order. \code{MaxFaces} can be used with a collection associated with any version of the face model.} \item{QualityFilter}{A filter that specifies a quality bar for how much filtering is done to identify faces. Filtered faces aren\'t indexed. If you specify \code{AUTO}, Amazon Rekognition chooses the quality bar. If you specify \code{LOW}, \code{MEDIUM}, or \code{HIGH}, filtering removes all faces that don't meet the chosen quality bar. The default value is \code{AUTO}. The quality bar is based on a variety of common use cases. Low-quality detections can occur for a number of reasons. Some examples are an object that\'s misidentified as a face, a face that\'s too blurry, or a face with a pose that\'s too extreme to use. If you specify \code{NONE}, no filtering is performed. To use quality filtering, the collection you are using must be associated with version 3 of the face model or higher.} } \description{ Detects faces in the input image and adds them to the specified collection. } \details{ Amazon Rekognition doesn\'t save the actual faces that are detected. Instead, the underlying detection algorithm first detects the faces in the input image. For each face, the algorithm extracts facial features into a feature vector, and stores it in the backend database. Amazon Rekognition uses feature vectors when it performs face match and search operations using the SearchFaces and SearchFacesByImage operations. 
For more information, see Adding Faces to a Collection in the Amazon Rekognition Developer Guide. To get the number of faces in a collection, call DescribeCollection. If you\'re using version 1.0 of the face detection model, \code{IndexFaces} indexes the 15 largest faces in the input image. Later versions of the face detection model index the 100 largest faces in the input image. If you\'re using version 4 or later of the face model, image orientation information is not returned in the \code{OrientationCorrection} field. To determine which version of the model you\'re using, call DescribeCollection and supply the collection ID. You can also get the model version from the value of \code{FaceModelVersion} in the response from \code{IndexFaces} For more information, see Model Versioning in the Amazon Rekognition Developer Guide. If you provide the optional \code{ExternalImageId} for the input image you provided, Amazon Rekognition associates this ID with all faces that it detects. When you call the ListFaces operation, the response returns the external ID. You can use this external image ID to create a client-side index to associate the faces with each image. You can then use the index to find all faces in an image. You can specify the maximum number of faces to index with the \code{MaxFaces} input parameter. This is useful when you want to index the largest faces in an image and don\'t want to index smaller faces, such as those belonging to people standing in the background. The \code{QualityFilter} input parameter allows you to filter out detected faces that don't meet a required quality bar. The quality bar is based on a variety of common use cases. By default, \code{IndexFaces} chooses the quality bar that\'s used to filter faces. You can also explicitly choose the quality bar. Use \code{QualityFilter}, to set the quality bar by specifying \code{LOW}, \code{MEDIUM}, or \code{HIGH}. If you do not want to filter detected faces, specify \code{NONE}. 
To use quality filtering, you need a collection associated with version 3 of the face model or higher. To get the version of the face model associated with a collection, call DescribeCollection. Information about faces detected in an image, but not indexed, is returned in an array of UnindexedFace objects, \code{UnindexedFaces}. Faces aren\'t indexed for reasons such as: \itemize{ \item The number of faces detected exceeds the value of the \code{MaxFaces} request parameter. \item The face is too small compared to the image dimensions. \item The face is too blurry. \item The image is too dark. \item The face has an extreme pose. \item The face doesn't have enough detail to be suitable for face search. } In response, the \code{IndexFaces} operation returns an array of metadata for all detected faces, \code{FaceRecords}. This includes: \itemize{ \item The bounding box, \code{BoundingBox}, of the detected face. \item A confidence value, \code{Confidence}, which indicates the confidence that the bounding box contains a face. \item A face ID, \code{FaceId}, assigned by the service for each face that\'s detected and stored. \item An image ID, \code{ImageId}, assigned by the service for the input image. } If you request all facial attributes (by using the \code{detectionAttributes} parameter), Amazon Rekognition returns detailed facial attributes, such as facial landmarks (for example, location of eye and mouth) and other facial attributes. If you provide the same image, specify the same collection, and use the same external ID in the \code{IndexFaces} operation, Amazon Rekognition doesn\'t save duplicate face metadata. The input image is passed either as base64-encoded image bytes, or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes isn\'t supported. The image must be formatted as a PNG or JPEG file. This operation requires permissions to perform the \code{rekognition:IndexFaces} action. 
} \section{Request syntax}{ \preformatted{svc$index_faces( CollectionId = "string", Image = list( Bytes = raw, S3Object = list( Bucket = "string", Name = "string", Version = "string" ) ), ExternalImageId = "string", DetectionAttributes = list( "DEFAULT"|"ALL" ), MaxFaces = 123, QualityFilter = "NONE"|"AUTO"|"LOW"|"MEDIUM"|"HIGH" ) } } \examples{ \dontrun{ # This operation detects faces in an image and adds them to the specified # Rekognition collection. svc$index_faces( CollectionId = "myphotos", DetectionAttributes = list(), ExternalImageId = "myphotoid", Image = list( S3Object = list( Bucket = "mybucket", Name = "myphoto" ) ) ) } } \keyword{internal}
# Plot nutrient load time series for figure 2.
#
# Reads per-lake metabolism estimates (used to order facets by mean GPP) and
# per-lake nutrient load time series, joins on a z-scored Schmidt-stability
# season classification, and writes one faceted time-series figure per
# nutrient (TP, TN, DOC) to figures/.
library(dplyr)
library(ggplot2)
library(yaml)

# Analysis configuration: holds the CV cutoff and the day-of-year window.
analysis_cfg <- yaml::yaml.load_file('lib/cfg/analysis_cfg.yml')

### loading in metabolism data for sorting by mean GPP ###
dir <- 'results/metab/20161107/'   # directory of metabolism data
folders <- list.files(dir)         # one folder per lake
# Drop the README doc. grepl() keeps everything when nothing matches,
# whereas the previous `folders[-grep(...)]` silently dropped ALL folders
# in that case; fixed = TRUE makes the '.' literal.
folders <- folders[!grepl('.doc', folders, fixed = TRUE)]

all_metab <- data.frame()          # accumulator for all lakes' metab data
for (i in seq_along(folders)) {    # loop over lake folders
  # read in lake-specific metabolism estimates
  cur <- read.table(file.path(dir, folders[i], paste0(folders[i], '_metabEst.txt')),
                    header = TRUE, sep = '\t', stringsAsFactors = FALSE)
  cur <- cur[, 1:12]               # keep only the needed columns
  cur$lake <- folders[i]
  all_metab <- rbind(all_metab, cur)
}
all_metab$date <- as.Date(paste(all_metab$year, all_metab$doy), format = '%Y %j')
all_metab <- as_tibble(all_metab)

# Seasonal cutoff based on z-scored Schmidt stability.
season_cutoff <- readRDS('results/z_scored_schmidt.rds') %>% select(-doy)
all_metab <- left_join(all_metab, season_cutoff,
                       by = c('lake' = 'lake', 'date' = 'date'))

cv_cutoff <- analysis_cfg$cv_cutoff
min_doy <- analysis_cfg$min_doy
max_doy <- analysis_cfg$max_doy

# Keep well-constrained, physically sensible estimates and compute each
# lake's mean GPP (used below to order the facets).
metab_plot <- dplyr::filter(all_metab, doy > min_doy, doy < max_doy,
                            GPP_SD / GPP < cv_cutoff, R_SD / abs(R) < cv_cutoff,
                            GPP > 0, R < 0) %>%
  group_by(lake) %>%
  dplyr::mutate(mean_gpp = mean(GPP, na.rm = TRUE)) %>%
  ungroup()

#### loading in nutrient load time series ###
dir <- 'results/nutrient load/'    # directory of load data
files <- list.files(dir)
# Same no-match-safe removal of the README as above.
files <- files[!grepl('Readme', files, fixed = TRUE)]

all_load <- data.frame()           # accumulator for all lakes' load data
for (i in seq_along(files)) {
  cur <- read.table(file.path(dir, files[i]), header = TRUE, sep = '\t',
                    stringsAsFactors = FALSE)
  # lake name is the file name without the '_loads.txt' suffix
  cur$lake <- strsplit(files[i], split = '_loads.txt')[[1]][1]
  all_load <- rbind(all_load, cur)
}
all_load <- as_tibble(all_load) %>%
  mutate(date = as.Date(Date)) %>%
  select(-Date)

# adding in lakes w/o streams so they still get a (blank) facet
no_streams <- all_metab[!all_metab$lake %in% all_load$lake, ] %>%
  select(year, doy, lake, date) %>%
  rename(Year = year) %>%
  dplyr::mutate(TN_load = NA, TP_load = NA, DOC_load = NA, inflow = NA) %>%
  select(Year, doy, TN_load, TP_load, DOC_load, inflow, lake, date)
all_load <- bind_rows(all_load, no_streams)

# seasonal cutoff based on z-scored Schmidt stability
season_cutoff <- readRDS('results/z_scored_schmidt.rds') %>% select(-doy)
all_load <- left_join(all_load, season_cutoff,
                      by = c('lake' = 'lake', 'date' = 'date'))

# lake metadata; volume is used to normalize loads to concentrations.
# (was `stringsAsFactor=F`, which only worked via partial argument matching)
metaData <- read.csv('data/metadataLookUp.csv', stringsAsFactors = FALSE) %>%
  select(Lake.Name, Volume..m3., Surface.Area..m2., Catchment.Area..km2.,
         Lake.Residence.Time..year.)
all_load <- left_join(all_load, metaData, by = c('lake' = 'Lake.Name'))
all_load <- left_join(all_load, dplyr::select(metab_plot, lake, date, mean_gpp),
                      by = c('lake' = 'lake', 'date' = 'date'))

load_plot <- dplyr::filter(all_load, doy > min_doy, doy < max_doy) %>%
  group_by(lake) %>%
  dplyr::mutate(mean_tp = mean(TP_load / Volume..m3., na.rm = TRUE)) %>%
  ungroup() %>%
  dplyr::mutate(lake = factor(lake),
                season = factor(season),
                # map every observation onto a common arbitrary year so the
                # x-axis shows month-of-year only
                plot_date = as.Date(paste0('2001-', doy), format = '%Y-%j', tz = 'GMT'),
                # zero loads would break the log y-axis below
                TP_load = ifelse(TP_load == 0, NA, TP_load))

# ordering facets by mean GPP
lakes_sorted <- load_plot$lake[sort.list(load_plot$mean_gpp)]
lakes_sorted <- as.character(lakes_sorted[!duplicated(lakes_sorted)])
seasons_sorted <- c('spring', 'summer', 'fall')
load_plot$lake <- factor(load_plot$lake, levels = lakes_sorted)
load_plot$season <- factor(load_plot$season, levels = seasons_sorted)

# facet labeller: data folder name -> display name
lake_names <- c('Acton' = 'Acton Lake', 'Crampton' = 'Crampton Lake',
                'EastLong' = 'East Long Lake', 'Feeagh' = 'Lough Feeagh',
                'Harp' = 'Harp Lake', 'Langtjern' = 'Lake Langtjern',
                'Lillinonah' = 'Lake Lillinonah', 'Lillsjoliden' = 'Lillsjöliden',
                'Mangstrettjarn' = 'Mångstrettjärn', 'Mendota' = 'Lake Mendota',
                'Morris' = 'Morris Lake', 'Nastjarn' = 'Nästjärn',
                'Ovre' = 'Övre Björntjärn', 'Struptjarn' = 'Struptjärn',
                'Trout' = 'Trout Lake', 'Vortsjarv' = 'Lake Võrtsjärv')

# colorblind-friendly palette
cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73",
               "#F0E442", "#0072B2", "#D55E00", "#CC79A7")

# Shared season color/label mappings for all three figures.
season_colors <- c('spring' = '#009E73', 'summer' = '#56B4E9',
                   'fall' = '#E69F00', 'NA' = 'white')
season_labels <- c('Spring', 'Summer', 'Fall', '')

# Build one faceted load time-series figure; keeps x and y axis scales the
# same for every facet.
#   load_col : name of the load column in load_plot (mg), normalized here by
#              lake volume (m^3)
#   y_lab    : y-axis label (plotmath expression)
plot_load_timeseries <- function(load_col, y_lab) {
  ggplot(load_plot,
         aes(x = plot_date, y = .data[[load_col]] * 1000 * 1000 / Volume..m3.,
             group = lake, color = season)) +
    geom_line(size = 1) +
    facet_wrap(~lake, labeller = as_labeller(lake_names), strip.position = 'top') +
    theme_classic() +
    theme(strip.background = element_blank(),
          strip.placement = 'inside',
          axis.title = element_text(size = 16),
          axis.text = element_text(size = 12),
          axis.title.x = element_blank(),
          legend.title = element_blank(),
          legend.text = element_text(size = 12)) +
    scale_color_manual(name = 'season', values = season_colors, labels = season_labels) +
    scale_fill_manual(name = 'season', values = season_colors, labels = season_labels) +
    ylab(y_lab) +
    scale_y_log10() +
    scale_x_date(date_labels = '%b')
}

ggsave('figures/fig_tp_load_timeseries.png',
       plot = plot_load_timeseries('TP_load', expression(TP~Load~(mg~m^-3~day^-1))),
       width = 10, height = 10)
ggsave('figures/fig_tn_load_timeseries.png',
       plot = plot_load_timeseries('TN_load', expression(TN~Load~(mg~m^-3~day^-1))),
       width = 10, height = 10)
ggsave('figures/fig_doc_load_timeseries.png',
       plot = plot_load_timeseries('DOC_load', expression(DOC~Load~(mg~m^-3~day^-1))),
       width = 10, height = 10)
/R_code/fig_load_timeseries.R
no_license
Atefeh786/catchment_metab_wg
R
false
false
9,106
r
# plotting load timeseries for figure 2 library(dplyr) library(ggplot2) library(yaml) analysis_cfg <- yaml::yaml.load_file('lib/cfg/analysis_cfg.yml') # this file holds important analysis info such as CV cutoff ### loading in metabolism data for sorting by mean GPP ### dir<-'results/metab/20161107/' # directory of metabolism data folders<-list.files(dir) # folders in this dir folders<-folders[-grep('.doc',folders)] # get rid of README doc all_metab<-data.frame() # data frame to store all metab data for(i in 1:length(folders)){ # loops over all folders in metab directory cur<-read.table(file.path(dir,folders[i],paste(folders[i],'_metabEst.txt',sep='')),header=T,sep='\t', stringsAsFactors = F) # read in lake specific metab data cur<-cur[,1:12] # getting rid of any unnecessary columns cur$lake<-folders[i] all_metab<-rbind(all_metab,cur) } all_metab$date<-as.Date(paste(all_metab$year,all_metab$doy),format='%Y %j') # making date all_metab <- as_tibble(all_metab) season_cutoff <- readRDS('results/z_scored_schmidt.rds') %>% select(-doy)# seasonal cutoff based on z-scored schmidt stability all_metab <- left_join(all_metab, season_cutoff, by = c('lake' = 'lake', 'date' = 'date')) cv_cutoff = analysis_cfg$cv_cutoff min_doy = analysis_cfg$min_doy max_doy = analysis_cfg$max_doy metab_plot <- dplyr::filter(all_metab, doy > min_doy, doy < max_doy, GPP_SD/GPP < cv_cutoff, R_SD/abs(R) < cv_cutoff, GPP > 0, R < 0) %>% group_by(lake) %>% dplyr::mutate(mean_gpp = mean(GPP, na.rm=T)) %>% ungroup() #### loading in nutrient load time series ### dir<-'results/nutrient load/' # directory of load data files<-list.files(dir) # folders in this dir files<-files[-grep('Readme',files)] # get rid of README doc all_load<-data.frame() # data frame to store all load data for(i in 1:length(files)){ # loops over all files in load directory cur<-read.table(file.path(dir,files[i]),header=T,sep='\t', stringsAsFactors = F) # read in lake specific load data cur$lake<-strsplit(files[i], split = 
'_loads.txt')[[1]][1] all_load<-rbind(all_load,cur) } all_load <- as_tibble(all_load) %>% mutate(date = as.Date(Date)) %>% select(-Date) # adding in lakes w/o streams to plot no_streams <- all_metab[!all_metab$lake%in%all_load$lake, ] %>% select(year, doy, lake, date) %>% rename(Year = year) %>% dplyr::mutate(TN_load = NA, TP_load = NA, DOC_load = NA, inflow = NA) %>% select(Year, doy, TN_load, TP_load, DOC_load, inflow, lake, date) all_load <- bind_rows(all_load, no_streams) season_cutoff <- readRDS('results/z_scored_schmidt.rds') %>% select(-doy)# seasonal cutoff based on z-scored schmidt stability all_load <- left_join(all_load, season_cutoff, by = c('lake' = 'lake', 'date' = 'date')) metaData <- read.csv('data/metadataLookUp.csv',stringsAsFactor=F) %>% select(Lake.Name, Volume..m3., Surface.Area..m2., Catchment.Area..km2., Lake.Residence.Time..year.) all_load <- left_join(all_load, metaData, by = c('lake' = 'Lake.Name')) all_load <- left_join(all_load, dplyr::select(metab_plot, lake, date, mean_gpp), by = c('lake'='lake','date'='date')) cv_cutoff = analysis_cfg$cv_cutoff min_doy = analysis_cfg$min_doy max_doy = analysis_cfg$max_doy load_plot <- dplyr::filter(all_load, doy > min_doy, doy < max_doy) %>% group_by(lake) %>% dplyr::mutate(mean_tp = mean(TP_load / Volume..m3., na.rm=T)) %>% ungroup() %>% dplyr::mutate(lake = factor(lake), season = factor(season), plot_date = as.Date(paste('2001-',doy,sep=''), format = '%Y-%j', tz ='GMT'), TP_load = ifelse(TP_load == 0, NA, TP_load)) #ordering by mean inflow lakes_sorted <- load_plot$lake[sort.list(load_plot$mean_gpp)] lakes_sorted <- as.character(lakes_sorted[!duplicated(lakes_sorted)]) seasons_sorted <- c('spring','summer','fall') load_plot$lake <- factor(load_plot$lake,levels = lakes_sorted) load_plot$season <- factor(load_plot$season, levels = seasons_sorted) # facet labeller lake_names <- c('Acton' = 'Acton Lake', 'Crampton' = 'Crampton Lake', 'EastLong' = 'East Long Lake', 'Feeagh' = 'Lough Feeagh', 'Harp' = 
'Harp Lake', 'Langtjern' = 'Lake Langtjern', 'Lillinonah' = 'Lake Lillinonah', 'Lillsjoliden' = 'Lillsjöliden', 'Mangstrettjarn' = 'Mångstrettjärn', 'Mendota' = 'Lake Mendota', 'Morris' = 'Morris Lake', 'Nastjarn' = 'Nästjärn', 'Ovre' = 'Övre Björntjärn', 'Struptjarn' = 'Struptjärn', 'Trout' = 'Trout Lake', 'Vortsjarv' = 'Lake Võrtsjärv' ) cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7") # colorblind-friendly pallete # keeping x and y axis scales the same for every plot load <- ggplot(load_plot, aes(x = plot_date, y = TP_load * 1000 *1000/ Volume..m3., group = lake ,color = season)) + geom_line(size = 1) + facet_wrap(~lake, labeller = as_labeller(lake_names), strip.position = 'top') + theme_classic() + theme(strip.background = element_blank(), strip.placement = 'inside', axis.title = element_text(size = 16), axis.text = element_text(size = 12), axis.title.x = element_blank(), legend.title = element_blank(), legend.text = element_text(size =12)) + scale_color_manual(name = 'season', values = c('spring' = '#009E73', 'summer' = '#56B4E9', 'fall' = '#E69F00', 'NA' = 'white'), labels = c('Spring', 'Summer', 'Fall', '')) + scale_fill_manual(name = 'season', values = c('spring' = '#009E73', 'summer' = '#56B4E9', 'fall' = '#E69F00', 'NA' = 'white'), labels = c('Spring', 'Summer', 'Fall', '')) + ylab(expression(TP~Load~(mg~m^-3~day^-1))) + scale_y_log10() + scale_x_date(date_labels = '%b') # load ggsave('figures/fig_tp_load_timeseries.png', plot = load, width = 10, height = 10) # keeping x and y axis scales the same for every plot load <- ggplot(load_plot, aes(x = plot_date, y = TN_load * 1000 *1000/ Volume..m3., group = lake ,color = season)) + geom_line(size = 1) + facet_wrap(~lake, labeller = as_labeller(lake_names), strip.position = 'top') + theme_classic() + theme(strip.background = element_blank(), strip.placement = 'inside', axis.title = element_text(size = 16), axis.text = element_text(size = 12), axis.title.x = 
element_blank(), legend.title = element_blank(), legend.text = element_text(size =12)) + scale_color_manual(name = 'season', values = c('spring' = '#009E73', 'summer' = '#56B4E9', 'fall' = '#E69F00', 'NA' = 'white'), labels = c('Spring', 'Summer', 'Fall', '')) + scale_fill_manual(name = 'season', values = c('spring' = '#009E73', 'summer' = '#56B4E9', 'fall' = '#E69F00', 'NA' = 'white'), labels = c('Spring', 'Summer', 'Fall', '')) + ylab(expression(TN~Load~(mg~m^-3~day^-1))) + scale_y_log10() + scale_x_date(date_labels = '%b') # load ggsave('figures/fig_tn_load_timeseries.png', plot = load, width = 10, height = 10) # keeping x and y axis scales the same for every plot load <- ggplot(load_plot, aes(x = plot_date, y = DOC_load * 1000 *1000/ Volume..m3., group = lake ,color = season)) + geom_line(size = 1) + facet_wrap(~lake, labeller = as_labeller(lake_names), strip.position = 'top') + theme_classic() + theme(strip.background = element_blank(), strip.placement = 'inside', axis.title = element_text(size = 16), axis.text = element_text(size = 12), axis.title.x = element_blank(), legend.title = element_blank(), legend.text = element_text(size =12)) + scale_color_manual(name = 'season', values = c('spring' = '#009E73', 'summer' = '#56B4E9', 'fall' = '#E69F00', 'NA' = 'white'), labels = c('Spring', 'Summer', 'Fall', '')) + scale_fill_manual(name = 'season', values = c('spring' = '#009E73', 'summer' = '#56B4E9', 'fall' = '#E69F00', 'NA' = 'white'), labels = c('Spring', 'Summer', 'Fall', '')) + ylab(expression(DOC~Load~(mg~m^-3~day^-1))) + scale_y_log10() + scale_x_date(date_labels = '%b') # load ggsave('figures/fig_doc_load_timeseries.png', plot = load, width = 10, height = 10)
#' Loads district data for Perú
#'
#' Function 'import_db' downloads and returns the specified dataset.
#'
#' @param dataset is a character string specifying the dataset to request.
#'
#' Current datasets available are:
#'
#' - "Peru_names": Returns the name of the 1874 districts, 196 provinces and 25 regions of Perú as they are reported by the INEI in the REDATAM platform for the 2017 CENSUS.
#'
#' - "Peru_shp": Returns the shapefile for the 1874 districts, 196 provinces and 25 regions of Perú
#'
#' @return A data frame ("Peru_names") or the deserialized shapefile object
#'   ("Peru_shp"). Any other value of `dataset` raises an error.
#' @examples
#' df <- data.frame(
#'   reg = c(
#'     "LIMA", "CALLAO", "CAJAMARCA", "AMAZONAS", "SAN MARTIN", "HUANUCO",
#'     "PASCO", "JUNIN", "CUSCO", "PUNO", "APURIMAC", "AYACUCHO",
#'     "HUANCAVELICA", "TUMBES", "PIURA", "LAMBAYEQUE", "LA LIBERTAD",
#'     "ANCASH", "ICA", "AREQUIPA", "TACNA", "MOQUEGUA", "LORETO", "UCAYALI",
#'     "MADRE DE DIOS"
#'   ),
#'   stringsAsFactors = FALSE
#' )
#' @importFrom utils read.csv
#' @import RCurl
#' @export import_db
import_db <- function(dataset) {
  # Validate up front: the original fell through for unknown names and failed
  # with an opaque "object 'x' not found" error from return(x).
  if (!is.character(dataset) || length(dataset) != 1L) {
    stop("'dataset' must be a single character string.", call. = FALSE)
  }
  if (dataset == "Peru_names") {
    x <- read.csv(
      "https://raw.githubusercontent.com/healthinnovation/lis/master/files/master_distr.csv",
      stringsAsFactors = FALSE
    )
  } else if (dataset == "Peru_shp") {
    # NOTE(review): the remote file is named '.Rdata' but is read with
    # readRDS(); this only works if it was actually written by saveRDS() —
    # confirm, otherwise load() into an environment would be needed.
    shp_url <- "https://github.com/healthinnovation/lis/blob/master/files/shp_PER_adm3.Rdata?raw=true"
    x <- readRDS(url(shp_url))
  } else {
    stop("Unknown dataset '", dataset,
         "'. Available datasets: 'Peru_names', 'Peru_shp'.", call. = FALSE)
  }
  x
}
/R/import_db.R
no_license
botam2/test_list
R
false
false
1,329
r
#' Loads district data for Perú #' #' Function 'import_db' the specified dataset. #' #' @param dataset is a character specifiying the dataset to request. #' #' Current datasets available are: #' #' - "Peru_names": Returns the name of the 1874 districts, 196 provinces and 25 regions of Perú as they are reported by the INEI in the REDATAM platform for the 2017 CENSUS. #' #' - "Peru_shp": Returns the shapefile for thethe 1874 districts, 196 provinces and 25 regions of Perú #' #' @examples #' df <- data.frame( #' reg = c( #' "LIMA", "CALLAO", "CAJAMARCA", "AMAZONAS", "SAN MARTIN", "HUANUCO", #' "PASCO", "JUNIN", "CUSCO", "PUNO", "APURIMAC", "AYACUCHO", #' "HUANCAVELICA", "TUMBES", "PIURA", "LAMBAYEQUE", "LA LIBERTAD", #' "ANCASH", "ICA", "AREQUIPA", "TACNA", "MOQUEGUA", "LORETO", "UCAYALI", #' "MADRE DE DIOS" #' ), #' stringsAsFactors = FALSE #' ) #' @importFrom utils read.csv #' @import RCurl #' @export import_db import_db <- function(dataset) { if (dataset == "Peru_names") { x <- read.csv("https://raw.githubusercontent.com/healthinnovation/lis/master/files/master_distr.csv", stringsAsFactors = F) } else if (dataset == "Peru_shp") { x <- "https://github.com/healthinnovation/lis/blob/master/files/shp_PER_adm3.Rdata?raw=true" x <- readRDS(url(x)) } return(x) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mirorbarplot.R \name{mirrorbarplot} \alias{mirrorbarplot} \title{Function to create a mirrored bar plot. This is two overlaid barplots, one generally above the zero line and the other below the zero line. A set of periodic returns is given (rets), along with a set of maximum drawdowns (mdd). The rets are plotted as positive bars (green) and the mdd as negative bars (red). If the rets are negative, then they show up as negative orange bars.} \usage{ mirrorbarplot(rets, mdd, cex.lab = 1, barcolors = c("red", "green", "orange"), cex.axis = 1, cex.legend = 0.8, cex.names = 1, cex.main = 1, main = "Mirror Barplot", cex.text = 0.8, legend.loc = c(17, -25), yrange = NULL, xmgp = c(3, 1, 0), annotateYrs = TRUE, legend.horiz = FALSE, ...) } \arguments{ \item{rets}{An xts of periodic returns, typically annual returns.} \item{mdd}{An xts of periodic maximum drawdowns with the same structure as rets.} \item{barcolors}{A vector of 3 colors for the bars. Default red, green, orange.} \item{main}{The title text.} \item{legend.loc}{The X,Y location of the legend. If NULL, then no legend is plotted.} \item{yrange}{The numeric range for the Y axis. Useful to extend / shorten it. NULL will calculate it automatically.} \item{xmgp}{See par() for mgp. This moves the title and the axises around.} \item{annotateYrs}{Logical. Whether to show the years on the X axis. Note that the period of returns should be years for this to work properly.} \item{legend.horiz}{Logical. Passed on as horiz to legend() and used to set the legend horizontally rather than vertically.} \item{cex.<param>}{These are the various cex values. cex.lab for the Y-label size, cex.axis for the Y-axis size, cex.legend for the legend size, cex.names for the X-axis label size, cex.main for the title, cex.text for the annotated text on the plot (the average line).} } \value{ Nothing is returned per se. This function creates a plot. 
} \description{ This is normally used to show annual returns for an equity curve overlaid with its associated annual max drawdown. }
/man/mirrorbarplot.Rd
no_license
jeanmarcgp/xtsanalytics
R
false
true
2,140
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mirorbarplot.R \name{mirrorbarplot} \alias{mirrorbarplot} \title{Function to create a mirrored bar plot. This is two overlaid barplots, one generally above the zero line and the other below the zero line. A set of periodic returns is given (rets), along with a set of maximum drawdowns (mdd). The rets are plotted as positive bars (green) and the mdd as negative bars (red). If the rets are negative, then they show up as negative orange bars.} \usage{ mirrorbarplot(rets, mdd, cex.lab = 1, barcolors = c("red", "green", "orange"), cex.axis = 1, cex.legend = 0.8, cex.names = 1, cex.main = 1, main = "Mirror Barplot", cex.text = 0.8, legend.loc = c(17, -25), yrange = NULL, xmgp = c(3, 1, 0), annotateYrs = TRUE, legend.horiz = FALSE, ...) } \arguments{ \item{rets}{An xts of periodic returns, typically annual returns.} \item{mdd}{An xts of periodic maximum drawdowns with the same structure as rets.} \item{barcolors}{A vector of 3 colors for the bars. Default red, green, orange.} \item{main}{The title text.} \item{legend.loc}{The X,Y location of the legend. If NULL, then no legend is plotted.} \item{yrange}{The numeric range for the Y axis. Useful to extend / shorten it. NULL will calculate it automatically.} \item{xmgp}{See par() for mgp. This moves the title and the axises around.} \item{annotateYrs}{Logical. Whether to show the years on the X axis. Note that the period of returns should be years for this to work properly.} \item{legend.horiz}{Logical. Passed on as horiz to legend() and used to set the legend horizontally rather than vertically.} \item{cex.<param>}{These are the various cex values. cex.lab for the Y-label size, cex.axis for the Y-axis size, cex.legend for the legend size, cex.names for the X-axis label size, cex.main for the title, cex.text for the annotated text on the plot (the average line).} } \value{ Nothing is returned per se. This function creates a plot. 
} \description{ This is normally used to show annual returns for an equity curve overlaid with its associated annual max drawdown. }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/listBe.R \name{listBe} \alias{listBe} \title{Lists all the biological entities (BE) available in the BED database} \usage{ listBe() } \value{ A character vector of biological entities (BE) } \description{ Lists all the biological entities (BE) available in the BED database } \seealso{ \code{\link{listPlatforms}}, \code{\link{listBeIdSources}}, \code{\link{listOrganisms}} }
/man/listBe.Rd
no_license
sankleta/BED
R
false
true
454
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/listBe.R \name{listBe} \alias{listBe} \title{Lists all the biological entities (BE) available in the BED database} \usage{ listBe() } \value{ A character vector of biological entities (BE) } \description{ Lists all the biological entities (BE) available in the BED database } \seealso{ \code{\link{listPlatforms}}, \code{\link{listBeIdSources}}, \code{\link{listOrganisms}} }
# Print method for 'osmttdiff' objects: reports how many elements were
# created, modified and deleted in the diff.
# NOTE(review): the rep(" ", 17 - length(...)) padding produces a negative
# 'times' (an error) when a component has more than 17 elements; behavior
# preserved as-is here — confirm intent before changing.
print.osmttdiff <- function(obj) {
  msg <- "Object of class 'osmdata' with:\n"
  msg <- c(msg, c(rep(' ', 17)))
  nm <- c(rep(" ", 17 - length(obj$create)), "Created: ", length(obj$create))
  msg <- c(msg, nm)
  nm <- c(rep(" ", 17 - length(obj$modify)), "Modified: ", length(obj$modify))
  msg <- c(msg, nm)
  nm <- c(rep(" ", 17 - length(obj$delete)), "Deleted: ", length(obj$delete))
  msg <- c(msg, nm)
  message(msg)
}

# Collect every XML node matched by 'xpath' under a fresh <osm> root and
# parse the result into sf objects via osmdata's internal parser.
# Factored out of osmttdiff_sf, which repeated this loop three times.
.osmtt_action_sf <- function(odd_xml, xpath) {
  nodes <- xml2::xml_find_all(odd_xml, xpath)
  osm_root <- xml2::xml_new_root('osm')
  for (node in nodes) {
    xml2::xml_add_child(osm_root, node)
  }
  osmdata:::rcpp_osmdata_sf(as.character(osm_root))
}

#' Converts overpass diff xml to osmttdiff object
#'
#' @param xml xml document from overpass diff query
#' @return An 'osmttdiff' object whose \code{$create}, \code{$modify} and
#'   \code{$delete} components hold the sf-parsed created, pre-modification
#'   and deleted elements respectively.
#' @export
#' @examples
#' \dontrun{
#' od<-osmadiff("sanga","2019-01-01T12:00:00Z")
#' odd<-osmdata:::overpass_query(osmdata::opq_string(od))
#' osmttdiff_sf(odd) -> odd_sf
#' }
osmttdiff_sf <- function(xml){
  obj <- osmttdiff(xml)
  odd_xml <- xml2::read_xml(xml)
  # created elements
  obj$create <- .osmtt_action_sf(odd_xml, '//action[@type="create"]/*')
  # modified elements (pre-modification state, per the /old selector)
  obj$modify <- .osmtt_action_sf(odd_xml, '//action[@type="modify"]/old/*')
  # deleted elements
  obj$delete <- .osmtt_action_sf(odd_xml, '//action[@type="delete"]/old/*')
  # Bug fix: the original body ended on the assignment to obj$delete, so the
  # function returned only the parsed deletions instead of the full object.
  obj
}
/R/osmttdiff-methods.R
no_license
neogeomat/OSMTT
R
false
false
1,897
r
print.osmttdiff <- function(obj) { msg <- "Object of class 'osmdata' with:\n" msg <- c (msg, c (rep (' ', 17))) nm <- c (rep (" ", 17 - length(obj$create)), "Created: ", length(obj$create)) msg <- c(msg,nm) nm <- c (rep (" ", 17 - length(obj$modify)), "Modified: ", length(obj$modify)) msg <- c(msg,nm) nm <- c (rep (" ", 17 - length(obj$delete)), "Deleted: ", length(obj$delete)) msg <- c(msg,nm) message(msg) } #' Converts overpass diff xml to osmttdiff object #' #' @param xml xml document from overpass diff query #' @export #' @examples #' \dontrun{ #' od<-osmadiff("sanga","2019-01-01T12:00:00Z") #' odd<-osmdata:::overpass_query(osmdata::opq_string(od)) #' osmttdiff_sf(odd) -> odd_sf #' } osmttdiff_sf <- function(xml){ obj <- osmttdiff(xml) odd_xml <- xml2::read_xml(xml) # created elements odd_xml_create <- xml2::xml_find_all(odd_xml,'//action[@type="create"]/*') osm_create <- xml2::xml_new_root('osm') for (i in odd_xml_create) { # print(odd_xml_way) # print(i) xml2::xml_add_child(osm_create,i) } parse_odd_create <- osmdata:::rcpp_osmdata_sf(as.character(osm_create)) obj$create <- parse_odd_create # modified elements odd_xml_modify <- xml2::xml_find_all(odd_xml,'//action[@type="modify"]/old/*') osm_modify <- xml2::xml_new_root('osm') for (i in odd_xml_modify) { # print(odd_xml_way) # print(i) xml2::xml_add_child(osm_modify,i) } parse_odd_modify <- osmdata:::rcpp_osmdata_sf(as.character(osm_modify)) obj$modify <- parse_odd_modify # deleted elements odd_xml_delete <- xml2::xml_find_all(odd_xml,'//action[@type="delete"]/old/*') osm_delete <- xml2::xml_new_root('osm') for (i in odd_xml_delete) { # print(odd_xml_way) # print(i) xml2::xml_add_child(osm_delete,i) } parse_odd_delete <- osmdata:::rcpp_osmdata_sf(as.character(osm_delete)) obj$delete <- parse_odd_delete }
## Benchmark values have been obtained from East 6.5,
## This is from the Sample Size/Events vs Time chart from the East design.
## : gsSurv.cywx
#-------------------------------------------------------------------------------
# Regression test: nEventsIA() at interim time tIA = 30 for this group
# sequential survival design should stay within 2 events of the East 6.5
# benchmark value of 103.258.
testthat::test_that(
  desc = "Test: checking output validation",
  code = {
    # Four-analysis survival design; control hazard = log(2)/6 (6-unit median),
    # hazard ratio 0.5, dropout eta = log(2)/40, 36-unit trial with a
    # 12-unit minimum follow-up.
    x <- gsSurv(
      k = 4,
      sfl = sfPower,
      sflpar = .5,
      lambdaC = log(2) / 6,
      hr = .5,
      eta = log(2) / 40,
      gamma = 1,
      T = 36,
      minfup = 12
    )
    # Tolerance of 2 events around the external benchmark.
    testthat::expect_lte(
      object = abs(nEventsIA(tIA = 30, x) - 103.258),
      expected = 2
    )
  }
)
/tests/testthat/test-independent-test-nEventsIA.R
no_license
keaven/gsDesign
R
false
false
567
r
## Benchmark values have been obtained from East 6.5, ## This is from the Sample Size/Events vs Time chart from the East design. ## : gsSurv.cywx #------------------------------------------------------------------------------- testthat::test_that( desc = "Test: checking output validation", code = { x <- gsSurv( k = 4, sfl = sfPower, sflpar = .5, lambdaC = log(2) / 6, hr = .5, eta = log(2) / 40, gamma = 1, T = 36, minfup = 12 ) testthat::expect_lte( object = abs(nEventsIA(tIA = 30, x) - 103.258), expected = 2 ) } )
# @todo Extract subpoints too
# @body Extract some/all subpoints and display how many
#' Extract Components of GDPR Chapters/Articles/Points
#'
#' @md
#' @param chapter chapter from which to extract
#' @param article article from which to extract
#' @param points points to extract (default all available)
#'
#' @return Either a vector of article titles or a vector of points.
#' @importFrom utils data
#' @export
#'
#' @examples
#' chapter_components(chapter = 1) # list all articles in chapter (1)
#' chapter_components(chapter = 1, article = 1) # extract all points in (1, 1)
#' chapter_components(chapter = 1, article = 1, points = 2) # extract (1, 1, 2)
chapter_components <- function(chapter = NULL, article = NULL, points = NULL) {
  # Bring the packaged GDPR_chapters dataset into this function's scope.
  GDPR_chapters <- NULL
  utils::data("GDPR_chapters", envir = environment())

  if (is.null(chapter)) stop("Must specify at least a chapter")

  chapter_entry <- GDPR_chapters$contents[[chapter]]

  # No article requested: list the chapter's article titles.
  if (is.null(article)) {
    return(chapter_entry$title)
  }

  article_text <- chapter_entry$contents[[article]]$text

  # No points requested: return the whole article text.
  # @todo Count points/subpoints in selection
  # @body Display the total number of points shown/available
  if (is.null(points)) {
    return(article_text)
  }

  # Silently drop requested point indices beyond the article's length.
  in_range <- points[points <= length(article_text)]
  article_text[in_range]
}

# @todo Text search function
# @body Add functionality to search entire text for a string and return containing chapter, article, points.
/R/subset.R
permissive
jonocarroll/tidyGDPR
R
false
false
1,400
r
# @todo Extract subpoints too # @body Extract some/all subpoints and display how many #' Extract Components of GDPR Chapters/Articles/Points #' #' @md #' @param chapter chapter from which to extract #' @param article article from which to extract #' @param points points to extract (default all available) #' #' @return Either a vector of article titles or a vector of points. #' @importFrom utils data #' @export #' #' @examples #' chapter_components(chapter = 1) # list all articles in chapter (1) #' chapter_components(chapter = 1, article = 1) # extract all points in (1, 1) #' chapter_components(chapter = 1, article = 1, points = 2) # extract (1, 1, 2) chapter_components <- function(chapter = NULL, article = NULL, points = NULL) { GDPR_chapters <- NULL utils::data("GDPR_chapters", envir = environment()) if (is.null(chapter)) stop("Must specify at least a chapter") if (is.null(article)) return(GDPR_chapters$contents[[chapter]]$title) chap_art <- GDPR_chapters$contents[[chapter]]$contents[[article]]$text # @todo Count points/subpoints in selection # @body Display the total number of points shown/available if (!is.null(points)) { return(chap_art[points[points <= length(chap_art)]]) } else { return(chap_art) } } # @todo Text search function # @body Add functionality to search entire text for a string and return containing chapter, article, points.
# Prepares a D3 script to be embedable into a widget: wraps 'contents' in the
# d3Script function signature that r2d3 invokes at render time.
#   contents  - character vector of JavaScript source lines
#   container - name of the DOM container argument (e.g. "svg")
# Returns a single string (lines joined with "\n").
script_wrap <- function(contents, container) {
  paste(
    c(
      paste("var d3Script = function(d3, r2d3, data, ", container, ", width, height, options, theme, console) {", sep = ""),
      contents,
      "};"
    ),
    collapse = "\n"
  )
}

# Read one or more D3 source files and concatenate them, each prefixed with a
# banner comment naming its origin. Inputs that are NULL, empty, or do not
# all name existing files are returned unchanged (callers may pass inline
# script text instead of paths).
script_read <- function(script) {
  # Fix: the original tested `!file.exists(script)` directly, which is a
  # vector condition and errors inside `||` for length > 1 input (R >= 4.3);
  # all() makes the multi-file case well-defined.
  if (is.null(script) || length(script) == 0 || !all(file.exists(script))) {
    return(script)
  }
  paste(
    # vapply instead of sapply: guarantees a character result per file.
    vapply(
      script,
      function(path) {
        paste(
          c(
            paste("/* R2D3 Source File: ", path, "*/"),
            readLines(path, warn = FALSE)
          ),
          collapse = "\n"
        )
      },
      character(1)
    ),
    collapse = "\n\n"
  )
}
/R/script.R
permissive
KirillShaman/r2d3
R
false
false
656
r
# Prepares a D3 script to be embedable into a widget script_wrap <- function(contents, container) { paste( c( paste("var d3Script = function(d3, r2d3, data, ", container, ", width, height, options, theme, console) {", sep = ""), contents, "};" ), collapse = "\n" ) } script_read <- function(script) { if (is.null(script) || length(script) == 0 || !file.exists(script)) return(script) paste( sapply( script, function(e) paste( c( paste("/* R2D3 Source File: ", e, "*/"), readLines(e, warn = FALSE) ), collapse = "\n" ) ), collapse = "\n\n" ) }
################ MODEL ################

# Constructor: a two-class naive Bayes model over nVarPreds binary predictors.
# Pc holds the class priors P(c). Px_c has one row per predictor and four
# columns: [P(x=0|c1), P(x=1|c1), P(x=0|c2), P(x=1|c2)].
naiveBayes <- function(nVarPreds) {
  modelo <- list()
  modelo$nPredictors <- nVarPreds
  modelo$Pc <- vector("numeric",length=2)
  modelo$Px_c <- matrix(0,nVarPreds,2*2)
  return(modelo)
}

# Predict: returns the normalized posterior P(c | instancia) for both classes.
# 'instancia' is a 0/1 vector of predictor values; the 1+ / 3+ offsets select
# the class-1 (columns 1-2) or class-2 (columns 3-4) entries of Px_c.
predecir <- function(instancia, modelo) {
  probs <- vector("numeric",length=2)
  probs[1] <- modelo$Pc[1]
  probs[2] <- modelo$Pc[2]
  for (i in 1:modelo$nPredictors) {
    probs[1] <- probs[1]*modelo$Px_c[i,1+instancia[i]]
    probs[2] <- probs[2]*modelo$Px_c[i,3+instancia[i]]
  }
  return(probs/sum(probs))
}

# Learn: re-estimate Pc and Px_c from the (weighted) dataset.
# Exercise template: the estimation itself is left for the student to write;
# as shipped it returns a model with all-zero parameters.
aprender <- function(dataset, modelo) {
  modelo$Pc <- vector("numeric",length=2)
  modelo$Px_c <- matrix(0,modelo$nPredictors,2*2)
  ##########################
  ##### YOUR CODE HERE #####
  ##########################
  return(modelo)
}

################## EM ##################

# Split 'datos' into labelled rows (class column not NA) and unlabelled rows.
# Every unlabelled row is duplicated, once per candidate class (0 and 1),
# each copy starting with weight 0.5; EStep later re-estimates those weights.
inicializar <- function(datos) {
  dataset <- list()
  dataset$labmatrix <- datos[which(!is.na(datos[,ncol(datos)])),]
  unlabmatrix <- datos[which(is.na(datos[,ncol(datos)])),]
  nUnlabExs <- nrow(unlabmatrix)
  dataset$peso <- vector("numeric", length=nUnlabExs*2)
  dataset$unlabmatrix <- matrix(0,nUnlabExs*2,ncol(datos))
  iAct <- 1
  for (i in 1:nUnlabExs) {
    # copy hypothesizing class 0
    dataset$unlabmatrix[iAct,] <- unlabmatrix[i,]
    dataset$unlabmatrix[iAct,ncol(datos)] <- 0
    dataset$peso[iAct] <- 0.5
    iAct <- iAct+1
    # copy hypothesizing class 1
    dataset$unlabmatrix[iAct,] <- unlabmatrix[i,]
    dataset$unlabmatrix[iAct,ncol(datos)] <- 1
    dataset$peso[iAct] <- 0.5
    iAct <- iAct+1
  }
  # dataset$peso <- c(1/3,2/3,1/3,2/3,1/3,2/3,2/3,1/3,2/3,1/3,2/3,1/3,1/2,1/2,1/2,1/2)
  return(dataset)
}

# E-step: recompute the weight of each duplicated unlabelled example under
# the current model. Exercise template: left for the student to write; as
# shipped it zeroes all weights.
EStep <- function(dataset, modelo){
  dataset$peso <- vector("numeric", length=length(dataset$peso))
  ##########################
  ##### YOUR CODE HERE #####
  ##########################
  return(dataset)
}

# M-step: re-fit the model from the weighted dataset.
# Exercise template: left for the student to write.
MStep <- function(dataset, modelo){
  ##########################
  ##### YOUR CODE HERE #####
  ##########################
  return(modelo)
}

# Convergence test between two consecutive models given tolerance epsilon.
# Exercise template: left for the student to write; as shipped it always
# returns FALSE (so the EM loop below would never terminate until filled in).
testConvergencia <- function(modeloA, modeloB, epsilon) {
  resultado <- FALSE
  ##########################
  ##### YOUR CODE HERE #####
  ##########################
  return (
    resultado
  )
}

# EM driver: initialize the weighted dataset, fit an initial model, then
# alternate E-step / M-step until testConvergencia() reports convergence.
# Returns list(final model, final weighted dataset).
EM <- function(datos, epsilon) {
  cDataset <- inicializar(datos)
  # print(cDataset$peso)
  # NOTE(review): 'modelo' is read here before any local assignment, so this
  # call relies on the global 'modelo' created in the execution section
  # below — confirm this is intentional in the exercise setup.
  modelo <- aprender(cDataset,modelo)
  # print(modelo$Pc)
  # print(modelo$Px_c)
  # readline()
  convergencia <- FALSE
  while (!convergencia) {
    cDataset <- EStep(cDataset,modelo)
    antModelo <- modelo
    modelo <- MStep(cDataset, antModelo)
    convergencia <- testConvergencia(modelo, antModelo, epsilon)
    # print(cDataset$peso)
    # print(modelo$Pc)
    # print(modelo$Px_c)
    # readline()
  }
  return(list(modelo,cDataset))
}

############### EXECUTION ###############

modelo <- naiveBayes(2)
# Toy dataset: two binary predictor columns plus a class column (NA marks an
# unlabelled example).
datos <- cbind(c(0,1,0,0,1,1,1,1,0,1),c(0,0,1,1,0,0,0,1,1,1),c(1,NA,NA,NA,1,0,NA,NA,0,NA))
EM(datos, 0.001)
/momo/EM_vacio.R
no_license
enpinzolas/ead-project
R
false
false
2,921
r
################ MODELO ################

# Create an untrained naive Bayes model for two classes over `nVarPreds`
# binary predictors. Px_c stores P(x_i = v | c): columns 1-2 belong to
# class 0 (values 0/1), columns 3-4 to class 1 (values 0/1).
naiveBayes <- function(nVarPreds) {
  modelo <- list()
  modelo$nPredictors <- nVarPreds
  modelo$Pc <- vector("numeric", length = 2)
  modelo$Px_c <- matrix(0, nVarPreds, 2 * 2)
  return(modelo)
}

# Posterior class distribution P(c | instancia) under the naive Bayes
# independence assumption, normalized so the two entries sum to one.
predecir <- function(instancia, modelo) {
  posterior <- vector("numeric", length = 2)
  posterior[1] <- modelo$Pc[1]
  posterior[2] <- modelo$Pc[2]
  for (k in 1:modelo$nPredictors) {
    # Offset 1 indexes the class-0 columns, offset 3 the class-1 columns.
    posterior[1] <- posterior[1] * modelo$Px_c[k, 1 + instancia[k]]
    posterior[2] <- posterior[2] * modelo$Px_c[k, 3 + instancia[k]]
  }
  return(posterior / sum(posterior))
}

# Fit the model parameters from the (weighted) dataset.
# Exercise skeleton: the estimation itself is left to be implemented.
aprender <- function(dataset, modelo) {
  modelo$Pc <- vector("numeric", length = 2)
  modelo$Px_c <- matrix(0, modelo$nPredictors, 2 * 2)
  ##########################
  ##### AQUI TU CODIGO #####
  ##########################
  return(modelo)
}

################## EM ##################

# Separate labelled from unlabelled rows (label = last column, NA means
# unlabelled). Every unlabelled example is expanded into two weighted
# copies, one per candidate class, each starting with weight 0.5.
inicializar <- function(datos) {
  dataset <- list()
  dataset$labmatrix <- datos[which(!is.na(datos[, ncol(datos)])), ]
  unlabmatrix <- datos[which(is.na(datos[, ncol(datos)])), ]
  nUnlabExs <- nrow(unlabmatrix)
  dataset$peso <- vector("numeric", length = nUnlabExs * 2)
  dataset$unlabmatrix <- matrix(0, nUnlabExs * 2, ncol(datos))
  fila <- 1
  for (k in 1:nUnlabExs) {
    # Copy assuming class 0.
    dataset$unlabmatrix[fila, ] <- unlabmatrix[k, ]
    dataset$unlabmatrix[fila, ncol(datos)] <- 0
    dataset$peso[fila] <- 0.5
    fila <- fila + 1
    # Copy assuming class 1.
    dataset$unlabmatrix[fila, ] <- unlabmatrix[k, ]
    dataset$unlabmatrix[fila, ncol(datos)] <- 1
    dataset$peso[fila] <- 0.5
    fila <- fila + 1
  }
  # dataset$peso <- c(1/3,2/3,1/3,2/3,1/3,2/3,2/3,1/3,2/3,1/3,2/3,1/3,1/2,1/2,1/2,1/2)
  return(dataset)
}

# E-step: recompute the weight of each unlabelled copy from the current
# model. Exercise skeleton.
EStep <- function(dataset, modelo) {
  dataset$peso <- vector("numeric", length = length(dataset$peso))
  ##########################
  ##### AQUI TU CODIGO #####
  ##########################
  return(dataset)
}

# M-step: re-estimate the model from the re-weighted dataset.
# Exercise skeleton.
MStep <- function(dataset, modelo) {
  ##########################
  ##### AQUI TU CODIGO #####
  ##########################
  return(modelo)
}

# Convergence check between two consecutive models; should become TRUE once
# their parameters differ by less than epsilon. Exercise skeleton (always
# FALSE until implemented).
testConvergencia <- function(modeloA, modeloB, epsilon) {
  resultado <- FALSE
  ##########################
  ##### AQUI TU CODIGO #####
  ##########################
  return(resultado)
}

# Main EM loop. NOTE(review): `modelo` is read from the global environment
# inside this function — it must exist before EM() is called (see the
# execution section below).
EM <- function(datos, epsilon) {
  cDataset <- inicializar(datos)
  modelo <- aprender(cDataset, modelo)
  convergencia <- FALSE
  while (!convergencia) {
    cDataset <- EStep(cDataset, modelo)
    antModelo <- modelo
    modelo <- MStep(cDataset, antModelo)
    convergencia <- testConvergencia(modelo, antModelo, epsilon)
  }
  return(list(modelo, cDataset))
}

############### EJECUCION ###############
modelo <- naiveBayes(2)
datos <- cbind(c(0, 1, 0, 0, 1, 1, 1, 1, 0, 1),
               c(0, 0, 1, 1, 0, 0, 0, 1, 1, 1),
               c(1, NA, NA, NA, 1, 0, NA, NA, 0, NA))
EM(datos, 0.001)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ArtifactGroup.R \name{ArtifactGroupRefNode_PUT} \alias{ArtifactGroupRefNode_PUT} \title{PUT is disallowed for artifact groups} \description{ PUT is disallowed for artifact groups } \seealso{ Other ArtifactGroup: \code{\link{ArtifactGroupRefClass_get_artifacts}}, \code{\link{ArtifactGroupRefNode_DELETE}}, \code{\link{ArtifactGroupRefNode_POST}} }
/man/ArtifactGroupRefNode_PUT.Rd
no_license
BigelowLab/genologicsr
R
false
true
430
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ArtifactGroup.R \name{ArtifactGroupRefNode_PUT} \alias{ArtifactGroupRefNode_PUT} \title{PUT is disallowed for artifact groups} \description{ PUT is disallowed for artifact groups } \seealso{ Other ArtifactGroup: \code{\link{ArtifactGroupRefClass_get_artifacts}}, \code{\link{ArtifactGroupRefNode_DELETE}}, \code{\link{ArtifactGroupRefNode_POST}} }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/method_inspect.R \name{vector.soft.thresh} \alias{vector.soft.thresh} \title{Soft thresholding a vector} \usage{ vector.soft.thresh(x, lambda) } \arguments{ \item{x}{a vector of real numbers} \item{lambda}{soft thresholding value} } \value{ a vector of the same length } \description{ entries of x are moved towards 0 by the amount lambda until they hit 0. }
/man/vector.soft.thresh.Rd
no_license
Tveten/capacc
R
false
true
457
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/method_inspect.R \name{vector.soft.thresh} \alias{vector.soft.thresh} \title{Soft thresholding a vector} \usage{ vector.soft.thresh(x, lambda) } \arguments{ \item{x}{a vector of real numbers} \item{lambda}{soft thresholding value} } \value{ a vector of the same length } \description{ entries of x are moved towards 0 by the amount lambda until they hit 0. }
\name{finvert} \alias{finvert} \title{Inverts the values of a factor} \description{ Useful function to invert the values from a categorical variable, for instance a Likert response scale.} \usage{ finvert(x, levels = FALSE) } \arguments{ \item{x}{A categorical variable (a factor)} \item{levels}{Logical, invert the levels as well} } \value{A factor of the same length as the original one.} \author{Adrian Dusa} \examples{ words <- c("ini", "mini", "miny", "moe") variable <- factor(words, levels = words) # inverts the value, preserving the levels finvert(variable) # inverts both values and levels finvert(variable, levels = TRUE) } \keyword{misc}
/man/finvert.Rd
no_license
dusadrian/admisc
R
false
false
668
rd
\name{finvert} \alias{finvert} \title{Inverts the values of a factor} \description{ Useful function to invert the values from a categorical variable, for instance a Likert response scale.} \usage{ finvert(x, levels = FALSE) } \arguments{ \item{x}{A categorical variable (a factor)} \item{levels}{Logical, invert the levels as well} } \value{A factor of the same length as the original one.} \author{Adrian Dusa} \examples{ words <- c("ini", "mini", "miny", "moe") variable <- factor(words, levels = words) # inverts the value, preserving the levels finvert(variable) # inverts both values and levels finvert(variable, levels = TRUE) } \keyword{misc}
## Calculate BMI PAFs for one location with the ensemble-distribution
## approach: exposure draws -> fitted density (SD chosen to match overweight/
## obesity prevalence) -> PAF integration per cause/sex/age/draw.
## Cluster job; command-line args: <REI_ID> <LOCATION_ID> <CORES>.
rm(list = ls())

if (Sys.info()["sysname"] == "Darwin") j_drive <- "/Volumes/snfs"
if (Sys.info()["sysname"] == "Linux") j_drive <- "/home/j"
user <- Sys.info()[["user"]]

library(foreach)
library(iterators)
library(doParallel)
library(dplyr)
library(data.table)
library(dfoptim)
library(fitdistrplus)
library(RColorBrewer)
library(ggplot2)
library(actuar)
library(grid)
library(RMySQL)
library(mvtnorm)
library(splitstackshape)
library(compiler)
library(rio)
library(hashmap)
library(pbmcapply)

setCompilerOptions(suppressAll = TRUE)
enableJIT(3)

source(paste0(j_drive, "/temp/", user, "/GBD_2016/calc_paf/get_draws.R"))
source(paste0(j_drive, "/temp/central_comp/libraries/current/r/get_demographics.R"))
source(paste0(j_drive, "/temp/central_comp/libraries/current/r/get_demographics_template.R"))

comp_dem <- get_demographics(gbd_team = "epi", gbd_round_id = 4)
comp_dem$year_ids <- c(comp_dem$year_ids, 2006)
ar <- get_demographics(gbd_team = "epi_ar", gbd_round_id = 4)
`%notin%` <- function(x, y) !(x %in% y)
# Annualized-results years not already covered by the standard epi years.
ay <- ar$year_ids[ar$year_ids %notin% comp_dem$year_ids]

Rcpp::sourceCpp(paste0(j_drive, "/temp/", user, "/GBD_2016/calc_paf/paf_c_plus.cpp"))

## Command-line arguments with debug/desktop fallbacks.
if (Sys.info()["sysname"] == "Darwin") {
  arg <- c(101)
} else {
  arg <- commandArgs()[-(1:3)]
}
if (exists("DEBUG")) {
  REI_ID <- 108
  L <- 44553
  CORES <- 20
} else {
  REI_ID <- as.numeric(arg[1])
  L <- as.numeric(arg[2])
  CORES <- as.numeric(arg[3])
}
if (is.na(REI_ID)) REI_ID <- 108
if (is.na(L)) L <- 77
if (is.na(CORES)) CORES <- 20
sessionInfo()
print(paste0("REI ID: ", REI_ID, " LOCATION ID: ", L, " CORES: ", CORES))

risk_t <- fread(paste0(j_drive, "/temp/", user, "/GBD_2016/calc_paf/shared_rei_table.csv"))
risk_map <- risk_t %>% dplyr::filter(rei_id == paste0(REI_ID))
R <- paste0(risk_map$risk[1])
if (R == "diet_fish") R <- "diet_omega_3"
try(dir.create(paste0("/share/epi/risk/paf/DBD/", risk_map$risk[1])))
try(dir.create(paste0("/share/epi/risk/paf/DBD/", risk_map$risk[1], "/calc_out")))
try(dir.create(paste0("/share/epi/risk/paf/DBD/", risk_map$risk[1], "/calc_out/logs")))

## Pull best-model exposure draws for one modelable entity across both the
## standard and annualized year sets, de-duplicate, and rename draw columns
## with `prefix` (e.g. "bmi_draw_0").
pull_exp_draws <- function(me_id, prefix) {
  d_main <- get_draws(gbd_id_field = c('modelable_entity_id'), gbd_round_id = c('4'),
                      gbd_id = c(me_id), source = c('risk'),
                      location_ids = c(paste0(L)), status = c('best'),
                      year_ids = c(comp_dem$year_ids))
  d_ay <- get_draws(gbd_id_field = c('modelable_entity_id'), gbd_round_id = c('4'),
                    gbd_id = c(me_id), source = c('risk'),
                    location_ids = c(paste0(L)), status = c('best'),
                    year_ids = c(ay))
  d <- bind_rows(d_main, d_ay)
  d <- d %>% dplyr::filter(model_version_id == max(model_version_id, na.rm = TRUE))
  d <- d %>% dplyr::select(location_id, year_id, age_group_id, sex_id, starts_with("draw_"))
  d <- d[!duplicated(d), ]
  names(d)[grep("draw_", names(d))] <-
    paste(prefix, names(d)[grep("draw_", names(d))], sep = "_")
  d
}

bmi_draws   <- pull_exp_draws('2548', "bmi")    # mean BMI
obese_draws <- pull_exp_draws('9364', "obese")  # obesity prevalence
over_draws  <- pull_exp_draws('9363', "over")   # overweight prevalence

exp_draws <- left_join(bmi_draws, obese_draws)
exp_draws <- left_join(exp_draws, over_draws)

## Read in risk meta-data (RR scale, exposure direction, TMREL bounds).
md <- fread(paste0(j_drive, "/temp/", user, "/GBD_2016/calc_paf/risk_variables.csv"))
md <- md[md$risk == paste0(risk_map$risk[1]), ]
RR_SCALAR <- as.numeric(md[, "rr_scalar"][[1]])
INV <- as.numeric(md[, "inv_exp"][[1]])
set.seed(11180 * REI_ID)
TM <- runif(1000, min = md[, "tmred_para1"][[1]], max = md[, "tmred_para2"][[1]])

## Location hierarchy.
lh <- fread(paste0(j_drive, "/temp/", user, "/GBD_2016/loc_path.csv"))
lh <- lh %>% dplyr::filter(location_id == L) %>% dplyr::select(-location_id)

## Ensemble weights are universal across space-time-location, so keep one row.
WEIGHTS <- fread(paste0(j_drive, "/WORK/05_risk/ensemble/weights/", risk_map$risk[1], ".csv"))
w <- WEIGHTS %>% dplyr::ungroup() %>%
  dplyr::select(-year_id, -sex_id, -age_group_id, -location_id) %>%
  dplyr::slice(1)

## Pull relative risks; keep only the continuous ("per unit") parameter.
rr_draws <- get_draws(gbd_id_field = c('rei_id'), gbd_round_id = c('4'),
                      draw_type = c('rr'), gbd_id = c(paste0(REI_ID)),
                      source = c('risk'), location_ids = c(paste0(L)))
rr_draws <- rr_draws %>% dplyr::filter(parameter == "per unit")
rr_draws <- rr_draws %>% dplyr::filter(model_version_id == max(rr_draws$model_version_id, na.rm = TRUE))
rr_draws <- rr_draws %>% dplyr::select(cause_id, sex_id, age_group_id, mortality, morbidity, starts_with("rr_"))
rr_draws <- rr_draws[!duplicated(rr_draws), ]

## Reshape exposure draws long (one row per demographic x draw).
exp_draws <- exp_draws[exp_draws$age_group_id %in% rr_draws$age_group_id, ]
exp_draws$ID <- seq.int(nrow(exp_draws))
exp_long <- merged.stack(exp_draws, var.stubs = c("bmi_draw_", "over_draw_", "obese_draw_"),
                         sep = "var.stubs", keep.all = TRUE)
setnames(exp_long, ".time_1", "draw")
exp_long <- sapply(exp_long, as.numeric)
exp_long <- data.table(exp_long)
exp_long <- exp_long[bmi_draw_ > 0]
setkey(exp_long, location_id, year_id, age_group_id, sex_id, ID, draw)

## Build a hashmap of RRs keyed by "sex|age|draw|cause_mortality_morbidity".
rr_long <- merged.stack(rr_draws, var.stubs = c("rr_"), sep = "var.stubs", keep.all = TRUE)
setnames(rr_long, ".time_1", "draw")
RRARRAY <- as.array(rr_long$rr_)
rr_long$cause_id <- paste(rr_long$cause_id, rr_long$mortality, rr_long$morbidity, sep = '_')
NAMES <- as.array(paste(rr_long$sex_id, rr_long$age_group_id, rr_long$draw, rr_long$cause_id, sep = '|'))
RRHASH <- hashmap(NAMES, RRARRAY)
CAUSES <- unique(rr_long$cause_id)

exp_long <- exp_long[complete.cases(exp_long), ]
exp_long_cp <- exp_long  # pristine copy, used to detect failed rows later
cores <- floor(CORES * .6)

source(paste0(j_drive, "/temp/", user, "/GBD_2016/calc_paf/parallel/get_edensity.R"))
source(paste0(j_drive, "/temp/", user, "/GBD_2016/calc_paf/parallel/ihmeDistList.R"))
dlist <- c(classA, classB, classM)
Rcpp::sourceCpp(paste0(j_drive, "/temp/", user, "/GBD_2016/bmi/integ_bmi.cpp"))

## Objective for the SD search: squared error between the prevalences implied
## by the candidate ensemble density and the observed over/obese prevalences.
calc_bmi_shape <- function(b, over, obese, weights, mean) {
  tryCatch({
    fx <- NULL
    fx <- get_edensity(weights, mean, Vectorize(b), 10, 50)
    out <- NULL
    out <- integ_bmi(fx$x, fx$fx)
    ((out$over - over)^2 + (out$obese - obese)^2)
  }, error = function(e) { cat("ERROR :", conditionMessage(e), "\n") })
}

## PAF for one cause, given the fitted density and this row's demographics.
calcPAFbyrow <- function(jjj, xx, yy, tm, rs, inv, cp, sex, age, iixx) {
  rrval <- RRHASH$find(paste(sex, age, iixx, jjj, sep = '|'))
  paf <- (calc_paf_c(x = xx, fx = yy, tmrel = tm, rr = rrval, rr_scalar = rs,
                     inv_exp = inv, cap = cp)$paf)
  return(list(paf = paf, cause_id = jjj))
}
calcPAFbyrow <- cmpfun(calcPAFbyrow)

## Full pipeline for one exp_long row: fit the ensemble SD, build the
## density, and integrate PAFs for every cause.
calcbyrow <- function(jj) {
  LL <- as.numeric(exp_long[jj, "location_id"])
  YY <- as.numeric(exp_long[jj, "year_id"])
  SS <- as.numeric(exp_long[jj, "sex_id"])
  AA <- as.numeric(exp_long[jj, "age_group_id"])
  Mval <- as.numeric(exp_long[jj, "bmi_draw_"])
  Over <- as.numeric(exp_long[jj, "over_draw_"])
  Obese <- as.numeric(exp_long[jj, "obese_draw_"])
  ix <- as.numeric(exp_long[jj, "draw"])
  ii <- ix + 1
  ID <- as.numeric(exp_long[jj, "ID"])
  TMREL <- TM[[ii]]
  mean <- Mval
  # Multi-start search over candidate SDs (starts mean/1, mean/3, ..., mean/9).
  optPARAMS <- list()
  optVALS <- list()
  for (p in seq(1, 10, by = 2)) {
    SOPT <- nlminb(start = mean / p, objective = calc_bmi_shape, over = Over,
                   obese = Obese, weights = w, mean = Mval,
                   lower = mean * .01, upper = mean * 1.5,
                   control = list(iter.max = 3, eval.max = 3))
    optPARAMS <- rbind(optPARAMS, SOPT$par)
    optVALS <- rbind(optVALS, SOPT$objective)
  }
  Spred <- optPARAMS[which.min(optVALS), ][[1]]
  Spred_obj <- optVALS[which.min(optVALS), ][[1]]
  D <- NULL
  D <- get_edensity(w, Mval, Spred, 10, 50)
  base <- D$x
  denn <- D$fx
  # Exposure cap: the density's minimum for protective risks, max otherwise.
  if (INV == 1) {
    (cap <- D$XMIN)
  } else if (INV == 0) {
    (cap <- D$XMAX)
  }
  PAFOUT <- rbindlist(lapply(unique(CAUSES), calcPAFbyrow, xx = base, yy = denn,
                             tm = TMREL, rs = RR_SCALAR, inv = INV, cp = cap,
                             sex = SS, age = AA, iixx = ix),
                      fill = TRUE)
  pafs <- data.table(location_id = LL, year_id = YY, sex_id = SS, age_group_id = AA,
                     draw = ix, PAFOUT, bmi_draw = Mval, sd = Spred,
                     sd_obj = Spred_obj, obese = Obese, overweight = Over)
  return(pafs)
}
calcbyrow <- cmpfun(calcbyrow)

cores <- floor(CORES * .6)
ST <- format(Sys.time(), "%H:%M:%S")
print(paste0("Computing PAFs: ", ST))
options(show.error.messages = FALSE)
## FIX: the original passed FUN = try(calcbyrow), which evaluates try() on
## the function object itself and gave no per-row error protection.
R_P <- mclapply(X = seq_len(nrow(exp_long)),
                FUN = function(jj) try(calcbyrow(jj)), mc.cores = cores)
options(show.error.messages = TRUE)
SE <- format(Sys.time(), "%H:%M:%S")
print(paste0("PAF calc complete: ", SE))

## Keep only rows that produced a data.table (drop errors), then tidy.
idx <- sapply(R_P, is.data.table)
R_P <- R_P[idx]
file <- rbindlist(R_P, fill = TRUE)
file <- file[complete.cases(file), ]
file <- cSplit(file, c("cause_id"), c("_"))
names(file)[names(file) == "cause_id_1"] <- "cause_id"
names(file)[names(file) == "cause_id_2"] <- "mortality"
names(file)[names(file) == "cause_id_3"] <- "morbidity"
exp_long <- sapply(exp_long, as.numeric)
exp_long <- data.table(exp_long)
file <- sapply(file, as.numeric)
file <- data.table(file)
file <- file[paf > -1 & paf < 1]

## Identify demographic/draw combinations that failed and retry them serially.
file_ex <- file %>% dplyr::select(location_id, year_id, sex_id, age_group_id, draw)
file_ex <- file_ex[!duplicated(file_ex), ]
file_ex <- sapply(file_ex, as.numeric)
file_ex <- data.table(file_ex)
## FIX: the original join keys listed "sex_id" twice and omitted "location_id".
failed <- exp_long[!file_ex, on = c("location_id", "year_id", "sex_id", "age_group_id", "draw")]
NT <- nrow(failed)
if (NT > 0) {
  print("Computing failures")
  exp_long <- failed
  options(show.error.messages = FALSE)
  ## FIX: same try()-misuse as above, and seq_len() guards the zero-row case.
  R_P <- lapply(seq_len(nrow(exp_long)), function(jj) try(calcbyrow(jj)))
  options(show.error.messages = TRUE)
  idx <- sapply(R_P, is.data.table)
  R_P <- R_P[idx]
  file_fail_out <- rbindlist(R_P, fill = TRUE)
  file_fail_out <- file_fail_out[complete.cases(file_fail_out), ]
  file_fail_out <- cSplit(file_fail_out, c("cause_id"), c("_"))
  names(file_fail_out)[names(file_fail_out) == "cause_id_1"] <- "cause_id"
  names(file_fail_out)[names(file_fail_out) == "cause_id_2"] <- "mortality"
  names(file_fail_out)[names(file_fail_out) == "cause_id_3"] <- "morbidity"
  file <- rbind(file, file_fail_out)
}

## Re-check against the pristine copy; log anything still missing.
exp_long <- sapply(exp_long_cp, as.numeric)
exp_long <- data.table(exp_long)
file <- sapply(file, as.numeric)
file <- data.table(file)
file_ex <- file %>% dplyr::select(location_id, year_id, sex_id, age_group_id, draw)
file_ex <- file_ex[!duplicated(file_ex), ]
file_ex <- sapply(file_ex, as.numeric)
file_ex <- data.table(file_ex)
failed <- exp_long[!file_ex, on = c("location_id", "year_id", "sex_id", "age_group_id", "draw")]
NT <- nrow(failed)
if (NT > 0) {
  try(dir.create(paste0("/share/epi/risk/paf/DBD/", R, "/calc_out/failed")))
  write.csv(failed, paste0("/share/epi/risk/paf/DBD/", R, "/calc_out/failed/", L, "_failed.csv"))
}
write.csv(file, paste0("/share/epi/risk/paf/DBD/", R, "/calc_out/", L, ".csv"))

## Reshape wide (one paf_<draw> column per draw) and write yll/yld files.
file <- data.table(file)
file_pafs <- file %>% dplyr::select(location_id, year_id, sex_id, age_group_id,
                                    cause_id, paf, draw, mortality, morbidity)
cols <- setdiff(names(file_pafs), c("paf", "draw"))
file <- dcast(file_pafs, paste(paste(cols, collapse = " + "), "~ draw"),
              value.var = c("paf"), sep = "")
names(file)[grep("[0-9]+", names(file))] <-
  paste("paf", names(file)[grep("[0-9]+", names(file))], sep = "_")
setkey(file, year_id, sex_id)
cy <- comp_dem$year_ids
for (s in unique(file$sex_id)) {
  for (y in cy) {
    ds <- file[year_id == y & sex_id == s, ]
    file_out <- ds[sex_id == s & mortality == 1]
    write.csv(file_out, paste0("/share/epi/risk/paf/DBD/", R, "/", "paf_", "yll", "_", L, "_", y, "_", s, ".csv"))
    file_out <- ds[sex_id == s & morbidity == 1]
    write.csv(file_out, paste0("/share/epi/risk/paf/DBD/", R, "/", "paf_", "yld", "_", L, "_", y, "_", s, ".csv"))
  }
}
print(paste0("PAF CALC TIME: ", ST, " END TIME: ", SE))
try(closeAllConnections())
/risk_factors_code/diet/PAF_calculation/calc_paf_bmi_cpp_hash_optim.R
no_license
Nermin-Ghith/ihme-modeling
R
false
false
12,717
r
## Calculate BMI PAFs for one location with the ensemble-distribution
## approach: exposure draws -> fitted density (SD chosen to match overweight/
## obesity prevalence) -> PAF integration per cause/sex/age/draw.
## Cluster job; command-line args: <REI_ID> <LOCATION_ID> <CORES>.
rm(list = ls())

if (Sys.info()["sysname"] == "Darwin") j_drive <- "/Volumes/snfs"
if (Sys.info()["sysname"] == "Linux") j_drive <- "/home/j"
user <- Sys.info()[["user"]]

library(foreach)
library(iterators)
library(doParallel)
library(dplyr)
library(data.table)
library(dfoptim)
library(fitdistrplus)
library(RColorBrewer)
library(ggplot2)
library(actuar)
library(grid)
library(RMySQL)
library(mvtnorm)
library(splitstackshape)
library(compiler)
library(rio)
library(hashmap)
library(pbmcapply)

setCompilerOptions(suppressAll = TRUE)
enableJIT(3)

source(paste0(j_drive, "/temp/", user, "/GBD_2016/calc_paf/get_draws.R"))
source(paste0(j_drive, "/temp/central_comp/libraries/current/r/get_demographics.R"))
source(paste0(j_drive, "/temp/central_comp/libraries/current/r/get_demographics_template.R"))

comp_dem <- get_demographics(gbd_team = "epi", gbd_round_id = 4)
comp_dem$year_ids <- c(comp_dem$year_ids, 2006)
ar <- get_demographics(gbd_team = "epi_ar", gbd_round_id = 4)
`%notin%` <- function(x, y) !(x %in% y)
# Annualized-results years not already covered by the standard epi years.
ay <- ar$year_ids[ar$year_ids %notin% comp_dem$year_ids]

Rcpp::sourceCpp(paste0(j_drive, "/temp/", user, "/GBD_2016/calc_paf/paf_c_plus.cpp"))

## Command-line arguments with debug/desktop fallbacks.
if (Sys.info()["sysname"] == "Darwin") {
  arg <- c(101)
} else {
  arg <- commandArgs()[-(1:3)]
}
if (exists("DEBUG")) {
  REI_ID <- 108
  L <- 44553
  CORES <- 20
} else {
  REI_ID <- as.numeric(arg[1])
  L <- as.numeric(arg[2])
  CORES <- as.numeric(arg[3])
}
if (is.na(REI_ID)) REI_ID <- 108
if (is.na(L)) L <- 77
if (is.na(CORES)) CORES <- 20
sessionInfo()
print(paste0("REI ID: ", REI_ID, " LOCATION ID: ", L, " CORES: ", CORES))

risk_t <- fread(paste0(j_drive, "/temp/", user, "/GBD_2016/calc_paf/shared_rei_table.csv"))
risk_map <- risk_t %>% dplyr::filter(rei_id == paste0(REI_ID))
R <- paste0(risk_map$risk[1])
if (R == "diet_fish") R <- "diet_omega_3"
try(dir.create(paste0("/share/epi/risk/paf/DBD/", risk_map$risk[1])))
try(dir.create(paste0("/share/epi/risk/paf/DBD/", risk_map$risk[1], "/calc_out")))
try(dir.create(paste0("/share/epi/risk/paf/DBD/", risk_map$risk[1], "/calc_out/logs")))

## Pull best-model exposure draws for one modelable entity across both the
## standard and annualized year sets, de-duplicate, and rename draw columns
## with `prefix` (e.g. "bmi_draw_0").
pull_exp_draws <- function(me_id, prefix) {
  d_main <- get_draws(gbd_id_field = c('modelable_entity_id'), gbd_round_id = c('4'),
                      gbd_id = c(me_id), source = c('risk'),
                      location_ids = c(paste0(L)), status = c('best'),
                      year_ids = c(comp_dem$year_ids))
  d_ay <- get_draws(gbd_id_field = c('modelable_entity_id'), gbd_round_id = c('4'),
                    gbd_id = c(me_id), source = c('risk'),
                    location_ids = c(paste0(L)), status = c('best'),
                    year_ids = c(ay))
  d <- bind_rows(d_main, d_ay)
  d <- d %>% dplyr::filter(model_version_id == max(model_version_id, na.rm = TRUE))
  d <- d %>% dplyr::select(location_id, year_id, age_group_id, sex_id, starts_with("draw_"))
  d <- d[!duplicated(d), ]
  names(d)[grep("draw_", names(d))] <-
    paste(prefix, names(d)[grep("draw_", names(d))], sep = "_")
  d
}

bmi_draws   <- pull_exp_draws('2548', "bmi")    # mean BMI
obese_draws <- pull_exp_draws('9364', "obese")  # obesity prevalence
over_draws  <- pull_exp_draws('9363', "over")   # overweight prevalence

exp_draws <- left_join(bmi_draws, obese_draws)
exp_draws <- left_join(exp_draws, over_draws)

## Read in risk meta-data (RR scale, exposure direction, TMREL bounds).
md <- fread(paste0(j_drive, "/temp/", user, "/GBD_2016/calc_paf/risk_variables.csv"))
md <- md[md$risk == paste0(risk_map$risk[1]), ]
RR_SCALAR <- as.numeric(md[, "rr_scalar"][[1]])
INV <- as.numeric(md[, "inv_exp"][[1]])
set.seed(11180 * REI_ID)
TM <- runif(1000, min = md[, "tmred_para1"][[1]], max = md[, "tmred_para2"][[1]])

## Location hierarchy.
lh <- fread(paste0(j_drive, "/temp/", user, "/GBD_2016/loc_path.csv"))
lh <- lh %>% dplyr::filter(location_id == L) %>% dplyr::select(-location_id)

## Ensemble weights are universal across space-time-location, so keep one row.
WEIGHTS <- fread(paste0(j_drive, "/WORK/05_risk/ensemble/weights/", risk_map$risk[1], ".csv"))
w <- WEIGHTS %>% dplyr::ungroup() %>%
  dplyr::select(-year_id, -sex_id, -age_group_id, -location_id) %>%
  dplyr::slice(1)

## Pull relative risks; keep only the continuous ("per unit") parameter.
rr_draws <- get_draws(gbd_id_field = c('rei_id'), gbd_round_id = c('4'),
                      draw_type = c('rr'), gbd_id = c(paste0(REI_ID)),
                      source = c('risk'), location_ids = c(paste0(L)))
rr_draws <- rr_draws %>% dplyr::filter(parameter == "per unit")
rr_draws <- rr_draws %>% dplyr::filter(model_version_id == max(rr_draws$model_version_id, na.rm = TRUE))
rr_draws <- rr_draws %>% dplyr::select(cause_id, sex_id, age_group_id, mortality, morbidity, starts_with("rr_"))
rr_draws <- rr_draws[!duplicated(rr_draws), ]

## Reshape exposure draws long (one row per demographic x draw).
exp_draws <- exp_draws[exp_draws$age_group_id %in% rr_draws$age_group_id, ]
exp_draws$ID <- seq.int(nrow(exp_draws))
exp_long <- merged.stack(exp_draws, var.stubs = c("bmi_draw_", "over_draw_", "obese_draw_"),
                         sep = "var.stubs", keep.all = TRUE)
setnames(exp_long, ".time_1", "draw")
exp_long <- sapply(exp_long, as.numeric)
exp_long <- data.table(exp_long)
exp_long <- exp_long[bmi_draw_ > 0]
setkey(exp_long, location_id, year_id, age_group_id, sex_id, ID, draw)

## Build a hashmap of RRs keyed by "sex|age|draw|cause_mortality_morbidity".
rr_long <- merged.stack(rr_draws, var.stubs = c("rr_"), sep = "var.stubs", keep.all = TRUE)
setnames(rr_long, ".time_1", "draw")
RRARRAY <- as.array(rr_long$rr_)
rr_long$cause_id <- paste(rr_long$cause_id, rr_long$mortality, rr_long$morbidity, sep = '_')
NAMES <- as.array(paste(rr_long$sex_id, rr_long$age_group_id, rr_long$draw, rr_long$cause_id, sep = '|'))
RRHASH <- hashmap(NAMES, RRARRAY)
CAUSES <- unique(rr_long$cause_id)

exp_long <- exp_long[complete.cases(exp_long), ]
exp_long_cp <- exp_long  # pristine copy, used to detect failed rows later
cores <- floor(CORES * .6)

source(paste0(j_drive, "/temp/", user, "/GBD_2016/calc_paf/parallel/get_edensity.R"))
source(paste0(j_drive, "/temp/", user, "/GBD_2016/calc_paf/parallel/ihmeDistList.R"))
dlist <- c(classA, classB, classM)
Rcpp::sourceCpp(paste0(j_drive, "/temp/", user, "/GBD_2016/bmi/integ_bmi.cpp"))

## Objective for the SD search: squared error between the prevalences implied
## by the candidate ensemble density and the observed over/obese prevalences.
calc_bmi_shape <- function(b, over, obese, weights, mean) {
  tryCatch({
    fx <- NULL
    fx <- get_edensity(weights, mean, Vectorize(b), 10, 50)
    out <- NULL
    out <- integ_bmi(fx$x, fx$fx)
    ((out$over - over)^2 + (out$obese - obese)^2)
  }, error = function(e) { cat("ERROR :", conditionMessage(e), "\n") })
}

## PAF for one cause, given the fitted density and this row's demographics.
calcPAFbyrow <- function(jjj, xx, yy, tm, rs, inv, cp, sex, age, iixx) {
  rrval <- RRHASH$find(paste(sex, age, iixx, jjj, sep = '|'))
  paf <- (calc_paf_c(x = xx, fx = yy, tmrel = tm, rr = rrval, rr_scalar = rs,
                     inv_exp = inv, cap = cp)$paf)
  return(list(paf = paf, cause_id = jjj))
}
calcPAFbyrow <- cmpfun(calcPAFbyrow)

## Full pipeline for one exp_long row: fit the ensemble SD, build the
## density, and integrate PAFs for every cause.
calcbyrow <- function(jj) {
  LL <- as.numeric(exp_long[jj, "location_id"])
  YY <- as.numeric(exp_long[jj, "year_id"])
  SS <- as.numeric(exp_long[jj, "sex_id"])
  AA <- as.numeric(exp_long[jj, "age_group_id"])
  Mval <- as.numeric(exp_long[jj, "bmi_draw_"])
  Over <- as.numeric(exp_long[jj, "over_draw_"])
  Obese <- as.numeric(exp_long[jj, "obese_draw_"])
  ix <- as.numeric(exp_long[jj, "draw"])
  ii <- ix + 1
  ID <- as.numeric(exp_long[jj, "ID"])
  TMREL <- TM[[ii]]
  mean <- Mval
  # Multi-start search over candidate SDs (starts mean/1, mean/3, ..., mean/9).
  optPARAMS <- list()
  optVALS <- list()
  for (p in seq(1, 10, by = 2)) {
    SOPT <- nlminb(start = mean / p, objective = calc_bmi_shape, over = Over,
                   obese = Obese, weights = w, mean = Mval,
                   lower = mean * .01, upper = mean * 1.5,
                   control = list(iter.max = 3, eval.max = 3))
    optPARAMS <- rbind(optPARAMS, SOPT$par)
    optVALS <- rbind(optVALS, SOPT$objective)
  }
  Spred <- optPARAMS[which.min(optVALS), ][[1]]
  Spred_obj <- optVALS[which.min(optVALS), ][[1]]
  D <- NULL
  D <- get_edensity(w, Mval, Spred, 10, 50)
  base <- D$x
  denn <- D$fx
  # Exposure cap: the density's minimum for protective risks, max otherwise.
  if (INV == 1) {
    (cap <- D$XMIN)
  } else if (INV == 0) {
    (cap <- D$XMAX)
  }
  PAFOUT <- rbindlist(lapply(unique(CAUSES), calcPAFbyrow, xx = base, yy = denn,
                             tm = TMREL, rs = RR_SCALAR, inv = INV, cp = cap,
                             sex = SS, age = AA, iixx = ix),
                      fill = TRUE)
  pafs <- data.table(location_id = LL, year_id = YY, sex_id = SS, age_group_id = AA,
                     draw = ix, PAFOUT, bmi_draw = Mval, sd = Spred,
                     sd_obj = Spred_obj, obese = Obese, overweight = Over)
  return(pafs)
}
calcbyrow <- cmpfun(calcbyrow)

cores <- floor(CORES * .6)
ST <- format(Sys.time(), "%H:%M:%S")
print(paste0("Computing PAFs: ", ST))
options(show.error.messages = FALSE)
## FIX: the original passed FUN = try(calcbyrow), which evaluates try() on
## the function object itself and gave no per-row error protection.
R_P <- mclapply(X = seq_len(nrow(exp_long)),
                FUN = function(jj) try(calcbyrow(jj)), mc.cores = cores)
options(show.error.messages = TRUE)
SE <- format(Sys.time(), "%H:%M:%S")
print(paste0("PAF calc complete: ", SE))

## Keep only rows that produced a data.table (drop errors), then tidy.
idx <- sapply(R_P, is.data.table)
R_P <- R_P[idx]
file <- rbindlist(R_P, fill = TRUE)
file <- file[complete.cases(file), ]
file <- cSplit(file, c("cause_id"), c("_"))
names(file)[names(file) == "cause_id_1"] <- "cause_id"
names(file)[names(file) == "cause_id_2"] <- "mortality"
names(file)[names(file) == "cause_id_3"] <- "morbidity"
exp_long <- sapply(exp_long, as.numeric)
exp_long <- data.table(exp_long)
file <- sapply(file, as.numeric)
file <- data.table(file)
file <- file[paf > -1 & paf < 1]

## Identify demographic/draw combinations that failed and retry them serially.
file_ex <- file %>% dplyr::select(location_id, year_id, sex_id, age_group_id, draw)
file_ex <- file_ex[!duplicated(file_ex), ]
file_ex <- sapply(file_ex, as.numeric)
file_ex <- data.table(file_ex)
## FIX: the original join keys listed "sex_id" twice and omitted "location_id".
failed <- exp_long[!file_ex, on = c("location_id", "year_id", "sex_id", "age_group_id", "draw")]
NT <- nrow(failed)
if (NT > 0) {
  print("Computing failures")
  exp_long <- failed
  options(show.error.messages = FALSE)
  ## FIX: same try()-misuse as above, and seq_len() guards the zero-row case.
  R_P <- lapply(seq_len(nrow(exp_long)), function(jj) try(calcbyrow(jj)))
  options(show.error.messages = TRUE)
  idx <- sapply(R_P, is.data.table)
  R_P <- R_P[idx]
  file_fail_out <- rbindlist(R_P, fill = TRUE)
  file_fail_out <- file_fail_out[complete.cases(file_fail_out), ]
  file_fail_out <- cSplit(file_fail_out, c("cause_id"), c("_"))
  names(file_fail_out)[names(file_fail_out) == "cause_id_1"] <- "cause_id"
  names(file_fail_out)[names(file_fail_out) == "cause_id_2"] <- "mortality"
  names(file_fail_out)[names(file_fail_out) == "cause_id_3"] <- "morbidity"
  file <- rbind(file, file_fail_out)
}

## Re-check against the pristine copy; log anything still missing.
exp_long <- sapply(exp_long_cp, as.numeric)
exp_long <- data.table(exp_long)
file <- sapply(file, as.numeric)
file <- data.table(file)
file_ex <- file %>% dplyr::select(location_id, year_id, sex_id, age_group_id, draw)
file_ex <- file_ex[!duplicated(file_ex), ]
file_ex <- sapply(file_ex, as.numeric)
file_ex <- data.table(file_ex)
failed <- exp_long[!file_ex, on = c("location_id", "year_id", "sex_id", "age_group_id", "draw")]
NT <- nrow(failed)
if (NT > 0) {
  try(dir.create(paste0("/share/epi/risk/paf/DBD/", R, "/calc_out/failed")))
  write.csv(failed, paste0("/share/epi/risk/paf/DBD/", R, "/calc_out/failed/", L, "_failed.csv"))
}
write.csv(file, paste0("/share/epi/risk/paf/DBD/", R, "/calc_out/", L, ".csv"))

## Reshape wide (one paf_<draw> column per draw) and write yll/yld files.
file <- data.table(file)
file_pafs <- file %>% dplyr::select(location_id, year_id, sex_id, age_group_id,
                                    cause_id, paf, draw, mortality, morbidity)
cols <- setdiff(names(file_pafs), c("paf", "draw"))
file <- dcast(file_pafs, paste(paste(cols, collapse = " + "), "~ draw"),
              value.var = c("paf"), sep = "")
names(file)[grep("[0-9]+", names(file))] <-
  paste("paf", names(file)[grep("[0-9]+", names(file))], sep = "_")
setkey(file, year_id, sex_id)
cy <- comp_dem$year_ids
for (s in unique(file$sex_id)) {
  for (y in cy) {
    ds <- file[year_id == y & sex_id == s, ]
    file_out <- ds[sex_id == s & mortality == 1]
    write.csv(file_out, paste0("/share/epi/risk/paf/DBD/", R, "/", "paf_", "yll", "_", L, "_", y, "_", s, ".csv"))
    file_out <- ds[sex_id == s & morbidity == 1]
    write.csv(file_out, paste0("/share/epi/risk/paf/DBD/", R, "/", "paf_", "yld", "_", L, "_", y, "_", s, ".csv"))
  }
}
print(paste0("PAF CALC TIME: ", ST, " END TIME: ", SE))
try(closeAllConnections())
# Text mining with sparklyr — adapted from
# https://spark.rstudio.com/guides/textmining/
# install.packages("gutenbergr")
library(gutenbergr)
# install.packages("sparklyr")
library(sparklyr)

# spark_install(version = "2.1.0")
sc <- spark_connect(master = "local")

# Fetch every Project Gutenberg work by the given author and dump the raw
# text lines to a local file.
export_author_text <- function(author_name, out_file) {
  gutenberg_works() %>%
    filter(author == author_name) %>%
    pull(gutenberg_id) %>%
    gutenberg_download() %>%
    pull(text) %>%
    writeLines(out_file)
}

export_author_text("Twain, Mark", "mark_twain.txt")
export_author_text("Doyle, Arthur", "arthur_doyle.txt")

# Imports Mark Twain's file
# Setting up the path to the file in a Windows OS laptop
twain_path <- paste0("file:///", getwd(), "/mark_twain.txt")
twain <- spark_read_text(sc, "twain", twain_path)

# Imports Sir Arthur Conan Doyle's file
doyle_path <- paste0("file:///", getwd(), "/arthur_doyle.txt")
doyle <- spark_read_text(sc, "doyle", doyle_path)

# Step-by-step pipeline, inspecting intermediate results along the way.
# 1. Tag each line with its author and drop blank lines.
all_words <- doyle %>%
  mutate(author = "doyle") %>%
  sdf_bind_rows({
    twain %>% mutate(author = "twain")
  }) %>%
  filter(nchar(line) > 0)

# 2. Strip punctuation.
all_words <- all_words %>%
  mutate(line = regexp_replace(line, "[_\"\'():;,.!?\\-]", " "))

# 3. Tokenize each line into a list of words.
all_words <- all_words %>%
  ft_tokenizer(input.col = "line", output.col = "word_list")
head(all_words, 4)

# 4. Remove stop words.
all_words <- all_words %>%
  ft_stop_words_remover(input.col = "word_list", output.col = "wo_stop_words")
head(all_words, 4)

# 5. Explode the word lists into one row per word; keep words > 2 chars.
all_words <- all_words %>%
  mutate(word = explode(wo_stop_words)) %>%
  select(word, author) %>%
  filter(nchar(word) > 2)
head(all_words, 4)

# 6. Cache the result in Spark memory.
all_words <- all_words %>%
  compute("all_words")

# Wrap all in one dplyr statement -----------------------------------------
all_words <- doyle %>%
  mutate(author = "doyle") %>%
  sdf_bind_rows({
    twain %>% mutate(author = "twain")
  }) %>%
  filter(nchar(line) > 0) %>%
  mutate(line = regexp_replace(line, "[_\"\'():;,.!?\\-]", " ")) %>%
  ft_tokenizer(input.col = "line", output.col = "word_list") %>%
  ft_stop_words_remover(input.col = "word_list", output.col = "wo_stop_words") %>%
  mutate(word = explode(wo_stop_words)) %>%
  select(word, author) %>%
  filter(nchar(word) > 2) %>%
  compute("all_words")

# data analysis -----------------------------------------------------------
# Word frequencies per author.
word_count <- all_words %>%
  group_by(author, word) %>%
  tally() %>%
  arrange(desc(n))
word_count

# Words Doyle uses that Twain never does.
doyle_unique <- filter(word_count, author == "doyle") %>%
  anti_join(filter(word_count, author == "twain"), by = "word") %>%
  arrange(desc(n)) %>%
  compute("doyle_unique")
doyle_unique

doyle_unique %>%
  head(100) %>%
  collect() %>%
  with(wordcloud::wordcloud(
    word, n,
    colors = c("#999999", "#E69F00", "#56B4E9", "#56B4E9")
  ))

# Did Twain ever mention Sherlock as a single token?
all_words %>%
  filter(author == "twain", word == "sherlock") %>%
  tally()

# Full-line search for "sherlock" in Twain's corpus.
twain %>%
  mutate(line = lower(line)) %>%
  filter(instr(line, "sherlock") > 0) %>%
  pull(line)

spark_disconnect(sc)
/textmining.R
no_license
AMChierici/bigdata_p3
R
false
false
2,971
r
# https://spark.rstudio.com/guides/textmining/ # install.packages("gutenbergr") library(gutenbergr) # install.packages("sparklyr") library(sparklyr) # spark_install(version = "2.1.0") sc <- spark_connect(master = "local") gutenberg_works() %>% filter(author == "Twain, Mark") %>% pull(gutenberg_id) %>% gutenberg_download() %>% pull(text) %>% writeLines("mark_twain.txt") gutenberg_works() %>% filter(author == "Doyle, Arthur") %>% pull(gutenberg_id) %>% gutenberg_download() %>% pull(text) %>% writeLines("arthur_doyle.txt") # Imports Mark Twain's file # Setting up the path to the file in a Windows OS laptop twain_path <- paste0("file:///", getwd(), "/mark_twain.txt") twain <- spark_read_text(sc, "twain", twain_path) # Imports Sir Arthur Conan Doyle's file doyle_path <- paste0("file:///", getwd(), "/arthur_doyle.txt") doyle <- spark_read_text(sc, "doyle", doyle_path) all_words <- doyle %>% mutate(author = "doyle") %>% sdf_bind_rows({ twain %>% mutate(author = "twain")}) %>% filter(nchar(line) > 0) all_words <- all_words %>% mutate(line = regexp_replace(line, "[_\"\'():;,.!?\\-]", " ")) all_words <- all_words %>% ft_tokenizer(input.col = "line", output.col = "word_list") head(all_words, 4) all_words <- all_words %>% ft_stop_words_remover(input.col = "word_list", output.col = "wo_stop_words") head(all_words, 4) all_words <- all_words %>% mutate(word = explode(wo_stop_words)) %>% select(word, author) %>% filter(nchar(word) > 2) head(all_words, 4) all_words <- all_words %>% compute("all_words") # Wrap all in one dplyr statement ----------------------------------------- all_words <- doyle %>% mutate(author = "doyle") %>% sdf_bind_rows({ twain %>% mutate(author = "twain")}) %>% filter(nchar(line) > 0) %>% mutate(line = regexp_replace(line, "[_\"\'():;,.!?\\-]", " ")) %>% ft_tokenizer(input.col = "line", output.col = "word_list") %>% ft_stop_words_remover(input.col = "word_list", output.col = "wo_stop_words") %>% mutate(word = explode(wo_stop_words)) %>% select(word, author) 
%>% filter(nchar(word) > 2) %>% compute("all_words") # data analysis ----------------------------------------------------------- word_count <- all_words %>% group_by(author, word) %>% tally() %>% arrange(desc(n)) word_count doyle_unique <- filter(word_count, author == "doyle") %>% anti_join(filter(word_count, author == "twain"), by = "word") %>% arrange(desc(n)) %>% compute("doyle_unique") doyle_unique doyle_unique %>% head(100) %>% collect() %>% with(wordcloud::wordcloud( word, n, colors = c("#999999", "#E69F00", "#56B4E9","#56B4E9"))) all_words %>% filter(author == "twain", word == "sherlock") %>% tally() twain %>% mutate(line = lower(line)) %>% filter(instr(line, "sherlock") > 0) %>% pull(line) spark_disconnect(sc)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/defaults_constants.R \name{get_default_plot_opts_lbe_rh} \alias{get_default_plot_opts_lbe_rh} \title{Default Plot Options For LBE_RH} \usage{ get_default_plot_opts_lbe_rh() } \description{ Return a list with specific defaults and constants that are used to produce the comparison plot report for the trait group Lineare Beschreibung for RH (LBE_RH). }
/man/get_default_plot_opts_lbe_rh.Rd
no_license
pvrqualitasag/zwsroutinetools
R
false
true
430
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/defaults_constants.R \name{get_default_plot_opts_lbe_rh} \alias{get_default_plot_opts_lbe_rh} \title{Default Plot Options For LBE_RH} \usage{ get_default_plot_opts_lbe_rh() } \description{ Return a list with specific defaults and constants that are used to produce the comparison plot report for the trait group Lineare Beschreibung for RH (LBE_RH). }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{roads} \alias{roads} \title{Dodson and Tobler's street data with appended road names.} \format{ A data frame with 206 observations and 5 variables. The data describe the roads on Snow's map. \describe{ \item{\code{street}}{street segment number, which range between 1 and 528} \item{\code{n}}{number of points in this street line segment} \item{\code{x}}{x-coordinate} \item{\code{y}}{y-coordinate} \item{\code{id}}{unique numeric ID} \item{\code{name}}{road name} \item{\code{lon}}{longitude} \item{\code{lat}}{latitude} } } \usage{ roads } \description{ This data set adds road names from John Snow's map to Dodson and Tobler's street data. The latter are also available from HistData::Snow.streets. } \seealso{ \code{\link{road.segments}} \code{vignette}("road.names") \code{\link{streetNameLocator}} \code{\link{streetNumberLocator}} \code{\link{segmentLocator}} } \keyword{datasets}
/man/roads.Rd
no_license
cran/cholera
R
false
true
1,014
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{roads} \alias{roads} \title{Dodson and Tobler's street data with appended road names.} \format{ A data frame with 206 observations and 5 variables. The data describe the roads on Snow's map. \describe{ \item{\code{street}}{street segment number, which range between 1 and 528} \item{\code{n}}{number of points in this street line segment} \item{\code{x}}{x-coordinate} \item{\code{y}}{y-coordinate} \item{\code{id}}{unique numeric ID} \item{\code{name}}{road name} \item{\code{lon}}{longitude} \item{\code{lat}}{latitude} } } \usage{ roads } \description{ This data set adds road names from John Snow's map to Dodson and Tobler's street data. The latter are also available from HistData::Snow.streets. } \seealso{ \code{\link{road.segments}} \code{vignette}("road.names") \code{\link{streetNameLocator}} \code{\link{streetNumberLocator}} \code{\link{segmentLocator}} } \keyword{datasets}
library(gbm) # impute.NA is a little function that fills in NAs with either means or medians impute.NA <- function(x, fill="mean"){ if (fill=="mean") { x.complete <- ifelse(is.na(x), mean(x, na.rm=TRUE), x) } if (fill=="median") { x.complete <- ifelse(is.na(x), median(x, na.rm=TRUE), x) } return(x.complete) } data <- read.table("Psychopath_Trainingset_v1.csv", header=T, sep=",") testdata <- read.table("Psychopath_Testset_v1.csv", header=T, sep=",") # Median impute all missing values # Missing values are in columns 3-339 fulldata <- apply(data[,3:339], 2, FUN=impute.NA, fill="median") data[,3:339] <- fulldata fulltestdata <- apply(testdata[,3:339], 2, FUN=impute.NA, fill="median") testdata[,3:339] <- fulltestdata # Fit a generalized boosting model # Create a formula that specifies that psychopathy is to be predicted using # all other variables (columns 3-339) in the dataframe gbm.psych.form <- as.formula(paste("psychopathy ~", paste(names(data)[c(3:339)], collapse=" + "))) # Fit the model by supplying gbm with the formula from above. # Including the train.fraction and cv.folds argument will perform # cross-validation gbm.psych.bm.1 <- gbm(gbm.psych.form, n.trees=5000, data=data, distribution="gaussian", interaction.depth=6, train.fraction=.8, cv.folds=5) # gbm.perf will return the optimal number of trees to use based on # cross-validation. Although I grew 5,000 trees, cross-validation suggests that # the optimal number of trees is about 4,332. best.cv.iter <- gbm.perf(gbm.psych.bm.1, method="cv") # 4332 # Use the trained model to predict psychopathy from the test data. gbm.psych.1.preds <- predict(gbm.psych.bm.1, newdata=testdata, best.cv.iter) # Package it in a dataframe and write it to a .csv file for uploading. 
gbm.psych.1.bm.preds <- data.frame(cbind(myID=testdata$myID, psychopathy=gbm.psych.1.preds)) write.table(gbm.psych.1.bm.preds, "gbmbm1.csv", sep=",", row.names=FALSE) par(mfrow = c(1, 3), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0)) with(airquality, { # here three plots are filled in with their respective titles plot(Wind, Ozone, main = "Ozone and Wind") plot(Solar.R, Ozone, main = "Ozone and Solar Radiation") plot(Temp, Ozone, main = "Ozone and Temperature") # this adds a line of text in the outer margin* mtext("Ozone and Weather in New York City", outer = TRUE)} ) ## Create plot on screen device with(faithful, plot(eruptions, waiting)) ## Add a main title title(main = "Old Faithful Geyser data") dev.copy(png, file = "geyserplot.png") dev.off() getwd() library(lattice) set.seed(10) x <- rnorm(100) f <- rep(0:1, each = 50) y <- x + f - f * x+ rnorm(100, sd = 0.5) f <- factor(f, labels = c("Group 1", "Group 2")) ## Plot with 2 panels with custom panel function xyplot(y ~ x | f, panel = function(x, y, ...) { # call the default panel function for xyplot panel.xyplot(x, y, ...) # adds a horizontal line at the median panel.abline(h = median(y), lty = 2) # overlays a simple linear regression line panel.lmline(x, y, col = 2) }) dev.off() library(ggplot2) qplot(displ, hwy, data = mpg, color = drv, shape = drv) qplot(displ, hwy, data = mpg, geom = c("point", "smooth"), method="lm") qplot(hwy, data = mpg, fill = drv) qplot(displ, hwy, data = mpg, facets = . 
~ drv) qplot(hwy, data = mpg, facets = drv ~ ., binwidth = 2) #initiates ggplot g <- ggplot(maacs, aes(logpm25, NocturnalSympt)) g + geom_point(alpha = 1/3) + facet_wrap(bmicat ~ no2dec, nrow = 2, ncol = 4) + geom_smooth(method="lm", se=FALSE, col="steelblue") + theme_bw(base_family = "Avenir", base_size = 10) + labs(x = expression("log " * PM[2.5]) + labs(y = "Nocturnal Symptoms”) + labs(title = "MAACS Cohort”) set.seed(1234) x <- rnorm(12,mean=rep(1:3,each=4),sd=0.2) y <- rnorm(12,mean=rep(c(1,2,1),each=4),sd=0.2) dataFrame <- data.frame(x=x,y=y) distxy <- dist(dataFrame) hClustering <- hclust(distxy) plot(hClustering) # load data frame provided load("samsungData.rda") # table of 6 types of activities table(samsungData$activity) # set up 1 x 2 panel plot par(mfrow=c(1, 2), mar = c(5, 4, 1, 1)) # converts activity to a factor variable samsungData <- transform(samsungData, activity = factor(activity)) # find only the subject 1 data sub1 <- subset(samsungData, subject == 1) # plot mean body acceleration in X direction plot(sub1[, 1], col = sub1$activity, ylab = names(sub1)[1], main = "Mean Body Acceleration for X") # plot mean body acceleration in Y direction plot(sub1[, 2], col = sub1$activity, ylab = names(sub1)[2], main = "Mean Body Acceleration for Y") # add legend legend("bottomright",legend=unique(sub1$activity),col=unique(sub1$activity), pch = 1)
/dangerofoverfitgbm.R
no_license
MadhuCheemala/MLREFERENCES
R
false
false
5,105
r
library(gbm) # impute.NA is a little function that fills in NAs with either means or medians impute.NA <- function(x, fill="mean"){ if (fill=="mean") { x.complete <- ifelse(is.na(x), mean(x, na.rm=TRUE), x) } if (fill=="median") { x.complete <- ifelse(is.na(x), median(x, na.rm=TRUE), x) } return(x.complete) } data <- read.table("Psychopath_Trainingset_v1.csv", header=T, sep=",") testdata <- read.table("Psychopath_Testset_v1.csv", header=T, sep=",") # Median impute all missing values # Missing values are in columns 3-339 fulldata <- apply(data[,3:339], 2, FUN=impute.NA, fill="median") data[,3:339] <- fulldata fulltestdata <- apply(testdata[,3:339], 2, FUN=impute.NA, fill="median") testdata[,3:339] <- fulltestdata # Fit a generalized boosting model # Create a formula that specifies that psychopathy is to be predicted using # all other variables (columns 3-339) in the dataframe gbm.psych.form <- as.formula(paste("psychopathy ~", paste(names(data)[c(3:339)], collapse=" + "))) # Fit the model by supplying gbm with the formula from above. # Including the train.fraction and cv.folds argument will perform # cross-validation gbm.psych.bm.1 <- gbm(gbm.psych.form, n.trees=5000, data=data, distribution="gaussian", interaction.depth=6, train.fraction=.8, cv.folds=5) # gbm.perf will return the optimal number of trees to use based on # cross-validation. Although I grew 5,000 trees, cross-validation suggests that # the optimal number of trees is about 4,332. best.cv.iter <- gbm.perf(gbm.psych.bm.1, method="cv") # 4332 # Use the trained model to predict psychopathy from the test data. gbm.psych.1.preds <- predict(gbm.psych.bm.1, newdata=testdata, best.cv.iter) # Package it in a dataframe and write it to a .csv file for uploading. 
gbm.psych.1.bm.preds <- data.frame(cbind(myID=testdata$myID, psychopathy=gbm.psych.1.preds)) write.table(gbm.psych.1.bm.preds, "gbmbm1.csv", sep=",", row.names=FALSE) par(mfrow = c(1, 3), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0)) with(airquality, { # here three plots are filled in with their respective titles plot(Wind, Ozone, main = "Ozone and Wind") plot(Solar.R, Ozone, main = "Ozone and Solar Radiation") plot(Temp, Ozone, main = "Ozone and Temperature") # this adds a line of text in the outer margin* mtext("Ozone and Weather in New York City", outer = TRUE)} ) ## Create plot on screen device with(faithful, plot(eruptions, waiting)) ## Add a main title title(main = "Old Faithful Geyser data") dev.copy(png, file = "geyserplot.png") dev.off() getwd() library(lattice) set.seed(10) x <- rnorm(100) f <- rep(0:1, each = 50) y <- x + f - f * x+ rnorm(100, sd = 0.5) f <- factor(f, labels = c("Group 1", "Group 2")) ## Plot with 2 panels with custom panel function xyplot(y ~ x | f, panel = function(x, y, ...) { # call the default panel function for xyplot panel.xyplot(x, y, ...) # adds a horizontal line at the median panel.abline(h = median(y), lty = 2) # overlays a simple linear regression line panel.lmline(x, y, col = 2) }) dev.off() library(ggplot2) qplot(displ, hwy, data = mpg, color = drv, shape = drv) qplot(displ, hwy, data = mpg, geom = c("point", "smooth"), method="lm") qplot(hwy, data = mpg, fill = drv) qplot(displ, hwy, data = mpg, facets = . 
~ drv) qplot(hwy, data = mpg, facets = drv ~ ., binwidth = 2) #initiates ggplot g <- ggplot(maacs, aes(logpm25, NocturnalSympt)) g + geom_point(alpha = 1/3) + facet_wrap(bmicat ~ no2dec, nrow = 2, ncol = 4) + geom_smooth(method="lm", se=FALSE, col="steelblue") + theme_bw(base_family = "Avenir", base_size = 10) + labs(x = expression("log " * PM[2.5]) + labs(y = "Nocturnal Symptoms”) + labs(title = "MAACS Cohort”) set.seed(1234) x <- rnorm(12,mean=rep(1:3,each=4),sd=0.2) y <- rnorm(12,mean=rep(c(1,2,1),each=4),sd=0.2) dataFrame <- data.frame(x=x,y=y) distxy <- dist(dataFrame) hClustering <- hclust(distxy) plot(hClustering) # load data frame provided load("samsungData.rda") # table of 6 types of activities table(samsungData$activity) # set up 1 x 2 panel plot par(mfrow=c(1, 2), mar = c(5, 4, 1, 1)) # converts activity to a factor variable samsungData <- transform(samsungData, activity = factor(activity)) # find only the subject 1 data sub1 <- subset(samsungData, subject == 1) # plot mean body acceleration in X direction plot(sub1[, 1], col = sub1$activity, ylab = names(sub1)[1], main = "Mean Body Acceleration for X") # plot mean body acceleration in Y direction plot(sub1[, 2], col = sub1$activity, ylab = names(sub1)[2], main = "Mean Body Acceleration for Y") # add legend legend("bottomright",legend=unique(sub1$activity),col=unique(sub1$activity), pch = 1)
library(MetaLonDA); packageVersion("MetaLonDA") counts <- as.matrix(read.csv("test_counts.csv", sep="\t", row.names=1, header=T)) Group = factor(c("Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", 
"Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", 
"Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Limela", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", 
"Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", 
"Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", 
"Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", 
"Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", 
"Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela")) Time = c(6.08, 6.44, 11.4, 7.06, 8.84, 8.67, 9.82, 11.04, 6.9, 6.37, 11.14, 6.41, 6.37, 9.95, 8.67, 6.97, 9.86, 11.33, 8.64, 6.05, 6.87, 9.92, 6.93, 6.37, 8.25, 11.33, 5.78, 11.43, 11.2, 5.91, 9.89, 6.34, 7.26, 5.88, 6.05, 5.91, 8.41, 11.33, 5.68, 8.71, 6.41, 8.21, 7.43, 8.25, 10.18, 11.4, 9.76, 11.01, 8.31, 8.41, 11.7, 5.88, 7.52, 11.83, 8.51, 5.95, 8.61, 11.1, 5.72, 11.47, 6.31, 11.07, 10.12, 8.34, 7.59, 11.66, 9.4, 9.1, 9.95, 11.17, 9.63, 9.99, 8.61, 5.59, 6.01, 10.09, 8.28, 5.98, 7.2, 6.64, 9.56, 11.17, 5.82, 7.2, 5.98, 7.29, 8.57, 11.14, 11.73, 11.2, 6.05, 8.54, 8.64, 11.33, 6.9, 6.34, 6.21, 8.84, 8.54, 6.24, 6.34, 7.03, 10.22, 9.95, 7.03, 5.49, 5.78, 5.95, 6.14, 11.01, 6.11, 8.44, 6.77, 10.15, 9.79, 9.0, 10.45, 8.54, 8.9, 9.63, 9.79, 6.31, 6.11, 9.92, 6.01, 11.3, 5.82, 11.04, 7.29, 8.54, 6.28, 6.01, 6.14, 6.31, 5.88, 7.26, 9.03, 5.68, 5.68, 5.59, 6.41, 9.79, 6.37, 7.62, 7.03, 5.72, 5.85, 7.13, 10.41, 5.95, 11.1, 8.38, 7.33, 7.26, 5.72, 6.37, 5.49, 11.1, 6.47, 7.66, 10.15, 11.3, 9.03, 5.98, 9.92, 6.34, 6.93, 8.64, 6.87, 11.07, 6.18, 5.49, 11.1, 5.62, 8.31, 8.64, 6.6, 5.78, 6.74, 11.6, 6.08, 8.74, 6.11, 5.65, 6.28, 10.02, 5.82, 9.92, 6.21, 8.25, 11.24, 5.49, 8.61, 7.62, 8.38, 9.66, 11.43, 9.79, 10.22, 9.92, 11.2, 
6.11, 7.06, 9.66, 5.78, 6.14, 8.9, 8.57, 5.88, 9.86, 6.08, 11.47, 5.49, 5.78, 6.18, 6.11, 6.8, 6.34, 6.64, 9.86, 6.24, 7.23, 5.91, 8.64, 8.77, 5.55, 6.41, 11.14, 9.07, 5.49, 10.35, 6.34, 9.79, 7.29, 11.79, 6.08, 11.86, 6.08, 11.83, 9.07, 5.59, 7.29, 6.28, 11.4, 11.86, 7.0, 11.24, 11.17, 9.92, 7.49, 5.65, 8.34, 6.9, 8.67, 5.95, 9.03, 11.04, 11.79, 6.05, 9.99, 6.18, 8.51, 6.31, 5.95, 5.62, 10.48, 7.1, 6.01, 7.26, 9.92, 7.03, 6.14, 8.41, 11.33, 7.0, 11.2, 5.78, 8.41, 9.99, 9.69, 6.97, 7.03, 6.97, 11.96, 11.43, 5.49, 11.47, 6.11, 7.66, 6.9, 6.21, 6.77, 8.25, 6.83, 10.09, 7.2, 9.66, 11.66, 10.09, 5.59, 5.95, 11.47, 5.49, 11.76, 6.34, 8.67, 8.54, 9.76, 6.34, 8.25, 5.91, 8.67, 5.78, 11.3, 9.86, 10.02, 10.05, 9.63, 6.18, 7.03, 5.95, 5.72, 6.64, 8.9, 6.93, 6.14, 6.74, 8.64, 5.62, 10.02, 5.52, 11.66, 8.28, 11.6, 5.98, 5.72, 9.76, 11.4, 7.2, 8.41, 8.25, 8.38, 8.41, 5.65, 11.53, 6.6, 7.29, 7.2, 8.41, 11.4, 10.35, 6.8, 8.44, 9.63, 7.16, 10.15, 6.37, 9.76, 10.05, 5.55, 5.95, 8.71, 11.43, 6.24, 11.79, 9.92, 5.52, 11.3, 7.0, 5.88, 8.67, 5.75, 9.79, 6.11, 6.64, 5.52, 5.98, 6.9, 8.51, 11.37, 11.4, 9.63, 7.66, 9.07, 10.41, 8.74, 9.59, 8.71, 9.92, 9.76, 8.84, 5.88, 11.47, 5.65, 5.98, 5.52, 7.13, 11.2, 7.16, 5.65, 7.13, 9.66, 8.34, 11.07, 5.78, 6.21, 9.03, 8.31, 6.24, 6.37, 6.51, 7.62, 11.33, 7.59, 5.75, 11.04, 5.59, 9.76, 9.82, 6.08, 6.41, 5.78, 11.24, 5.65, 6.31, 5.75, 6.18, 8.25, 5.52, 7.0, 10.12, 7.33, 7.1, 5.98, 10.05, 11.37, 6.14, 6.93, 8.87, 5.49, 5.78, 8.48, 9.92, 5.82, 11.24, 6.34, 5.95, 10.41, 8.64, 8.57, 11.37, 8.34, 7.2, 7.16, 11.73, 11.33, 6.97, 11.47, 10.41, 11.1, 11.1, 11.4, 11.83, 10.35, 11.01, 5.65, 6.67, 10.35, 11.14, 5.91, 6.08, 11.27, 11.2, 10.55, 11.01, 8.34, 9.17, 11.76, 6.18, 6.87, 8.25, 11.4, 8.51, 10.38, 7.26, 8.51, 5.95, 9.82, 7.16, 5.72, 8.97, 8.84, 8.54, 6.87, 8.44, 6.21, 9.79, 11.4, 6.18, 9.69, 7.2, 11.63, 6.08, 6.24, 6.34, 7.13, 6.14, 7.16, 11.2, 5.88, 7.26, 7.06, 6.08, 10.32, 9.92, 6.54, 8.31, 7.26, 10.02, 6.24, 6.01, 6.08, 5.68, 5.65, 8.94, 7.16, 7.29, 
8.9, 7.66, 10.48, 6.11, 8.25, 8.48, 7.75, 7.26, 11.66, 8.41, 6.9, 10.09, 8.25, 7.2, 11.24, 9.66, 5.88, 5.59, 7.46, 8.41, 11.01, 7.0, 11.04, 10.05, 6.97, 5.62, 11.3, 5.85, 7.03, 10.02, 9.63, 11.5, 9.76, 8.25, 6.87, 5.59, 6.08, 10.09, 6.24, 7.0, 11.17, 11.86, 9.76, 9.89, 9.79, 6.9, 5.55, 11.4, 11.01, 5.49, 5.62, 10.32, 6.77, 9.63, 7.26, 6.97, 7.36, 10.38, 6.34, 9.03, 8.77, 5.45, 5.62, 8.64, 6.64, 10.22, 6.21, 6.01, 9.76, 8.64, 8.34, 8.31, 9.99, 5.98, 6.34, 8.31, 11.4, 5.75, 8.97, 5.85, 7.0, 9.72, 6.24, 8.64, 6.51, 6.87, 7.13, 7.23, 6.21, 6.34, 9.69, 11.83, 11.79, 11.24, 6.05, 9.72, 6.31, 5.91, 10.38, 6.21, 7.33, 8.61, 9.76, 6.31, 5.82, 11.63, 11.01, 11.27, 6.21, 8.28, 11.01, 8.38, 5.52, 10.41, 10.05, 7.2, 11.14, 11.14, 7.0, 6.74, 11.04, 5.49, 6.9, 7.26, 6.05, 10.38, 9.72, 10.97, 10.02, 6.24, 5.91, 6.08, 11.73, 8.84, 6.11, 7.33, 8.38, 8.25, 6.41, 8.28, 6.51, 11.37, 9.66, 6.31, 6.28, 6.83, 8.41, 9.0, 5.85, 7.1, 11.47, 5.88, 5.82, 5.75, 8.87, 7.0, 11.3, 7.03, 9.92, 10.97, 6.44, 7.36, 5.49, 9.76, 6.67, 11.24, 5.62, 7.13, 10.05, 11.4, 9.86, 8.61, 9.03, 7.23, 11.17, 6.18, 11.01, 5.49, 8.54, 8.8, 10.05, 9.95, 8.97, 5.98, 8.48, 11.07, 6.47, 8.64, 7.13, 11.3, 7.56, 7.16, 7.66, 5.88, 6.11, 5.95, 7.0, 7.06, 11.01, 9.92, 5.98, 10.02, 6.24, 5.62, 11.83, 11.56, 9.92, 10.09, 6.9, 6.41, 8.28, 10.32, 5.52, 7.0, 11.14, 9.63, 7.43, 9.03, 7.23, 5.82, 5.62, 5.82, 6.18, 9.63, 7.66, 10.02, 9.86, 11.2, 5.95, 9.63, 10.48, 7.69, 5.52, 7.56, 9.66, 11.86, 11.27, 11.3, 11.01, 9.69, 9.66, 9.99, 6.24, 5.62, 6.8, 5.52, 10.02, 7.43, 6.28, 9.03, 6.18, 8.44, 6.74, 8.71, 10.28, 5.95, 9.69, 10.05, 11.76, 11.6, 6.18, 9.82, 6.08, 5.95, 5.52, 5.78, 9.72, 5.26, 6.77, 6.37, 6.87, 6.24, 11.17, 9.92, 10.02, 11.17, 8.54, 11.27, 5.85, 11.1, 11.56, 9.89, 11.3, 5.88, 10.35, 6.24, 6.28, 8.74, 11.56, 5.95, 8.61, 6.87, 8.64, 8.64, 11.04, 8.48, 5.95, 6.93, 6.28, 8.97, 8.34, 10.45, 11.01, 9.79, 5.52, 9.86, 8.71, 10.02, 5.91, 10.45, 9.86, 6.24, 6.41, 11.37, 9.76, 8.64, 6.97, 10.28, 6.74, 11.5, 8.34, 11.37, 9.79, 9.66, 
8.8, 7.2, 7.1, 5.59, 7.16, 9.63, 5.52, 5.98, 11.4, 6.9, 10.25, 9.95, 5.95, 6.28, 8.34, 10.58, 5.55, 5.75, 6.9, 7.52, 6.41, 10.02, 5.65, 5.88, 5.82, 11.01, 6.28, 8.61, 7.39, 6.05, 8.48, 6.34, 8.25, 9.03, 9.66, 7.79, 6.08, 5.95, 6.37, 11.43, 8.25, 9.92, 11.76, 8.41, 7.75, 7.33, 7.46, 8.57, 7.33, 8.44, 6.05, 11.7, 11.17, 6.74, 11.01, 8.28, 10.02, 7.29, 5.98, 11.1, 7.06, 11.5, 9.99, 7.26, 8.54, 9.89, 6.31, 6.93, 7.56, 5.85, 7.1, 7.26, 6.9, 5.68, 5.52, 7.1, 7.49, 6.9, 11.37, 7.13, 6.28, 11.83, 7.26, 11.79, 10.51, 5.49, 8.54, 5.72, 6.28, 11.17, 5.59, 10.12, 5.88, 11.04, 7.16, 8.41, 11.37, 8.38, 9.1, 9.69, 12.16, 8.44, 9.63, 5.88, 5.65, 7.36, 6.87, 9.63, 6.87, 9.03, 10.22, 11.14, 6.97, 5.49, 9.63, 8.31, 5.98, 7.0, 5.85, 9.66, 6.11, 6.28, 6.67, 5.95, 6.24, 10.05, 8.44, 7.16, 8.25, 7.36, 11.83, 5.95, 6.87, 6.01, 5.52, 11.01, 9.92, 9.86, 8.54, 8.38, 10.02, 6.97, 10.28, 7.1, 10.38, 5.59, 8.67, 8.57, 9.79, 11.56, 8.54, 6.01, 6.7, 11.1, 7.03, 6.14, 5.72, 6.34, 11.14, 11.01, 6.74, 7.06, 6.41, 10.15, 7.26, 6.31, 11.47, 5.49, 11.07, 7.59, 6.14, 7.52, 5.88, 6.21, 6.21, 10.09, 6.21, 7.26, 8.48, 5.49, 8.61, 7.03, 6.18, 6.08, 10.09, 11.27, 6.87, 5.91, 9.82, 9.89, 5.52, 5.68, 11.07, 6.87, 11.04, 7.29, 7.1, 5.55, 5.65, 8.74, 8.71, 7.43, 11.17, 6.31, 7.26, 8.25, 5.72, 7.66, 9.63, 10.48, 6.31, 5.82, 9.89, 8.51, 9.63, 9.95, 7.23, 8.02, 11.43, 6.9, 7.59, 8.38, 8.25, 8.25, 6.05, 8.57, 11.24, 5.78, 5.78, 7.06, 5.98, 8.57, 6.6, 5.49, 11.3, 8.48, 5.72, 5.59, 11.17, 8.38, 8.28, 6.18, 5.85, 11.76, 9.0, 5.95, 6.05, 6.77, 7.0, 6.83, 10.45, 10.18, 7.39, 6.34, 9.66, 6.28, 6.08, 10.09, 9.95, 6.9, 9.82, 8.41, 11.4, 8.31, 7.0, 8.48, 9.86, 11.47, 5.68, 6.34, 8.34, 9.66, 5.49, 6.44, 6.37, 7.69, 6.9, 6.11, 5.88, 9.79, 11.3, 6.05, 6.37, 10.51, 8.54, 6.11, 9.1, 11.2, 6.44, 8.44, 8.61, 11.63, 8.38, 11.43, 5.98, 5.62, 8.71, 8.51, 8.64, 7.23, 6.97, 11.7, 7.33, 8.41, 7.2, 11.76, 7.52, 6.01, 6.37, 8.84, 9.72, 8.77, 6.97, 11.01, 9.59, 6.9, 6.87, 9.89, 11.47, 11.04, 6.01, 6.54, 11.53, 9.72, 10.12, 9.63, 5.75, 9.69, 
8.67, 6.54, 5.72, 7.1, 7.66, 7.36, 11.14, 7.16, 5.98, 8.64, 6.08, 7.16, 11.3, 10.28, 5.59, 6.31, 9.76, 9.99, 8.64, 8.67, 10.97, 9.99, 11.2, 11.86, 11.24, 9.1, 9.63, 6.28, 9.76, 11.17, 9.63, 10.78, 5.95, 10.41, 11.7, 7.16, 11.33, 6.08, 8.94, 10.02, 9.69, 11.4, 8.34, 8.25, 7.03, 7.1, 6.51, 11.53, 8.28, 8.71, 11.24, 11.27, 9.69, 11.17, 8.28, 5.59, 5.95, 10.15, 7.69, 6.34, 6.21, 11.79, 8.44, 9.43, 6.24, 9.79, 6.01, 7.26, 6.34, 6.9) ID = factor(c("397", "118", "147", "193", "418", "274", "8", "224", "167", "274", "325", "50", "292", "207", "384", "58", "26", "301", "29", "352", "172", "190", "342", "304", "86", "281", "356", "330", "397", "340", "307", "255", "29", "36", "130", "278", "406", "146", "8", "49", "55", "278", "149", "168", "436", "354", "28", "90", "195", "333", "245", "80", "141", "42", "383", "122", "311", "266", "412", "431", "220", "195", "237", "59", "310", "141", "405", "302", "303", "415", "122", "269", "425", "58", "76", "52", "319", "228", "82", "43", "291", "408", "169", "140", "265", "24", "250", "116", "419", "84", "436", "180", "328", "207", "243", "328", "307", "257", "109", "217", "2", "397", "257", "146", "158", "167", "190", "52", "144", "152", "322", "325", "188", "31", "401", "318", "188", "155", "141", "172", "408", "381", "401", "82", "57", "155", "140", "256", "366", "82", "42", "411", "8", "128", "255", "81", "317", "249", "84", "413", "52", "371", "384", "68", "408", "270", "254", "25", "137", "223", "58", "332", "379", "304", "315", "340", "172", "166", "31", "282", "76", "109", "137", "53", "180", "354", "195", "47", "152", "358", "365", "296", "28", "20", "233", "255", "424", "180", "137", "418", "116", "118", "409", "371", "281", "328", "301", "356", "310", "122", "164", "176", "40", "119", "225", "57", "22", "333", "413", "109", "211", "286", "249", "372", "273", "424", "424", "301", "258", "211", "20", "49", "223", "205", "43", "408", "2", "138", "419", "164", "289", "425", "280", "347", "76", "112", "431", "332", "128", "122", 
"170", "269", "276", "320", "74", "416", "4", "225", "317", "227", "312", "384", "250", "159", "302", "139", "193", "416", "344", "71", "158", "312", "411", "231", "242", "74", "265", "66", "58", "321", "193", "39", "188", "187", "149", "2", "164", "342", "231", "273", "401", "157", "60", "140", "283", "249", "344", "276", "425", "342", "59", "406", "266", "219", "271", "173", "156", "406", "66", "256", "419", "128", "223", "292", "187", "146", "265", "442", "431", "166", "50", "53", "198", "368", "47", "366", "267", "149", "350", "171", "293", "271", "263", "344", "270", "231", "24", "171", "232", "286", "172", "405", "245", "442", "112", "27", "282", "159", "253", "275", "319", "424", "229", "257", "118", "232", "253", "305", "303", "158", "176", "261", "371", "60", "189", "442", "274", "169", "283", "269", "419", "4", "8", "223", "273", "203", "271", "166", "147", "233", "49", "431", "366", "6", "334", "6", "372", "425", "416", "138", "237", "25", "406", "60", "141", "345", "173", "228", "63", "7", "29", "9", "252", "188", "252", "264", "278", "187", "375", "35", "203", "29", "187", "397", "327", "265", "295", "438", "109", "286", "63", "173", "214", "316", "268", "169", "42", "224", "180", "24", "436", "170", "169", "318", "295", "241", "266", "136", "157", "139", "49", "289", "336", "333", "252", "336", "270", "288", "57", "28", "53", "55", "114", "229", "293", "47", "221", "316", "71", "242", "155", "387", "193", "217", "114", "302", "23", "74", "36", "204", "275", "266", "301", "180", "170", "303", "214", "52", "368", "413", "130", "347", "292", "310", "171", "35", "310", "318", "253", "274", "19", "307", "387", "64", "9", "67", "64", "398", "329", "176", "90", "258", "336", "42", "80", "193", "431", "27", "356", "164", "169", "31", "344", "223", "221", "25", "158", "138", "141", "58", "275", "409", "253", "359", "3", "39", "211", "267", "157", "231", "255", "27", "415", "409", "217", "409", "342", "354", "29", "109", "112", "218", "27", "408", "43", "155", 
"237", "144", "440", "4", "71", "88", "114", "292", "347", "144", "401", "57", "49", "209", "250", "25", "309", "269", "214", "218", "408", "296", "225", "243", "384", "166", "225", "6", "40", "333", "138", "209", "264", "415", "242", "209", "352", "257", "22", "119", "19", "371", "188", "214", "63", "286", "330", "316", "81", "223", "152", "415", "43", "381", "296", "138", "352", "53", "359", "231", "252", "189", "278", "28", "81", "405", "218", "39", "316", "332", "138", "166", "316", "40", "77", "159", "228", "36", "39", "170", "220", "116", "352", "356", "147", "400", "168", "383", "311", "318", "36", "358", "291", "252", "39", "203", "130", "321", "24", "68", "295", "22", "236", "19", "121", "204", "343", "242", "329", "170", "265", "199", "416", "228", "317", "274", "204", "19", "139", "253", "42", "57", "199", "327", "159", "400", "440", "312", "278", "271", "273", "366", "149", "310", "218", "371", "156", "415", "372", "187", "241", "203", "321", "411", "263", "440", "291", "397", "68", "275", "412", "384", "47", "281", "307", "409", "332", "375", "35", "289", "345", "264", "77", "288", "416", "170", "339", "139", "307", "280", "255", "412", "7", "368", "236", "286", "412", "122", "209", "356", "400", "340", "250", "310", "372", "164", "112", "189", "269", "336", "205", "43", "344", "74", "159", "333", "288", "20", "221", "209", "267", "241", "354", "268", "332", "128", "186", "48", "156", "265", "64", "411", "387", "243", "136", "20", "242", "400", "66", "254", "207", "116", "250", "245", "198", "368", "258", "114", "26", "198", "23", "302", "334", "224", "245", "330", "3", "63", "82", "173", "233", "243", "36", "155", "136", "3", "241", "81", "436", "368", "359", "164", "343", "368", "50", "141", "22", "112", "320", "359", "218", "387", "249", "136", "167", "361", "82", "59", "405", "334", "366", "88", "48", "406", "155", "255", "401", "263", "295", "236", "312", "436", "336", "273", "305", "398", "344", "205", "53", "31", "199", "220", "288", "305", 
"254", "86", "26", "9", "233", "207", "419", "58", "128", "198", "160", "309", "339", "52", "305", "271", "334", "365", "68", "379", "236", "139", "80", "229", "424", "317", "118", "322", "220", "397", "345", "436", "281", "387", "59", "263", "90", "256", "224", "328", "86", "71", "169", "209", "146", "413", "219", "195", "63", "224", "424", "156", "159", "197", "81", "303", "172", "140", "321", "189", "413", "270", "29", "167", "440", "256", "64", "261", "152", "291", "237", "23", "263", "440", "19", "291", "49", "418", "303", "52", "211", "166", "412", "27", "252", "229", "243", "304", "293", "309", "352", "343", "77", "236", "121", "273", "383", "40", "358", "398", "121", "270", "328", "309", "221", "86", "232", "409", "345", "311", "268", "282", "304", "258", "68", "291", "229", "48", "329", "66", "158", "322", "77", "384", "319", "205", "286", "121", "136", "2", "316", "318", "84", "229", "304", "406", "264", "90", "152", "23", "282", "418", "225", "160", "88", "288", "358", "319", "415", "425", "319", "158", "74", "318", "176", "317", "366", "249", "190", "345", "187", "381", "55", "171", "358", "330", "176", "268", "329", "375", "28", "47", "413", "144", "329", "119", "160", "340", "207", "116", "400", "268", "233", "119", "322", "60", "442", "114", "305", "283", "168", "440", "84", "22", "186", "269", "425", "55", "23", "233", "419", "249", "144", "147", "375", "336", "55", "383", "305", "365", "9", "121", "197", "315", "28", "379", "289", "122", "237", "221", "39", "411", "211", "411", "199", "228", "271", "26", "358", "401", "77", "156", "203", "60", "275", "302", "172", "339", "42", "168", "3", "236", "146", "295", "295", "176", "281", "321", "405", "50", "241", "359", "139", "152", "198", "160", "140", "232", "375", "267", "157", "264", "146", "144", "90", "190", "412", "365", "67", "136", "149", "256", "114", "321", "282", "119", "296", "214", "227", "325", "278", "381", "400", "31", "4", "86", "301", "418", "50", "301", "319", "84", "35", "80", "112", 
"261", "329", "232", "231", "157", "258", "352", "224", "168", "53", "237", "188", "173", "35", "350", "197", "268", "266", "293", "292", "289", "276", "4", "221", "219", "27", "275", "71", "253", "293", "243", "325", "379", "25", "304", "40", "130", "43", "50", "20", "207", "119", "442", "195", "320", "232", "67", "130", "322", "288", "292", "219", "198", "25", "379", "372", "59", "257", "203", "322", "361", "199", "383", "266", "24", "218", "387", "365", "137", "118", "149", "375", "256", "258", "283", "48", "263", "442", "130", "334", "225", "121", "354", "293", "167", "311", "8", "2", "270", "3", "88", "303", "283", "35", "167", "405", "156", "282", "315", "289", "250", "332", "245", "80", "195", "274", "160", "296", "371", "339", "418", "76", "173", "22", "365", "383", "228", "276", "330", "416", "379", "118", "128", "347", "63", "137", "157", "398", "267", "60", "189", "47", "147", "372")) output.metalonda.all = metalondaAll(Count = counts, Time = Time, Group = Group, ID = ID, n.perm = 10, fit.method = "nbinomial", num.intervals = 31, parall = FALSE, pvalue.threshold = 0.05, adjust.method = "BH", time.unit = "days", norm.method = "none", prefix = "test_obs_results", ylabel = "Read Counts", col = c("black", "green"))
/recipes/metalonda/test_metalonda.R
no_license
sjanssen2/ggmap
R
false
false
31,186
r
library(MetaLonDA); packageVersion("MetaLonDA") counts <- as.matrix(read.csv("test_counts.csv", sep="\t", row.names=1, header=T)) Group = factor(c("Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", 
"Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", 
"Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Limela", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", 
"Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", 
"Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", 
"Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", 
"Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", 
"Masenjere", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Limela", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Masenjere", "Masenjere", "Limela", "Masenjere", "Masenjere", "Limela", "Limela", "Limela", "Masenjere", "Limela", "Masenjere", "Limela")) Time = c(6.08, 6.44, 11.4, 7.06, 8.84, 8.67, 9.82, 11.04, 6.9, 6.37, 11.14, 6.41, 6.37, 9.95, 8.67, 6.97, 9.86, 11.33, 8.64, 6.05, 6.87, 9.92, 6.93, 6.37, 8.25, 11.33, 5.78, 11.43, 11.2, 5.91, 9.89, 6.34, 7.26, 5.88, 6.05, 5.91, 8.41, 11.33, 5.68, 8.71, 6.41, 8.21, 7.43, 8.25, 10.18, 11.4, 9.76, 11.01, 8.31, 8.41, 11.7, 5.88, 7.52, 11.83, 8.51, 5.95, 8.61, 11.1, 5.72, 11.47, 6.31, 11.07, 10.12, 8.34, 7.59, 11.66, 9.4, 9.1, 9.95, 11.17, 9.63, 9.99, 8.61, 5.59, 6.01, 10.09, 8.28, 5.98, 7.2, 6.64, 9.56, 11.17, 5.82, 7.2, 5.98, 7.29, 8.57, 11.14, 11.73, 11.2, 6.05, 8.54, 8.64, 11.33, 6.9, 6.34, 6.21, 8.84, 8.54, 6.24, 6.34, 7.03, 10.22, 9.95, 7.03, 5.49, 5.78, 5.95, 6.14, 11.01, 6.11, 8.44, 6.77, 10.15, 9.79, 9.0, 10.45, 8.54, 8.9, 9.63, 9.79, 6.31, 6.11, 9.92, 6.01, 11.3, 5.82, 11.04, 7.29, 8.54, 6.28, 6.01, 6.14, 6.31, 5.88, 7.26, 9.03, 5.68, 5.68, 5.59, 6.41, 9.79, 6.37, 7.62, 7.03, 5.72, 5.85, 7.13, 10.41, 5.95, 11.1, 8.38, 7.33, 7.26, 5.72, 6.37, 5.49, 11.1, 6.47, 7.66, 10.15, 11.3, 9.03, 5.98, 9.92, 6.34, 6.93, 8.64, 6.87, 11.07, 6.18, 5.49, 11.1, 5.62, 8.31, 8.64, 6.6, 5.78, 6.74, 11.6, 6.08, 8.74, 6.11, 5.65, 6.28, 10.02, 5.82, 9.92, 6.21, 8.25, 11.24, 5.49, 8.61, 7.62, 8.38, 9.66, 11.43, 9.79, 10.22, 9.92, 11.2, 
6.11, 7.06, 9.66, 5.78, 6.14, 8.9, 8.57, 5.88, 9.86, 6.08, 11.47, 5.49, 5.78, 6.18, 6.11, 6.8, 6.34, 6.64, 9.86, 6.24, 7.23, 5.91, 8.64, 8.77, 5.55, 6.41, 11.14, 9.07, 5.49, 10.35, 6.34, 9.79, 7.29, 11.79, 6.08, 11.86, 6.08, 11.83, 9.07, 5.59, 7.29, 6.28, 11.4, 11.86, 7.0, 11.24, 11.17, 9.92, 7.49, 5.65, 8.34, 6.9, 8.67, 5.95, 9.03, 11.04, 11.79, 6.05, 9.99, 6.18, 8.51, 6.31, 5.95, 5.62, 10.48, 7.1, 6.01, 7.26, 9.92, 7.03, 6.14, 8.41, 11.33, 7.0, 11.2, 5.78, 8.41, 9.99, 9.69, 6.97, 7.03, 6.97, 11.96, 11.43, 5.49, 11.47, 6.11, 7.66, 6.9, 6.21, 6.77, 8.25, 6.83, 10.09, 7.2, 9.66, 11.66, 10.09, 5.59, 5.95, 11.47, 5.49, 11.76, 6.34, 8.67, 8.54, 9.76, 6.34, 8.25, 5.91, 8.67, 5.78, 11.3, 9.86, 10.02, 10.05, 9.63, 6.18, 7.03, 5.95, 5.72, 6.64, 8.9, 6.93, 6.14, 6.74, 8.64, 5.62, 10.02, 5.52, 11.66, 8.28, 11.6, 5.98, 5.72, 9.76, 11.4, 7.2, 8.41, 8.25, 8.38, 8.41, 5.65, 11.53, 6.6, 7.29, 7.2, 8.41, 11.4, 10.35, 6.8, 8.44, 9.63, 7.16, 10.15, 6.37, 9.76, 10.05, 5.55, 5.95, 8.71, 11.43, 6.24, 11.79, 9.92, 5.52, 11.3, 7.0, 5.88, 8.67, 5.75, 9.79, 6.11, 6.64, 5.52, 5.98, 6.9, 8.51, 11.37, 11.4, 9.63, 7.66, 9.07, 10.41, 8.74, 9.59, 8.71, 9.92, 9.76, 8.84, 5.88, 11.47, 5.65, 5.98, 5.52, 7.13, 11.2, 7.16, 5.65, 7.13, 9.66, 8.34, 11.07, 5.78, 6.21, 9.03, 8.31, 6.24, 6.37, 6.51, 7.62, 11.33, 7.59, 5.75, 11.04, 5.59, 9.76, 9.82, 6.08, 6.41, 5.78, 11.24, 5.65, 6.31, 5.75, 6.18, 8.25, 5.52, 7.0, 10.12, 7.33, 7.1, 5.98, 10.05, 11.37, 6.14, 6.93, 8.87, 5.49, 5.78, 8.48, 9.92, 5.82, 11.24, 6.34, 5.95, 10.41, 8.64, 8.57, 11.37, 8.34, 7.2, 7.16, 11.73, 11.33, 6.97, 11.47, 10.41, 11.1, 11.1, 11.4, 11.83, 10.35, 11.01, 5.65, 6.67, 10.35, 11.14, 5.91, 6.08, 11.27, 11.2, 10.55, 11.01, 8.34, 9.17, 11.76, 6.18, 6.87, 8.25, 11.4, 8.51, 10.38, 7.26, 8.51, 5.95, 9.82, 7.16, 5.72, 8.97, 8.84, 8.54, 6.87, 8.44, 6.21, 9.79, 11.4, 6.18, 9.69, 7.2, 11.63, 6.08, 6.24, 6.34, 7.13, 6.14, 7.16, 11.2, 5.88, 7.26, 7.06, 6.08, 10.32, 9.92, 6.54, 8.31, 7.26, 10.02, 6.24, 6.01, 6.08, 5.68, 5.65, 8.94, 7.16, 7.29, 
8.9, 7.66, 10.48, 6.11, 8.25, 8.48, 7.75, 7.26, 11.66, 8.41, 6.9, 10.09, 8.25, 7.2, 11.24, 9.66, 5.88, 5.59, 7.46, 8.41, 11.01, 7.0, 11.04, 10.05, 6.97, 5.62, 11.3, 5.85, 7.03, 10.02, 9.63, 11.5, 9.76, 8.25, 6.87, 5.59, 6.08, 10.09, 6.24, 7.0, 11.17, 11.86, 9.76, 9.89, 9.79, 6.9, 5.55, 11.4, 11.01, 5.49, 5.62, 10.32, 6.77, 9.63, 7.26, 6.97, 7.36, 10.38, 6.34, 9.03, 8.77, 5.45, 5.62, 8.64, 6.64, 10.22, 6.21, 6.01, 9.76, 8.64, 8.34, 8.31, 9.99, 5.98, 6.34, 8.31, 11.4, 5.75, 8.97, 5.85, 7.0, 9.72, 6.24, 8.64, 6.51, 6.87, 7.13, 7.23, 6.21, 6.34, 9.69, 11.83, 11.79, 11.24, 6.05, 9.72, 6.31, 5.91, 10.38, 6.21, 7.33, 8.61, 9.76, 6.31, 5.82, 11.63, 11.01, 11.27, 6.21, 8.28, 11.01, 8.38, 5.52, 10.41, 10.05, 7.2, 11.14, 11.14, 7.0, 6.74, 11.04, 5.49, 6.9, 7.26, 6.05, 10.38, 9.72, 10.97, 10.02, 6.24, 5.91, 6.08, 11.73, 8.84, 6.11, 7.33, 8.38, 8.25, 6.41, 8.28, 6.51, 11.37, 9.66, 6.31, 6.28, 6.83, 8.41, 9.0, 5.85, 7.1, 11.47, 5.88, 5.82, 5.75, 8.87, 7.0, 11.3, 7.03, 9.92, 10.97, 6.44, 7.36, 5.49, 9.76, 6.67, 11.24, 5.62, 7.13, 10.05, 11.4, 9.86, 8.61, 9.03, 7.23, 11.17, 6.18, 11.01, 5.49, 8.54, 8.8, 10.05, 9.95, 8.97, 5.98, 8.48, 11.07, 6.47, 8.64, 7.13, 11.3, 7.56, 7.16, 7.66, 5.88, 6.11, 5.95, 7.0, 7.06, 11.01, 9.92, 5.98, 10.02, 6.24, 5.62, 11.83, 11.56, 9.92, 10.09, 6.9, 6.41, 8.28, 10.32, 5.52, 7.0, 11.14, 9.63, 7.43, 9.03, 7.23, 5.82, 5.62, 5.82, 6.18, 9.63, 7.66, 10.02, 9.86, 11.2, 5.95, 9.63, 10.48, 7.69, 5.52, 7.56, 9.66, 11.86, 11.27, 11.3, 11.01, 9.69, 9.66, 9.99, 6.24, 5.62, 6.8, 5.52, 10.02, 7.43, 6.28, 9.03, 6.18, 8.44, 6.74, 8.71, 10.28, 5.95, 9.69, 10.05, 11.76, 11.6, 6.18, 9.82, 6.08, 5.95, 5.52, 5.78, 9.72, 5.26, 6.77, 6.37, 6.87, 6.24, 11.17, 9.92, 10.02, 11.17, 8.54, 11.27, 5.85, 11.1, 11.56, 9.89, 11.3, 5.88, 10.35, 6.24, 6.28, 8.74, 11.56, 5.95, 8.61, 6.87, 8.64, 8.64, 11.04, 8.48, 5.95, 6.93, 6.28, 8.97, 8.34, 10.45, 11.01, 9.79, 5.52, 9.86, 8.71, 10.02, 5.91, 10.45, 9.86, 6.24, 6.41, 11.37, 9.76, 8.64, 6.97, 10.28, 6.74, 11.5, 8.34, 11.37, 9.79, 9.66, 
8.8, 7.2, 7.1, 5.59, 7.16, 9.63, 5.52, 5.98, 11.4, 6.9, 10.25, 9.95, 5.95, 6.28, 8.34, 10.58, 5.55, 5.75, 6.9, 7.52, 6.41, 10.02, 5.65, 5.88, 5.82, 11.01, 6.28, 8.61, 7.39, 6.05, 8.48, 6.34, 8.25, 9.03, 9.66, 7.79, 6.08, 5.95, 6.37, 11.43, 8.25, 9.92, 11.76, 8.41, 7.75, 7.33, 7.46, 8.57, 7.33, 8.44, 6.05, 11.7, 11.17, 6.74, 11.01, 8.28, 10.02, 7.29, 5.98, 11.1, 7.06, 11.5, 9.99, 7.26, 8.54, 9.89, 6.31, 6.93, 7.56, 5.85, 7.1, 7.26, 6.9, 5.68, 5.52, 7.1, 7.49, 6.9, 11.37, 7.13, 6.28, 11.83, 7.26, 11.79, 10.51, 5.49, 8.54, 5.72, 6.28, 11.17, 5.59, 10.12, 5.88, 11.04, 7.16, 8.41, 11.37, 8.38, 9.1, 9.69, 12.16, 8.44, 9.63, 5.88, 5.65, 7.36, 6.87, 9.63, 6.87, 9.03, 10.22, 11.14, 6.97, 5.49, 9.63, 8.31, 5.98, 7.0, 5.85, 9.66, 6.11, 6.28, 6.67, 5.95, 6.24, 10.05, 8.44, 7.16, 8.25, 7.36, 11.83, 5.95, 6.87, 6.01, 5.52, 11.01, 9.92, 9.86, 8.54, 8.38, 10.02, 6.97, 10.28, 7.1, 10.38, 5.59, 8.67, 8.57, 9.79, 11.56, 8.54, 6.01, 6.7, 11.1, 7.03, 6.14, 5.72, 6.34, 11.14, 11.01, 6.74, 7.06, 6.41, 10.15, 7.26, 6.31, 11.47, 5.49, 11.07, 7.59, 6.14, 7.52, 5.88, 6.21, 6.21, 10.09, 6.21, 7.26, 8.48, 5.49, 8.61, 7.03, 6.18, 6.08, 10.09, 11.27, 6.87, 5.91, 9.82, 9.89, 5.52, 5.68, 11.07, 6.87, 11.04, 7.29, 7.1, 5.55, 5.65, 8.74, 8.71, 7.43, 11.17, 6.31, 7.26, 8.25, 5.72, 7.66, 9.63, 10.48, 6.31, 5.82, 9.89, 8.51, 9.63, 9.95, 7.23, 8.02, 11.43, 6.9, 7.59, 8.38, 8.25, 8.25, 6.05, 8.57, 11.24, 5.78, 5.78, 7.06, 5.98, 8.57, 6.6, 5.49, 11.3, 8.48, 5.72, 5.59, 11.17, 8.38, 8.28, 6.18, 5.85, 11.76, 9.0, 5.95, 6.05, 6.77, 7.0, 6.83, 10.45, 10.18, 7.39, 6.34, 9.66, 6.28, 6.08, 10.09, 9.95, 6.9, 9.82, 8.41, 11.4, 8.31, 7.0, 8.48, 9.86, 11.47, 5.68, 6.34, 8.34, 9.66, 5.49, 6.44, 6.37, 7.69, 6.9, 6.11, 5.88, 9.79, 11.3, 6.05, 6.37, 10.51, 8.54, 6.11, 9.1, 11.2, 6.44, 8.44, 8.61, 11.63, 8.38, 11.43, 5.98, 5.62, 8.71, 8.51, 8.64, 7.23, 6.97, 11.7, 7.33, 8.41, 7.2, 11.76, 7.52, 6.01, 6.37, 8.84, 9.72, 8.77, 6.97, 11.01, 9.59, 6.9, 6.87, 9.89, 11.47, 11.04, 6.01, 6.54, 11.53, 9.72, 10.12, 9.63, 5.75, 9.69, 
8.67, 6.54, 5.72, 7.1, 7.66, 7.36, 11.14, 7.16, 5.98, 8.64, 6.08, 7.16, 11.3, 10.28, 5.59, 6.31, 9.76, 9.99, 8.64, 8.67, 10.97, 9.99, 11.2, 11.86, 11.24, 9.1, 9.63, 6.28, 9.76, 11.17, 9.63, 10.78, 5.95, 10.41, 11.7, 7.16, 11.33, 6.08, 8.94, 10.02, 9.69, 11.4, 8.34, 8.25, 7.03, 7.1, 6.51, 11.53, 8.28, 8.71, 11.24, 11.27, 9.69, 11.17, 8.28, 5.59, 5.95, 10.15, 7.69, 6.34, 6.21, 11.79, 8.44, 9.43, 6.24, 9.79, 6.01, 7.26, 6.34, 6.9) ID = factor(c("397", "118", "147", "193", "418", "274", "8", "224", "167", "274", "325", "50", "292", "207", "384", "58", "26", "301", "29", "352", "172", "190", "342", "304", "86", "281", "356", "330", "397", "340", "307", "255", "29", "36", "130", "278", "406", "146", "8", "49", "55", "278", "149", "168", "436", "354", "28", "90", "195", "333", "245", "80", "141", "42", "383", "122", "311", "266", "412", "431", "220", "195", "237", "59", "310", "141", "405", "302", "303", "415", "122", "269", "425", "58", "76", "52", "319", "228", "82", "43", "291", "408", "169", "140", "265", "24", "250", "116", "419", "84", "436", "180", "328", "207", "243", "328", "307", "257", "109", "217", "2", "397", "257", "146", "158", "167", "190", "52", "144", "152", "322", "325", "188", "31", "401", "318", "188", "155", "141", "172", "408", "381", "401", "82", "57", "155", "140", "256", "366", "82", "42", "411", "8", "128", "255", "81", "317", "249", "84", "413", "52", "371", "384", "68", "408", "270", "254", "25", "137", "223", "58", "332", "379", "304", "315", "340", "172", "166", "31", "282", "76", "109", "137", "53", "180", "354", "195", "47", "152", "358", "365", "296", "28", "20", "233", "255", "424", "180", "137", "418", "116", "118", "409", "371", "281", "328", "301", "356", "310", "122", "164", "176", "40", "119", "225", "57", "22", "333", "413", "109", "211", "286", "249", "372", "273", "424", "424", "301", "258", "211", "20", "49", "223", "205", "43", "408", "2", "138", "419", "164", "289", "425", "280", "347", "76", "112", "431", "332", "128", "122", 
"170", "269", "276", "320", "74", "416", "4", "225", "317", "227", "312", "384", "250", "159", "302", "139", "193", "416", "344", "71", "158", "312", "411", "231", "242", "74", "265", "66", "58", "321", "193", "39", "188", "187", "149", "2", "164", "342", "231", "273", "401", "157", "60", "140", "283", "249", "344", "276", "425", "342", "59", "406", "266", "219", "271", "173", "156", "406", "66", "256", "419", "128", "223", "292", "187", "146", "265", "442", "431", "166", "50", "53", "198", "368", "47", "366", "267", "149", "350", "171", "293", "271", "263", "344", "270", "231", "24", "171", "232", "286", "172", "405", "245", "442", "112", "27", "282", "159", "253", "275", "319", "424", "229", "257", "118", "232", "253", "305", "303", "158", "176", "261", "371", "60", "189", "442", "274", "169", "283", "269", "419", "4", "8", "223", "273", "203", "271", "166", "147", "233", "49", "431", "366", "6", "334", "6", "372", "425", "416", "138", "237", "25", "406", "60", "141", "345", "173", "228", "63", "7", "29", "9", "252", "188", "252", "264", "278", "187", "375", "35", "203", "29", "187", "397", "327", "265", "295", "438", "109", "286", "63", "173", "214", "316", "268", "169", "42", "224", "180", "24", "436", "170", "169", "318", "295", "241", "266", "136", "157", "139", "49", "289", "336", "333", "252", "336", "270", "288", "57", "28", "53", "55", "114", "229", "293", "47", "221", "316", "71", "242", "155", "387", "193", "217", "114", "302", "23", "74", "36", "204", "275", "266", "301", "180", "170", "303", "214", "52", "368", "413", "130", "347", "292", "310", "171", "35", "310", "318", "253", "274", "19", "307", "387", "64", "9", "67", "64", "398", "329", "176", "90", "258", "336", "42", "80", "193", "431", "27", "356", "164", "169", "31", "344", "223", "221", "25", "158", "138", "141", "58", "275", "409", "253", "359", "3", "39", "211", "267", "157", "231", "255", "27", "415", "409", "217", "409", "342", "354", "29", "109", "112", "218", "27", "408", "43", "155", 
"237", "144", "440", "4", "71", "88", "114", "292", "347", "144", "401", "57", "49", "209", "250", "25", "309", "269", "214", "218", "408", "296", "225", "243", "384", "166", "225", "6", "40", "333", "138", "209", "264", "415", "242", "209", "352", "257", "22", "119", "19", "371", "188", "214", "63", "286", "330", "316", "81", "223", "152", "415", "43", "381", "296", "138", "352", "53", "359", "231", "252", "189", "278", "28", "81", "405", "218", "39", "316", "332", "138", "166", "316", "40", "77", "159", "228", "36", "39", "170", "220", "116", "352", "356", "147", "400", "168", "383", "311", "318", "36", "358", "291", "252", "39", "203", "130", "321", "24", "68", "295", "22", "236", "19", "121", "204", "343", "242", "329", "170", "265", "199", "416", "228", "317", "274", "204", "19", "139", "253", "42", "57", "199", "327", "159", "400", "440", "312", "278", "271", "273", "366", "149", "310", "218", "371", "156", "415", "372", "187", "241", "203", "321", "411", "263", "440", "291", "397", "68", "275", "412", "384", "47", "281", "307", "409", "332", "375", "35", "289", "345", "264", "77", "288", "416", "170", "339", "139", "307", "280", "255", "412", "7", "368", "236", "286", "412", "122", "209", "356", "400", "340", "250", "310", "372", "164", "112", "189", "269", "336", "205", "43", "344", "74", "159", "333", "288", "20", "221", "209", "267", "241", "354", "268", "332", "128", "186", "48", "156", "265", "64", "411", "387", "243", "136", "20", "242", "400", "66", "254", "207", "116", "250", "245", "198", "368", "258", "114", "26", "198", "23", "302", "334", "224", "245", "330", "3", "63", "82", "173", "233", "243", "36", "155", "136", "3", "241", "81", "436", "368", "359", "164", "343", "368", "50", "141", "22", "112", "320", "359", "218", "387", "249", "136", "167", "361", "82", "59", "405", "334", "366", "88", "48", "406", "155", "255", "401", "263", "295", "236", "312", "436", "336", "273", "305", "398", "344", "205", "53", "31", "199", "220", "288", "305", 
"254", "86", "26", "9", "233", "207", "419", "58", "128", "198", "160", "309", "339", "52", "305", "271", "334", "365", "68", "379", "236", "139", "80", "229", "424", "317", "118", "322", "220", "397", "345", "436", "281", "387", "59", "263", "90", "256", "224", "328", "86", "71", "169", "209", "146", "413", "219", "195", "63", "224", "424", "156", "159", "197", "81", "303", "172", "140", "321", "189", "413", "270", "29", "167", "440", "256", "64", "261", "152", "291", "237", "23", "263", "440", "19", "291", "49", "418", "303", "52", "211", "166", "412", "27", "252", "229", "243", "304", "293", "309", "352", "343", "77", "236", "121", "273", "383", "40", "358", "398", "121", "270", "328", "309", "221", "86", "232", "409", "345", "311", "268", "282", "304", "258", "68", "291", "229", "48", "329", "66", "158", "322", "77", "384", "319", "205", "286", "121", "136", "2", "316", "318", "84", "229", "304", "406", "264", "90", "152", "23", "282", "418", "225", "160", "88", "288", "358", "319", "415", "425", "319", "158", "74", "318", "176", "317", "366", "249", "190", "345", "187", "381", "55", "171", "358", "330", "176", "268", "329", "375", "28", "47", "413", "144", "329", "119", "160", "340", "207", "116", "400", "268", "233", "119", "322", "60", "442", "114", "305", "283", "168", "440", "84", "22", "186", "269", "425", "55", "23", "233", "419", "249", "144", "147", "375", "336", "55", "383", "305", "365", "9", "121", "197", "315", "28", "379", "289", "122", "237", "221", "39", "411", "211", "411", "199", "228", "271", "26", "358", "401", "77", "156", "203", "60", "275", "302", "172", "339", "42", "168", "3", "236", "146", "295", "295", "176", "281", "321", "405", "50", "241", "359", "139", "152", "198", "160", "140", "232", "375", "267", "157", "264", "146", "144", "90", "190", "412", "365", "67", "136", "149", "256", "114", "321", "282", "119", "296", "214", "227", "325", "278", "381", "400", "31", "4", "86", "301", "418", "50", "301", "319", "84", "35", "80", "112", 
"261", "329", "232", "231", "157", "258", "352", "224", "168", "53", "237", "188", "173", "35", "350", "197", "268", "266", "293", "292", "289", "276", "4", "221", "219", "27", "275", "71", "253", "293", "243", "325", "379", "25", "304", "40", "130", "43", "50", "20", "207", "119", "442", "195", "320", "232", "67", "130", "322", "288", "292", "219", "198", "25", "379", "372", "59", "257", "203", "322", "361", "199", "383", "266", "24", "218", "387", "365", "137", "118", "149", "375", "256", "258", "283", "48", "263", "442", "130", "334", "225", "121", "354", "293", "167", "311", "8", "2", "270", "3", "88", "303", "283", "35", "167", "405", "156", "282", "315", "289", "250", "332", "245", "80", "195", "274", "160", "296", "371", "339", "418", "76", "173", "22", "365", "383", "228", "276", "330", "416", "379", "118", "128", "347", "63", "137", "157", "398", "267", "60", "189", "47", "147", "372")) output.metalonda.all = metalondaAll(Count = counts, Time = Time, Group = Group, ID = ID, n.perm = 10, fit.method = "nbinomial", num.intervals = 31, parall = FALSE, pvalue.threshold = 0.05, adjust.method = "BH", time.unit = "days", norm.method = "none", prefix = "test_obs_results", ylabel = "Read Counts", col = c("black", "green"))
# makeCacheMatrix: build a cache-aware wrapper around a matrix.
# Returns a list of functions to:
#   1. set    - replace the stored matrix (invalidating any cached inverse)
#   2. get    - return the stored matrix
#   3. setinv - store a computed inverse in the cache
#   4. getinv - retrieve the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = numeric()) {
  # i holds the cached inverse; NULL until cacheSolve stores one
  i <- NULL
  # Replace the stored matrix; a new matrix invalidates the stale inverse
  set <- function(y) {
    x <<- y
    i <<- NULL
  }
  # Return the stored matrix
  get <- function() x
  # Store / retrieve the inverse in the cache
  setinv <- function(inverse) i <<- inverse
  getinv <- function() i
  # Expose the four accessors to the caller
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}

## cacheSolve: return the inverse of the matrix wrapped by makeCacheMatrix.
## The inverse is computed at most once; later calls return the cached copy
## (with a "getting cached data" message).
cacheSolve <- function(x, ...) {
  # Return the cached inverse if it exists
  i <- x$getinv()
  if (!is.null(i)) {
    message("getting cached data")
    return(i)
  }
  # Otherwise compute the inverse and store it in the cache
  data <- x$get()
  i <- solve(data, ...)
  # Bug fix: the original called x$set(i), which overwrote the stored
  # matrix with its inverse AND cleared the cache again (set() resets i
  # to NULL). Store the inverse via setinv instead.
  x$setinv(i)
  i
}

# Example:
# > x <- matrix(rnorm(25), nrow = 5, ncol = 5)  # create a matrix
# > fx <- makeCacheMatrix(x)                    # wrap it
# > fx$get()                                    # retrieve the matrix
# > cacheSolve(fx)                              # compute / fetch inverse
/cachematrix.R
no_license
Koncarius/ProgrammingAssignment2
R
false
false
2,045
r
# makeCacheMatrix: build a cache-aware wrapper around a matrix.
# Returns a list of functions to:
#   1. set    - replace the stored matrix (invalidating any cached inverse)
#   2. get    - return the stored matrix
#   3. setinv - store a computed inverse in the cache
#   4. getinv - retrieve the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = numeric()) {
  # i holds the cached inverse; NULL until cacheSolve stores one
  i <- NULL
  # Replace the stored matrix; a new matrix invalidates the stale inverse
  set <- function(y) {
    x <<- y
    i <<- NULL
  }
  # Return the stored matrix
  get <- function() x
  # Store / retrieve the inverse in the cache
  setinv <- function(inverse) i <<- inverse
  getinv <- function() i
  # Expose the four accessors to the caller
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}

## cacheSolve: return the inverse of the matrix wrapped by makeCacheMatrix.
## The inverse is computed at most once; later calls return the cached copy
## (with a "getting cached data" message).
cacheSolve <- function(x, ...) {
  # Return the cached inverse if it exists
  i <- x$getinv()
  if (!is.null(i)) {
    message("getting cached data")
    return(i)
  }
  # Otherwise compute the inverse and store it in the cache
  data <- x$get()
  i <- solve(data, ...)
  # Bug fix: the original called x$set(i), which overwrote the stored
  # matrix with its inverse AND cleared the cache again (set() resets i
  # to NULL). Store the inverse via setinv instead.
  x$setinv(i)
  i
}

# Example:
# > x <- matrix(rnorm(25), nrow = 5, ncol = 5)  # create a matrix
# > fx <- makeCacheMatrix(x)                    # wrap it
# > fx$get()                                    # retrieve the matrix
# > cacheSolve(fx)                              # compute / fetch inverse
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/wpgpCovariates.R \name{wpgpGetCountryCovariate} \alias{wpgpGetCountryCovariate} \title{wpgpGetCountryCovariate function will download files and return a list with the file paths to the requested covariates for one or more countries} \usage{ wpgpGetCountryCovariate(df.user = NULL, ISO3 = NULL, covariate = NULL, destDir = tempdir(), username = NULL, password = NULL, quiet = TRUE, frCSVDownload = FALSE, method = "auto") } \arguments{ \item{df.user}{data frame of files to download. Must contain ISO3, Folder, and RstName. If not supplied, must give ISO3, year, and covariate} \item{ISO3}{a 3-character country code or vector of country codes. Optional if df.user supplied} \item{covariate}{Covariate name(s). Optional if df.user supplied} \item{destDir}{Path to the folder where you want to save raster file} \item{username}{ftp username to WorldPop ftp server} \item{password}{ftp password to WorldPop ftp server} \item{quiet}{Download without any messages if TRUE} \item{frCSVDownload}{If TRUE, a new wpgAllCovariates.csv file will be downloaded} \item{method}{Method to be used for downloading files. Current download methods are "internal", "wininet" (Windows only), "libcurl", "wget" and "curl", and there is a value "auto"} } \value{ List of files downloaded, including file paths } \description{ wpgpGetCountryCovariate function will download files and return a list with the file paths to the requested covariates for one or more countries } \examples{ wpgpGetCountryCovariate(df.user = NULL,'NPL','px_area','G:/WorldPop/','ftpUsername','ftpPassword') }
/man/wpgpGetCountryCovariate.Rd
no_license
wpgp/wpgpCovariates
R
false
true
1,694
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/wpgpCovariates.R \name{wpgpGetCountryCovariate} \alias{wpgpGetCountryCovariate} \title{wpgpGetCountryCovariate function will download files and return a list with the file paths to the requested covariates for one or more countries} \usage{ wpgpGetCountryCovariate(df.user = NULL, ISO3 = NULL, covariate = NULL, destDir = tempdir(), username = NULL, password = NULL, quiet = TRUE, frCSVDownload = FALSE, method = "auto") } \arguments{ \item{df.user}{data frame of files to download. Must contain ISO3, Folder, and RstName. If not supplied, must give ISO3, year, and covariate} \item{ISO3}{a 3-character country code or vector of country codes. Optional if df.user supplied} \item{covariate}{Covariate name(s). Optional if df.user supplied} \item{destDir}{Path to the folder where you want to save raster file} \item{username}{ftp username to WorldPop ftp server} \item{password}{ftp password to WorldPop ftp server} \item{quiet}{Download without any messages if TRUE} \item{frCSVDownload}{If TRUE, a new wpgAllCovariates.csv file will be downloaded} \item{method}{Method to be used for downloading files. Current download methods are "internal", "wininet" (Windows only), "libcurl", "wget" and "curl", and there is a value "auto"} } \value{ List of files downloaded, including file paths } \description{ wpgpGetCountryCovariate function will download files and return a list with the file paths to the requested covariates for one or more countries } \examples{ wpgpGetCountryCovariate(df.user = NULL,'NPL','px_area','G:/WorldPop/','ftpUsername','ftpPassword') }
## First-midterm linear-programming exercises solved with lpSolveAPI.
## Each exercise sets up a 2-variable LP, switches the sense to
## maximization, and prints the model, the optimal objective value and
## the decision variables.

## Exercise 1 ----
library(lpSolveAPI)
model <- make.lp(0, 2)
set.objfn(model, c(250, 150))
add.constraint(model, c(2, 1.5), "<=", 40)
add.constraint(model, c(1.5, 1), "<=", 40)
lp.control(model, sense = "max")
print(model)
solve(model)
get.objective(model)
get.variables(model)

## Exercise 2 ----
library(lpSolveAPI)
model <- make.lp(0, 2)
set.objfn(model, c(1, 2))
add.constraint(model, c(1, 3), "<=", 9)
add.constraint(model, c(2, 1), "<=", 8)
lp.control(model, sense = "max")
print(model)
solve(model)
get.objective(model)
get.variables(model)

## Exercise 4 ----
library(lpSolveAPI)
model <- make.lp(0, 2)
set.objfn(model, c(30, 15))
add.constraint(model, c(2, 1), "<=", 200)
add.constraint(model, c(2, 3), "<=", 300)
lp.control(model, sense = "max")
print(model)
solve(model)
get.objective(model)
get.variables(model)

## Exercise 5 ----
library(lpSolveAPI)
model <- make.lp(0, 2)
set.objfn(model, c(6, 5))
add.constraint(model, c(1, 1), "<=", 5)
add.constraint(model, c(3, 2), "<=", 12)
lp.control(model, sense = "max")
print(model)
solve(model)
get.objective(model)
get.variables(model)
/Parcial1.R
no_license
naimmanriquez/modelacion_R
R
false
false
1,297
r
## First-midterm linear-programming exercises solved with lpSolveAPI.
## Each exercise sets up a 2-variable LP, switches the sense to
## maximization, and prints the model, the optimal objective value and
## the decision variables.

## Exercise 1 ----
library(lpSolveAPI)
model <- make.lp(0, 2)
set.objfn(model, c(250, 150))
add.constraint(model, c(2, 1.5), "<=", 40)
add.constraint(model, c(1.5, 1), "<=", 40)
lp.control(model, sense = "max")
print(model)
solve(model)
get.objective(model)
get.variables(model)

## Exercise 2 ----
library(lpSolveAPI)
model <- make.lp(0, 2)
set.objfn(model, c(1, 2))
add.constraint(model, c(1, 3), "<=", 9)
add.constraint(model, c(2, 1), "<=", 8)
lp.control(model, sense = "max")
print(model)
solve(model)
get.objective(model)
get.variables(model)

## Exercise 4 ----
library(lpSolveAPI)
model <- make.lp(0, 2)
set.objfn(model, c(30, 15))
add.constraint(model, c(2, 1), "<=", 200)
add.constraint(model, c(2, 3), "<=", 300)
lp.control(model, sense = "max")
print(model)
solve(model)
get.objective(model)
get.variables(model)

## Exercise 5 ----
library(lpSolveAPI)
model <- make.lp(0, 2)
set.objfn(model, c(6, 5))
add.constraint(model, c(1, 1), "<=", 5)
add.constraint(model, c(3, 2), "<=", 12)
lp.control(model, sense = "max")
print(model)
solve(model)
get.objective(model)
get.variables(model)
## ---
## title: "plant: A package for modelling forest trait ecology & evolution:
## _Patch-level emergent properties_"
## ---

## The aim here is to use the plant package to investigate dynamics
## within a patch of competing plants, focussing on emergent patch-level
## properties, rather than properties of plants within the patch.
library(plant)

p0 <- scm_base_parameters("FF16")
p0$control$equilibrium_nsteps <- 30
p0$control$equilibrium_solver_name <- "hybrid"
p0$disturbance_mean_interval <- 30.0

## We'll work with a single species at equilibrium
p1 <- expand_parameters(trait_matrix(0.0825, "lma"), p0, FALSE)
p1_eq <- equilibrium_seed_rain(p1)
data1 <- run_scm_collect(p1_eq)

## Helpers for picking out a handful of focal times to highlight.
closest <- function(t, time) {
  which.min(abs(time - t))
}
last <- function(x) {
  x[[length(x)]]
}

times <- c(5, 10, 20, 40, last(data1$time))
i <- vapply(times, closest, integer(1), data1$time)

blues <- c("#DEEBF7", "#C6DBEF", "#9ECAE1", "#6BAED6",
           "#4292C6", "#2171B5", "#08519C", "#08306B")
cols <- colorRampPalette(blues[-(1:2)])(length(i))

height <- t(data1$species[[1]]["height", , ])
log_density <- t(data1$species[[1]]["log_density", , ])

## Density is a matrix of time x cohort identity; it has been transposed
## above because `matplot` works column-by-column, so each column should
## be the state of the system at a given time.
density <- exp(log_density)
matplot(height, density, type = "l", lty = 1,
        col = make_transparent("black", 0.15),
        xlab = "Height (m)", ylab = "Density (1 / m / m2)",
        las = 1, log = "y")

## Note that densities can be *extremely* low here (the "atto-fox
## problem"), yet individuals within the cohorts continue to grow.
## The trajectories are easier to understand with a few highlighted:
xlim <- c(0, max(height, na.rm = TRUE) * 1.05)
matplot(height, density, type = "l", lty = 1,
        col = make_transparent("black", 0.15),
        xlab = "Height (m)", ylab = "Density (1 / m / m2)",
        las = 1, log = "y", xlim = xlim)
matlines(height[, i], density[, i], col = cols, lty = 1, type = "l")
points(height[1, i], density[1, i], pch = 19, col = cols)
text(height[1, i] + strwidth("x"), density[1, i],
     paste0(round(times), c(" years", rep("", length(times) - 1))),
     adj = c(0, 0))

## Leaf-area profile with respect to height: reconstruct the patches and
## use an unexported helper from plant to pad the results into a matrix.
patch <- lapply(seq_along(data1$time), scm_patch, data1)
leaf_area <- lapply(patch, function(x) x$species[[1]]$area_leafs)
leaf_area <- do.call("cbind", plant:::pad_matrix(leaf_area))
matplot(height, leaf_area, type = "l", lty = 1, col = "lightgrey",
        xlim = xlim, xlab = "Height (m)",
        ylab = "Leaf area density (m2 / m2 / m)", las = 1)
matlines(height[, i], leaf_area[, i], col = cols, lty = 1, type = "l")
points(height[1, i], leaf_area[1, i], pch = 19, col = cols)
text(height[1, i] + strwidth("x"), leaf_area[1, i],
     paste0(round(times), c(" years", rep("", length(times) - 1))),
     adj = c(0, 0))

## Height growth rate profile: rates are the first row of the ODE state,
## reduced by the light environment the plants themselves construct.
ode_size <- patch[[1]]$species[[1]]$seed$ode_size
growth_rate <- lapply(patch, function(x)
  matrix(x$species[[1]]$ode_rates, ode_size)[1, ])
growth_rate <- do.call("cbind", plant:::pad_matrix(growth_rate))
matplot(height, growth_rate, type = "l", lty = 1, col = "lightgrey",
        xlim = xlim, xlab = "Height (m)",
        ylab = "Height growth rate (m / year)", las = 1)
matlines(height[, i], growth_rate[, i], col = cols, lty = 1, type = "l")
points(height[1, i], growth_rate[1, i], pch = 19, col = cols)
text(height[1, i] + strwidth("x"), growth_rate[1, i],
     paste0(round(times), c(" years", rep("", length(times) - 1))),
     adj = c(0, 0))

## Metapopulation average: interpolate each patch's height-density curve
## onto a fixed series of heights, then integrate across patch ages
## weighted by patch abundance (for the formula see demography vignette).
hh <- seq_log_range(range(height, na.rm = TRUE), 500)

## Spline interpolation on log-log-scaled data, clamped so that x values
## outside the observed range map to zero.
f <- function(height, density, hout) {
  r <- range(height, na.rm = TRUE)
  clamp_domain(splinefun_loglog(height, density), r, 0)(hout)
}

## Interpolate the height-density relationship in each patch onto hh.
xx <- lapply(seq_along(data1$time), function(i)
  f(height[, i], density[, i], hh))
n_hh <- plant:::pad_list_to_array(xx)

## For each height, integrate across patches (rows), weighting by
## patch abundance.
trapezium <- plant:::trapezium
n_av <- apply(n_hh, 1, function(x)
  trapezium(data1$time, x * data1$patch_density))

## Replot the density trajectories with the metapopulation average (red).
xlim <- c(0, max(height, na.rm = TRUE) * 1.05)
matplot(height, density, type = "l", lty = 1,
        col = make_transparent("black", 0.15),
        xlab = "Height (m)", ylab = "Density (1 / m / m2)",
        las = 1, log = "y", xlim = xlim)
matlines(height[, i], density[, i], col = cols, lty = 1, type = "l")
points(height[1, i], density[1, i], pch = 19, col = cols)
text(height[1, i] + strwidth("x"), density[1, i],
     paste0(round(times), c(" years", rep("", length(times) - 1))),
     adj = c(0, 0))
points(hh, n_av, col = "red", type = "l")
/docs/src/emergent.R
no_license
81x910l/plant
R
false
false
6,928
r
## ---
## title: "plant: A package for modelling forest trait ecology & evolution:
## _Patch-level emergent properties_"
## ---

## The aim here is to use the plant package to investigate dynamics
## within a patch of competing plants, focussing on emergent patch-level
## properties, rather than properties of plants within the patch.
library(plant)

p0 <- scm_base_parameters("FF16")
p0$control$equilibrium_nsteps <- 30
p0$control$equilibrium_solver_name <- "hybrid"
p0$disturbance_mean_interval <- 30.0

## We'll work with a single species at equilibrium
p1 <- expand_parameters(trait_matrix(0.0825, "lma"), p0, FALSE)
p1_eq <- equilibrium_seed_rain(p1)
data1 <- run_scm_collect(p1_eq)

## Helpers for picking out a handful of focal times to highlight.
closest <- function(t, time) {
  which.min(abs(time - t))
}
last <- function(x) {
  x[[length(x)]]
}

times <- c(5, 10, 20, 40, last(data1$time))
i <- vapply(times, closest, integer(1), data1$time)

blues <- c("#DEEBF7", "#C6DBEF", "#9ECAE1", "#6BAED6",
           "#4292C6", "#2171B5", "#08519C", "#08306B")
cols <- colorRampPalette(blues[-(1:2)])(length(i))

height <- t(data1$species[[1]]["height", , ])
log_density <- t(data1$species[[1]]["log_density", , ])

## Density is a matrix of time x cohort identity; it has been transposed
## above because `matplot` works column-by-column, so each column should
## be the state of the system at a given time.
density <- exp(log_density)
matplot(height, density, type = "l", lty = 1,
        col = make_transparent("black", 0.15),
        xlab = "Height (m)", ylab = "Density (1 / m / m2)",
        las = 1, log = "y")

## Note that densities can be *extremely* low here (the "atto-fox
## problem"), yet individuals within the cohorts continue to grow.
## The trajectories are easier to understand with a few highlighted:
xlim <- c(0, max(height, na.rm = TRUE) * 1.05)
matplot(height, density, type = "l", lty = 1,
        col = make_transparent("black", 0.15),
        xlab = "Height (m)", ylab = "Density (1 / m / m2)",
        las = 1, log = "y", xlim = xlim)
matlines(height[, i], density[, i], col = cols, lty = 1, type = "l")
points(height[1, i], density[1, i], pch = 19, col = cols)
text(height[1, i] + strwidth("x"), density[1, i],
     paste0(round(times), c(" years", rep("", length(times) - 1))),
     adj = c(0, 0))

## Leaf-area profile with respect to height: reconstruct the patches and
## use an unexported helper from plant to pad the results into a matrix.
patch <- lapply(seq_along(data1$time), scm_patch, data1)
leaf_area <- lapply(patch, function(x) x$species[[1]]$area_leafs)
leaf_area <- do.call("cbind", plant:::pad_matrix(leaf_area))
matplot(height, leaf_area, type = "l", lty = 1, col = "lightgrey",
        xlim = xlim, xlab = "Height (m)",
        ylab = "Leaf area density (m2 / m2 / m)", las = 1)
matlines(height[, i], leaf_area[, i], col = cols, lty = 1, type = "l")
points(height[1, i], leaf_area[1, i], pch = 19, col = cols)
text(height[1, i] + strwidth("x"), leaf_area[1, i],
     paste0(round(times), c(" years", rep("", length(times) - 1))),
     adj = c(0, 0))

## Height growth rate profile: rates are the first row of the ODE state,
## reduced by the light environment the plants themselves construct.
ode_size <- patch[[1]]$species[[1]]$seed$ode_size
growth_rate <- lapply(patch, function(x)
  matrix(x$species[[1]]$ode_rates, ode_size)[1, ])
growth_rate <- do.call("cbind", plant:::pad_matrix(growth_rate))
matplot(height, growth_rate, type = "l", lty = 1, col = "lightgrey",
        xlim = xlim, xlab = "Height (m)",
        ylab = "Height growth rate (m / year)", las = 1)
matlines(height[, i], growth_rate[, i], col = cols, lty = 1, type = "l")
points(height[1, i], growth_rate[1, i], pch = 19, col = cols)
text(height[1, i] + strwidth("x"), growth_rate[1, i],
     paste0(round(times), c(" years", rep("", length(times) - 1))),
     adj = c(0, 0))

## Metapopulation average: interpolate each patch's height-density curve
## onto a fixed series of heights, then integrate across patch ages
## weighted by patch abundance (for the formula see demography vignette).
hh <- seq_log_range(range(height, na.rm = TRUE), 500)

## Spline interpolation on log-log-scaled data, clamped so that x values
## outside the observed range map to zero.
f <- function(height, density, hout) {
  r <- range(height, na.rm = TRUE)
  clamp_domain(splinefun_loglog(height, density), r, 0)(hout)
}

## Interpolate the height-density relationship in each patch onto hh.
xx <- lapply(seq_along(data1$time), function(i)
  f(height[, i], density[, i], hh))
n_hh <- plant:::pad_list_to_array(xx)

## For each height, integrate across patches (rows), weighting by
## patch abundance.
trapezium <- plant:::trapezium
n_av <- apply(n_hh, 1, function(x)
  trapezium(data1$time, x * data1$patch_density))

## Replot the density trajectories with the metapopulation average (red).
xlim <- c(0, max(height, na.rm = TRUE) * 1.05)
matplot(height, density, type = "l", lty = 1,
        col = make_transparent("black", 0.15),
        xlab = "Height (m)", ylab = "Density (1 / m / m2)",
        las = 1, log = "y", xlim = xlim)
matlines(height[, i], density[, i], col = cols, lty = 1, type = "l")
points(height[1, i], density[1, i], pch = 19, col = cols)
text(height[1, i] + strwidth("x"), density[1, i],
     paste0(round(times), c(" years", rep("", length(times) - 1))),
     adj = c(0, 0))
points(hh, n_av, col = "red", type = "l")
library("aroma.affymetrix") verbose <- Arguments$getVerbose(-4, timestamp=TRUE) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Setup data set # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - dataSet <- "HapMap,CEU,testset" chipType <- "Mapping50K_Hind240" csR <- AffymetrixCelSet$byName(dataSet, chipType=chipType) print(csR) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # AS-CRMAv2 # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - res <- doASCRMAv2(csR, drop=FALSE, verbose=verbose) print(res) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Plot allele pairs before and after calibration # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - acc <- res$acc for (what in c("input", "output")) { toPNG(getFullName(acc), tags=c("allelePairs", what), aspectRatio=0.7, { plotAllelePairs(acc, array=1, what=what, verbose=verbose) }) }
/inst/testScripts/system/chipTypes/Mapping50K_Hind240/21.doASCRMAv2,plotACC.R
no_license
HenrikBengtsson/aroma.affymetrix
R
false
false
977
r
library("aroma.affymetrix") verbose <- Arguments$getVerbose(-4, timestamp=TRUE) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Setup data set # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - dataSet <- "HapMap,CEU,testset" chipType <- "Mapping50K_Hind240" csR <- AffymetrixCelSet$byName(dataSet, chipType=chipType) print(csR) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # AS-CRMAv2 # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - res <- doASCRMAv2(csR, drop=FALSE, verbose=verbose) print(res) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Plot allele pairs before and after calibration # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - acc <- res$acc for (what in c("input", "output")) { toPNG(getFullName(acc), tags=c("allelePairs", what), aspectRatio=0.7, { plotAllelePairs(acc, array=1, what=what, verbose=verbose) }) }
callCalibrationPerson <- function(seed=12345,n=500,runpar=c(4,0.5,0.05,10,3,0.5),mc.cores=1) { state <- RNGstate(); on.exit(state$reset()) RNGkind("user") set.user.Random.seed(seed) f <- as.factor(rep(1:mc.cores,length.out=n)) chunks <- split(1:n,f) initialSeeds <- list() currentSeed <- user.Random.seed() fun <- function(obj, i) if(i==1)obj else parallel::nextRNGStream(obj) for(i in 1:mc.cores){ initialSeeds[[i]] <- fun(currentSeed,i) currentSeed <- initialSeeds[[i]] } out <- parallel::mclapply(1:mc.cores, function(i) { chunk <- chunks[[i]] set.user.Random.seed(initialSeeds[[i]]) .Call("callCalibrationSimulation",list(n=as.integer(length(chunk)),runpar=as.double(runpar)),PACKAGE="microsimulation") }) states <- c("DiseaseFree","Precursor","PreClinical","Clinical") out <- lapply(out, function(o){ curnames <- names(o) mat <- matrix(0,nr=10,ncol=length(states)) colnames(mat) <- states; rownames(mat) <- seq(10,100,10) mat[,states[states %in% curnames]]<-data.matrix(transform(as.data.frame(o[states[states %in% curnames]]))) list(StateOccupancy=mat, TimeAtRisk=o$TimeAtRisk) }) Reduce(function(u,z) list(StateOccupancy=u$StateOccupancy + z$StateOccupancy, TimeAtRisk = u$TimeAtRisk + z$TimeAtRisk),out) }
/R/calibSim.R
no_license
harkeerat/microsimulation
R
false
false
1,295
r
callCalibrationPerson <- function(seed=12345,n=500,runpar=c(4,0.5,0.05,10,3,0.5),mc.cores=1) { state <- RNGstate(); on.exit(state$reset()) RNGkind("user") set.user.Random.seed(seed) f <- as.factor(rep(1:mc.cores,length.out=n)) chunks <- split(1:n,f) initialSeeds <- list() currentSeed <- user.Random.seed() fun <- function(obj, i) if(i==1)obj else parallel::nextRNGStream(obj) for(i in 1:mc.cores){ initialSeeds[[i]] <- fun(currentSeed,i) currentSeed <- initialSeeds[[i]] } out <- parallel::mclapply(1:mc.cores, function(i) { chunk <- chunks[[i]] set.user.Random.seed(initialSeeds[[i]]) .Call("callCalibrationSimulation",list(n=as.integer(length(chunk)),runpar=as.double(runpar)),PACKAGE="microsimulation") }) states <- c("DiseaseFree","Precursor","PreClinical","Clinical") out <- lapply(out, function(o){ curnames <- names(o) mat <- matrix(0,nr=10,ncol=length(states)) colnames(mat) <- states; rownames(mat) <- seq(10,100,10) mat[,states[states %in% curnames]]<-data.matrix(transform(as.data.frame(o[states[states %in% curnames]]))) list(StateOccupancy=mat, TimeAtRisk=o$TimeAtRisk) }) Reduce(function(u,z) list(StateOccupancy=u$StateOccupancy + z$StateOccupancy, TimeAtRisk = u$TimeAtRisk + z$TimeAtRisk),out) }
# Telvis Calhoun # Analysis of Songs Data retrieved from the Million Song Data Set # http://labrosa.ee.columbia.edu/millionsong/ library(xtable) library(ggplot2) library(slidify) library(shiny) load_csv_and_save_to_rds <- function() { # Load the million song CSV and save to a RDS file. # Will make the analysis quicker. print("Loading 1 million song data from disk. Be patient...") # NOTE: I've excluded this file from github because it very large df <- read.csv("data/songs.csv.gz") print("Saving data/songs.rds") saveRDS(df, "data/songs.rds") df } clean_data_and_filter_artist <- function(df=NULL) { if (is.null(df)) { print("Loading 1 million song data from disk. Be patient...") df <- readRDS("data/songs.rds") } names_with_prince <- unique(subset(df, grepl("^Prince$|^Prince &|^Prince With", artist_name, perl=TRUE))$artist_name) prince_df <- subset(df, grepl("^cPrince$|^Prince &|^Prince With", artist_name, perl=TRUE)) # write.csv(prince_df, "data/prince.csv") # Lot's of the song_hotttnesss values are NA so remove them. prince_hott_df <- prince_df[!is.na(prince_df$song_hotttnesss),] # A few songs have 0.0 song_hotttnesss, including "Let's Go Crazy". Set them to the mean hotttnesss prince_hott_df[(prince_hott_df$song_hotttnesss == 0), ]$song_hotttnesss <- mean(prince_df$song_hotttnesss, na.rm = TRUE) # select a few interesting columns prince_df_clean <- subset(prince_hott_df, select=c(title, tempo, loudness, song_hotttnesss)) # sort by the hottttnesss prince_df_clean <- prince_df_clean[order(prince_df_clean$song_hotttnesss, decreasing = TRUE),] # save for later write.csv(prince_df_clean, "data/prince_clean.csv") # save RDS for running recommender from R console saveRDS(prince_df_clean, "data/prince_clean.rds") # save RDS for running recommender from 'shiny' app saveRDS(prince_df_clean, "prince_song_recommender/data/prince_clean.rds") prince_df_clean } gen_plots <- function(df) { # plot age vs wage. 
color by jobclass obj <- qplot(song_hotttnesss, loudness, data=df) png("plots/hotttnesss_by_loudness.png", width = 960, height = 960, units = "px") print(obj) dev.off() print("Generated hotttnesss_by_loudness.png") } prince_song_recommender <- function(song_hotttnesss=0.5, loudness=-8, top_n=5, df = NULL, scale01=TRUE) { if (is.null(df)) { df <- readRDS("data/prince_clean.rds") } range01 <- function(x, datums){(x-min(datums))/(max(datums)-min(datums))} if(scale01) { x1 <- c(song_hotttnesss, range01(loudness, df$loudness)) } else { x1 <- c(song_hotttnesss, loudness) } n <- nrow(df) distances <- numeric(n) # calculate the distance between the query values and each # entry in the data. for (i in seq_len(n)){ if (scale01){ x2 <- c(df[i,]$song_hotttnesss, range01(df[i,]$loudness, df$loudness)) } else { x2 <- c(df[i,]$song_hotttnesss, df[i,]$loudness) } distances[i] <- dist(rbind(x1, x2)) } ord <- order(distances) ord <- ord[1:top_n] recs <- subset(df[ord, ], select=c("title", "loudness", "song_hotttnesss")) list(recommendations=recs, ord=ord, df=df, song_hotttnesss=song_hotttnesss, loudness=loudness) } prince_song_recommender_xtable <- function(recommender_data) { recs <- recommender_data$recommendations print(xtable(recs), type="html", include.rownames = FALSE) } prince_song_recommender_plot <- function(recommender_data) { # get the indices for the recommendations ord <- recommender_data$ord song_hotttnesss <- recommender_data$song_hotttnesss loudness <- recommender_data$loudness df <- recommender_data$df n <- nrow(df) # find which indices are in the 'top N' list df$recommended <- seq(n) %in% ord p <- ggplot(aes(x=song_hotttnesss, y=loudness, colour=recommended), data=df) + geom_point() + geom_point(aes(x=song_hotttnesss,y=loudness, colour=recommended), size=5,shape=4, data=data.frame(song_hotttnesss=song_hotttnesss, loudness=loudness, recommended=TRUE)) + labs(y="Loudness (dB)") + labs(x="Song Hotttnesss [0.0, 1.0]") + labs(title="Recommendation plot with input values 
marked as 'X'") p }
/prince_song_recommender/analysis.R
no_license
telvis07/data_products_peer_review
R
false
false
4,419
r
# Telvis Calhoun # Analysis of Songs Data retrieved from the Million Song Data Set # http://labrosa.ee.columbia.edu/millionsong/ library(xtable) library(ggplot2) library(slidify) library(shiny) load_csv_and_save_to_rds <- function() { # Load the million song CSV and save to a RDS file. # Will make the analysis quicker. print("Loading 1 million song data from disk. Be patient...") # NOTE: I've excluded this file from github because it very large df <- read.csv("data/songs.csv.gz") print("Saving data/songs.rds") saveRDS(df, "data/songs.rds") df } clean_data_and_filter_artist <- function(df=NULL) { if (is.null(df)) { print("Loading 1 million song data from disk. Be patient...") df <- readRDS("data/songs.rds") } names_with_prince <- unique(subset(df, grepl("^Prince$|^Prince &|^Prince With", artist_name, perl=TRUE))$artist_name) prince_df <- subset(df, grepl("^cPrince$|^Prince &|^Prince With", artist_name, perl=TRUE)) # write.csv(prince_df, "data/prince.csv") # Lot's of the song_hotttnesss values are NA so remove them. prince_hott_df <- prince_df[!is.na(prince_df$song_hotttnesss),] # A few songs have 0.0 song_hotttnesss, including "Let's Go Crazy". Set them to the mean hotttnesss prince_hott_df[(prince_hott_df$song_hotttnesss == 0), ]$song_hotttnesss <- mean(prince_df$song_hotttnesss, na.rm = TRUE) # select a few interesting columns prince_df_clean <- subset(prince_hott_df, select=c(title, tempo, loudness, song_hotttnesss)) # sort by the hottttnesss prince_df_clean <- prince_df_clean[order(prince_df_clean$song_hotttnesss, decreasing = TRUE),] # save for later write.csv(prince_df_clean, "data/prince_clean.csv") # save RDS for running recommender from R console saveRDS(prince_df_clean, "data/prince_clean.rds") # save RDS for running recommender from 'shiny' app saveRDS(prince_df_clean, "prince_song_recommender/data/prince_clean.rds") prince_df_clean } gen_plots <- function(df) { # plot age vs wage. 
color by jobclass obj <- qplot(song_hotttnesss, loudness, data=df) png("plots/hotttnesss_by_loudness.png", width = 960, height = 960, units = "px") print(obj) dev.off() print("Generated hotttnesss_by_loudness.png") } prince_song_recommender <- function(song_hotttnesss=0.5, loudness=-8, top_n=5, df = NULL, scale01=TRUE) { if (is.null(df)) { df <- readRDS("data/prince_clean.rds") } range01 <- function(x, datums){(x-min(datums))/(max(datums)-min(datums))} if(scale01) { x1 <- c(song_hotttnesss, range01(loudness, df$loudness)) } else { x1 <- c(song_hotttnesss, loudness) } n <- nrow(df) distances <- numeric(n) # calculate the distance between the query values and each # entry in the data. for (i in seq_len(n)){ if (scale01){ x2 <- c(df[i,]$song_hotttnesss, range01(df[i,]$loudness, df$loudness)) } else { x2 <- c(df[i,]$song_hotttnesss, df[i,]$loudness) } distances[i] <- dist(rbind(x1, x2)) } ord <- order(distances) ord <- ord[1:top_n] recs <- subset(df[ord, ], select=c("title", "loudness", "song_hotttnesss")) list(recommendations=recs, ord=ord, df=df, song_hotttnesss=song_hotttnesss, loudness=loudness) } prince_song_recommender_xtable <- function(recommender_data) { recs <- recommender_data$recommendations print(xtable(recs), type="html", include.rownames = FALSE) } prince_song_recommender_plot <- function(recommender_data) { # get the indices for the recommendations ord <- recommender_data$ord song_hotttnesss <- recommender_data$song_hotttnesss loudness <- recommender_data$loudness df <- recommender_data$df n <- nrow(df) # find which indices are in the 'top N' list df$recommended <- seq(n) %in% ord p <- ggplot(aes(x=song_hotttnesss, y=loudness, colour=recommended), data=df) + geom_point() + geom_point(aes(x=song_hotttnesss,y=loudness, colour=recommended), size=5,shape=4, data=data.frame(song_hotttnesss=song_hotttnesss, loudness=loudness, recommended=TRUE)) + labs(y="Loudness (dB)") + labs(x="Song Hotttnesss [0.0, 1.0]") + labs(title="Recommendation plot with input values 
marked as 'X'") p }
\name{justSNPRMA} \alias{justSNPRMA} \title{Summarization of SNP data} \description{ This function implements the SNPRMA method for summarization of SNP data. It works directly with the CEL files, saving memory. } \usage{ justSNPRMA(filenames, verbose = TRUE, phenoData = NULL, normalizeToHapmap = TRUE) } \arguments{ \item{filenames}{character vector with the filenames.} \item{verbose}{logical flag for verbosity.} \item{phenoData}{a \code{phenoData} object or \code{NULL}} \item{normalizeToHapmap}{Normalize to Hapmap? Should always be TRUE, but it's kept here for future use.} } \value{ \code{SnpQSet} or a \code{SnpCnvQSet}, depending on the array type. } %% Add Examples \examples{ ## snprmaResults <- justSNPRMA(list.celfiles()) } \keyword{manip}
/man/justSNPRMA.Rd
no_license
benilton/oligo
R
false
false
773
rd
\name{justSNPRMA} \alias{justSNPRMA} \title{Summarization of SNP data} \description{ This function implements the SNPRMA method for summarization of SNP data. It works directly with the CEL files, saving memory. } \usage{ justSNPRMA(filenames, verbose = TRUE, phenoData = NULL, normalizeToHapmap = TRUE) } \arguments{ \item{filenames}{character vector with the filenames.} \item{verbose}{logical flag for verbosity.} \item{phenoData}{a \code{phenoData} object or \code{NULL}} \item{normalizeToHapmap}{Normalize to Hapmap? Should always be TRUE, but it's kept here for future use.} } \value{ \code{SnpQSet} or a \code{SnpCnvQSet}, depending on the array type. } %% Add Examples \examples{ ## snprmaResults <- justSNPRMA(list.celfiles()) } \keyword{manip}
\name{skewness.folder} \alias{skewness.folder} %\alias{skewnessf} \title{ Skewness coefficients of a folder of data sets } \description{ Computes the skewness coefficient by column of the elements of an object of class \code{folder}. } \usage{ skewness.folder(x, na.rm = FALSE, type = 3) } \arguments{ \item{x}{ an object of class \code{\link{folder}} that is a list of data frames with the same column names. } \item{na.rm}{ logical. Should missing values be omitted from the calculations? (see \code{\link{skewness}}) } \item{type}{ an integer between 1 and 3 (see \code{\link{skewness}}). } } \details{ % This function can only be applied to a folder of data frames with the same column names. If \code{attr(x,"same.cols") = FALSE}, there is an error. It uses \code{\link{skewness}} to compute the mean by numeric column of each element of the folder. If some columns of the data frames are not numeric, there is a warning, and the means are computed on the numeric columns only. } \value{ A list whose elements are the skewness coefficients by column of the elements of the folder. } \author{ Rachid Boumaza, Pierre Santagostini, Smail Yousfi, Gilles Hunault, Sabine Demotes-Mainard } \seealso{ \code{\link{folder}} to create an object is of class \code{folder}. \code{\link{mean.folder}}, \code{\link{var.folder}}, \code{\link{cor.folder}}, \code{\link{kurtosis.folder}} for other statistics for \code{folder} objects. } \examples{ # First example: iris (Fisher) data(iris) iris.fold <- as.folder(iris, "Species") iris.skewness <- skewness.folder(iris.fold) print(iris.skewness) # Second example: roses data(roses) roses.fold <- as.folder(roses, "rose") roses.skewness <- skewness.folder(roses.fold) print(roses.skewness) }
/man/skewness.folder.Rd
no_license
cran/dad
R
false
false
1,774
rd
\name{skewness.folder} \alias{skewness.folder} %\alias{skewnessf} \title{ Skewness coefficients of a folder of data sets } \description{ Computes the skewness coefficient by column of the elements of an object of class \code{folder}. } \usage{ skewness.folder(x, na.rm = FALSE, type = 3) } \arguments{ \item{x}{ an object of class \code{\link{folder}} that is a list of data frames with the same column names. } \item{na.rm}{ logical. Should missing values be omitted from the calculations? (see \code{\link{skewness}}) } \item{type}{ an integer between 1 and 3 (see \code{\link{skewness}}). } } \details{ % This function can only be applied to a folder of data frames with the same column names. If \code{attr(x,"same.cols") = FALSE}, there is an error. It uses \code{\link{skewness}} to compute the mean by numeric column of each element of the folder. If some columns of the data frames are not numeric, there is a warning, and the means are computed on the numeric columns only. } \value{ A list whose elements are the skewness coefficients by column of the elements of the folder. } \author{ Rachid Boumaza, Pierre Santagostini, Smail Yousfi, Gilles Hunault, Sabine Demotes-Mainard } \seealso{ \code{\link{folder}} to create an object is of class \code{folder}. \code{\link{mean.folder}}, \code{\link{var.folder}}, \code{\link{cor.folder}}, \code{\link{kurtosis.folder}} for other statistics for \code{folder} objects. } \examples{ # First example: iris (Fisher) data(iris) iris.fold <- as.folder(iris, "Species") iris.skewness <- skewness.folder(iris.fold) print(iris.skewness) # Second example: roses data(roses) roses.fold <- as.folder(roses, "rose") roses.skewness <- skewness.folder(roses.fold) print(roses.skewness) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/zzz.R \name{print_tmleCom_opts} \alias{print_tmleCom_opts} \title{Print Current Option Settings for \code{tmleCommunity}} \usage{ print_tmleCom_opts() } \value{ Invisibly returns a list of \code{tmleCommunity} options. } \description{ Print Current Option Settings for \code{tmleCommunity} } \seealso{ \code{\link{tmleCom_Options}} }
/man/print_tmleCom_opts.Rd
no_license
chizhangucb/tmleCommunity
R
false
true
412
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/zzz.R \name{print_tmleCom_opts} \alias{print_tmleCom_opts} \title{Print Current Option Settings for \code{tmleCommunity}} \usage{ print_tmleCom_opts() } \value{ Invisibly returns a list of \code{tmleCommunity} options. } \description{ Print Current Option Settings for \code{tmleCommunity} } \seealso{ \code{\link{tmleCom_Options}} }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/codecommit_operations.R \name{codecommit_get_blob} \alias{codecommit_get_blob} \title{Returns the base-64 encoded content of an individual blob within a repository} \usage{ codecommit_get_blob(repositoryName, blobId) } \arguments{ \item{repositoryName}{[required] The name of the repository that contains the blob.} \item{blobId}{[required] The ID of the blob, which is its SHA-1 pointer.} } \description{ Returns the base-64 encoded content of an individual blob within a repository. } \section{Request syntax}{ \preformatted{svc$get_blob( repositoryName = "string", blobId = "string" ) } } \keyword{internal}
/cran/paws.developer.tools/man/codecommit_get_blob.Rd
permissive
peoplecure/paws
R
false
true
695
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/codecommit_operations.R \name{codecommit_get_blob} \alias{codecommit_get_blob} \title{Returns the base-64 encoded content of an individual blob within a repository} \usage{ codecommit_get_blob(repositoryName, blobId) } \arguments{ \item{repositoryName}{[required] The name of the repository that contains the blob.} \item{blobId}{[required] The ID of the blob, which is its SHA-1 pointer.} } \description{ Returns the base-64 encoded content of an individual blob within a repository. } \section{Request syntax}{ \preformatted{svc$get_blob( repositoryName = "string", blobId = "string" ) } } \keyword{internal}
setup({ # Limit the number of workers so CRAN is happy DelayedArray::setAutoBPPARAM(BiocParallel::MulticoreParam(workers = 2)) }) test_that("deviance calculation works", { r_gp_deviance <- function(y, mu, theta){ -2 * sum(dnbinom(y, mu = mu, size = 1/theta, log = TRUE) - dnbinom(y, mu = y, size = 1/theta, log = TRUE)) } expect_equal(compute_gp_deviance(y = 17, mu = 3, theta = 1.3), r_gp_deviance(y = 17, mu = 3, theta = 1.3)) expect_equal(compute_gp_deviance(y = 0, mu = 3, theta = 1.3), r_gp_deviance(y = 0, mu = 3, theta = 1.3)) expect_equal(compute_gp_deviance(y = 17, mu = 3, theta = 0), r_gp_deviance(y = 17, mu = 3, theta = 0)) expect_equal(compute_gp_deviance(y = 0, mu = 3, theta = 0), r_gp_deviance(y = 0, mu = 3, theta = 0)) }) test_that("Rough estimation of Beta works", { mat <- matrix(1:32, nrow = 8, ncol = 4) model_matrix <- cbind(1, rnorm(n = 4)) offset_matrix <- combine_size_factors_and_offset(0, size_factors = TRUE, mat)$offset_matrix b1 <- estimate_betas_roughly(mat, model_matrix, offset_matrix) library(HDF5Array) hdf5_mat <- as(mat, "HDF5Matrix") hdf5_offset_matrix <- combine_size_factors_and_offset(0, size_factors = TRUE, hdf5_mat)$offset_matrix b2 <- estimate_betas_roughly(hdf5_mat, model_matrix, hdf5_offset_matrix) expect_equal(b1 , b2) }) test_that("Beta estimation can handle edge cases as input", { Y <- matrix(0, nrow = 1, ncol = 10) model_matrix <- matrix(1, nrow = 10, ncol = 1) offset_matrix <- matrix(1, nrow = 1, ncol = 10) dispersion <- 0 res <- estimate_betas_group_wise(Y, offset_matrix, dispersion, beta_group_init = matrix(3, nrow = 1, ncol = 1), groups = 1, model_matrix = model_matrix) expect_equal(res$Beta[1,1], -1e8) beta_mat_init <- estimate_betas_roughly(Y, model_matrix, offset_matrix) res2 <- estimate_betas_fisher_scoring(Y, model_matrix, offset_matrix, dispersion, beta_mat_init) expect_lt(res2$Beta[1,1], -15) }) test_that("Beta estimation can handle edge case (2)", { # In version 1.1.13, glmGamPoi converged to an extreme result (mu = 
1e50) for this input. # With the introduction of the max_rel_mu_change parameter, this seems to be fixed y <- c(0, 0, 14, 2, 0, 0, 0, 0, 10, 12, 6, 2, 0, 4, 1, 1, 2, 6, 165, 2, 1, 0, 0, 0, 259, 2050, 715, 0, 0, 96, 2658, 149, 56, 7, 0, 0, 0, 0, 0, 0, 0, 5, 9, 1, 1, 0, 0, 1, 1, 5, 7, 0, 0, 1, 0, 3, 6, 19, 29, 0, 0, 0, 1, 4, 73, 3, 4, 1, 0, 1, 0, 0, 5, 169, 58, 1, 0, 32, 0, 2, 1, 1, 170, 30, 0, 1, 0, 4, 123, 1655, 1292, 101, 0, 732, 2866, 207, 3, 6, 3, 0, 0, 2, 0, 1, 0, 110, 27, 0, 0, 0, 0, 1, 51, 3, 198, 1, 0, 1, 0, 78, 9, 2, 142, 2, 0, 2, 1, 1) clust <- c("B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B 
cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cell") cond <- c("ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim") sf <- c(1.25495746, 2.84060391, 3.54532248, 0.74403676, 0.12168579, 1.29108360, 0.12000349, 0.65455177, 5.44150278, 12.27749619, 10.45289957, 1.95654869, 0.13524118, 3.82396827, 0.31255044, 1.73958695, 1.48917348, 6.17830352, 4.72301313, 5.18268863, 0.09076828, 1.82555638, 0.08546259, 1.12790128, 0.39478863, 
1.69810120, 2.50113680, 0.26218952, 0.05861062, 0.29934013, 0.36996404, 0.23265235, 0.55359268, 3.64881579, 2.02365705, 0.20378379, 0.02849112, 0.38153519, 0.08851444, 0.37638048, 1.51409512, 6.09265761, 13.59420239, 0.64251691, 0.38507232, 0.67384420, 0.20171328, 1.01657884, 2.55432311, 5.04959345, 12.65532171, 1.37339728, 0.16297743, 0.65830457, 0.20081821, 2.34212786, 2.65519592, 4.60271847, 14.42841431, 0.61404735, 0.33373006, 1.61952954, 0.20445239, 1.01918855, 1.71367318, 2.73565477, 4.88628172, 1.19830951, 0.23995307, 1.79371146, 0.10530501, 1.02351290, 4.40897451, 9.49905081, 9.37883164, 1.63402313, 0.17510934, 2.88147498, 0.20539059, 2.25509082, 1.48036301, 5.38691456, 4.21155324, 5.01258303, 0.19591153, 2.02167280, 0.11638010, 1.98069390, 0.57433016, 2.00913110, 3.63831225, 0.33962887, 0.20308284, 0.39905907, 0.46129308, 0.29367857, 0.69204747, 2.43350004, 2.24865281, 0.16404503, 0.11647715, 0.52166148, 0.04277982, 0.48455401, 1.23137302, 4.23807090, 11.94627878, 0.39999727, 0.23969425, 0.29627750, 0.14492514, 1.20180350, 2.62172262, 4.84203529, 12.69336739, 1.23926685, 0.28333679, 1.02518441, 0.24187261, 2.28643968, 3.42665620, 5.63192529, 19.61644098, 0.48511477, 0.52967394, 2.41232041, 0.37996074, 1.65937613) model_matrix <- model.matrix(~ cond + clust) offset_matrix <- add_vector_to_each_row(matrix(0, nrow = 1, ncol = length(y)), log(sf)) Y <- matrix(y, nrow = 1) disp_init <- estimate_dispersions_roughly(Y, model_matrix, offset_matrix = offset_matrix) beta_init <- estimate_betas_roughly(Y, model_matrix, offset_matrix = offset_matrix) beta_res <- estimate_betas_fisher_scoring(Y, model_matrix = model_matrix, offset_matrix = offset_matrix, dispersions = disp_init, beta_mat_init = beta_init) beta_res expect_lt(beta_res$deviances, 100) expect_true(all(calculate_mu(beta_res$Beta, model_matrix, offset_matrix) < 1e5)) # betaRes <- fitBeta_fisher_scoring(Y, model_matrix, exp(offset_matrix), thetas = disp_init, beta_matSEXP = beta_init, # ridge_penalty = 1e-6, 
tolerance = 1e-8, max_iter = 1000) # betaRes }) test_that("Groupwise beta estimation works", { mat <- matrix(1:32, nrow = 8, ncol = 4) offset_matrix <- combine_size_factors_and_offset(0, size_factors = TRUE, mat)$offset_matrix b1 <- estimate_betas_roughly_group_wise(mat, offset_matrix, groups = 1) b2 <- estimate_betas_roughly_group_wise(mat, offset_matrix, groups = rep(1, times = ncol(mat))) expect_equal(b1, b2) model_matrix <- cbind(c(1,1,0,0), c(0,0,1,1)) b3 <- estimate_betas_roughly_group_wise(mat, offset_matrix, groups = c(1, 1, 2, 2)) b4 <- cbind( estimate_betas_roughly_group_wise(mat[,1:2], offset_matrix[,1:2], groups = 1), estimate_betas_roughly_group_wise(mat[,3:4], offset_matrix[,3:4], groups = 1) ) expect_equal(b3, b4) b5 <- estimate_betas_roughly_group_wise(mat, offset_matrix, groups = c(1, 1, 2, 2)) b6 <- estimate_betas_roughly_group_wise(mat, offset_matrix, groups = c(2, 2, 1, 1)) expect_equal(b5, b6) }) test_that("Beta estimation can handle any kind of model_matrix", { skip_if_not_installed("DESeq2") # Weird input that makes DESeq2 choke set.seed(1) Y <- matrix(1:72, nrow = 9, ncol = 8)[3:5,,drop=FALSE] model_matrix <- matrix(rnorm(n = 8 * 2), nrow = 8, ncol = 2) offset_matrix <- matrix(0, nrow = nrow(Y), ncol = ncol(Y)) disp_init <- estimate_dispersions_roughly(Y, model_matrix, offset_matrix) beta_init <- estimate_betas_roughly(Y, model_matrix, offset_matrix) fit <- estimate_betas_fisher_scoring(Y, model_matrix = model_matrix, offset_matrix = offset_matrix, dispersions = disp_init, beta_mat_init = beta_init) deseq2_fit <- DESeq2:::fitBetaWrapper(ySEXP = Y, xSEXP = model_matrix, nfSEXP = exp(offset_matrix), alpha_hatSEXP = disp_init, beta_matSEXP = beta_init, lambdaSEXP = rep(0.3, ncol(model_matrix)), weightsSEXP = array(1, dim(Y)), useWeightsSEXP = FALSE, tolSEXP = 1e-8, maxitSEXP = 100, useQRSEXP = TRUE, minmuSEXP = 1e-6) edgeR_fit <- edgeR::glmFit.default(Y, design = model_matrix, dispersion = disp_init, offset = offset_matrix[1,], prior.count = 0, 
weights=NULL, start = beta_init) # My result agrees with edgeR expect_equal(fit$Beta, edgeR_fit$coefficients, tolerance = 1e-3) # DESeq2 however does not converge expect_failure( expect_equal(fit$Beta, deseq2_fit$beta_mat, tolerance = 1e-3) ) expect_failure( expect_equal(edgeR_fit$coefficients, deseq2_fit$beta_mat, tolerance = 1e-3) ) expect_equal(deseq2_fit$iter, rep(100, 3)) # My result, however did converge expect_lt(fit$iterations[1], 50) }) test_that("estimate_betas_group_wise properly rescales result", { dat <- make_dataset(n_genes = 20, n_samples = 30) mat <- dat$Y offset_matrix <- combine_size_factors_and_offset(0, size_factors = TRUE, mat)$offset_matrix dispersions <- dat$overdispersion df <- data.frame(cat1 = sample(LETTERS[1:3], size = 30, replace = TRUE)) model_matrix <- model.matrix(~ cat1, data = df) groups <- get_groups_for_model_matrix(model_matrix) beta_group_init <- estimate_betas_roughly_group_wise(mat, offset_matrix, groups = groups) res <- estimate_betas_group_wise(mat, offset_matrix, dispersions, beta_group_init, groups = groups, model_matrix = model_matrix) model_matrix2 <- model_matrix * 3 res2 <- estimate_betas_group_wise(mat, offset_matrix, dispersions, beta_group_init = beta_group_init, groups = groups, model_matrix = model_matrix2) expect_equal(res$Beta, res2$Beta * 3) expect_equal(calculate_mu(res$Beta, model_matrix, offset_matrix), calculate_mu(res2$Beta, model_matrix2, offset_matrix)) }) test_that("estimate_betas_group_wise can handle extreme case", { y <- matrix(c(1, rep(0, 500)), nrow = 1) res <- fitBeta_one_group(y, offset_matrix = matrix(0, nrow = 1, ncol = 501), thetas = 2.1, beta_start_values = -10, tolerance = 1e-8, maxIter = 100) expect_false(res$iter == 100) expect_false(is.na(res$beta)) }) test_that("estimate_betas_group_wise can handle DelayedArray", { mat <- matrix(1:32, nrow = 8, ncol = 4) offset_matrix <- combine_size_factors_and_offset(0, size_factors = TRUE, mat)$offset_matrix dispersions <- rep(0, 8) model_matrix <- 
matrix(1, nrow = 4) mat_hdf5 <- as(mat, "HDF5Matrix") offset_matrix_hdf5 <- as(offset_matrix, "HDF5Matrix") beta_vec_init <- estimate_betas_roughly_group_wise(mat, offset_matrix, groups = 1) beta_vec_init_da <- estimate_betas_roughly_group_wise(mat_hdf5, offset_matrix_hdf5, groups = 1) res <- estimate_betas_group_wise(mat, offset_matrix, dispersions, beta_vec_init, groups = 1, model_matrix = model_matrix) res2 <- estimate_betas_group_wise(mat_hdf5, offset_matrix_hdf5, dispersions, beta_vec_init_da, groups = 1, model_matrix = model_matrix) # This check is important, because beachmat makes life difficult for # handling numeric and integer input generically res3 <- estimate_betas_group_wise(mat * 1.0, offset_matrix, dispersions, beta_vec_init, groups = 1, model_matrix = model_matrix) expect_equal(res, res2) expect_equal(res, res3) }) test_that("estimate_betas_fisher_scoring can handle DelayedArray", { mat <- matrix(1:32, nrow = 8, ncol = 4) model_matrix <- cbind(1, rnorm(4, mean = 10)) offset_matrix <- combine_size_factors_and_offset(0, size_factors = TRUE, mat)$offset_matrix dispersions <- rep(0, 8) mat_hdf5 <- as(mat, "HDF5Matrix") offset_matrix_hdf5 <- as(offset_matrix, "HDF5Matrix") beta_mat_init <- estimate_betas_roughly(mat, model_matrix, offset_matrix) beta_mat_init_da <- estimate_betas_roughly(mat_hdf5, model_matrix, offset_matrix_hdf5) res <- estimate_betas_fisher_scoring(mat, model_matrix, offset_matrix, dispersions, beta_mat_init) res2 <- estimate_betas_fisher_scoring(mat_hdf5, model_matrix, offset_matrix_hdf5, dispersions, beta_mat_init_da) res3 <- estimate_betas_fisher_scoring(mat * 1.0, model_matrix, offset_matrix, dispersions, beta_mat_init) expect_equal(res, res2) expect_equal(res, res3) }) test_that("Beta estimation works", { skip_if_not(is_macos(), "Beta estimation is unprecise on Non-MacOS architectures") skip_if_not_installed("DESeq2") skip_if_not_installed("edgeR") data <- make_dataset(n_genes = 1000, n_samples = 30) offset_matrix <- 
matrix(log(data$size_factor), nrow=nrow(data$Y), ncol = ncol(data$Y), byrow = TRUE) model_matrix <- matrix(1.1, nrow = 30) # Fit Standard Model beta_mat_init <- estimate_betas_roughly(Y = data$Y, model_matrix = model_matrix, offset_matrix = offset_matrix) my_res <- estimate_betas_fisher_scoring(Y = data$Y, model_matrix = model_matrix, offset_matrix = offset_matrix, dispersions = data$overdispersion, beta_mat_init = beta_mat_init) # Fit Model for One Group beta_vec_init <- estimate_betas_roughly_group_wise(Y = data$Y, offset_matrix = offset_matrix, groups = 1) my_res2 <- estimate_betas_group_wise(Y = data$Y, offset_matrix = offset_matrix, dispersions = data$overdispersion, beta_group_init = beta_vec_init, groups = 1, model_matrix = model_matrix) expect_equal(my_res$Beta, my_res2$Beta, tolerance = 1e-6) expect_lt(max(my_res2$iterations), 10) # Compare with edgeR edgeR_res <- edgeR::glmFit.default(data$Y, design = model_matrix, dispersion = data$overdispersion, offset = offset_matrix[1,], prior.count = 0, weights=NULL) expect_equal(my_res$Beta[,1], coef(edgeR_res)[,1], tolerance = 1e-6) expect_equal(my_res2$Beta[,1], coef(edgeR_res)[,1], tolerance = 1e-6) # Compare with DESeq2 # This requires a few hacks: # * If the "just intercept" model is fit, DESeq2 # automatically assigns an approximation for Beta # To avoid this I give it a model design matrix that # is really similar to the "just intercept" but numerically # identical and thus force it to exactly calculate the # beta values # * The beta values are on a log2 scale. I need to convert it # to the ln scale. 
dds <- DESeq2::DESeqDataSetFromMatrix(data$Y, colData = data.frame(name = seq_len(ncol(data$Y))), design = ~ 1) DESeq2::sizeFactors(dds) <- data$size_factor DESeq2::dispersions(dds) <- data$overdispersion dds <- DESeq2::nbinomWaldTest(dds, modelMatrix = model_matrix, minmu = 1e-6) expect_equal(my_res$Beta[,1], coef(dds)[,1] / log2(exp(1)), tolerance = 1e-6) expect_equal(my_res2$Beta[,1], coef(dds)[,1] / log2(exp(1)), tolerance = 1e-6) expect_equal(coef(edgeR_res)[,1], coef(dds)[,1] / log2(exp(1)), tolerance = 1e-6) }) test_that("Fisher scoring and diagonal fisher scoring give consistent results", { set.seed(1) data <- make_dataset(n_genes = 1, n_samples = 3000) offset_matrix <- matrix(log(data$size_factor), nrow=nrow(data$Y), ncol = ncol(data$Y), byrow = TRUE) # Fit Standard Model beta_mat_init <- estimate_betas_roughly(Y = data$Y, model_matrix = data$X, offset_matrix = offset_matrix) res1 <- fitBeta_fisher_scoring(Y = data$Y, model_matrix = data$X, exp_offset_matrix = exp(offset_matrix), thetas = data$overdispersion, beta_matSEXP = beta_mat_init, ridge_penalty = 0, tolerance = 1e-8, max_rel_mu_change = 1e5, max_iter = 100) res2 <- fitBeta_diagonal_fisher_scoring(Y = data$Y, model_matrix = data$X, exp_offset_matrix = exp(offset_matrix), thetas = data$overdispersion, beta_matSEXP = beta_mat_init, tolerance = 1e-8, max_rel_mu_change = 1e5, max_iter = 100) expect_equal(res1, res2 ) set.seed(1) df <- data.frame( city = sample(c("Heidelberg", "Berlin", "New York"), size = 3000, replace = TRUE), fruit = sample(c("Apple", "Cherry", "Banana"), size = 3000, replace = TRUE), age = rnorm(3000, mean = 50, sd = 15), car = sample(c("blue", "big", "truck"), size = 3000, replace = TRUE), letter = LETTERS[1:10] ) new_model_matrix <- model.matrix(~ . 
- 1, df) beta_mat_init <- estimate_betas_roughly(Y = data$Y, model_matrix = new_model_matrix, offset_matrix = offset_matrix) res1 <- fitBeta_fisher_scoring(Y = data$Y, model_matrix = new_model_matrix, exp_offset_matrix = exp(offset_matrix), thetas = data$overdispersion, beta_matSEXP = beta_mat_init, ridge_penalty = 0, tolerance = 1e-8, max_rel_mu_change = 1e5, max_iter = 100) res2 <- fitBeta_diagonal_fisher_scoring(Y = data$Y, model_matrix = new_model_matrix, exp_offset_matrix = exp(offset_matrix), thetas = data$overdispersion, beta_matSEXP = beta_mat_init, tolerance = 1e-8, max_rel_mu_change = 1e5, max_iter = 1000) expect_gt(cor(c(res1$beta_mat), c(res2$beta_mat)), 0.99) expect_gt(res2$iter, res1$iter) }) test_that("Fisher scoring and ridge penalized fisher scoring give consistent results", { data <- make_dataset(n_genes = 1, n_samples = 3000) offset_matrix <- matrix(log(data$size_factor), nrow=nrow(data$Y), ncol = ncol(data$Y), byrow = TRUE) # Fit Standard Model beta_mat_init <- estimate_betas_roughly(Y = data$Y, model_matrix = data$X, offset_matrix = offset_matrix) res1 <- fitBeta_fisher_scoring(Y = data$Y, model_matrix = data$X, exp_offset_matrix = exp(offset_matrix), thetas = data$overdispersion, beta_matSEXP = beta_mat_init, ridge_penalty = 0, tolerance = 1e-8, max_rel_mu_change = 1e5, max_iter = 100) res2 <- fitBeta_fisher_scoring(Y = data$Y, model_matrix = data$X, exp_offset_matrix = exp(offset_matrix), thetas = data$overdispersion, beta_matSEXP = beta_mat_init, ridge_penalty = 1e-6, tolerance = 1e-8, max_rel_mu_change = 1e5, max_iter = 100) expect_equal(res1, res2) set.seed(1) size <- 25 df <- data.frame( city = sample(c("Heidelberg", "Berlin", "New York"), size = size, replace = TRUE), fruit = sample(c("Apple", "Cherry", "Banana"), size = size, replace = TRUE), age = rnorm(size, mean = 50, sd = 1e-5), car = sample(c("blue", "big", "truck"), size = size, replace = TRUE) ) new_model_matrix <- model.matrix(~ . 
- 1, df) beta_mat_init <- estimate_betas_roughly(Y = data$Y[,1:size,drop=FALSE], model_matrix = new_model_matrix, offset_matrix = offset_matrix[,1:size,drop=FALSE]) res1 <- fitBeta_fisher_scoring(Y = data$Y[,1:size,drop=FALSE], model_matrix = new_model_matrix, exp_offset_matrix = exp(offset_matrix)[,1:size,drop=FALSE], thetas = data$overdispersion, beta_matSEXP = beta_mat_init, ridge_penalty = 0, tolerance = 1e-8, max_rel_mu_change = 1e5, max_iter = 100) res2 <- fitBeta_fisher_scoring(Y = data$Y[,1:size,drop=FALSE], model_matrix = new_model_matrix, exp_offset_matrix = exp(offset_matrix)[,1:size,drop=FALSE], thetas = data$overdispersion, beta_matSEXP = beta_mat_init, ridge_penalty = 1e-30, tolerance = 1e-8, max_rel_mu_change = 1e5, max_iter = 100) res3 <- fitBeta_fisher_scoring(Y = data$Y[,1:size,drop=FALSE], model_matrix = new_model_matrix, exp_offset_matrix = exp(offset_matrix)[,1:size,drop=FALSE], thetas = data$overdispersion, beta_matSEXP = beta_mat_init, ridge_penalty = 50, tolerance = 1e-8, max_rel_mu_change = 1e5, max_iter = 100) expect_equal(res1, res2) expect_lt(res3$beta_mat[6], res1$beta_mat[6]) # The age column is much smaller }) test_that("glm_gp_impl can handle all zero rows", { Y <- matrix(0, nrow = 2, ncol = 10) Y[1, ] <- rpois(n = 10, lambda = 3) X <- matrix(1, nrow = 10, ncol = 1) res <- glm_gp_impl(Y, X) expect_equal(res$Beta[2,1], -1e8) expect_equal(res$Mu[2, ], rep(0, 10)) }) test_that("glm_gp_impl can handle all zero columns", { Y <- matrix(0, nrow = 2, ncol = 10) Y[1, 1] <- 3 X <- matrix(1, nrow = 10, ncol = 1) res <- glm_gp_impl(Y, X) expect_equal(res$size_factors[2:10], rep(0.001, times = 9)) }) test_that("glm_gp_impl can handle all values zero", { Y <- matrix(0, nrow = 3, ncol = 10) X <- matrix(1, nrow = 10, ncol = 1) res <- glm_gp_impl(Y, X) expect_equal(res$size_factors, rep(0.001, times = 10)) expect_equal(res$overdispersions, rep(0, times = 3)) expect_equal(res$Beta[,1], rep(-1e8, times = 3)) expect_true(all(res$Mu == 0)) }) 
test_that("glm_gp_impl can handle dispersion of zero", { Y <- matrix(rnbinom(10, mu = 10, size = 1 / 2.3), nrow = 1, ncol = 10) X <- cbind(1, rnorm(10)) res <- glm_gp_impl(Y, X, overdispersion = 0, size_factors = FALSE) res2 <- glm(c(Y) ~ X - 1, family = "poisson") expect_equal(c(res$Beta), unname(coef(res2))) }) test_that("glm_gp_impl can handle weird input", { Y <- matrix(c(7, 0, 0, 0, 0), nrow = 1) X <- cbind(1, c(0.64, -2.7, -0.94, 0.6, 0.56)) offset <- matrix(0, nrow = 1, ncol = 5) init <- matrix(c(1,1), nrow = 1) # This used to return c(NA, NA) because mu got exactly zero res <- estimate_betas_fisher_scoring(Y, X, offset, dispersions = 0, beta_mat_init = init) # fitBeta_diagonal_fisher_scoring(Y, X, exp(offset), 0, init, tolerance = 1e-8, max_iter = 5000000) expect_false(any(is.na(c(res$Beta)))) }) test_that("glm_gp_impl can handle weird input 2", { Y <- matrix(c(34, 130, 1, 27, 1), nrow = 1) X <- cbind(c(0.03, -0.4, -0.4, 0.8, 0.1), c(-0.1, -0.7, 0.7, -0.03, 0.2)) offset <- matrix(0, nrow = 1, ncol = 5) init <- matrix(c(3000, -141), nrow = 1) res <- estimate_betas_fisher_scoring(Y, X, offset, dispersions = 0, beta_mat_init = init) expect_false(any(is.na(c(res$Beta)))) }) # test_that("glm_gp_impl works as expected", { # skip("No workable tests here") # # My method # data <- make_dataset(n_genes = 2000, n_samples = 30) # res <- glm_gp_impl(data$Y, model_matrix = data$X, verbose = TRUE) # # # edgeR # edgeR_data <- edgeR::DGEList(data$Y) # edgeR_data <- edgeR::calcNormFactors(edgeR_data) # edgeR_data <- edgeR::estimateDisp(edgeR_data, data$X) # fit <- edgeR::glmFit(edgeR_data, design = data$X) # # # DESeq2 # dds <- DESeq2::DESeqDataSetFromMatrix(data$Y, colData = data.frame(name = seq_len(ncol(data$Y))), # design = ~ 1) # dds <- DESeq2::estimateSizeFactors(dds) # dds <- DESeq2::estimateDispersions(dds) # dds <- DESeq2::nbinomWaldTest(dds, minmu = 1e-6) # # res <- glm_gp_impl(data$Y, model_matrix = data$X, size_factors = DESeq2::sizeFactors(dds), # verbose = 
TRUE) # plot(res$size_factors, DESeq2::sizeFactors(dds)); abline(0,1) # # plot(res$Beta[,1], coef(dds)[,1] / log2(exp(1)), pch = 16, cex = 0.2, col ="red"); abline(0,1) # plot(res$Beta[,1], coef(fit)[,1]); abline(0,1) # plot(coef(dds)[,1] / log2(exp(1)), coef(fit)[,1]); abline(0,1) # # # plot(res$overdispersions, SummarizedExperiment::rowData(dds)$dispGeneEst, log = "xy"); abline(0,1) # plot(res$overdispersions, edgeR_data$tagwise.dispersion, log = "xy"); abline(0,1) # plot(SummarizedExperiment::rowData(dds)$dispGeneEst, edgeR_data$tagwise.dispersion, log = "xy"); abline(0,1) # }) test_that("glm_gp_impl works with Delayed Input", { # My method data <- make_dataset(n_genes = 2000, n_samples = 30) Y_da <- HDF5Array::writeHDF5Array(data$Y) res <- glm_gp_impl(data$Y, model_matrix = data$X, verbose = TRUE) res2 <- glm_gp_impl(Y_da, model_matrix = data$X, verbose = TRUE) expect_equal(res$Beta, res2$Beta) expect_equal(res$overdispersions, res2$overdispersions) expect_equal(res$Mu, as.matrix(res2$Mu)) expect_equal(res$size_factors, res2$size_factors) })
/tests/testthat/test-estimate_betas.R
no_license
hjames1/glmGamPoi
R
false
false
26,351
r
setup({ # Limit the number of workers so CRAN is happy DelayedArray::setAutoBPPARAM(BiocParallel::MulticoreParam(workers = 2)) }) test_that("deviance calculation works", { r_gp_deviance <- function(y, mu, theta){ -2 * sum(dnbinom(y, mu = mu, size = 1/theta, log = TRUE) - dnbinom(y, mu = y, size = 1/theta, log = TRUE)) } expect_equal(compute_gp_deviance(y = 17, mu = 3, theta = 1.3), r_gp_deviance(y = 17, mu = 3, theta = 1.3)) expect_equal(compute_gp_deviance(y = 0, mu = 3, theta = 1.3), r_gp_deviance(y = 0, mu = 3, theta = 1.3)) expect_equal(compute_gp_deviance(y = 17, mu = 3, theta = 0), r_gp_deviance(y = 17, mu = 3, theta = 0)) expect_equal(compute_gp_deviance(y = 0, mu = 3, theta = 0), r_gp_deviance(y = 0, mu = 3, theta = 0)) }) test_that("Rough estimation of Beta works", { mat <- matrix(1:32, nrow = 8, ncol = 4) model_matrix <- cbind(1, rnorm(n = 4)) offset_matrix <- combine_size_factors_and_offset(0, size_factors = TRUE, mat)$offset_matrix b1 <- estimate_betas_roughly(mat, model_matrix, offset_matrix) library(HDF5Array) hdf5_mat <- as(mat, "HDF5Matrix") hdf5_offset_matrix <- combine_size_factors_and_offset(0, size_factors = TRUE, hdf5_mat)$offset_matrix b2 <- estimate_betas_roughly(hdf5_mat, model_matrix, hdf5_offset_matrix) expect_equal(b1 , b2) }) test_that("Beta estimation can handle edge cases as input", { Y <- matrix(0, nrow = 1, ncol = 10) model_matrix <- matrix(1, nrow = 10, ncol = 1) offset_matrix <- matrix(1, nrow = 1, ncol = 10) dispersion <- 0 res <- estimate_betas_group_wise(Y, offset_matrix, dispersion, beta_group_init = matrix(3, nrow = 1, ncol = 1), groups = 1, model_matrix = model_matrix) expect_equal(res$Beta[1,1], -1e8) beta_mat_init <- estimate_betas_roughly(Y, model_matrix, offset_matrix) res2 <- estimate_betas_fisher_scoring(Y, model_matrix, offset_matrix, dispersion, beta_mat_init) expect_lt(res2$Beta[1,1], -15) }) test_that("Beta estimation can handle edge case (2)", { # In version 1.1.13, glmGamPoi converged to an extreme result (mu = 
1e50) for this input. # With the introduction of the max_rel_mu_change parameter, this seems to be fixed y <- c(0, 0, 14, 2, 0, 0, 0, 0, 10, 12, 6, 2, 0, 4, 1, 1, 2, 6, 165, 2, 1, 0, 0, 0, 259, 2050, 715, 0, 0, 96, 2658, 149, 56, 7, 0, 0, 0, 0, 0, 0, 0, 5, 9, 1, 1, 0, 0, 1, 1, 5, 7, 0, 0, 1, 0, 3, 6, 19, 29, 0, 0, 0, 1, 4, 73, 3, 4, 1, 0, 1, 0, 0, 5, 169, 58, 1, 0, 32, 0, 2, 1, 1, 170, 30, 0, 1, 0, 4, 123, 1655, 1292, 101, 0, 732, 2866, 207, 3, 6, 3, 0, 0, 2, 0, 1, 0, 110, 27, 0, 0, 0, 0, 1, 51, 3, 198, 1, 0, 1, 0, 78, 9, 2, 142, 2, 0, 2, 1, 1) clust <- c("B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B 
cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cells", "B cells", "CD14+ Monocytes", "CD4 T cells", "CD8 T cells", "Dendritic cells", "FCGR3A+ Monocytes", "Megakaryocytes", "NK cell") cond <- c("ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "ctrl", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim", "stim") sf <- c(1.25495746, 2.84060391, 3.54532248, 0.74403676, 0.12168579, 1.29108360, 0.12000349, 0.65455177, 5.44150278, 12.27749619, 10.45289957, 1.95654869, 0.13524118, 3.82396827, 0.31255044, 1.73958695, 1.48917348, 6.17830352, 4.72301313, 5.18268863, 0.09076828, 1.82555638, 0.08546259, 1.12790128, 0.39478863, 
1.69810120, 2.50113680, 0.26218952, 0.05861062, 0.29934013, 0.36996404, 0.23265235, 0.55359268, 3.64881579, 2.02365705, 0.20378379, 0.02849112, 0.38153519, 0.08851444, 0.37638048, 1.51409512, 6.09265761, 13.59420239, 0.64251691, 0.38507232, 0.67384420, 0.20171328, 1.01657884, 2.55432311, 5.04959345, 12.65532171, 1.37339728, 0.16297743, 0.65830457, 0.20081821, 2.34212786, 2.65519592, 4.60271847, 14.42841431, 0.61404735, 0.33373006, 1.61952954, 0.20445239, 1.01918855, 1.71367318, 2.73565477, 4.88628172, 1.19830951, 0.23995307, 1.79371146, 0.10530501, 1.02351290, 4.40897451, 9.49905081, 9.37883164, 1.63402313, 0.17510934, 2.88147498, 0.20539059, 2.25509082, 1.48036301, 5.38691456, 4.21155324, 5.01258303, 0.19591153, 2.02167280, 0.11638010, 1.98069390, 0.57433016, 2.00913110, 3.63831225, 0.33962887, 0.20308284, 0.39905907, 0.46129308, 0.29367857, 0.69204747, 2.43350004, 2.24865281, 0.16404503, 0.11647715, 0.52166148, 0.04277982, 0.48455401, 1.23137302, 4.23807090, 11.94627878, 0.39999727, 0.23969425, 0.29627750, 0.14492514, 1.20180350, 2.62172262, 4.84203529, 12.69336739, 1.23926685, 0.28333679, 1.02518441, 0.24187261, 2.28643968, 3.42665620, 5.63192529, 19.61644098, 0.48511477, 0.52967394, 2.41232041, 0.37996074, 1.65937613) model_matrix <- model.matrix(~ cond + clust) offset_matrix <- add_vector_to_each_row(matrix(0, nrow = 1, ncol = length(y)), log(sf)) Y <- matrix(y, nrow = 1) disp_init <- estimate_dispersions_roughly(Y, model_matrix, offset_matrix = offset_matrix) beta_init <- estimate_betas_roughly(Y, model_matrix, offset_matrix = offset_matrix) beta_res <- estimate_betas_fisher_scoring(Y, model_matrix = model_matrix, offset_matrix = offset_matrix, dispersions = disp_init, beta_mat_init = beta_init) beta_res expect_lt(beta_res$deviances, 100) expect_true(all(calculate_mu(beta_res$Beta, model_matrix, offset_matrix) < 1e5)) # betaRes <- fitBeta_fisher_scoring(Y, model_matrix, exp(offset_matrix), thetas = disp_init, beta_matSEXP = beta_init, # ridge_penalty = 1e-6, 
tolerance = 1e-8, max_iter = 1000) # betaRes }) test_that("Groupwise beta estimation works", { mat <- matrix(1:32, nrow = 8, ncol = 4) offset_matrix <- combine_size_factors_and_offset(0, size_factors = TRUE, mat)$offset_matrix b1 <- estimate_betas_roughly_group_wise(mat, offset_matrix, groups = 1) b2 <- estimate_betas_roughly_group_wise(mat, offset_matrix, groups = rep(1, times = ncol(mat))) expect_equal(b1, b2) model_matrix <- cbind(c(1,1,0,0), c(0,0,1,1)) b3 <- estimate_betas_roughly_group_wise(mat, offset_matrix, groups = c(1, 1, 2, 2)) b4 <- cbind( estimate_betas_roughly_group_wise(mat[,1:2], offset_matrix[,1:2], groups = 1), estimate_betas_roughly_group_wise(mat[,3:4], offset_matrix[,3:4], groups = 1) ) expect_equal(b3, b4) b5 <- estimate_betas_roughly_group_wise(mat, offset_matrix, groups = c(1, 1, 2, 2)) b6 <- estimate_betas_roughly_group_wise(mat, offset_matrix, groups = c(2, 2, 1, 1)) expect_equal(b5, b6) }) test_that("Beta estimation can handle any kind of model_matrix", { skip_if_not_installed("DESeq2") # Weird input that makes DESeq2 choke set.seed(1) Y <- matrix(1:72, nrow = 9, ncol = 8)[3:5,,drop=FALSE] model_matrix <- matrix(rnorm(n = 8 * 2), nrow = 8, ncol = 2) offset_matrix <- matrix(0, nrow = nrow(Y), ncol = ncol(Y)) disp_init <- estimate_dispersions_roughly(Y, model_matrix, offset_matrix) beta_init <- estimate_betas_roughly(Y, model_matrix, offset_matrix) fit <- estimate_betas_fisher_scoring(Y, model_matrix = model_matrix, offset_matrix = offset_matrix, dispersions = disp_init, beta_mat_init = beta_init) deseq2_fit <- DESeq2:::fitBetaWrapper(ySEXP = Y, xSEXP = model_matrix, nfSEXP = exp(offset_matrix), alpha_hatSEXP = disp_init, beta_matSEXP = beta_init, lambdaSEXP = rep(0.3, ncol(model_matrix)), weightsSEXP = array(1, dim(Y)), useWeightsSEXP = FALSE, tolSEXP = 1e-8, maxitSEXP = 100, useQRSEXP = TRUE, minmuSEXP = 1e-6) edgeR_fit <- edgeR::glmFit.default(Y, design = model_matrix, dispersion = disp_init, offset = offset_matrix[1,], prior.count = 0, 
weights=NULL, start = beta_init) # My result agrees with edgeR expect_equal(fit$Beta, edgeR_fit$coefficients, tolerance = 1e-3) # DESeq2 however does not converge expect_failure( expect_equal(fit$Beta, deseq2_fit$beta_mat, tolerance = 1e-3) ) expect_failure( expect_equal(edgeR_fit$coefficients, deseq2_fit$beta_mat, tolerance = 1e-3) ) expect_equal(deseq2_fit$iter, rep(100, 3)) # My result, however did converge expect_lt(fit$iterations[1], 50) }) test_that("estimate_betas_group_wise properly rescales result", { dat <- make_dataset(n_genes = 20, n_samples = 30) mat <- dat$Y offset_matrix <- combine_size_factors_and_offset(0, size_factors = TRUE, mat)$offset_matrix dispersions <- dat$overdispersion df <- data.frame(cat1 = sample(LETTERS[1:3], size = 30, replace = TRUE)) model_matrix <- model.matrix(~ cat1, data = df) groups <- get_groups_for_model_matrix(model_matrix) beta_group_init <- estimate_betas_roughly_group_wise(mat, offset_matrix, groups = groups) res <- estimate_betas_group_wise(mat, offset_matrix, dispersions, beta_group_init, groups = groups, model_matrix = model_matrix) model_matrix2 <- model_matrix * 3 res2 <- estimate_betas_group_wise(mat, offset_matrix, dispersions, beta_group_init = beta_group_init, groups = groups, model_matrix = model_matrix2) expect_equal(res$Beta, res2$Beta * 3) expect_equal(calculate_mu(res$Beta, model_matrix, offset_matrix), calculate_mu(res2$Beta, model_matrix2, offset_matrix)) }) test_that("estimate_betas_group_wise can handle extreme case", { y <- matrix(c(1, rep(0, 500)), nrow = 1) res <- fitBeta_one_group(y, offset_matrix = matrix(0, nrow = 1, ncol = 501), thetas = 2.1, beta_start_values = -10, tolerance = 1e-8, maxIter = 100) expect_false(res$iter == 100) expect_false(is.na(res$beta)) }) test_that("estimate_betas_group_wise can handle DelayedArray", { mat <- matrix(1:32, nrow = 8, ncol = 4) offset_matrix <- combine_size_factors_and_offset(0, size_factors = TRUE, mat)$offset_matrix dispersions <- rep(0, 8) model_matrix <- 
matrix(1, nrow = 4) mat_hdf5 <- as(mat, "HDF5Matrix") offset_matrix_hdf5 <- as(offset_matrix, "HDF5Matrix") beta_vec_init <- estimate_betas_roughly_group_wise(mat, offset_matrix, groups = 1) beta_vec_init_da <- estimate_betas_roughly_group_wise(mat_hdf5, offset_matrix_hdf5, groups = 1) res <- estimate_betas_group_wise(mat, offset_matrix, dispersions, beta_vec_init, groups = 1, model_matrix = model_matrix) res2 <- estimate_betas_group_wise(mat_hdf5, offset_matrix_hdf5, dispersions, beta_vec_init_da, groups = 1, model_matrix = model_matrix) # This check is important, because beachmat makes life difficult for # handling numeric and integer input generically res3 <- estimate_betas_group_wise(mat * 1.0, offset_matrix, dispersions, beta_vec_init, groups = 1, model_matrix = model_matrix) expect_equal(res, res2) expect_equal(res, res3) }) test_that("estimate_betas_fisher_scoring can handle DelayedArray", { mat <- matrix(1:32, nrow = 8, ncol = 4) model_matrix <- cbind(1, rnorm(4, mean = 10)) offset_matrix <- combine_size_factors_and_offset(0, size_factors = TRUE, mat)$offset_matrix dispersions <- rep(0, 8) mat_hdf5 <- as(mat, "HDF5Matrix") offset_matrix_hdf5 <- as(offset_matrix, "HDF5Matrix") beta_mat_init <- estimate_betas_roughly(mat, model_matrix, offset_matrix) beta_mat_init_da <- estimate_betas_roughly(mat_hdf5, model_matrix, offset_matrix_hdf5) res <- estimate_betas_fisher_scoring(mat, model_matrix, offset_matrix, dispersions, beta_mat_init) res2 <- estimate_betas_fisher_scoring(mat_hdf5, model_matrix, offset_matrix_hdf5, dispersions, beta_mat_init_da) res3 <- estimate_betas_fisher_scoring(mat * 1.0, model_matrix, offset_matrix, dispersions, beta_mat_init) expect_equal(res, res2) expect_equal(res, res3) }) test_that("Beta estimation works", { skip_if_not(is_macos(), "Beta estimation is unprecise on Non-MacOS architectures") skip_if_not_installed("DESeq2") skip_if_not_installed("edgeR") data <- make_dataset(n_genes = 1000, n_samples = 30) offset_matrix <- 
matrix(log(data$size_factor), nrow=nrow(data$Y), ncol = ncol(data$Y), byrow = TRUE) model_matrix <- matrix(1.1, nrow = 30) # Fit Standard Model beta_mat_init <- estimate_betas_roughly(Y = data$Y, model_matrix = model_matrix, offset_matrix = offset_matrix) my_res <- estimate_betas_fisher_scoring(Y = data$Y, model_matrix = model_matrix, offset_matrix = offset_matrix, dispersions = data$overdispersion, beta_mat_init = beta_mat_init) # Fit Model for One Group beta_vec_init <- estimate_betas_roughly_group_wise(Y = data$Y, offset_matrix = offset_matrix, groups = 1) my_res2 <- estimate_betas_group_wise(Y = data$Y, offset_matrix = offset_matrix, dispersions = data$overdispersion, beta_group_init = beta_vec_init, groups = 1, model_matrix = model_matrix) expect_equal(my_res$Beta, my_res2$Beta, tolerance = 1e-6) expect_lt(max(my_res2$iterations), 10) # Compare with edgeR edgeR_res <- edgeR::glmFit.default(data$Y, design = model_matrix, dispersion = data$overdispersion, offset = offset_matrix[1,], prior.count = 0, weights=NULL) expect_equal(my_res$Beta[,1], coef(edgeR_res)[,1], tolerance = 1e-6) expect_equal(my_res2$Beta[,1], coef(edgeR_res)[,1], tolerance = 1e-6) # Compare with DESeq2 # This requires a few hacks: # * If the "just intercept" model is fit, DESeq2 # automatically assigns an approximation for Beta # To avoid this I give it a model design matrix that # is really similar to the "just intercept" but numerically # identical and thus force it to exactly calculate the # beta values # * The beta values are on a log2 scale. I need to convert it # to the ln scale. 
dds <- DESeq2::DESeqDataSetFromMatrix(data$Y, colData = data.frame(name = seq_len(ncol(data$Y))), design = ~ 1) DESeq2::sizeFactors(dds) <- data$size_factor DESeq2::dispersions(dds) <- data$overdispersion dds <- DESeq2::nbinomWaldTest(dds, modelMatrix = model_matrix, minmu = 1e-6) expect_equal(my_res$Beta[,1], coef(dds)[,1] / log2(exp(1)), tolerance = 1e-6) expect_equal(my_res2$Beta[,1], coef(dds)[,1] / log2(exp(1)), tolerance = 1e-6) expect_equal(coef(edgeR_res)[,1], coef(dds)[,1] / log2(exp(1)), tolerance = 1e-6) }) test_that("Fisher scoring and diagonal fisher scoring give consistent results", { set.seed(1) data <- make_dataset(n_genes = 1, n_samples = 3000) offset_matrix <- matrix(log(data$size_factor), nrow=nrow(data$Y), ncol = ncol(data$Y), byrow = TRUE) # Fit Standard Model beta_mat_init <- estimate_betas_roughly(Y = data$Y, model_matrix = data$X, offset_matrix = offset_matrix) res1 <- fitBeta_fisher_scoring(Y = data$Y, model_matrix = data$X, exp_offset_matrix = exp(offset_matrix), thetas = data$overdispersion, beta_matSEXP = beta_mat_init, ridge_penalty = 0, tolerance = 1e-8, max_rel_mu_change = 1e5, max_iter = 100) res2 <- fitBeta_diagonal_fisher_scoring(Y = data$Y, model_matrix = data$X, exp_offset_matrix = exp(offset_matrix), thetas = data$overdispersion, beta_matSEXP = beta_mat_init, tolerance = 1e-8, max_rel_mu_change = 1e5, max_iter = 100) expect_equal(res1, res2 ) set.seed(1) df <- data.frame( city = sample(c("Heidelberg", "Berlin", "New York"), size = 3000, replace = TRUE), fruit = sample(c("Apple", "Cherry", "Banana"), size = 3000, replace = TRUE), age = rnorm(3000, mean = 50, sd = 15), car = sample(c("blue", "big", "truck"), size = 3000, replace = TRUE), letter = LETTERS[1:10] ) new_model_matrix <- model.matrix(~ . 
- 1, df) beta_mat_init <- estimate_betas_roughly(Y = data$Y, model_matrix = new_model_matrix, offset_matrix = offset_matrix) res1 <- fitBeta_fisher_scoring(Y = data$Y, model_matrix = new_model_matrix, exp_offset_matrix = exp(offset_matrix), thetas = data$overdispersion, beta_matSEXP = beta_mat_init, ridge_penalty = 0, tolerance = 1e-8, max_rel_mu_change = 1e5, max_iter = 100) res2 <- fitBeta_diagonal_fisher_scoring(Y = data$Y, model_matrix = new_model_matrix, exp_offset_matrix = exp(offset_matrix), thetas = data$overdispersion, beta_matSEXP = beta_mat_init, tolerance = 1e-8, max_rel_mu_change = 1e5, max_iter = 1000) expect_gt(cor(c(res1$beta_mat), c(res2$beta_mat)), 0.99) expect_gt(res2$iter, res1$iter) }) test_that("Fisher scoring and ridge penalized fisher scoring give consistent results", { data <- make_dataset(n_genes = 1, n_samples = 3000) offset_matrix <- matrix(log(data$size_factor), nrow=nrow(data$Y), ncol = ncol(data$Y), byrow = TRUE) # Fit Standard Model beta_mat_init <- estimate_betas_roughly(Y = data$Y, model_matrix = data$X, offset_matrix = offset_matrix) res1 <- fitBeta_fisher_scoring(Y = data$Y, model_matrix = data$X, exp_offset_matrix = exp(offset_matrix), thetas = data$overdispersion, beta_matSEXP = beta_mat_init, ridge_penalty = 0, tolerance = 1e-8, max_rel_mu_change = 1e5, max_iter = 100) res2 <- fitBeta_fisher_scoring(Y = data$Y, model_matrix = data$X, exp_offset_matrix = exp(offset_matrix), thetas = data$overdispersion, beta_matSEXP = beta_mat_init, ridge_penalty = 1e-6, tolerance = 1e-8, max_rel_mu_change = 1e5, max_iter = 100) expect_equal(res1, res2) set.seed(1) size <- 25 df <- data.frame( city = sample(c("Heidelberg", "Berlin", "New York"), size = size, replace = TRUE), fruit = sample(c("Apple", "Cherry", "Banana"), size = size, replace = TRUE), age = rnorm(size, mean = 50, sd = 1e-5), car = sample(c("blue", "big", "truck"), size = size, replace = TRUE) ) new_model_matrix <- model.matrix(~ . 
- 1, df) beta_mat_init <- estimate_betas_roughly(Y = data$Y[,1:size,drop=FALSE], model_matrix = new_model_matrix, offset_matrix = offset_matrix[,1:size,drop=FALSE]) res1 <- fitBeta_fisher_scoring(Y = data$Y[,1:size,drop=FALSE], model_matrix = new_model_matrix, exp_offset_matrix = exp(offset_matrix)[,1:size,drop=FALSE], thetas = data$overdispersion, beta_matSEXP = beta_mat_init, ridge_penalty = 0, tolerance = 1e-8, max_rel_mu_change = 1e5, max_iter = 100) res2 <- fitBeta_fisher_scoring(Y = data$Y[,1:size,drop=FALSE], model_matrix = new_model_matrix, exp_offset_matrix = exp(offset_matrix)[,1:size,drop=FALSE], thetas = data$overdispersion, beta_matSEXP = beta_mat_init, ridge_penalty = 1e-30, tolerance = 1e-8, max_rel_mu_change = 1e5, max_iter = 100) res3 <- fitBeta_fisher_scoring(Y = data$Y[,1:size,drop=FALSE], model_matrix = new_model_matrix, exp_offset_matrix = exp(offset_matrix)[,1:size,drop=FALSE], thetas = data$overdispersion, beta_matSEXP = beta_mat_init, ridge_penalty = 50, tolerance = 1e-8, max_rel_mu_change = 1e5, max_iter = 100) expect_equal(res1, res2) expect_lt(res3$beta_mat[6], res1$beta_mat[6]) # The age column is much smaller }) test_that("glm_gp_impl can handle all zero rows", { Y <- matrix(0, nrow = 2, ncol = 10) Y[1, ] <- rpois(n = 10, lambda = 3) X <- matrix(1, nrow = 10, ncol = 1) res <- glm_gp_impl(Y, X) expect_equal(res$Beta[2,1], -1e8) expect_equal(res$Mu[2, ], rep(0, 10)) }) test_that("glm_gp_impl can handle all zero columns", { Y <- matrix(0, nrow = 2, ncol = 10) Y[1, 1] <- 3 X <- matrix(1, nrow = 10, ncol = 1) res <- glm_gp_impl(Y, X) expect_equal(res$size_factors[2:10], rep(0.001, times = 9)) }) test_that("glm_gp_impl can handle all values zero", { Y <- matrix(0, nrow = 3, ncol = 10) X <- matrix(1, nrow = 10, ncol = 1) res <- glm_gp_impl(Y, X) expect_equal(res$size_factors, rep(0.001, times = 10)) expect_equal(res$overdispersions, rep(0, times = 3)) expect_equal(res$Beta[,1], rep(-1e8, times = 3)) expect_true(all(res$Mu == 0)) }) 
test_that("glm_gp_impl can handle dispersion of zero", {
  # With overdispersion fixed at 0 the NB model collapses to Poisson,
  # so the coefficients must match stats::glm with a poisson family.
  counts <- matrix(rnbinom(10, mu = 10, size = 1 / 2.3), nrow = 1, ncol = 10)
  design <- cbind(1, rnorm(10))
  fit <- glm_gp_impl(counts, design, overdispersion = 0, size_factors = FALSE)
  ref_fit <- glm(c(counts) ~ design - 1, family = "poisson")
  expect_equal(c(fit$Beta), unname(coef(ref_fit)))
})

test_that("glm_gp_impl can handle weird input", {
  counts <- matrix(c(7, 0, 0, 0, 0), nrow = 1)
  design <- cbind(1, c(0.64, -2.7, -0.94, 0.6, 0.56))
  offset_mat <- matrix(0, nrow = 1, ncol = 5)
  beta_start <- matrix(c(1, 1), nrow = 1)
  # Regression check: this input used to produce c(NA, NA) because mu
  # became exactly zero during the iterations.
  fit <- estimate_betas_fisher_scoring(counts, design, offset_mat,
                                       dispersions = 0,
                                       beta_mat_init = beta_start)
  # fitBeta_diagonal_fisher_scoring(counts, design, exp(offset_mat), 0, beta_start, tolerance = 1e-8, max_iter = 5000000)
  expect_false(any(is.na(c(fit$Beta))))
})

test_that("glm_gp_impl can handle weird input 2", {
  # Extreme starting values must not make the fit diverge to NA.
  counts <- matrix(c(34, 130, 1, 27, 1), nrow = 1)
  design <- cbind(c(0.03, -0.4, -0.4, 0.8, 0.1),
                  c(-0.1, -0.7, 0.7, -0.03, 0.2))
  offset_mat <- matrix(0, nrow = 1, ncol = 5)
  beta_start <- matrix(c(3000, -141), nrow = 1)
  fit <- estimate_betas_fisher_scoring(counts, design, offset_mat,
                                       dispersions = 0,
                                       beta_mat_init = beta_start)
  expect_false(any(is.na(c(fit$Beta))))
})

# test_that("glm_gp_impl works as expected", {
#   skip("No workable tests here")
#   # My method
#   data <- make_dataset(n_genes = 2000, n_samples = 30)
#   res <- glm_gp_impl(data$Y, model_matrix = data$X, verbose = TRUE)
#
#   # edgeR
#   edgeR_data <- edgeR::DGEList(data$Y)
#   edgeR_data <- edgeR::calcNormFactors(edgeR_data)
#   edgeR_data <- edgeR::estimateDisp(edgeR_data, data$X)
#   fit <- edgeR::glmFit(edgeR_data, design = data$X)
#
#   # DESeq2
#   dds <- DESeq2::DESeqDataSetFromMatrix(data$Y,
#            colData = data.frame(name = seq_len(ncol(data$Y))),
#            design = ~ 1)
#   dds <- DESeq2::estimateSizeFactors(dds)
#   dds <- DESeq2::estimateDispersions(dds)
#   dds <- DESeq2::nbinomWaldTest(dds, minmu = 1e-6)
#
#   res <- glm_gp_impl(data$Y, model_matrix = data$X,
#                      size_factors = DESeq2::sizeFactors(dds), verbose = TRUE)
#   plot(res$size_factors, DESeq2::sizeFactors(dds)); abline(0,1)
#
#   plot(res$Beta[,1], coef(dds)[,1] / log2(exp(1)), pch = 16, cex = 0.2, col ="red"); abline(0,1)
#   plot(res$Beta[,1], coef(fit)[,1]); abline(0,1)
#   plot(coef(dds)[,1] / log2(exp(1)), coef(fit)[,1]); abline(0,1)
#
#   plot(res$overdispersions, SummarizedExperiment::rowData(dds)$dispGeneEst, log = "xy"); abline(0,1)
#   plot(res$overdispersions, edgeR_data$tagwise.dispersion, log = "xy"); abline(0,1)
#   plot(SummarizedExperiment::rowData(dds)$dispGeneEst, edgeR_data$tagwise.dispersion, log = "xy"); abline(0,1)
# })

test_that("glm_gp_impl works with Delayed Input", {
  # The HDF5-backed (DelayedArray) path must reproduce the in-memory result.
  data <- make_dataset(n_genes = 2000, n_samples = 30)
  counts_hdf5 <- HDF5Array::writeHDF5Array(data$Y)
  fit_mem <- glm_gp_impl(data$Y, model_matrix = data$X, verbose = TRUE)
  fit_da <- glm_gp_impl(counts_hdf5, model_matrix = data$X, verbose = TRUE)
  expect_equal(fit_mem$Beta, fit_da$Beta)
  expect_equal(fit_mem$overdispersions, fit_da$overdispersions)
  expect_equal(fit_mem$Mu, as.matrix(fit_da$Mu))
  expect_equal(fit_mem$size_factors, fit_da$size_factors)
})
# Recorded HTTP response fixture (a saved object of class "response", as
# produced by httr). It captures a POST to
# http://atlas-dev.ohdsi.org/WebAPI/cohortdefinition/sql/ that failed with
# HTTP 500; the JSON payload carries a Java NullPointerException message.
# Sourcing this file recreates the response object so tests can replay the
# failure case without contacting the live WebAPI server.
structure(list(url = "http://atlas-dev.ohdsi.org/WebAPI/cohortdefinition/sql/", status_code = 500L, headers = structure(list(vary = "Accept-Encoding", `content-encoding` = "gzip", `content-type` = "application/json", `content-length` = "203", date = "Fri, 16 Jul 2021 19:37:29 GMT", connection = "close"), class = c("insensitive", "list" )), all_headers = list(list(status = 500L, version = "HTTP/1.1", headers = structure(list(vary = "Accept-Encoding", `content-encoding` = "gzip", `content-type` = "application/json", `content-length` = "203", date = "Fri, 16 Jul 2021 19:37:29 GMT", connection = "close"), class = c("insensitive", "list")))), cookies = structure(list(domain = logical(0), flag = logical(0), path = logical(0), secure = logical(0), expiration = structure(numeric(0), class = c("POSIXct", "POSIXt")), name = logical(0), value = logical(0)), row.names = integer(0), class = "data.frame"), content = charToRaw("{\"payload\":{\"cause\":null,\"stackTrace\":[],\"message\":\"An exception occurred: java.lang.NullPointerException\",\"localizedMessage\":\"An exception occurred: java.lang.NullPointerException\",\"suppressed\":[]},\"headers\":{\"id\":\"8df1a9ed-cbe2-1651-17cd-81bf068b4713\",\"timestamp\":1626464250512}}"), date = structure(1626464249, class = c("POSIXct", "POSIXt" ), tzone = "GMT"), times = c(redirect = 0, namelookup = 2.3e-05, connect = 2.4e-05, pretransfer = 6.8e-05, starttransfer = 0.021824, total = 0.021888)), class = "response")
/tests/testthat/mocks/CohortDefinition/atlas-dev.ohdsi.org/WebAPI/cohortdefinition/sql-f1cd93-POST.R
permissive
OHDSI/ROhdsiWebApi
R
false
false
1,599
r
# Recorded HTTP response fixture (a saved object of class "response", as
# produced by httr). It captures a POST to
# http://atlas-dev.ohdsi.org/WebAPI/cohortdefinition/sql/ that failed with
# HTTP 500; the JSON payload carries a Java NullPointerException message.
# Sourcing this file recreates the response object so tests can replay the
# failure case without contacting the live WebAPI server.
structure(list(url = "http://atlas-dev.ohdsi.org/WebAPI/cohortdefinition/sql/", status_code = 500L, headers = structure(list(vary = "Accept-Encoding", `content-encoding` = "gzip", `content-type` = "application/json", `content-length` = "203", date = "Fri, 16 Jul 2021 19:37:29 GMT", connection = "close"), class = c("insensitive", "list" )), all_headers = list(list(status = 500L, version = "HTTP/1.1", headers = structure(list(vary = "Accept-Encoding", `content-encoding` = "gzip", `content-type` = "application/json", `content-length` = "203", date = "Fri, 16 Jul 2021 19:37:29 GMT", connection = "close"), class = c("insensitive", "list")))), cookies = structure(list(domain = logical(0), flag = logical(0), path = logical(0), secure = logical(0), expiration = structure(numeric(0), class = c("POSIXct", "POSIXt")), name = logical(0), value = logical(0)), row.names = integer(0), class = "data.frame"), content = charToRaw("{\"payload\":{\"cause\":null,\"stackTrace\":[],\"message\":\"An exception occurred: java.lang.NullPointerException\",\"localizedMessage\":\"An exception occurred: java.lang.NullPointerException\",\"suppressed\":[]},\"headers\":{\"id\":\"8df1a9ed-cbe2-1651-17cd-81bf068b4713\",\"timestamp\":1626464250512}}"), date = structure(1626464249, class = c("POSIXct", "POSIXt" ), tzone = "GMT"), times = c(redirect = 0, namelookup = 2.3e-05, connect = 2.4e-05, pretransfer = 6.8e-05, starttransfer = 0.021824, total = 0.021888)), class = "response")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/create.R \name{create_model_directory} \alias{create_model_directory} \title{Creates a model entry in the library} \usage{ create_model_directory(model_id, ...) } \arguments{ \item{model_id}{A name for a model} } \value{ Invisible } \description{ Creates a model entry in the library }
/man/create_model_directory.Rd
permissive
odaniel1/stanworkflowR
R
false
true
364
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/create.R \name{create_model_directory} \alias{create_model_directory} \title{Creates a model entry in the library} \usage{ create_model_directory(model_id, ...) } \arguments{ \item{model_id}{A name for a model} } \value{ Invisible } \description{ Creates a model entry in the library }
#' Time needed (in days) to search databases
#'
#' This function calculates the time needed to search bibliographic databases
#' in a systematic review, based on inputs of the number of databases searched
#' ('databases') and the number of databases that can be searched per day and
#' their results exported ('db.day'). Default values are provided based on
#' the empirical study of environmental systematic reviews by Haddaway and
#' Westgate (2018) https://doi.org/10.1111/cobi.13231.
#'
#' @param databases Number of databases to be searched. Defaults to 9.
#' @param db.day Number of databases that can be searched (and their results
#'   exported) per day. Must be positive. Defaults to 4.
#' @return A numeric value: the number of days needed for database searching.
db.time <- function(databases = 9, db.day = 4) {
  # Guard against non-numeric input and a zero/negative daily rate, which
  # would otherwise silently yield Inf or a negative duration.
  stopifnot(is.numeric(databases), is.numeric(db.day), db.day > 0)
  databases / db.day
}
/R/db.time.R
permissive
nealhaddaway/predicter
R
false
false
606
r
#' Time needed (in days) to search databases
#'
#' This function calculates the time needed to search bibliographic databases
#' in a systematic review, based on inputs of the number of databases searched
#' ('databases') and the number of databases that can be searched per day and
#' their results exported ('db.day'). Default values are provided based on
#' the empirical study of environmental systematic reviews by Haddaway and
#' Westgate (2018) https://doi.org/10.1111/cobi.13231.
#'
#' @param databases Number of databases to be searched. Defaults to 9.
#' @param db.day Number of databases that can be searched (and their results
#'   exported) per day. Must be positive. Defaults to 4.
#' @return A numeric value: the number of days needed for database searching.
db.time <- function(databases = 9, db.day = 4) {
  # Guard against non-numeric input and a zero/negative daily rate, which
  # would otherwise silently yield Inf or a negative duration.
  stopifnot(is.numeric(databases), is.numeric(db.day), db.day > 0)
  databases / db.day
}
# Shiny app for searching GEO data series (GSE) on the human Affymetrix
# GPL570 array and assigning samples to user-defined categories.
library(shiny)
# need a few more now...
# if you need to install these to run on your local machine uncomment the following line:
# install.packages(c('dplyr','tidyr','ggplot2','RColorBrewer','readr','stringr','shiny','shinythemes','shinyjs','DT'))
library(shinythemes)
library(shinyjs)
library(DT)
library(dplyr)
library(tidyr)
library(ggplot2)
library(RColorBrewer)
library(readr)
library(stringr)

# 2017-09-04
# CSV containing all series (GSE) using the human Affymetrix array (GPL570).
# Our search will look through here for the appropriate keywords.
# Checkboxes within the table approach from
# 'http://stackoverflow.com/questions/26026740/rstudio-shiny-list-from-checking-rows-in-datatables'
# Search function is not yet implemented in the UI.
#
# FIX: this read.csv() was commented out, so `all_gpl570` was never defined
# and the script failed at `selected <- all_gpl570[, c(2, 3, 8)]` below.
all_gpl570 <- read.csv('all_gpl570.csv')

# search strings -- next version we take these from the UI
search_string_motorNeurons <- "\\bbrain\\b|\\bneurons?(al)?\\b&\\bmotor\\b"
search_string_diabetes <- "\\bpancreas\\b|\\bislets?\\b|\\bbeta-cells\\b|\\bdiabetes\\b"
search_string_hepatocytes <- "\\bliver\\b&\\bhepatocytes?\\b"
search_string_cardiomyocytes <- "\\bheart\\b&\\bcardiomyocytes?\\b"

# cut back the columns from the GSE info to make it more readable
selected <- all_gpl570[, c(2, 3, 8)]
selected2 <- selected  # NOTE: copied before `category` is added, as in the original

# fill each row with 'not assigned' label, until they are placed in groups
selected$category <- rep("Not assigned", nrow(selected))

ui <- fluidPage(
  # creation of a navigation bar and multiple pages
  navbarPage("Bioinformatics Software",
    tabPanel("Search for GEO data series (GSE)",
      # setting user inputed values and displaying values back at the user
      textInput("Key", "Enter search terms, separated by commas", value = ""),
      verbatimTextOutput("Key")
    ),
    tabPanel("Define categories for GEO samples (GSM)",
      uiOutput("page2"),
      textInput("cat1", "Define Category 1"),
      verbatimTextOutput("cat1"),
      textInput("cat2", "Define Category 2"),
      verbatimTextOutput("cat2"),
      textInput("cat3", "Define Category 3"),
      verbatimTextOutput("cat3")
    ),
    # changed the format of this slightly to accomodate the DT package
    # (for row selection)
    tabPanel("Assign samples to categories",
      uiOutput("page3"),
      DT::dataTableOutput("page3table"),
      actionButton("Lock", "Assign Categories"),
      verbatimTextOutput("Button"),
      verbatimTextOutput("Rows"),
      verbatimTextOutput("Currentcategory")
    ),
    # currently, page 4 displays the selected rows from page 3
    tabPanel("Preview results",
      uiOutput("page4")
    )
  )
)

server <- function(input, output) {
  # echo the user's inputs back to them
  output$Key <- renderText(unlist(strsplit(input$Key, ",")))
  output$cat1 <- renderText(input$cat1)
  output$cat2 <- renderText(input$cat2)
  output$cat3 <- renderText(input$cat3)
  output$Button <- renderText(input$Lock)
  output$Rows <- renderText(s())
  output$Currentcategory <- renderText(currentcategory())

  # category chooser, built from the names the user typed on page 2
  output$page3 <- renderUI(
    fluidRow(
      column(3,
        selectInput("selection", "Select a Category",
                    c(input$cat1, input$cat2, input$cat3, "Not included"))
      )
    )
  )

  # render the table with the specified number of rows (will be keywords)
  # without the option to further search (although we may want this)
  output$page3table <- DT::renderDataTable(selected[1:8, ],
                                           options = list(searching = FALSE))

  currentcategory <- eventReactive(input$Lock, input$selection)
  s <- eventReactive(input$Lock, input$page3table_rows_selected)

  # FIX: the original code did
  #   selected2[s(), 'category'] <- eventReactive(input$Lock, currentcategory())
  # at server start-up, which assigns a reactive object into a plain data
  # frame and is not valid reactive code (the author's comment noted the
  # assignment did not persist). Store the table in reactiveValues and
  # update it each time the 'Assign Categories' button is pressed.
  assigned <- reactiveValues(tbl = selected2)
  observeEvent(input$Lock, {
    rows <- input$page3table_rows_selected
    if (length(rows) > 0) {
      assigned$tbl[rows, "category"] <- input$selection
    }
  })
  # page 4 rendering of `assigned$tbl` is still to be implemented
}

shinyApp(ui, server)
/Bioinformatics Interface 3.1.R
no_license
VictorTong7/Universal-Bioinformatics-Platform
R
false
false
4,589
r
# Shiny app for searching GEO data series (GSE) on the human Affymetrix
# GPL570 array and assigning samples to user-defined categories.
library(shiny)
# need a few more now...
# if you need to install these to run on your local machine uncomment the following line:
# install.packages(c('dplyr','tidyr','ggplot2','RColorBrewer','readr','stringr','shiny','shinythemes','shinyjs','DT'))
library(shinythemes)
library(shinyjs)
library(DT)
library(dplyr)
library(tidyr)
library(ggplot2)
library(RColorBrewer)
library(readr)
library(stringr)

# 2017-09-04
# CSV containing all series (GSE) using the human Affymetrix array (GPL570).
# Our search will look through here for the appropriate keywords.
# Checkboxes within the table approach from
# 'http://stackoverflow.com/questions/26026740/rstudio-shiny-list-from-checking-rows-in-datatables'
# Search function is not yet implemented in the UI.
#
# FIX: this read.csv() was commented out, so `all_gpl570` was never defined
# and the script failed at `selected <- all_gpl570[, c(2, 3, 8)]` below.
all_gpl570 <- read.csv('all_gpl570.csv')

# search strings -- next version we take these from the UI
search_string_motorNeurons <- "\\bbrain\\b|\\bneurons?(al)?\\b&\\bmotor\\b"
search_string_diabetes <- "\\bpancreas\\b|\\bislets?\\b|\\bbeta-cells\\b|\\bdiabetes\\b"
search_string_hepatocytes <- "\\bliver\\b&\\bhepatocytes?\\b"
search_string_cardiomyocytes <- "\\bheart\\b&\\bcardiomyocytes?\\b"

# cut back the columns from the GSE info to make it more readable
selected <- all_gpl570[, c(2, 3, 8)]
selected2 <- selected  # NOTE: copied before `category` is added, as in the original

# fill each row with 'not assigned' label, until they are placed in groups
selected$category <- rep("Not assigned", nrow(selected))

ui <- fluidPage(
  # creation of a navigation bar and multiple pages
  navbarPage("Bioinformatics Software",
    tabPanel("Search for GEO data series (GSE)",
      # setting user inputed values and displaying values back at the user
      textInput("Key", "Enter search terms, separated by commas", value = ""),
      verbatimTextOutput("Key")
    ),
    tabPanel("Define categories for GEO samples (GSM)",
      uiOutput("page2"),
      textInput("cat1", "Define Category 1"),
      verbatimTextOutput("cat1"),
      textInput("cat2", "Define Category 2"),
      verbatimTextOutput("cat2"),
      textInput("cat3", "Define Category 3"),
      verbatimTextOutput("cat3")
    ),
    # changed the format of this slightly to accomodate the DT package
    # (for row selection)
    tabPanel("Assign samples to categories",
      uiOutput("page3"),
      DT::dataTableOutput("page3table"),
      actionButton("Lock", "Assign Categories"),
      verbatimTextOutput("Button"),
      verbatimTextOutput("Rows"),
      verbatimTextOutput("Currentcategory")
    ),
    # currently, page 4 displays the selected rows from page 3
    tabPanel("Preview results",
      uiOutput("page4")
    )
  )
)

server <- function(input, output) {
  # echo the user's inputs back to them
  output$Key <- renderText(unlist(strsplit(input$Key, ",")))
  output$cat1 <- renderText(input$cat1)
  output$cat2 <- renderText(input$cat2)
  output$cat3 <- renderText(input$cat3)
  output$Button <- renderText(input$Lock)
  output$Rows <- renderText(s())
  output$Currentcategory <- renderText(currentcategory())

  # category chooser, built from the names the user typed on page 2
  output$page3 <- renderUI(
    fluidRow(
      column(3,
        selectInput("selection", "Select a Category",
                    c(input$cat1, input$cat2, input$cat3, "Not included"))
      )
    )
  )

  # render the table with the specified number of rows (will be keywords)
  # without the option to further search (although we may want this)
  output$page3table <- DT::renderDataTable(selected[1:8, ],
                                           options = list(searching = FALSE))

  currentcategory <- eventReactive(input$Lock, input$selection)
  s <- eventReactive(input$Lock, input$page3table_rows_selected)

  # FIX: the original code did
  #   selected2[s(), 'category'] <- eventReactive(input$Lock, currentcategory())
  # at server start-up, which assigns a reactive object into a plain data
  # frame and is not valid reactive code (the author's comment noted the
  # assignment did not persist). Store the table in reactiveValues and
  # update it each time the 'Assign Categories' button is pressed.
  assigned <- reactiveValues(tbl = selected2)
  observeEvent(input$Lock, {
    rows <- input$page3table_rows_selected
    if (length(rows) > 0) {
      assigned$tbl[rows, "category"] <- input$selection
    }
  })
  # page 4 rendering of `assigned$tbl` is still to be implemented
}

shinyApp(ui, server)
# PROGRAM 11.2

# Estimate the trend component of a univariate time series with a
# state-space trend model, fitted by the compiled routine C_trend.
#   y          : univariate time series (plain vector or ts)
#   trend.order: order of the trend model (positive integer)
#   tau2.ini   : initial variance of the system noise (NULL = automatic search)
#   delta      : increment used when searching the system-noise variance
#   plot       : if TRUE, plot the trend and the residuals via plot.trend()
# Returns an object of class "trend" with components trend, residual, tau2,
# sigma2, llkhood, aic and cov.
trend <- function(y, trend.order = 1, tau2.ini = NULL, delta, plot = TRUE, ...)
{
  n <- length(y)   # data length
  m <- trend.order
  if (m < 1)
    stop("Trend order must be a positive integer.")

  # iopt selects the search method; with no initial tau2 the Fortran code
  # determines the system-noise variance itself
  iopt <- 1
  if (is.null(tau2.ini)) {
    iopt <- 0
    tau2.ini <- 0
    delta <- 0
  }

  z <- .Fortran(C_trend, as.double(y), as.integer(n), as.integer(m),
                as.integer(iopt), as.double(tau2.ini), as.double(delta),
                tau2 = double(1), sig2 = double(1), lkhood = double(1),
                aic = double(1), xss = double(n * m), vss = double(n * m * m),
                rs = double(n))

  xss <- array(z$xss, dim = c(m, n))
  trend <- xss[1, 1:n]                   # smoothed trend component
  vss <- array(z$vss, dim = c(m, m, n))
  vss <- vss[1, 1, 1:n]                  # its estimated variance
  res <- z$rs                            # residuals

  # carry the ts attributes of the input over to the outputs
  ts.atr <- tsp(y)
  if (!is.null(ts.atr)) {
    trend <- ts(trend, start = ts.atr[1], frequency = ts.atr[3])
    res <- ts(res, start = ts.atr[1], frequency = ts.atr[3])
  }

  trend.out <- list(trend = trend, residual = res, tau2 = z$tau2,
                    sigma2 = z$sig2, llkhood = z$lkhood, aic = z$aic,
                    cov = vss)
  class(trend.out) <- "trend"

  if (plot) {
    # Re-create the input series under its original name so that
    # plot.trend() can display that name in the plot title.
    # NOTE(review): eval(parse()) is fragile, kept for compatibility.
    rdata <- deparse(substitute(y))
    eval(parse(text = paste(rdata, "<- y")))
    eval(parse(text = paste("plot.trend(trend.out,", rdata, ", ...)")))
  }
  return(trend.out)
}

# Print method for "trend" objects: report the estimated system-noise and
# observation-noise variances, the log-likelihood and the AIC.
print.trend <- function(x, ...)
{
  message(gettextf("\n tau2 \t\t%12.5e", x$tau2), domain = NA)
  message(gettextf(" sigma2 \t%12.5e", x$sigma2), domain = NA)
  message(gettextf(" log-likelihood\t%12.3f", x$llkhood), domain = NA)
  message(gettextf(" aic\t\t%12.3f\n", x$aic), domain = NA)
}

# Plot method for "trend" objects: the data with the trend component
# overlaid on top, and the residuals in a second panel below.
# `rdata` optionally supplies the original series for the background plot.
plot.trend <- function(x, rdata = NULL, ...)
{
  trend <- x$trend
  # FIX: was `x$res`, which only worked through `$` partial matching
  # against the list element named `residual`
  res <- x$residual

  # pick an x-axis label from the sampling frequency of the series
  ts.atr <- tsp(trend)
  if (is.null(ts.atr)) {
    xtime <- "time"
  } else {
    freq <- ts.atr[3]
    if (freq == 4 || freq == 12) {
      xtime <- "year"
    } else if (freq == 24) {
      xtime <- "day"
    } else if (freq == 52 || freq == 365/7) {
      xtime <- "week"
    } else if (freq == 365.25/7 || freq == 52.18) {
      xtime <- "week"
    } else {
      xtime <- "time"
    }
  }

  old.par <- par(no.readonly = TRUE)
  par(mfcol = c(2, 1), xaxs = "i")
  ylim <- range(x$trend, rdata, na.rm = TRUE)

  if (is.null(rdata)) {
    mtitle <- paste("Trend component")
  } else {
    tsname <- deparse(substitute(rdata))
    mtitle <- paste(tsname, "and trend component")
    plot(rdata, type = "l", ylim = ylim, xlab = "", ylab = "", main = "",
         xaxt = "n", ...)
    par(new = TRUE)  # overlay the trend on the data panel
  }

  plot(trend, type = "l", col = 2, ylim = ylim, xlab = xtime, ylab = "",
       main = mtitle, ...)
  plot(res, type = "h", xlab = xtime, ylab = "", main = "Residuals", ...)
  abline(h = 0)
  par(old.par)  # restore the caller's graphics settings
}
/R/trend.R
no_license
cran/TSSS
R
false
false
3,224
r
# PROGRAM 11.2

# Estimate the trend component of a univariate time series with a
# state-space trend model, fitted by the compiled routine C_trend.
#   y          : univariate time series (plain vector or ts)
#   trend.order: order of the trend model (positive integer)
#   tau2.ini   : initial variance of the system noise (NULL = automatic search)
#   delta      : increment used when searching the system-noise variance
#   plot       : if TRUE, plot the trend and the residuals via plot.trend()
# Returns an object of class "trend" with components trend, residual, tau2,
# sigma2, llkhood, aic and cov.
trend <- function(y, trend.order = 1, tau2.ini = NULL, delta, plot = TRUE, ...)
{
  n <- length(y)   # data length
  m <- trend.order
  if (m < 1)
    stop("Trend order must be a positive integer.")

  # iopt selects the search method; with no initial tau2 the Fortran code
  # determines the system-noise variance itself
  iopt <- 1
  if (is.null(tau2.ini)) {
    iopt <- 0
    tau2.ini <- 0
    delta <- 0
  }

  z <- .Fortran(C_trend, as.double(y), as.integer(n), as.integer(m),
                as.integer(iopt), as.double(tau2.ini), as.double(delta),
                tau2 = double(1), sig2 = double(1), lkhood = double(1),
                aic = double(1), xss = double(n * m), vss = double(n * m * m),
                rs = double(n))

  xss <- array(z$xss, dim = c(m, n))
  trend <- xss[1, 1:n]                   # smoothed trend component
  vss <- array(z$vss, dim = c(m, m, n))
  vss <- vss[1, 1, 1:n]                  # its estimated variance
  res <- z$rs                            # residuals

  # carry the ts attributes of the input over to the outputs
  ts.atr <- tsp(y)
  if (!is.null(ts.atr)) {
    trend <- ts(trend, start = ts.atr[1], frequency = ts.atr[3])
    res <- ts(res, start = ts.atr[1], frequency = ts.atr[3])
  }

  trend.out <- list(trend = trend, residual = res, tau2 = z$tau2,
                    sigma2 = z$sig2, llkhood = z$lkhood, aic = z$aic,
                    cov = vss)
  class(trend.out) <- "trend"

  if (plot) {
    # Re-create the input series under its original name so that
    # plot.trend() can display that name in the plot title.
    # NOTE(review): eval(parse()) is fragile, kept for compatibility.
    rdata <- deparse(substitute(y))
    eval(parse(text = paste(rdata, "<- y")))
    eval(parse(text = paste("plot.trend(trend.out,", rdata, ", ...)")))
  }
  return(trend.out)
}

# Print method for "trend" objects: report the estimated system-noise and
# observation-noise variances, the log-likelihood and the AIC.
print.trend <- function(x, ...)
{
  message(gettextf("\n tau2 \t\t%12.5e", x$tau2), domain = NA)
  message(gettextf(" sigma2 \t%12.5e", x$sigma2), domain = NA)
  message(gettextf(" log-likelihood\t%12.3f", x$llkhood), domain = NA)
  message(gettextf(" aic\t\t%12.3f\n", x$aic), domain = NA)
}

# Plot method for "trend" objects: the data with the trend component
# overlaid on top, and the residuals in a second panel below.
# `rdata` optionally supplies the original series for the background plot.
plot.trend <- function(x, rdata = NULL, ...)
{
  trend <- x$trend
  # FIX: was `x$res`, which only worked through `$` partial matching
  # against the list element named `residual`
  res <- x$residual

  # pick an x-axis label from the sampling frequency of the series
  ts.atr <- tsp(trend)
  if (is.null(ts.atr)) {
    xtime <- "time"
  } else {
    freq <- ts.atr[3]
    if (freq == 4 || freq == 12) {
      xtime <- "year"
    } else if (freq == 24) {
      xtime <- "day"
    } else if (freq == 52 || freq == 365/7) {
      xtime <- "week"
    } else if (freq == 365.25/7 || freq == 52.18) {
      xtime <- "week"
    } else {
      xtime <- "time"
    }
  }

  old.par <- par(no.readonly = TRUE)
  par(mfcol = c(2, 1), xaxs = "i")
  ylim <- range(x$trend, rdata, na.rm = TRUE)

  if (is.null(rdata)) {
    mtitle <- paste("Trend component")
  } else {
    tsname <- deparse(substitute(rdata))
    mtitle <- paste(tsname, "and trend component")
    plot(rdata, type = "l", ylim = ylim, xlab = "", ylab = "", main = "",
         xaxt = "n", ...)
    par(new = TRUE)  # overlay the trend on the data panel
  }

  plot(trend, type = "l", col = 2, ylim = ylim, xlab = xtime, ylab = "",
       main = mtitle, ...)
  plot(res, type = "h", xlab = xtime, ylab = "", main = "Residuals", ...)
  abline(h = 0)
  par(old.par)  # restore the caller's graphics settings
}
# Column-wise maximum likelihood estimation for a collection of univariate
# distributions. Each function takes a numeric matrix `x` (observations in
# rows, one variable per column) and returns a matrix with one row per
# column of `x`, holding the parameter MLEs and the maximised
# log-likelihood.

#[export]
# Maxwell-Boltzmann distribution, column-wise.
colmaxboltz.mle <- function(x) {
  n <- dim(x)[1]
  a <- sqrt(Rfast::colsums(x^2) / (3 * n))
  loglik <- n/2 * log(2/pi) + 2 * Rfast::colsums(Rfast::Log(x)) -
    1.5 * n - 3 * n * log(a)
  res <- cbind(a, loglik)
  colnames(res) <- c("alpha", "loglikelihood")
  res
}

#[export]
# Poisson distribution, column-wise. The MLE of lambda is the column mean.
colpoisson.mle <- function(x) {
  n <- dim(x)[1]
  sx <- Rfast::colsums(x)
  loglik <- -sx + sx * log(sx/n) - Rfast::colsums(Rfast::Lgamma(x + 1))
  res <- cbind(sx/n, loglik)
  colnames(res) <- c("lambda", "log-likelihood")
  res
}

#[export]
# Gamma distribution, column-wise, via Newton iterations on the shape.
colgammamle <- function(x, tol = 1e-07) {
  n <- dim(x)[1]
  m <- Rfast::colmeans(x)
  slx <- Rfast::colmeans(Rfast::Log(x))
  s <- log(m) - slx
  # Minka-style starting value for the shape, then Newton steps
  a1 <- 3 - s + sqrt((s - 3)^2 + 24 * s)
  a1 <- a1 / (12 * s)
  a2 <- a1 - (log(a1) - Rfast::Digamma(a1) - s) / (1/a1 - Rfast::Trigamma(a1))
  while (max(abs(a2 - a1)) > tol) {
    a1 <- a2
    a2 <- a1 - (log(a1) - Rfast::Digamma(a1) - s) / (1/a1 - Rfast::Trigamma(a1))
  }
  b <- a2 / m
  loglik <- -b * n * m + (a2 - 1) * n * slx + n * a2 * log(b) -
    n * Rfast::Lgamma(a2)
  res <- cbind(a2, b, loglik)
  colnames(res) <- c("shape", "scale", "log-likelihood")
  res
}

#[export]
# Geometric distribution, column-wise. type = 1 counts failures until the
# first success, type = 2 counts trials including the success.
colgeom.mle <- function(x, type = 1) {
  if (type == 1) {
    sx <- Rfast::colsums(x)
    # FIX: was `n <- dim(x)`, a length-2 vector (rows, cols) that was
    # silently recycled through prob and loglik
    n <- dim(x)[1]
    prob <- 1 / (1 + sx/n)
    loglik <- n * log(prob) + sx * log(1 - prob)
  } else {
    n <- dim(x)[1]
    prob <- n / Rfast::colsums(x)
    loglik <- n * log(prob) + (n/prob - n) * log(1 - prob)
  }
  res <- cbind(prob, loglik)
  colnames(res) <- c("prob of success", "log-likelihood")
  res
}

#[export]
# Inverse Gaussian distribution, column-wise.
colinvgauss.mle <- function(x) {
  n <- dim(x)[1]
  sx <- Rfast::colsums(x)
  sx2 <- Rfast::colsums(1/x)
  m <- sx / n
  lambda <- 1 / (sx2/n - 1/m)
  loglik <- n * 0.5 * log(0.5 * lambda/pi) -
    1.5 * Rfast::colsums(Rfast::Log(x)) -
    0.5 * lambda/m^2 * (-sx + m^2 * sx2)
  res <- cbind(m, lambda, loglik)
  colnames(res) <- c("mu", "lambda", "log-likelihood")
  res
}

#[export]
# Laplace distribution, column-wise: median and mean absolute deviation.
collaplace.mle <- function(x) {
  n <- dim(x)[1]
  m <- Rfast::colMedians(x)
  b <- Rfast::colmeans(abs(Rfast::eachrow(x, m, oper = "-")))
  loglik <- -n * log(2 * b) - n
  res <- cbind(m, b, loglik)
  colnames(res) <- c("location", "scale", "log-likelihood")
  res
}

#[export]
# Lindley distribution, column-wise (closed-form MLE of theta).
collindley.mle <- function(x) {
  n <- dim(x)[1]
  sx <- Rfast::colsums(x)
  a <- sx / n
  b <- a - 1
  delta <- b^2 + 8 * a
  theta <- 0.5 * (-b + sqrt(delta)) / a
  loglik <- 2 * n * log(theta) - n * log1p(theta) +
    Rfast::colsums(log1p(x)) - theta * sx
  res <- cbind(theta, loglik)
  colnames(res) <- c("theta", "log-likelihood")
  res
}

#[export]
# Normal distribution, column-wise; reports both the biased (MLE) and the
# unbiased variance, the log-likelihood uses the biased one.
colnormal.mle <- function(x) {
  n <- dim(x)[1]
  m <- Rfast::colmeans(x)
  s <- Rfast::colVars(x)
  ss <- s * (n - 1) / n
  loglik <- -0.5 * n * (log(2 * pi) + log(ss)) - 0.5 * n
  res <- cbind(m, ss, s, loglik)
  colnames(res) <- c("mean", "biased variance", "unbiased variance",
                     "log-likelihood")
  res
}

#[export]
# Normal distribution with mean parameterised on the log scale, column-wise.
colnormlog.mle <- function(x) {
  dm <- dim(x)
  n <- dm[1]
  mx <- Rfast::colmeans(x)
  m <- log(mx)
  sigma <- Rfast::colmeans(x^2) - mx^2
  loglik <- -0.5 * n * log(2 * pi * sigma) - 0.5 * n
  res <- cbind(mx, m, sigma, sigma * n/(n - 1), loglik)
  colnames(res) <- c("exp_mu", "mu", "biased variance", "unbiased variance",
                     "log-lik")
  res
}

#[export]
# Pareto distribution, column-wise: scale = column minimum, closed-form shape.
colpareto.mle <- function(x) {
  n <- dim(x)[1]
  xm <- Rfast::colMins(x, value = TRUE)
  com <- n * log(xm)
  slx <- Rfast::colsums(Rfast::Log(x))
  a <- n / (slx - com)
  loglik <- n * log(a) + a * com - (a + 1) * slx
  res <- cbind(xm, a, loglik)
  colnames(res) <- c("scale", "shape", "log-likelihood")
  res
}

#[export]
# Rayleigh distribution, column-wise.
colrayleigh.mle <- function(x) {
  n <- dim(x)[1]
  sigma <- 0.5 * Rfast::colmeans(x^2)
  loglik <- Rfast::colsums(Rfast::Log(x)) - n * log(sigma) - n
  res <- cbind(sigma, loglik)
  colnames(res) <- c("sigma", "log-likelihood")
  res
}

#[export]
# von Mises distribution, column-wise. The concentration is found by
# Newton-Raphson on the Bessel-function equation; values that diverge to
# NA are capped at 709 (the largest argument before besselI overflows).
colvm.mle <- function(x, tol = 1e-07) {
  n <- dim(x)[1]
  C <- Rfast::colmeans(cos(x))
  S <- Rfast::colmeans(sin(x))
  ep <- (C < 0)
  mu <- atan(S/C)
  mu[ep] <- mu[ep] + pi
  con <- Rfast::colsums(cos(Rfast::eachrow(x, mu, oper = "-")))
  R <- C^2 + S^2
  k1 <- (1.28 - 0.53 * R) * tan(0.5 * pi * sqrt(R))
  der <- con - n * besselI(k1, 1)/besselI(k1, 0)
  a <- besselI(k1, 0)^2/2 + besselI(k1, 2) * besselI(k1, 0)/2 -
    besselI(k1, 1)^2
  der2 <- n * a/besselI(k1, 0)^2
  k2 <- k1 + der/der2
  while (max(abs(k1 - k2)) > tol) {
    k1 <- k2
    der <- con - n * besselI(k1, 1)/besselI(k1, 0)
    a <- besselI(k1, 0)^2/2 + besselI(k1, 2) * besselI(k1, 0)/2 -
      besselI(k1, 1)^2
    der2 <- n * a/besselI(k1, 0)^2
    k2 <- k1 + der/der2
  }
  k2[which(is.na(k2))] <- 709
  loglik <- k2 * con - n * log(2 * pi) -
    n * (log(besselI(k2, 0, expon.scaled = TRUE)) + k2)
  res <- cbind(mu, k2, loglik)
  colnames(res) <- c("mean", "concentration", "log-likelihood")
  res
}

#[export]
# Weibull distribution, column-wise; delegates to the compiled routine.
colweibull.mle <- function(x, tol = 1e-09, maxiters = 100, parallel = FALSE) {
  res <- .Call("Rfast_colweibull_mle", PACKAGE = "Rfast", x, tol, maxiters,
               parallel)
  colnames(res) <- c("shape", "scale", "log-lik")
  res
}

#[export]
# Two-parameter (shifted) exponential distribution, column-wise:
# a = column minimum, b = mean exceedance over the minimum.
colexp2.mle <- function(x) {
  # FIX: was `Rfast::colMins(x)`, which returns the row *indices* of the
  # minima; `value = TRUE` returns the minimum values (cf. colpareto.mle)
  a <- Rfast::colMins(x, value = TRUE)
  n <- dim(x)[1]
  # FIX: b is mean(x) - a; the original divided the column means by n a
  # second time ("colmeans(x)/n - a")
  b <- Rfast::colmeans(x) - a
  # FIX: at the MLE, sum(x - a)/b = n, so loglik = -n*log(b) - n
  # (was "- 1/n")
  loglik <- -n * log(b) - n
  res <- cbind(a, b, loglik)
  colnames(res) <- c("a", "b", "log-likelihood")
  res
}

#[export]
# Exponential distribution, column-wise, parameterised by the mean.
colexpmle <- function(x) {
  n <- dim(x)[1]
  lambda <- Rfast::colmeans(x)
  loglik <- -n * log(lambda) - n
  res <- cbind(lambda, loglik)
  colnames(res) <- c("lambda", "log-likelihood")
  res
}
/R/column-wise_mle.R
no_license
RfastOfficial/Rfast
R
false
false
5,802
r
# Column-wise maximum likelihood estimation for a collection of univariate
# distributions. Each function takes a numeric matrix `x` (observations in
# rows, one variable per column) and returns a matrix with one row per
# column of `x`, holding the parameter MLEs and the maximised
# log-likelihood.

#[export]
# Maxwell-Boltzmann distribution, column-wise.
colmaxboltz.mle <- function(x) {
  n <- dim(x)[1]
  a <- sqrt(Rfast::colsums(x^2) / (3 * n))
  loglik <- n/2 * log(2/pi) + 2 * Rfast::colsums(Rfast::Log(x)) -
    1.5 * n - 3 * n * log(a)
  res <- cbind(a, loglik)
  colnames(res) <- c("alpha", "loglikelihood")
  res
}

#[export]
# Poisson distribution, column-wise. The MLE of lambda is the column mean.
colpoisson.mle <- function(x) {
  n <- dim(x)[1]
  sx <- Rfast::colsums(x)
  loglik <- -sx + sx * log(sx/n) - Rfast::colsums(Rfast::Lgamma(x + 1))
  res <- cbind(sx/n, loglik)
  colnames(res) <- c("lambda", "log-likelihood")
  res
}

#[export]
# Gamma distribution, column-wise, via Newton iterations on the shape.
colgammamle <- function(x, tol = 1e-07) {
  n <- dim(x)[1]
  m <- Rfast::colmeans(x)
  slx <- Rfast::colmeans(Rfast::Log(x))
  s <- log(m) - slx
  # Minka-style starting value for the shape, then Newton steps
  a1 <- 3 - s + sqrt((s - 3)^2 + 24 * s)
  a1 <- a1 / (12 * s)
  a2 <- a1 - (log(a1) - Rfast::Digamma(a1) - s) / (1/a1 - Rfast::Trigamma(a1))
  while (max(abs(a2 - a1)) > tol) {
    a1 <- a2
    a2 <- a1 - (log(a1) - Rfast::Digamma(a1) - s) / (1/a1 - Rfast::Trigamma(a1))
  }
  b <- a2 / m
  loglik <- -b * n * m + (a2 - 1) * n * slx + n * a2 * log(b) -
    n * Rfast::Lgamma(a2)
  res <- cbind(a2, b, loglik)
  colnames(res) <- c("shape", "scale", "log-likelihood")
  res
}

#[export]
# Geometric distribution, column-wise. type = 1 counts failures until the
# first success, type = 2 counts trials including the success.
colgeom.mle <- function(x, type = 1) {
  if (type == 1) {
    sx <- Rfast::colsums(x)
    # FIX: was `n <- dim(x)`, a length-2 vector (rows, cols) that was
    # silently recycled through prob and loglik
    n <- dim(x)[1]
    prob <- 1 / (1 + sx/n)
    loglik <- n * log(prob) + sx * log(1 - prob)
  } else {
    n <- dim(x)[1]
    prob <- n / Rfast::colsums(x)
    loglik <- n * log(prob) + (n/prob - n) * log(1 - prob)
  }
  res <- cbind(prob, loglik)
  colnames(res) <- c("prob of success", "log-likelihood")
  res
}

#[export]
# Inverse Gaussian distribution, column-wise.
colinvgauss.mle <- function(x) {
  n <- dim(x)[1]
  sx <- Rfast::colsums(x)
  sx2 <- Rfast::colsums(1/x)
  m <- sx / n
  lambda <- 1 / (sx2/n - 1/m)
  loglik <- n * 0.5 * log(0.5 * lambda/pi) -
    1.5 * Rfast::colsums(Rfast::Log(x)) -
    0.5 * lambda/m^2 * (-sx + m^2 * sx2)
  res <- cbind(m, lambda, loglik)
  colnames(res) <- c("mu", "lambda", "log-likelihood")
  res
}

#[export]
# Laplace distribution, column-wise: median and mean absolute deviation.
collaplace.mle <- function(x) {
  n <- dim(x)[1]
  m <- Rfast::colMedians(x)
  b <- Rfast::colmeans(abs(Rfast::eachrow(x, m, oper = "-")))
  loglik <- -n * log(2 * b) - n
  res <- cbind(m, b, loglik)
  colnames(res) <- c("location", "scale", "log-likelihood")
  res
}

#[export]
# Lindley distribution, column-wise (closed-form MLE of theta).
collindley.mle <- function(x) {
  n <- dim(x)[1]
  sx <- Rfast::colsums(x)
  a <- sx / n
  b <- a - 1
  delta <- b^2 + 8 * a
  theta <- 0.5 * (-b + sqrt(delta)) / a
  loglik <- 2 * n * log(theta) - n * log1p(theta) +
    Rfast::colsums(log1p(x)) - theta * sx
  res <- cbind(theta, loglik)
  colnames(res) <- c("theta", "log-likelihood")
  res
}

#[export]
# Normal distribution, column-wise; reports both the biased (MLE) and the
# unbiased variance, the log-likelihood uses the biased one.
colnormal.mle <- function(x) {
  n <- dim(x)[1]
  m <- Rfast::colmeans(x)
  s <- Rfast::colVars(x)
  ss <- s * (n - 1) / n
  loglik <- -0.5 * n * (log(2 * pi) + log(ss)) - 0.5 * n
  res <- cbind(m, ss, s, loglik)
  colnames(res) <- c("mean", "biased variance", "unbiased variance",
                     "log-likelihood")
  res
}

#[export]
# Normal distribution with mean parameterised on the log scale, column-wise.
colnormlog.mle <- function(x) {
  dm <- dim(x)
  n <- dm[1]
  mx <- Rfast::colmeans(x)
  m <- log(mx)
  sigma <- Rfast::colmeans(x^2) - mx^2
  loglik <- -0.5 * n * log(2 * pi * sigma) - 0.5 * n
  res <- cbind(mx, m, sigma, sigma * n/(n - 1), loglik)
  colnames(res) <- c("exp_mu", "mu", "biased variance", "unbiased variance",
                     "log-lik")
  res
}

#[export]
# Pareto distribution, column-wise: scale = column minimum, closed-form shape.
colpareto.mle <- function(x) {
  n <- dim(x)[1]
  xm <- Rfast::colMins(x, value = TRUE)
  com <- n * log(xm)
  slx <- Rfast::colsums(Rfast::Log(x))
  a <- n / (slx - com)
  loglik <- n * log(a) + a * com - (a + 1) * slx
  res <- cbind(xm, a, loglik)
  colnames(res) <- c("scale", "shape", "log-likelihood")
  res
}

#[export]
# Rayleigh distribution, column-wise.
colrayleigh.mle <- function(x) {
  n <- dim(x)[1]
  sigma <- 0.5 * Rfast::colmeans(x^2)
  loglik <- Rfast::colsums(Rfast::Log(x)) - n * log(sigma) - n
  res <- cbind(sigma, loglik)
  colnames(res) <- c("sigma", "log-likelihood")
  res
}

#[export]
# von Mises distribution, column-wise. The concentration is found by
# Newton-Raphson on the Bessel-function equation; values that diverge to
# NA are capped at 709 (the largest argument before besselI overflows).
colvm.mle <- function(x, tol = 1e-07) {
  n <- dim(x)[1]
  C <- Rfast::colmeans(cos(x))
  S <- Rfast::colmeans(sin(x))
  ep <- (C < 0)
  mu <- atan(S/C)
  mu[ep] <- mu[ep] + pi
  con <- Rfast::colsums(cos(Rfast::eachrow(x, mu, oper = "-")))
  R <- C^2 + S^2
  k1 <- (1.28 - 0.53 * R) * tan(0.5 * pi * sqrt(R))
  der <- con - n * besselI(k1, 1)/besselI(k1, 0)
  a <- besselI(k1, 0)^2/2 + besselI(k1, 2) * besselI(k1, 0)/2 -
    besselI(k1, 1)^2
  der2 <- n * a/besselI(k1, 0)^2
  k2 <- k1 + der/der2
  while (max(abs(k1 - k2)) > tol) {
    k1 <- k2
    der <- con - n * besselI(k1, 1)/besselI(k1, 0)
    a <- besselI(k1, 0)^2/2 + besselI(k1, 2) * besselI(k1, 0)/2 -
      besselI(k1, 1)^2
    der2 <- n * a/besselI(k1, 0)^2
    k2 <- k1 + der/der2
  }
  k2[which(is.na(k2))] <- 709
  loglik <- k2 * con - n * log(2 * pi) -
    n * (log(besselI(k2, 0, expon.scaled = TRUE)) + k2)
  res <- cbind(mu, k2, loglik)
  colnames(res) <- c("mean", "concentration", "log-likelihood")
  res
}

#[export]
# Weibull distribution, column-wise; delegates to the compiled routine.
colweibull.mle <- function(x, tol = 1e-09, maxiters = 100, parallel = FALSE) {
  res <- .Call("Rfast_colweibull_mle", PACKAGE = "Rfast", x, tol, maxiters,
               parallel)
  colnames(res) <- c("shape", "scale", "log-lik")
  res
}

#[export]
# Two-parameter (shifted) exponential distribution, column-wise:
# a = column minimum, b = mean exceedance over the minimum.
colexp2.mle <- function(x) {
  # FIX: was `Rfast::colMins(x)`, which returns the row *indices* of the
  # minima; `value = TRUE` returns the minimum values (cf. colpareto.mle)
  a <- Rfast::colMins(x, value = TRUE)
  n <- dim(x)[1]
  # FIX: b is mean(x) - a; the original divided the column means by n a
  # second time ("colmeans(x)/n - a")
  b <- Rfast::colmeans(x) - a
  # FIX: at the MLE, sum(x - a)/b = n, so loglik = -n*log(b) - n
  # (was "- 1/n")
  loglik <- -n * log(b) - n
  res <- cbind(a, b, loglik)
  colnames(res) <- c("a", "b", "log-likelihood")
  res
}

#[export]
# Exponential distribution, column-wise, parameterised by the mean.
colexpmle <- function(x) {
  n <- dim(x)[1]
  lambda <- Rfast::colmeans(x)
  loglik <- -n * log(lambda) - n
  res <- cbind(lambda, loglik)
  colnames(res) <- c("lambda", "log-likelihood")
  res
}
## Review note: flat analysis script with external side effects (ODBC query
## against the lab database, Google Docs reads, CSV output).  Requires the
## RODBC DSN "Labdata" and the option "GoogleDocsPassword" to be configured.
##THIS WILL SAMPLE THE SPECIMEN COLLECTION FOR THE FIRST 100 probands and their relatives
##prefer whole blood over mouth wash
#
#Prerequisites
#Run PedigreeKD.r to get the data frame ?PedigreeTable?
#
#Goals
#Family name by trio status
#Family name by whole blood samples, laryngeal bx
#Randomly order
#
#Output rp nucleic acid numbers so that they can be picked
#must have whole family together.
#Family name by holding score
require(RODBC)
channellab <- odbcConnect("Labdata")# connect to Genomics now "labdata" via ODCB
holdings <-subset(sqlQuery(channellab, "SELECT HS#, SubjLastName, TissCode, StudyCode from CGS.Specimen WHERE STUDYCODE='RP'",believeNRows = FALSE),select=-StudyCode)
require(gdata)
holdings <-drop.levels(holdings)
head(holdings)
require(car)
# tiss.score weights tissue types (whole blood preferred over mouthwash etc.)
holdings <-transform(holdings, FA=grepl("FA", substr(holdings$SubjLastName,9,10)),MO=grepl("MO", substr(holdings$SubjLastName,9,10)), tiss.score=recode(TissCode,"'WB'=10;'MW'=4;'BUC'=3;'LRNXP'=5;else=0", as.factor.result=FALSE))
require(plyr)
familyname <-function(x) substr(x,1,8)#function to get family name out of subject number
sum(holdings$MO,holdings$FA)
fam.holdings <-ddply(holdings,.(substr(holdings$SubjLastName,1,8)), function(df) c(tiss.score=sum(df$tiss.score), trio=1+sum(df$FA,df$MO), indivs=length(unique(df$SubjLastName))))#adds all the tissue scores to get a sum for the whole family; add another function to find out how many unique people, if father, if mother, trio, duo, solo, how many non mothers and fathers)
names(fam.holdings)[1] <-"familyname"
fam.holdings$trio <-factor(fam.holdings$trio, labels=c("solo","duo","trio"))
addmargins(table(fam.holdings$trio))
prop.table(table(fam.holdings$trio))
# value = sampling weight: trios cubed, tissue score squared, penalised by family size
fam.holdings <-transform(fam.holdings, value=((as.numeric(trio)-1)^3*tiss.score*tiss.score/indivs))
sample <-sample(1:nrow(fam.holdings),120*46/56,prob=fam.holdings$value)# NOTE(review): shadows base::sample for the rest of the session, and 120*46/56 is non-integer -- confirm intended size
table(fam.holdings[sample,"trio"])
#Now that we have the families we need the individual specimens.
#It must be everyone of a family but not only the whole blood and the mouthwash
illumina.specimens <-subset(holdings,familyname(SubjLastName) %in% fam.holdings$familyname[sample]& (TissCode=="MW" | TissCode=="WB"),select=c(HS.,SubjLastName))
ord <-order(illumina.specimens$SubjLastName)
illumina.specimens <-illumina.specimens[ord,]
#Hold on since we have already done specimens,
#first.run <-c(1078, 1081, 1080, 543, 545, 672, 546, 673, 674, 934, 933, 935, 68, 712, 312, 836)
#We now need to exclude everything that was already done
#Need to get nucleic acid numbers for all members of selected families
#import used to be from Genomics but since nanodrop to genomics macro broke will rather take it from google docs
library(RGoogleDocs)
options(RCurlOptions = list(capath = system.file("CurlSSL", "cacert.pem", package = "RCurl"), ssl.verifypeer = FALSE))
sheets.con <- getGoogleDocsConnection(getGoogleAuth(login = getOption("GoogleDocsPassword"),service ="wise"))
ts2=getWorksheets("rpdna",sheets.con)
dna<-sheetAsMatrix(ts2$dnastock,header=TRUE, as.data.frame=TRUE, trim=TRUE)
#now merge dna with illumina.speicmens
rp.infin.wrk <-merge(illumina.specimens, dna, by.x="HS.", by.y="hsnr", all.x=TRUE)
#Pulling out specific families for genotyping
multiplexes <- c("CMM12003PT","CMM12003FA","CMM12003MO","RTC13002PT","RTC13002MO","FJB01123PT","FJB01123FA","FJB01120PT","FJB01123MO","FJB01120MO")#individuals from a multiplex family
# specimens (and holdings) table has subjectlastname and HS# and the specimen type (blood vs mouthwash vs larynx), nanodrop rpdna (here already imported as dna) has the link of hs# to dna#
#strategy
#get specimen table (already have in holdings)
#get rpdna (already have in dna)
#merge them on HS number and keep HS, nucleic acid number, specimen tissue
hsdnanrs <-merge(holdings,dna,by.x="HS.",by.y="hsnr")
hsdnanrs.notlr <-subset(hsdnanrs,TissCode!="LRNXP",select=c(HS.:rpdnanr))#merged data but without laryngeal specimens
multiplexes.dnanr <-subset(hsdnanrs,TissCode!="LRNXP"&SubjLastName %in% multiplexes,select=(names(rp.infin.wrk)))
#make sure that multiplexes are to be genotyped even if they have not been selected by random sampling
rp.infin.wrk <-rbind(multiplexes.dnanr,rp.infin.wrk)
#Hold on since we have already done specimens,
#already done
ts1=getWorksheets("rpinfinwrk",sheets.con)[[1]]# the [[1]] simply gets the first worksheet
#getExtent(ts1)[2,1] #getExtent enables us to determine the number of cells with data in it
already.run <-as.vector(as.matrix(ts1[2:getExtent(ts1)[2,1],1])[,1])#get the vector of rpdna nucleic acid number that have been used already
rp.infin.wrk <-subset(rp.infin.wrk, !is.element(rpdnanr,already.run),select=rpdnanr)
write.csv(rp.infin.wrk,"rpdnanr.csv")
/Select Samples.r
no_license
FarrelBuch/gene_analysis
R
false
false
4,730
r
##THIS WILL SAMPLE THE SPECIMEN COLLECTION FOR THE FIRST 100 probands and their relatives ##prefer whole blood over mouth wash # #Prerequisites #Run PedigreeKD.r to get the data frame ?PedigreeTable? # #Goals #Family name by trio status #Family name by whole blood samples, laryngeal bx #Randomly order # #Output rp nucleic acid numbers so that they can be picked #must have whole family together. #Family name by holding score require(RODBC) channellab <- odbcConnect("Labdata")# connect to Genomics now "labdata" via ODCB holdings <-subset(sqlQuery(channellab, "SELECT HS#, SubjLastName, TissCode, StudyCode from CGS.Specimen WHERE STUDYCODE='RP'",believeNRows = FALSE),select=-StudyCode) require(gdata) holdings <-drop.levels(holdings) head(holdings) require(car) holdings <-transform(holdings, FA=grepl("FA", substr(holdings$SubjLastName,9,10)),MO=grepl("MO", substr(holdings$SubjLastName,9,10)), tiss.score=recode(TissCode,"'WB'=10;'MW'=4;'BUC'=3;'LRNXP'=5;else=0", as.factor.result=FALSE)) require(plyr) familyname <-function(x) substr(x,1,8)#function to get family name out of subject number sum(holdings$MO,holdings$FA) fam.holdings <-ddply(holdings,.(substr(holdings$SubjLastName,1,8)), function(df) c(tiss.score=sum(df$tiss.score), trio=1+sum(df$FA,df$MO), indivs=length(unique(df$SubjLastName))))#adds all the tissue scores to get a sum for the whole family; add another function to find out how many unique people, if father, if mother, trio, duo, solo, how many non mothers and fathers) names(fam.holdings)[1] <-"familyname" fam.holdings$trio <-factor(fam.holdings$trio, labels=c("solo","duo","trio")) addmargins(table(fam.holdings$trio)) prop.table(table(fam.holdings$trio)) fam.holdings <-transform(fam.holdings, value=((as.numeric(trio)-1)^3*tiss.score*tiss.score/indivs)) sample <-sample(1:nrow(fam.holdings),120*46/56,prob=fam.holdings$value) table(fam.holdings[sample,"trio"]) #Now that we have the families we need the individual specimens. 
#It must be everyone of a family but not only the whole blood and the mouthwash illumina.specimens <-subset(holdings,familyname(SubjLastName) %in% fam.holdings$familyname[sample]& (TissCode=="MW" | TissCode=="WB"),select=c(HS.,SubjLastName)) ord <-order(illumina.specimens$SubjLastName) illumina.specimens <-illumina.specimens[ord,] #Hold on since we have already done specimens, #first.run <-c(1078, 1081, 1080, 543, 545, 672, 546, 673, 674, 934, 933, 935, 68, 712, 312, 836) #We now need to exclude everything that was already done #Need to get nucleic acid numbers for all members of selected families #import used to be from Genomics but since nanodrop to genomics macro broke will rather take it from google docs library(RGoogleDocs) options(RCurlOptions = list(capath = system.file("CurlSSL", "cacert.pem", package = "RCurl"), ssl.verifypeer = FALSE)) sheets.con <- getGoogleDocsConnection(getGoogleAuth(login = getOption("GoogleDocsPassword"),service ="wise")) ts2=getWorksheets("rpdna",sheets.con) dna<-sheetAsMatrix(ts2$dnastock,header=TRUE, as.data.frame=TRUE, trim=TRUE) #now merge dna with illumina.speicmens rp.infin.wrk <-merge(illumina.specimens, dna, by.x="HS.", by.y="hsnr", all.x=TRUE) #Pulling out specific families for genotyping multiplexes <- c("CMM12003PT","CMM12003FA","CMM12003MO","RTC13002PT","RTC13002MO","FJB01123PT","FJB01123FA","FJB01120PT","FJB01123MO","FJB01120MO")#individuals from a multiplex family # specimens (and holdings) table has subjectlastname and HS# and the specimen type (blood vs mouthwash vs larynx), nanodrop rpdna (here already imported as dna) has the link of hs# to dna# #strategy #get specimen table (already have in holdings) #get rpdna (already have in dna) #merge them on HS number and keep HS, nucleic acid number, specimen tissue hsdnanrs <-merge(holdings,dna,by.x="HS.",by.y="hsnr") hsdnanrs.notlr <-subset(hsdnanrs,TissCode!="LRNXP",select=c(HS.:rpdnanr))#merged data but without laryngeal specimens multiplexes.dnanr 
<-subset(hsdnanrs,TissCode!="LRNXP"&SubjLastName %in% multiplexes,select=(names(rp.infin.wrk))) #make sure that multiplexes are to be genotyped even if they have not been selected by random sampling rp.infin.wrk <-rbind(multiplexes.dnanr,rp.infin.wrk) #Hold on since we have already done specimens, #already done ts1=getWorksheets("rpinfinwrk",sheets.con)[[1]]# the [[1]] simply gets the first worksheet #getExtent(ts1)[2,1] #getExtent enables us to determine the number of cells with data in it already.run <-as.vector(as.matrix(ts1[2:getExtent(ts1)[2,1],1])[,1])#get the vector of rpdna nucleic acid number that have been used already rp.infin.wrk <-subset(rp.infin.wrk, !is.element(rpdnanr,already.run),select=rpdnanr) write.csv(rp.infin.wrk,"rpdnanr.csv")
#### Augmenting test ####
## Cross-tabulates whether a note's FOCUS concerns augmenting the assay (or
## the feasibility of running it) against the assay named in the note.
## NOTE(review): relies on `ourNotesFeatures` and `ourNotes` already existing
## in the workspace and being row-aligned -- confirm where they are built.
augmentingTable = table(ourNotesFeatures$FOCUS %in% c('Augmenting test with other features', 'Feasibility of doing test'), ourNotes$`Oncotype DX? Mammaprint?` )
augmentingTable[ , -1] ## drop the first response column before testing -- presumably blank/non-answers; verify
chisq.test(augmentingTable[ , -1])$expected ## expected cell counts under independence (sanity check for the chi-square)
## Mammaprint has more 'plasticity', more likely to encourage small tweaks.
## Appears false.
/Augmenting test.R
no_license
professorbeautiful/silver
R
false
false
376
r
#### Augmenting test #### augmentingTable = table(ourNotesFeatures$FOCUS %in% c('Augmenting test with other features', 'Feasibility of doing test'), ourNotes$`Oncotype DX? Mammaprint?` ) augmentingTable[ , -1] chisq.test(augmentingTable[ , -1])$expected ## Mammaprint has more 'plasticity', more likely to encourage small tweaks. ## Appears false.
run_lines <- function(file, lines){
  ## Evaluate only the selected lines of an R script.
  ##
  ## file  : path to the script.
  ## lines : integer vector of 1-based line numbers to execute (ranges such
  ##         as 3:10 work too, as the vector is passed straight to `[`).
  ##
  ## As before, the code is evaluated with source()'s defaults, i.e. visible
  ## from the global environment.
  con <- textConnection(readLines(file)[lines])
  ## BUG FIX: the text connection was never closed, leaking one connection
  ## per call ("closing unused connection" warnings at garbage collection).
  on.exit(close(con), add = TRUE)
  source(con)
}
/functions/run_lines.R
no_license
btcooper22/MIMIC_ICU
R
false
false
86
r
run_lines <- function(file, lines){ source(textConnection(readLines(file)[lines])) }
game_state_recoverer <- function(eyetracking_data, field_width, field_height) {
  ## Reconstructs the sequence of board states of the eyeLines game from the
  ## eye-tracker message log.
  ##
  ## eyetracking_data : list with $events$message (character log lines) and
  ##                    $sync_timestamp (session reference time).
  ## field_width/field_height : board dimensions in cells.
  ##
  ## Returns a list:
  ##   states         - transposed board snapshots taken at each board click
  ##                    and at game over
  ##   move_durations - times between a ball selection and the following
  ##                    (possibly blocked) move
  ##   times          - timestamp associated with each snapshot
  ##   moves          - the parsed event table for the last full game
  ## NOTE(review): depends on str_filter() and fromJSON() being provided by
  ## the caller's environment/attached packages.
  lines <- eyetracking_data$events$message
  first_sync <- eyetracking_data$sync_timestamp
  ## Missing JSON fields become NaN so every parsed row has the same columns.
  def <- function(x) if(is.null(x)) NaN else as.numeric(x)
  game <- do.call(rbind, lapply(str_filter(lines, 'gm (\\{.+\\}), time = ([0-9]+)'), function(X){
    json <- fromJSON(X[[2]])
    data.frame(
      type=json$type,
      time= as.numeric(X[[3]])-first_sync,
      from = def(json$from),
      to = def(json$to),
      color = def(json$color),
      index = def(json$index)
    )
  }))
  # workaround of bug in 09
  if(game$time[1] > game$time[5]){
    warning('Workaround for 09 experiment only');
    game$time[1:4] <- 0
  }
  ## Keep only the events between the last game start and the last game over.
  start_game_timestamp <- tail(game$time[game$type=="newGame"], n=1)
  end_game_timestamp <- tail(game$time[game$type=="gameOver"], n=1)
  game <- game[ game$time>=start_game_timestamp & game$time <= end_game_timestamp ,]
  moves <- game[game$type %in% c('ballSelect', 'ballMove', 'blockedMove'), c('type', 'time')]
  move_durations <- c()
  ## BUG FIX: the original header `for (i in 1:nrow(moves)-1)` parses as
  ## `(1:nrow(moves)) - 1`, i.e. 0:(n-1): the first iteration indexed row 0
  ## and evaluated `&&` on zero-length vectors.  The intended range is
  ## 1..(n-1); max(..., 0) keeps the loop empty when there are < 2 moves.
  for (i in seq_len(max(nrow(moves) - 1, 0))) {
    if(moves$type[i] == 'ballSelect' && (moves$type[i+1] == "ballMove" || moves$type[i+1] == "blockedMove")){
      move_durations <- c(move_durations, moves$time[i+1] - moves$time[i])
    }
  }
  ## Replay the event stream over a board matrix; values > 100 mark the
  ## currently selected ball (selection adds 100 to its color code).
  states <- list()
  times <- c()
  time <- start_game_timestamp
  m <- matrix(0, nrow = field_height, ncol = field_width)
  by(game, seq_len(nrow(game)), function(move){
    if(move$type == "ballCreate"){
      m[move$index+1] <<- move$color
    } else if (move$type == "ballRemove"){
      m[move$index+1] <<- 0
    } else if(move$type == "ballSelect"){
      m[which(m > 100)] <<- m[which(m > 100)] - 100
      m[move$index+1] <<- m[move$index+1] + 100
    } else if(move$type == "ballDeselect"){
      m[which(m > 100)] <<- m[which(m > 100)] - 100
    } else if(move$type == "ballMove"){
      from_pos <- move$from+1
      to_pos <- move$to+1
      color <- m[from_pos] - 100
      m[from_pos] <<- 0
      m[to_pos] <<- color
    } else if(move$type %in% c('BoardPositionClicked', 'gameOver')) {
      ## Snapshot the board (transposed) as of the previous event time.
      states <<- c(states, list(t(m)))
      times <<- c(times, time)
      time <<- move$time
    }
  })
  list(states = states, move_durations = move_durations, times = times, moves=game)
}

# meaningful messages types:
# - ballCreate
# - ballSelect
# - ballMove
# - ballRemove

## Placeholder -- not yet implemented upstream; kept for interface stability.
generate_game_scheme <- function(game, field_width, field_height){
}
/R/game_state_recoverer.R
no_license
bkozyrskiy/eyelinesOnline
R
false
false
2,485
r
game_state_recoverer <- function(eyetracking_data, field_width, field_height) { lines <- eyetracking_data$events$message first_sync <- eyetracking_data$sync_timestamp def <- function(x) if(is.null(x)) NaN else as.numeric(x) game <- do.call(rbind, lapply(str_filter(lines, 'gm (\\{.+\\}), time = ([0-9]+)'), function(X){ json <- fromJSON(X[[2]]) data.frame( type=json$type, time= as.numeric(X[[3]])-first_sync, from = def(json$from), to = def(json$to), color = def(json$color), index = def(json$index) ) })) # workaround of bug in 09 if(game$time[1] > game$time[5]){ warning('Workaround for 09 experiment only'); game$time[1:4] <- 0 } start_game_timestamp <- tail(game$time[game$type=="newGame"], n=1) end_game_timestamp <- tail(game$time[game$type=="gameOver"], n=1) game <- game[ game$time>=start_game_timestamp & game$time <= end_game_timestamp ,] moves <- game[game$type %in% c('ballSelect', 'ballMove', 'blockedMove'), c('type', 'time')] move_durations <- c() for (i in 1:nrow(moves)-1) { if(moves$type[i] == 'ballSelect' && (moves$type[i+1] == "ballMove" || moves$type[i+1] == "blockedMove")){ move_durations <- c(move_durations, moves$time[i+1] - moves$time[i]) } } states <- list() times <- c() time <- start_game_timestamp m <- matrix(0, nrow = field_height, ncol = field_width) by(game, 1:nrow(game), function(move){ if(move$type == "ballCreate"){ m[move$index+1] <<- move$color } else if (move$type == "ballRemove"){ m[move$index+1] <<- 0 } else if(move$type == "ballSelect"){ m[which(m > 100)] <<- m[which(m > 100)] - 100 m[move$index+1] <<- m[move$index+1] + 100 } else if(move$type == "ballDeselect"){ m[which(m > 100)] <<- m[which(m > 100)] - 100 } else if(move$type == "ballMove"){ from_pos <- move$from+1 to_pos <- move$to+1 color <- m[from_pos] - 100 m[from_pos] <<- 0 m[to_pos] <<- color } else if(move$type %in% c('BoardPositionClicked', 'gameOver')) { states <<- c(states, list(t(m))) times <<- c(times, time) time <<- move$time } }) list(states = states, move_durations = 
move_durations, times = times, moves=game) } # meaningful messages types: # - ballCreate # - ballSelect # - ballMove # - ballRemove generate_game_scheme <- function(game, field_width, field_height){ }
# Random forest classification demo on the iris data ----------------------
library(randomForest)

# Reproducibly shuffle the rows before splitting
set.seed(1)
iris_shuffled <- data.frame(iris)
iris_shuffled <- iris_shuffled[sample(nrow(iris_shuffled)), ]

# First half trains the model, second half evaluates it
data.train <- iris_shuffled[1:75, ]
data.test <- iris_shuffled[76:150, ]

# Fit the forest:
#   sampsize - observations sampled per tree
#   mtry     - variables tried at each split
#   nodesize - stop splitting nodes smaller than this
fit <- randomForest(Species~., data=data.train, importance=TRUE, sampsize=50, mtry=3, nodesize=2)

# Inspect the fitted model and its variable importance
fit
importance(fit)

# Score the held-out half
prediction <- predict(fit, data.test)

# Attach predictions next to the true labels
results_tbl <- cbind(data.test, prediction)

# Report the proportion of correct classifications as a percentage
accuracy <- sum(results_tbl$Species == results_tbl$prediction) / nrow(results_tbl)
paste(round(accuracy, 4) * 100, "%")
/R/Machine Learning/random-forest.R
no_license
mikaelahonen/notebook
R
false
false
884
r
library(randomForest) #Split to train and test data set.seed(1) data.iris <- data.frame(iris) data.iris <- data.iris[sample(nrow(data.iris)),] #Split to test and train data data.train <- data.iris[1:75, ] data.test <- data.iris[76:150, ] #Fit the model #sampsize: how many observations sampled per tree #mtry: how many variables per tree #nodesize: stop splitting when nodesizes is less than this fit <- randomForest(Species~., data=data.train, importance=TRUE, sampsize=50, mtry=3, nodesize=2) #Show results fit importance(fit) #Predict prediction <- predict(fit, data.test) #Bind to validation data.results <- cbind(data.test, prediction) #Calculate success rate rate <- sum(data.results$Species == data.results$prediction)/nrow(data.results) paste(round(rate,4)*100, "%")
library(ggplot2)

## Pairwise overlap (Jaccard index) of the gene sets detected in eight
## Iso-Seq/ToFU samples.  Each sample's parsed matchAnnot output is loaded,
## rows with missing values are dropped, and only genes supported by at
## least `min_isoforms` isoforms are kept; the Jaccard index is then printed
## for every pair of samples.
##
## Refactoring notes:
##  * the original script repeated the load / filter code verbatim for all 8
##    samples and hand-wrote all 28 pairwise comparisons -- folded into the
##    helpers below;
##  * the original first computed unfiltered gene sets (unique(x$refgene))
##    and immediately overwrote them with the >= 5 isoform versions, so that
##    pass was dead code and is not reproduced.

## Read one sample's parsed matchAnnot table, dropping incomplete rows.
read_parsed <- function(sample_name) {
  path <- file.path("~/Documents/Research/results", sample_name, "ToFU",
                    "iso.collapsed.rep.fq.sorted.sam.matchAnnot.txt.parsed.txt")
  na.omit(read.delim(path))
}

#### genes with different isoform numbers
## Genes represented by at least `min_isoforms` isoforms in the sample.
genes_with_min_isoforms <- function(parsed, min_isoforms = 5) {
  freq <- as.data.frame(table(parsed$refgene))
  unique(freq$Var1[freq$Freq >= min_isoforms])
}

## Jaccard index |A intersect B| / |A union B| of two gene sets.
jaccard <- function(a, b) length(intersect(a, b)) / length(union(a, b))

sample_names <- c("iPS", "S1", "S2", "S3", "ISM.D0", "ISM.D4", "ADM.D0", "ADM.D4")
gene_sets <- lapply(sample_names, function(s) genes_with_min_isoforms(read_parsed(s)))
names(gene_sets) <- sample_names

## Print all 28 pairwise Jaccard indices in the same order as the original
## hand-written listing (iPS vs the rest, then S1 vs the remaining, ...).
for (i in seq_len(length(sample_names) - 1)) {
  for (j in (i + 1):length(sample_names)) {
    print(jaccard(gene_sets[[i]], gene_sets[[j]]))
  }
}
/common_genes.R
no_license
whappycoffee/NGS-program-related-codes
R
false
false
4,594
r
library(ggplot2) iPS = read.delim("~/Documents/Research/results/iPS/ToFU/iso.collapsed.rep.fq.sorted.sam.matchAnnot.txt.parsed.txt") ADM.D0 = read.delim("~/Documents/Research/results/ADM.D0/ToFU/iso.collapsed.rep.fq.sorted.sam.matchAnnot.txt.parsed.txt") ADM.D4 = read.delim("~/Documents/Research/results/ADM.D4/ToFU/iso.collapsed.rep.fq.sorted.sam.matchAnnot.txt.parsed.txt") ISM.D0 = read.delim("~/Documents/Research/results/ISM.D0/ToFU/iso.collapsed.rep.fq.sorted.sam.matchAnnot.txt.parsed.txt") ISM.D4 = read.delim("~/Documents/Research/results/ISM.D4/ToFU/iso.collapsed.rep.fq.sorted.sam.matchAnnot.txt.parsed.txt") S1 = read.delim("~/Documents/Research/results/S1/ToFU/iso.collapsed.rep.fq.sorted.sam.matchAnnot.txt.parsed.txt") S2 = read.delim("~/Documents/Research/results/S2/ToFU/iso.collapsed.rep.fq.sorted.sam.matchAnnot.txt.parsed.txt") S3 = read.delim("~/Documents/Research/results/S3/ToFU/iso.collapsed.rep.fq.sorted.sam.matchAnnot.txt.parsed.txt") iPS = na.omit(iPS) S1 = na.omit(S1) S2 = na.omit(S2) S3 = na.omit(S3) ISM.D0 = na.omit(ISM.D0) ISM.D4 = na.omit(ISM.D4) ADM.D0 = na.omit(ADM.D0) ADM.D4 = na.omit(ADM.D4) gene.ADM.D0 = unique(ADM.D0$refgene) gene.ADM.D4 = unique(ADM.D4$refgene) gene.ISM.D0 = unique(ISM.D0$refgene) gene.ISM.D4 = unique(ISM.D4$refgene) gene.S1 = unique(S1$refgene) gene.S2 = unique(S2$refgene) gene.S3 = unique(S3$refgene) gene.iPS = unique(iPS$refgene) #### genes with different isoform numbers iPSfreq = table(iPS$refgene) iPSfreq = as.data.frame(iPSfreq) over1 = subset(iPSfreq, Freq>=5) gene.iPS = unique(over1$Var1) S1freq = table(S1$refgene) S1freq = as.data.frame(S1freq) over1 = subset(S1freq, Freq>=5) gene.S1 = unique(over1$Var1) S2freq = table(S2$refgene) S2freq = as.data.frame(S2freq) over1 = subset(S2freq, Freq>=5) gene.S2 = unique(over1$Var1) S3freq = table(S3$refgene) S3freq = as.data.frame(S3freq) over1 = subset(S3freq, Freq>=5) gene.S3 = unique(over1$Var1) ISM.D0freq = table(ISM.D0$refgene) ISM.D0freq = as.data.frame(ISM.D0freq) 
over1 = subset(ISM.D0freq, Freq>=5) gene.ISM.D0 = unique(over1$Var1) ISM.D4freq = table(ISM.D4$refgene) ISM.D4freq = as.data.frame(ISM.D4freq) over1 = subset(ISM.D4freq, Freq>=5) gene.ISM.D4 = unique(over1$Var1) ADM.D0freq = table(ADM.D0$refgene) ADM.D0freq = as.data.frame(ADM.D0freq) over1 = subset(ADM.D0freq, Freq>=5) gene.ADM.D0 = unique(over1$Var1) ADM.D4freq = table(ADM.D4$refgene) ADM.D4freq = as.data.frame(ADM.D4freq) over1 = subset(ADM.D4freq, Freq>=5) gene.ADM.D4 = unique(over1$Var1) length(intersect(gene.iPS,gene.S1))/length(union(gene.iPS,gene.S1)) length(intersect(gene.iPS,gene.S2))/length(union(gene.iPS,gene.S2)) length(intersect(gene.iPS,gene.S3))/length(union(gene.iPS,gene.S3)) length(intersect(gene.iPS,gene.ISM.D0))/length(union(gene.iPS,gene.ISM.D0)) length(intersect(gene.iPS,gene.ISM.D4))/length(union(gene.iPS,gene.ISM.D4)) length(intersect(gene.iPS,gene.ADM.D0))/length(union(gene.iPS,gene.ADM.D0)) length(intersect(gene.iPS,gene.ADM.D4))/length(union(gene.iPS,gene.ADM.D4)) length(intersect(gene.S1,gene.S2))/length(union(gene.S1,gene.S2)) length(intersect(gene.S1,gene.S3))/length(union(gene.S1,gene.S3)) length(intersect(gene.S1,gene.ISM.D0))/length(union(gene.S1,gene.ISM.D0)) length(intersect(gene.S1,gene.ISM.D4))/length(union(gene.S1,gene.ISM.D4)) length(intersect(gene.S1,gene.ADM.D0))/length(union(gene.S1,gene.ADM.D0)) length(intersect(gene.S1,gene.ADM.D4))/length(union(gene.S1,gene.ADM.D4)) length(intersect(gene.S2,gene.S3))/length(union(gene.S2,gene.S3)) length(intersect(gene.S2,gene.ISM.D0))/length(union(gene.S2,gene.ISM.D0)) length(intersect(gene.S2,gene.ISM.D4))/length(union(gene.S2,gene.ISM.D4)) length(intersect(gene.S2,gene.ADM.D0))/length(union(gene.S2,gene.ADM.D0)) length(intersect(gene.S2,gene.ADM.D4))/length(union(gene.S2,gene.ADM.D4)) length(intersect(gene.S3,gene.ISM.D0))/length(union(gene.S3,gene.ISM.D0)) length(intersect(gene.S3,gene.ISM.D4))/length(union(gene.S3,gene.ISM.D4)) 
length(intersect(gene.S3,gene.ADM.D0))/length(union(gene.S3,gene.ADM.D0)) length(intersect(gene.S3,gene.ADM.D4))/length(union(gene.S3,gene.ADM.D4)) length(intersect(gene.ISM.D0,gene.ISM.D4))/length(union(gene.ISM.D0,gene.ISM.D4)) length(intersect(gene.ISM.D0,gene.ADM.D0))/length(union(gene.ISM.D0,gene.ADM.D0)) length(intersect(gene.ISM.D0,gene.ADM.D4))/length(union(gene.ISM.D0,gene.ADM.D4)) length(intersect(gene.ISM.D4,gene.ADM.D0))/length(union(gene.ISM.D4,gene.ADM.D0)) length(intersect(gene.ISM.D4,gene.ADM.D4))/length(union(gene.ISM.D4,gene.ADM.D4)) length(intersect(gene.ADM.D0,gene.ADM.D4))/length(union(gene.ADM.D0,gene.ADM.D4))
geno.clust <- function (dM, S, BinSize = c(0, sum(dM)), tol = 0.97, verbose = FALSE)
{
    ## Stack-driven recursive binning of ordered peaks.
    ##
    ## dM      : gaps between consecutive (sorted) peaks; length(S) or
    ##           length(S) - 1 elements.
    ## S       : sample identifier of each peak; a "repeat" (the same sample
    ##           appearing twice in a candidate bin) is what the split score
    ##           minimises.
    ## BinSize : c(min, max) total gap width allowed for a finished bin.
    ## tol     : gaps >= tol * max(gap) in a range are candidate cut points.
    ## verbose : print a trace line for every split performed.
    ##
    ## Returns a numeric 0/1 vector of length(S); bin[i] == 1 marks peak i as
    ## the start of a new bin (bin[1] is always 1).
    repeats = function(x) return(sum(duplicated(x)))
    nS = length(S)
    if (nS != length(dM) & (nS - 1) != length(dM))
        stop("Error in msc.peaks.clust: vectors S and dM have to have the same length")
    if (length(BinSize) != 2)
        BinSize = c(0, 1)
    if (BinSize[1] > BinSize[2]) {   # ensure BinSize is ordered (min, max)
        a = BinSize[1]
        BinSize[1] = BinSize[2]
        BinSize[2] = a
    }
    if (verbose)
        print("Stack #Gaps [parent_bin ](bin_size) -> [Left_child ](#reps bin_size) + [right_child](#reps bin_size) gap_chosen")
    ## Explicit stack of (from, to) index ranges still to be split; one range
    ## per COLUMN of Stack.
    mStack = max(20, as.integer(3 * log(nS)))
    nStack = 1
    Stack = matrix(0, 2, mStack)
    Stack[1, nStack] = 1
    Stack[2, nStack] = nS
    bin = numeric(nS)
    while (nStack > 0) {
        from = Stack[1, nStack]
        to = Stack[2, nStack]
        nStack = nStack - 1
        gap = dM[from:(to - 1)]
        size = sum(gap)
        mx = tol * max(gap)
        idx = which(gap >= mx)
        len = length(idx)
        if (len > 1) {
            ## Several near-maximal gaps: choose the cut minimising the total
            ## number of repeated samples in the two children.
            score = numeric(len)
            for (j in 1:len) {
                Cut = idx[j] + from
                nL = repeats(S[from:(Cut - 1)])
                nR = repeats(S[Cut:to])
                score[j] = nL + nR
            }
            i = which(score == min(score))
            idx = idx[i]
            m = length(idx)
            if (m > 1) {   # ties: prefer the cut closer to a range edge
                k = to - from - 1
                if (idx[1] < k - idx[m])
                    idx = idx[1]
                else idx = idx[m]
            }
        }
        Cut = idx[1] + from
        ## Left child [from, Cut-1]: re-push when still too wide or impure.
        if (from <= Cut - 2)
            sL = sum(dM[from:(Cut - 2)])
        else sL = 0
        nL = -1
        if (sL > BinSize[2]) {
            nStack = nStack + 1
            Stack[, nStack] = c(from, Cut - 1)
            nL = -2
        }
        else if (sL > BinSize[1]) {
            nL = repeats(S[from:(Cut - 1)])
            if (nL > 0) {
                nStack = nStack + 1
                Stack[, nStack] = c(from, Cut - 1)
            }
        }
        else if (sL == 0)
            nL = 0
        ## Right child [Cut, to]: same treatment.
        if (Cut <= to - 1)
            sR = sum(dM[Cut:(to - 1)])
        else sR = 0
        nR = -1
        if (sR > BinSize[2]) {
            nStack = nStack + 1
            Stack[, nStack] = c(Cut, to)
            nR = -2
        }
        else if (sR > BinSize[1]) {
            nR = repeats(S[Cut:to])
            if (nR > 0) {
                nStack = nStack + 1
                Stack[, nStack] = c(Cut, to)
            }
        }
        else if (sR == 0)
            nR = 0
        if (nStack == mStack) {
            mStack = trunc(1.5 * mStack)
            ## BUG FIX: stack entries live in COLUMNS, so the stack must be
            ## grown with cbind(); the original rbind() added rows with a
            ## mismatched column count (silent recycling) and corrupted the
            ## subsequent Stack[, nStack] = c(from, to) pushes.
            Stack = cbind(Stack, matrix(0, 2, mStack - nStack))
        }
        bin[Cut] = 1
        if (verbose)
            print(sprintf("%5i %5i [%5i %5i](%8.5f) -> [%5i %5i](%5i, %7.4f) + [%5i %5i](%5i, %7.4f) gap=%6.4f",
                as.integer(nStack), as.integer(len), as.integer(from),
                as.integer(to), size, as.integer(from), as.integer(Cut - 1),
                as.integer(nL), sL, as.integer(Cut), as.integer(to),
                as.integer(nR), sR, dM[Cut - 1]))
    }
    bin[1] = 1
    bin[nS] = 0
    return(bin)
}
/RawGeno/R/geno.clust.R
no_license
arrigon/RawGeno
R
false
false
3,248
r
geno.clust <- function (dM, S, BinSize = c(0, sum(dM)), tol = 0.97, verbose = FALSE) { repeats = function(x) return(sum(duplicated(x))) nS = length(S) if (nS != length(dM) & (nS - 1) != length(dM)) stop("Error in msc.peaks.clust: vectors S and dM have to have the same length") if (length(BinSize) != 2) BinSize = c(0, 1) if (BinSize[1] > BinSize[2]) { a = BinSize[1] BinSize[1] = BinSize[2] BinSize[2] = a } if (verbose) print("Stack #Gaps [parent_bin ](bin_size) -> [Left_child ](#reps bin_size) + [right_child](#reps bin_size) gap_chosen") mStack = max(20, as.integer(3 * log(nS))) nStack = 1 Stack = matrix(0, 2, mStack) Stack[1, nStack] = 1 Stack[2, nStack] = nS bin = numeric(nS) while (nStack > 0) { from = Stack[1, nStack] to = Stack[2, nStack] nStack = nStack - 1 gap = dM[from:(to - 1)] size = sum(gap) mx = tol * max(gap) idx = which(gap >= mx) len = length(idx) if (len > 1) { score = numeric(len) for (j in 1:len) { Cut = idx[j] + from nL = repeats(S[from:(Cut - 1)]) nR = repeats(S[Cut:to]) score[j] = nL + nR } i = which(score == min(score)) idx = idx[i] m = length(idx) if (m > 1) { k = to - from - 1 if (idx[1] < k - idx[m]) idx = idx[1] else idx = idx[m] } } Cut = idx[1] + from if (from <= Cut - 2) sL = sum(dM[from:(Cut - 2)]) else sL = 0 nL = -1 if (sL > BinSize[2]) { nStack = nStack + 1 Stack[, nStack] = c(from, Cut - 1) nL = -2 } else if (sL > BinSize[1]) { nL = repeats(S[from:(Cut - 1)]) if (nL > 0) { nStack = nStack + 1 Stack[, nStack] = c(from, Cut - 1) } } else if (sL == 0) nL = 0 if (Cut <= to - 1) sR = sum(dM[Cut:(to - 1)]) else sR = 0 nR = -1 if (sR > BinSize[2]) { nStack = nStack + 1 Stack[, nStack] = c(Cut, to) nR = -2 } else if (sR > BinSize[1]) { nR = repeats(S[Cut:to]) if (nR > 0) { nStack = nStack + 1 Stack[, nStack] = c(Cut, to) } } else if (sR == 0) nR = 0 if (nStack == mStack) { mStack = trunc(1.5 * mStack) Stack = rbind(Stack, matrix(0, 2, mStack - nStack)) } bin[Cut] = 1 if (verbose) print(sprintf("%5i %5i [%5i %5i](%8.5f) -> [%5i %5i](%5i, 
%7.4f) + [%5i %5i](%5i, %7.4f) gap=%6.4f", as.integer(nStack), as.integer(len), as.integer(from), as.integer(to), size, as.integer(from), as.integer(Cut - 1), as.integer(nL), sL, as.integer(Cut), as.integer(to), as.integer(nR), sR, dM[Cut - 1])) } bin[1] = 1 bin[nS] = 0 return(bin) }
## version 0.3

## searchByString(): page through the Elsevier Scopus Search API for a query
## string, merge every page of XML results into one well-formed
## <search-results> document, save it to `outfile`, and return the XML text.
## NOTE(review): `key` below is a placeholder -- a valid Elsevier API key must
## be filled in before use; each page request is throttled with Sys.sleep(1).
searchByString <- function(string, datatype = "application/xml", content = "complete", myStart = 0, retCount = 25, retMax = Inf, mySort = "-coverDate", outfile) {
    if (!datatype %in% c("application/xml", "application/json", "application/atom+xml")) { ## error checking for valid inputs to the function
        stop("Invalid datatype. Valid types are 'application/xml', 'application/json', and 'application/atom+xml'")
    } else if (!content %in% c("complete", "standard")) {
        stop("Invalid content value. Valid content values are 'complete', and 'standard'")
    } else {
        library(httr)
        library(XML)
        key <- "yourAPIkey"
        print("Retrieving records.")
        theURL <- GET("https://api.elsevier.com/content/search/scopus", query = list(apiKey = key, query = string, sort = mySort, httpAccept = datatype, view = content, count = retCount, start = myStart)) ## format the URL to be sent to the API
        stop_for_status(theURL) ## pass any HTTP errors to the R console
        theData <- content(theURL, as = "text") ## extract the content of the response
        newData <- xmlParse(theURL) ## parse the data to extract values
        resultCount <- as.numeric(xpathSApply(newData,"//opensearch:totalResults", xmlValue)) ## get the total number of search results for the string
        print(paste("Found", resultCount, "records."))
        retrievedCount <- retCount + myStart ## set the current number of results retrieved for the designated start and count parameters
        while (resultCount > retrievedCount && retrievedCount < retMax) { ## check if it's necessary to perform multiple requests to retrieve all of the results; if so, create a loop to retrieve additional pages of results
            myStart <- myStart + retCount ## add the number of records already returned to the start number
            print(paste("Retrieved", retrievedCount, "of", resultCount, "records. Getting more."))
            theURL <- GET("https://api.elsevier.com/content/search/scopus", query = list(apiKey = key, query = string, sort = mySort, httpAccept = datatype, view = content, count = retCount, start = myStart)) ## get the next page of results
            theData <- paste(theData, content(theURL, as = "text")) ## paste new theURL content to theData; if there's an HTTP error, the XML of the error will be pasted to the end of theData
            if (http_error(theURL) == TRUE) { ## check if there's an HTTP error
                print("Encountered an HTTP error. Details follow.") ## alert the user to the error
                print(http_status(theURL)) ## print out the error category, reason, and message
                break ## if there's an HTTP error, break out of the loop and return the data that has been retrieved
            }
            retrievedCount <- retrievedCount + retCount ## add the number of results retrieved in this iteration to the total number of results retrieved
            Sys.sleep(1)
        } ## repeat until retrievedCount >= resultCount
        print(paste("Retrieved", retrievedCount, "records. Formatting results."))
        writeLines(theData, outfile, useBytes = TRUE) ## if there were multiple pages of results, they come back as separate XML files pasted into the single outfile; the theData XML object can't be coerced into a string to do find/replace operations, so I think it must be written to a file and then reloaded; useBytes = TRUE keeps the UTF-8 encoding of special characters like the copyright symbol so they won't throw an error later
        theData <- readChar(outfile, file.info(outfile)$size) ## convert the XML results to a character vector of length 1 that can be manipulated
        theData <- gsub("<?xml version=\"1.0\" encoding=\"UTF-8\"?>", "", theData, fixed = TRUE, useBytes = TRUE)
        theData <- gsub("<search-results.+?>", "", theData, useBytes = TRUE)
        theData <- gsub("</search-results>", "", theData, fixed = TRUE) ## remove all headers and footers of the separate XML files
        theData <- paste("<?xml version=\"1.0\" encoding=\"UTF-8\"?><search-results xmlns=\"http://www.w3.org/2005/Atom\" xmlns:cto=\"http://www.elsevier.com/xml/cto/dtd\" xmlns:atom=\"http://www.w3.org/2005/Atom\" xmlns:prism=\"http://prismstandard.org/namespaces/basic/2.0/\" xmlns:opensearch=\"http://a9.com/-/spec/opensearch/1.1/\" xmlns:dc=\"http://purl.org/dc/elements/1.1/\">", theData)
        theData <- paste(theData, "</search-results>") ## add the correct header to the beginning of the file and the correct footer to the end of the file
        writeLines(theData, outfile, useBytes = TRUE) ## save the correctly formatted XML file
        print("Done")
        return(theData) ## return the final, correctly formatted XML file
    }
}

## searchByID(): retrieve Scopus records for a vector (or a file) of PubMed
## IDs or DOIs, querying in batches of 25 IDs per request.
## NOTE(review): this definition continues past the end of the reviewed chunk
## and is left as found from here on.
searchByID <- function(theIDs, idtype, datatype = "application/xml", content = "complete", myStart = 0, retCount = 25, outfile) {
    library(httr)
    library(XML)
    key <- "yourAPIkey"
    if (length(theIDs) == 1) {
        theIDs <- unique(scan(theIDs, what = "varchar")) ## load the list of IDs into a character vector
    } else {
        theIDs <- unique(as.character(theIDs))
    }
    resultCount <- as.numeric(length(theIDs)) ## get the 
total number of IDs idList <- split(theIDs, ceiling(seq_along(theIDs)/25)) ## split the IDs into batches of 25 theData <- " " ## create an empty character holder for the XML retrievedCount <- 0 ## set the current number of records retrieved to zero if (idtype == "pmid") { idList <- lapply(mapply(paste, "PMID(", idList, collapse = ") OR "), paste, ")") ## append the correct scopus search syntax around each number in each batch of IDs print(paste("Retrieving", resultCount, "records.")) for (i in 1:length(idList)) { ## loop through the list of search strings and return data for each one string <- idList[i] theURL <- GET("https://api.elsevier.com/content/search/scopus", query = list(apiKey = key, query = string, httpAccept = datatype, view = content, count = retCount, start = myStart)) theData <- paste(theData, content(theURL, as = "text")) ## paste new theURL content to theData if (http_error(theURL) == TRUE) { ## check if there's an HTTP error print("Encountered an HTTP error. Details follow.") ## alert the user to the error print(http_status(theURL)) ## print out the error category, reason, and message break ## if there's an HTTP error, break out of the loop and return the data that has been retrieved } Sys.sleep(1) retrievedCount <- retrievedCount + retCount print(paste("Retrieved", retrievedCount, "of", resultCount, "records. Getting more.")) } print(paste("Retrieved", retrievedCount, "records. 
Formatting results.")) writeLines(theData, outfile, useBytes = TRUE) theData <- readChar(outfile, file.info(outfile)$size) theData <- gsub("<?xml version=\"1.0\" encoding=\"UTF-8\"?>", "", theData, fixed = TRUE, useBytes = TRUE) theData <- gsub("<search-results.+?>", "", theData, useBytes = TRUE) theData <- gsub("</search-results>", "", theData, fixed = TRUE) ## remove all headers and footers of the separate XML files theData <- paste("<?xml version=\"1.0\" encoding=\"UTF-8\"?><search-results xmlns=\"http://www.w3.org/2005/Atom\" xmlns:cto=\"http://www.elsevier.com/xml/cto/dtd\" xmlns:atom=\"http://www.w3.org/2005/Atom\" xmlns:prism=\"http://prismstandard.org/namespaces/basic/2.0/\" xmlns:opensearch=\"http://a9.com/-/spec/opensearch/1.1/\" xmlns:dc=\"http://purl.org/dc/elements/1.1/\">", theData) theData <- paste(theData, "</search-results>") ## add the correct header to the beginning of the file and the correct footer to the end of the file writeLines(theData, outfile, useBytes = TRUE) print("Done") return(theData) } else if (idtype == "doi") { idList <- lapply(mapply(paste, "DOI(", idList, collapse = ") OR "), paste, ")") ## append the correct scopus search syntax around each number print(paste("Retrieving", resultCount, "records.")) for (i in 1:length(idList)) { string <- idList[i] theURL <- GET("https://api.elsevier.com/content/search/scopus", query = list(apiKey = key, query = string, httpAccept = datatype, view = content, count = retCount, start = myStart)) theData <- paste(theData, content(theURL, as = "text")) ## paste new theURL content to theData if (http_error(theURL) == TRUE) { ## check if there's an HTTP error print("Encountered an HTTP error. 
Details follow.") ## alert the user to the error print(http_status(theURL)) ## print out the error category, reason, and message break ## if there's an HTTP error, break out of the loop and return the data that has been retrieved } Sys.sleep(1) retrievedCount <- retrievedCount + retCount print(paste("Retrieved", retrievedCount, "of", resultCount, "records. Getting more.")) } print(paste("Retrieved", retrievedCount, "records. Formatting results.")) writeLines(theData, outfile, useBytes = TRUE) theData <- readChar(outfile, file.info(outfile)$size) theData <- gsub("<?xml version=\"1.0\" encoding=\"UTF-8\"?>", "", theData, fixed = TRUE, useBytes = TRUE) theData <- gsub("<search-results.+?>", "", theData, useBytes = TRUE) theData <- gsub("</search-results>", "", theData, fixed = TRUE) ## remove all headers and footers of the separate XML files theData <- paste("<?xml version=\"1.0\" encoding=\"UTF-8\"?><search-results xmlns=\"http://www.w3.org/2005/Atom\" xmlns:cto=\"http://www.elsevier.com/xml/cto/dtd\" xmlns:atom=\"http://www.w3.org/2005/Atom\" xmlns:prism=\"http://prismstandard.org/namespaces/basic/2.0/\" xmlns:opensearch=\"http://a9.com/-/spec/opensearch/1.1/\" xmlns:dc=\"http://purl.org/dc/elements/1.1/\">", theData) theData <- paste(theData, "</search-results>") ## add the correct header to the beginning of the file and the correct footer to the end of the file writeLines(theData, outfile, useBytes = TRUE) print("Done") return(theData) } else if (idtype == "eid") { idList <- lapply(mapply(paste, "EID(", idList, collapse = ") OR "), paste, ")") ## append the correct scopus search syntax around each number print(paste("Retrieving", resultCount, "records.")) for (i in 1:length(idList)) { string <- idList[i] theURL <- GET("https://api.elsevier.com/content/search/scopus", query = list(apiKey = key, query = string, httpAccept = datatype, view = content, count = retCount, start = myStart)) theData <- paste(theData, content(theURL, as = "text")) ## paste new theURL 
content to theData if (http_error(theURL) == TRUE) { ## check if there's an HTTP error print("Encountered an HTTP error. Details follow.") ## alert the user to the error print(http_status(theURL)) ## print out the error category, reason, and message break ## if there's an HTTP error, break out of the loop and return the data that has been retrieved } Sys.sleep(1) retrievedCount <- retrievedCount + retCount print(paste("Retrieved", retrievedCount, "of", resultCount, "records. Getting more.")) } print(paste("Retrieved", retrievedCount, "records. Formatting results.")) writeLines(theData, outfile, useBytes = TRUE) theData <- readChar(outfile, file.info(outfile)$size) theData <- gsub("<?xml version=\"1.0\" encoding=\"UTF-8\"?>", "", theData, fixed = TRUE, useBytes = TRUE) theData <- gsub("<search-results.+?>", "", theData, useBytes = TRUE) theData <- gsub("</search-results>", "", theData, fixed = TRUE) ## remove all headers and footers of the separate XML files theData <- paste("<?xml version=\"1.0\" encoding=\"UTF-8\"?><search-results xmlns=\"http://www.w3.org/2005/Atom\" xmlns:cto=\"http://www.elsevier.com/xml/cto/dtd\" xmlns:atom=\"http://www.w3.org/2005/Atom\" xmlns:prism=\"http://prismstandard.org/namespaces/basic/2.0/\" xmlns:opensearch=\"http://a9.com/-/spec/opensearch/1.1/\" xmlns:dc=\"http://purl.org/dc/elements/1.1/\">", theData) theData <- paste(theData, "</search-results>") ## add the correct header to the beginning of the file and the correct footer to the end of the file writeLines(theData, outfile, useBytes = TRUE) print("Done") return(theData) } else { stop("Invalid idtype. 
Valid idtypes are 'pmid', 'doi', or 'eid'") } } extractXML <- function(theFile) { library(XML) newData <- xmlParse(theFile) ## parse the XML records <- getNodeSet(newData, "//cto:entry", namespaces = "cto") ## create a list of records for missing or duplicate node handling scopusID <- lapply(records, xpathSApply, "./cto:eid", xmlValue, namespaces = "cto") ## handle potentially missing eid nodes scopusID[sapply(scopusID, is.list)] <- NA scopusID <- unlist(scopusID) doi <- lapply(records, xpathSApply, "./prism:doi", xmlValue, namespaces = c(prism = "http://prismstandard.org/namespaces/basic/2.0/")) ## handle potentially missing doi nodes doi[sapply(doi, is.list)] <- NA doi <- unlist(doi) pmid <- lapply(records, xpathSApply, "./cto:pubmed-id", xmlValue, namespaces = "cto") ## handle potentially missing pmid nodes: returns a list with the node value if the node is present and an empty list if the node is missing pmid[sapply(pmid, is.list)] <- NA ## find the empty lists in pmid and set them to NA pmid <- unlist(pmid) ## turn the pmid list into a vector authLast <- lapply(records, xpathSApply, ".//cto:surname", xmlValue, namespaces = "cto") ## grab the surname and initials for each author in each record, then paste them together authLast[sapply(authLast, is.list)] <- NA authInit <- lapply(records, xpathSApply, ".//cto:initials", xmlValue, namespaces = "cto") authInit[sapply(authInit, is.list)] <- NA authors <- mapply(paste, authLast, authInit, collapse = "|") authors <- sapply(strsplit(authors, "|", fixed = TRUE), unique) ## remove the duplicate author listings authors <- sapply(authors, paste, collapse = "|") affiliations <- lapply(records, xpathSApply, ".//cto:affilname", xmlValue, namespaces = "cto") ## handle multiple affiliation names affiliations[sapply(affiliations, is.list)] <- NA affiliations <- sapply(affiliations, paste, collapse = "|") affiliations <- sapply(strsplit(affiliations, "|", fixed = TRUE), unique) ## remove the duplicate affiliation listings 
affiliations <- sapply(affiliations, paste, collapse = "|") countries <- lapply(records, xpathSApply, ".//cto:affiliation-country", xmlValue, namespaces = "cto") countries[sapply(countries, is.list)] <- NA countries <- sapply(countries, paste, collapse = "|") countries <- sapply(strsplit(countries, "|", fixed = TRUE), unique) ## remove the duplicate country listings countries <- sapply(countries, paste, collapse = "|") year <- lapply(records, xpathSApply, "./prism:coverDate", xmlValue, namespaces = c(prism = "http://prismstandard.org/namespaces/basic/2.0/")) year[sapply(year, is.list)] <- NA year <- unlist(year) year <- gsub("\\-..", "", year) ## extract only year from coverDate string (e.g. extract "2015" from "2015-01-01") articletitle <- lapply(records, xpathSApply, "./dc:title", xmlValue, namespaces = c(dc = "http://purl.org/dc/elements/1.1/")) articletitle[sapply(articletitle, is.list)] <- NA articletitle <- unlist(articletitle) journal <- lapply(records, xpathSApply, "./prism:publicationName", xmlValue, namespaces = c(prism = "http://prismstandard.org/namespaces/basic/2.0/")) ## handle potentially missing issue nodes journal[sapply(journal, is.list)] <- NA journal <- unlist(journal) volume <- lapply(records, xpathSApply, "./prism:volume", xmlValue, namespaces = c(prism = "http://prismstandard.org/namespaces/basic/2.0/")) ## handle potentially missing issue nodes volume[sapply(volume, is.list)] <- NA volume <- unlist(volume) issue <- lapply(records, xpathSApply, "./prism:issueIdentifier", xmlValue, namespaces = c(prism = "http://prismstandard.org/namespaces/basic/2.0/")) ## handle potentially missing issue nodes issue[sapply(issue, is.list)] <- NA issue <- unlist(issue) pages <- lapply(records, xpathSApply, "./prism:pageRange", xmlValue, namespaces = c(prism = "http://prismstandard.org/namespaces/basic/2.0/")) ## handle potentially missing issue nodes pages[sapply(pages, is.list)] <- NA pages <- unlist(pages) abstract <- lapply(records, xpathSApply, 
"./dc:description", xmlValue, namespaces = c(dc = "http://purl.org/dc/elements/1.1/")) ## handle potentially missing abstract nodes abstract[sapply(abstract, is.list)] <- NA abstract <- unlist(abstract) keywords <- lapply(records, xpathSApply, "./cto:authkeywords", xmlValue, namespaces = "cto") keywords[sapply(keywords, is.list)] <- NA keywords <- unlist(keywords) keywords <- gsub(" | ", "|", keywords, fixed = TRUE) ptype <- lapply(records, xpathSApply, "./cto:subtypeDescription", xmlValue, namespaces = "cto") ptype[sapply(ptype, is.list)] <- NA ptype <- unlist(ptype) timescited <- lapply(records, xpathSApply, "./cto:citedby-count", xmlValue, namespaces = "cto") timescited[sapply(timescited, is.list)] <- NA timescited <- unlist(timescited) theDF <- data.frame(scopusID, doi, pmid, authors, affiliations, countries, year, articletitle, journal, volume, issue, pages, keywords, abstract, ptype, timescited, stringsAsFactors = FALSE) return(theDF) }
/scopusAPI.R
no_license
pirakoze/scopusAPI
R
false
false
17,030
r
## version 0.3
##
## Client helpers for the Elsevier Scopus Search API:
##   searchByString() pages through the results of a free-text query,
##   searchByID()     retrieves records for a list of PMIDs / DOIs / EIDs,
##   extractXML()     flattens the returned XML into a data frame.
##
## NOTE(review): the hard-coded placeholder ("yourAPIkey") must be replaced
## with a real key before use; consider reading it from an environment
## variable rather than embedding it in source.

## Single definition of the search endpoint (was repeated in every request).
scopusSearchURL <- "https://api.elsevier.com/content/search/scopus"

## If `response` is an HTTP error, alert the user, print the status
## (category, reason, message) and return TRUE; otherwise return FALSE.
## Callers break out of their retrieval loop on TRUE and return whatever
## data has been accumulated so far.
reportHttpError <- function(response) {
  if (http_error(response)) {
    print("Encountered an HTTP error. Details follow.")
    print(http_status(response))
    TRUE
  } else {
    FALSE
  }
}

## Turn the accumulated per-page XML chunks in `theData` into one
## well-formed <search-results> document, write it to `outfile`, and
## return the formatted string.  Each page comes back with its own XML
## prolog and <search-results> wrapper, so those are stripped before a
## single header/footer pair is re-applied.
## (This sequence previously appeared, duplicated, in four places.)
formatScopusXML <- function(theData, outfile) {
  ## Round-trip through the file: useBytes = TRUE keeps the UTF-8 encoding
  ## of special characters (e.g. the copyright symbol) so they won't throw
  ## an error in the gsub() calls below.
  writeLines(theData, outfile, useBytes = TRUE)
  theData <- readChar(outfile, file.info(outfile)$size)
  ## Remove the per-page prologs and <search-results> wrappers.
  theData <- gsub("<?xml version=\"1.0\" encoding=\"UTF-8\"?>", "", theData,
                  fixed = TRUE, useBytes = TRUE)
  theData <- gsub("<search-results.+?>", "", theData, useBytes = TRUE)
  theData <- gsub("</search-results>", "", theData, fixed = TRUE)
  ## Re-wrap everything in a single document carrying the full namespace set.
  theData <- paste("<?xml version=\"1.0\" encoding=\"UTF-8\"?><search-results xmlns=\"http://www.w3.org/2005/Atom\" xmlns:cto=\"http://www.elsevier.com/xml/cto/dtd\" xmlns:atom=\"http://www.w3.org/2005/Atom\" xmlns:prism=\"http://prismstandard.org/namespaces/basic/2.0/\" xmlns:opensearch=\"http://a9.com/-/spec/opensearch/1.1/\" xmlns:dc=\"http://purl.org/dc/elements/1.1/\">", theData)
  theData <- paste(theData, "</search-results>")
  writeLines(theData, outfile, useBytes = TRUE)  # save the formatted XML
  theData
}

## Retrieve all Scopus records matching a free-text query.
##
## string   : Scopus advanced-search query string.
## datatype : response media type ("application/xml", "application/json",
##            or "application/atom+xml").
## content  : API view, "complete" or "standard".
## myStart  : zero-based offset of the first record to fetch.
## retCount : page size per request.
## retMax   : stop after this many records (default Inf = all).
## mySort   : sort key passed to the API (default newest coverDate first).
## outfile  : path where the combined XML is written.
##
## Returns the combined, re-wrapped XML as a length-1 character vector
## (also written to `outfile`).
searchByString <- function(string, datatype = "application/xml",
                           content = "complete", myStart = 0, retCount = 25,
                           retMax = Inf, mySort = "-coverDate", outfile) {
  ## Validate enumerated inputs before touching the network.
  if (!datatype %in% c("application/xml", "application/json", "application/atom+xml")) {
    stop("Invalid datatype. Valid types are 'application/xml', 'application/json', and 'application/atom+xml'")
  } else if (!content %in% c("complete", "standard")) {
    stop("Invalid content value. Valid content values are 'complete', and 'standard'")
  } else {
    library(httr)
    library(XML)
    key <- "yourAPIkey"
    print("Retrieving records.")
    theURL <- GET(scopusSearchURL,
                  query = list(apiKey = key, query = string, sort = mySort,
                               httpAccept = datatype, view = content,
                               count = retCount, start = myStart))
    stop_for_status(theURL)  # surface any HTTP error on the first request
    theData <- content(theURL, as = "text")
    newData <- xmlParse(theURL)  # parse the first page to read the hit count
    resultCount <- as.numeric(xpathSApply(newData, "//opensearch:totalResults", xmlValue))
    print(paste("Found", resultCount, "records."))
    retrievedCount <- retCount + myStart  # records covered by the first page
    ## Page through the remaining results (if any), up to retMax records.
    while (resultCount > retrievedCount && retrievedCount < retMax) {
      myStart <- myStart + retCount
      print(paste("Retrieved", retrievedCount, "of", resultCount, "records. Getting more."))
      theURL <- GET(scopusSearchURL,
                    query = list(apiKey = key, query = string, sort = mySort,
                                 httpAccept = datatype, view = content,
                                 count = retCount, start = myStart))
      ## Paste first so that, on failure, the error XML is still captured
      ## at the end of theData (matches the original behaviour).
      theData <- paste(theData, content(theURL, as = "text"))
      if (reportHttpError(theURL)) break
      retrievedCount <- retrievedCount + retCount
      Sys.sleep(1)  # be polite to the API rate limiter
    }
    print(paste("Retrieved", retrievedCount, "records. Formatting results."))
    theData <- formatScopusXML(theData, outfile)
    print("Done")
    return(theData)
  }
}

## Retrieve Scopus records for a list of identifiers.
##
## theIDs  : either a character vector of IDs, or (length 1) the path of a
##           whitespace-delimited file of IDs to scan() in.
## idtype  : "pmid", "doi", or "eid" — selects the Scopus search field.
## datatype, content, myStart, retCount, outfile: as in searchByString().
##
## Returns the combined, re-wrapped XML (also written to `outfile`).
##
## This replaces three byte-identical branches that differed only in the
## "PMID(" / "DOI(" / "EID(" query prefix.
searchByID <- function(theIDs, idtype, datatype = "application/xml",
                       content = "complete", myStart = 0, retCount = 25,
                       outfile) {
  library(httr)
  library(XML)
  key <- "yourAPIkey"
  ## Fail fast on an unknown idtype (the original only errored after
  ## loading and batching the IDs).
  prefix <- switch(idtype,
                   pmid = "PMID(",
                   doi  = "DOI(",
                   eid  = "EID(",
                   stop("Invalid idtype. Valid idtypes are 'pmid', 'doi', or 'eid'"))
  if (length(theIDs) == 1) {
    ## A single element is treated as a file path of IDs to load.
    theIDs <- unique(scan(theIDs, what = "varchar"))
  } else {
    theIDs <- unique(as.character(theIDs))
  }
  resultCount <- as.numeric(length(theIDs))  # total number of IDs requested
  idList <- split(theIDs, ceiling(seq_along(theIDs) / 25))  # batches of 25
  ## Wrap each batch in Scopus search syntax:
  ## "PMID( id1 ) OR PMID( id2 ) ... )" etc.
  idList <- lapply(mapply(paste, prefix, idList, collapse = ") OR "), paste, ")")
  theData <- " "        # accumulator for the returned XML pages
  retrievedCount <- 0
  print(paste("Retrieving", resultCount, "records."))
  for (i in seq_along(idList)) {
    ## BUG FIX: the original used idList[i] (a length-1 list); [[i]]
    ## extracts the character query string GET() expects.
    string <- idList[[i]]
    theURL <- GET(scopusSearchURL,
                  query = list(apiKey = key, query = string,
                               httpAccept = datatype, view = content,
                               count = retCount, start = myStart))
    ## Paste first so error XML is still captured on failure.
    theData <- paste(theData, content(theURL, as = "text"))
    if (reportHttpError(theURL)) break
    Sys.sleep(1)  # be polite to the API rate limiter
    retrievedCount <- retrievedCount + retCount
    print(paste("Retrieved", retrievedCount, "of", resultCount, "records. Getting more."))
  }
  print(paste("Retrieved", retrievedCount, "records. Formatting results."))
  theData <- formatScopusXML(theData, outfile)
  print("Done")
  return(theData)
}

## Parse a Scopus search-results XML file (as produced by searchByString /
## searchByID) into a data frame with one row per record and columns:
## scopusID, doi, pmid, authors, affiliations, countries, year,
## articletitle, journal, volume, issue, pages, keywords, abstract,
## ptype, timescited.  Multi-valued fields are "|"-delimited strings.
extractXML <- function(theFile) {
  library(XML)
  prismNS <- c(prism = "http://prismstandard.org/namespaces/basic/2.0/")
  dcNS <- c(dc = "http://purl.org/dc/elements/1.1/")

  ## One value per record; records missing the node yield NA.
  ## (xpathSApply returns an empty list for a missing node, hence the
  ## is.list() test before unlisting.)
  singleValue <- function(records, path, ns) {
    vals <- lapply(records, xpathSApply, path, xmlValue, namespaces = ns)
    vals[sapply(vals, is.list)] <- NA
    unlist(vals)
  }

  ## Repeated node per record: NA-fill, join with "|", then de-duplicate
  ## the joined entries (same order of operations as the original).
  multiValue <- function(records, path, ns) {
    vals <- lapply(records, xpathSApply, path, xmlValue, namespaces = ns)
    vals[sapply(vals, is.list)] <- NA
    vals <- sapply(vals, paste, collapse = "|")
    vals <- sapply(strsplit(vals, "|", fixed = TRUE), unique)
    sapply(vals, paste, collapse = "|")
  }

  newData <- xmlParse(theFile)
  ## One node set entry per record, so missing/duplicate child nodes can
  ## be handled per record.
  records <- getNodeSet(newData, "//cto:entry", namespaces = "cto")

  scopusID <- singleValue(records, "./cto:eid", "cto")
  doi <- singleValue(records, "./prism:doi", prismNS)
  pmid <- singleValue(records, "./cto:pubmed-id", "cto")

  ## Authors: paste surname + initials per author, "|"-join, then drop
  ## duplicate author listings.
  authLast <- lapply(records, xpathSApply, ".//cto:surname", xmlValue, namespaces = "cto")
  authLast[sapply(authLast, is.list)] <- NA
  authInit <- lapply(records, xpathSApply, ".//cto:initials", xmlValue, namespaces = "cto")
  authInit[sapply(authInit, is.list)] <- NA
  authors <- mapply(paste, authLast, authInit, collapse = "|")
  authors <- sapply(strsplit(authors, "|", fixed = TRUE), unique)
  authors <- sapply(authors, paste, collapse = "|")

  affiliations <- multiValue(records, ".//cto:affilname", "cto")
  countries <- multiValue(records, ".//cto:affiliation-country", "cto")

  year <- singleValue(records, "./prism:coverDate", prismNS)
  year <- gsub("\\-..", "", year)  # "2015-01-01" -> "2015"

  articletitle <- singleValue(records, "./dc:title", dcNS)
  journal <- singleValue(records, "./prism:publicationName", prismNS)
  volume <- singleValue(records, "./prism:volume", prismNS)
  issue <- singleValue(records, "./prism:issueIdentifier", prismNS)
  pages <- singleValue(records, "./prism:pageRange", prismNS)
  abstract <- singleValue(records, "./dc:description", dcNS)

  keywords <- singleValue(records, "./cto:authkeywords", "cto")
  keywords <- gsub(" | ", "|", keywords, fixed = TRUE)  # normalise " | " separators

  ptype <- singleValue(records, "./cto:subtypeDescription", "cto")
  timescited <- singleValue(records, "./cto:citedby-count", "cto")

  theDF <- data.frame(scopusID, doi, pmid, authors, affiliations, countries,
                      year, articletitle, journal, volume, issue, pages,
                      keywords, abstract, ptype, timescited,
                      stringsAsFactors = FALSE)
  return(theDF)
}
##########################################################################################
### Setting up the libraries: ############################################################
##########################################################################################
## Analysis script: compare FACS reference and patch-seq query gene expression
## for mouse VISp inhibitory subclasses, ending in correlation heatmaps.
## Depends on Allen Institute network paths, sourced helper scripts, and
## pre-built .rda objects — not runnable outside that environment.
.libPaths("/home/fahimehb/R/x86_64-redhat-linux-gnu-library/3.5")
.libPaths(c("/allen/programs/celltypes/workgroups/rnaseqanalysis/Script_Repository/Olivia/3.5", .libPaths()))
library(rbokeh)
library(ggplot2)
library(dendextend)
script_rep="//allen/programs/celltypes/workgroups/rnaseqanalysis/shiny/patch_seq/star/mouse_patchseq_script_repository/"
library(reshape2)
library(matrixStats)
library(feather)
library(tibble)
library(dplyr)
library(purrr)
library(cowplot)
library(scrattch.hicat)
## Helper functions (heatmaps, DE genes, dendrogram utilities, mapping helpers)
## come from the shared patch-seq script repository.
source(file.path(script_rep,"patchseq/heatmap.R"))
source(file.path(script_rep,"patchseq/de.genes.R"))
source(file.path(script_rep,"patchseq/dendro.R"))
source(file.path(script_rep,"patchseq/patchseq.R"))
source(file.path(script_rep,"patchseq/Mapping_helper_functions.R"))
source("/allen/programs/celltypes/workgroups/rnaseqanalysis/Fahimehb/MY_R/Utils.R")
options(stringsAsFactors = F)
### TODO: The batch date should be updated everytime
#just update batch_date and source it
batch_date="20190722_BT014-RSC-215"
######################################################################################################
### Setting up some paths ############################################################################
######################################################################################################
### TODO: Change these if you changed them in the mapping.R
ref.dir = "//allen/programs/celltypes/workgroups/rnaseqanalysis/shiny/facs_seq/mouse_V1_ALM_20180520/"
res.dir = "//allen/programs/celltypes/workgroups/rnaseqanalysis/shiny/patch_seq/star/mouse_patchseq_VISp_20180626_collapsed40_cpm/"
query.dir = "//allen/programs/celltypes/workgroups/rnaseqanalysis/SMARTer/STAR/Mouse/patchseq/R_Object/"
work.dir = "/allen//programs/celltypes/workgroups/rnaseqanalysis/Fahimehb/Manuscript_patchseq_2019/"
patchseq.dir = "/allen/programs/celltypes/workgroups/rnaseqanalysis/shiny/patch_seq/star/mouse_patchseq_VISp_current/"
latest.mapping.dir = "/allen/programs/celltypes/workgroups/rnaseqanalysis/shiny/patch_seq/star/mouse_patchseq_VISp_20190729_collapsed40_cpm/"
######################################################################################################
### Reading ref data #################################################################################
######################################################################################################
### TODO: the following two lines should be modified if the number of FACS cells has changed
## These .rda loads populate the workspace as a side effect
## (presumably REF_mapping_probability and select.markers — TODO confirm).
load("//allen/programs/celltypes/workgroups/rnaseqanalysis/shiny/Taxonomies/AIT2.3.1/REF_mapping_probability.rda")
load("//allen/programs/celltypes/workgroups/rnaseqanalysis/shiny/Taxonomies/AIT2.3.1/select.markers.rda")
## Branch-point collapse threshold used in the dendrogram file names.
bp.collapse.th = 40
bp.name.add = NULL
if (!is.null(bp.collapse.th)) { bp.name.add = paste0(".with.bp.", bp.collapse.th) }
###load reference data and tree
tmp.load1 = load(file=file.path(res.dir, "ref.data.rda")) # should include cl, cl.df, norm.dat.
# The loaded cl is not used because it only includes cluster ids but not cluster labels
tmp.load2 = load(file.path(file=res.dir, file=paste0("V1.dend", bp.name.add,".rda"))) # should include the pruned V1 tree
tmp.load3 = load(file.path(res.dir, file=paste0("V1.dend.list", bp.name.add,".rda"))) # should include dend.list
plot(dend)  # visual sanity check of the loaded dendrogram
## Rebuild cl as a factor of cluster *labels* (the loaded cl holds ids only).
rownames(cl.df)=cl.df$cluster_id
cltmp=cl.df[as.character(cl),"cluster_label"]
names(cltmp)=names(cl)
cl=factor(cltmp)
## FACS annotation, restricted to the cells present in norm.dat.
FACS.cells <- colnames(norm.dat)
FACS.anno <- read_feather(paste0(ref.dir, "/anno.feather"))
FACS.anno <- as.data.frame(FACS.anno)
rownames(FACS.anno) <- FACS.anno$sample_id
FACS.anno <- FACS.anno[FACS.cells,]
## Genotypes containing "Gad2" or "32" (computed but the genotype filter
## below is commented out, so currently unused).
select.genotypes <- unique(FACS.anno$genotype_label)[grepl("Gad2", unique(FACS.anno$genotype_label)) | grepl("32", unique(FACS.anno$genotype_label))]
## Keep only the inhibitory subclasses of interest.
select.subclass <- c("Sst", "Pvalb", "Vip", "Sncg", "Lamp5")
FACS.anno <- FACS.anno %>% filter(subclass_label %in% select.subclass) #%>% #filter(genotype_label %in% select.genotypes)
dim(FACS.anno)
######################################################################################################
### Loading the query data ###########################################################################
######################################################################################################
## CPM matrix for this batch; load() brings cpmR into the workspace.
tmp<-load(paste0(query.dir,batch_date,"_mouse_patchseq_star2.0_cpm.Rdata"))
query.dat = cpmR
# loading samp.dat object
tmp<-load(paste0(query.dir,batch_date,"_mouse_patchseq_star2.0_samp.dat.Rdata"))
## Keep VISp patch-seq cells plus a few other regions of interest.
keepcells = which(samp.dat$Region=="VISp" & samp.dat$Type=="patch_seq")
samp.dat = samp.dat[c(keepcells, which(samp.dat$Region=="TCx"),which(samp.dat$Region=="FCx"), which(samp.dat$Region=="MOp"),which(samp.dat$Region=="TEa") ),] #FCx is for Brian. Rat samples mapped in mouse
## Reorder columns to match samp.dat and relabel by cell container.
query.dat = query.dat[,as.character(samp.dat$exp_component_name)]
colnames(query.dat)=as.character(samp.dat$patched_cell_container)
query.dat.norm = log2(as.matrix(query.dat+1))  # log2(CPM + 1) normalisation
## Align query gene rows with the reference matrix row order.
idx=match(rownames(norm.dat), rownames(query.dat.norm))
query.dat.norm=query.dat.norm[idx,]
patchseq_anno <- read_feather(paste0(patchseq.dir, "/anno.feather"))
patchseq_anno <- as.data.frame(patchseq_anno)
rownames(patchseq_anno) <- patchseq_anno$sample_id
#Patchseq Cells of interests
## "Locked" manuscript cells come from a frozen CSV of specimen ids.
locked_cells_spec_id = rownames(read.csv(paste0(work.dir, "mouse_met_Jun_14.csv"), check.names=FALSE, row.names = 1 ))
locked_cells_sample_id = patchseq_anno[patchseq_anno$spec_id_label %in% locked_cells_spec_id, "sample_id"]
## Interactive sanity check: every locked specimen should map to a sample.
length(locked_cells_spec_id) == length(locked_cells_sample_id)
patchseq_anno <- patchseq_anno[locked_cells_sample_id,]
dim(query.dat.norm[select.markers, locked_cells_sample_id])
## Split sample ids by mapping-confidence call (Core / I1 / I2 / I3).
Core.cells <- patchseq_anno[patchseq_anno$Tree_call_label == "Core", "sample_id"]
I1.cells <- patchseq_anno[patchseq_anno$Tree_call_label == "I1", "sample_id"]
I2.cells <- patchseq_anno[patchseq_anno$Tree_call_label == "I2", "sample_id"]
I3.cells <- patchseq_anno[patchseq_anno$Tree_call_label == "I3", "sample_id"]
patchseq_anno <- patchseq_anno[patchseq_anno$subclass_label %in% select.subclass,]
######################################################################################################
### Comparing gene expression for FACS and patchseq: #################################################
######################################################################################################
## Marker gene panels for the two subclass groupings.
lamp5_sncg_vip_genes <- c("Lamp5", "Ndnf", "Krt73", "Fam19a1", "Pax6", "Ntn1", "Plch2", "Lsp1", "Lhx6", "Nkx2-1", "Vip", "Sncg", "Slc17a8", "Nptx2", "Gpr50", "Itih5", "Serpinf1", "Igfbp6", "Gpc3", "Lmo1", "Ptprt", "Rspo4", "Chat", "Crispld2", "Col15a1", "Pde1a")
sst_pvalb_genes <- c("Sst", "Chodl", "Nos1", "Mme", "Tac1", "Tacr3", "Calb2", "Nr2f2", "Myh8", "Tac2", "Hpse", "Crhr2", "Crh", "Esm1", "Rxfp1", "Nts", "Pvalb", "Gabrg1", "Th", "Calb1", "Akr1c18", "Sema3e", "Gpr149", "Reln", "Tpbg", "Cpne5", "Vipr2", "Nkx2-1")
## Sst/Pvalb: per-cluster median expression of the marker panel in the FACS
## reference (tmp1) and per-topLeaf median in patch-seq (tmp2), then the
## cross-correlation matrix between the two.
tmp1 <- do.call("cbind",tapply(FACS.anno[FACS.anno$subclass_label %in% c("Sst", "Pvalb"), "sample_id"], FACS.anno[FACS.anno$subclass_label %in% c("Sst", "Pvalb"), "cluster_label"], function(x)rowMedians(norm.dat[sst_pvalb_genes, x, drop = F])))
tmp2 <- do.call("cbind",tapply(patchseq_anno[patchseq_anno$subclass_label %in% c("Sst", "Pvalb"), "sample_id"], patchseq_anno[patchseq_anno$subclass_label %in% c("Sst", "Pvalb"), "topLeaf_label"], function(x)rowMedians(query.dat.norm[sst_pvalb_genes, x, drop = F])))
cormat <- cor(tmp1[,colnames(tmp2)], tmp2)
## Cluster id/label/color lookup used to fake a cluster factor for plotting.
tmp3 <- unique((FACS.anno[FACS.anno$subclass_label %in% c("Sst", "Pvalb"),c("cluster_id", "cluster_label", "cluster_color")]))
fake.cl <- setNames(tmp3$cluster_id, tmp3$cluster_label)
col <- t(as.data.frame(setNames(tmp3$cluster_color, tmp3$cluster_label)))
## plot_co_matrix() is defined in the sourced helper scripts — TODO confirm
## its contract; here it renders the FACS-vs-patch-seq correlation heatmap.
plot_co_matrix(co.ratio =cormat , cl = fake.cl, max.cl.size = 1, col = col)
## Same comparison for the Vip/Lamp5 group.
tmp1 <- do.call("cbind",tapply(FACS.anno[FACS.anno$subclass_label %in% c("Vip", "Lamp5"), "sample_id"], FACS.anno[FACS.anno$subclass_label %in% c("Vip", "Lamp5"), "cluster_label"], function(x)rowMedians(norm.dat[lamp5_sncg_vip_genes, x, drop = F])))
tmp2 <- do.call("cbind",tapply(patchseq_anno[patchseq_anno$subclass_label %in% c("Vip", "Lamp5"), "sample_id"], patchseq_anno[patchseq_anno$subclass_label %in% c("Vip", "Lamp5"), "topLeaf_label"], function(x)rowMedians(query.dat.norm[lamp5_sncg_vip_genes, x, drop = F])))
tmp1 <- tmp1[,colnames(tmp2)]
dim(tmp1)
dim(tmp2)
cormat <- cor(tmp1[,colnames(tmp2)], tmp2)
tmp3 <- unique((FACS.anno[FACS.anno$subclass_label %in% c("Vip", "Lamp5"),c("cluster_id", "cluster_label", "cluster_color")]))
fake.cl <- setNames(tmp3$cluster_id, tmp3$cluster_label)
fake.cl <- fake.cl[rownames(cormat)]  # align the fake factor with the matrix rows
col <- t(as.data.frame(setNames(tmp3$cluster_color, tmp3$cluster_label)))
plot_co_matrix(co.ratio =cormat , cl = fake.cl, max.cl.size = 1)
dim(cormat) ggplot(data = melt(cor(tmp1[,colnames(tmp2)], tmp2)), aes(x=Var1, y=Var2, fill=value)) + geom_tile()+ theme(axis.text = element_text(size=7)) + theme(axis.text.x = element_text(angle = 90, hjust = 1)) + xlab("Clustering lable") + ylab("NN Mapping lables") + scale_fill_gradient(low = "white", high = "red") plot(norm.dat["Lamp5",]) ###################################################################################################### ### Comparing gene expression for FACS and patchseq: ################################################# ###################################################################################################### tmp1 <- do.call("cbind", tapply(FACS.anno[, "sample_id"], FACS.anno[, "cluster_label"], function(x)rowMeans(norm.dat[c(lamp5_sncg_vip_genes, sst_pvalb_genes), x, drop = F]))) tmp2 <- do.call("cbind",tapply(patchseq_anno[c(Core.cells, I1.cells), "sample_id"], patchseq_anno[c(Core.cells, I1.cells), "topLeaf_label"], function(x)rowMeans(query.dat.norm[c(lamp5_sncg_vip_genes, sst_pvalb_genes), x, drop = F]))) select.cl <- intersect(colnames(tmp1), colnames(tmp2)) tmp1<- tmp1[,select.cl] tmp2<- tmp2[,select.cl] rownames(tmp1) <- c(lamp5_sncg_vip_genes, sst_pvalb_genes) rownames(tmp2) <- c(lamp5_sncg_vip_genes, sst_pvalb_genes) tmp1 <- melt(tmp1) colnames(tmp1) <- c("Gene", "cl", "FACS.med") #tmp1$color <- c("Red") tmp2 <- melt(tmp2) colnames(tmp2) <- c("Gene", "cl", "Patchseq.med") #tmp2$color <- c("Green") tmp3 <- left_join(tmp1, tmp2) ref.cl.color <- unique(FACS.anno %>% select(cluster_label, cluster_color)) tmp3$cl_color <- sapply(tmp3$cl, function(x)ref.cl.color[ref.cl.color$cluster_label==x, "cluster_color"]) tmp3 <- tmp3 %>% mutate(Gene_color = case_when(Gene == "Sst" ~ "#F54607", Gene == "Pvalb" ~"#0707F5", Gene == "Lamp5" ~ "#48F214", TRUE ~ "#CBCAC6")) figure(width =800, height = 800, xlim = c(0, 20), ylim = c(0, 20), xlab = "Patchseq mean gene expression", ylab = "FACS mean gene expression") %>% ly_wedge(data = tmp3, x 
= Patchseq.med, y = FACS.med, color = Gene_color, start_angle = 0, end_angle = 3.99*pi/2, radius = 0.1, hover = list("Cluster label" = cl, "Gene label" = Gene)) p <- ggplot()+ geom_point(data = tmp3, aes(y = FACS.med, x = Patchseq.med, colour = Gene_color), size = 2, show.legend = FALSE) + scale_color_identity() + geom_abline(slope = 1) + xlim(0,15) + ylim(0,15) + xlab("Patchseq mean gene expression") + ylab("FACS mean gene expression")
/patch_facs_comparison.R
no_license
Fahimeh1983/Rstudios
R
false
false
12,444
r
# Patch-seq vs FACS gene-expression comparison script (mouse VISp interneurons).
# NOTE(review): the original newlines of this script were collapsed during export,
# so statements and comments run together on a few very long physical lines and
# the stored text is not directly parseable R. The comment lines inserted below
# mark the logical sections without altering the original text.
########################################################################################## ### Setting up the libraries: ############################################################ ########################################################################################## .libPaths("/home/fahimehb/R/x86_64-redhat-linux-gnu-library/3.5") .libPaths(c("/allen/programs/celltypes/workgroups/rnaseqanalysis/Script_Repository/Olivia/3.5", .libPaths())) library(rbokeh) library(ggplot2) library(dendextend) script_rep="//allen/programs/celltypes/workgroups/rnaseqanalysis/shiny/patch_seq/star/mouse_patchseq_script_repository/" library(reshape2) library(matrixStats) library(feather) library(tibble) library(dplyr) library(purrr) library(cowplot) library(scrattch.hicat) source(file.path(script_rep,"patchseq/heatmap.R")) source(file.path(script_rep,"patchseq/de.genes.R")) source(file.path(script_rep,"patchseq/dendro.R")) source(file.path(script_rep,"patchseq/patchseq.R")) source(file.path(script_rep,"patchseq/Mapping_helper_functions.R")) source("/allen/programs/celltypes/workgroups/rnaseqanalysis/Fahimehb/MY_R/Utils.R") options(stringsAsFactors = F) ### TODO: The batch date should be updated everytime #just update batch_date and source it batch_date="20190722_BT014-RSC-215" ###################################################################################################### ### Setting up some paths ############################################################################ ###################################################################################################### ### TODO: Change these if you changed them in the mapping.R ref.dir = "//allen/programs/celltypes/workgroups/rnaseqanalysis/shiny/facs_seq/mouse_V1_ALM_20180520/" res.dir = "//allen/programs/celltypes/workgroups/rnaseqanalysis/shiny/patch_seq/star/mouse_patchseq_VISp_20180626_collapsed40_cpm/" query.dir = "//allen/programs/celltypes/workgroups/rnaseqanalysis/SMARTer/STAR/Mouse/patchseq/R_Object/" work.dir = 
# (the work.dir path string literal continues on the next line)
"/allen//programs/celltypes/workgroups/rnaseqanalysis/Fahimehb/Manuscript_patchseq_2019/" patchseq.dir = "/allen/programs/celltypes/workgroups/rnaseqanalysis/shiny/patch_seq/star/mouse_patchseq_VISp_current/" latest.mapping.dir = "/allen/programs/celltypes/workgroups/rnaseqanalysis/shiny/patch_seq/star/mouse_patchseq_VISp_20190729_collapsed40_cpm/" ###################################################################################################### ### Reading ref data ################################################################################# ###################################################################################################### ### TODO: the following two lines should be modified if the number of FACS cells has changed load("//allen/programs/celltypes/workgroups/rnaseqanalysis/shiny/Taxonomies/AIT2.3.1/REF_mapping_probability.rda") load("//allen/programs/celltypes/workgroups/rnaseqanalysis/shiny/Taxonomies/AIT2.3.1/select.markers.rda") bp.collapse.th = 40 bp.name.add = NULL if (!is.null(bp.collapse.th)) { bp.name.add = paste0(".with.bp.", bp.collapse.th) } ###load reference data and tree tmp.load1 = load(file=file.path(res.dir, "ref.data.rda")) # should include cl, cl.df, norm.dat. 
# Relabels cl from cluster ids to cluster labels, loads the FACS annotation
# feather, restricts it to the cells present in norm.dat, and filters to the
# five inhibitory subclasses; then loads the patch-seq query CPM data and keeps
# VISp patch_seq samples plus TCx/FCx/MOp/TEa regions.
# The loaded cl is not used because it only includes cluster ids but not cluster labels tmp.load2 = load(file.path(file=res.dir, file=paste0("V1.dend", bp.name.add,".rda"))) # should include the pruned V1 tree tmp.load3 = load(file.path(res.dir, file=paste0("V1.dend.list", bp.name.add,".rda"))) # should include dend.list plot(dend) rownames(cl.df)=cl.df$cluster_id cltmp=cl.df[as.character(cl),"cluster_label"] names(cltmp)=names(cl) cl=factor(cltmp) FACS.cells <- colnames(norm.dat) FACS.anno <- read_feather(paste0(ref.dir, "/anno.feather")) FACS.anno <- as.data.frame(FACS.anno) rownames(FACS.anno) <- FACS.anno$sample_id FACS.anno <- FACS.anno[FACS.cells,] select.genotypes <- unique(FACS.anno$genotype_label)[grepl("Gad2", unique(FACS.anno$genotype_label)) | grepl("32", unique(FACS.anno$genotype_label))] select.subclass <- c("Sst", "Pvalb", "Vip", "Sncg", "Lamp5") FACS.anno <- FACS.anno %>% filter(subclass_label %in% select.subclass) #%>% #filter(genotype_label %in% select.genotypes) dim(FACS.anno) ###################################################################################################### ### Loading the query data ########################################################################### ###################################################################################################### tmp<-load(paste0(query.dir,batch_date,"_mouse_patchseq_star2.0_cpm.Rdata")) query.dat = cpmR # loading samp.dat object tmp<-load(paste0(query.dir,batch_date,"_mouse_patchseq_star2.0_samp.dat.Rdata")) keepcells = which(samp.dat$Region=="VISp" & samp.dat$Type=="patch_seq") samp.dat = samp.dat[c(keepcells, which(samp.dat$Region=="TCx"),which(samp.dat$Region=="FCx"), which(samp.dat$Region=="MOp"),which(samp.dat$Region=="TEa") ),] #FCx is for Brian. 
# NOTE(review): the words "Rat samples mapped in mouse" at the start of the next
# line are the tail of the '#FCx is for Brian.' comment above, whose line break
# was lost in export — they are not code and will not parse as stored.
Rat samples mapped in mouse query.dat = query.dat[,as.character(samp.dat$exp_component_name)] colnames(query.dat)=as.character(samp.dat$patched_cell_container) query.dat.norm = log2(as.matrix(query.dat+1)) idx=match(rownames(norm.dat), rownames(query.dat.norm)) query.dat.norm=query.dat.norm[idx,] patchseq_anno <- read_feather(paste0(patchseq.dir, "/anno.feather")) patchseq_anno <- as.data.frame(patchseq_anno) rownames(patchseq_anno) <- patchseq_anno$sample_id #Patchseq Cells of interests locked_cells_spec_id = rownames(read.csv(paste0(work.dir, "mouse_met_Jun_14.csv"), check.names=FALSE, row.names = 1 )) locked_cells_sample_id = patchseq_anno[patchseq_anno$spec_id_label %in% locked_cells_spec_id, "sample_id"] length(locked_cells_spec_id) == length(locked_cells_sample_id) patchseq_anno <- patchseq_anno[locked_cells_sample_id,] dim(query.dat.norm[select.markers, locked_cells_sample_id]) Core.cells <- patchseq_anno[patchseq_anno$Tree_call_label == "Core", "sample_id"] I1.cells <- patchseq_anno[patchseq_anno$Tree_call_label == "I1", "sample_id"] I2.cells <- patchseq_anno[patchseq_anno$Tree_call_label == "I2", "sample_id"] I3.cells <- patchseq_anno[patchseq_anno$Tree_call_label == "I3", "sample_id"] patchseq_anno <- patchseq_anno[patchseq_anno$subclass_label %in% select.subclass,] ###################################################################################################### ### Comparing gene expression for FACS and patchseq: ################################################# ###################################################################################################### lamp5_sncg_vip_genes <- c("Lamp5", "Ndnf", "Krt73", "Fam19a1", "Pax6", "Ntn1", "Plch2", "Lsp1", "Lhx6", "Nkx2-1", "Vip", "Sncg", "Slc17a8", "Nptx2", "Gpr50", "Itih5", "Serpinf1", "Igfbp6", "Gpc3", "Lmo1", "Ptprt", "Rspo4", "Chat", "Crispld2", "Col15a1", "Pde1a") sst_pvalb_genes <- c("Sst", "Chodl", "Nos1", "Mme", "Tac1", "Tacr3", "Calb2", "Nr2f2", "Myh8", "Tac2", "Hpse", "Crhr2", "Crh", 
# (sst_pvalb_genes vector continues) — below: per-cluster median-expression
# matrices for FACS (cluster_label) vs patch-seq (topLeaf_label) cells, first
# for the Sst/Pvalb marker genes, then for the Vip/Lamp5 marker genes, with the
# cross-correlation of the two matrices rendered via plot_co_matrix().
"Esm1", "Rxfp1", "Nts", "Pvalb", "Gabrg1", "Th", "Calb1", "Akr1c18", "Sema3e", "Gpr149", "Reln", "Tpbg", "Cpne5", "Vipr2", "Nkx2-1") tmp1 <- do.call("cbind",tapply(FACS.anno[FACS.anno$subclass_label %in% c("Sst", "Pvalb"), "sample_id"], FACS.anno[FACS.anno$subclass_label %in% c("Sst", "Pvalb"), "cluster_label"], function(x)rowMedians(norm.dat[sst_pvalb_genes, x, drop = F]))) tmp2 <- do.call("cbind",tapply(patchseq_anno[patchseq_anno$subclass_label %in% c("Sst", "Pvalb"), "sample_id"], patchseq_anno[patchseq_anno$subclass_label %in% c("Sst", "Pvalb"), "topLeaf_label"], function(x)rowMedians(query.dat.norm[sst_pvalb_genes, x, drop = F]))) cormat <- cor(tmp1[,colnames(tmp2)], tmp2) tmp3 <- unique((FACS.anno[FACS.anno$subclass_label %in% c("Sst", "Pvalb"),c("cluster_id", "cluster_label", "cluster_color")])) fake.cl <- setNames(tmp3$cluster_id, tmp3$cluster_label) col <- t(as.data.frame(setNames(tmp3$cluster_color, tmp3$cluster_label))) plot_co_matrix(co.ratio =cormat , cl = fake.cl, max.cl.size = 1, col = col) tmp1 <- do.call("cbind",tapply(FACS.anno[FACS.anno$subclass_label %in% c("Vip", "Lamp5"), "sample_id"], FACS.anno[FACS.anno$subclass_label %in% c("Vip", "Lamp5"), "cluster_label"], function(x)rowMedians(norm.dat[lamp5_sncg_vip_genes, x, drop = F]))) tmp2 <- do.call("cbind",tapply(patchseq_anno[patchseq_anno$subclass_label %in% c("Vip", "Lamp5"), "sample_id"], patchseq_anno[patchseq_anno$subclass_label %in% c("Vip", "Lamp5"), "topLeaf_label"], function(x)rowMedians(query.dat.norm[lamp5_sncg_vip_genes, x, drop = F]))) tmp1 <- tmp1[,colnames(tmp2)] dim(tmp1) dim(tmp2) cormat <- cor(tmp1[,colnames(tmp2)], tmp2) tmp3 <- unique((FACS.anno[FACS.anno$subclass_label %in% c("Vip", "Lamp5"),c("cluster_id", "cluster_label", "cluster_color")])) fake.cl <- setNames(tmp3$cluster_id, tmp3$cluster_label) fake.cl <- fake.cl[rownames(cormat)] col <- t(as.data.frame(setNames(tmp3$cluster_color, tmp3$cluster_label))) plot_co_matrix(co.ratio =cormat , cl = fake.cl, max.cl.size = 1) 
# ggplot heatmap of the correlation matrix, then per-cluster MEAN expression
# (Core + I1 patch-seq cells only) compared against FACS means; the long-format
# join (left_join) pairs FACS.med with Patchseq.med per (Gene, cl), colored by
# gene (Sst/Pvalb/Lamp5 highlighted) for an interactive rbokeh wedge plot.
dim(cormat) ggplot(data = melt(cor(tmp1[,colnames(tmp2)], tmp2)), aes(x=Var1, y=Var2, fill=value)) + geom_tile()+ theme(axis.text = element_text(size=7)) + theme(axis.text.x = element_text(angle = 90, hjust = 1)) + xlab("Clustering lable") + ylab("NN Mapping lables") + scale_fill_gradient(low = "white", high = "red") plot(norm.dat["Lamp5",]) ###################################################################################################### ### Comparing gene expression for FACS and patchseq: ################################################# ###################################################################################################### tmp1 <- do.call("cbind", tapply(FACS.anno[, "sample_id"], FACS.anno[, "cluster_label"], function(x)rowMeans(norm.dat[c(lamp5_sncg_vip_genes, sst_pvalb_genes), x, drop = F]))) tmp2 <- do.call("cbind",tapply(patchseq_anno[c(Core.cells, I1.cells), "sample_id"], patchseq_anno[c(Core.cells, I1.cells), "topLeaf_label"], function(x)rowMeans(query.dat.norm[c(lamp5_sncg_vip_genes, sst_pvalb_genes), x, drop = F]))) select.cl <- intersect(colnames(tmp1), colnames(tmp2)) tmp1<- tmp1[,select.cl] tmp2<- tmp2[,select.cl] rownames(tmp1) <- c(lamp5_sncg_vip_genes, sst_pvalb_genes) rownames(tmp2) <- c(lamp5_sncg_vip_genes, sst_pvalb_genes) tmp1 <- melt(tmp1) colnames(tmp1) <- c("Gene", "cl", "FACS.med") #tmp1$color <- c("Red") tmp2 <- melt(tmp2) colnames(tmp2) <- c("Gene", "cl", "Patchseq.med") #tmp2$color <- c("Green") tmp3 <- left_join(tmp1, tmp2) ref.cl.color <- unique(FACS.anno %>% select(cluster_label, cluster_color)) tmp3$cl_color <- sapply(tmp3$cl, function(x)ref.cl.color[ref.cl.color$cluster_label==x, "cluster_color"]) tmp3 <- tmp3 %>% mutate(Gene_color = case_when(Gene == "Sst" ~ "#F54607", Gene == "Pvalb" ~"#0707F5", Gene == "Lamp5" ~ "#48F214", TRUE ~ "#CBCAC6")) figure(width =800, height = 800, xlim = c(0, 20), ylim = c(0, 20), xlab = "Patchseq mean gene expression", ylab = "FACS mean gene expression") %>% ly_wedge(data = tmp3, x 
# (ly_wedge call continues: x/y aesthetics, wedge geometry, hover tooltip;
# followed by the equivalent static ggplot scatter with the y = x reference line)
= Patchseq.med, y = FACS.med, color = Gene_color, start_angle = 0, end_angle = 3.99*pi/2, radius = 0.1, hover = list("Cluster label" = cl, "Gene label" = Gene)) p <- ggplot()+ geom_point(data = tmp3, aes(y = FACS.med, x = Patchseq.med, colour = Gene_color), size = 2, show.legend = FALSE) + scale_color_identity() + geom_abline(slope = 1) + xlim(0,15) + ylim(0,15) + xlab("Patchseq mean gene expression") + ylab("FACS mean gene expression")
# Getting & Cleaning Data course script: builds a tidy summary of the UCI HAR
# dataset — the mean of every mean()/std() feature per (subject, activity).
# NOTE(review): newlines were collapsed in export, so this stored text is not
# directly parseable R; inserted comment lines mark the logical sections only.
## The buildTidyData() function does the bulk of the work, which is ## placed on its own function primarily so any intermediary variables ## are cleared when the function returns ## ## No input parameters, but it assumes the folder containing the ## original data (named "UCI HAR DataSet") is a subfolder of the ## current directory buildTidyData <- function () { ## Saves the current dir and switches to original data folder saved_current_dir <- getwd(); setwd("./UCI HAR DataSet") ## Read features and activity labels ## The features and activity labels are read in as strings, ## so we can more easily manipulate them before using as factors features_labels_data <- read.table("features.txt", col.names=c("Id", "Name"), stringsAsFactors = FALSE) activity_labels_data <- read.table("activity_labels.txt", col.names = c("Id", "Name"), stringsAsFactors = FALSE) ## Read the training data, ## already into the data frames for the merged data. ## Initially we read subject, feature values(i.e. "x") ## and activity values (i.e. 
# NOTE(review): the leading '"y")' on the next line is the tail of the comment
# above whose line break was lost in export — it is not code as stored.
# Below: reads train + test subject/feature/activity files, rbinds them,
# restores the working directory, then builds logical masks (duplicated names,
# mean()/std() features) and cleans the feature and activity names via gsub.
"y") into separate data frames subjects_data <- read.table("./train/subject_train.txt", col.names = c("subjectid")) features_data <- read.table("./train/X_train.txt") activity_data <- read.table("./train/y_train.txt", col.names = c("ActivityId")) ## Read the test data, already merging with the training data subjects_data <- rbind(subjects_data, read.table("./test/subject_test.txt", col.names = c("subjectid"))) features_data <- rbind(features_data, read.table("./test/X_test.txt")) activity_data <- rbind(activity_data, read.table("./test/y_test.txt", col.names = c("ActivityId"))) ## Restores working directory, after all data is read in setwd(saved_current_dir) ## Cleaning up and tidying up the data ## Before we start manipulating the features names to make them more tidy, ## build a couple of logical vectors using the original names ## They will allow us, later, to filter the features we are interested in ## We have 561 features in the data, but only 477 distinct feature names, ## so duplicatedFeatures just keep track of the duplicates ## The first ocurrence of each feature will be kept duplicatedFeatures <- duplicated(features_labels_data$Name) ## We only want means (identified by features with "mean()" in name) ## or standard deviations (feature names with "std()") wantedFeatures <- grepl("mean()", features_labels_data$Name, fixed=TRUE) | grepl("std()", features_labels_data$Name, fixed=TRUE) ## Perform a series of pipelined changes to features and activity names, ## to get them formatted as we want, as per codebook features_labels_data$Name <- features_labels_data$Name %>% gsub("()", "", ., fixed=TRUE) %>% gsub("-", "", ., fixed=TRUE) %>% gsub("mean", "Mean", ., fixed=TRUE) %>% gsub("std", "Std", ., fixed=TRUE) activity_labels_data$Name <- activity_labels_data$Name %>% gsub("_", "", .) 
# Applies the cleaned feature names as column names, filters the wanted
# non-duplicate columns, joins subject + activity + features into one frame,
# and summarizes the mean per (subjectid, activity) group.
# NOTE(review): summarize_each() is deprecated in current dplyr — consider
# summarise(across(everything(), mean)) when this script is next touched.
%>% tolower() ## Apply the feature names to the columns/variables of the features data colnames(features_data) <- features_labels_data$Name ## Filter the features features_data <- features_data[wantedFeatures & !duplicatedFeatures] ## Build the consolidated data, by incorporating both the subjects, ## activity and features data in a single frame ## For activity data, convert the id in the original data to ## the descriptive activity names, as a factor activity <- as.factor( activity_labels_data$Name[ match(activity_data$ActivityId, activity_labels_data$Id) ]) consolidated_data <- cbind(subjects_data, activity, features_data) ## Finally, build the tidy_data we want, and return it ## We group by subject and activity and summarize the mean ## of each feature for each combination(subject, activity) tidy_data<-summarize_each( group_by(consolidated_data, subjectid, activity), c('mean')) } ## The actual main script ## First make sure dplyr is installed if(!("dplyr" %in% installed.packages()[,"Package"])) { install.packages("dplyr", quiet = TRUE) } require(dplyr, quietly = TRUE) ## Now, build tidyData and write it to tidyData <- buildTidyData() write.table(tidyData, "tidydata.txt", row.names = FALSE)
/run_analysis.R
no_license
SalvadorAssumpcao/DataScienceGettingAndCleaningData
R
false
false
5,078
r
# Duplicate copy of the UCI HAR tidy-data script (mean of each mean()/std()
# feature per subject x activity).
# NOTE(review): newlines were collapsed in export; the stored text is not
# directly parseable R. Inserted comment lines mark the logical sections.
## The buildTidyData() function does the bulk of the work, which is ## placed on its own function primarily so any intermediary variables ## are cleared when the function returns ## ## No input parameters, but it assumes the folder containing the ## original data (named "UCI HAR DataSet") is a subfolder of the ## current directory buildTidyData <- function () { ## Saves the current dir and switches to original data folder saved_current_dir <- getwd(); setwd("./UCI HAR DataSet") ## Read features and activity labels ## The features and activity labels are read in as strings, ## so we can more easily manipulate them before using as factors features_labels_data <- read.table("features.txt", col.names=c("Id", "Name"), stringsAsFactors = FALSE) activity_labels_data <- read.table("activity_labels.txt", col.names = c("Id", "Name"), stringsAsFactors = FALSE) ## Read the training data, ## already into the data frames for the merged data. ## Initially we read subject, feature values(i.e. "x") ## and activity values (i.e. 
# NOTE(review): the '"y")' token below is the tail of the comment above whose
# line break was lost in export — not code as stored.
# Section: read train + test files, rbind, restore cwd, build feature masks,
# and clean feature/activity names.
"y") into separate data frames subjects_data <- read.table("./train/subject_train.txt", col.names = c("subjectid")) features_data <- read.table("./train/X_train.txt") activity_data <- read.table("./train/y_train.txt", col.names = c("ActivityId")) ## Read the test data, already merging with the training data subjects_data <- rbind(subjects_data, read.table("./test/subject_test.txt", col.names = c("subjectid"))) features_data <- rbind(features_data, read.table("./test/X_test.txt")) activity_data <- rbind(activity_data, read.table("./test/y_test.txt", col.names = c("ActivityId"))) ## Restores working directory, after all data is read in setwd(saved_current_dir) ## Cleaning up and tidying up the data ## Before we start manipulating the features names to make them more tidy, ## build a couple of logical vectors using the original names ## They will allow us, later, to filter the features we are interested in ## We have 561 features in the data, but only 477 distinct feature names, ## so duplicatedFeatures just keep track of the duplicates ## The first ocurrence of each feature will be kept duplicatedFeatures <- duplicated(features_labels_data$Name) ## We only want means (identified by features with "mean()" in name) ## or standard deviations (feature names with "std()") wantedFeatures <- grepl("mean()", features_labels_data$Name, fixed=TRUE) | grepl("std()", features_labels_data$Name, fixed=TRUE) ## Perform a series of pipelined changes to features and activity names, ## to get them formatted as we want, as per codebook features_labels_data$Name <- features_labels_data$Name %>% gsub("()", "", ., fixed=TRUE) %>% gsub("-", "", ., fixed=TRUE) %>% gsub("mean", "Mean", ., fixed=TRUE) %>% gsub("std", "Std", ., fixed=TRUE) activity_labels_data$Name <- activity_labels_data$Name %>% gsub("_", "", .) 
# Section: apply cleaned column names, filter columns, consolidate frames,
# and summarize the mean per (subjectid, activity); then the main script
# installs/loads dplyr, runs buildTidyData(), and writes tidydata.txt.
# NOTE(review): summarize_each() is deprecated in current dplyr.
%>% tolower() ## Apply the feature names to the columns/variables of the features data colnames(features_data) <- features_labels_data$Name ## Filter the features features_data <- features_data[wantedFeatures & !duplicatedFeatures] ## Build the consolidated data, by incorporating both the subjects, ## activity and features data in a single frame ## For activity data, convert the id in the original data to ## the descriptive activity names, as a factor activity <- as.factor( activity_labels_data$Name[ match(activity_data$ActivityId, activity_labels_data$Id) ]) consolidated_data <- cbind(subjects_data, activity, features_data) ## Finally, build the tidy_data we want, and return it ## We group by subject and activity and summarize the mean ## of each feature for each combination(subject, activity) tidy_data<-summarize_each( group_by(consolidated_data, subjectid, activity), c('mean')) } ## The actual main script ## First make sure dplyr is installed if(!("dplyr" %in% installed.packages()[,"Package"])) { install.packages("dplyr", quiet = TRUE) } require(dplyr, quietly = TRUE) ## Now, build tidyData and write it to tidyData <- buildTidyData() write.table(tidyData, "tidydata.txt", row.names = FALSE)
# Week 6, exercise 4: draw samples from three distributions and plot them.
# (Original comments were in Vietnamese; translated to English below. The
# original was stored with its newlines collapsed onto one line; reformatted.)

# Part (a): binomial sample Bin(60, 0.4); frequency table as a vertical-line plot.
dat_a <- rbinom(100, 60, 0.4)
plot(table(dat_a), type = "h")

# Part (b): Poisson sample (lambda = 4); frequency-table plot in a new window.
dat_b <- rpois(100, 4)
x11()  # opens a new on-screen graphics device (requires a display)
plot(table(dat_b), type = "h")

# Part (c): normal sample N(50, 4); trace plot plus a cumulative-sum plot,
# stacked in a 2x1 layout.
dat_c <- rnorm(100, 50, 4)
x11()
par(mfrow = c(2, 1))
plot(dat_c, type = "l", main = "ham mat do")
# Shifted cumulative sums: element i is the sum of the first i - 1 draws
# (element 1 is 0). This reproduces the original accumulation loop exactly,
# but vectorized with cumsum() instead of growing a vector with c() in a loop
# (O(n^2)) and shadowing base::sum with a variable named `sum`.
dat_c_2 <- head(c(0, cumsum(dat_c)), -1)
plot(dat_c_2, type = "h", main = "ham phan phoi")
/src/week_6/prob-4.R
permissive
haunt98/R-learn
R
false
false
517
r
# Week 6, exercise 4: sampling from three distributions and plotting the
# results. (Comments translated from Vietnamese; newlines restored from the
# collapsed export — the statements below are unchanged.)

# (a) Binomial draws; vertical-line plot of the frequency table.
dat_a <- rbinom(100, 60, 0.4)
plot(table(dat_a), type = "h")

# (b) Poisson draws; frequency-table plot in a fresh graphics window.
dat_b <- rpois(100, 4)
x11()
plot(table(dat_b), type = "h")

# (c) Normal draws: a line trace and a running-total plot, stacked 2x1.
dat_c <- rnorm(100, 50, 4)
x11()
par(mfrow = c(2, 1))
plot(dat_c, type = "l", main = "ham mat do")
# Accumulate shifted partial sums: each element holds the total of all
# preceding draws (the first element is 0).
dat_c_2 <- c()
sum <- 0
for (draw in dat_c) {
  dat_c_2 <- c(dat_c_2, sum)
  sum <- sum + draw
}
plot(dat_c_2, type = "h", main = "ham phan phoi")
# K-means clustering of LiDAR forest-structure metrics (Ordway-Swisher site,
# bats/forest-structure study): fits models with several cluster counts,
# writes cluster assignments back to the data, and draws random sites per
# cluster for field sampling.
# NOTE(review): newlines were collapsed in export, so this stored text is not
# directly parseable R. Also, column names (ElevMin, ElevMax, ...) are used
# bare inside data.frame(), which only resolves if the data is attach()ed or
# the variables already exist in the workspace — TODO confirm intended scope.
##read data osdata<-read.csv("./Data/2020-01_Swanson-et-al_Bats-Forest-Str_Quant-Lidar.csv", header=T) #write the k means cluster model3<-kmeans(data.frame(ElevMin,ElevMax,ElevMean,ElevStdev,ElevSkew,ElevKurt,Ret3Ab3,PerAb3,PropStrat5, Prop515, Prop153, Prop36, Prop69, Prop912, PropAb12), centers = 6, algorithm="Lloyd", iter.max=1000) model4<-kmeans(data.frame(ElevMin,ElevMax,ElevMean,ElevStdev,ElevSkew,ElevKurt,Ret3Ab3,PerAb3,PropStrat5, Prop515, Prop153, Prop36, Prop69, Prop912, PropAb12), centers = 5, algorithm="Lloyd", iter.max=1000) #write the model to a file library(MASS) write.matrix(model3,file="kmeans6-1.txt", sep =",") write.matrix(model4,file="kmeans5-1.txt", sep =",") ##import the kmeans data clusterdata6<-scan("kmeans6-1-1.txt", what=numeric(), sep=",") clusterdata5<-scan("kmeans5-1-1.txt", what=numeric(), sep=",") ##transponse the kmeans data t(clusterdata6) t(clusterdata5) ##add clusters to original data osdata$Cluster6<-clusterdata6 osdata$Cluster5<-clusterdata5 ##write table with clusters included write.table(unclass(osdata), "OSclusters.txt", sep=",", col.names=T, row.names=F) write.table(unclass(osbsdata), "OSBSclusters5-1.txt", sep=",", col.names=T, row.names=F) #write the k means cluster model2<-kmeans(data.frame(TotalReturnCount,ElevMinim,ElevMax,ElevMean,ElevStdDev,ElevSkew,ElevKurtosis,Return3Above3,PercentAllAbove3,MaxHeight,P0to3,P3to6,P6to9,P9to12,P12to15,P15to18,P18to21,P21to24,P24to27,P27to30,P30to33), centers = 10, algorithm="Lloyd", iter.max=1000) #write the model to a file library(MASS) write.matrix(model2,file="kmeans10-2.txt", sep =",") ##import the kmeans data kmeans10<-scan("kmeans10-2.txt", what=numeric(), sep=",") ##transponse the kmeans data t(kmeans10) ##add clusters to original data data$Clusters10<-kmeans10 ##write table with clusters included write.table(unclass(data), "OSBSclusters10-2.txt", sep=",", col.names=T, row.names=F) ##model with 7 clusters 
# 7-cluster variants: ElevMax2/ElevMean2 subtract ElevMinim to reduce
# topography effects; a later variant drops TotalReturnCount (per its comment,
# it was "messing up the clusters with remnants of flight lines").
model3<-kmeans(data.frame(TotalReturnCount,ElevMinim,ElevMax,ElevMean,ElevStdDev,ElevSkew,ElevKurtosis,Return3Above3,PercentAllAbove3,MaxHeight,P0to3,P3to6,P6to9,P9to12,P12to15,P15to18,P18to21,P21to24,P24to27,P27to30,P30to33), centers = 7, algorithm="Lloyd", iter.max=1000) #write the model to a file library(MASS) write.matrix(model3,file="kmeans7-1.txt", sep =",") ##import the kmeans data kmeans7<-scan("kmeans7-1.txt", what=numeric(), sep=",") ##transponse the kmeans data t(kmeans7) ##add clusters to original data data$Clusters7<-kmeans7 ##write table with clusters included write.table(unclass(data), "OSBSclusters7-1.txt", sep=",", col.names=T, row.names=F) names(data) ##Remove minimum elevation to counteract effects of topography data$ElevMax2<-(ElevMax-ElevMinim) data$ElevMean2<-(ElevMean-ElevMinim) model4<-kmeans(data.frame(TotalReturnCount,ElevMax2,ElevMean2,ElevStdDev,ElevSkew,ElevKurtosis,Return3Above3,PercentAllAbove3,MaxHeight,P0to3,P3to6,P6to9,P9to12,P12to15,P15to18,P18to21,P21to24,P24to27,P27to30,P30to33), centers = 7, algorithm="Lloyd", iter.max=1000) write.matrix(model4,file="kmeans7-2.txt", sep =",") kmeans72<-scan("kmeans7-2.txt", what=numeric(), sep=",") t(kmeans72) data$Clusters72<-kmeans72 write.table(unclass(data), "OSBSclusters7-2.txt", sep=",", col.names=T, row.names=F) ##run kmeans without total return count (this was messing up the clusters with remnants of flight lines) and with adjusted elevation metrics model5<-kmeans(data.frame(ElevMax2,ElevMean2,ElevStdDev,ElevSkew,ElevKurtosis,Return3Above3,PercentAllAbove3,MaxHeight,P0to3,P3to6,P6to9,P9to12,P12to15,P15to18,P18to21,P21to24,P24to27,P27to30,P30to33), centers = 7, algorithm="Lloyd", iter.max=1000) kmeans73<-scan("kmeans7-3.txt", what=numeric(), sep=",") t(kmeans73) data$Clusters73<-kmeans73 write.table(unclass(data), "OSBSclusters7-3.txt", sep=",", col.names=T, row.names=F) ##segregate rows by cluster ##note: clusters 1 and 7 were excluded because no data and lakes, respectively 
# Splits rows by the Cluster6 label, writes each subset, then draws 30 random
# sites per cluster (set.seed(0) for reproducibility) and writes the samples.
data.sub1<-subset(osclusters, Cluster6==1) write.table(unclass(data.sub1), "OSclustersSub6-1-1.txt", sep=",", col.names=T, row.names=F) data.sub2<-subset(osclusters, Cluster6==2) write.table(unclass(data.sub2), "OSBSclustersSub6-1-2.txt", sep=",", col.names=T, row.names=F) data.sub3<-subset(osclusters, Cluster6==3) write.table(unclass(data.sub3), "OSBSclustersSub6-1-3.txt", sep=",", col.names=T, row.names=F) data.sub4<-subset(osclusters, Cluster6==4) write.table(unclass(data.sub4), "OSBSclustersSub6-1-4.txt", sep=",", col.names=T, row.names=F) data.sub5<-subset(osclusters, Cluster6==5) write.table(unclass(data.sub5), "OSBSclustersSub6-1-5.txt", sep=",", col.names=T, row.names=F) data.sub6<-subset(osclusters, Cluster6==6) write.table(unclass(data.sub6), "OSBSclustersSub6-5-6.txt", sep=",", col.names=T, row.names=F) ##randomly select sites from the subsets, extra sites were selected to ensure that there were enough sites within the boundaries of Ordway-Swisher set.seed(0) random1<-data.sub1[sample(nrow(data.sub1), 30), ] random2<-data.sub2[sample(nrow(data.sub2), 30), ] random3<-data.sub3[sample(nrow(data.sub3), 30), ] random4<-data.sub4[sample(nrow(data.sub4), 30), ] random5<-data.sub5[sample(nrow(data.sub5), 30), ] random6<-data.sub6[sample(nrow(data.sub6), 30), ] ##write random samples to a table write.table(unclass(random1), "OSRandom6-1-1.txt", sep=",", col.names=T, row.names=F) write.table(unclass(random2), "OSRandom6-1-2.txt", sep=",", col.names=T, row.names=F) write.table(unclass(random3), "OSRandom6-1-3.txt", sep=",", col.names=T, row.names=F) write.table(unclass(random4), "OSRandom6-1-4.txt", sep=",", col.names=T, row.names=F) write.table(unclass(random5), "OSRandom6-1-5.txt", sep=",", col.names=T, row.names=F) write.table(unclass(random6), "OSRandom6-1-6.txt", sep=",", col.names=T, row.names=F) 
# Final 6-cluster model and per-cluster subset outputs.
# NOTE(review): the trailing bare token "lib" below appears to be a truncated
# 'library(...)' call — TODO confirm against the original file.
model6<-kmeans(data.frame(ElevMinim,ElevMax,ElevMean,ElevStdDev,ElevSkew,ElevKurtosis,Return3Above3,PercentAllAbove3,MaxHeight,P0to3,P3to6,P6to9,P9to12,P12to15,P15to18,P18to21,P21to24,P24to27,P27to30,P30to33), centers = 6, algorithm="Lloyd", iter.max=1000) write.table(unclass(model6), "OSBSclusters6-2.txt", sep=",", col.names=T, row.names=F) data.sub61<-subset(data, Clusters62==1) write.table(unclass(data.sub61), "OSBSclustersSub6-2-1.txt", sep=",", col.names=T, row.names=F) data.sub62<-subset(data, Clusters62==2) write.table(unclass(data.sub62), "OSBSclustersSub6-2-2.txt", sep=",", col.names=T, row.names=F) data.sub63<-subset(data, Clusters62==3) write.table(unclass(data.sub63), "OSBSclustersSub6-2-3.txt", sep=",", col.names=T, row.names=F) data.sub64<-subset(data, Clusters62==4) write.table(unclass(data.sub64), "OSBSclustersSub6-2-4.txt", sep=",", col.names=T, row.names=F) data.sub65<-subset(data, Clusters62==5) write.table(unclass(data.sub65), "OSBSclustersSub6-2-5.txt", sep=",", col.names=T, row.names=F) data.sub66<-subset(data, Clusters62==6) write.table(unclass(data.sub66), "OSBSclustersSub6-2-6.txt", sep=",", col.names=T, row.names=F) lib
/kmeans.R
no_license
thatsciencegal/flying_under_lidar
R
false
false
6,949
r
## Read data
osdata<-read.csv("./Data/2020-01_Swanson-et-al_Bats-Forest-Str_Quant-Lidar.csv", header=T)

## Fit the k-means cluster models (6 and 5 centers) on the structural metrics.
## NOTE(review): metrics are referenced as bare names (ElevMin, ...) and later
## statements use objects (`data`, `osbsdata`, `osclusters`) not created in
## this file -- presumably attach()ed / loaded interactively; confirm before
## running non-interactively.
model3<-kmeans(data.frame(ElevMin,ElevMax,ElevMean,ElevStdev,ElevSkew,ElevKurt,Ret3Ab3,PerAb3,PropStrat5, Prop515, Prop153, Prop36, Prop69, Prop912, PropAb12), centers = 6, algorithm="Lloyd", iter.max=1000)
model4<-kmeans(data.frame(ElevMin,ElevMax,ElevMean,ElevStdev,ElevSkew,ElevKurt,Ret3Ab3,PerAb3,PropStrat5, Prop515, Prop153, Prop36, Prop69, Prop912, PropAb12), centers = 5, algorithm="Lloyd", iter.max=1000)

## Write the models to files (MASS::write.matrix)
library(MASS)
write.matrix(model3,file="kmeans6-1.txt", sep =",")
write.matrix(model4,file="kmeans5-1.txt", sep =",")

## Import the k-means data.
## NOTE(review): the files scanned here ("kmeans6-1-1.txt", "kmeans5-1-1.txt")
## differ from the names written above ("kmeans6-1.txt", "kmeans5-1.txt") --
## presumably renamed/edited by hand between steps; verify.
clusterdata6<-scan("kmeans6-1-1.txt", what=numeric(), sep=",")
clusterdata5<-scan("kmeans5-1-1.txt", what=numeric(), sep=",")

## Transpose the k-means data
## NOTE(review): results of t() are not assigned, so these have no effect.
t(clusterdata6)
t(clusterdata5)

## Add clusters to original data
osdata$Cluster6<-clusterdata6
osdata$Cluster5<-clusterdata5

## Write table with clusters included
write.table(unclass(osdata), "OSclusters.txt", sep=",", col.names=T, row.names=F)
write.table(unclass(osbsdata), "OSBSclusters5-1.txt", sep=",", col.names=T, row.names=F)

## k-means with 10 clusters over the OSBS metric set
model2<-kmeans(data.frame(TotalReturnCount,ElevMinim,ElevMax,ElevMean,ElevStdDev,ElevSkew,ElevKurtosis,Return3Above3,PercentAllAbove3,MaxHeight,P0to3,P3to6,P6to9,P9to12,P12to15,P15to18,P18to21,P21to24,P24to27,P27to30,P30to33), centers = 10, algorithm="Lloyd", iter.max=1000)

## Write the model to a file
library(MASS)
write.matrix(model2,file="kmeans10-2.txt", sep =",")

## Import the kmeans data
kmeans10<-scan("kmeans10-2.txt", what=numeric(), sep=",")
t(kmeans10)  # result unused

## Add clusters to original data and save
data$Clusters10<-kmeans10
write.table(unclass(data), "OSBSclusters10-2.txt", sep=",", col.names=T, row.names=F)

## Model with 7 clusters
model3<-kmeans(data.frame(TotalReturnCount,ElevMinim,ElevMax,ElevMean,ElevStdDev,ElevSkew,ElevKurtosis,Return3Above3,PercentAllAbove3,MaxHeight,P0to3,P3to6,P6to9,P9to12,P12to15,P15to18,P18to21,P21to24,P24to27,P27to30,P30to33), centers = 7, algorithm="Lloyd", iter.max=1000)

## Write the model to a file
library(MASS)
write.matrix(model3,file="kmeans7-1.txt", sep =",")

## Import the k-means cluster assignments back in
kmeans7<-scan("kmeans7-1.txt", what=numeric(), sep=",")
t(kmeans7)  # result unused

## Add clusters to original data
data$Clusters7<-kmeans7

## Write table with clusters included
write.table(unclass(data), "OSBSclusters7-1.txt", sep=",", col.names=T, row.names=F)
names(data)

## Remove minimum elevation to counteract effects of topography
data$ElevMax2<-(ElevMax-ElevMinim)
data$ElevMean2<-(ElevMean-ElevMinim)

## 7-cluster model with the topography-adjusted elevation metrics
model4<-kmeans(data.frame(TotalReturnCount,ElevMax2,ElevMean2,ElevStdDev,ElevSkew,ElevKurtosis,Return3Above3,PercentAllAbove3,MaxHeight,P0to3,P3to6,P6to9,P9to12,P12to15,P15to18,P18to21,P21to24,P24to27,P27to30,P30to33), centers = 7, algorithm="Lloyd", iter.max=1000)
write.matrix(model4,file="kmeans7-2.txt", sep =",")
kmeans72<-scan("kmeans7-2.txt", what=numeric(), sep=",")
t(kmeans72)  # result unused
data$Clusters72<-kmeans72
write.table(unclass(data), "OSBSclusters7-2.txt", sep=",", col.names=T, row.names=F)

## Run kmeans without total return count (this was messing up the clusters
## with remnants of flight lines) and with adjusted elevation metrics
model5<-kmeans(data.frame(ElevMax2,ElevMean2,ElevStdDev,ElevSkew,ElevKurtosis,Return3Above3,PercentAllAbove3,MaxHeight,P0to3,P3to6,P6to9,P9to12,P12to15,P15to18,P18to21,P21to24,P24to27,P27to30,P30to33), centers = 7, algorithm="Lloyd", iter.max=1000)
## NOTE(review): model5 is never written out, yet the scan below reads
## "kmeans7-3.txt" which no earlier step creates -- a
## write.matrix(model5, file="kmeans7-3.txt", ...) call appears to be missing.
kmeans73<-scan("kmeans7-3.txt", what=numeric(), sep=",")
t(kmeans73)  # result unused
data$Clusters73<-kmeans73
write.table(unclass(data), "OSBSclusters7-3.txt", sep=",", col.names=T, row.names=F)

## Segregate rows by cluster
## Note: clusters 1 and 7 were excluded because no data and lakes, respectively
data.sub1<-subset(osclusters, Cluster6==1)
write.table(unclass(data.sub1), "OSclustersSub6-1-1.txt", sep=",", col.names=T, row.names=F)
data.sub2<-subset(osclusters, Cluster6==2)
write.table(unclass(data.sub2), "OSBSclustersSub6-1-2.txt", sep=",", col.names=T, row.names=F)
data.sub3<-subset(osclusters, Cluster6==3)
write.table(unclass(data.sub3), "OSBSclustersSub6-1-3.txt", sep=",", col.names=T, row.names=F)
data.sub4<-subset(osclusters, Cluster6==4)
write.table(unclass(data.sub4), "OSBSclustersSub6-1-4.txt", sep=",", col.names=T, row.names=F)
data.sub5<-subset(osclusters, Cluster6==5)
write.table(unclass(data.sub5), "OSBSclustersSub6-1-5.txt", sep=",", col.names=T, row.names=F)
data.sub6<-subset(osclusters, Cluster6==6)
## NOTE(review): "OSBSclustersSub6-5-6.txt" breaks the "...Sub6-1-k" naming
## pattern used above -- possibly a typo.
write.table(unclass(data.sub6), "OSBSclustersSub6-5-6.txt", sep=",", col.names=T, row.names=F)

## Randomly select sites from the subsets; extra sites were selected to ensure
## that there were enough sites within the boundaries of Ordway-Swisher
set.seed(0)
random1<-data.sub1[sample(nrow(data.sub1), 30), ]
random2<-data.sub2[sample(nrow(data.sub2), 30), ]
random3<-data.sub3[sample(nrow(data.sub3), 30), ]
random4<-data.sub4[sample(nrow(data.sub4), 30), ]
random5<-data.sub5[sample(nrow(data.sub5), 30), ]
random6<-data.sub6[sample(nrow(data.sub6), 30), ]

## Write random samples to a table
write.table(unclass(random1), "OSRandom6-1-1.txt", sep=",", col.names=T, row.names=F)
write.table(unclass(random2), "OSRandom6-1-2.txt", sep=",", col.names=T, row.names=F)
write.table(unclass(random3), "OSRandom6-1-3.txt", sep=",", col.names=T, row.names=F)
write.table(unclass(random4), "OSRandom6-1-4.txt", sep=",", col.names=T, row.names=F)
write.table(unclass(random5), "OSRandom6-1-5.txt", sep=",", col.names=T, row.names=F)
write.table(unclass(random6), "OSRandom6-1-6.txt", sep=",", col.names=T, row.names=F)

## 6-cluster model without TotalReturnCount
model6<-kmeans(data.frame(ElevMinim,ElevMax,ElevMean,ElevStdDev,ElevSkew,ElevKurtosis,Return3Above3,PercentAllAbove3,MaxHeight,P0to3,P3to6,P6to9,P9to12,P12to15,P15to18,P18to21,P21to24,P24to27,P27to30,P30to33), centers = 6, algorithm="Lloyd", iter.max=1000)
write.table(unclass(model6), "OSBSclusters6-2.txt", sep=",", col.names=T, row.names=F)

## Per-cluster subsets for the 6-cluster solution.
## NOTE(review): `Clusters62` is never assigned in this file -- presumably
## added interactively after importing the model6 assignments; verify.
data.sub61<-subset(data, Clusters62==1)
write.table(unclass(data.sub61), "OSBSclustersSub6-2-1.txt", sep=",", col.names=T, row.names=F)
data.sub62<-subset(data, Clusters62==2)
write.table(unclass(data.sub62), "OSBSclustersSub6-2-2.txt", sep=",", col.names=T, row.names=F)
data.sub63<-subset(data, Clusters62==3)
write.table(unclass(data.sub63), "OSBSclustersSub6-2-3.txt", sep=",", col.names=T, row.names=F)
data.sub64<-subset(data, Clusters62==4)
write.table(unclass(data.sub64), "OSBSclustersSub6-2-4.txt", sep=",", col.names=T, row.names=F)
data.sub65<-subset(data, Clusters62==5)
write.table(unclass(data.sub65), "OSBSclustersSub6-2-5.txt", sep=",", col.names=T, row.names=F)
data.sub66<-subset(data, Clusters62==6)
write.table(unclass(data.sub66), "OSBSclustersSub6-2-6.txt", sep=",", col.names=T, row.names=F)
lib  # NOTE(review): dangling token -- the file appears truncated here
## Prepare the cleaned NOAA data once; both tests draw from it.
## Requires `noaa.data`, `eq_clean_data()`, `geom_timeline()` and
## `geom_timeline_label()` from the package under test.
df <- noaa.data %>% eq_clean_data()

test_that("geom_timeline returns ggplot object", {
  # Filter to recent Turkish earthquakes, then build the timeline plot
  turkey_recent <- df %>%
    dplyr::filter(COUNTRY %in% c("TURKEY"), YEAR > 2005)
  g <- turkey_recent %>%
    ggplot2::ggplot(ggplot2::aes(x = DATE, y = COUNTRY)) +
    geom_timeline()
  expect_is(g, "ggplot")
})

test_that("geom_timeline_label returns ggplot object", {
  # Same subset, with location-name labels layered on the timeline
  turkey_recent <- df %>%
    dplyr::filter(COUNTRY %in% c("TURKEY"), YEAR > 2005)
  g <- turkey_recent %>%
    ggplot2::ggplot(ggplot2::aes(x = DATE, y = COUNTRY)) +
    geom_timeline_label(ggplot2::aes(label = LOCATION_NAME))
  expect_is(g, "ggplot")
})
/tests/testthat/testModule2.R
no_license
shaowei72/RCapstone
R
false
false
574
r
## Tests for the timeline geoms.
## NOTE(review): depends on `noaa.data`, `eq_clean_data()`, `geom_timeline()`
## and `geom_timeline_label()` being provided by the package under test.
df <- noaa.data %>% eq_clean_data()

test_that("geom_timeline returns ggplot object", {
  # Timeline plot for Turkish earthquakes after 2005
  g <- df %>%
    dplyr::filter(COUNTRY %in% c("TURKEY"), YEAR > 2005) %>%
    ggplot2::ggplot(ggplot2::aes(x = DATE, y = COUNTRY)) +
    geom_timeline()
  expect_is(g, "ggplot")
})

test_that("geom_timeline_label returns ggplot object", {
  # Same plot, with location labels layered on top
  g <- df %>%
    dplyr::filter(COUNTRY %in% c("TURKEY"), YEAR > 2005) %>%
    ggplot2::ggplot(ggplot2::aes(x = DATE, y = COUNTRY)) +
    geom_timeline_label(ggplot2::aes(label = LOCATION_NAME))
  expect_is(g, "ggplot")
})
## makeCacheMatrix takes a matrix argument and creates a special "matrix"
## object that can cache its own inverse.
##
## Returns a list of four accessor functions:
##   set(y)          -- replace the stored matrix and invalidate the cache
##   get()           -- return the stored matrix
##   setmatrix(inverse) -- store a computed inverse in the cache
##   getmatrix()     -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL
  set <- function(y) {
    x <<- y
    m <<- NULL  # new matrix, so any previously cached inverse is stale
  }
  get <- function() x
  # Fix: parameter renamed from `solve` (which shadowed base::solve)
  setmatrix <- function(inverse) m <<- inverse
  getmatrix <- function() m
  list(set = set, get = get, setmatrix = setmatrix, getmatrix = getmatrix)
}

## cacheSolve computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and
## the matrix has not changed), the cached inverse is returned instead of
## being recomputed.
##
## x   : object created by makeCacheMatrix() (the meaningless `x = matrix()`
##       default was removed; a bare matrix would fail at x$getmatrix anyway)
## ... : further arguments passed on to solve()
cacheSolve <- function(x, ...) {
  inv <- x$getmatrix()
  if (!is.null(inv)) {
    # message() instead of print(): status text, not part of the result
    message("Using Cached Data")
    return(inv)
  }
  mat <- x$get()
  inv <- solve(mat, ...)
  x$setmatrix(inv)  # cache for subsequent calls
  inv
}
/cachematrix.R
no_license
caksans/ProgrammingAssignment2
R
false
false
899
r
## Constructs a caching wrapper around a matrix: the returned object can
## remember (cache) the inverse of the matrix it holds, so the inverse is
## only ever computed once per matrix.
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  set <- function(y) {
    x <<- y
    cached <<- NULL  # stored matrix changed; drop the stale inverse
  }
  get <- function() {
    x
  }
  setmatrix <- function(solve) {
    cached <<- solve
  }
  getmatrix <- function() {
    cached
  }
  list(set = set,
       get = get,
       setmatrix = setmatrix,
       getmatrix = getmatrix)
}

## Computes (or retrieves from cache) the inverse of a matrix object
## produced by makeCacheMatrix(). Extra arguments are forwarded to solve().
cacheSolve <- function(x = matrix (), ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getmatrix()
  if (is.null(cached)) {
    # Cache miss: compute, store, return
    cached <- solve(x$get(), ...)
    x$setmatrix(cached)
    return(cached)
  }
  # Cache hit: announce and return the stored inverse
  print("Using Cached Data")
  cached
}
\name{runGUI} \alias{runGUI} \title{Open web-based GUI in browser} \description{ Opens the web-based GUI in an external browser. } \usage{ runGUI(...) } \arguments{ \item{...}{Arguments passed to \code{\link[shiny]{runApp}}. Supply \code{port=80} if a web browser refuses to connect to the randomly chosen port for security reasons.} } \details{ This function calls \code{\link[shiny]{runApp}} to run the included DVHshiny application. See \code{vignette("DVHshiny")} for documentation. } \seealso{ \code{\link[shiny]{runApp}} } \examples{ \dontrun{ runGUI() } }
/man/runGUI.Rd
no_license
cran/DVHmetrics
R
false
false
588
rd
\name{runGUI} \alias{runGUI} \title{Open web-based GUI in browser} \description{ Opens the web-based GUI in an external browser. } \usage{ runGUI(...) } \arguments{ \item{...}{Arguments passed to \code{\link[shiny]{runApp}}. Supply \code{port=80} if a web browser refuses to connect to the randomly chosen port for security reasons.} } \details{ This function calls \code{\link[shiny]{runApp}} to run the included DVHshiny application. See \code{vignette("DVHshiny")} for documentation. } \seealso{ \code{\link[shiny]{runApp}} } \examples{ \dontrun{ runGUI() } }
#' Select the last set of nodes created in a graph
#'
#' Select the last nodes that were created in a graph object of class
#' `dgr_graph`. This function should ideally be used just after creating the
#' nodes to be selected.
#'
#' @inheritParams render_graph
#'
#' @return A graph object of class `dgr_graph`.
#'
#' @examples
#' # Create a graph and add 4 nodes
#' # in 2 separate function calls
#' graph <-
#'   create_graph() %>%
#'   add_n_nodes(
#'     n = 2,
#'     type = "a",
#'     label = c("a_1", "a_2")) %>%
#'   add_n_nodes(
#'     n = 2,
#'     type = "b",
#'     label = c("b_1", "b_2"))
#'
#' # Select the last nodes created (2 nodes
#' # from the last function call) and then
#' # set their color to be `red`
#' graph <-
#'   graph %>%
#'   select_last_nodes_created() %>%
#'   set_node_attrs_ws(
#'     node_attr = color,
#'     value = "red") %>%
#'   clear_selection()
#'
#' # Display the graph's internal node
#' # data frame to verify the change
#' graph %>% get_node_df()
#'
#' @export
select_last_nodes_created <- function(graph) {

  # Get the time of function start
  time_function_start <- Sys.time()

  # Get the name of the function
  fcn_name <- get_calling_fcn()

  # Validation: Graph object is valid
  if (graph_object_valid(graph) == FALSE) {
    emit_error(
      fcn_name = fcn_name,
      reasons = "The graph object is not valid")
  }

  # Validation: Graph contains nodes
  if (graph_contains_nodes(graph) == FALSE) {
    emit_error(
      fcn_name = fcn_name,
      reasons = "The graph contains no nodes")
  }

  # Reduce the graph log to only those steps that changed the node count:
  # node-creating calls, node-deleting calls, and initializations that
  # started with nodes already present
  graph_transform_steps <-
    graph$graph_log %>%
    dplyr::mutate(step_created_nodes = dplyr::if_else(
      function_used %in% node_creation_functions(), 1, 0)) %>%
    dplyr::mutate(step_deleted_nodes = dplyr::if_else(
      function_used %in% node_deletion_functions(), 1, 0)) %>%
    dplyr::mutate(step_init_with_nodes = dplyr::if_else(
      function_used %in% graph_init_functions() & nodes > 0, 1, 0)) %>%
    dplyr::filter(
      step_created_nodes == 1 | step_deleted_nodes == 1 | step_init_with_nodes) %>%
    dplyr::select(-version_id, -time_modified, -duration)

  if (nrow(graph_transform_steps) > 0) {

    # The selection is only meaningful if the most recent node-changing
    # step added nodes; a deletion as the last step is an error
    if (graph_transform_steps %>%
        utils::tail(1) %>%
        dplyr::pull(step_deleted_nodes) == 1) {
      emit_error(
        fcn_name = fcn_name,
        reasons = "The previous graph transformation function resulted in a removal of nodes")
    } else {
      if (nrow(graph_transform_steps) > 1) {
        # Number created = node count after the last step minus the count
        # after the second-to-last step
        number_of_nodes_created <-
          (graph_transform_steps %>%
             dplyr::select(nodes) %>%
             utils::tail(2) %>%
             dplyr::pull(nodes))[2] -
          (graph_transform_steps %>%
             dplyr::select(nodes) %>%
             utils::tail(2) %>%
             dplyr::pull(nodes))[1]
      } else {
        # Only one node-changing step: all current nodes came from it
        number_of_nodes_created <-
          graph_transform_steps %>%
          dplyr::pull(nodes)
      }
    }

    # The most recently created nodes occupy the tail of `nodes_df`
    node_id_values <-
      graph$nodes_df %>%
      dplyr::select(id) %>%
      utils::tail(number_of_nodes_created) %>%
      dplyr::pull(id)
  } else {
    # No node-changing steps recorded; nothing to select
    node_id_values <- NA
  }

  if (!any(is.na(node_id_values))) {
    # Apply the selection of nodes to the graph
    graph <-
      suppressMessages(
        select_nodes(
          graph = graph,
          nodes = node_id_values))

    # Update the `graph_log` df with an action; the entry appended by
    # select_nodes() is first dropped so this function logs itself instead
    graph$graph_log <-
      graph$graph_log[-nrow(graph$graph_log),] %>%
      add_action_to_log(
        version_id = nrow(graph$graph_log) + 1,
        function_used = fcn_name,
        time_modified = time_function_start,
        duration = graph_function_duration(time_function_start),
        nodes = nrow(graph$nodes_df),
        edges = nrow(graph$edges_df))

    # Write graph backup if the option is set
    if (graph$graph_info$write_backups) {
      save_graph_as_rds(graph = graph)
    }
  }

  graph
}
/R/select_last_nodes_created.R
permissive
vnijs/DiagrammeR
R
false
false
3,900
r
#' Select the last set of nodes created in a graph
#'
#' Select the last nodes that were created in a graph object of class
#' `dgr_graph`. This function should ideally be used just after creating the
#' nodes to be selected.
#'
#' @inheritParams render_graph
#'
#' @return A graph object of class `dgr_graph`.
#'
#' @examples
#' # Create a graph and add 4 nodes
#' # in 2 separate function calls
#' graph <-
#'   create_graph() %>%
#'   add_n_nodes(
#'     n = 2,
#'     type = "a",
#'     label = c("a_1", "a_2")) %>%
#'   add_n_nodes(
#'     n = 2,
#'     type = "b",
#'     label = c("b_1", "b_2"))
#'
#' # Select the last nodes created (2 nodes
#' # from the last function call) and then
#' # set their color to be `red`
#' graph <-
#'   graph %>%
#'   select_last_nodes_created() %>%
#'   set_node_attrs_ws(
#'     node_attr = color,
#'     value = "red") %>%
#'   clear_selection()
#'
#' # Display the graph's internal node
#' # data frame to verify the change
#' graph %>% get_node_df()
#'
#' @export
select_last_nodes_created <- function(graph) {

  # Get the time of function start
  time_function_start <- Sys.time()

  # Get the name of the function
  fcn_name <- get_calling_fcn()

  # Validation: Graph object is valid
  if (graph_object_valid(graph) == FALSE) {
    emit_error(
      fcn_name = fcn_name,
      reasons = "The graph object is not valid")
  }

  # Validation: Graph contains nodes
  if (graph_contains_nodes(graph) == FALSE) {
    emit_error(
      fcn_name = fcn_name,
      reasons = "The graph contains no nodes")
  }

  # Keep only graph-log steps that changed the node count: creations,
  # deletions, and initializations that already contained nodes
  graph_transform_steps <-
    graph$graph_log %>%
    dplyr::mutate(step_created_nodes = dplyr::if_else(
      function_used %in% node_creation_functions(), 1, 0)) %>%
    dplyr::mutate(step_deleted_nodes = dplyr::if_else(
      function_used %in% node_deletion_functions(), 1, 0)) %>%
    dplyr::mutate(step_init_with_nodes = dplyr::if_else(
      function_used %in% graph_init_functions() & nodes > 0, 1, 0)) %>%
    dplyr::filter(
      step_created_nodes == 1 | step_deleted_nodes == 1 | step_init_with_nodes) %>%
    dplyr::select(-version_id, -time_modified, -duration)

  if (nrow(graph_transform_steps) > 0) {

    # A deletion as the most recent node-changing step makes "last nodes
    # created" undefined -> error out
    if (graph_transform_steps %>%
        utils::tail(1) %>%
        dplyr::pull(step_deleted_nodes) == 1) {
      emit_error(
        fcn_name = fcn_name,
        reasons = "The previous graph transformation function resulted in a removal of nodes")
    } else {
      if (nrow(graph_transform_steps) > 1) {
        # Difference in node counts between the last two relevant steps
        number_of_nodes_created <-
          (graph_transform_steps %>%
             dplyr::select(nodes) %>%
             utils::tail(2) %>%
             dplyr::pull(nodes))[2] -
          (graph_transform_steps %>%
             dplyr::select(nodes) %>%
             utils::tail(2) %>%
             dplyr::pull(nodes))[1]
      } else {
        # Single relevant step: it created every current node
        number_of_nodes_created <-
          graph_transform_steps %>%
          dplyr::pull(nodes)
      }
    }

    # Newest nodes sit at the tail of the internal node data frame
    node_id_values <-
      graph$nodes_df %>%
      dplyr::select(id) %>%
      utils::tail(number_of_nodes_created) %>%
      dplyr::pull(id)
  } else {
    node_id_values <- NA
  }

  if (!any(is.na(node_id_values))) {
    # Apply the selection of nodes to the graph
    graph <-
      suppressMessages(
        select_nodes(
          graph = graph,
          nodes = node_id_values))

    # Update the `graph_log` df with an action; drop the entry added by
    # select_nodes() so this function is logged as the acting step
    graph$graph_log <-
      graph$graph_log[-nrow(graph$graph_log),] %>%
      add_action_to_log(
        version_id = nrow(graph$graph_log) + 1,
        function_used = fcn_name,
        time_modified = time_function_start,
        duration = graph_function_duration(time_function_start),
        nodes = nrow(graph$nodes_df),
        edges = nrow(graph$edges_df))

    # Write graph backup if the option is set
    if (graph$graph_info$write_backups) {
      save_graph_as_rds(graph = graph)
    }
  }

  graph
}
## Read the whole household power consumption dataset.
## Missing values are encoded as "?" in the raw file.
all_data <- read.csv("./Data/household_power_consumption.txt", header=T, sep=';',
                     na.strings="?", nrows=2075259, check.names=F,
                     stringsAsFactors=F, comment.char="", quote='\"')
all_data$Date <- as.Date(all_data$Date, format="%d/%m/%Y")

## Keep only the two days of interest (2007-02-01 and 2007-02-02)
sub_data <- subset(all_data, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))

## Date conversion: combine Date and Time into a single POSIXct timestamp
datetime <- paste(as.Date(sub_data$Date), sub_data$Time)
sub_data$Datetime <- as.POSIXct(datetime)

## Creating Plot 3: the three sub-metering series over time.
## Fix: the y-axis label previously read "Global Active Power (kilowatts)",
## which describes a different variable; the values plotted here are the
## energy sub-metering readings.
with(sub_data, {
  plot(Sub_metering_1~Datetime, type="l",
       ylab="Energy sub metering", xlab="")
  lines(Sub_metering_2~Datetime, col='Red')
  lines(Sub_metering_3~Datetime, col='Blue')
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
       legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

## Saving 480x480 copy of Plot 3 to file
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
/plot3.R
no_license
larrylugo/ExData_Plotting1
R
false
false
998
r
## Read the whole dataset ("?" marks missing values in the raw file)
all_data <- read.csv("./Data/household_power_consumption.txt", header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
all_data$Date <- as.Date(all_data$Date, format="%d/%m/%Y")

## Selecting two days of data (2007-02-01 and 2007-02-02)
sub_data <- subset(all_data, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))

## Date conversion: combine Date and Time into one POSIXct timestamp
datetime <- paste(as.Date(sub_data$Date), sub_data$Time)
sub_data$Datetime <- as.POSIXct(datetime)

## Creating Plot 3: the three sub-metering series over time.
## NOTE(review): the y-axis label says "Global Active Power (kilowatts)" but
## the plotted variable is Sub_metering_1 -- looks mislabeled; confirm.
with(sub_data, {
  plot(Sub_metering_1~Datetime, type="l", ylab="Global Active Power (kilowatts)", xlab="")
  lines(Sub_metering_2~Datetime,col='Red')
  lines(Sub_metering_3~Datetime,col='Blue')
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

## Saving 480x480 copy of Plot 3 to file
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
### Run simple preferential sampling example. Doing this in two steps since
### there seems to be a conflict between TMB loop and rgeos ('gCentroid')
### functionality; need to declare an ADREPORT for Z_s in TMB for this.
library(PrefSampling)

# Settings
grid_dim = c("x"=25, "y"=25)
n_samp = 50
n_cells = grid_dim[1]*grid_dim[2]
prop_sampled=0.5
Prop_sampled=rep(prop_sampled,n_samp)
SpatialScale = sqrt(prod(grid_dim))/5  # Range ~ 2*Scale
SD_eta = SD_x = SD_delta = 1
beta0 = 2
Use_REML = FALSE
Spatial_sim_model = c("GP_gaussian", "ICAR")[1]
Spatial_model = c("SPDE_GMRF", "ICAR")[1]
Alpha = 1  # Smoothness for GMRF, 1 or 2 (1 is faster)
RandomSeed = ceiling(runif(1,min=1,max=1e6))

# Configurations
b_set = c(0,1,5)  # Impact of delta on sampling intensity
EM_set = c("fix_b","est_b")
set.seed(12345)
Counts=Est=vector("list",3)

# Covariate effects
betax = runif(1,-.5,.5)  # Impact of x on density
betax_prob = runif(1,-.5,.5)  # Impact of x on sampling intensity
#betax=0
#betax_prob=0

# Spatial model: Gaussian covariance models for the three random fields.
# NOTE(review): RMgauss/RFsimulate come from RandomFields, and
# inla.mesh.create/inla.spde2.matern from INLA -- presumably loaded via
# PrefSampling; confirm.
loc_s = expand.grid( "x"=1:grid_dim['x'], "y"=1:grid_dim['y'])
model_delta <- RMgauss(var=SD_delta^2, scale=SpatialScale)
model_x <- RMgauss(var=SD_x^2, scale=SpatialScale)
model_eta <- RMgauss(var=SD_eta^2, scale=SpatialScale)

# Realization from GRF
# delta_s -- spatial variation that affects both density and sampling intensity
# x_s -- spatial covariate field; eta_s -- sampling-only spatial field
if( Spatial_sim_model=="GP_gaussian"){
  delta_s = RFsimulate(model=model_delta, x=loc_s[,'x'], y=loc_s[,'y'])@data[,1]
  x_s = RFsimulate(model=model_x, x=loc_s[,'x'], y=loc_s[,'y'])@data[,1]
  eta_s = RFsimulate(model=model_eta, x=loc_s[,'x'], y=loc_s[,'y'])@data[,1]
}
if( Spatial_sim_model=="ICAR"){
  # NOTE(review): Q is not defined in this file -- this branch would fail
  # unless Q is supplied elsewhere; it is unreachable with the default above.
  delta_s = rrw( Q )[,1]
  x_s = rrw( Q )[,1]
  eta_s = rrw( Q )[,1]
}

# Total abundance per cell (Poisson around a log-linear mean)
Ztrue_s = exp( beta0 + betax*x_s + delta_s )
Ztrue_s = rpois(n_cells,Ztrue_s) #added to v5, 11/9/2015

# Loop over sampling designs (one per preferentiality b) and estimation
for(SimI in 1:length(b_set)){
  b = b_set[SimI]

  # Sampling intensity surface, normalized to a probability over cells
  R_s = exp( betax_prob*x_s + eta_s + b*delta_s )
  R_s = R_s / sum(R_s)

  # Process for locating samples
  s_i = sample(1:prod(grid_dim), size=n_samp, replace=FALSE, prob=R_s)
  y_s = ifelse(1:prod(grid_dim) %in% s_i, 1, 0)

  # Counting process
  #c_i = rpois( n=n_samp, lambda=Ztrue_s[s_i])
  c_i = rbinom(n=n_samp,Ztrue_s[s_i],prop_sampled) #changed to binom for v5, 11/9
  Cur.count=rep(NA,prod(grid_dim))
  Cur.count[s_i]=c_i
  Counts[[SimI]]=Cur.count

  # NOTE(review): only EstI=1 ("fix_b") is run; the "est_b" branch of EM_set
  # is skipped by the 1:1 range -- presumably intentional for this example.
  for(EstI in 1:1){
    EM = EM_set[EstI]

    # Create the SPDE/GMRF model, (kappa^2-Delta)(tau x) = W:
    mesh = inla.mesh.create( loc_s )

    # Options
    Options_vec = c( 'Prior'=switch(Spatial_model,"ICAR"=1,"SPDE_GMRF"=0), 'Alpha'=Alpha, 'IncludeDelta'=1, 'IncludeEta'=1, 'OutputSE'=1)

    # Data passed to the TMB template
    spde <- (inla.spde2.matern(mesh, alpha=2)$param.inla)[c("M0","M1","M2")]
    Data = list( "Options_vec"=Options_vec, "c_i"=c_i, "P_i"=Prop_sampled,"A_s"=rep(1,n_cells),"s_i"=s_i-1, "X_sj"=cbind(1,x_s), "y_s"=y_s, "X_sk"=cbind(x_s), "spde"=spde, "X_sb"=matrix(1,nrow=n_cells))

    # Parameters
    # Intercept is needed for beta_j (delta -- abundance) but not beta_k (eta -- sampling intensity)
    if( Options_vec['Prior']==0 ) etainput_s = deltainput_s = rep(0,mesh$n)
    if( Options_vec['Prior']==1 ) etainput_s = deltainput_s = rep(0,prod(grid_dim))
    Params = list("beta_j"=rep(0,ncol(Data$X_sj)), "beta_k"=rep(0,ncol(Data$X_sk)), "beta_b"=0, "logtau_z"=rep(0,2), "logkappa_z"=rep(0,2), "deltainput_s"=deltainput_s, "etainput_s"=etainput_s)
    Params$beta_j[1]=median(log(Ztrue_s+0.01)) #set mean expected abundance close to truth for faster optimization

    # Random effects to integrate out
    Random = c( "deltainput_s", "etainput_s" )
    if(Use_REML==TRUE) Random = c(Random,"beta_j","beta_k")
    if(Use_REML==TRUE) Random = c( Random, "b" )

    # Fix parameters
    Map = list()
    # Fix common spatial-scale
    Map[["logkappa_z"]] = factor( rep(1,length(Params[["logkappa_z"]])) )
    # Eliminate intercept and marginal-SD if using ICAR model
    if( Options_vec['Prior']==1 ){
      Params[["logtau"]] = log(1)
      Map[["logtau"]] = factor(NA)
      Params[["beta_j"]] = log(1)
      Map[["beta_j"]] = factor(NA)
    }
    # Eliminate linkage of density and sampling intensity
    if( EM=="fix_b" ){
      Map[["beta_b"]] = factor(NA)
    }

    # Make object
    #compile( paste0(Version,".cpp") )
    #dyn.load( dynlib(Version) )
    Start_time = Sys.time()
    Obj = MakeADFun( data=Data, parameters=Params, random=Random, map=Map, silent=TRUE, DLL="PrefSampling")
    Obj$fn( Obj$par )

    # Run the optimizer
    Lower = -Inf
    Upper = Inf
    Opt = nlminb( start=Obj$par, objective=Obj$fn, gradient=Obj$gr, lower=Lower, upper=Upper, control=list(trace=1, maxit=1000))
    # Opt[["diagnostics"]] = data.frame( "Param"=names(Obj$par), "Lower"=-Inf, "Est"=Opt$par, "Upper"=Inf, "gradient"=Obj$gr(Opt$par) )
    Report = Obj$report()

    # Potentially fix random fields with zero sample or population variance
    if( any(Report$MargSD_z<0.001) ){
      Which = which(Report$MargSD_z<0.001)
      Map[["logtau_z"]] = factor( ifelse(1:2==Which,NA,1:2) )
      if(length(Which)==2){
        Map[["logkappa_z"]] = factor( c(NA,NA) )
      }
      if( any(Which==1) ){
        Map[["deltainput_s"]] = factor( rep(NA,length(Params[["deltainput_s"]])) )
        Params[["deltainput_s"]][] = 0
        Map[["b"]] = factor(NA)
        Params[["b"]] = 0
      }
      if( any(Which==2) ){
        Map[["etainput_s"]] = factor( rep(NA,length(Params[["etainput_s"]])) )
        Params[["etainput_s"]][] = 0
      }
      Data$Options_vec[Which+2] = 0
      # Re-run with the degenerate field(s) switched off
      if( length(Which)!=2 ) Obj = MakeADFun( data=Data, parameters=Params, random=Random, map=Map, silent=TRUE,DLL="PrefSampling")
      if( length(Which)==2 ) Obj = MakeADFun( data=Data, parameters=Params, random=NULL, map=Map, silent=TRUE,DLL="PrefSampling")
      Opt = nlminb( start=Obj$par, objective=Obj$fn, gradient=Obj$gr, lower=Lower, upper=Upper, control=list(trace=1, maxit=1000))
      # Opt[["diagnostics"]] = data.frame( "Param"=names(Obj$par), "Lower"=-Inf, "Est"=Opt$par, "Upper"=Inf, "gradient"=Obj$gr(Opt$par) )
    }

    # Bias-corrected estimates via the TMB delta method.
    # NOTE(review): indices 5:629 presumably pick the 625 ADREPORTed Z_s
    # values for the 25x25 grid -- confirm against the TMB template.
    SD=sdreport(Obj,bias.correct=TRUE)
    Est[[SimI]]=SD$unbiased$value[5:629]
    #if( all(c("etainput_s","deltainput_s")%in%names(Map)) ){
    #  Est[[EstI]]=Report$Z_s
    #}else{
    #  SD = sdreport( Obj, bias.correct=TRUE )
    #  Est[[EstI]]=SD$unbiased$Z_s
    #}
  }
}

# Collect simulation truth, counts and estimates; saved for later plotting
Sim=list("N.1"=Est[[1]],"N.2"=Est[[2]],"N.3"=Est[[3]],"C.1"=Counts[[1]],"C.2"=Counts[[2]],"C.3"=Counts[[3]],"N.true"=Ztrue_s,"Delta"=delta_s,"Cov"=x_s)
save(Sim,file="sim_plot_data.Rdata")
/pref_sampling/inst/run_sim_example.R
no_license
govtmirror/pref_sampling
R
false
false
6,739
r
### Run simple preferential sampling example. Doing this in two steps since
### there seems to be a conflict between TMB loop and rgeos ('gCentroid')
### functionality; need to declare an ADREPORT for Z_s in TMB for this.
library(PrefSampling)

# Settings
grid_dim = c("x"=25, "y"=25)
n_samp = 50
n_cells = grid_dim[1]*grid_dim[2]
prop_sampled=0.5
Prop_sampled=rep(prop_sampled,n_samp)
SpatialScale = sqrt(prod(grid_dim))/5  # Range ~ 2*Scale
SD_eta = SD_x = SD_delta = 1
beta0 = 2
Use_REML = FALSE
Spatial_sim_model = c("GP_gaussian", "ICAR")[1]
Spatial_model = c("SPDE_GMRF", "ICAR")[1]
Alpha = 1  # Smoothness for GMRF, 1 or 2 (1 is faster)
RandomSeed = ceiling(runif(1,min=1,max=1e6))

# Configurations
b_set = c(0,1,5)  # Impact of delta on sampling intensity
EM_set = c("fix_b","est_b")
set.seed(12345)
Counts=Est=vector("list",3)

# Covariate effects
betax = runif(1,-.5,.5)  # Impact of x on density
betax_prob = runif(1,-.5,.5)  # Impact of x on sampling intensity
#betax=0
#betax_prob=0

# Spatial model: Gaussian covariance models for the three random fields.
# NOTE(review): RMgauss/RFsimulate come from RandomFields, and
# inla.mesh.create/inla.spde2.matern from INLA -- presumably loaded via
# PrefSampling; confirm.
loc_s = expand.grid( "x"=1:grid_dim['x'], "y"=1:grid_dim['y'])
model_delta <- RMgauss(var=SD_delta^2, scale=SpatialScale)
model_x <- RMgauss(var=SD_x^2, scale=SpatialScale)
model_eta <- RMgauss(var=SD_eta^2, scale=SpatialScale)

# Realization from GRF
# delta_s -- spatial variation that affects both density and sampling intensity
# x_s -- spatial covariate field; eta_s -- sampling-only spatial field
if( Spatial_sim_model=="GP_gaussian"){
  delta_s = RFsimulate(model=model_delta, x=loc_s[,'x'], y=loc_s[,'y'])@data[,1]
  x_s = RFsimulate(model=model_x, x=loc_s[,'x'], y=loc_s[,'y'])@data[,1]
  eta_s = RFsimulate(model=model_eta, x=loc_s[,'x'], y=loc_s[,'y'])@data[,1]
}
if( Spatial_sim_model=="ICAR"){
  # NOTE(review): Q is not defined in this file -- this branch would fail
  # unless Q is supplied elsewhere; it is unreachable with the default above.
  delta_s = rrw( Q )[,1]
  x_s = rrw( Q )[,1]
  eta_s = rrw( Q )[,1]
}

# Total abundance per cell (Poisson around a log-linear mean)
Ztrue_s = exp( beta0 + betax*x_s + delta_s )
Ztrue_s = rpois(n_cells,Ztrue_s) #added to v5, 11/9/2015

# Loop over sampling designs (one per preferentiality b) and estimation
for(SimI in 1:length(b_set)){
  b = b_set[SimI]

  # Sampling intensity surface, normalized to a probability over cells
  R_s = exp( betax_prob*x_s + eta_s + b*delta_s )
  R_s = R_s / sum(R_s)

  # Process for locating samples
  s_i = sample(1:prod(grid_dim), size=n_samp, replace=FALSE, prob=R_s)
  y_s = ifelse(1:prod(grid_dim) %in% s_i, 1, 0)

  # Counting process
  #c_i = rpois( n=n_samp, lambda=Ztrue_s[s_i])
  c_i = rbinom(n=n_samp,Ztrue_s[s_i],prop_sampled) #changed to binom for v5, 11/9
  Cur.count=rep(NA,prod(grid_dim))
  Cur.count[s_i]=c_i
  Counts[[SimI]]=Cur.count

  # NOTE(review): only EstI=1 ("fix_b") is run; the "est_b" branch of EM_set
  # is skipped by the 1:1 range -- presumably intentional for this example.
  for(EstI in 1:1){
    EM = EM_set[EstI]

    # Create the SPDE/GMRF model, (kappa^2-Delta)(tau x) = W:
    mesh = inla.mesh.create( loc_s )

    # Options
    Options_vec = c( 'Prior'=switch(Spatial_model,"ICAR"=1,"SPDE_GMRF"=0), 'Alpha'=Alpha, 'IncludeDelta'=1, 'IncludeEta'=1, 'OutputSE'=1)

    # Data passed to the TMB template
    spde <- (inla.spde2.matern(mesh, alpha=2)$param.inla)[c("M0","M1","M2")]
    Data = list( "Options_vec"=Options_vec, "c_i"=c_i, "P_i"=Prop_sampled,"A_s"=rep(1,n_cells),"s_i"=s_i-1, "X_sj"=cbind(1,x_s), "y_s"=y_s, "X_sk"=cbind(x_s), "spde"=spde, "X_sb"=matrix(1,nrow=n_cells))

    # Parameters
    # Intercept is needed for beta_j (delta -- abundance) but not beta_k (eta -- sampling intensity)
    if( Options_vec['Prior']==0 ) etainput_s = deltainput_s = rep(0,mesh$n)
    if( Options_vec['Prior']==1 ) etainput_s = deltainput_s = rep(0,prod(grid_dim))
    Params = list("beta_j"=rep(0,ncol(Data$X_sj)), "beta_k"=rep(0,ncol(Data$X_sk)), "beta_b"=0, "logtau_z"=rep(0,2), "logkappa_z"=rep(0,2), "deltainput_s"=deltainput_s, "etainput_s"=etainput_s)
    Params$beta_j[1]=median(log(Ztrue_s+0.01)) #set mean expected abundance close to truth for faster optimization

    # Random effects to integrate out
    Random = c( "deltainput_s", "etainput_s" )
    if(Use_REML==TRUE) Random = c(Random,"beta_j","beta_k")
    if(Use_REML==TRUE) Random = c( Random, "b" )

    # Fix parameters
    Map = list()
    # Fix common spatial-scale
    Map[["logkappa_z"]] = factor( rep(1,length(Params[["logkappa_z"]])) )
    # Eliminate intercept and marginal-SD if using ICAR model
    if( Options_vec['Prior']==1 ){
      Params[["logtau"]] = log(1)
      Map[["logtau"]] = factor(NA)
      Params[["beta_j"]] = log(1)
      Map[["beta_j"]] = factor(NA)
    }
    # Eliminate linkage of density and sampling intensity
    if( EM=="fix_b" ){
      Map[["beta_b"]] = factor(NA)
    }

    # Make object
    #compile( paste0(Version,".cpp") )
    #dyn.load( dynlib(Version) )
    Start_time = Sys.time()
    Obj = MakeADFun( data=Data, parameters=Params, random=Random, map=Map, silent=TRUE, DLL="PrefSampling")
    Obj$fn( Obj$par )

    # Run the optimizer
    Lower = -Inf
    Upper = Inf
    Opt = nlminb( start=Obj$par, objective=Obj$fn, gradient=Obj$gr, lower=Lower, upper=Upper, control=list(trace=1, maxit=1000))
    # Opt[["diagnostics"]] = data.frame( "Param"=names(Obj$par), "Lower"=-Inf, "Est"=Opt$par, "Upper"=Inf, "gradient"=Obj$gr(Opt$par) )
    Report = Obj$report()

    # Potentially fix random fields with zero sample or population variance
    if( any(Report$MargSD_z<0.001) ){
      Which = which(Report$MargSD_z<0.001)
      Map[["logtau_z"]] = factor( ifelse(1:2==Which,NA,1:2) )
      if(length(Which)==2){
        Map[["logkappa_z"]] = factor( c(NA,NA) )
      }
      if( any(Which==1) ){
        Map[["deltainput_s"]] = factor( rep(NA,length(Params[["deltainput_s"]])) )
        Params[["deltainput_s"]][] = 0
        Map[["b"]] = factor(NA)
        Params[["b"]] = 0
      }
      if( any(Which==2) ){
        Map[["etainput_s"]] = factor( rep(NA,length(Params[["etainput_s"]])) )
        Params[["etainput_s"]][] = 0
      }
      Data$Options_vec[Which+2] = 0
      # Re-run with the degenerate field(s) switched off
      if( length(Which)!=2 ) Obj = MakeADFun( data=Data, parameters=Params, random=Random, map=Map, silent=TRUE,DLL="PrefSampling")
      if( length(Which)==2 ) Obj = MakeADFun( data=Data, parameters=Params, random=NULL, map=Map, silent=TRUE,DLL="PrefSampling")
      Opt = nlminb( start=Obj$par, objective=Obj$fn, gradient=Obj$gr, lower=Lower, upper=Upper, control=list(trace=1, maxit=1000))
      # Opt[["diagnostics"]] = data.frame( "Param"=names(Obj$par), "Lower"=-Inf, "Est"=Opt$par, "Upper"=Inf, "gradient"=Obj$gr(Opt$par) )
    }

    # Bias-corrected estimates via the TMB delta method.
    # NOTE(review): indices 5:629 presumably pick the 625 ADREPORTed Z_s
    # values for the 25x25 grid -- confirm against the TMB template.
    SD=sdreport(Obj,bias.correct=TRUE)
    Est[[SimI]]=SD$unbiased$value[5:629]
    #if( all(c("etainput_s","deltainput_s")%in%names(Map)) ){
    #  Est[[EstI]]=Report$Z_s
    #}else{
    #  SD = sdreport( Obj, bias.correct=TRUE )
    #  Est[[EstI]]=SD$unbiased$Z_s
    #}
  }
}

# Collect simulation truth, counts and estimates; saved for later plotting
Sim=list("N.1"=Est[[1]],"N.2"=Est[[2]],"N.3"=Est[[3]],"C.1"=Counts[[1]],"C.2"=Counts[[2]],"C.3"=Counts[[3]],"N.true"=Ztrue_s,"Delta"=delta_s,"Cov"=x_s)
save(Sim,file="sim_plot_data.Rdata")
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/defineMUM.r
\name{defineMUM}
\alias{defineMUM}
\title{Define Multivariate Uncertainty Model}
\usage{
defineMUM(UMlist, cormatrix, ...)
}
\arguments{
\item{UMlist}{a list of uncertain objects created in defineUM().}

\item{cormatrix}{matrix of cross-correlations.}

\item{...}{additional parameters.}
}
\value{
Object of a class "JointNumericSpatial" or "JointScalar".
}
\description{
Function that uses output of defineUM() to define joint probability
distribution for uncertain cross-correlated variables.
}
\details{
The cormatrix is a square matrix of correlations, dimensionally equal to the
number of objects, and symmetric (its transpose must be the same as the
original). The diagonal must all be 1, all values must be in <-1, +1>, and
all eigenvalues must be > 0.
}
\examples{
data(OC, OC_sd, TN, TN_sd)
OC_crm <- makecrm(acf0 = 0.6, range = 1000, model = "Sph")
OC_UM <- defineUM(TRUE, distribution = "norm", distr_param = c(OC, OC_sd), crm = OC_crm, id = "OC")
class(OC_UM)

TN_crm <- makecrm(acf0 = 0.4, range = 1000, model = "Sph")
TN_UM <- defineUM(TRUE, distribution = "norm", distr_param = c(TN, TN_sd), crm = TN_crm, id = "TN")
class(TN_UM)

soil_prop <- list(OC_UM,TN_UM)
str(soil_prop)
mySpatialMUM <- defineMUM(soil_prop, matrix(c(1,0.7,0.7,1), nrow=2, ncol=2))
class(mySpatialMUM)
str(mySpatialMUM)

# scalar
scalarUM <- defineUM(uncertain = TRUE, distribution = "norm", distr_param = c(1, 2), id="Var1")
scalarUM2 <- defineUM(uncertain = TRUE, distribution = "norm", distr_param = c(3, 2), id="Var2")
scalarUM3 <- defineUM(uncertain = TRUE, distribution = "norm", distr_param = c(10, 2.5), id="Var3")
myMUM <- defineMUM(UMlist = list(scalarUM, scalarUM2, scalarUM3), matrix(c(1,0.7,0.2,0.7,1,0.5,0.2,0.5,1), nrow = 3, ncol = 3))
class(myMUM)
}
\author{
Kasia Sawicka, Gerard Heuvelink
}
/man/defineMUM.Rd
no_license
edzer/spup
R
false
true
1,982
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/defineMUM.r
\name{defineMUM}
\alias{defineMUM}
\title{Define Multivariate Uncertainty Model}
\usage{
defineMUM(UMlist, cormatrix, ...)
}
\arguments{
\item{UMlist}{a list of uncertain objects created in defineUM().}

\item{cormatrix}{matrix of cross-correlations.}

\item{...}{additional parameters.}
}
\value{
Object of a class "JointNumericSpatial" or "JointScalar".
}
\description{
Function that uses output of defineUM() to define joint probability
distribution for uncertain cross-correlated variables.
}
\details{
The cormatrix is a square matrix of correlations, dimensionally equal to the
number of objects, and symmetric (its transpose must be the same as the
original). The diagonal must all be 1, all values must be in <-1, +1>, and
all eigenvalues must be > 0.
}
\examples{
data(OC, OC_sd, TN, TN_sd)
OC_crm <- makecrm(acf0 = 0.6, range = 1000, model = "Sph")
OC_UM <- defineUM(TRUE, distribution = "norm", distr_param = c(OC, OC_sd), crm = OC_crm, id = "OC")
class(OC_UM)

TN_crm <- makecrm(acf0 = 0.4, range = 1000, model = "Sph")
TN_UM <- defineUM(TRUE, distribution = "norm", distr_param = c(TN, TN_sd), crm = TN_crm, id = "TN")
class(TN_UM)

soil_prop <- list(OC_UM,TN_UM)
str(soil_prop)
mySpatialMUM <- defineMUM(soil_prop, matrix(c(1,0.7,0.7,1), nrow=2, ncol=2))
class(mySpatialMUM)
str(mySpatialMUM)

# scalar
scalarUM <- defineUM(uncertain = TRUE, distribution = "norm", distr_param = c(1, 2), id="Var1")
scalarUM2 <- defineUM(uncertain = TRUE, distribution = "norm", distr_param = c(3, 2), id="Var2")
scalarUM3 <- defineUM(uncertain = TRUE, distribution = "norm", distr_param = c(10, 2.5), id="Var3")
myMUM <- defineMUM(UMlist = list(scalarUM, scalarUM2, scalarUM3), matrix(c(1,0.7,0.2,0.7,1,0.5,0.2,0.5,1), nrow = 3, ncol = 3))
class(myMUM)
}
\author{
Kasia Sawicka, Gerard Heuvelink
}
# h2o runit demo: use GLRM archetypes of ACS census demographics as features
# for predicting repeat FLSA labor-law violators, and compare GBM accuracy and
# runtime against the raw ZIP-code feature.  Depends on h2o-runit.R helpers
# (Log.info, locate, SEED, doTest) sourced below — not runnable standalone.
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../h2o-runit.R')
# Connect to a cluster

# Set this to True if you want to fetch the data directly from S3.
# This is useful if your cluster is running in EC2.
data_source_is_s3 = F

# Resolve a dataset path: S3 bucket when data_source_is_s3, else local locate().
locate_source <- function(s) {
  if (data_source_is_s3) myPath <- paste0("s3n://h2o-public-test-data/", s)
  else myPath <- locate(s)
}

test.whd_zip.demo <- function(conn) {
  missing_frac <- 0.2
  train_frac <- 0.8
  k_dim <- 5

  Log.info("Import and parse ACS 2013 5-year DP02 demographic data...")
  acs_orig <- h2o.uploadFile(locate_source("bigdata/laptop/census/ACS_13_5YR_DP02_cleaned.zip"), col.types = c("enum", rep("numeric", 149)))
  print(summary(acs_orig))
  # Keep the ZCTA5 (ZIP-code tabulation area) column aside for the later merge.
  acs_zcta_col <- acs_orig$ZCTA5
  acs_full <- acs_orig[,-which(colnames(acs_orig) == "ZCTA5")]

  Log.info("Import and parse WHD 2014-2015 labor violations data...")
  whd_zcta <- h2o.uploadFile(locate_source("bigdata/laptop/census/whd_zcta_cleaned.zip"), col.types = c(rep("enum", 7), rep("numeric", 97)))
  print(summary(whd_zcta))

  # Log.info(paste0("Create validation data with ", 100*missing_frac, "% missing entries"))
  # acs_miss <- h2o.uploadFile(locate_source("bigdata/laptop/census/ACS_13_5YR_DP02_cleaned.zip"), col.types = c("enum", rep("numeric", 149)))
  # acs_miss <- acs_miss[,-which(colnames(acs_miss) == "ZCTA5")]
  # acs_miss <- h2o.insertMissingValues(data = acs_miss, fraction = missing_frac, seed = SEED)
  # print(summary(acs_miss))

  Log.info(paste("Run GLRM to reduce ZCTA demographics to k =", k_dim, "archetypes"))
  # Log.info("Grid search for optimal regularization weights")
  # gamma_x_grid <- c(0.25, 0.5)
  # gamma_y_grid <- c(0.5, 0.75)
  # search_params = list(gamma_x = gamma_x_grid, gamma_y = gamma_y_grid)
  # acs_grid <- h2o.grid("glrm", is_supervised = FALSE, training_frame = acs_miss, validation_frame = acs_full,
  #   k = k_dim, transform = "STANDARDIZE", init = "PlusPlus", loss = "Quadratic", max_iterations = 100,
  #   regularization_x = "Quadratic", regularization_y = "L1",
  #   seed = SEED, hyper_params = search_params)
  # grid_models <- lapply(acs_grid@model_ids, function(i) { model = h2o.getModel(i) })

  # Log.info("Select model that achieves lowest total validation error")
  # valid_numerr <- sapply(grid_models, function(m) { m@model$validation_metrics@metrics$numerr })
  # valid_caterr <- sapply(grid_models, function(m) { m@model$validation_metrics@metrics$caterr })
  # acs_best <- grid_models[[which.min(valid_numerr + valid_caterr)]]
  # acs_best_full <- h2o.glrm(training_frame = acs_full, k = k_dim, transform = "STANDARDIZE", init = "PlusPlus",
  #   loss = "Quadratic", max_iterations = 100, regularization_x = "Quadratic", regularization_y = "L1",
  #   gamma_x = acs_best@parameters$gamma_x, gamma_y = acs_best@parameters$gamma_y, seed = SEED)
  # Grid search above is disabled; fixed regularization weights are used instead.
  acs_best_full <- h2o.glrm(training_frame = acs_full, k = k_dim, transform = "STANDARDIZE", init = "PlusPlus", loss = "Quadratic", max_iterations = 100, regularization_x = "Quadratic", regularization_y = "L1", gamma_x = 0.25, gamma_y = 0.5, seed = SEED)
  print(acs_best_full)

  Log.info("Embedding of ZCTAs into archetypes (X):")
  zcta_arch_x <- h2o.getFrame(acs_best_full@model$loading_key$name)
  print(head(zcta_arch_x))
  Log.info("Archetype to full feature mapping (Y):")
  arch_feat_y <- acs_best_full@model$archetypes
  print(arch_feat_y)

  Log.info(paste0("Split WHD data into test/train with ratio = ", 100*(1-train_frac), "/", 100*train_frac))
  split <- h2o.runif(whd_zcta)
  train <- whd_zcta[split <= train_frac,]
  test <- whd_zcta[split > train_frac,]

  Log.info("Build a GBM model to predict repeat violators and score")
  myY <- "flsa_repeat_violator"
  # Predictors start at column 4 — assumes columns 1:3 are identifiers; TODO confirm.
  myX <- setdiff(4:ncol(train), which(colnames(train) == myY))
  orig_time <- system.time(gbm_orig <- h2o.gbm(x = myX, y = myY, training_frame = train, validation_frame = test, ntrees = 10, max_depth = 6, distribution = "multinomial"))

  Log.info("Replace zcta5_cd column in WHD data with GLRM archetypes")
  zcta_arch_x$zcta5_cd <- acs_zcta_col
  whd_arch <- h2o.merge(whd_zcta, zcta_arch_x, all.x = TRUE, all.y = FALSE)
  whd_arch$zcta5_cd <- NULL
  print(summary(whd_arch))

  Log.info(paste0("Split modified WHD data into test/train with ratio = ", 100*(1-train_frac), "/", 100*train_frac))
  # Reuses the same runif split vector so both models see the same rows.
  train_mod <- whd_arch[split <= train_frac,]
  test_mod <- whd_arch[split > train_frac,]

  Log.info("Build a GBM model on modified WHD data to predict repeat violators and score")
  myX <- setdiff(4:ncol(train_mod), which(colnames(train_mod) == myY))
  mod_time <- system.time(gbm_mod <- h2o.gbm(x = myX, y = myY, training_frame = train_mod, validation_frame = test_mod, ntrees = 10, max_depth = 6, distribution = "multinomial"))

  Log.info("Performance comparison:")
  # NOTE(review): slots are `training_metrics`/`validation_metrics` (plural) in
  # current h2o-3; the singular names below may yield NULL — confirm against
  # the h2o version this runit targets.
  gbm_sum <- data.frame(original = c(orig_time[3], gbm_orig@model$training_metric@metrics$MSE, gbm_orig@model$validation_metric@metrics$MSE), reduced = c(mod_time[3], gbm_mod@model$training_metric@metrics$MSE, gbm_mod@model$validation_metric@metrics$MSE), row.names = c("runtime", "train_mse", "test_mse"))
  print(gbm_sum)
}

doTest("Test out WHD Labor Violations Demo", test.whd_zip.demo)
/h2o-r/tests/testdir_demos/runit_demo_glrm_census_large.R
permissive
konor/h2o-3
R
false
false
5,452
r
# h2o runit demo: use GLRM archetypes of ACS census demographics as features
# for predicting repeat FLSA labor-law violators, and compare GBM accuracy and
# runtime against the raw ZIP-code feature.  Depends on h2o-runit.R helpers
# (Log.info, locate, SEED, doTest) sourced below — not runnable standalone.
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../h2o-runit.R')
# Connect to a cluster

# Set this to True if you want to fetch the data directly from S3.
# This is useful if your cluster is running in EC2.
data_source_is_s3 = F

# Resolve a dataset path: S3 bucket when data_source_is_s3, else local locate().
locate_source <- function(s) {
  if (data_source_is_s3) myPath <- paste0("s3n://h2o-public-test-data/", s)
  else myPath <- locate(s)
}

test.whd_zip.demo <- function(conn) {
  missing_frac <- 0.2
  train_frac <- 0.8
  k_dim <- 5

  Log.info("Import and parse ACS 2013 5-year DP02 demographic data...")
  acs_orig <- h2o.uploadFile(locate_source("bigdata/laptop/census/ACS_13_5YR_DP02_cleaned.zip"), col.types = c("enum", rep("numeric", 149)))
  print(summary(acs_orig))
  # Keep the ZCTA5 (ZIP-code tabulation area) column aside for the later merge.
  acs_zcta_col <- acs_orig$ZCTA5
  acs_full <- acs_orig[,-which(colnames(acs_orig) == "ZCTA5")]

  Log.info("Import and parse WHD 2014-2015 labor violations data...")
  whd_zcta <- h2o.uploadFile(locate_source("bigdata/laptop/census/whd_zcta_cleaned.zip"), col.types = c(rep("enum", 7), rep("numeric", 97)))
  print(summary(whd_zcta))

  # Log.info(paste0("Create validation data with ", 100*missing_frac, "% missing entries"))
  # acs_miss <- h2o.uploadFile(locate_source("bigdata/laptop/census/ACS_13_5YR_DP02_cleaned.zip"), col.types = c("enum", rep("numeric", 149)))
  # acs_miss <- acs_miss[,-which(colnames(acs_miss) == "ZCTA5")]
  # acs_miss <- h2o.insertMissingValues(data = acs_miss, fraction = missing_frac, seed = SEED)
  # print(summary(acs_miss))

  Log.info(paste("Run GLRM to reduce ZCTA demographics to k =", k_dim, "archetypes"))
  # Log.info("Grid search for optimal regularization weights")
  # gamma_x_grid <- c(0.25, 0.5)
  # gamma_y_grid <- c(0.5, 0.75)
  # search_params = list(gamma_x = gamma_x_grid, gamma_y = gamma_y_grid)
  # acs_grid <- h2o.grid("glrm", is_supervised = FALSE, training_frame = acs_miss, validation_frame = acs_full,
  #   k = k_dim, transform = "STANDARDIZE", init = "PlusPlus", loss = "Quadratic", max_iterations = 100,
  #   regularization_x = "Quadratic", regularization_y = "L1",
  #   seed = SEED, hyper_params = search_params)
  # grid_models <- lapply(acs_grid@model_ids, function(i) { model = h2o.getModel(i) })

  # Log.info("Select model that achieves lowest total validation error")
  # valid_numerr <- sapply(grid_models, function(m) { m@model$validation_metrics@metrics$numerr })
  # valid_caterr <- sapply(grid_models, function(m) { m@model$validation_metrics@metrics$caterr })
  # acs_best <- grid_models[[which.min(valid_numerr + valid_caterr)]]
  # acs_best_full <- h2o.glrm(training_frame = acs_full, k = k_dim, transform = "STANDARDIZE", init = "PlusPlus",
  #   loss = "Quadratic", max_iterations = 100, regularization_x = "Quadratic", regularization_y = "L1",
  #   gamma_x = acs_best@parameters$gamma_x, gamma_y = acs_best@parameters$gamma_y, seed = SEED)
  # Grid search above is disabled; fixed regularization weights are used instead.
  acs_best_full <- h2o.glrm(training_frame = acs_full, k = k_dim, transform = "STANDARDIZE", init = "PlusPlus", loss = "Quadratic", max_iterations = 100, regularization_x = "Quadratic", regularization_y = "L1", gamma_x = 0.25, gamma_y = 0.5, seed = SEED)
  print(acs_best_full)

  Log.info("Embedding of ZCTAs into archetypes (X):")
  zcta_arch_x <- h2o.getFrame(acs_best_full@model$loading_key$name)
  print(head(zcta_arch_x))
  Log.info("Archetype to full feature mapping (Y):")
  arch_feat_y <- acs_best_full@model$archetypes
  print(arch_feat_y)

  Log.info(paste0("Split WHD data into test/train with ratio = ", 100*(1-train_frac), "/", 100*train_frac))
  split <- h2o.runif(whd_zcta)
  train <- whd_zcta[split <= train_frac,]
  test <- whd_zcta[split > train_frac,]

  Log.info("Build a GBM model to predict repeat violators and score")
  myY <- "flsa_repeat_violator"
  # Predictors start at column 4 — assumes columns 1:3 are identifiers; TODO confirm.
  myX <- setdiff(4:ncol(train), which(colnames(train) == myY))
  orig_time <- system.time(gbm_orig <- h2o.gbm(x = myX, y = myY, training_frame = train, validation_frame = test, ntrees = 10, max_depth = 6, distribution = "multinomial"))

  Log.info("Replace zcta5_cd column in WHD data with GLRM archetypes")
  zcta_arch_x$zcta5_cd <- acs_zcta_col
  whd_arch <- h2o.merge(whd_zcta, zcta_arch_x, all.x = TRUE, all.y = FALSE)
  whd_arch$zcta5_cd <- NULL
  print(summary(whd_arch))

  Log.info(paste0("Split modified WHD data into test/train with ratio = ", 100*(1-train_frac), "/", 100*train_frac))
  # Reuses the same runif split vector so both models see the same rows.
  train_mod <- whd_arch[split <= train_frac,]
  test_mod <- whd_arch[split > train_frac,]

  Log.info("Build a GBM model on modified WHD data to predict repeat violators and score")
  myX <- setdiff(4:ncol(train_mod), which(colnames(train_mod) == myY))
  mod_time <- system.time(gbm_mod <- h2o.gbm(x = myX, y = myY, training_frame = train_mod, validation_frame = test_mod, ntrees = 10, max_depth = 6, distribution = "multinomial"))

  Log.info("Performance comparison:")
  # NOTE(review): slots are `training_metrics`/`validation_metrics` (plural) in
  # current h2o-3; the singular names below may yield NULL — confirm against
  # the h2o version this runit targets.
  gbm_sum <- data.frame(original = c(orig_time[3], gbm_orig@model$training_metric@metrics$MSE, gbm_orig@model$validation_metric@metrics$MSE), reduced = c(mod_time[3], gbm_mod@model$training_metric@metrics$MSE, gbm_mod@model$validation_metric@metrics$MSE), row.names = c("runtime", "train_mse", "test_mse"))
  print(gbm_sum)
}

doTest("Test out WHD Labor Violations Demo", test.whd_zip.demo)
# AFL-generated fuzzing regression test, replayed under valgrind.
# Feeds the internal Rcpp entry point a degenerate argument list
# (m = NULL, a negative repetition count, and extreme doubles in a 3x1
# matrix) to check it fails gracefully instead of crashing or leaking.
testlist <- list(m = NULL, repetitions = -304992360L, in_m = structure(c(2.31584307392677e+77, 9.53818264351982e+295, 1.22810536108214e+146), .Dim = c(3L, 1L )))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615785290-test.R
no_license
akhikolla/updatedatatype-list2
R
false
false
254
r
# AFL-generated fuzzing regression test, replayed under valgrind.
# Feeds the internal Rcpp entry point a degenerate argument list
# (m = NULL, a negative repetition count, and extreme doubles in a 3x1
# matrix) to check it fails gracefully instead of crashing or leaking.
testlist <- list(m = NULL, repetitions = -304992360L, in_m = structure(c(2.31584307392677e+77, 9.53818264351982e+295, 1.22810536108214e+146), .Dim = c(3L, 1L )))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/activity_code.r
\docType{class}
\name{actmod-class}
\alias{actmod-class}
\title{Activity model class.}
\description{
An S4 class describing activity models fitted to time of observation data.
}
\section{Slots}{

\describe{
\item{\code{data}}{Object of class \code{"numeric"}, the input data.}

\item{\code{wt}}{Object of class \code{"numeric"}, weights applied to the data.}

\item{\code{bw}}{Object of class \code{"numeric"}, kernel bandwidth.}

\item{\code{adj}}{Object of class \code{"numeric"}, kernel bandwidth adjustment multiplier.}

\item{\code{pdf}}{Object of class \code{"matrix"} describing the fitted probability density function:
Column 1: A regular sequence of radian times at which the PDF is evaluated; the range is [0, 2*pi] if unbounded, and sequence steps are the range difference divided by 512.
Column 2: Corresponding circular kernel PDF values.
Additionally, if errors are bootstrapped:
Column 3: PDF standard error.
Column 4: PDF lower 95\% confidence limit.
Column 5: PDF upper 95\% confidence limit.}

\item{\code{act}}{Object of class \code{"numeric"} giving the activity level estimate and, if errors are bootstrapped, the standard error and 95\% confidence limits.}
}}
/man/actmod-class.Rd
no_license
MarcusRowcliffe/activity
R
false
true
1,250
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/activity_code.r
\docType{class}
\name{actmod-class}
\alias{actmod-class}
\title{Activity model class.}
\description{
An S4 class describing activity models fitted to time of observation data.
}
\section{Slots}{

\describe{
\item{\code{data}}{Object of class \code{"numeric"}, the input data.}

\item{\code{wt}}{Object of class \code{"numeric"}, weights applied to the data.}

\item{\code{bw}}{Object of class \code{"numeric"}, kernel bandwidth.}

\item{\code{adj}}{Object of class \code{"numeric"}, kernel bandwidth adjustment multiplier.}

\item{\code{pdf}}{Object of class \code{"matrix"} describing the fitted probability density function:
Column 1: A regular sequence of radian times at which the PDF is evaluated; the range is [0, 2*pi] if unbounded, and sequence steps are the range difference divided by 512.
Column 2: Corresponding circular kernel PDF values.
Additionally, if errors are bootstrapped:
Column 3: PDF standard error.
Column 4: PDF lower 95\% confidence limit.
Column 5: PDF upper 95\% confidence limit.}

\item{\code{act}}{Object of class \code{"numeric"} giving the activity level estimate and, if errors are bootstrapped, the standard error and 95\% confidence limits.}
}}
#!/usr/bin/Rscript
# Plot estimated distance-to-threshold against reporting probability for the
# case-count observation variable, rendering a standalone TikZ/LaTeX figure.
library(ggplot2)
library(tikzDevice)

# Loads precomputed estimates; provides at least `estpdf` (see note below).
load("example-distance-est-reporting-prob.RData")

# Keep only the case-count ("C") observation variable.
sub <- estpdf[estpdf$var == "C", ]
# TeX-formatted R0 facet labels, ordered by numeric R0 (facet_wrap is
# currently commented out, so this only matters if it is re-enabled).
sub$R0 <- factor(paste0("$R_0 = ", sub$repnum, "$"), levels = paste0("$R_0 = ", sort(unique(sub$repnum)), "$"))
# Distance to threshold = modulus of the estimated (omega, gamma) pair.
g <- ggplot(data = sub, aes(y = sqrt((omega) ^ 2 + (gamma) ^ 2), x = prob_rep))
# NOTE(review): `palette[1]` assumes a vector named `palette` is defined in
# the loaded .RData; otherwise this subsets base R's palette() closure and
# errors — TODO confirm.
g <- g + geom_jitter(width = 0.05, height = 0, alpha = 0.5, color = palette[1])
# Crosses mark the modulus of the leading eigenvalue (reference distance).
g <- g + geom_point(data = sub, shape=4, color=1, aes(y=Mod(lambda1)))
#g <- g + scale_y_log10()
g <- g + ylab("Distance to threshold\n")
g <- g + xlab("\nReporting probability")
g <- g + theme_minimal()
g <- g + theme(legend.position = "top")
g <- g + theme(axis.text.x=element_text(angle = 90, hjust = 1, vjust=0.5))
#g <- g + facet_wrap(~R0)
# Write a standalone .tex figure sized for a single journal column.
tikz("distance-vs-reporting-prob.tex", width = 3.25, height = 3.25, standAlone = TRUE)
print(g)
dev.off()
/src/example-distance-est-reporting-prob-plot.R
permissive
e3bo/2017distance
R
false
false
898
r
#!/usr/bin/Rscript
# Plot estimated distance-to-threshold against reporting probability for the
# case-count observation variable, rendering a standalone TikZ/LaTeX figure.
library(ggplot2)
library(tikzDevice)

# Loads precomputed estimates; provides at least `estpdf` (see note below).
load("example-distance-est-reporting-prob.RData")

# Keep only the case-count ("C") observation variable.
sub <- estpdf[estpdf$var == "C", ]
# TeX-formatted R0 facet labels, ordered by numeric R0 (facet_wrap is
# currently commented out, so this only matters if it is re-enabled).
sub$R0 <- factor(paste0("$R_0 = ", sub$repnum, "$"), levels = paste0("$R_0 = ", sort(unique(sub$repnum)), "$"))
# Distance to threshold = modulus of the estimated (omega, gamma) pair.
g <- ggplot(data = sub, aes(y = sqrt((omega) ^ 2 + (gamma) ^ 2), x = prob_rep))
# NOTE(review): `palette[1]` assumes a vector named `palette` is defined in
# the loaded .RData; otherwise this subsets base R's palette() closure and
# errors — TODO confirm.
g <- g + geom_jitter(width = 0.05, height = 0, alpha = 0.5, color = palette[1])
# Crosses mark the modulus of the leading eigenvalue (reference distance).
g <- g + geom_point(data = sub, shape=4, color=1, aes(y=Mod(lambda1)))
#g <- g + scale_y_log10()
g <- g + ylab("Distance to threshold\n")
g <- g + xlab("\nReporting probability")
g <- g + theme_minimal()
g <- g + theme(legend.position = "top")
g <- g + theme(axis.text.x=element_text(angle = 90, hjust = 1, vjust=0.5))
#g <- g + facet_wrap(~R0)
# Write a standalone .tex figure sized for a single journal column.
tikz("distance-vs-reporting-prob.tex", width = 3.25, height = 3.25, standAlone = TRUE)
print(g)
dev.off()
#' Check standard deviation given a mean
#'
#' Uses sprite in the background to find a matching vector.
#' If the process cannot find a possible match it will output a FALSE.
#' This usually means the standard deviation is not possible, but if it is
#' a difficult-to-find yet possible solution, try increasing max_rounds.
#'
#' @param mean required mean value
#' @param sd required standard deviation
#' @param n total cases
#' @param scale_min lower point of scale
#' @param scale_max upper point of scale
#' @param dp number of decimal places to match to (default 2)
#' @param max_rounds maximum number of search rounds before giving up
#' @family sprite
#' @return TRUE or FALSE
#' @export
#'
#' @examples
#' check_sd(mean=8, sd=3.83, n=10,scale_min=1, scale_max=15)
#' check_sd(mean=2, sd=3.83, n=10,scale_min=1, scale_max=15)
check_sd <- function(mean, sd, n, scale_min, scale_max, dp = 2, max_rounds = 1000L) {
  # TODO: guard against obvious wrong input values
  # Bug fix: forward the user-supplied `dp` to simulate_vector() — it was
  # previously hard-coded to 2, silently ignoring the argument.
  vec <- suppressWarnings(simulate_vector(
    mean = mean,
    sd = sd,
    n = n,
    scale_min = scale_min,
    scale_max = scale_max,
    dp = dp,
    max_rounds = max_rounds
  ))
  # simulate_vector() signals failure by returning a vector of zeros, so any
  # zero entry means no valid solution was found.
  all(vec != rep(0, n))
}
/R/extensions.R
permissive
RMHogervorst/forensicdatatoolkit
R
false
false
1,007
r
#' Check standard deviation given a mean
#'
#' Uses sprite in the background to find a matching vector.
#' If the process cannot find a possible match it will output a FALSE.
#' This usually means the standard deviation is not possible, but if it is
#' a difficult-to-find yet possible solution, try increasing max_rounds.
#'
#' @param mean required mean value
#' @param sd required standard deviation
#' @param n total cases
#' @param scale_min lower point of scale
#' @param scale_max upper point of scale
#' @param dp number of decimal places to match to (default 2)
#' @param max_rounds maximum number of search rounds before giving up
#' @family sprite
#' @return TRUE or FALSE
#' @export
#'
#' @examples
#' check_sd(mean=8, sd=3.83, n=10,scale_min=1, scale_max=15)
#' check_sd(mean=2, sd=3.83, n=10,scale_min=1, scale_max=15)
check_sd <- function(mean, sd, n, scale_min, scale_max, dp = 2, max_rounds = 1000L) {
  # TODO: guard against obvious wrong input values
  # Bug fix: forward the user-supplied `dp` to simulate_vector() — it was
  # previously hard-coded to 2, silently ignoring the argument.
  vec <- suppressWarnings(simulate_vector(
    mean = mean,
    sd = sd,
    n = n,
    scale_min = scale_min,
    scale_max = scale_max,
    dp = dp,
    max_rounds = max_rounds
  ))
  # simulate_vector() signals failure by returning a vector of zeros, so any
  # zero entry means no valid solution was found.
  all(vec != rep(0, n))
}
# Regression tests for the plot()/lines() methods of poweRlaw distribution
# objects.  Each case fixes the parameters and xmin, then compares the
# empirical CDF points from plot(draw = FALSE) and the fitted-line
# coordinates from lines(draw = FALSE) against precomputed values.
# NOTE(review): `tol` relies on partial matching of expect_equal()'s
# `tolerance` argument; testthat edition 3 forwards extra args via `...`,
# so confirm `tol=` is still honoured and spell out `tolerance =` if not.
test_that("Test plotting", {
  skip_on_cran()
  x = 1:4
  ## Discrete power-law
  mt = displ$new(x); mt$setPars(3); mt$setXmin(2)
  d_exact = data.frame(x=1:4, y=4:1/4)
  expect_equal(plot(mt, draw=FALSE), d_exact, tol=1e-4)
  d2 = lines(mt, draw=FALSE)
  expect_equal(d2$x, 2:4, tol=1e-4)
  y = c(0.7500, 0.2860, 0.1485)
  expect_equal(d2$y, y, tol=1e-4)

  ## Poisson
  m = dispois$new(x); m$setPars(1); m$setXmin(2)
  d_exact = data.frame(x=1:4, y=4:1/4)
  expect_equal(plot(m, draw=FALSE), d_exact, tol=1e-4)
  d2 = lines(m, draw=FALSE)
  y = c(0.75000,0.17735,0.03418)
  expect_equal(d2$x, 2:4, tol=1e-4)
  expect_equal(d2$y, y, tol=1e-5)

  ## Discrete Log normal
  m = dislnorm$new(x); m$setPars(c(1, 1)); m$setXmin(2)
  d_exact = data.frame(x=1:4, y=4:1/4)
  expect_equal(plot(m, draw=FALSE), d_exact, tol=1e-4)
  d2 = lines(m, draw=FALSE)
  y = c(0.75, 0.5628, 0.4318)
  expect_equal(d2$x, 2:4, tol=1e-4)
  expect_equal(d2$y, y, tol=1e-4)

  ## CTN power-law
  m = conpl$new(x); m$setPars(3); m$setXmin(2)
  d_exact = data.frame(x=1:4, y=4:1/4)
  expect_equal(plot(m, draw=FALSE), d_exact, tol=1e-4)
  d2 = lines(m, draw=FALSE, length.out=4)
  expect_equal(d2$x, c(2.000, 2.520, 3.175, 4.000), tol=1e-4)
  y = c(0.7500, 0.4725, 0.2976, 0.1875)
  expect_equal(d2$y, y, tol=1e-4)

  ## Log normal
  m = conlnorm$new(x); m$setPars(c(1, 1)); m$setXmin(2)
  d_exact = data.frame(x=1:4, y=4:1/4)
  expect_equal(plot(m, draw=FALSE), d_exact, tol=1e-4)
  d2 = lines(m, draw=FALSE, length.out=4)
  expect_equal(d2$x, c(2.000, 2.520, 3.175, 4.000), tol=1e-4)
  y = c(0.7500, 0.6408, 0.5298, 0.4226)
  expect_equal(d2$y, y, tol=1e-4)

  ## Exponential
  m = conexp$new(x); m$setPars(0.5); m$setXmin(2)
  d_exact = data.frame(x=1:4, y=4:1/4)
  expect_equal(plot(m, draw=FALSE), d_exact, tol=1e-4)
  d2 = lines(m, draw=FALSE, length.out=4)
  expect_equal(d2$x, c(2.000, 2.520, 3.175, 4.000), tol=1e-4)
  y = c(0.7500, 0.5783, 0.4168,0.2759)
  expect_equal(d2$y, y, tol=1e-4)
}
)
/pkg/tests/testthat/test_plot.R
no_license
RInterested/poweRlaw
R
false
false
2,018
r
# Regression tests for the plot()/lines() methods of poweRlaw distribution
# objects.  Each case fixes the parameters and xmin, then compares the
# empirical CDF points from plot(draw = FALSE) and the fitted-line
# coordinates from lines(draw = FALSE) against precomputed values.
# NOTE(review): `tol` relies on partial matching of expect_equal()'s
# `tolerance` argument; testthat edition 3 forwards extra args via `...`,
# so confirm `tol=` is still honoured and spell out `tolerance =` if not.
test_that("Test plotting", {
  skip_on_cran()
  x = 1:4
  ## Discrete power-law
  mt = displ$new(x); mt$setPars(3); mt$setXmin(2)
  d_exact = data.frame(x=1:4, y=4:1/4)
  expect_equal(plot(mt, draw=FALSE), d_exact, tol=1e-4)
  d2 = lines(mt, draw=FALSE)
  expect_equal(d2$x, 2:4, tol=1e-4)
  y = c(0.7500, 0.2860, 0.1485)
  expect_equal(d2$y, y, tol=1e-4)

  ## Poisson
  m = dispois$new(x); m$setPars(1); m$setXmin(2)
  d_exact = data.frame(x=1:4, y=4:1/4)
  expect_equal(plot(m, draw=FALSE), d_exact, tol=1e-4)
  d2 = lines(m, draw=FALSE)
  y = c(0.75000,0.17735,0.03418)
  expect_equal(d2$x, 2:4, tol=1e-4)
  expect_equal(d2$y, y, tol=1e-5)

  ## Discrete Log normal
  m = dislnorm$new(x); m$setPars(c(1, 1)); m$setXmin(2)
  d_exact = data.frame(x=1:4, y=4:1/4)
  expect_equal(plot(m, draw=FALSE), d_exact, tol=1e-4)
  d2 = lines(m, draw=FALSE)
  y = c(0.75, 0.5628, 0.4318)
  expect_equal(d2$x, 2:4, tol=1e-4)
  expect_equal(d2$y, y, tol=1e-4)

  ## CTN power-law
  m = conpl$new(x); m$setPars(3); m$setXmin(2)
  d_exact = data.frame(x=1:4, y=4:1/4)
  expect_equal(plot(m, draw=FALSE), d_exact, tol=1e-4)
  d2 = lines(m, draw=FALSE, length.out=4)
  expect_equal(d2$x, c(2.000, 2.520, 3.175, 4.000), tol=1e-4)
  y = c(0.7500, 0.4725, 0.2976, 0.1875)
  expect_equal(d2$y, y, tol=1e-4)

  ## Log normal
  m = conlnorm$new(x); m$setPars(c(1, 1)); m$setXmin(2)
  d_exact = data.frame(x=1:4, y=4:1/4)
  expect_equal(plot(m, draw=FALSE), d_exact, tol=1e-4)
  d2 = lines(m, draw=FALSE, length.out=4)
  expect_equal(d2$x, c(2.000, 2.520, 3.175, 4.000), tol=1e-4)
  y = c(0.7500, 0.6408, 0.5298, 0.4226)
  expect_equal(d2$y, y, tol=1e-4)

  ## Exponential
  m = conexp$new(x); m$setPars(0.5); m$setXmin(2)
  d_exact = data.frame(x=1:4, y=4:1/4)
  expect_equal(plot(m, draw=FALSE), d_exact, tol=1e-4)
  d2 = lines(m, draw=FALSE, length.out=4)
  expect_equal(d2$x, c(2.000, 2.520, 3.175, 4.000), tol=1e-4)
  y = c(0.7500, 0.5783, 0.4168,0.2759)
  expect_equal(d2$y, y, tol=1e-4)
}
)
# DyDaSL experiment driver (weight-by-accuracy variant): streams each dataset
# in batches, maintains an accuracy-weighted ensemble of semi-supervised
# FlexCon-C classifiers, swaps in a fresh classifier when accuracy drops
# (concept-drift handling), and logs per-batch metrics.  Relies on many
# project-local helpers sourced below (readData, flexConC, weightEnsemble,
# ...) — not runnable standalone.
args <- commandArgs(TRUE)
# args <- c("-s", "1", "-e", "1", "-l", "100", "-r", "0.1")

#' @description This function check the actual directory has a sub directory
#' called src if exists it's a new working directory
setWorkspace <- function() {
  files <- c("classifiers.R", "crossValidation.R", "database.R", "flexconc.R", "functions.R", "statistics.R", "utils.R", "write.R")
  if ("src" %in% list.dirs(full.names = F)) {
    setwd("src")
  } else if (all(files %in% list.files())) {
    print("All files exists!")
  } else {
    stop("The follow file(s) are missing!\n", files[!files %in% list.files()])
  }
}

options(java.parameters = "-Xmx4g")

# Draw `size` classifier indices (with replacement) from the pool of base
# classifiers.  Reads the global `baseClassifiers` defined by defines().
shuffleClassify <- function(size) {
  typeClassify <- 1:length(baseClassifiers)
  return(sample(typeClassify, size, T))
}

setWorkspace()
source('utils.R')
installNeedPacks()
# Pushbullet notification credentials for the end-of-run message.
token <- fromJSON('../token.txt')
pbSetup(token$key, defdev = 1)
scripts <- list.files(pattern='*.R', recursive=T)
for (scri in scripts) {
  source(scri)
}
path <- "../results/detailed"
rm(scripts, scri)
databases <- list.files(path = "../datasets/")
myParam <- atribArgs(args, databases)
ratios <- myParam$ratios
lengthBatch <- myParam$lengthBatch
# lengthBatch <- c(100, 250, 750, 500, 1000, 2500, 5000)
databases <- databases[myParam$iniIndex:myParam$finIndex]
defines()
ratio <- 0.1
for (dataLength in lengthBatch) {
  kValue <- floor(sqrt(dataLength))
  for (dataset in databases) {
    dataName <- strsplit(dataset, ".", T)[[1]][1]
    script_name <- "_mainDyDaSL_Weight_"
    fileName <- paste(ratio * 100, dataName, script_name, dataLength, ".txt", sep = "")
    title <- paste("test", fileName, sep = "")
    headerDetailedOutputEnsemble(title, path, dataName, "DyDaSL - Weight by Acc")
    cat(dataName)
    epoch <- 0
    calculate <- TRUE
    epoch <- epoch + 1
    cat("\n\n\nRODADA: ", epoch, "\n\n\n\n")
    set.seed(19)
    ensemble <- list()
    ensemble_weights <- c()
    it <- 0
    typeClassifier <- shuffleClassify(10)
    train <- readData(dataset, path = "../datasets/")
    all_classes <- sort(levels(train$data$class))
    totalInstances <- nrow(train$data)
    # Stream the dataset batch by batch until exhausted.
    while (totalInstances > (train$state)) {
      detect_drift <- FALSE
      begin <- Sys.time()
      it <- it + 1
      batch <- getBatch(train, dataLength)
      # batch$class <- droplevels(batch$class)
      cat("Foram processadas: ", train$processed, "/", totalInstances, "\t")
      rownames(batch) <- as.character(1:nrow(batch))
      # Label only `ratio` of the batch; the rest is treated as unlabeled.
      batchIds <- holdout(batch$class, ratio, seed = 1, mode="random")
      batchLabeled <- batchIds$tr
      rm(batchIds)
      data <- newBase(batch, batchLabeled)
      data$class <- droplevels(data$class)
      if (((totalInstances - (train$state)) > 100) && (length(levels(data$class)) > 1)) {
        classDist <- ddply(data[batchLabeled, ], ~class, summarise, samplesClass = length(class))
        if (it > 1) {
          # ensemble <- knora(valid_base_classifier, data[batchLabeled],
          # sort(levels(batch$class)))
          ensemble_weights <- weightEnsemble(ensemble, data[batchLabeled, ], all_classes)
          ensemble_pred_weighted <- predictEnsembleConfidence(ensemble, ensemble_weights, data[batchLabeled, ], all_classes)
          cmLabeled <- table(ensemble_pred_weighted, data[batchLabeled, ]$class)
          cmLabeled <- fixCM(cmLabeled, all_classes)
          ensembleAcc <- getAcc(cmLabeled)
          cat("Accuracy Ensemble:\t", ensembleAcc, "\n")
          # Capture a reference accuracy the first time after a (re)build.
          if (calculate) {
            calculate <- FALSE
            acceptabelAcc <- round(ensembleAcc, 2)
          }
          # Drift detected: accuracy fell below 99% of the reference.
          # Train one fresh classifier and swap it into the ensemble.
          if (ensembleAcc < acceptabelAcc * 0.99) {
            detect_drift <- TRUE
            typeClassifier <- shuffleClassify(1)
            learner <- baseClassifiers[[typeClassifier]]
            initialAcc <- supAcc(learner, data[batchLabeled, ])
            oracle <- flexConC(learner, funcType[typeClassifier], classDist, initialAcc, "1", data, batchLabeled, learner@func)
            # NOTE(review): `label` is not defined in this script — presumably
            # a global (the class-column name) from a sourced file; confirm.
            oracle_data <- cbind(batch[, -match(label, colnames(batch))], class=predictClass(oracle, batch))
            ensemble <- swapEnsemble(ensemble, oracle_data, oracle, all_classes)
            calculate <- TRUE
          }
        } else {
          # First batch: build the initial ensemble from the sampled pool.
          for (i in typeClassifier) {
            learner <- baseClassifiers[[i]]
            initialAcc <- supAcc(learner, data[batchLabeled, ])
            model <- flexConC(learner, funcType[i], classDist, initialAcc, "1", data, batchLabeled, learner@func)
            ensemble <- addingEnsemble(ensemble, model)
          } # END FOR
        } # END ELSE
      } # END ELSE
      end <- Sys.time()
      # Score the full batch with the (possibly updated) weighted ensemble
      # and append one metrics row to the detailed output file.
      ensemble_weights <- weightEnsemble(ensemble, batch, all_classes)
      ensemble_pred_weighted <- predictEnsembleConfidence(ensemble, ensemble_weights, batch, all_classes)
      cm_ensemble_weight <- table(ensemble_pred_weighted, batch$class)
      cm_ensemble_weight <- fixCM(cm_ensemble_weight, all_classes)
      detailedOutputEnsemble(title, path, length(ensemble_weights), sum(diag(cm_ensemble_weight)), sum(cm_ensemble_weight) - sum(diag(cm_ensemble_weight)), getAcc(cm_ensemble_weight), fmeasure(cm_ensemble_weight), kappa(cm_ensemble_weight), detect_drift, train$state, difftime(end, begin, units = "mins"))
    } # END WHILE
  } # END FOR DATASETS
} # END FOR BATCHSIZE
# Push a completion notification (uses only the last dataLength value).
msg <- paste("Batch Size = ", dataLength, "\nTime: ", Sys.time(), sep = "")
pbPost("note", "Experiment Finished!!", msg)
/mainDyDaSL_Weight.R
no_license
ArthurGorgonio/ssl-dds
R
false
false
6,065
r
# NOTE(review): data-dump formatting — the original newlines of this script
# were collapsed, so a `#` below may swallow trailing code that shared its
# physical line in the dump; restore line breaks against the upstream repo
# before running.
#
# DyDaSL experiment driver ("Weight by Acc" variant), semi-supervised
# data-stream ensemble learning:
# - setWorkspace(): cd into ./src if present; otherwise verify the expected
#   helper scripts exist and stop() listing any missing file.
# - shuffleClassify(size): samples `size` base-classifier indices with
#   replacement from 1..length(baseClassifiers).
# - Main loop: for each batch size (lengthBatch) and dataset, reads the
#   stream in batches of `dataLength`, labels a `ratio` fraction per batch
#   via holdout(), trains/updates a FlexCon-C ensemble, weights members by
#   accuracy, flags drift when ensemble accuracy drops below 99% of a
#   stored baseline (then swaps in a freshly trained member), and appends
#   per-batch metrics via detailedOutputEnsemble().
# - Sends a Pushbullet notification (pbPost) when a batch size finishes.
#
# Depends on project helpers sourced from ./src (readData, getBatch,
# newBase, flexConC, weightEnsemble, predictEnsembleConfidence, fixCM,
# getAcc, supAcc, swapEnsemble, addingEnsemble, atribArgs, defines, ...)
# and a Pushbullet token in ../token.txt — not runnable in isolation.
# NOTE(review): `epoch <- 0; epoch <- epoch + 1` looks like the remnant of
# a removed repetition loop — confirm against upstream history.
args <- commandArgs(TRUE) # args <- c("-s", "1", "-e", "1", "-l", "100", "-r", "0.1") #' @description This function check the actual directory has a sub directory #' called src if exists it's a new working directory setWorkspace <- function() { files <- c("classifiers.R", "crossValidation.R", "database.R", "flexconc.R", "functions.R", "statistics.R", "utils.R", "write.R") if ("src" %in% list.dirs(full.names = F)) { setwd("src") } else if (all(files %in% list.files())) { print("All files exists!") } else { stop("The follow file(s) are missing!\n", files[!files %in% list.files()]) } } options(java.parameters = "-Xmx4g") shuffleClassify <- function(size) { typeClassify <- 1:length(baseClassifiers) return(sample(typeClassify, size, T)) } setWorkspace() source('utils.R') installNeedPacks() token <- fromJSON('../token.txt') pbSetup(token$key, defdev = 1) scripts <- list.files(pattern='*.R', recursive=T) for (scri in scripts) { source(scri) } path <- "../results/detailed" rm(scripts, scri) databases <- list.files(path = "../datasets/") myParam <- atribArgs(args, databases) ratios <- myParam$ratios lengthBatch <- myParam$lengthBatch # lengthBatch <- c(100, 250, 750, 500, 1000, 2500, 5000) databases <- databases[myParam$iniIndex:myParam$finIndex] defines() ratio <- 0.1 for (dataLength in lengthBatch) { kValue <- floor(sqrt(dataLength)) for (dataset in databases) { dataName <- strsplit(dataset, ".", T)[[1]][1] script_name <- "_mainDyDaSL_Weight_" fileName <- paste(ratio * 100, dataName, script_name, dataLength, ".txt", sep = "") title <- paste("test", fileName, sep = "") headerDetailedOutputEnsemble(title, path, dataName, "DyDaSL - Weight by Acc") cat(dataName) epoch <- 0 calculate <- TRUE epoch <- epoch + 1 cat("\n\n\nRODADA: ", epoch, "\n\n\n\n") set.seed(19) ensemble <- list() ensemble_weights <- c() it <- 0 typeClassifier <- shuffleClassify(10) train <- readData(dataset, path = "../datasets/") all_classes <- sort(levels(train$data$class)) totalInstances <- 
nrow(train$data) while (totalInstances > (train$state)) { detect_drift <- FALSE begin <- Sys.time() it <- it + 1 batch <- getBatch(train, dataLength) # batch$class <- droplevels(batch$class) cat("Foram processadas: ", train$processed, "/", totalInstances, "\t") rownames(batch) <- as.character(1:nrow(batch)) batchIds <- holdout(batch$class, ratio, seed = 1, mode="random") batchLabeled <- batchIds$tr rm(batchIds) data <- newBase(batch, batchLabeled) data$class <- droplevels(data$class) if (((totalInstances - (train$state)) > 100) && (length(levels(data$class)) > 1)) { classDist <- ddply(data[batchLabeled, ], ~class, summarise, samplesClass = length(class)) if (it > 1) { # ensemble <- knora(valid_base_classifier, data[batchLabeled], # sort(levels(batch$class))) ensemble_weights <- weightEnsemble(ensemble, data[batchLabeled, ], all_classes) ensemble_pred_weighted <- predictEnsembleConfidence(ensemble, ensemble_weights, data[batchLabeled, ], all_classes) cmLabeled <- table(ensemble_pred_weighted, data[batchLabeled, ]$class) cmLabeled <- fixCM(cmLabeled, all_classes) ensembleAcc <- getAcc(cmLabeled) cat("Accuracy Ensemble:\t", ensembleAcc, "\n") if (calculate) { calculate <- FALSE acceptabelAcc <- round(ensembleAcc, 2) } if (ensembleAcc < acceptabelAcc * 0.99) { detect_drift <- TRUE typeClassifier <- shuffleClassify(1) learner <- baseClassifiers[[typeClassifier]] initialAcc <- supAcc(learner, data[batchLabeled, ]) oracle <- flexConC(learner, funcType[typeClassifier], classDist, initialAcc, "1", data, batchLabeled, learner@func) oracle_data <- cbind(batch[, -match(label, colnames(batch))], class=predictClass(oracle, batch)) ensemble <- swapEnsemble(ensemble, oracle_data, oracle, all_classes) calculate <- TRUE } } else { for (i in typeClassifier) { learner <- baseClassifiers[[i]] initialAcc <- supAcc(learner, data[batchLabeled, ]) model <- flexConC(learner, funcType[i], classDist, initialAcc, "1", data, batchLabeled, learner@func) ensemble <- addingEnsemble(ensemble, 
model) } # END FOR } # END ELSE } # END ELSE end <- Sys.time() ensemble_weights <- weightEnsemble(ensemble, batch, all_classes) ensemble_pred_weighted <- predictEnsembleConfidence(ensemble, ensemble_weights, batch, all_classes) cm_ensemble_weight <- table(ensemble_pred_weighted, batch$class) cm_ensemble_weight <- fixCM(cm_ensemble_weight, all_classes) detailedOutputEnsemble(title, path, length(ensemble_weights), sum(diag(cm_ensemble_weight)), sum(cm_ensemble_weight) - sum(diag(cm_ensemble_weight)), getAcc(cm_ensemble_weight), fmeasure(cm_ensemble_weight), kappa(cm_ensemble_weight), detect_drift, train$state, difftime(end, begin, units = "mins")) } # END WHILE } # END FOR DATASETS } # END FOR BATCHSIZE msg <- paste("Batch Size = ", dataLength, "\nTime: ", Sys.time(), sep = "") pbPost("note", "Experiment Finished!!", msg)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/center_data.R \name{center_data} \alias{center_data} \title{Center Functional Data With Change} \usage{ center_data(fdobj, change = TRUE) } \arguments{ \item{fdobj}{A functional data object} \item{change}{If \code{TRUE}, centering is done by considering the mean change; if \code{FALSE}, the global mean function is subtracted from the functional data. The default is \code{change=TRUE}.} } \value{ Centered functional data sample (class \code{fd}) containing: \item{coefs}{ The coefficient array } \item{basis}{ A basis object } \item{fdnames}{ A list containing names for the arguments, function values and variables } } \description{ This function centers the functional data by subtracting the pointwise mean from each of the functions in a functional data object, taking into account a potential change in the mean function. If there is a change in the mean function, the location of the change is estimated using a fully functional estimator implemented in \code{change_FF}, and the mean before and after the change is computed and subtracted from the respective part of the functional data. } \examples{ # Generate FAR(1) process with change in the mean f_AR = fun_AR(n=100, nbasis=21, kappa=0.9) f_AR_change = insert_change(f_AR, k=20, change_location = 0.5, SNR=5) fdata = f_AR_change$fundata c_fdata = center_data(fdata) par(mfrow=c(1,2)) plot(fdata, main="Functional Data") plot(c_fdata, main="Centered Functional Data") } \seealso{ \code{\link{center.fd}} }
/man/center_data.Rd
no_license
Lujia-Bai/fChange
R
false
true
1,543
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/center_data.R \name{center_data} \alias{center_data} \title{Center Functional Data With Change} \usage{ center_data(fdobj, change = TRUE) } \arguments{ \item{fdobj}{A functional data object} \item{change}{If \code{TRUE}, centering is done by considering the mean change; if \code{FALSE}, the global mean function is subtracted from the functional data. The default is \code{change=TRUE}.} } \value{ Centered functional data sample (class \code{fd}) containing: \item{coefs}{ The coefficient array } \item{basis}{ A basis object } \item{fdnames}{ A list containing names for the arguments, function values and variables } } \description{ This function centers the functional data by subtracting the pointwise mean from each of the functions in a functional data object, taking into account a potential change in the mean function. If there is a change in the mean function, the location of the change is estimated using a fully functional estimator implemented in \code{change_FF}, and the mean before and after the change is computed and subtracted from the respective part of the functional data. } \examples{ # Generate FAR(1) process with change in the mean f_AR = fun_AR(n=100, nbasis=21, kappa=0.9) f_AR_change = insert_change(f_AR, k=20, change_location = 0.5, SNR=5) fdata = f_AR_change$fundata c_fdata = center_data(fdata) par(mfrow=c(1,2)) plot(fdata, main="Functional Data") plot(c_fdata, main="Centered Functional Data") } \seealso{ \code{\link{center.fd}} }
# Plots a histogram for every numeric attribute of `data`, computes the
# absolute pairwise correlations between numeric attributes (excluding the
# "points" column), and prints/plots the weakest and strongest pairs.
#
# Bug fixes vs. the original:
# - The body referenced the global `mydata` instead of the `data` argument;
#   all references now use the parameter.
# - The inner loop `(i+1):length(...)` counted DOWN and indexed out of
#   bounds when `i` was the last attribute; now guarded.
# - The result matrix was seeded with an all-NA row (forcing `N-1`
#   indexing for the maximum); rows are now collected in a list and bound
#   once, so the strongest pair is simply row N.
#
# @param data A data frame; only numeric columns are analysed.
# @return Invisibly, the matrix of (attr1, attr2, |cor|) rows ordered by
#   increasing absolute correlation.
# Side effects: writes one PNG histogram per numeric column into
# "Histograms/" and draws two scatterplots on the active device.
myFunction <- function(data) {
  attribute_names <- names(data)
  cor_rows <- list()  # collect rows, bind once (no O(n^2) rbind growth)
  for (i in seq_along(attribute_names)) {
    x <- attribute_names[i]
    column <- data[[x]]
    if (is.numeric(column)) {
      png(filename = paste0("Histograms/", x, ".png"))
      hist(column, main = x)
      dev.off()
      # Guard against the descending-sequence footgun when i == length(...)
      if (i < length(attribute_names)) {
        for (j in (i + 1):length(attribute_names)) {
          y <- attribute_names[j]
          column_y <- data[[y]]
          # j > i guarantees x != y, so only the "points" exclusion remains
          if (is.numeric(column_y) && x != "points" && y != "points") {
            cor_rows[[length(cor_rows) + 1]] <-
              c(x, y, abs(cor(column, column_y)))
          }
        }
      }
    }
  }
  or_c_mat <- do.call(rbind, cor_rows)
  or_c_mat <- or_c_mat[order(as.numeric(or_c_mat[, 3])), , drop = FALSE]
  N <- nrow(or_c_mat)
  print(or_c_mat[1, ])  # weakest |correlation| pair
  print(or_c_mat[N, ])  # strongest |correlation| pair
  # Scatterplots of the weakest and strongest correlated pairs
  plot(data[[or_c_mat[1, 1]]], data[[or_c_mat[1, 2]]],
       xlab = or_c_mat[1, 1], ylab = or_c_mat[1, 2])
  plot(data[[or_c_mat[N, 1]]], data[[or_c_mat[N, 2]]],
       xlab = or_c_mat[N, 1], ylab = or_c_mat[N, 2])
  invisible(or_c_mat)
}

mydata <- read.csv("dmc2010_train.txt", header = TRUE, sep = ";", quote = "\"")
myFunction(mydata)
/exercise2/exercie2_1.R
no_license
djrieger/dataminingexercises
R
false
false
1,237
r
# Plots a histogram for every numeric attribute of `data`, computes the
# absolute pairwise correlations between numeric attributes (excluding the
# "points" column), and prints/plots the weakest and strongest pairs.
#
# Bug fixes vs. the original:
# - The body referenced the global `mydata` instead of the `data` argument;
#   all references now use the parameter.
# - The inner loop `(i+1):length(...)` counted DOWN and indexed out of
#   bounds when `i` was the last attribute; now guarded.
# - The result matrix was seeded with an all-NA row (forcing `N-1`
#   indexing for the maximum); rows are now collected in a list and bound
#   once, so the strongest pair is simply row N.
#
# @param data A data frame; only numeric columns are analysed.
# @return Invisibly, the matrix of (attr1, attr2, |cor|) rows ordered by
#   increasing absolute correlation.
# Side effects: writes one PNG histogram per numeric column into
# "Histograms/" and draws two scatterplots on the active device.
myFunction <- function(data) {
  attribute_names <- names(data)
  cor_rows <- list()  # collect rows, bind once (no O(n^2) rbind growth)
  for (i in seq_along(attribute_names)) {
    x <- attribute_names[i]
    column <- data[[x]]
    if (is.numeric(column)) {
      png(filename = paste0("Histograms/", x, ".png"))
      hist(column, main = x)
      dev.off()
      # Guard against the descending-sequence footgun when i == length(...)
      if (i < length(attribute_names)) {
        for (j in (i + 1):length(attribute_names)) {
          y <- attribute_names[j]
          column_y <- data[[y]]
          # j > i guarantees x != y, so only the "points" exclusion remains
          if (is.numeric(column_y) && x != "points" && y != "points") {
            cor_rows[[length(cor_rows) + 1]] <-
              c(x, y, abs(cor(column, column_y)))
          }
        }
      }
    }
  }
  or_c_mat <- do.call(rbind, cor_rows)
  or_c_mat <- or_c_mat[order(as.numeric(or_c_mat[, 3])), , drop = FALSE]
  N <- nrow(or_c_mat)
  print(or_c_mat[1, ])  # weakest |correlation| pair
  print(or_c_mat[N, ])  # strongest |correlation| pair
  # Scatterplots of the weakest and strongest correlated pairs
  plot(data[[or_c_mat[1, 1]]], data[[or_c_mat[1, 2]]],
       xlab = or_c_mat[1, 1], ylab = or_c_mat[1, 2])
  plot(data[[or_c_mat[N, 1]]], data[[or_c_mat[N, 2]]],
       xlab = or_c_mat[N, 1], ylab = or_c_mat[N, 2])
  invisible(or_c_mat)
}

mydata <- read.csv("dmc2010_train.txt", header = TRUE, sep = ";", quote = "\"")
myFunction(mydata)
# NOTE(review): collapsed one-line dump of an out-of-fold (OOF) xgboost
# training script for the Kaggle "Santander Customer Satisfaction"
# competition; restore line breaks against the upstream repo before
# running.
#
# Pipeline:
#   1. Load engineered features (RData), drop the "lr" column.
#   2. Split: train = rows with TARGET >= 0, test = rows with TARGET == -1.
#   3. 5-fold CV (caret::createFolds): per fold, build sparse model
#      matrices, train xgb.train (binary:logistic, AUC, eta 0.022, early
#      stopping 50), store fold predictions in vec.xgb.pred.train and
#      average test predictions into vec.xgb.pred.test.
#   4. Report per-fold and overall AUC, write the submission CSV, and save
#      the OOF vectors as meta-features for stacking.
#
# NOTE(review): uses deprecated xgboost argument spellings
# (early.stop.round, print.every.n); current releases expect
# early_stopping_rounds / print_every_n — confirm the pinned xgboost
# version before running.
# NOTE(review): setwd()/rm(list = ls()) at the top and the absolute
# "/Volumes/..." path are machine-specific script conveniences.
setwd("/Volumes/Data Science/Google Drive/data_science_competition/kaggle/Santander_Customer_Satisfaction/") rm(list = ls()); gc(); require(data.table) require(purrr) require(caret) require(xgboost) require(Matrix) require(Ckmeans.1d.dp) require(Metrics) require(ggplot2) require(combinat) source("utilities/preprocess.R") source("utilities/cv.R") load("../data/Santander_Customer_Satisfaction/RData/dt_featureEngineered.RData") # load("../data/Santander_Customer_Satisfaction/RData/dt_featureEngineered_combine.RData") load("../data/Santander_Customer_Satisfaction/RData/cols_selected.RData") ####################################################################################### ## 1.0 train and test ################################################################# ####################################################################################### dt.featureEngineered <- dt.featureEngineered[, !c("lr"), with = F] cat("prepare train, valid, and test data set...\n") set.seed(888) dt.train <- dt.featureEngineered[TARGET >= 0] dt.test <- dt.featureEngineered[TARGET == -1] dim(dt.train); dim(dt.test) table(dt.train$TARGET) ####################################################################################### ## 2.0 train oof ###################################################################### ####################################################################################### ## folds cat("folds ...\n") k = 5 # change to 5 set.seed(888) folds <- createFolds(dt.train$TARGET, k = k, list = F) ## init preds vec.xgb.pred.train <- rep(0, nrow(dt.train)) vec.xgb.pred.test <- rep(0, nrow(dt.test)) ## init x.test x.test <- sparse.model.matrix(~., data = dt.test[, !c("ID", "TARGET"), with = F]) ## init some params # watchlist <- list(val = dmx.valid, train = dmx.train) # change to dval md <- 5 gamma <- 0 cbl <- .5 # .3* | .4 | .5 ; .2 ss <- .4 # .4; .9 spw <- 1 j <- 1 score <- numeric() params <- list(booster = "gbtree" , nthread = 8 , objective = "binary:logistic" , 
eval_metric = "auc" , max_depth = 5 # 5 , subsample = ss #.74 , min_child_weight = 1 # 1 , gamma = gamma , colsample_bylevel = cbl , eta = 0.022 #.0201 , scale_pos_weight = spw ) ## oof train for(i in 1:k){ f <- folds == i dmx.train <- xgb.DMatrix(data = sparse.model.matrix(TARGET ~., data = dt.train[!f, setdiff(names(dt.train), c("ID")), with = F]), label = dt.train[!f]$TARGET) dmx.valid <- xgb.DMatrix(data = sparse.model.matrix(TARGET ~., data = dt.train[f, setdiff(names(dt.train), c("ID")), with = F]), label = dt.train[f]$TARGET) watchlist <- list(val = dmx.valid, train = dmx.train) # change to dval set.seed(1234 * i) md.xgb <- xgb.train(params = params , data = dmx.train , nrounds = 100000 , early.stop.round = 50 , watchlist = watchlist , print.every.n = 50 , verbose = F ) # valid pred.valid <- predict(md.xgb, dmx.valid) vec.xgb.pred.train[f] <- pred.valid print(paste("fold:", i, "valid auc:", auc(dt.train$TARGET[f], pred.valid))) score[i] <- auc(dt.train$TARGET[f], pred.valid) # test pred.test <- predict(md.xgb, x.test) vec.xgb.pred.test <- vec.xgb.pred.test + pred.test / k } mean(score) sd(score) auc(dt.train$TARGET, vec.xgb.pred.train) # 0.8415005 oof k = 5 xgb with cnt0, cnt1, kmeans, lr, vars with spw, cpl tuning # 0.8410063 oof k = 5 xgb with cnt0, cnt1, kmeans, vars with spw, cpl, without lr # 0.8420962 oof k = 5 xgb with dummy # 0.842384 oof k = 5 xgb with scale ####################################################################################### ## submit ############################################################################# ####################################################################################### # pred.test <- predict(md.xgb, x.test) # pred.test.mean <- apply(as.data.table(sapply(ls.pred.test, print)), 1, mean) # submit <- data.table(ID = dt.test$ID, TARGET = pred.test) submit <- data.table(ID = dt.test$ID, TARGET = vec.xgb.pred.test) write.csv(submit, file = "submission/32_oof_k_5_xgb_scale.csv", row.names = F) # 0.839825 oof 
k = 5 xgb with cnt0, cnt1, kmeans, lr, vars with spw, cpl tuning # 0.839007 oof k = 5 xgb with cnt0, cnt1, kmeans, vars with spw, cpl, without lr # 0.838.. oof k = 5 xgb with dummy # 0.838653 oof k = 5 xgb with scale save(vec.xgb.pred.train, vec.xgb.pred.test, file = "../data/Santander_Customer_Satisfaction/RData/dt_meta_1_xgb.RData")
/script/11_singleModel_xgb_oof.R
no_license
noahhhhhh/Santander_Customer_Satisfaction
R
false
false
4,693
r
# NOTE(review): collapsed one-line dump of an out-of-fold (OOF) xgboost
# training script for the Kaggle "Santander Customer Satisfaction"
# competition; restore line breaks against the upstream repo before
# running.
#
# Pipeline:
#   1. Load engineered features (RData), drop the "lr" column.
#   2. Split: train = rows with TARGET >= 0, test = rows with TARGET == -1.
#   3. 5-fold CV (caret::createFolds): per fold, build sparse model
#      matrices, train xgb.train (binary:logistic, AUC, eta 0.022, early
#      stopping 50), store fold predictions in vec.xgb.pred.train and
#      average test predictions into vec.xgb.pred.test.
#   4. Report per-fold and overall AUC, write the submission CSV, and save
#      the OOF vectors as meta-features for stacking.
#
# NOTE(review): uses deprecated xgboost argument spellings
# (early.stop.round, print.every.n); current releases expect
# early_stopping_rounds / print_every_n — confirm the pinned xgboost
# version before running.
# NOTE(review): setwd()/rm(list = ls()) at the top and the absolute
# "/Volumes/..." path are machine-specific script conveniences.
setwd("/Volumes/Data Science/Google Drive/data_science_competition/kaggle/Santander_Customer_Satisfaction/") rm(list = ls()); gc(); require(data.table) require(purrr) require(caret) require(xgboost) require(Matrix) require(Ckmeans.1d.dp) require(Metrics) require(ggplot2) require(combinat) source("utilities/preprocess.R") source("utilities/cv.R") load("../data/Santander_Customer_Satisfaction/RData/dt_featureEngineered.RData") # load("../data/Santander_Customer_Satisfaction/RData/dt_featureEngineered_combine.RData") load("../data/Santander_Customer_Satisfaction/RData/cols_selected.RData") ####################################################################################### ## 1.0 train and test ################################################################# ####################################################################################### dt.featureEngineered <- dt.featureEngineered[, !c("lr"), with = F] cat("prepare train, valid, and test data set...\n") set.seed(888) dt.train <- dt.featureEngineered[TARGET >= 0] dt.test <- dt.featureEngineered[TARGET == -1] dim(dt.train); dim(dt.test) table(dt.train$TARGET) ####################################################################################### ## 2.0 train oof ###################################################################### ####################################################################################### ## folds cat("folds ...\n") k = 5 # change to 5 set.seed(888) folds <- createFolds(dt.train$TARGET, k = k, list = F) ## init preds vec.xgb.pred.train <- rep(0, nrow(dt.train)) vec.xgb.pred.test <- rep(0, nrow(dt.test)) ## init x.test x.test <- sparse.model.matrix(~., data = dt.test[, !c("ID", "TARGET"), with = F]) ## init some params # watchlist <- list(val = dmx.valid, train = dmx.train) # change to dval md <- 5 gamma <- 0 cbl <- .5 # .3* | .4 | .5 ; .2 ss <- .4 # .4; .9 spw <- 1 j <- 1 score <- numeric() params <- list(booster = "gbtree" , nthread = 8 , objective = "binary:logistic" , 
eval_metric = "auc" , max_depth = 5 # 5 , subsample = ss #.74 , min_child_weight = 1 # 1 , gamma = gamma , colsample_bylevel = cbl , eta = 0.022 #.0201 , scale_pos_weight = spw ) ## oof train for(i in 1:k){ f <- folds == i dmx.train <- xgb.DMatrix(data = sparse.model.matrix(TARGET ~., data = dt.train[!f, setdiff(names(dt.train), c("ID")), with = F]), label = dt.train[!f]$TARGET) dmx.valid <- xgb.DMatrix(data = sparse.model.matrix(TARGET ~., data = dt.train[f, setdiff(names(dt.train), c("ID")), with = F]), label = dt.train[f]$TARGET) watchlist <- list(val = dmx.valid, train = dmx.train) # change to dval set.seed(1234 * i) md.xgb <- xgb.train(params = params , data = dmx.train , nrounds = 100000 , early.stop.round = 50 , watchlist = watchlist , print.every.n = 50 , verbose = F ) # valid pred.valid <- predict(md.xgb, dmx.valid) vec.xgb.pred.train[f] <- pred.valid print(paste("fold:", i, "valid auc:", auc(dt.train$TARGET[f], pred.valid))) score[i] <- auc(dt.train$TARGET[f], pred.valid) # test pred.test <- predict(md.xgb, x.test) vec.xgb.pred.test <- vec.xgb.pred.test + pred.test / k } mean(score) sd(score) auc(dt.train$TARGET, vec.xgb.pred.train) # 0.8415005 oof k = 5 xgb with cnt0, cnt1, kmeans, lr, vars with spw, cpl tuning # 0.8410063 oof k = 5 xgb with cnt0, cnt1, kmeans, vars with spw, cpl, without lr # 0.8420962 oof k = 5 xgb with dummy # 0.842384 oof k = 5 xgb with scale ####################################################################################### ## submit ############################################################################# ####################################################################################### # pred.test <- predict(md.xgb, x.test) # pred.test.mean <- apply(as.data.table(sapply(ls.pred.test, print)), 1, mean) # submit <- data.table(ID = dt.test$ID, TARGET = pred.test) submit <- data.table(ID = dt.test$ID, TARGET = vec.xgb.pred.test) write.csv(submit, file = "submission/32_oof_k_5_xgb_scale.csv", row.names = F) # 0.839825 oof 
k = 5 xgb with cnt0, cnt1, kmeans, lr, vars with spw, cpl tuning # 0.839007 oof k = 5 xgb with cnt0, cnt1, kmeans, vars with spw, cpl, without lr # 0.838.. oof k = 5 xgb with dummy # 0.838653 oof k = 5 xgb with scale save(vec.xgb.pred.train, vec.xgb.pred.test, file = "../data/Santander_Customer_Satisfaction/RData/dt_meta_1_xgb.RData")
# Fuzzer-derived regression input (AFL/valgrind harness) for
# multivariance::fastdist: a 5x7 matrix holding a few extreme doubles
# followed by zeros. The call must run cleanly under valgrind; str() dumps
# the result structure.
fuzz_input <- list(
  x = matrix(
    c(2.31584307392677e+77, 9.53818252170339e+295, 2.08160089612635e+146,
      4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
    nrow = 5L, ncol = 7L
  )
)
result <- do.call(multivariance::fastdist, fuzz_input)
str(result)
/multivariance/inst/testfiles/fastdist/AFL_fastdist/fastdist_valgrind_files/1613098379-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
303
r
# Fuzzer-derived regression input (AFL/valgrind harness) for
# multivariance::fastdist: a 5x7 matrix holding a few extreme doubles
# followed by zeros. The call must run cleanly under valgrind; str() dumps
# the result structure.
fuzz_input <- list(
  x = matrix(
    c(2.31584307392677e+77, 9.53818252170339e+295, 2.08160089612635e+146,
      4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
    nrow = 5L, ncol = 7L
  )
)
result <- do.call(multivariance::fastdist, fuzz_input)
str(result)
# Example from the DStree package: fit a discrete-time survival tree to
# the UnempDur unemployment-duration data and plot the fitted tree.
library(DStree)

## Fit the tree: spell (duration) against covariates, censoring indicator
## "censor1", with no complexity-parameter pruning (cp = 0).
fit <- DStree(spell ~ ui + age + tenure + logwage,
              status = "censor1",
              data = UnempDur,
              control = list(cp = 0))

## Visualise the fitted survival tree.
plot(fit)
/data/genthat_extracted_code/DStree/examples/DStree.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
236
r
# Example from the DStree package: fit a discrete-time survival tree to
# the UnempDur unemployment-duration data and plot the fitted tree.
library(DStree)

## Fit the tree: spell (duration) against covariates, censoring indicator
## "censor1", with no complexity-parameter pruning (cp = 0).
fit <- DStree(spell ~ ui + age + tenure + logwage,
              status = "censor1",
              data = UnempDur,
              control = list(cp = 0))

## Visualise the fitted survival tree.
plot(fit)
# NOTE(review): collapsed dump of an ordinal-classification comparison on
# a wine-quality dataset (phpTJRsqa.csv; classes 1 and 7 dropped, Class
# made an ordered factor); restore line breaks before running.
#
# Models compared via MSE / exact accuracy (acc) / within-one-level
# accuracy (acc1) on an 80/20 caret split:
#   m1: proportional-odds logistic regression (MASS::polr)
#   m2: multinomial logit (nnet::multinom)
#   m3: ordinal decomposition into 4 binary "Class > k" targets fitted
#       with ranger forests, recombined by calc_prob() from cumulative
#       threshold probabilities
#   m4: plain ranger random forest on Class
#   m5: the same decomposition with multinom base learners
#
# NOTE(review): the triple-quoted """ ... """ attribute-information block
# is a Python idiom; in R it parses as string literals that are evaluated
# and discarded — it should be converted to comments.
# NOTE(review): convert() builds the stacked feature frames from the
# global train_ds regardless of its argument `x` (only x$Class is used
# for labels) — it behaves correctly only when called with train_ds;
# confirm against upstream before reusing.
library(dplyr) library(readr) library(caret) # library(mlr3) library(MASS) data <- readr::read_csv("phpTJRsqa.csv") #install.packages("tableone") #install.packages("dlookr") library(glmnet) library(ranger) """ Attribute Information: For more information, read [Cortez et al., 2009]. Input variables (based on physicochemical tests): 1 - fixed acidity 2 - volatile acidity 3 - citric acid 4 - residual sugar 5 - chlorides 6 - free sulfur dioxide 7 - total sulfur dioxide 8 - density 9 - pH 10 - sulphates 11 - alcohol Output variable (based on sensory data): 12 - quality (score between 0 and 10) """ data <- data[data$Class!=7 & data$Class!=1,] data$Class <- as.factor(data$Class) data$Class <- ordered(data$Class) features <- data[, -12] set.seed(999) train_index <- createDataPartition(data$Class, p=0.8, list=FALSE, times = 1) levels(data$Class) train_ds <- data[train_index,] test_ds <- data[-train_index,] mse <- function(x,y){ mean((as.integer(x)-as.integer(y))^2) } acc <- function(x,y){ mean(as.numeric(x) == as.numeric(y)) } acc1 <- function(x,y){ mean(abs((as.numeric(x)-as.numeric(y)) <= 1)) } m1 <- polr(Class ~ ., data = train_ds, Hess=TRUE) m1_pred <- predict(m1, test_ds) library(nnet) m2 <- nnet::multinom(Class ~ ., data = train_ds) #m2 <- glmnet::glmnet(x = as.matrix(train_ds[, -12]), y = as.matrix(train_ds[,12]), family = "multinomial", alpha=0.5) m2_pred <- predict(m2, test_ds) mse(test_ds$Class, m1_pred) mse(test_ds$Class, m2_pred) acc(test_ds$Class, m1_pred) acc(test_ds$Class, m2_pred) #---------------------------------------------------------------------------------------- convert <- function(x){ tresholds <- as.numeric(levels(train_ds$Class)) tresholds <- tresholds[-length(tresholds)] lables <- matrix(ncol = 4, nrow = nrow(x)) for(i in 1:nrow(x)){ lables[i,] <- as.logical(as.numeric(x$Class[i]) > tresholds-1) } lables list(cbind(train_ds[,-12], a=lables[,1]), cbind(train_ds[,-12], b=lables[,2]), cbind(train_ds[,-12], c=lables[,3]), cbind(train_ds[,-12], 
d=lables[,4])) } #------------------------------------------------------------------------------------------- calc_prob <- function(responses){ a <- responses[[1]] b <- responses[[2]] c <- responses[[3]] d <- responses[[4]] # a,b,c,d - probabilities tresholds (vectors) from model for classes n <- length(a) predicts <- numeric(n) for(i in 1:n){ probs <- numeric(5) probs[1] <- 1 - a[i] probs[2] <- a[i] - b[i] probs[3] <- b[i] - c[i] probs[4] <- c[i] - d[i] probs[5] <- d[i] predicts[i] <- which.max(probs) + 1 } predicts } stack <- convert(train_ds) responses <- list() for(i in 1:4){ print(i) df <- stack[[i]] target_name <- colnames(df)[ncol(df)] m <- ranger(paste(c(target_name, " ~ ."), collapse = ""), data = df) #glm(paste(c(target_name, " ~ ."), collapse = ""), data = df, family = "binomial") #m <- glmnet::glmnet(x = as.matrix(stack[[i]][, -12]), y = as.matrix(stack[[i]][,12]), family = "binomial", alpha=0.5) m_pred <- predict(m, test_ds[,-12], type="response") responses[[i]] <- m_pred$predictions } m3_pred <- calc_prob(responses) #---------------------------------------------------------- m4 <- ranger(Class ~ ., train_ds) m4_pred <- predict(m4, test_ds, type = "response") m4_pred <- m4_pred$predictions # ---------------------------------------------------------------- responses_logit <- list() for(i in 1:4){ print(i) df <- stack[[i]] target_name <- colnames(df)[ncol(df)] m <- multinom(paste(c(target_name, " ~ ."), collapse = ""), data = df) #glm(paste(c(target_name, " ~ ."), collapse = ""), data = df, family = "binomial") #m <- glmnet::glmnet(x = as.matrix(stack[[i]][, -12]), y = as.matrix(stack[[i]][,12]), family = "binomial", alpha=0.5) m_pred <- predict(m, test_ds[,-12], type="probs") responses[[i]] <- m_pred } m5_pred <- calc_prob(responses) # ---------------------------------------------------------------- mse(test_ds$Class, m1_pred) mse(test_ds$Class, m2_pred) mse(test_ds$Class, m3_pred) mse(test_ds$Class, m4_pred) mse(test_ds$Class, m5_pred) 
acc(test_ds$Class, m1_pred) acc(test_ds$Class, m2_pred) acc(test_ds$Class, m3_pred) acc(test_ds$Class, m4_pred) acc(test_ds$Class, m5_pred) acc1(test_ds$Class, m1_pred) acc1(test_ds$Class, m2_pred) acc1(test_ds$Class, m3_pred)
/EDA_wina/POLR_2.R
no_license
airi314/ProjektWB2
R
false
false
4,342
r
# NOTE(review): collapsed dump of an ordinal-classification comparison on
# a wine-quality dataset (phpTJRsqa.csv; classes 1 and 7 dropped, Class
# made an ordered factor); restore line breaks before running.
#
# Models compared via MSE / exact accuracy (acc) / within-one-level
# accuracy (acc1) on an 80/20 caret split:
#   m1: proportional-odds logistic regression (MASS::polr)
#   m2: multinomial logit (nnet::multinom)
#   m3: ordinal decomposition into 4 binary "Class > k" targets fitted
#       with ranger forests, recombined by calc_prob() from cumulative
#       threshold probabilities
#   m4: plain ranger random forest on Class
#   m5: the same decomposition with multinom base learners
#
# NOTE(review): the triple-quoted """ ... """ attribute-information block
# is a Python idiom; in R it parses as string literals that are evaluated
# and discarded — it should be converted to comments.
# NOTE(review): convert() builds the stacked feature frames from the
# global train_ds regardless of its argument `x` (only x$Class is used
# for labels) — it behaves correctly only when called with train_ds;
# confirm against upstream before reusing.
library(dplyr) library(readr) library(caret) # library(mlr3) library(MASS) data <- readr::read_csv("phpTJRsqa.csv") #install.packages("tableone") #install.packages("dlookr") library(glmnet) library(ranger) """ Attribute Information: For more information, read [Cortez et al., 2009]. Input variables (based on physicochemical tests): 1 - fixed acidity 2 - volatile acidity 3 - citric acid 4 - residual sugar 5 - chlorides 6 - free sulfur dioxide 7 - total sulfur dioxide 8 - density 9 - pH 10 - sulphates 11 - alcohol Output variable (based on sensory data): 12 - quality (score between 0 and 10) """ data <- data[data$Class!=7 & data$Class!=1,] data$Class <- as.factor(data$Class) data$Class <- ordered(data$Class) features <- data[, -12] set.seed(999) train_index <- createDataPartition(data$Class, p=0.8, list=FALSE, times = 1) levels(data$Class) train_ds <- data[train_index,] test_ds <- data[-train_index,] mse <- function(x,y){ mean((as.integer(x)-as.integer(y))^2) } acc <- function(x,y){ mean(as.numeric(x) == as.numeric(y)) } acc1 <- function(x,y){ mean(abs((as.numeric(x)-as.numeric(y)) <= 1)) } m1 <- polr(Class ~ ., data = train_ds, Hess=TRUE) m1_pred <- predict(m1, test_ds) library(nnet) m2 <- nnet::multinom(Class ~ ., data = train_ds) #m2 <- glmnet::glmnet(x = as.matrix(train_ds[, -12]), y = as.matrix(train_ds[,12]), family = "multinomial", alpha=0.5) m2_pred <- predict(m2, test_ds) mse(test_ds$Class, m1_pred) mse(test_ds$Class, m2_pred) acc(test_ds$Class, m1_pred) acc(test_ds$Class, m2_pred) #---------------------------------------------------------------------------------------- convert <- function(x){ tresholds <- as.numeric(levels(train_ds$Class)) tresholds <- tresholds[-length(tresholds)] lables <- matrix(ncol = 4, nrow = nrow(x)) for(i in 1:nrow(x)){ lables[i,] <- as.logical(as.numeric(x$Class[i]) > tresholds-1) } lables list(cbind(train_ds[,-12], a=lables[,1]), cbind(train_ds[,-12], b=lables[,2]), cbind(train_ds[,-12], c=lables[,3]), cbind(train_ds[,-12], 
d=lables[,4])) } #------------------------------------------------------------------------------------------- calc_prob <- function(responses){ a <- responses[[1]] b <- responses[[2]] c <- responses[[3]] d <- responses[[4]] # a,b,c,d - probabilities tresholds (vectors) from model for classes n <- length(a) predicts <- numeric(n) for(i in 1:n){ probs <- numeric(5) probs[1] <- 1 - a[i] probs[2] <- a[i] - b[i] probs[3] <- b[i] - c[i] probs[4] <- c[i] - d[i] probs[5] <- d[i] predicts[i] <- which.max(probs) + 1 } predicts } stack <- convert(train_ds) responses <- list() for(i in 1:4){ print(i) df <- stack[[i]] target_name <- colnames(df)[ncol(df)] m <- ranger(paste(c(target_name, " ~ ."), collapse = ""), data = df) #glm(paste(c(target_name, " ~ ."), collapse = ""), data = df, family = "binomial") #m <- glmnet::glmnet(x = as.matrix(stack[[i]][, -12]), y = as.matrix(stack[[i]][,12]), family = "binomial", alpha=0.5) m_pred <- predict(m, test_ds[,-12], type="response") responses[[i]] <- m_pred$predictions } m3_pred <- calc_prob(responses) #---------------------------------------------------------- m4 <- ranger(Class ~ ., train_ds) m4_pred <- predict(m4, test_ds, type = "response") m4_pred <- m4_pred$predictions # ---------------------------------------------------------------- responses_logit <- list() for(i in 1:4){ print(i) df <- stack[[i]] target_name <- colnames(df)[ncol(df)] m <- multinom(paste(c(target_name, " ~ ."), collapse = ""), data = df) #glm(paste(c(target_name, " ~ ."), collapse = ""), data = df, family = "binomial") #m <- glmnet::glmnet(x = as.matrix(stack[[i]][, -12]), y = as.matrix(stack[[i]][,12]), family = "binomial", alpha=0.5) m_pred <- predict(m, test_ds[,-12], type="probs") responses[[i]] <- m_pred } m5_pred <- calc_prob(responses) # ---------------------------------------------------------------- mse(test_ds$Class, m1_pred) mse(test_ds$Class, m2_pred) mse(test_ds$Class, m3_pred) mse(test_ds$Class, m4_pred) mse(test_ds$Class, m5_pred) 
acc(test_ds$Class, m1_pred) acc(test_ds$Class, m2_pred) acc(test_ds$Class, m3_pred) acc(test_ds$Class, m4_pred) acc(test_ds$Class, m5_pred) acc1(test_ds$Class, m1_pred) acc1(test_ds$Class, m2_pred) acc1(test_ds$Class, m3_pred)
# Climate Risk Profiles — agro-climatic index helper library (Alliance
# Bioversity-CIAT, 2021). NOTE(review): collapsed dump; the chunk ends
# mid-comment (the DLOSS helper continues past this excerpt).
#
# Contents:
# - Windows/SNOW parallelization: clusterExport (override that pushes
#   global objects to workers one-by-one) and createCluster (doSNOW SOCK
#   cluster with optional exports and library loading).
# - Utilities: rsum.lapply (rolling n-day sum plus contributing indices),
#   cumulative.r.sum (extracts the sums), is.leapyear.
# - Dry/heat-spell indices built on rle(): calc_cdd (max consecutive days
#   with prec below p_thresh), calc_cdd_temp (max consecutive days with
#   TMAX above t_thresh), dr_stress, calc_max_cdd / calc_mean_cdd,
#   calc_p5d (max 5-day running mean prec via caTools::runmean),
#   calc_hts, calc_p95, calc_totprec, calc_precdays, calc_txxdays /
#   calc_tnndays (day temperature = 0.75*tmax + 0.25*tmin), tmean.
# - Soil/water balance: soilcap_calc (plant-available water capacity in
#   mm, interpolating water content at the rooting depth), peest
#   (Priestley-Taylor potential evapotranspiration), eabyep / eabyep_calc
#   (Ea/Ep ratio after Jones 1987), watbal_wrapper (daily bucket water
#   balance: AVAIL, DEMAND, ERATIO, RUNOFF, CUM_prec, Etmax),
#   calc_wsdays and calc_cons_wsdays (water-stress day counts).
# - Thermal time: calc_att accumulates capped-top thermal time via ttfun
#   (Tb base, To optimum); calc_cdur (crop-duration excess over t_thresh).
#
# NOTE(review): calc_hts is defined twice with different signatures — the
# later (season_ini/season_end) definition wins and calc_htsCMP is
# rebound; confirm which variant callers expect.
# NOTE(review): the rle()-based spell indices return -Inf (with a
# warning) when no qualifying run exists, because max() is taken over an
# empty vector — callers should guard.
# Several compiler::cmpfun() "*CMP" aliases are kept for callers that use
# the byte-compiled names.
# -------------------------------------------------- # # Climate Risk Profiles -- Main functions # A. Esquivel, H. Achicanoy & J. Ramirez-Villegas # Alliance Bioversity-CIAT, 2021 # -------------------------------------------------- # # Windows parallelization functions clusterExport <- local({ gets <- function(n, v) { assign(n, v, envir = .GlobalEnv); NULL } function(cl, list, envir = .GlobalEnv) { ## do this with only one clusterCall--loop on slaves? for (name in list) { clusterCall(cl, gets, name, get(name, envir = envir)) } } }) createCluster <- function(noCores, logfile = "/dev/null", export = NULL, lib = NULL) { require(doSNOW) cl <- makeCluster(noCores, type = "SOCK", outfile = logfile) if(!is.null(export)) clusterExport(cl, export) if(!is.null(lib)) { plyr::l_ply(lib, function(dum) { clusterExport(cl, "dum", envir = environment()) clusterEvalQ(cl, library(dum, character.only = TRUE)) }) } registerDoSNOW(cl) return(cl) } # Agro-climatic indices rsum.lapply <- function(x, n=3L) # Calculate rollin sum { lapply(1:(length(x)-n+1), function(i) { # Sum for n consecutive days z <- sum(x[i:(i+n-1)]) # Indices used to calculate the sum seq.sum <- as.numeric(i:(i+n-1)) # List with SUM and INDICES results <- list(z, seq.sum) return(results) }) } cumulative.r.sum <- function(results){ unlist(lapply(results, function(x){z <- x[[1]]; return(z)})) } # Extract the SUM is.leapyear <- function(year){ return(((year %% 4 == 0) & (year %% 100 != 0)) | (year %% 400 == 0)) } # Function to identify leap years ## CDD. Maximum number of consecutive dry days calc_cdd <- function(PREC, p_thresh=1){ runs <- rle(PREC < p_thresh) cons_days <- max(runs$lengths[runs$values==1], na.rm=TRUE) return(cons_days) } calc_cddCMP <- compiler::cmpfun(calc_cdd) ## CDD. 
Maximum number of consecutive days with TMAX above t_thresh calc_cdd_temp <- function(TMAX, t_thresh=37){ runs <- rle(TMAX > t_thresh) cons_days <- max(runs$lengths[runs$values==1], na.rm=TRUE) return(cons_days) } calc_cdd_tempCMP <- compiler::cmpfun(calc_cdd_temp) ## P5D. Maximum 5-day running average precipitation calc_p5d <- function(PREC){ runAvg <- caTools::runmean(PREC, k=5, endrule='NA') runAvg <- max(runAvg, na.rm=TRUE) return(runAvg) } calc_p5dCMP <- compiler::cmpfun(calc_p5d) ## NT35. Number of days with max. temperature above 35?C calc_hts <- function(tmax, t_thresh=35) { hts <- length(which(tmax >= t_thresh)) return(hts) } calc_htsCMP <- compiler::cmpfun(calc_hts) ## P95. 95th percentile of daily precipitation calc_p95 <- function(PREC){ quantile(PREC, probs = .95, na.rm = T) } calc_p95CMP <- compiler::cmpfun(calc_p95) ### Mean temperature *** tmean <- function(tmax, tmin, season_ini=1, season_end=365){ tavg <- lapply(1:nrow(tmax), function(i){ tavg <- (tmax[i, season_ini:season_end]+tmin[i, season_ini:season_end])/2 }) tavg <- do.call(rbind, tavg) return(tavg) } tmeanCMP <- compiler::cmpfun(tmean) ### Total prec at year *** calc_totprec <- function(prec){ totprec <- sum(prec, na.rm=T) return(totprec) } calc_totprecCMP <- compiler::cmpfun(calc_totprec) ### Maximum number of consecutive dry days, prec < 1 mm dr_stress <- function(PREC, p_thresh=1){ runs <- rle(PREC < p_thresh) cons_days <- max(runs$lengths[runs$values==1], na.rm=TRUE) return(cons_days) } dr_stressCMP <- compiler::cmpfun(dr_stress) ### number of prec days calc_precdays <- function(x, season_ini=1, season_end=365, p_thresh=0.1) { precdays <- length(which(x$prec[season_ini:season_end] > p_thresh)) return(precdays) } ### maximum consecutive dry days calc_max_cdd <- function(x, year=2000, season_ini=1, season_end=365, p_thresh=0.1) { cdd <- 0; cdd_seq <- c() for (i_x in season_ini:season_end) { if (x$prec[i_x] < p_thresh) { cdd <- cdd+1 } else { cdd_seq <- c(cdd_seq, cdd) cdd <- 0 } } max_cdd 
<- max(cdd_seq) return(max_cdd) } ### mean consecutive dry days calc_mean_cdd <- function(x, season_ini=1, season_end=365, p_thresh=0.1) { cdd <- 0; cdd_seq <- c() for (i_x in season_ini:season_end) { if (x$prec[i_x] < p_thresh) { cdd <- cdd+1 } else { cdd_seq <- c(cdd_seq, cdd) cdd <- 0 } } mean_cdd <- mean(cdd_seq[which(cdd_seq > 0)],na.rm=T) return(mean_cdd) } ### number of prec days calc_txxdays <- function(x, season_ini=1, season_end=365, t_thresh=30) { x$TDAY <- x$tmax*0.75 + x$tmin*0.25 #day temperature txxdays <- length(which(x$TDAY[season_ini:season_end] > t_thresh)) return(txxdays) } ### number of prec days calc_tnndays <- function(x, season_ini=1, season_end=365, t_thresh=10) { x$TDAY <- x$tmax*0.75 + x$tmin*0.25 #day temperature tnndays <- length(which(x$TDAY[season_ini:season_end] < t_thresh)) return(tnndays) } ### calculate soilcap in mm soilcap_calc <- function(x, minval, maxval) { rdepth <- max(c(x[4],minval)) #cross check rdepth <- min(c(rdepth,maxval)) #cross-check wc_df <- data.frame(depth=c(2.5,10,22.5,45,80,150),wc=(x[5:10])*.01) if (!rdepth %in% wc_df$depth) { wc_df1 <- wc_df[which(wc_df$depth < rdepth),] wc_df2 <- wc_df[which(wc_df$depth > rdepth),] y1 <- wc_df1$wc[nrow(wc_df1)]; y2 <- wc_df2$wc[1] x1 <- wc_df1$depth[nrow(wc_df1)]; x2 <- wc_df2$depth[1] ya <- (rdepth-x1) / (x2-x1) * (y2-y1) + y1 wc_df <- rbind(wc_df1,data.frame(depth=rdepth,wc=ya),wc_df2) } wc_df <- wc_df[which(wc_df$depth <= rdepth),] wc_df$soilthick <- wc_df$depth - c(0,wc_df$depth[1:(nrow(wc_df)-1)]) wc_df$soilcap <- wc_df$soilthick * wc_df$wc soilcp <- sum(wc_df$soilcap) * 10 #in mm return(soilcp) } # potential evapotranspiration peest <- function(srad, tmin, tmax) { #constants albedo <- 0.2 vpd_cte <- 0.7 #soil heat flux parameters a_eslope=611.2 b_eslope=17.67 c_eslope=243.5 #input parameters tmean <- (tmin+tmax)/2 #net radiation rn = (1-albedo) * srad #soil heat flux eslope=a_eslope*b_eslope*c_eslope/(tmean+c_eslope)^2*exp(b_eslope*tmean/(tmean+c_eslope)) #estimate vpd 
esat_min=0.61120*exp((17.67*tmin)/(tmin+243.5)) esat_max=0.61120*exp((17.67*tmax)/(tmax+243.5)) vpd=vpd_cte*(esat_max-esat_min) #kPa #Priestley-Taylor pt_const=1.26 pt_fact=1 vpd_ref=1 psycho=62 rho_w=997 rlat_ht=2.26E6 pt_coef=pt_fact*pt_const pt_coef = 1 + (pt_coef-1) * vpd / vpd_ref #*10^6? To convert fluxes MJ to J #rlat_ht? Latent heat flux to water flux #100/rho_w? Kg/m^2 to cm et_max=(pt_coef * rn * eslope/(eslope+psycho) * 10^6 / rlat_ht * 100/rho_w)*10 #in mm return(et_max) } # the two functions below estimate the ea/ep # based on Jones (1987) # ea/ep: actual to potential evapotranspiration ratio eabyep_calc <- function(soilcp=100, cropfc=1, avail=50, prec, evap) { avail <- min(c(avail,soilcp)) eratio <- eabyep(soilcp,avail) demand <- eratio*cropfc*evap result <- avail + prec - demand runoff <- result - soilcp avail <- min(c(soilcp,result)) avail <- max(c(avail,0)) runoff <- max(c(runoff,0)) out <- data.frame(AVAIL=avail,DEMAND=demand,ERATIO=eratio,prec=prec,RUNOFF=runoff) return(out) } # ea/ep function eabyep <- function(soilcp, avail) { percwt <- min(c(100,avail/soilcp*100)) percwt <- max(c(1,percwt)) eratio <- min(c(percwt/(97-3.868*sqrt(soilcp)),1)) return(eratio) } # wrapper to calculate the water balance modeling variables watbal_wrapper <- function(out_all, soilcp){ out_all$Etmax <- out_all$AVAIL <- out_all$ERATIO <- out_all$RUNOFF <- out_all$DEMAND <- out_all$CUM_prec <- NA for (d in 1:nrow(out_all)) { out_all$Etmax[d] <- peest(out_all$srad[d], out_all$tmin[d], out_all$tmax[d]) if (d==1) { out_all$CUM_prec[d] <- out_all$prec[d] sfact <- eabyep_calc(soilcp=soilcp, cropfc=1, avail=0, prec=out_all$prec[d], evap=out_all$Etmax[d]) out_all$AVAIL[d] <- sfact$AVAIL out_all$ERATIO[d] <- sfact$ERATIO out_all$RUNOFF[d] <- sfact$RUNOFF out_all$DEMAND[d] <- sfact$DEMAND } else { out_all$CUM_prec[d] <- out_all$CUM_prec[d-1] + out_all$prec[d] sfact <- eabyep_calc(soilcp=soilcp, cropfc=1, avail=out_all$AVAIL[d-1], prec=out_all$prec[d], evap=out_all$Etmax[d]) 
out_all$AVAIL[d] <- sfact$AVAIL out_all$ERATIO[d] <- sfact$ERATIO out_all$RUNOFF[d] <- sfact$RUNOFF out_all$DEMAND[d] <- sfact$DEMAND } } return(out_all) } # calculate number of water stress days calc_wsdays <- function(ERATIO, season_ini=1, season_end=365, e_thresh=0.3) { wsdays <- length(which(ERATIO[season_ini:season_end] < e_thresh)) return(wsdays) } calc_wsdaysCMP <- compiler::cmpfun(calc_wsdays) ### HTS1, HTS2, LETHAL: heat stress using tmax *** calc_hts <- function(tmax, season_ini=1, season_end=365, t_thresh=35) { hts <- length(which(tmax[season_ini:season_end] >= t_thresh)) return(hts) } calc_htsCMP <- compiler::cmpfun(calc_hts) ### CD: crop duration, if Tmean > (22, 23, 24) then CD=T-23, else CD=0 *** calc_cdur <- function(TMEAN, season_ini=1, season_end=365, t_thresh=35){ tmean <- mean(TMEAN[season_ini:season_end], na.rm=T) if (tmean > t_thresh) {cdur <- tmean - t_thresh} else {cdur <- 0} return(cdur) } calc_cdurCMP <- compiler::cmpfun(calc_cdur) # DS2: max number of consecutive days Ea/Ep < 0.4, 0.5, 0.6 calc_cons_wsdays <- function(x, season_ini=1, season_end=365, e_thresh=0.4) { cdd <- 0; cdd_seq <- c() for (i_x in season_ini:season_end) { if (x$ERATIO[i_x] < e_thresh) { cdd <- cdd+1 } else { cdd_seq <- c(cdd_seq, cdd) cdd <- 0 } } cdd_seq <- c(cdd_seq, cdd) max_cdd <- max(cdd_seq) return(max_cdd) } # ATT: accum thermal time using capped top, Tb=7,8,9, To=30,32.5,35 calc_att <- function(x, season_ini=1, season_end=365, tb=10, to=20) { x$TMEAN <- (x$tmin + x$tmax) * 0.5 att <- sapply(x$TMEAN[season_ini:season_end], ttfun, tb, to) att <- sum(att,na.rm=T) return(att) } # function to calc tt ttfun <- function(tmean, tb, to) { if (tmean<to & tmean>tb) { teff <- tmean-tb } else if (tmean>=to) { teff <- to-tb } else if (tmean<=tb) { teff <- 0 } return(teff) } # DLOSS: duration loss (difference between No. days to reach ATT_baseline in future vs. 
baseline) calc_dloss <- function(x, season_ini, dur_b=110, att_b=5000, tb=10, to=20) { x$TMEAN <- (x$tmin + x$tmax) * 0.5 att <- sapply(x$TMEAN[season_ini:(nrow(x))], ttfun, tb, to) att <- cumsum(att) estdur <- length(att[which(att < att_b)]) dloss <- dur_b - estdur return(dloss) } # WES: wet early season if period between sowing and anthesis is above field cap. >= 50 % time # i.e. frequency of days if RUNOFF > 1 calc_wes <- function(x, season_ini, season_end, r_thresh=1) { wes <- length(which(x$RUNOFF[season_ini:season_end] > r_thresh)) return(wes) } # BADSOW: no. days in sowing window +-15 centered at sdate with 0.05*SOILCP < AVAIL < 0.9*SOILCP # if this is < 3 then crop runs into trouble calc_badsow <- function(x, season_ini, soilcp) { sow_i <- season_ini - 15; sow_f <- season_ini + 15 if (sow_i < 1) {sow_i <- 1}; if (sow_f > 365) {sow_f <- 365} x <- x[sow_i:sow_f,] badsow <- length(which(x$AVAIL > (0.05*soilcp) & x$AVAIL < (0.9*soilcp))) return(badsow) } # BADHAR: no. days in harvest window (+25 after hdate) with AVAIL < 0.85*SOILCP # if this is < 3 then crop runs into trouble calc_badhar <- function(x, season_end, soilcp) { har_i <- season_end har_f <- har_i + 25; if (har_f > 365) {har_f <- 365} x <- x[har_i:har_f,] badhar <- length(which(x$AVAIL < (0.85*soilcp))) return(badhar) }
/00_main_functions.R
no_license
haachicanoy/climate-risk-profiles
R
false
false
11,629
r
# -------------------------------------------------- #
# Climate Risk Profiles -- Main functions
# A. Esquivel, H. Achicanoy & J. Ramirez-Villegas
# Alliance Bioversity-CIAT, 2021
# -------------------------------------------------- #

# Windows parallelization functions

# Export objects by name from `envir` onto every node of cluster `cl`.
# (Local re-implementation of snow's clusterExport; assigns into the
# workers' global environment, one clusterCall per object.)
clusterExport <- local({
  gets <- function(n, v) { assign(n, v, envir = .GlobalEnv); NULL }
  function(cl, list, envir = .GlobalEnv) {
    ## do this with only one clusterCall--loop on slaves?
    for (name in list) {
      clusterCall(cl, gets, name, get(name, envir = envir))
    }
  }
})

# Create and register a SOCK cluster with `noCores` workers.
# export: character vector of object names to copy to the workers.
# lib:    character vector of package names to load on each worker.
createCluster <- function(noCores, logfile = "/dev/null", export = NULL, lib = NULL) {
  library(doSNOW)  # was require(): library() fails loudly if doSNOW is missing
  cl <- makeCluster(noCores, type = "SOCK", outfile = logfile)
  if (!is.null(export)) clusterExport(cl, export)
  if (!is.null(lib)) {
    plyr::l_ply(lib, function(dum) {
      clusterExport(cl, "dum", envir = environment())
      clusterEvalQ(cl, library(dum, character.only = TRUE))
    })
  }
  registerDoSNOW(cl)
  return(cl)
}

# Agro-climatic indices

# Rolling sum of n consecutive values; returns a list of [sum, indices] pairs.
# Fix: returns an empty list (instead of nonsense indices from 1:(len-n+1))
# when length(x) < n.
rsum.lapply <- function(x, n = 3L) {
  lapply(seq_len(max(length(x) - n + 1L, 0L)), function(i) {
    # Indices of the n consecutive days summed in this window
    idx <- i:(i + n - 1L)
    list(sum(x[idx]), as.numeric(idx))
  })
}

# Extract the rolling sums from rsum.lapply() output
cumulative.r.sum <- function(results) {
  unlist(lapply(results, function(x) x[[1]]))
}

# TRUE for Gregorian leap years
is.leapyear <- function(year) {
  ((year %% 4 == 0) & (year %% 100 != 0)) | (year %% 400 == 0)
}

## CDD. Maximum number of consecutive dry days (prec < p_thresh).
## Fix: returns 0 (instead of -Inf plus a warning) when there is no dry day.
calc_cdd <- function(PREC, p_thresh = 1) {
  runs <- rle(PREC < p_thresh)
  dry_runs <- runs$lengths[which(runs$values)]  # which() drops NA run values
  if (length(dry_runs) == 0) return(0L)
  max(dry_runs)
}
calc_cddCMP <- compiler::cmpfun(calc_cdd)

## Maximum number of consecutive days with TMAX above t_thresh.
## Fix: returns 0 (instead of -Inf) when no day exceeds the threshold.
calc_cdd_temp <- function(TMAX, t_thresh = 37) {
  runs <- rle(TMAX > t_thresh)
  hot_runs <- runs$lengths[which(runs$values)]
  if (length(hot_runs) == 0) return(0L)
  max(hot_runs)
}
calc_cdd_tempCMP <- compiler::cmpfun(calc_cdd_temp)

## P5D. Maximum 5-day running average precipitation
calc_p5d <- function(PREC) {
  runAvg <- caTools::runmean(PREC, k = 5, endrule = 'NA')
  max(runAvg, na.rm = TRUE)
}
calc_p5dCMP <- compiler::cmpfun(calc_p5d)

## NT35. Number of days with max. temperature >= t_thresh (35 C by default).
## NOTE: redefined below with season arguments; when this file is sourced
## top-to-bottom the seasonal version is the one that remains bound.
calc_hts <- function(tmax, t_thresh = 35) {
  length(which(tmax >= t_thresh))
}
calc_htsCMP <- compiler::cmpfun(calc_hts)

## P95. 95th percentile of daily precipitation (named quantile, as before)
calc_p95 <- function(PREC) {
  quantile(PREC, probs = .95, na.rm = TRUE)
}
calc_p95CMP <- compiler::cmpfun(calc_p95)

### Mean temperature: row-wise daily mean of tmax/tmin over the season ***
tmean <- function(tmax, tmin, season_ini = 1, season_end = 365) {
  tavg <- lapply(seq_len(nrow(tmax)), function(i) {
    (tmax[i, season_ini:season_end] + tmin[i, season_ini:season_end]) / 2
  })
  do.call(rbind, tavg)
}
tmeanCMP <- compiler::cmpfun(tmean)

### Total precipitation over the year ***
calc_totprec <- function(prec) {
  sum(prec, na.rm = TRUE)
}
calc_totprecCMP <- compiler::cmpfun(calc_totprec)

### Maximum number of consecutive dry days, prec < 1 mm (same logic as
### calc_cdd; kept as a separate name for backward compatibility).
### Fix: returns 0 instead of -Inf when there is no dry day.
dr_stress <- function(PREC, p_thresh = 1) {
  runs <- rle(PREC < p_thresh)
  dry_runs <- runs$lengths[which(runs$values)]
  if (length(dry_runs) == 0) return(0L)
  max(dry_runs)
}
dr_stressCMP <- compiler::cmpfun(dr_stress)

### Number of rainy days (prec > p_thresh) within the season
calc_precdays <- function(x, season_ini = 1, season_end = 365, p_thresh = 0.1) {
  length(which(x$prec[season_ini:season_end] > p_thresh))
}

### Maximum consecutive dry days within the season.
### Fix: the dry spell still open at season end is now counted, consistent
### with calc_cons_wsdays() below (previously a trailing dry spell was lost).
calc_max_cdd <- function(x, year = 2000, season_ini = 1, season_end = 365, p_thresh = 0.1) {
  cdd <- 0; cdd_seq <- c()
  for (i_x in season_ini:season_end) {
    if (x$prec[i_x] < p_thresh) {
      cdd <- cdd + 1
    } else {
      cdd_seq <- c(cdd_seq, cdd)
      cdd <- 0
    }
  }
  cdd_seq <- c(cdd_seq, cdd)  # close the run still open at season end
  max(cdd_seq)
}

### Mean length of the (non-zero) dry spells within the season.
### Fix: trailing dry spell now counted (see calc_max_cdd).
calc_mean_cdd <- function(x, season_ini = 1, season_end = 365, p_thresh = 0.1) {
  cdd <- 0; cdd_seq <- c()
  for (i_x in season_ini:season_end) {
    if (x$prec[i_x] < p_thresh) {
      cdd <- cdd + 1
    } else {
      cdd_seq <- c(cdd_seq, cdd)
      cdd <- 0
    }
  }
  cdd_seq <- c(cdd_seq, cdd)  # close the run still open at season end
  mean(cdd_seq[which(cdd_seq > 0)], na.rm = TRUE)
}

### Number of days whose day temperature exceeds t_thresh
calc_txxdays <- function(x, season_ini = 1, season_end = 365, t_thresh = 30) {
  x$TDAY <- x$tmax * 0.75 + x$tmin * 0.25  # day temperature
  length(which(x$TDAY[season_ini:season_end] > t_thresh))
}

### Number of days whose day temperature falls below t_thresh
calc_tnndays <- function(x, season_ini = 1, season_end = 365, t_thresh = 10) {
  x$TDAY <- x$tmax * 0.75 + x$tmin * 0.25  # day temperature
  length(which(x$TDAY[season_ini:season_end] < t_thresh))
}

### Soil water-holding capacity in mm.
### x: record whose 4th element is rooting depth (cm, clamped to
###    [minval, maxval]) and elements 5:10 are water content (%) at depths
###    2.5, 10, 22.5, 45, 80 and 150 cm.
soilcap_calc <- function(x, minval, maxval) {
  rdepth <- max(c(x[4], minval))   # cross-check lower bound
  rdepth <- min(c(rdepth, maxval)) # cross-check upper bound
  wc_df <- data.frame(depth = c(2.5, 10, 22.5, 45, 80, 150), wc = (x[5:10]) * .01)
  if (!rdepth %in% wc_df$depth) {
    # Linearly interpolate the water content at the rooting depth
    wc_df1 <- wc_df[which(wc_df$depth < rdepth), ]
    wc_df2 <- wc_df[which(wc_df$depth > rdepth), ]
    y1 <- wc_df1$wc[nrow(wc_df1)]; y2 <- wc_df2$wc[1]
    x1 <- wc_df1$depth[nrow(wc_df1)]; x2 <- wc_df2$depth[1]
    ya <- (rdepth - x1) / (x2 - x1) * (y2 - y1) + y1
    wc_df <- rbind(wc_df1, data.frame(depth = rdepth, wc = ya), wc_df2)
  }
  wc_df <- wc_df[which(wc_df$depth <= rdepth), ]
  wc_df$soilthick <- wc_df$depth - c(0, wc_df$depth[1:(nrow(wc_df) - 1)])
  wc_df$soilcap <- wc_df$soilthick * wc_df$wc
  soilcp <- sum(wc_df$soilcap) * 10  # cm -> mm
  return(soilcp)
}

# Potential evapotranspiration (mm/day), Priestley-Taylor.
# srad presumably in MJ/m2/day, tmin/tmax in degrees C -- TODO confirm units
# against the data-preparation code.
peest <- function(srad, tmin, tmax) {
  # constants
  albedo <- 0.2
  vpd_cte <- 0.7
  # saturation vapour pressure curve parameters
  a_eslope <- 611.2
  b_eslope <- 17.67
  c_eslope <- 243.5
  tmean <- (tmin + tmax) / 2
  # net radiation
  rn <- (1 - albedo) * srad
  # slope of the saturation vapour pressure curve
  eslope <- a_eslope * b_eslope * c_eslope / (tmean + c_eslope)^2 *
    exp(b_eslope * tmean / (tmean + c_eslope))
  # estimate vpd
  esat_min <- 0.61120 * exp((17.67 * tmin) / (tmin + 243.5))
  esat_max <- 0.61120 * exp((17.67 * tmax) / (tmax + 243.5))
  vpd <- vpd_cte * (esat_max - esat_min)  # kPa
  # Priestley-Taylor
  pt_const <- 1.26
  pt_fact <- 1
  vpd_ref <- 1
  psycho <- 62
  rho_w <- 997
  rlat_ht <- 2.26E6
  pt_coef <- pt_fact * pt_const
  pt_coef <- 1 + (pt_coef - 1) * vpd / vpd_ref
  # *10^6: MJ -> J; /rlat_ht: latent heat flux -> water flux;
  # *100/rho_w: kg/m^2 -> cm; trailing *10: cm -> mm
  et_max <- (pt_coef * rn * eslope / (eslope + psycho) * 10^6 / rlat_ht * 100 / rho_w) * 10
  return(et_max)
}

# The two functions below estimate ea/ep based on Jones (1987).

# One-day soil water balance step: returns available water, evaporative
# demand, ea/ep ratio, precipitation and runoff as a one-row data.frame.
eabyep_calc <- function(soilcp = 100, cropfc = 1, avail = 50, prec, evap) {
  avail <- min(c(avail, soilcp))
  eratio <- eabyep(soilcp, avail)
  demand <- eratio * cropfc * evap
  result <- avail + prec - demand
  runoff <- result - soilcp
  avail <- min(c(soilcp, result))
  avail <- max(c(avail, 0))
  runoff <- max(c(runoff, 0))
  data.frame(AVAIL = avail, DEMAND = demand, ERATIO = eratio, prec = prec, RUNOFF = runoff)
}

# ea/ep (actual to potential evapotranspiration) ratio, capped to [0, 1]
eabyep <- function(soilcp, avail) {
  percwt <- min(c(100, avail / soilcp * 100))
  percwt <- max(c(1, percwt))
  min(c(percwt / (97 - 3.868 * sqrt(soilcp)), 1))
}

# Run the daily water balance over every row of out_all (requires columns
# srad, tmin, tmax, prec); adds Etmax, AVAIL, ERATIO, RUNOFF, DEMAND and
# CUM_prec columns and returns the augmented data frame.
watbal_wrapper <- function(out_all, soilcp) {
  out_all$Etmax <- out_all$AVAIL <- out_all$ERATIO <- out_all$RUNOFF <-
    out_all$DEMAND <- out_all$CUM_prec <- NA
  for (d in seq_len(nrow(out_all))) {
    out_all$Etmax[d] <- peest(out_all$srad[d], out_all$tmin[d], out_all$tmax[d])
    if (d == 1) {
      # First day starts from an empty soil profile
      out_all$CUM_prec[d] <- out_all$prec[d]
      sfact <- eabyep_calc(soilcp = soilcp, cropfc = 1, avail = 0,
                           prec = out_all$prec[d], evap = out_all$Etmax[d])
    } else {
      out_all$CUM_prec[d] <- out_all$CUM_prec[d - 1] + out_all$prec[d]
      sfact <- eabyep_calc(soilcp = soilcp, cropfc = 1, avail = out_all$AVAIL[d - 1],
                           prec = out_all$prec[d], evap = out_all$Etmax[d])
    }
    out_all$AVAIL[d] <- sfact$AVAIL
    out_all$ERATIO[d] <- sfact$ERATIO
    out_all$RUNOFF[d] <- sfact$RUNOFF
    out_all$DEMAND[d] <- sfact$DEMAND
  }
  return(out_all)
}

# Number of water-stressed days (ERATIO below e_thresh) within the season
calc_wsdays <- function(ERATIO, season_ini = 1, season_end = 365, e_thresh = 0.3) {
  length(which(ERATIO[season_ini:season_end] < e_thresh))
}
calc_wsdaysCMP <- compiler::cmpfun(calc_wsdays)

### HTS1, HTS2, LETHAL: heat stress using tmax ***
### (Seasonal version; overrides the whole-series calc_hts defined above.)
calc_hts <- function(tmax, season_ini = 1, season_end = 365, t_thresh = 35) {
  length(which(tmax[season_ini:season_end] >= t_thresh))
}
calc_htsCMP <- compiler::cmpfun(calc_hts)

### CD: crop duration; returns the excess of the seasonal mean temperature
### over t_thresh, or 0 when the mean stays below the threshold ***
calc_cdur <- function(TMEAN, season_ini = 1, season_end = 365, t_thresh = 35) {
  tmean <- mean(TMEAN[season_ini:season_end], na.rm = TRUE)
  if (tmean > t_thresh) tmean - t_thresh else 0
}
calc_cdurCMP <- compiler::cmpfun(calc_cdur)

# DS2: max number of consecutive days with Ea/Ep < e_thresh
calc_cons_wsdays <- function(x, season_ini = 1, season_end = 365, e_thresh = 0.4) {
  cdd <- 0; cdd_seq <- c()
  for (i_x in season_ini:season_end) {
    if (x$ERATIO[i_x] < e_thresh) {
      cdd <- cdd + 1
    } else {
      cdd_seq <- c(cdd_seq, cdd)
      cdd <- 0
    }
  }
  cdd_seq <- c(cdd_seq, cdd)  # close the run still open at season end
  max(cdd_seq)
}

# ATT: accumulated thermal time with capped top (base tb, optimum to)
calc_att <- function(x, season_ini = 1, season_end = 365, tb = 10, to = 20) {
  x$TMEAN <- (x$tmin + x$tmax) * 0.5
  att <- sapply(x$TMEAN[season_ini:season_end], ttfun, tb, to)
  sum(att, na.rm = TRUE)
}

# Daily thermal time: 0 at or below tb, linear between tb and to, capped at
# to - tb at or above to.
# Fix: NA input now returns NA (previously if(NA) raised an error even
# though callers use sum(..., na.rm = TRUE)).
ttfun <- function(tmean, tb, to) {
  if (is.na(tmean)) return(NA_real_)
  if (tmean <= tb) {
    0
  } else if (tmean >= to) {
    to - tb
  } else {
    tmean - tb
  }
}

# DLOSS: duration loss -- baseline duration dur_b minus the number of days
# needed (from season_ini) to accumulate the baseline thermal time att_b.
calc_dloss <- function(x, season_ini, dur_b = 110, att_b = 5000, tb = 10, to = 20) {
  x$TMEAN <- (x$tmin + x$tmax) * 0.5
  att <- sapply(x$TMEAN[season_ini:(nrow(x))], ttfun, tb, to)
  att <- cumsum(att)
  estdur <- length(att[which(att < att_b)])
  dur_b - estdur
}

# WES: wet early season -- no. of days with RUNOFF above r_thresh between
# sowing and anthesis (i.e. frequency of days the profile is above field cap.)
calc_wes <- function(x, season_ini, season_end, r_thresh = 1) {
  length(which(x$RUNOFF[season_ini:season_end] > r_thresh))
}

# BADSOW: no. of days in the +/-15 day sowing window centered at season_ini
# with 0.05*soilcp < AVAIL < 0.9*soilcp; fewer than 3 such days means the
# crop runs into trouble.
calc_badsow <- function(x, season_ini, soilcp) {
  sow_i <- max(season_ini - 15, 1)
  sow_f <- min(season_ini + 15, 365)
  x <- x[sow_i:sow_f, ]
  length(which(x$AVAIL > (0.05 * soilcp) & x$AVAIL < (0.9 * soilcp)))
}

# BADHAR: no. of days in the harvest window (25 days after season_end) with
# AVAIL < 0.85*soilcp; fewer than 3 such days means the crop runs into trouble.
calc_badhar <- function(x, season_end, soilcp) {
  har_i <- season_end
  har_f <- min(har_i + 25, 365)
  x <- x[har_i:har_f, ]
  length(which(x$AVAIL < (0.85 * soilcp)))
}
\name{write.px}
\alias{write.px}
\title{ Write a PC-Axis file }
\description{
This function writes an object of class \code{px} to a PC-Axis file
}
\usage{
write.px(obj.px, filename, heading = NULL, stub = NULL, keys = NULL,
         write.na = FALSE, write.zero = FALSE, fileEncoding = "ISO-8859-1")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{obj.px}{The name of an object of class \code{px}}
  \item{filename}{The name of the PC-Axis file to create}
  \item{heading}{An optional character vector with the names of variables in the HEADING part of the output file}
  \item{stub}{An optional character vector with the names of the variables in the STUB part of the output file}
  \item{keys}{An optional character vector indicating the variables used as keys}
  \item{write.na}{Whether to write rows with NA values (if keys are used)}
  \item{write.zero}{Whether to write rows with 0 values (if keys are used)}
  \item{fileEncoding}{A character string describing the encoding to use in the px file (see \code{\link{iconv}} for details)}
}
\details{
The function can write either regular files or files with KEYS.

For regular files, by default, the output file will have a single variable in
the HEADING part of the file (columns in the matrix). It is possible to
override the default by providing the \code{heading} and \code{stub}
parameters. These are optional, non-overlapping, exhaustive character vectors
of names of variables in the output matrix.

It is possible to write files with KEYS, which could help reduce the final
file size for large and sparse datasets. In such a case, it is possible to
indicate whether to write rows with all values equal to 0 or NA or not (via
arguments \code{write.zero} and \code{write.na}).
}
\references{
\url{http://www.scb.se/upload/PC-Axis/Support/Documents/PC-Axis_fileformat.pdf}
}
\author{
Francisco J. Viciana Fernández, Oscar Perpiñan Lamigueiro, Carlos J. Gil Bellosta
}
\seealso{
\code{\link{read.px}}, \code{\link{as.data.frame.px}}, \code{\link{as.array.px}} \code{\link{iconv}}
}
\examples{
opx1 <- read.px(system.file("extdata", "example.px", package = "pxR"))
\dontrun{
write.px(opx1, file = "opx.px")
write.px(opx1, file = "opx.px", heading = c("sexo", "edad"), stub = "municipios")
write.px(opx1, filename = "opx.px", keys = c("municipios", "edad"))
}
}
\keyword{ manip }
/man/write.px.Rd
no_license
uthink-git/pxR
R
false
false
2,490
rd
\name{write.px}
\alias{write.px}
\title{ Write a PC-Axis file }
\description{
This function writes an object of class \code{px} to a PC-Axis file
}
\usage{
write.px(obj.px, filename, heading = NULL, stub = NULL, keys = NULL,
         write.na = FALSE, write.zero = FALSE, fileEncoding = "ISO-8859-1")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{obj.px}{The name of an object of class \code{px}}
  \item{filename}{The name of the PC-Axis file to create}
  \item{heading}{An optional character vector with the names of variables in the HEADING part of the output file}
  \item{stub}{An optional character vector with the names of the variables in the STUB part of the output file}
  \item{keys}{An optional character vector indicating the variables used as keys}
  \item{write.na}{Whether to write rows with NA values (if keys are used)}
  \item{write.zero}{Whether to write rows with 0 values (if keys are used)}
  \item{fileEncoding}{A character string describing the encoding to use in the px file (see \code{\link{iconv}} for details)}
}
\details{
The function can write either regular files or files with KEYS.

For regular files, by default, the output file will have a single variable in
the HEADING part of the file (columns in the matrix). It is possible to
override the default by providing the \code{heading} and \code{stub}
parameters. These are optional, non-overlapping, exhaustive character vectors
of names of variables in the output matrix.

It is possible to write files with KEYS, which could help reduce the final
file size for large and sparse datasets. In such a case, it is possible to
indicate whether to write rows with all values equal to 0 or NA or not (via
arguments \code{write.zero} and \code{write.na}).
}
\references{
\url{http://www.scb.se/upload/PC-Axis/Support/Documents/PC-Axis_fileformat.pdf}
}
\author{
Francisco J. Viciana Fernández, Oscar Perpiñan Lamigueiro, Carlos J. Gil Bellosta
}
\seealso{
\code{\link{read.px}}, \code{\link{as.data.frame.px}}, \code{\link{as.array.px}} \code{\link{iconv}}
}
\examples{
opx1 <- read.px(system.file("extdata", "example.px", package = "pxR"))
\dontrun{
write.px(opx1, file = "opx.px")
write.px(opx1, file = "opx.px", heading = c("sexo", "edad"), stub = "municipios")
write.px(opx1, filename = "opx.px", keys = c("municipios", "edad"))
}
}
\keyword{ manip }
writeMotion <- function(x, file, digits = NULL, na="NaN", ...){ # Create write mat write_mat <- NULL tmat_only <- FALSE if(is.matrix(x)){ write_mat <- x }else{ # If only object in list is tmat if(length(x) == 1 && names(x)[1] == 'tmat') tmat_only <- TRUE for(xn in names(x)){ # Internal field used with filterRows if(xn %in% c('replace.rows', 'n.iter')) next if('tmat' %in% class(x[[xn]]) || xn == 'tmat'){ # Convert array of transformation matrices to matrix tmat <- as.data.frame(tmarr2mat(x[[xn]])) if(!is.null(digits)) tmat <- signif(tmat, digits) # Add columns if(is.null(write_mat)){ write_mat <- tmat }else{ write_mat <- cbind(write_mat, tmat) } }else if('xyz' %in% class(x[[xn]]) || xn == 'xyz'){ # Convert array of points to matrix xyz <- as.data.frame(arr2mat(x[[xn]])) if(!is.null(digits)) xyz <- signif(xyz, digits) # Add columns if(is.null(write_mat)){ write_mat <- xyz }else{ write_mat <- cbind(write_mat, xyz) } }else{ # If is list, unlist if(is.list(x[[xn]])) x[[xn]] <- unlist(x[[xn]]) # Need to test whether values are numeric first (including case of some or all NAs) if(!is.null(digits) && sum(!suppressWarnings(is.numeric(x[[xn]]))) == 0 && sum(is.na(x[[xn]])) == 0) x[[xn]] <- signif(x[[xn]], digits) # Add columns if(is.null(write_mat)){ write_mat <- as.data.frame(matrix(x[[xn]], ncol=1, dimnames=list(NULL, xn))) }else{ write_mat <- cbind(write_mat, as.data.frame(matrix(x[[xn]], ncol=1, dimnames=list(NULL, xn)))) } } } } # If only tmat set NAs to NaN so that the file is read properly by Maya? #print(tmat_only) #if(tmat_only){ na_handling <- "NaN" }else{ na_handling <- "NA" } if(grepl('[.]csv$', file)) write.csv(x=write_mat, file=file, row.names=FALSE, na=na, ...) }
/R/writeMotion.R
no_license
aaronolsen/matools
R
false
false
1,886
r
writeMotion <- function(x, file, digits = NULL, na="NaN", ...){ # Create write mat write_mat <- NULL tmat_only <- FALSE if(is.matrix(x)){ write_mat <- x }else{ # If only object in list is tmat if(length(x) == 1 && names(x)[1] == 'tmat') tmat_only <- TRUE for(xn in names(x)){ # Internal field used with filterRows if(xn %in% c('replace.rows', 'n.iter')) next if('tmat' %in% class(x[[xn]]) || xn == 'tmat'){ # Convert array of transformation matrices to matrix tmat <- as.data.frame(tmarr2mat(x[[xn]])) if(!is.null(digits)) tmat <- signif(tmat, digits) # Add columns if(is.null(write_mat)){ write_mat <- tmat }else{ write_mat <- cbind(write_mat, tmat) } }else if('xyz' %in% class(x[[xn]]) || xn == 'xyz'){ # Convert array of points to matrix xyz <- as.data.frame(arr2mat(x[[xn]])) if(!is.null(digits)) xyz <- signif(xyz, digits) # Add columns if(is.null(write_mat)){ write_mat <- xyz }else{ write_mat <- cbind(write_mat, xyz) } }else{ # If is list, unlist if(is.list(x[[xn]])) x[[xn]] <- unlist(x[[xn]]) # Need to test whether values are numeric first (including case of some or all NAs) if(!is.null(digits) && sum(!suppressWarnings(is.numeric(x[[xn]]))) == 0 && sum(is.na(x[[xn]])) == 0) x[[xn]] <- signif(x[[xn]], digits) # Add columns if(is.null(write_mat)){ write_mat <- as.data.frame(matrix(x[[xn]], ncol=1, dimnames=list(NULL, xn))) }else{ write_mat <- cbind(write_mat, as.data.frame(matrix(x[[xn]], ncol=1, dimnames=list(NULL, xn)))) } } } } # If only tmat set NAs to NaN so that the file is read properly by Maya? #print(tmat_only) #if(tmat_only){ na_handling <- "NaN" }else{ na_handling <- "NA" } if(grepl('[.]csv$', file)) write.csv(x=write_mat, file=file, row.names=FALSE, na=na, ...) }
if (!exists("local_directory")) { local_directory <- "~/Sales_Forecast_UDJ" source(paste(local_directory,"R/library.R",sep="/")) source(paste(local_directory,"R/heatmapOutput.R",sep="/")) } # To be able to upload data up to 30MB options(shiny.maxRequestSize=30*1024^2) options(rgl.useNULL=TRUE) options(scipen = 50) shinyServer(function(input, output,session) { ############################################################ # STEP 1: Read the data read_dataset <- reactive({ input$datafile_name_coded # First read the pre-loaded file, and if the user loads another one then replace # ProjectData with the filethe user loads ProjectData <- read.csv(paste("../data", paste(input$datafile_name_coded, "csv", sep="."), sep = "/"), sep=";", dec=",") # this contains only the matrix ProjectData updateSelectInput(session, "dependent_variable","Dependent variable", colnames(ProjectData), selected=NULL) updateSelectInput(session, "independent_variables","Independent variables", colnames(ProjectData), selected=NULL) updateSelectInput(session, "hist_var","Select Variable", colnames(ProjectData), selected=colnames(ProjectData)[1]) updateSelectInput(session, "scatter1","x-axis", colnames(ProjectData), selected=colnames(ProjectData)[1]) updateSelectInput(session, "scatter2","y-axis", colnames(ProjectData), selected=colnames(ProjectData)[1]) updateSelectInput(session, "residual_scatter1","Select Variable", colnames(ProjectData), selected=colnames(ProjectData)[1]) ProjectData }) user_inputs <- reactive({ input$datafile_name_coded input$dependent_variable input$independent_variables list(ProjectData = read_dataset(), dependent_variable = input$dependent_variable, independent_variables = setdiff(input$independent_variables,input$dependent_variable)) }) ############################################################ # STEP 2: create a "reactive function" as well as an "output" # for each of the R code chunks in the report/slides to use in the web application. 
# These also correspond to the tabs defined in the ui.R file. # The "reactive function" recalculates everything the tab needs whenever any of the inputs # used (in the left pane of the application) for the calculations in that tab is modified by the user # The "output" is then passed to the ui.r file to appear on the application page/ ########## The Parameters Tab # first the reactive function doing all calculations when the related inputs were modified by the user the_parameters_tab<-reactive({ # list the user inputs the tab depends on (easier to read the code) input$datafile_name_coded input$dependent_variable input$independent_variables input$action_parameters all_inputs <- user_inputs() ProjectData <- all_inputs$ProjectData dependent_variable <- all_inputs$dependent_variable independent_variables <- all_inputs$independent_variables if (is.null(dependent_variable) | is.null(independent_variables)){ res <- matrix(0,ncol=1) colnames(res) <- "Waiting for variable selection" return(res) } allparameters=c(nrow(ProjectData),ncol(ProjectData), colnames(ProjectData), dependent_variable, independent_variables) allparameters <- matrix(allparameters,ncol=1) rownames(allparameters)<-c("Number of Observations", "Number of Variables", paste("Variable:",1:ncol(ProjectData)), "Dependent Variable", paste("Independent Variable:",1:length(independent_variables))) colnames(allparameters)<-NULL allparameters }) # Now pass to ui.R what it needs to display this tab output$parameters<-renderTable({ the_parameters_tab() }) ########## The Summary Tab # first the reactive function doing all calculations when the related inputs were modified by the user the_summary_tab<-reactive({ # list the user inputs the tab depends on (easier to read the code) input$datafile_name_coded input$dependent_variable input$independent_variables input$action_summary all_inputs <- user_inputs() ProjectData <- all_inputs$ProjectData dependent_variable <- all_inputs$dependent_variable independent_variables <- 
all_inputs$independent_variables my_summary(ProjectData) }) # Now pass to ui.R what it needs to display this tab output$summary <- renderTable({ the_summary_tab() }) ########## The Histograms Tab # first the reactive function doing all calculations when the related inputs were modified by the user the_histogram_tab<-reactive({ # list the user inputs the tab depends on (easier to read the code) input$datafile_name_coded input$dependent_variable input$independent_variables input$hist_var input$action_Histograms all_inputs <- user_inputs() ProjectData <- all_inputs$ProjectData dependent_variable <- all_inputs$dependent_variable independent_variables <- all_inputs$independent_variables ProjectData <- data.matrix(read_dataset() )# call the data reading reactive FUNCTION (hence we need "()" ) if (!length(intersect(colnames(ProjectData), input$hist_var))){ res = NULL } else { res = ProjectData[,input$hist_var, drop=F] } res }) # Now pass to ui.R what it needs to display this tab output$histogram<-renderPlot({ data_to_plot = the_histogram_tab() if (!length(data_to_plot)) { hist(0, main = "VARIABLE DOES NOT EXIST" ) } else { hist(data_to_plot, main = paste("Histogram of", as.character(input$hist_var), sep=" "), xlab=as.character(input$hist_var),breaks = max(5,round(length(data_to_plot)/5))) } }) ########## The Correlations Tab # first the reactive function doing all calculations when the related inputs were modified by the user the_correlation_tab<-reactive({ # list the user inputs the tab depends on (easier to read the code) input$datafile_name_coded input$dependent_variable input$independent_variables input$action_correlations all_inputs <- user_inputs() ProjectData <- all_inputs$ProjectData dependent_variable <- all_inputs$dependent_variable independent_variables <- all_inputs$independent_variables if ((length(intersect(colnames(ProjectData),independent_variables)) & length(intersect(colnames(ProjectData),dependent_variable)))){ 
data_reorder=cbind(ProjectData[,independent_variables,drop=F],ProjectData[,dependent_variable,drop=F]) } else { data_reorder=ProjectData[,1,drop=F] } thecor=cor(data_reorder) colnames(thecor)<-colnames(thecor) rownames(thecor)<-rownames(thecor) thecor }) # Now pass to ui.R what it needs to display this tab output$correlation<-renderHeatmap({ the_correlation_tab() }) ########## The Scatter Plots Tab # first the reactive function doing all calculations when the related inputs were modified by the user the_scatter_plots_tab<-reactive({ # list the user inputs the tab depends on (easier to read the code) input$datafile_name_coded input$dependent_variable input$independent_variables input$scatter1 input$scatter2 input$action_scatterplots all_inputs <- user_inputs() ProjectData <- all_inputs$ProjectData dependent_variable <- all_inputs$dependent_variable independent_variables <- all_inputs$independent_variables ProjectData <- data.matrix(read_dataset() )# call the data reading reactive FUNCTION (hence we need "()" ) if ((length(intersect(colnames(ProjectData),independent_variables)) & length(intersect(colnames(ProjectData),dependent_variable)))){ res = ProjectData[, c(input$scatter1,input$scatter2)] } else { res = 0*ProjectData[,1:2] colnames(res)<- c("Not Valid Variable Name", "Not Valid Variable Name") } res }) # Now pass to ui.R what it needs to display this tab output$scatter<-renderPlot({ thedata <- the_scatter_plots_tab() plot(thedata[,1], thedata[,2], xlab=colnames(thedata)[1], ylab=colnames(thedata)[2]) }) ########## The Regression Output Tab # first the reactive function doing all calculations when the related inputs were modified by the user the_regression_tab<-reactive({ # list the user inputs the tab depends on (easier to read the code) input$datafile_name_coded input$dependent_variable input$independent_variables input$action_regression all_inputs <- user_inputs() ProjectData <- all_inputs$ProjectData dependent_variable <- all_inputs$dependent_variable 
independent_variables <- all_inputs$independent_variables if ((length(intersect(colnames(ProjectData),independent_variables)) & length(intersect(colnames(ProjectData),dependent_variable)))){ if (length(independent_variables) == 1){ regression_model=paste(paste(dependent_variable, "~",sep=""), independent_variables,sep="") } else { res=independent_variables[1] for (iter in 2:(length(independent_variables)-1)) res=paste(res,independent_variables[iter],sep="+") res=paste(res,tail(independent_variables,1),sep="+") regression_model = paste(dependent_variable, res, sep="~") } the_fit<-lm(regression_model,data=ProjectData) } else { regression_model = paste(paste(colnames(ProjectData)[1], "~",sep=""), colnames(ProjectData)[2],sep="") the_fit<-lm(regression_model,data=ProjectData) } the_fit }) # Now pass to ui.R what it needs to display this tab output$regression_output <- renderTable({ the_fit = the_regression_tab() summary(the_fit) }) ########## The Residuals plot Tab # first the reactive function doing all calculations when the related inputs were modified by the user the_residuals_plot_tab<-reactive({ # list the user inputs the tab depends on (easier to read the code) input$datafile_name_coded input$dependent_variable input$independent_variables input$action_residuals input$action_residualshist all_inputs <- user_inputs() ProjectData <- all_inputs$ProjectData dependent_variable <- all_inputs$dependent_variable independent_variables <- all_inputs$independent_variables the_fit = the_regression_tab() residuals(the_fit) }) # Now pass to ui.R what it needs to display this tab output$residuals_plot<-renderPlot({ plot(the_residuals_plot_tab(),xlab="Observations",ylab="Residuals",main="The Residuals") }) ########## The Residuals Histogram Tab # first the reactive function doing all calculations when the related inputs were modified by the user # this one uses the same reactive function as the previous tab... 
# Now pass to ui.R what it needs to display this tab output$residuals_hist<-renderPlot({ dataused = the_residuals_plot_tab() hist(dataused, main = "Histogram of the Residuals", breaks = max(5,round(length(dataused)/5))) }) ########## The Residuals Scatter Plots Tab # first the reactive function doing all calculations when the related inputs were modified by the user the_residuals_scatter_tab<-reactive({ # list the user inputs the tab depends on (easier to read the code) input$datafile_name_coded input$dependent_variable input$independent_variables input$residual_scatter1 input$action_residuals_scatter all_inputs <- user_inputs() ProjectData <- all_inputs$ProjectData dependent_variable <- all_inputs$dependent_variable independent_variables <- all_inputs$independent_variables ProjectData <- data.matrix(read_dataset() )# call the data reading reactive FUNCTION (hence we need "()" ) the_residuals <- the_residuals_plot_tab() if (length(intersect(colnames(ProjectData),input$residual_scatter1))){ res = cbind(ProjectData[, input$residual_scatter1], the_residuals) colnames(res)<- c(input$residual_scatter1, "Residuals") } else { res = 0*ProjectData[,1:2] colnames(res)<- c("Not Valid Variable Name", "Not Valid Variable Name") } res }) # Now pass to ui.R what it needs to display this tab output$residuals_scatter<-renderPlot({ thedata <- the_residuals_scatter_tab() plot(thedata[,1],thedata[,2],xlab=colnames(thedata)[1],ylab=colnames(thedata)[2]) }) # Now the report and slides # first the reactive function doing all calculations when the related inputs were modified by the user the_slides_and_report <-reactive({ input$datafile_name_coded input$dependent_variable input$independent_variables all_inputs <- user_inputs() ProjectData <- all_inputs$ProjectData dependent_variable <- all_inputs$dependent_variable independent_variables <- all_inputs$independent_variables ############################################################# # A list of all the (SAME) parameters that the report 
takes from RunStudy.R list(ProjectData = ProjectData, independent_variables = independent_variables, dependent_variable = dependent_variable) }) # The new report output$report = downloadHandler( filename <- function() {paste(paste('SALES_Report',Sys.time() ),'.html')}, content = function(file) { filename.Rmd <- paste('SALES_Report', 'Rmd', sep=".") filename.md <- paste('SALES_Report', 'md', sep=".") filename.html <- paste('SALES_Report', 'html', sep=".") ############################################################# # All the (SAME) parameters that the report takes from RunStudy.R reporting_data<- the_slides_and_report() ProjectData<-reporting_data$ProjectData dependent_variable <- reporting_data$dependent_variable independent_variables <- reporting_data$independent_variables ############################################################# if (file.exists(filename.html)) file.remove(filename.html) unlink(".cache", recursive=TRUE) unlink("assets", recursive=TRUE) unlink("figures", recursive=TRUE) file.copy(paste(local_directory,"doc/SALES_Report.Rmd",sep="/"),filename.Rmd,overwrite=T) out = knit2html(filename.Rmd,quiet=TRUE) unlink(".cache", recursive=TRUE) unlink("assets", recursive=TRUE) unlink("figures", recursive=TRUE) file.remove(filename.Rmd) file.remove(filename.md) file.rename(out, file) # move pdf to file for downloading }, contentType = 'application/pdf' ) # The new slide output$slide = downloadHandler( filename <- function() {paste(paste('SALES_Slides',Sys.time() ),'.html')}, content = function(file) { filename.Rmd <- paste('SALES_Slides', 'Rmd', sep=".") filename.md <- paste('SALES_Slides', 'md', sep=".") filename.html <- paste('SALES_Slides', 'html', sep=".") ############################################################# # All the (SAME) parameters that the report takes from RunStudy.R reporting_data<- the_slides_and_report() ProjectData<-reporting_data$ProjectData dependent_variable <- reporting_data$dependent_variable independent_variables <- 
reporting_data$independent_variables ############################################################# if (file.exists(filename.html)) file.remove(filename.html) unlink(".cache", recursive=TRUE) unlink("assets", recursive=TRUE) unlink("figures", recursive=TRUE) file.copy(paste(local_directory,"doc/SALES_Slides.Rmd",sep="/"),filename.Rmd,overwrite=T) slidify(filename.Rmd) unlink(".cache", recursive=TRUE) unlink("assets", recursive=TRUE) unlink("figures", recursive=TRUE) file.remove(filename.Rmd) file.remove(filename.md) file.rename(filename.html, file) # move pdf to file for downloading }, contentType = 'application/pdf' ) })
/tools/server.R
permissive
npunwani/Sales_Forecast_UDJ
R
false
false
16,460
r
if (!exists("local_directory")) { local_directory <- "~/Sales_Forecast_UDJ" source(paste(local_directory,"R/library.R",sep="/")) source(paste(local_directory,"R/heatmapOutput.R",sep="/")) } # To be able to upload data up to 30MB options(shiny.maxRequestSize=30*1024^2) options(rgl.useNULL=TRUE) options(scipen = 50) shinyServer(function(input, output,session) { ############################################################ # STEP 1: Read the data read_dataset <- reactive({ input$datafile_name_coded # First read the pre-loaded file, and if the user loads another one then replace # ProjectData with the filethe user loads ProjectData <- read.csv(paste("../data", paste(input$datafile_name_coded, "csv", sep="."), sep = "/"), sep=";", dec=",") # this contains only the matrix ProjectData updateSelectInput(session, "dependent_variable","Dependent variable", colnames(ProjectData), selected=NULL) updateSelectInput(session, "independent_variables","Independent variables", colnames(ProjectData), selected=NULL) updateSelectInput(session, "hist_var","Select Variable", colnames(ProjectData), selected=colnames(ProjectData)[1]) updateSelectInput(session, "scatter1","x-axis", colnames(ProjectData), selected=colnames(ProjectData)[1]) updateSelectInput(session, "scatter2","y-axis", colnames(ProjectData), selected=colnames(ProjectData)[1]) updateSelectInput(session, "residual_scatter1","Select Variable", colnames(ProjectData), selected=colnames(ProjectData)[1]) ProjectData }) user_inputs <- reactive({ input$datafile_name_coded input$dependent_variable input$independent_variables list(ProjectData = read_dataset(), dependent_variable = input$dependent_variable, independent_variables = setdiff(input$independent_variables,input$dependent_variable)) }) ############################################################ # STEP 2: create a "reactive function" as well as an "output" # for each of the R code chunks in the report/slides to use in the web application. 
# These also correspond to the tabs defined in the ui.R file. # The "reactive function" recalculates everything the tab needs whenever any of the inputs # used (in the left pane of the application) for the calculations in that tab is modified by the user # The "output" is then passed to the ui.r file to appear on the application page/ ########## The Parameters Tab # first the reactive function doing all calculations when the related inputs were modified by the user the_parameters_tab<-reactive({ # list the user inputs the tab depends on (easier to read the code) input$datafile_name_coded input$dependent_variable input$independent_variables input$action_parameters all_inputs <- user_inputs() ProjectData <- all_inputs$ProjectData dependent_variable <- all_inputs$dependent_variable independent_variables <- all_inputs$independent_variables if (is.null(dependent_variable) | is.null(independent_variables)){ res <- matrix(0,ncol=1) colnames(res) <- "Waiting for variable selection" return(res) } allparameters=c(nrow(ProjectData),ncol(ProjectData), colnames(ProjectData), dependent_variable, independent_variables) allparameters <- matrix(allparameters,ncol=1) rownames(allparameters)<-c("Number of Observations", "Number of Variables", paste("Variable:",1:ncol(ProjectData)), "Dependent Variable", paste("Independent Variable:",1:length(independent_variables))) colnames(allparameters)<-NULL allparameters }) # Now pass to ui.R what it needs to display this tab output$parameters<-renderTable({ the_parameters_tab() }) ########## The Summary Tab # first the reactive function doing all calculations when the related inputs were modified by the user the_summary_tab<-reactive({ # list the user inputs the tab depends on (easier to read the code) input$datafile_name_coded input$dependent_variable input$independent_variables input$action_summary all_inputs <- user_inputs() ProjectData <- all_inputs$ProjectData dependent_variable <- all_inputs$dependent_variable independent_variables <- 
all_inputs$independent_variables my_summary(ProjectData) }) # Now pass to ui.R what it needs to display this tab output$summary <- renderTable({ the_summary_tab() }) ########## The Histograms Tab # first the reactive function doing all calculations when the related inputs were modified by the user the_histogram_tab<-reactive({ # list the user inputs the tab depends on (easier to read the code) input$datafile_name_coded input$dependent_variable input$independent_variables input$hist_var input$action_Histograms all_inputs <- user_inputs() ProjectData <- all_inputs$ProjectData dependent_variable <- all_inputs$dependent_variable independent_variables <- all_inputs$independent_variables ProjectData <- data.matrix(read_dataset() )# call the data reading reactive FUNCTION (hence we need "()" ) if (!length(intersect(colnames(ProjectData), input$hist_var))){ res = NULL } else { res = ProjectData[,input$hist_var, drop=F] } res }) # Now pass to ui.R what it needs to display this tab output$histogram<-renderPlot({ data_to_plot = the_histogram_tab() if (!length(data_to_plot)) { hist(0, main = "VARIABLE DOES NOT EXIST" ) } else { hist(data_to_plot, main = paste("Histogram of", as.character(input$hist_var), sep=" "), xlab=as.character(input$hist_var),breaks = max(5,round(length(data_to_plot)/5))) } }) ########## The Correlations Tab # first the reactive function doing all calculations when the related inputs were modified by the user the_correlation_tab<-reactive({ # list the user inputs the tab depends on (easier to read the code) input$datafile_name_coded input$dependent_variable input$independent_variables input$action_correlations all_inputs <- user_inputs() ProjectData <- all_inputs$ProjectData dependent_variable <- all_inputs$dependent_variable independent_variables <- all_inputs$independent_variables if ((length(intersect(colnames(ProjectData),independent_variables)) & length(intersect(colnames(ProjectData),dependent_variable)))){ 
data_reorder=cbind(ProjectData[,independent_variables,drop=F],ProjectData[,dependent_variable,drop=F]) } else { data_reorder=ProjectData[,1,drop=F] } thecor=cor(data_reorder) colnames(thecor)<-colnames(thecor) rownames(thecor)<-rownames(thecor) thecor }) # Now pass to ui.R what it needs to display this tab output$correlation<-renderHeatmap({ the_correlation_tab() }) ########## The Scatter Plots Tab # first the reactive function doing all calculations when the related inputs were modified by the user the_scatter_plots_tab<-reactive({ # list the user inputs the tab depends on (easier to read the code) input$datafile_name_coded input$dependent_variable input$independent_variables input$scatter1 input$scatter2 input$action_scatterplots all_inputs <- user_inputs() ProjectData <- all_inputs$ProjectData dependent_variable <- all_inputs$dependent_variable independent_variables <- all_inputs$independent_variables ProjectData <- data.matrix(read_dataset() )# call the data reading reactive FUNCTION (hence we need "()" ) if ((length(intersect(colnames(ProjectData),independent_variables)) & length(intersect(colnames(ProjectData),dependent_variable)))){ res = ProjectData[, c(input$scatter1,input$scatter2)] } else { res = 0*ProjectData[,1:2] colnames(res)<- c("Not Valid Variable Name", "Not Valid Variable Name") } res }) # Now pass to ui.R what it needs to display this tab output$scatter<-renderPlot({ thedata <- the_scatter_plots_tab() plot(thedata[,1], thedata[,2], xlab=colnames(thedata)[1], ylab=colnames(thedata)[2]) }) ########## The Regression Output Tab # first the reactive function doing all calculations when the related inputs were modified by the user the_regression_tab<-reactive({ # list the user inputs the tab depends on (easier to read the code) input$datafile_name_coded input$dependent_variable input$independent_variables input$action_regression all_inputs <- user_inputs() ProjectData <- all_inputs$ProjectData dependent_variable <- all_inputs$dependent_variable 
independent_variables <- all_inputs$independent_variables if ((length(intersect(colnames(ProjectData),independent_variables)) & length(intersect(colnames(ProjectData),dependent_variable)))){ if (length(independent_variables) == 1){ regression_model=paste(paste(dependent_variable, "~",sep=""), independent_variables,sep="") } else { res=independent_variables[1] for (iter in 2:(length(independent_variables)-1)) res=paste(res,independent_variables[iter],sep="+") res=paste(res,tail(independent_variables,1),sep="+") regression_model = paste(dependent_variable, res, sep="~") } the_fit<-lm(regression_model,data=ProjectData) } else { regression_model = paste(paste(colnames(ProjectData)[1], "~",sep=""), colnames(ProjectData)[2],sep="") the_fit<-lm(regression_model,data=ProjectData) } the_fit }) # Now pass to ui.R what it needs to display this tab output$regression_output <- renderTable({ the_fit = the_regression_tab() summary(the_fit) }) ########## The Residuals plot Tab # first the reactive function doing all calculations when the related inputs were modified by the user the_residuals_plot_tab<-reactive({ # list the user inputs the tab depends on (easier to read the code) input$datafile_name_coded input$dependent_variable input$independent_variables input$action_residuals input$action_residualshist all_inputs <- user_inputs() ProjectData <- all_inputs$ProjectData dependent_variable <- all_inputs$dependent_variable independent_variables <- all_inputs$independent_variables the_fit = the_regression_tab() residuals(the_fit) }) # Now pass to ui.R what it needs to display this tab output$residuals_plot<-renderPlot({ plot(the_residuals_plot_tab(),xlab="Observations",ylab="Residuals",main="The Residuals") }) ########## The Residuals Histogram Tab # first the reactive function doing all calculations when the related inputs were modified by the user # this one uses the same reactive function as the previous tab... 
# Now pass to ui.R what it needs to display this tab output$residuals_hist<-renderPlot({ dataused = the_residuals_plot_tab() hist(dataused, main = "Histogram of the Residuals", breaks = max(5,round(length(dataused)/5))) }) ########## The Residuals Scatter Plots Tab # first the reactive function doing all calculations when the related inputs were modified by the user the_residuals_scatter_tab<-reactive({ # list the user inputs the tab depends on (easier to read the code) input$datafile_name_coded input$dependent_variable input$independent_variables input$residual_scatter1 input$action_residuals_scatter all_inputs <- user_inputs() ProjectData <- all_inputs$ProjectData dependent_variable <- all_inputs$dependent_variable independent_variables <- all_inputs$independent_variables ProjectData <- data.matrix(read_dataset() )# call the data reading reactive FUNCTION (hence we need "()" ) the_residuals <- the_residuals_plot_tab() if (length(intersect(colnames(ProjectData),input$residual_scatter1))){ res = cbind(ProjectData[, input$residual_scatter1], the_residuals) colnames(res)<- c(input$residual_scatter1, "Residuals") } else { res = 0*ProjectData[,1:2] colnames(res)<- c("Not Valid Variable Name", "Not Valid Variable Name") } res }) # Now pass to ui.R what it needs to display this tab output$residuals_scatter<-renderPlot({ thedata <- the_residuals_scatter_tab() plot(thedata[,1],thedata[,2],xlab=colnames(thedata)[1],ylab=colnames(thedata)[2]) }) # Now the report and slides # first the reactive function doing all calculations when the related inputs were modified by the user the_slides_and_report <-reactive({ input$datafile_name_coded input$dependent_variable input$independent_variables all_inputs <- user_inputs() ProjectData <- all_inputs$ProjectData dependent_variable <- all_inputs$dependent_variable independent_variables <- all_inputs$independent_variables ############################################################# # A list of all the (SAME) parameters that the report 
takes from RunStudy.R list(ProjectData = ProjectData, independent_variables = independent_variables, dependent_variable = dependent_variable) }) # The new report output$report = downloadHandler( filename <- function() {paste(paste('SALES_Report',Sys.time() ),'.html')}, content = function(file) { filename.Rmd <- paste('SALES_Report', 'Rmd', sep=".") filename.md <- paste('SALES_Report', 'md', sep=".") filename.html <- paste('SALES_Report', 'html', sep=".") ############################################################# # All the (SAME) parameters that the report takes from RunStudy.R reporting_data<- the_slides_and_report() ProjectData<-reporting_data$ProjectData dependent_variable <- reporting_data$dependent_variable independent_variables <- reporting_data$independent_variables ############################################################# if (file.exists(filename.html)) file.remove(filename.html) unlink(".cache", recursive=TRUE) unlink("assets", recursive=TRUE) unlink("figures", recursive=TRUE) file.copy(paste(local_directory,"doc/SALES_Report.Rmd",sep="/"),filename.Rmd,overwrite=T) out = knit2html(filename.Rmd,quiet=TRUE) unlink(".cache", recursive=TRUE) unlink("assets", recursive=TRUE) unlink("figures", recursive=TRUE) file.remove(filename.Rmd) file.remove(filename.md) file.rename(out, file) # move pdf to file for downloading }, contentType = 'application/pdf' ) # The new slide output$slide = downloadHandler( filename <- function() {paste(paste('SALES_Slides',Sys.time() ),'.html')}, content = function(file) { filename.Rmd <- paste('SALES_Slides', 'Rmd', sep=".") filename.md <- paste('SALES_Slides', 'md', sep=".") filename.html <- paste('SALES_Slides', 'html', sep=".") ############################################################# # All the (SAME) parameters that the report takes from RunStudy.R reporting_data<- the_slides_and_report() ProjectData<-reporting_data$ProjectData dependent_variable <- reporting_data$dependent_variable independent_variables <- 
reporting_data$independent_variables ############################################################# if (file.exists(filename.html)) file.remove(filename.html) unlink(".cache", recursive=TRUE) unlink("assets", recursive=TRUE) unlink("figures", recursive=TRUE) file.copy(paste(local_directory,"doc/SALES_Slides.Rmd",sep="/"),filename.Rmd,overwrite=T) slidify(filename.Rmd) unlink(".cache", recursive=TRUE) unlink("assets", recursive=TRUE) unlink("figures", recursive=TRUE) file.remove(filename.Rmd) file.remove(filename.md) file.rename(filename.html, file) # move pdf to file for downloading }, contentType = 'application/pdf' ) })
## the following functions will take advantage of the scoping rules of the R language ## and manipulate them to preserve state inside of an R object. ## This function creates a special "matrix" object that can cache its inverse. ## It creates a special "matrix", which is really a list containing a function to # set the value of the matrix # get the value of the matrix # set the inverse of the matrix # get the inverse of the matrix makeCacheMatrix <- function(x = matrix()) { m <- NULL set <- function(y) { x <<- y m <<- NULL } get <- function() x setinv <- function(inv) m <<- inv getinv <- function() m list(set = set, get = get, setinv = setinv, getinv = getinv) } ## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. ## If the inverse has already been calculated (and the matrix has not changed), ## then the cachesolve should retrieve the inverse from the cache. ## Otherwise, it calculates the inverse of the matrix and sets the inverse of the matrix in the cache via the setinv function. cacheSolve <- function(x, ...) { m <- x$getinv() if(!is.null(m)) { message("getting cached data") return(m) } data <- x$get() m <- solve(data, ...) x$setinv(m) m }
/R_programming/ProgrammingAssignment2/cachematrix.R
no_license
victoongo/LaddeR
R
false
false
1,292
r
## the following functions will take advantage of the scoping rules of the R language ## and manipulate them to preserve state inside of an R object. ## This function creates a special "matrix" object that can cache its inverse. ## It creates a special "matrix", which is really a list containing a function to # set the value of the matrix # get the value of the matrix # set the inverse of the matrix # get the inverse of the matrix makeCacheMatrix <- function(x = matrix()) { m <- NULL set <- function(y) { x <<- y m <<- NULL } get <- function() x setinv <- function(inv) m <<- inv getinv <- function() m list(set = set, get = get, setinv = setinv, getinv = getinv) } ## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. ## If the inverse has already been calculated (and the matrix has not changed), ## then the cachesolve should retrieve the inverse from the cache. ## Otherwise, it calculates the inverse of the matrix and sets the inverse of the matrix in the cache via the setinv function. cacheSolve <- function(x, ...) { m <- x$getinv() if(!is.null(m)) { message("getting cached data") return(m) } data <- x$get() m <- solve(data, ...) x$setinv(m) m }
library(ape) library(phangorn) library(phylobase) library(geiger) ########################################################################### setwd("/users/aesin/Desktop/Deer/Selection_analysis/Species_tree/") timetree <- read.tree("TimetreeOfLife.nwk") ## All tips ## all_keep_tips <- c("Ailuropoda_melanoleuca", "Bos_taurus", "Camelus_ferus", "Felis_catus", "Myotis_lucifugus", "Homo_sapiens", "Sus_scrofa", "Ovis_aries", "Capra_hircus", "Mus_musculus", "Oryctolagus_cuniculus", "Callithrix_jacchus", "Macaca_mulatta", "Papio_anubis", "Rattus_norvegicus", "Pteropus_vampyrus", "Odobenus_rosmarus", "Odocoileus_virginianus", "Alces_alces", "Cervus_elaphus", "Elaphurus_davidianus", "Cervus_nippon", "Rangifer_tarandus", "Dama_dama", "Moschus_berezovskii", "Equus_caballus", "Ceratotherium_simum", "Eptesicus_fuscus", "Panthera_tigris", "Capreolus_capreolus", "Pudu_puda", "Giraffa_camelopardalis", "Hydropotoes_inermis", "Cervus_albirostris", "Rucervus_duvaucelii", "Muntiacus_reevesi", "Vicugna_pacos") #"Dasypus_novemcinctus" for_dating_tree <- drop.tip(timetree, setdiff(timetree$tip.label, all_keep_tips)) ################################## ## No deer ## keep_tips <- c("Ailuropoda_melanoleuca", "Bos_taurus", "Camelus_ferus","Vicugna_pacos", "Felis_catus", "Myotis_lucifugus", "Homo_sapiens", "Sus_scrofa", "Ovis_aries", "Capra_hircus", "Mus_musculus", "Oryctolagus_cuniculus", "Callithrix_jacchus", "Macaca_mulatta", "Papio_anubis", "Rattus_norvegicus", "Pteropus_vampyrus", "Odobenus_rosmarus", "Moschus_berezovskii", "Equus_caballus", "Ceratotherium_simum", "Eptesicus_fuscus", "Panthera_tigris", "Giraffa_camelopardalis", "Vicugna_pacos") #"Dasypus_novemcinctus" time_prune_tree <- drop.tip(timetree, setdiff(timetree$tip.label, keep_tips)) time_prune_tree$edge.length[which(time_prune_tree$edge.length == 0)] <- mean(time_prune_tree$edge.length) ################################## ## Change position of carnivores to match Meredith (2011) ## # Isolate carnivore subtree # carnivore_node 
<- getMRCA(time_prune_tree, c("Felis_catus", "Ailuropoda_melanoleuca")) carnivore_tips <- tips(time_prune_tree, carnivore_node) carnivore_subtree <- drop.tip(time_prune_tree, setdiff(time_prune_tree$tip.label, carnivore_tips)) carnivore_topo <- chronopl(carnivore_subtree, 0, age.min = 1, age.max = NULL,node = "root", S = 1, tol = 1e-8,CV = FALSE, eval.max = 500, iter.max = 500) time_prune_tree <- drop.tip(time_prune_tree, carnivore_tips) # Rebind subtree # message("Bind Carnivora") plot.phylo(time_prune_tree) bind_where <- getMRCA(time_prune_tree, c("Myotis_lucifugus", "Ceratotherium_simum")) rebind_carnivore <- bind.tree(time_prune_tree, carnivore_topo, where = bind_where, position = 0.1) rebind_carnivore <- multi2di(rebind_carnivore, random = FALSE) time_prune_tree <- rebind_carnivore #time_prune_tree$edge.length <- rep(1, length(time_prune_tree$edge.length)) # rebind_carnivore$edge.length[which(rebind_carnivore$edge.length == 0)] <- 1 #rebind_carnivore <- chronopl(rebind_carnivore, 0, age.min = 1, age.max = NULL,node = "root", S = 1, tol = 1e-8,CV = FALSE, eval.max = 500, iter.max = 500) ################################## ## Get the tenk tree ## tenk_tree <- read.nexus(file = "Artiodactyl_with_deer_10ktrees.nex") ## Extract deer subtree ## deer_node <- getMRCA(tenk_tree, c("Dama_dama", "Alces_alces")) deer_tips <- tips(tenk_tree, deer_node) deer_prune <- drop.tip(tenk_tree, setdiff(tenk_tree$tip.label, deer_tips)) ## Add the Bactrian tip ## bactrian_tip <- list(edge=matrix(c(2,1),1,2), tip.label="Cervus_elaphus_bactrianus", edge.length=1.0, Nnode=1) class(bactrian_tip)<-"phylo" message("Bind the Bactrian deer") plot.phylo(deer_prune) deer_prune <- bind.tree(deer_prune, bactrian_tip, interactive = TRUE) deer_prune <- chronopl(deer_prune, 0, age.min = 1, age.max = NULL,node = "root", S = 1, tol = 1e-8,CV = FALSE, eval.max = 500, iter.max = 500) ## Add the Cervus_canadensis tip ## wapiti_tip <- list(edge=matrix(c(2,1),1,2), tip.label="Cervus_canadensis", 
edge.length=1.0, Nnode=1) class(wapiti_tip)<-"phylo" message("Bind the Waptiti (sister to nippon") plot.phylo(deer_prune) deer_prune <- bind.tree(deer_prune, wapiti_tip, interactive = TRUE) deer_prune <- chronopl(deer_prune, 0, age.min = 1, age.max = NULL,node = "root", S = 1, tol = 1e-8,CV = FALSE, eval.max = 500, iter.max = 500) ## Rename the Swamp_deer to Rucervus ## swamp_deer_index <- grep("Cervus_duvaucelii", deer_prune) deer_prune$tip.label[swamp_deer_index] <- "Rucervus_duvaucelii" ## Ultrametric deer ## deer_topo <- chronopl(deer_prune, 0, age.min = 1, age.max = NULL,node = "root", S = 1, tol = 1e-8,CV = FALSE, eval.max = 500, iter.max = 500) # Write out just the deer tree # just_deer_write <- deer_topo; just_deer_write$edge.length <- NULL; just_deer_write$node.label <- NULL write.tree(just_deer_write, file = "Just_deer/Just_deer_topo_tree.txt") ################################## ## Bind deer tree to main tree ## message("Bind Deer to Artiodactyla") plot.phylo(time_prune_tree) bind_where <- getMRCA(time_prune_tree, c("Moschus_berezovskii", "Bos_taurus")) bound_deer <- bind.tree(time_prune_tree, deer_topo, where = bind_where, position = 0.1) bound_deer <- multi2di(bound_deer, random = FALSE) bound_deer$edge.length[which(bound_deer$edge.length == 0)] <- mean(bound_deer$edge.length) ## Set edge legths to NULL to make topo only ## all_topo <- bound_deer all_topo$edge.length <- NULL all_topo$node.label <- NULL ## Write tree ## write.tree(all_topo, file = "Timetree_prune_meredith.txt") ########################################################################### ## Also write out just the ruminant topology ## ## Extract ruminant subtree ## rum_node <- getMRCA(all_topo, c("Giraffa_camelopardalis", "Alces_alces")) rum_tips <- tips(all_topo, rum_node) rum_prune <- drop.tip(all_topo, setdiff(all_topo$tip.label, rum_tips)) write.tree(rum_prune, file = "Rum_only_topo.txt") ########################################################################### ## Also write out just 
the deer topology ## ## Extract ruminant subtree ## deer_node <- getMRCA(all_topo, c("Dama_dama", "Alces_alces")) deer_tips <- tips(all_topo, deer_node) deer_prune <- drop.tip(all_topo, setdiff(all_topo$tip.label, deer_tips)) write.tree(deer_prune, file = "Deer_only_topo.txt") ########################################################################### ## Make two reindeer ## # two_rein_tree_topo <- all_topo # two_rein_tree_topo$tip.label[which(two_rein_tree_topo$tip.label == "Rangifer_tarandus")] <- "Rangifer_tarandus_fennicus" # new_rein_tip <- list(edge=matrix(c(2,1),1,2), tip.label="Rangifer_tarandus_tarandus", edge.length=1.0, Nnode=1) # class(new_rein_tip)<-"phylo" # # attach to any node (say, node 16) # plot.phylo(two_rein_tree_topo) # two_rein_tree_topo <- bind.tree(two_rein_tree_topo, new_rein_tip, interactive = TRUE) # two_rein_tree_topo <- chronopl(two_rein_tree_topo, 0, age.min = 1, age.max = NULL,node = "root", S = 1, tol = 1e-8,CV = FALSE, eval.max = 500, iter.max = 500) # plot.phylo(two_rein_tree_topo) # write.tree(two_rein_tree_topo, "Meredith_topo_two_rein.txt")
/Deer/Selection_testing/Make_species_trees.R
no_license
AlexanderEsin/Scripts
R
false
false
7,138
r
## Build the species timetree used for the deer selection analyses.
## Workflow: prune a published timetree to the study taxa, re-arrange the
## carnivore clade to match Meredith et al. (2011), graft in a deer subtree
## extracted from the 10kTrees artiodactyl tree, and write out topology-only
## trees (all taxa, ruminants only, deer only).
## NB: this is an interactive script - bind.tree(..., interactive = TRUE)
## requires clicking on the plotted tree to choose the graft position.
library(ape)
library(phangorn)
library(phylobase)
library(geiger)

###########################################################################
setwd("/users/aesin/Desktop/Deer/Selection_analysis/Species_tree/")
timetree <- read.tree("TimetreeOfLife.nwk")

## All tips ##
# NOTE(review): "Hydropotoes_inermis" looks like a misspelling of
# "Hydropotes_inermis"; if the timetree uses the correct genus spelling this
# tip is silently dropped below - verify against timetree$tip.label.
all_keep_tips <- c(
  "Ailuropoda_melanoleuca", "Bos_taurus", "Camelus_ferus", "Felis_catus",
  "Myotis_lucifugus", "Homo_sapiens", "Sus_scrofa", "Ovis_aries",
  "Capra_hircus", "Mus_musculus", "Oryctolagus_cuniculus",
  "Callithrix_jacchus", "Macaca_mulatta", "Papio_anubis",
  "Rattus_norvegicus", "Pteropus_vampyrus", "Odobenus_rosmarus",
  "Odocoileus_virginianus", "Alces_alces", "Cervus_elaphus",
  "Elaphurus_davidianus", "Cervus_nippon", "Rangifer_tarandus", "Dama_dama",
  "Moschus_berezovskii", "Equus_caballus", "Ceratotherium_simum",
  "Eptesicus_fuscus", "Panthera_tigris", "Capreolus_capreolus", "Pudu_puda",
  "Giraffa_camelopardalis", "Hydropotoes_inermis", "Cervus_albirostris",
  "Rucervus_duvaucelii", "Muntiacus_reevesi", "Vicugna_pacos"
) # "Dasypus_novemcinctus"
for_dating_tree <- drop.tip(timetree, setdiff(timetree$tip.label, all_keep_tips))

##################################
## No deer ##
# Same taxon set minus the deer (they are re-grafted later from 10kTrees).
# ("Vicugna_pacos" was listed twice in the original vector; the duplicate
# made no difference to setdiff()/drop.tip() and has been removed.)
keep_tips <- c(
  "Ailuropoda_melanoleuca", "Bos_taurus", "Camelus_ferus", "Vicugna_pacos",
  "Felis_catus", "Myotis_lucifugus", "Homo_sapiens", "Sus_scrofa",
  "Ovis_aries", "Capra_hircus", "Mus_musculus", "Oryctolagus_cuniculus",
  "Callithrix_jacchus", "Macaca_mulatta", "Papio_anubis",
  "Rattus_norvegicus", "Pteropus_vampyrus", "Odobenus_rosmarus",
  "Moschus_berezovskii", "Equus_caballus", "Ceratotherium_simum",
  "Eptesicus_fuscus", "Panthera_tigris", "Giraffa_camelopardalis"
) # "Dasypus_novemcinctus"
time_prune_tree <- drop.tip(timetree, setdiff(timetree$tip.label, keep_tips))
# Replace zero-length edges (they break downstream rate smoothing) with the
# mean edge length.
time_prune_tree$edge.length[which(time_prune_tree$edge.length == 0)] <- mean(time_prune_tree$edge.length)

##################################
## Change position of carnivores to match Meredith (2011) ##
# Isolate carnivore subtree #
carnivore_node <- getMRCA(time_prune_tree, c("Felis_catus", "Ailuropoda_melanoleuca"))
carnivore_tips <- tips(time_prune_tree, carnivore_node)
carnivore_subtree <- drop.tip(time_prune_tree, setdiff(time_prune_tree$tip.label, carnivore_tips))
# Make the subtree ultrametric via penalised likelihood (lambda = 0).
carnivore_topo <- chronopl(carnivore_subtree, 0, age.min = 1, age.max = NULL,
                           node = "root", S = 1, tol = 1e-8, CV = FALSE,
                           eval.max = 500, iter.max = 500)
time_prune_tree <- drop.tip(time_prune_tree, carnivore_tips)

# Rebind subtree #
message("Bind Carnivora")
plot.phylo(time_prune_tree)
bind_where <- getMRCA(time_prune_tree, c("Myotis_lucifugus", "Ceratotherium_simum"))
rebind_carnivore <- bind.tree(time_prune_tree, carnivore_topo, where = bind_where, position = 0.1)
rebind_carnivore <- multi2di(rebind_carnivore, random = FALSE)
time_prune_tree <- rebind_carnivore
# time_prune_tree$edge.length <- rep(1, length(time_prune_tree$edge.length))
# rebind_carnivore$edge.length[which(rebind_carnivore$edge.length == 0)] <- 1
# rebind_carnivore <- chronopl(rebind_carnivore, 0, age.min = 1, age.max = NULL, node = "root", S = 1, tol = 1e-8, CV = FALSE, eval.max = 500, iter.max = 500)

##################################
## Get the tenk tree ##
tenk_tree <- read.nexus(file = "Artiodactyl_with_deer_10ktrees.nex")

## Extract deer subtree ##
deer_node <- getMRCA(tenk_tree, c("Dama_dama", "Alces_alces"))
deer_tips <- tips(tenk_tree, deer_node)
deer_prune <- drop.tip(tenk_tree, setdiff(tenk_tree$tip.label, deer_tips))

## Add the Bactrian tip ##
# Minimal single-tip "phylo" object that bind.tree() can graft on.
bactrian_tip <- list(edge = matrix(c(2, 1), 1, 2),
                     tip.label = "Cervus_elaphus_bactrianus",
                     edge.length = 1.0, Nnode = 1)
class(bactrian_tip) <- "phylo"
message("Bind the Bactrian deer")
plot.phylo(deer_prune)
deer_prune <- bind.tree(deer_prune, bactrian_tip, interactive = TRUE)
deer_prune <- chronopl(deer_prune, 0, age.min = 1, age.max = NULL,
                       node = "root", S = 1, tol = 1e-8, CV = FALSE,
                       eval.max = 500, iter.max = 500)

## Add the Cervus_canadensis tip ##
wapiti_tip <- list(edge = matrix(c(2, 1), 1, 2),
                   tip.label = "Cervus_canadensis",
                   edge.length = 1.0, Nnode = 1)
class(wapiti_tip) <- "phylo"
# FIX: message typo ("Waptiti") and unbalanced parenthesis corrected.
message("Bind the Wapiti (sister to nippon)")
plot.phylo(deer_prune)
deer_prune <- bind.tree(deer_prune, wapiti_tip, interactive = TRUE)
deer_prune <- chronopl(deer_prune, 0, age.min = 1, age.max = NULL,
                       node = "root", S = 1, tol = 1e-8, CV = FALSE,
                       eval.max = 500, iter.max = 500)

## Rename the Swamp_deer to Rucervus ##
# BUG FIX: the original called grep("Cervus_duvaucelii", deer_prune), which
# greps over the phylo object's list components (coerced to character), not
# the tip labels - so the returned index pointed at a list component and the
# wrong entry of tip.label was overwritten. Search the tip labels directly.
swamp_deer_index <- grep("Cervus_duvaucelii", deer_prune$tip.label)
deer_prune$tip.label[swamp_deer_index] <- "Rucervus_duvaucelii"

## Ultrametric deer ##
deer_topo <- chronopl(deer_prune, 0, age.min = 1, age.max = NULL,
                      node = "root", S = 1, tol = 1e-8, CV = FALSE,
                      eval.max = 500, iter.max = 500)

# Write out just the deer tree (topology only: strip edge/node annotations) #
just_deer_write <- deer_topo
just_deer_write$edge.length <- NULL
just_deer_write$node.label <- NULL
write.tree(just_deer_write, file = "Just_deer/Just_deer_topo_tree.txt")

##################################
## Bind deer tree to main tree ##
message("Bind Deer to Artiodactyla")
plot.phylo(time_prune_tree)
bind_where <- getMRCA(time_prune_tree, c("Moschus_berezovskii", "Bos_taurus"))
bound_deer <- bind.tree(time_prune_tree, deer_topo, where = bind_where, position = 0.1)
bound_deer <- multi2di(bound_deer, random = FALSE)
bound_deer$edge.length[which(bound_deer$edge.length == 0)] <- mean(bound_deer$edge.length)

## Set edge lengths to NULL to make topo only ##
all_topo <- bound_deer
all_topo$edge.length <- NULL
all_topo$node.label <- NULL

## Write tree ##
write.tree(all_topo, file = "Timetree_prune_meredith.txt")

###########################################################################
## Also write out just the ruminant topology ##
## Extract ruminant subtree ##
rum_node <- getMRCA(all_topo, c("Giraffa_camelopardalis", "Alces_alces"))
rum_tips <- tips(all_topo, rum_node)
rum_prune <- drop.tip(all_topo, setdiff(all_topo$tip.label, rum_tips))
write.tree(rum_prune, file = "Rum_only_topo.txt")

###########################################################################
## Also write out just the deer topology ##
## Extract deer subtree ##
deer_node <- getMRCA(all_topo, c("Dama_dama", "Alces_alces"))
deer_tips <- tips(all_topo, deer_node)
deer_prune <- drop.tip(all_topo, setdiff(all_topo$tip.label, deer_tips))
write.tree(deer_prune, file = "Deer_only_topo.txt")

###########################################################################
## Make two reindeer ## (kept as a commented-out alternative workflow)
# two_rein_tree_topo <- all_topo
# two_rein_tree_topo$tip.label[which(two_rein_tree_topo$tip.label == "Rangifer_tarandus")] <- "Rangifer_tarandus_fennicus"
# new_rein_tip <- list(edge=matrix(c(2,1),1,2), tip.label="Rangifer_tarandus_tarandus", edge.length=1.0, Nnode=1)
# class(new_rein_tip)<-"phylo"
# # attach to any node (say, node 16)
# plot.phylo(two_rein_tree_topo)
# two_rein_tree_topo <- bind.tree(two_rein_tree_topo, new_rein_tip, interactive = TRUE)
# two_rein_tree_topo <- chronopl(two_rein_tree_topo, 0, age.min = 1, age.max = NULL,node = "root", S = 1, tol = 1e-8,CV = FALSE, eval.max = 500, iter.max = 500)
# plot.phylo(two_rein_tree_topo)
# write.tree(two_rein_tree_topo, "Meredith_topo_two_rein.txt")
toate acuzatiile din " Jurnalul national " au inceput de la o poveste simpla . intr - o zi am publicat in " Evenimentul zilei " citeva telexuri prin care Dan Voiculescu cerea sefilor sai din Cipru sa - i cumpere bilete de avion la clasa business pentru o excursie prin Istanbul - Atena - Viena - Roma si Paris , cu cazare la hoteluri de cinci stele . fiind vara lui 1986 si luind in calcul ca , pe linga cele trei cucoane Voiculescu , a mai calatorit si patronul de azi al Antenei 1 , plus Anca Gheorghe , putem estima ca a fost vorba de o excursie care a costat intre 15.000 si 25.000 de dolari . numai hotelurile si biletele de avion . Nu mai vorbim de masa si maruntisuri de buzunar . pentru vremurile acelea de restriste , suma era exorbitanta si , in acelasi timp , suspecta . In loc sa raspunda ca a fost un cadou sau sa explice , chiar si in dorul lelii , cum se facea ca invirtea asemenea bani intr - o perioada in care militia te retinea si pentru zece dolari , Dan Voiculescu le - a cerut lui Marius Tuca si lui Dan Constantin ( fost sef al biroului de presa de la Comitetul Municipal PCR Bucuresti si , mai tirziu , mina moarta si paguboasa in trustul Ringier ) sa inceapa ofensiva . " jurnalul national " a publicat o declaratie olografa a subsemnatului , data in 1990 , pentru tribunalul de la Paris . scriam acolo despre legaturile lui Dan Voiculescu cu Securitatea . " jurnalul national " , fie din prostie , fie din rea - credinta , a reprodus inceputul declaratiei ca pe o dovada a colaborarii mele cu Securitatea . conform gindirii inferioare din straturile superioare ale lui Dan Voiculescu , daca el a facut afaceri cu " Dunarea " , toata lumea a facut afaceri cu " Dunarea " si cu Securitatea . daca el este sisiit , toata populatia este sisiita . daca el are capul tuguiat , toti sint tuguiati , exact ca in Oblio . 
acest mediocru miliardar de carton , dupa ce a reusit sa invirta afaceri cu Securitatea si sa scoata incredibil de multi bani , mai are nevoie doar de onorabilitate . se crede atit de destept si de instruit incit se mira si sufera ca n - a ajuns inca parlamentar , prim - ministru sau mare guru al Romaniei . cu propozitii schioape , el vrea sa acrediteze o opera stiintifica . s - a zbatut ca , peste noapte , sa ajunga profesor universitar , sarind peste rind si invatindu - i pe studenti cum sa faca bani din sisiieli intelectuale . si - a facut si fundatie , si partid . amaritul acela de Partid Umanist Roman ( care dadea , cu ani in urma , televizoare si pachete incit unii au crezut ca e o Casa de Ajutor Reciproc a Pensionarilor ) , oricit a fost el de umflat de Antena 1 si de " Jurnalul national " , tot n - a depasit cu mult popularitatea partidului lui Bratianu Cartof . Dan Voiculescu s - a prezentat si ca profesor , si ca sef de partid , si ca mare om de afaceri , tronind prin tot felul de asociatii . oricit ii place sa se autointituleze profesor , om de afaceri si sef de partid , Dan Voiculescu nu reuseste sa cucereasca nici publicul , nici electoratul . explicatia e simpla . Oamenii nu - l cred . unii nici nu stiu de ce . altii pentru ca se indoiesc de originea banilor sai . Iar altii pentru ca il considera sters . daca a avut milioane de dolari in 1990 si 1991 si n - a ajuns nici macar sa - si cumpere un teren pentru a - si construi un sediu , atunci cu siguranta ca si la materia cenusie " profesorul " Voiculescu sta modest . cu asemenea sume , alti romani mai destepti ar fi ajuns mult mai departe . visul de onorabilitate al lui Dan Voiculescu s - a scaldat prea multi ani in neputinta . asa s - a hotarit sa puna la bataie toate mijloacele sale . daca Dan Voiculescu vrea ceva si nu obtine , Antena 1 si " Jurnalul national " incep sa vomeze vrute si nevrute la adresa celor care ii stau in cale . 
pentru ca incercam sa aflam originea banilor de la baza acestui imperiu cu picioare de lut , Dan Voiculescu a dat mai multe semnale ca ar dori sa facem pace . cind a fost refuzat , a inceput campania sa bazata pe filosofia tuguiatilor . toti au capul strimb ca al lui ! daca vrem sa aflam ce afaceri a facut el cu Securitatea , toti sintem securisti . daca cerem deschiderea arhivei de la firma " Dunarea " si analizarea ei ( pentru a sti de unde vin banii lui Dan Voiculescu si ai altor miliardari de carton ) , " Jurnalul national " tipa ca toti am furat , am facut speculatii sau reprezentam fortele trecutului , intinind biata noastra democratie reprezentata de un sisiit atit de feciorelnic . p . S . Pentru a - si continua campania , " Jurnalul national " a recurs si la editorialistul de la " Dimineata " ( organul PDSR ) . Serban Cionoff , fost la " Tinarul leninist " , dupa ce a golit trei fabrici de alcool , isi stringe pielea si oasele intr - un ultim efort si incearca sa imparta pe la cunoscuti toate vagoanele de rahat pe care le - a mincat o viata intreaga .
/data/Newspapers/2000.07.07.editorial.27634.0381.r
no_license
narcis96/decrypting-alpha
R
false
false
4,879
r
toate acuzatiile din " Jurnalul national " au inceput de la o poveste simpla . intr - o zi am publicat in " Evenimentul zilei " citeva telexuri prin care Dan Voiculescu cerea sefilor sai din Cipru sa - i cumpere bilete de avion la clasa business pentru o excursie prin Istanbul - Atena - Viena - Roma si Paris , cu cazare la hoteluri de cinci stele . fiind vara lui 1986 si luind in calcul ca , pe linga cele trei cucoane Voiculescu , a mai calatorit si patronul de azi al Antenei 1 , plus Anca Gheorghe , putem estima ca a fost vorba de o excursie care a costat intre 15.000 si 25.000 de dolari . numai hotelurile si biletele de avion . Nu mai vorbim de masa si maruntisuri de buzunar . pentru vremurile acelea de restriste , suma era exorbitanta si , in acelasi timp , suspecta . In loc sa raspunda ca a fost un cadou sau sa explice , chiar si in dorul lelii , cum se facea ca invirtea asemenea bani intr - o perioada in care militia te retinea si pentru zece dolari , Dan Voiculescu le - a cerut lui Marius Tuca si lui Dan Constantin ( fost sef al biroului de presa de la Comitetul Municipal PCR Bucuresti si , mai tirziu , mina moarta si paguboasa in trustul Ringier ) sa inceapa ofensiva . " jurnalul national " a publicat o declaratie olografa a subsemnatului , data in 1990 , pentru tribunalul de la Paris . scriam acolo despre legaturile lui Dan Voiculescu cu Securitatea . " jurnalul national " , fie din prostie , fie din rea - credinta , a reprodus inceputul declaratiei ca pe o dovada a colaborarii mele cu Securitatea . conform gindirii inferioare din straturile superioare ale lui Dan Voiculescu , daca el a facut afaceri cu " Dunarea " , toata lumea a facut afaceri cu " Dunarea " si cu Securitatea . daca el este sisiit , toata populatia este sisiita . daca el are capul tuguiat , toti sint tuguiati , exact ca in Oblio . 
acest mediocru miliardar de carton , dupa ce a reusit sa invirta afaceri cu Securitatea si sa scoata incredibil de multi bani , mai are nevoie doar de onorabilitate . se crede atit de destept si de instruit incit se mira si sufera ca n - a ajuns inca parlamentar , prim - ministru sau mare guru al Romaniei . cu propozitii schioape , el vrea sa acrediteze o opera stiintifica . s - a zbatut ca , peste noapte , sa ajunga profesor universitar , sarind peste rind si invatindu - i pe studenti cum sa faca bani din sisiieli intelectuale . si - a facut si fundatie , si partid . amaritul acela de Partid Umanist Roman ( care dadea , cu ani in urma , televizoare si pachete incit unii au crezut ca e o Casa de Ajutor Reciproc a Pensionarilor ) , oricit a fost el de umflat de Antena 1 si de " Jurnalul national " , tot n - a depasit cu mult popularitatea partidului lui Bratianu Cartof . Dan Voiculescu s - a prezentat si ca profesor , si ca sef de partid , si ca mare om de afaceri , tronind prin tot felul de asociatii . oricit ii place sa se autointituleze profesor , om de afaceri si sef de partid , Dan Voiculescu nu reuseste sa cucereasca nici publicul , nici electoratul . explicatia e simpla . Oamenii nu - l cred . unii nici nu stiu de ce . altii pentru ca se indoiesc de originea banilor sai . Iar altii pentru ca il considera sters . daca a avut milioane de dolari in 1990 si 1991 si n - a ajuns nici macar sa - si cumpere un teren pentru a - si construi un sediu , atunci cu siguranta ca si la materia cenusie " profesorul " Voiculescu sta modest . cu asemenea sume , alti romani mai destepti ar fi ajuns mult mai departe . visul de onorabilitate al lui Dan Voiculescu s - a scaldat prea multi ani in neputinta . asa s - a hotarit sa puna la bataie toate mijloacele sale . daca Dan Voiculescu vrea ceva si nu obtine , Antena 1 si " Jurnalul national " incep sa vomeze vrute si nevrute la adresa celor care ii stau in cale . 
pentru ca incercam sa aflam originea banilor de la baza acestui imperiu cu picioare de lut , Dan Voiculescu a dat mai multe semnale ca ar dori sa facem pace . cind a fost refuzat , a inceput campania sa bazata pe filosofia tuguiatilor . toti au capul strimb ca al lui ! daca vrem sa aflam ce afaceri a facut el cu Securitatea , toti sintem securisti . daca cerem deschiderea arhivei de la firma " Dunarea " si analizarea ei ( pentru a sti de unde vin banii lui Dan Voiculescu si ai altor miliardari de carton ) , " Jurnalul national " tipa ca toti am furat , am facut speculatii sau reprezentam fortele trecutului , intinind biata noastra democratie reprezentata de un sisiit atit de feciorelnic . p . S . Pentru a - si continua campania , " Jurnalul national " a recurs si la editorialistul de la " Dimineata " ( organul PDSR ) . Serban Cionoff , fost la " Tinarul leninist " , dupa ce a golit trei fabrici de alcool , isi stringe pielea si oasele intr - un ultim efort si incearca sa imparta pe la cunoscuti toate vagoanele de rahat pe care le - a mincat o viata intreaga .
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convert.R \name{x3p_scale_unit} \alias{x3p_scale_unit} \title{Scale x3p object by given unit} \usage{ x3p_scale_unit(x3p, scale_by) } \arguments{ \item{x3p}{object in x3p format, 3d topographic surface.} \item{scale_by}{numeric value. Value the surface to be scaled by. While not enforced, values of \code{scale_by} make most sense as multiples of 10 (for a metric system).} } \value{ x3p with header information in microns } \description{ x3p objects can be presented in different units. ISO standard 5436_2 asks for specification of values in meters. For topographic surfaces collected by microscopes values in microns are more readable. This function allows conversion between different units. } \examples{ logo <- read_x3p(system.file("csafe-logo.x3p", package="x3ptools")) logo # measurements in meters x3p_scale_unit(logo, scale_by=10^6) # measurements in microns }
/man/x3p_scale_unit.Rd
no_license
sctyner/x3ptools
R
false
true
952
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convert.R \name{x3p_scale_unit} \alias{x3p_scale_unit} \title{Scale x3p object by given unit} \usage{ x3p_scale_unit(x3p, scale_by) } \arguments{ \item{x3p}{object in x3p format, 3d topographic surface.} \item{scale_by}{numeric value. Value the surface to be scaled by. While not enforced, values of \code{scale_by} make most sense as multiples of 10 (for a metric system).} } \value{ x3p with header information in microns } \description{ x3p objects can be presented in different units. ISO standard 5436_2 asks for specification of values in meters. For topographic surfaces collected by microscopes values in microns are more readable. This function allows conversion between different units. } \examples{ logo <- read_x3p(system.file("csafe-logo.x3p", package="x3ptools")) logo # measurements in meters x3p_scale_unit(logo, scale_by=10^6) # measurements in microns }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dependencies.R \name{addFontAwesome} \alias{addFontAwesome} \title{Use fontAwesome icons in visNetwork \code{graph}} \usage{ addFontAwesome(graph, name = "font-awesome") } \arguments{ \item{graph}{: a visNetwork object} \item{name}{: name of dependency} } \value{ \code{graph} htmlwidget with Font-Awesome dependencies attached. } \description{ Add \href{http://fortawesome.github.io/Font-Awesome/}{Font-Awesome} for styling our \code{graph} with beautiful, professional icons. Please note that you'll already have these icons if using Shiny. Can also use \link{addIonicons} } \examples{ # use fontAwesome icons using groups or nodes options # font-awesome is not part of dependencies. use addFontAwesome() if needed. # https://fontawesome.com/v4.7.0/ # Version in package (and compatible with vis.js) : 4.7.0 # cheatsheet available in package: # system.file("fontAwesome/Font_Awesome_Cheatsheet.pdf", package = "visNetwork") # definition in groups nodes <- data.frame(id = 1:3, group = c("B", "A", "B")) edges <- data.frame(from = c(1,2), to = c(2,3)) visNetwork(nodes, edges) \%>\% visGroups(groupname = "A", shape = "icon", icon = list(code = "f0c0", size = 75)) \%>\% visGroups(groupname = "B", shape = "icon", icon = list(code = "f007", color = "red")) \%>\% addFontAwesome() # definition in nodes nodes <- data.frame(id = 1:3, shape = "icon", icon.face = 'FontAwesome', icon.code = "f0c0") edges <- data.frame(from = c(1,2), to = c(1,3)) visNetwork(nodes, edges) \%>\% addFontAwesome() # using shinydashboard : change name if needed visNetwork(nodes, edges) \%>\% addFontAwesome(name = "font-awesome-visNetwork") }
/man/addFontAwesome.Rd
no_license
42-Discworld/visNetwork
R
false
true
1,779
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dependencies.R \name{addFontAwesome} \alias{addFontAwesome} \title{Use fontAwesome icons in visNetwork \code{graph}} \usage{ addFontAwesome(graph, name = "font-awesome") } \arguments{ \item{graph}{: a visNetwork object} \item{name}{: name of dependency} } \value{ \code{graph} htmlwidget with Font-Awesome dependencies attached. } \description{ Add \href{http://fortawesome.github.io/Font-Awesome/}{Font-Awesome} for styling our \code{graph} with beautiful, professional icons. Please note that you'll already have these icons if using Shiny. Can also use \link{addIonicons} } \examples{ # use fontAwesome icons using groups or nodes options # font-awesome is not part of dependencies. use addFontAwesome() if needed. # https://fontawesome.com/v4.7.0/ # Version in package (and compatible with vis.js) : 4.7.0 # cheatsheet available in package: # system.file("fontAwesome/Font_Awesome_Cheatsheet.pdf", package = "visNetwork") # definition in groups nodes <- data.frame(id = 1:3, group = c("B", "A", "B")) edges <- data.frame(from = c(1,2), to = c(2,3)) visNetwork(nodes, edges) \%>\% visGroups(groupname = "A", shape = "icon", icon = list(code = "f0c0", size = 75)) \%>\% visGroups(groupname = "B", shape = "icon", icon = list(code = "f007", color = "red")) \%>\% addFontAwesome() # definition in nodes nodes <- data.frame(id = 1:3, shape = "icon", icon.face = 'FontAwesome', icon.code = "f0c0") edges <- data.frame(from = c(1,2), to = c(1,3)) visNetwork(nodes, edges) \%>\% addFontAwesome() # using shinydashboard : change name if needed visNetwork(nodes, edges) \%>\% addFontAwesome(name = "font-awesome-visNetwork") }
# Viva 450K
# Generate the official raw dataset - starting from iDATs - in coordination
# with the official V14 data from Dawn and Jarrett; clean failed samples.
#
# Flat analysis script (part 1): read the IDAT translation key and the Viva
# sample-mapping sheets, flag failed/control samples, and build the alias
# table. Interleaved dim()/head()/table()/describe() calls are interactive
# sanity checks whose observed results are recorded in the comments.
##############################
library(Hmisc)
library(methylumi)
library(wateRmelon)
library(ggplot2)
library(matrixStats)
library(sas7bdat)
# library(XLConnect) CURRENTLY GIVING ERRORS
library(data.table)
library(readxl) # USING THIS INSTEAD OF XLConnect

###########################################################
# CALL IN IDAT TRANSLATION KEY SHEET
############################################################
idattrans <- data.table(read_excel("/net/rcnfs05/srv/export/baccarelli_lab/share_root/viva_450k/data/samplesheets/IDAT_Translation_Key_B.xlsx", sheet = 1),check.names=TRUE)
dim(idattrans)

# pull out the unrelated subtable of chip and batch from idat translation key
beadchipbatch <- idattrans[, list(BeadChip = BeadChip.1, Batch)]
beadchipbatch <- beadchipbatch[!is.na(BeadChip)]

# drop some extra columns
idattrans[, c("Col9", "Col10", "Col11", "BeadChip.1", "Batch") := NULL]
head(idattrans)

# pull out sampleid (strip the "WG..._" prefix from Sample.1) and build the
# "<BeadChip>_<Position>" barcode used as the sample name throughout
idattrans[, sampleid := sub("WG.*_", "", Sample.1)]
idattrans[, samplename := paste0(BeadChip, "_", Position)]
head(idattrans)
table(duplicated(idattrans$samplename)) # no duplicates

######### FAILED SAMPLES ##################
# Create a flag for 'failed samples' (Comment column mentions "Fail")
failed <- grep("Failed",idattrans$Comment)
print(table(idattrans$Comment[failed]))
idattrans[, failed := F]
idattrans[grepl("Fail", Comment), failed := T]
table(idattrans$failed)

############## CONTROL SAMPLES ######################
array.Ctrl <- idattrans[sampleid == "Ctrl"]
dim(array.Ctrl)

# create a flag for control samples
idattrans[, controlSamps := F]
idattrans[sampleid == "Ctrl", controlSamps := T]
table(idattrans$controlSamps)

# make the Ctrl samples have a unique sampleid ("Ctrl1", "Ctrl2", ...)
# (This doesn't have any immediate use in current script - but may be useful
# as code for later work on controls..)
idattrans[sampleid == "Ctrl", sampleid := paste0("Ctrl", 1:.N)]
head(idattrans[grepl("Ctrl",sampleid)])

####################################################
# CALL IN SHEET TO EXTRACT VIVA ALIAS
#########################################################
# mapping of SAMPLE e.g. "S-001259392" to Viva alias "122972"
vivaalias <- data.table(read_excel("/net/rcnfs05/srv/export/baccarelli_lab/share_root/viva_450k/data/samplesheets/VIVA_sample_mapping_with_age.xlsx", sheet = 1),check.names=TRUE)
dim(vivaalias)
head(vivaalias)

# numeric alias: strip any leading "c"/"C" and dashes from the recruitment ID
vivaalias[, alias := as.numeric(gsub("c|C|-", "", RECRUITMENT.ID))]
head(vivaalias)

# check alias values are all in the fullphenodata that Dawn sent
setnames(vivaalias, "SAMPLE", "sampleid")
describe(vivaalias) # only 728 unique for 1125 unique sampleids in a list of 1161 values
vivaalias[duplicated(vivaalias)]
# our repeats are c116996 and c107473
# samples that were run multiple times --> 32 + 16 =48
vivaalias[alias %in% c(116996, 107473)]
# does each sampleid occur only once? - not always!
vivaalias[, .N, by=sampleid][, describe(N)]
# well, apart from the two aliases above it is:
vivaalias[!alias %in% c(116996, 107473), .N, by = sampleid][, describe(N)]
# does each sampleid map to a unique alias? Yes!
# (part 2) Resolve multiple-repeat QC samples, merge aliases into the IDAT
# translation key, sequentially drop failed/control samples, and verify the
# result against the official V14 pDat manifest from Dawn.
vivaalias[, list(nalias = length(unique(alias))), by=sampleid][, describe(nalias)]
# each non-qc alias can have 1-3 sampleids
vivaalias[, .N, by=alias][, describe(N)]
vivaalias[, .N, by=sampleid][, describe(N)]

# clean up sample type/stage: CB = cord blood, Y3/Y7 = age 3/7 collections
vivaalias[, stage := factor(COLLECTION, levels = c("Proband Delivery", "Proband Age 3", "Proband Age 7"), labels = c("CB", "Y3", "Y7"))]

# make a var for qc samples (MR = "multiple repeat", 32x and 16x reruns)
vivaalias[alias == 116996 & stage == "CB", qcid := "MR32"]
vivaalias[alias == 107473 & stage == "CB", qcid := "MR16"]

#################################################
# MERGE IN ALIAS WITH IDAT TRANSLATION KEY TO HANDLE MULTIPLE REPEATS
####################################################################
idattrans[, c("alias", "stage", "qcid") := NULL] # take it out first in case repeating
idattrans <- merge(idattrans, unique(vivaalias[,list(sampleid, alias, stage, qcid)]), by = "sampleid", all.x = T)

### WE WANT TO KEEP 1 FROM EACH MULTIPLE REPEAT - ALLAN KEEPS THE SAME ONES THAT JARRETT DID!! ###
# > pDat[sampleNames=='S-000670341','repUse'] <- TRUE
# > pDat[sampleNames=='S-000621296','repUse'] <- TRUE

# need an indicator for which samples are solely qc
idattrans[, isqc := F]
idattrans[alias %in% c(116996, 107473) & stage == "CB", isqc := T]
# but we get back one of each - we need to pick
# but which are the designated qc samps to use? (which of the 16 or 32 are on same chip as other samples?)
# which chips are the non Cord Blood samples on ?
idattrans[alias == 116996 & stage != "CB", list(sampleid, BeadChip, stage)]
# which samples from that person are on those chips?
# NOTE(review): .Last.value holds the result of the immediately preceding
# top-level expression - these lines break silently if statements are
# reordered or run out of sequence.
idattrans[alias == 116996 & BeadChip %in% .Last.value[, BeadChip], list(sampleid, BeadChip, stage)]
# how many of each sampleid were plated? (especially from the previous command)
idattrans[alias == 116996 & stage == "CB", .N, by=sampleid]
# so we will designate the singleton that is on the same chip as other samples - just like everyone else
idattrans[alias == 116996 & stage == "CB" & sampleid == "S-000621296", isqc := F]

# again for the next Multiple-Repeat sample
idattrans[alias == 107473 & stage != "CB", list(sampleid, BeadChip, stage)]
idattrans[alias == 107473 & BeadChip %in% .Last.value[, BeadChip], list(sampleid, BeadChip, stage)]
idattrans[alias == 107473 & stage == "CB", .N, by=sampleid]
idattrans[alias == 107473 & stage == "CB" & sampleid == "S-000670341", isqc := F]

################################################################
# SEE WHAT NUMBERS LOOK LIKE WHEN YOU SEQUENTIALLY DROP ACCORDING TO FLAGS
##############################################################################
dim(idattrans)
# dropping failed samples
idattrans1 <- idattrans[failed==F,]
dim(idattrans1) #so 226 dropped - as was the case in Jarrett's code
# dropping control samples
idattrans2 <- idattrans1[controlSamps==F,]
# so 25 dropped!! This now matches array.Ctrl in Jarrett's code!
## array.Ctrl
## FALSE  TRUE
##  1151    25
dim(idattrans2) # THIS MATCHES 1151 IN pDat from V14 FROM DAWN!!
print(idattrans2[is.na(Sample)]) # none - these all disappeared with removing 'failed samples'
head(idattrans2)
class(idattrans2)
# [1] "data.table" "data.frame"

# NOW LOAD V14 VIVA DATA OFFICIALLY PROVIDED BY DAWN!!
# (loads pDat, among other objects, into the global environment)
load("/net/rcnfs05/srv/export/baccarelli_lab/share_root/viva_450k/data/VivaThousandQC_BMIQ_1449230271_V14.RData")
pDat <- data.table(pDat)
head(pDat)

# Both these have many of the same variables - make sure that they all correspond!
# create a sampleid var in pDat just like that in idattrans2
pDat[, sampleid := sub("WG.*_", "", Sample.1)]
pDat[, samplename := paste0(Slide, "_", Array)]
# also create a similar variable in idattrans2 from beadchip and position
idattrans2[, BeadChip_Position := paste0(BeadChip, "_", Position)]
dim(pDat)
dim(idattrans2)
class(pDat$sampleid)
class(idattrans2$sampleid)
class(pDat$samplename)
class(idattrans2$samplename)

# cross-check overlap between the two tables on several candidate keys
idattrans2_check <- idattrans2[idattrans2$sampleid %in% pDat$sampleid,]
dim(idattrans2_check)
idattrans2_check2 <- idattrans2[idattrans2$Sample %in% pDat$Sample,]
dim(idattrans2_check2)
idattrans2_check3 <- idattrans2[idattrans2$samplename %in% pDat$samplename,]
dim(idattrans2_check3)
idattrans2_check4 <- idattrans2[idattrans2$BeadChip_Position %in% pDat$samplename,]
dim(idattrans2_check4)

# Does "questionable quality" from our idattrans2 match "lowQual" from dawn's pDat? - YES!
table(pDat$lowQual)
idattrans2[, lowQual := F]
idattrans2[grepl("Questionable Quality", Comment), lowQual := T]
table(idattrans2$lowQual)

# does our "isqc" match Dawn's "repUse" - YES!
table(pDat$repUse)
table(idattrans2$isqc)

# # # so everything matches, now use pDat to make a manifest to replace idattrans2 # # #
length(unique(idattrans2$samplename))
describe(idattrans2$samplename)
length(unique(pDat$samplename))
describe(pDat$samplename)

# match idattrans2 to pDat - ensure it is fully aligned (row order of pDat)
idattransfinal <- idattrans2[match(pDat$samplename, samplename),]
# Check thoroughly
all(idattransfinal$samplename==pDat$samplename)
identical(idattransfinal$samplename,pDat$samplename)
all(idattransfinal$BeadChip_Position==pDat$samplename)
all(idattransfinal$sampleid==pDat$sampleid)
all(idattransfinal$Sample==pDat$Sample)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# NOW YOU CAN USE Dawn's pDat INSTEAD OF 'IDATTRANS' (i.e. IDATTRANS2) TO MERGE IN AS A MANIFEST
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# (part 3) Add multiple-scan information from Allan's file table, attach the
# final manifest as pData to the methylumi and minfi objects, and save them.

# But we also want to add information on multiple scans
# Call in master sheet created by Allan - with information on all folders/subfolders
files <- read.csv("/net/rcnfs05/srv/export/baccarelli_lab/share_root/viva_450k/data/viva_idat_file_table_20141010.csv")
dim(files)
# [1] 3728   18
files <- data.table(files)
head(files)

# propagate subfolderversion to both green and red file for each barcode/folder combo (each scan)
# GA: (b/c Allan initially filled in info form a lot of summary variables just for the 'Grn' files')
files[, subfolderversion := .SD[color == "Grn", subfolderversion], by = c("barcode", "folder")]

# We want to use parent-level(1st-level) folder IDATs - all samples coming from the same scanner!
dim(files[subfolder == F])
dim(files[subfolderversion == 1,])
# so this corresponds with subfolder == F
# this means that if subfolder == F, then subfolderversion == 1 - these are the parent-level IDATs

# Subset to parent-level folders
filesParent <- files[subfolder == F,]
dim(filesParent)

# So how many where nbarcode > 2 (i.e. scanned more than once)?
# (nbarcode counts files per barcode; each scan contributes a Grn + Red pair)
dim(filesParent[nbarcode > 2])
table(filesParent$nbarcode, exclude=NULL)

# make a variable to indicate which beadchips were scanned more than once vs. only once
filesParent[,multipleScans := 0]
filesParent[nbarcode > 2, multipleScans := 1]
table(filesParent$multipleScans)

# You also want to make a variable to indicate number of scans - based on 'nbarcode'
filesParent[,NumOfScans:=nbarcode/2]
table(filesParent$nbarcode, exclude=NULL)
table(filesParent$NumOfScans, exclude=NULL) # worked perfectly

# now divide # of beadchips by 2, because you have 2 of everything right now (1 for green, 1 for red)
filesParentHalf<- filesParent[color == "Grn"]
dim(filesParentHalf)
# [1] 1402   19
table(filesParentHalf$multipleScans)
# 384 matches the number indicated in Allan's email: "Illumina rescanned 384 samples..."
table(filesParentHalf$NumOfScans)

# Now use barcode/samplename to merge scan info into PDat
dim(pDat)
dim(filesParentHalf)
# samplename in pDat = barcode
class(pDat$samplename)
class(filesParentHalf$barcode)
filesParentHalf$barcode <- as.character(filesParentHalf$barcode)

# restrict to barcodes present in pDat, then align row order to pDat
filesParentFinal <- filesParentHalf[filesParentHalf$barcode %in% pDat$samplename,]
dim(filesParentFinal)
filesParentFinal <- filesParentFinal[match(pDat$samplename, barcode),]
all(filesParentFinal$barcode==pDat$samplename)
identical(filesParentFinal$barcode,pDat$samplename)

# keep only the scan-info columns, re-verify alignment, then column-bind
filesParentFINAL <- filesParentFinal[,c("barcode","multipleScans","NumOfScans"),with=FALSE]
head(filesParentFINAL)
all(filesParentFINAL$barcode==pDat$samplename)
identical(filesParentFINAL$barcode,pDat$samplename)
pDatFINAL <- cbind(pDat,filesParentFINAL)
all(pDatFINAL$barcode==pDat$samplename)
identical(pDatFINAL$barcode,pDat$samplename)
identical(pDatFINAL$barcode,pDatFINAL$samplename)
identical(filesParentFINAL$barcode,pDatFINAL$samplename)
table(pDatFINAL$multipleScans)
table(filesParentFinal$multipleScans)
table(filesParentFINAL$multipleScans)
table(pDatFINAL$NumOfScans)
table(filesParentFinal$NumOfScans)
table(filesParentFINAL$NumOfScans)

# Rename pDatFINAL back to 'pDat' before merging it in as methylumi manifest
rm(pDat)
pDat <- pDatFINAL
dim(pDat)

# Call in the official iDATs obtained from Dawn and imported in Dec 2015
# (loads raw.idat.all / raw.RGset.all into the global environment)
load("/net/rcnfs05/srv/export/baccarelli_lab/share_root/viva_450k/data/imported/raw_idat_all_official.RData")
load("/net/rcnfs05/srv/export/baccarelli_lab/share_root/viva_450k/data/imported/raw_RGset_all_official.RData")
dim(raw.idat.all)
dim(raw.RGset.all)
class(sampleNames(raw.idat.all))
class(sampleNames(raw.RGset.all))
class(pDat$samplename)

# Restrict imported iDAT data to what is in pDat from Dawn
raw.idat.official <- raw.idat.all[,sampleNames(raw.idat.all) %in% pDat$samplename]
dim(raw.idat.official)
raw.RGset.official <- raw.RGset.all[,sampleNames(raw.RGset.all) %in% pDat$samplename]
dim(raw.RGset.official)
rm(raw.idat.all)
rm(raw.RGset.all)

# prepare to merge in with datasets
##########################################################
# align pDat rows to the methylumi object's sample order
pDat <- pDat[match(sampleNames(raw.idat.official), samplename),]
identical(sampleNames(raw.idat.official), pDat$samplename)
all(sampleNames(raw.idat.official)==pDat$samplename)

# bring them in as pData
manifest1 <- as.data.frame(pDat[match(sampleNames(raw.idat.official), pDat$samplename),])
row.names(manifest1) <- manifest1$samplename

# put them into the methylumi object - guard against misaligned rows
if(exists("raw.idat.official") & identical(sampleNames(raw.idat.official), manifest1$samplename)){
  pData(raw.idat.official) <- manifest1
} else { stop("REORDER YOUR DFCOVARS!!!")}
save(raw.idat.official, file = "/net/rcnfs05/srv/export/baccarelli_lab/share_root/viva_450k/data/output/intermediate/raw_idat_V14_official.RData")
rm(raw.idat.official)

######
# repeat the alignment/attachment for the minfi RGset
pDat <- pDat[match(sampleNames(raw.RGset.official), samplename),]
identical(sampleNames(raw.RGset.official), pDat$samplename)
all(sampleNames(raw.RGset.official)==pDat$samplename)
manifest2 <- as.data.frame(pDat[match(sampleNames(raw.RGset.official), pDat$samplename), ])
row.names(manifest2) <- manifest2$samplename

# put them into the minfi object - same alignment guard as above
if(exists("raw.RGset.official") & identical(sampleNames(raw.RGset.official), manifest2$samplename)){
  pData(raw.RGset.official) <- manifest2
} else { stop("REORDER YOUR DFCOVARS!!!")}
save(raw.RGset.official, file = "/net/rcnfs05/srv/export/baccarelli_lab/share_root/viva_450k/data/output/intermediate/raw_RGset_V14_official.RData")
rm(raw.RGset.official)
/QC/450k_V14_Allan/viva450k_2_filterSamps_generatePdat_official.R
no_license
snlent/Viva
R
false
false
14,827
r
# Viva 450K # Generate official raw dataset-starting from iDATs- in coordination with official V14 data from Dawn and Jarrett # clean failed samples ############################## library(Hmisc) library(methylumi) library(wateRmelon) library(ggplot2) library(matrixStats) library(sas7bdat) # library(XLConnect) CURRENTLY GIVING ERRORS library(data.table) library(readxl) # USING THIS INSTEAD OF XLConnect ########################################################### # CALL IN IDAT TRANSLATION KEY SHEET ############################################################ idattrans <- data.table(read_excel("/net/rcnfs05/srv/export/baccarelli_lab/share_root/viva_450k/data/samplesheets/IDAT_Translation_Key_B.xlsx", sheet = 1),check.names=TRUE) dim(idattrans) # pull out a the unrelated subtable of chip and batch from idat translation key beadchipbatch <- idattrans[, list(BeadChip = BeadChip.1, Batch)] beadchipbatch <- beadchipbatch[!is.na(BeadChip)] # drop some extra columns idattrans[, c("Col9", "Col10", "Col11", "BeadChip.1", "Batch") := NULL] head(idattrans) # pull out sampleid idattrans[, sampleid := sub("WG.*_", "", Sample.1)] idattrans[, samplename := paste0(BeadChip, "_", Position)] head(idattrans) table(duplicated(idattrans$samplename)) # no duplicates ######### FAILED SAMPLES ################## # Create a flag for 'failed samples' failed <- grep("Failed",idattrans$Comment) print(table(idattrans$Comment[failed])) idattrans[, failed := F] idattrans[grepl("Fail", Comment), failed := T] table(idattrans$failed) ############## CONTROL SAMPLES ###################### array.Ctrl <- idattrans[sampleid == "Ctrl"] dim(array.Ctrl) # create a flag for control samples idattrans[, controlSamps := F] idattrans[sampleid == "Ctrl", controlSamps := T] table(idattrans$controlSamps) # make the Ctrl samples have a unique sampleid # (This doesn't have any immediate use in current script - but may be useful as code for later work on controls..) 
idattrans[sampleid == "Ctrl", sampleid := paste0("Ctrl", 1:.N)] head(idattrans[grepl("Ctrl",sampleid)]) #################################################### # CALL IN SHEET TO EXTRACT VIVA ALIAS ######################################################### # mapping of SAMPLE e.g. "S-001259392" to Viva alias "122972" vivaalias <- data.table(read_excel("/net/rcnfs05/srv/export/baccarelli_lab/share_root/viva_450k/data/samplesheets/VIVA_sample_mapping_with_age.xlsx", sheet = 1),check.names=TRUE) dim(vivaalias) head(vivaalias) vivaalias[, alias := as.numeric(gsub("c|C|-", "", RECRUITMENT.ID))] head(vivaalias) # check alias values are all in the fullphenodata that Dawn sent setnames(vivaalias, "SAMPLE", "sampleid") describe(vivaalias) # only 728 unique for 1125 unique sampleids in a list of 1161 values vivaalias[duplicated(vivaalias)] # our repeats are c116996 and c107473 # samples that were run multiple times --> 32 + 16 =48 vivaalias[alias %in% c(116996, 107473)] # is each sampleid occur only once - not always! vivaalias[, .N, by=sampleid][, describe(N)] # well, apart from the two aliases above it is: vivaalias[!alias %in% c(116996, 107473), .N, by = sampleid][, describe(N)] # does each sampleid map to a unique alias? Yes! 
vivaalias[, list(nalias = length(unique(alias))), by=sampleid][, describe(nalias)] # each non-qc alias can have 1-3 sampleids vivaalias[, .N, by=alias][, describe(N)] vivaalias[, .N, by=sampleid][, describe(N)] # clean up sample type/stage vivaalias[, stage := factor(COLLECTION, levels = c("Proband Delivery", "Proband Age 3", "Proband Age 7"), labels = c("CB", "Y3", "Y7"))] # make a var for qc samples vivaalias[alias == 116996 & stage == "CB", qcid := "MR32"] vivaalias[alias == 107473 & stage == "CB", qcid := "MR16"] ################################################# # MERGE IN ALIAS WITH IDAT TRANSLATION KEY TO HANDLE MULTIPLE REPEATS #################################################################### idattrans[, c("alias", "stage", "qcid") := NULL] # take it out first in case repeating idattrans <- merge(idattrans, unique(vivaalias[,list(sampleid, alias, stage, qcid)]), by = "sampleid", all.x = T) ### WE WANT TO KEEP 1 FROM EACH MULTIPLE REPEAT - ALLAN KEEPS THE SAME ONES THAT JARRETT DID!! ### # > pDat[sampleNames=='S-000670341','repUse'] <- TRUE # > pDat[sampleNames=='S-000621296','repUse'] <- TRUE # need an indicator for which samples are solely qc idattrans[, isqc := F] idattrans[alias %in% c(116996, 107473) & stage == "CB", isqc := T] # but we get back one of each - we need to pick # but which are the designated qc samps to use? (which of the 16 or 32 are on same chip as other samples?) # which chips are the non Cord Blood samples on ? idattrans[alias == 116996 & stage != "CB", list(sampleid, BeadChip, stage)] # which samples from that person are on those chips? idattrans[alias == 116996 & BeadChip %in% .Last.value[, BeadChip], list(sampleid, BeadChip, stage)] # how many of each sampleid were plated? 
(especially from the previous command) idattrans[alias == 116996 & stage == "CB", .N, by=sampleid] # so we will designate the singleton that is on the same chip as other samples - just like everyone else idattrans[alias == 116996 & stage == "CB" & sampleid == "S-000621296", isqc := F] # again for the next Multiple-Repeat sample idattrans[alias == 107473 & stage != "CB", list(sampleid, BeadChip, stage)] idattrans[alias == 107473 & BeadChip %in% .Last.value[, BeadChip], list(sampleid, BeadChip, stage)] idattrans[alias == 107473 & stage == "CB", .N, by=sampleid] idattrans[alias == 107473 & stage == "CB" & sampleid == "S-000670341", isqc := F] ################################################################ # SEE WHAT NUMBERS LOOK LIKE WHEN YOU SEQUENTIALLY DROP ACCORDING TO FLAGS ############################################################################## dim(idattrans) # dropping failed samples idattrans1 <- idattrans[failed==F,] dim(idattrans1) #so 226 dropped - as was the case in Jarrett's code # dropping control samples idattrans2 <- idattrans1[controlSamps==F,] # so 25 dropped!! This now matches array.Ctrl in Jarrett's code! ## array.Ctrl ## FALSE TRUE ## 1151 25 dim(idattrans2) # THIS MATCHES 1151 IN pDat from V14 FROM DAWN!! print(idattrans2[is.na(Sample)]) # none - these all disappeared with removing 'failed samples' head(idattrans2) class(idattrans2) # [1] "data.table" "data.frame" # NOW LOAD V14 VIVA DATA OFFICIALLY PROVIDED BY DAWN!! load("/net/rcnfs05/srv/export/baccarelli_lab/share_root/viva_450k/data/VivaThousandQC_BMIQ_1449230271_V14.RData") pDat <- data.table(pDat) head(pDat) # Both these have many of the same variables - make sure that they all correspond! 
# create a sampleid var in pDat just like that in idattrans2 pDat[, sampleid := sub("WG.*_", "", Sample.1)] pDat[, samplename := paste0(Slide, "_", Array)] # also create a similar variable in idattrans2 from beadchip and position idattrans2[, BeadChip_Position := paste0(BeadChip, "_", Position)] dim(pDat) dim(idattrans2) class(pDat$sampleid) class(idattrans2$sampleid) class(pDat$samplename) class(idattrans2$samplename) idattrans2_check <- idattrans2[idattrans2$sampleid %in% pDat$sampleid,] dim(idattrans2_check) idattrans2_check2 <- idattrans2[idattrans2$Sample %in% pDat$Sample,] dim(idattrans2_check2) idattrans2_check3 <- idattrans2[idattrans2$samplename %in% pDat$samplename,] dim(idattrans2_check3) idattrans2_check4 <- idattrans2[idattrans2$BeadChip_Position %in% pDat$samplename,] dim(idattrans2_check4) # Does "questionable quality" from our idattrans2 match "lowQual" from dawn's pDat? - YES! table(pDat$lowQual) idattrans2[, lowQual := F] idattrans2[grepl("Questionable Quality", Comment), lowQual := T] table(idattrans2$lowQual) # does our "isqc" match Dawn's "repUse" - YES! table(pDat$repUse) table(idattrans2$isqc) # # # so everything matches, now use pDat to make a manifest to replace idattrans2 # # # length(unique(idattrans2$samplename)) describe(idattrans2$samplename) length(unique(pDat$samplename)) describe(pDat$samplename) # match idattrans2 to pDat - ensure it is fully aligned idattransfinal <- idattrans2[match(pDat$samplename, samplename),] # Check thoroughly all(idattransfinal$samplename==pDat$samplename) identical(idattransfinal$samplename,pDat$samplename) all(idattransfinal$BeadChip_Position==pDat$samplename) all(idattransfinal$sampleid==pDat$sampleid) all(idattransfinal$Sample==pDat$Sample) # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
# NOW YOU CAN USE Dawn's pDat INSTEAD OF 'IDATTRANS' (i.e.IDATTRANS2) TO MERGE IN AS A MANIFEST # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # But we also want to add information on multiple scans # Call in master sheet created by Allan - with information on all folders/subfolders files <- read.csv("/net/rcnfs05/srv/export/baccarelli_lab/share_root/viva_450k/data/viva_idat_file_table_20141010.csv") dim(files) # [1] 3728 18 files <- data.table(files) head(files) # propagate subfolderversion to both green and red file for each barcode/folder combo (each scan) # GA: (b/c Allan initially filled in info form a lot of summary variables just for the 'Grn' files') files[, subfolderversion := .SD[color == "Grn", subfolderversion], by = c("barcode", "folder")] # We want to use parent-level(1st-level) folder IDATs - all samples coming from the same scanner! dim(files[subfolder == F]) dim(files[subfolderversion == 1,]) # so this corresponds with subfolder == F # this means that if subfolder == F, then subfolderversion == 1 - these are the parent-level IDATs # Subset to parent-level folders filesParent <- files[subfolder == F,] dim(filesParent) # So how many where nbarcode > 2 (i.e. scanned more than once)? dim(filesParent[nbarcode > 2]) table(filesParent$nbarcode, exclude=NULL) # make a variable to indicate which beadchips were scanned more than once vs. 
only once filesParent[,multipleScans := 0] filesParent[nbarcode > 2, multipleScans := 1] table(filesParent$multipleScans) # You also want to make a variable to indicate number of scans - based on 'nbarcode' filesParent[,NumOfScans:=nbarcode/2] table(filesParent$nbarcode, exclude=NULL) table(filesParent$NumOfScans, exclude=NULL) # worked perfectly # now divide # of beadchips by 2, because you have 2 of everything right now (1 for green, 1 for red) filesParentHalf<- filesParent[color == "Grn"] dim(filesParentHalf) # [1] 1402 19 table(filesParentHalf$multipleScans) # 384 matches the number indicated in Allan's email: "Illumina rescanned 384 samples..." table(filesParentHalf$NumOfScans) # Now use barcode/samplename to merge scan info into PDat dim(pDat) dim(filesParentHalf) # samplename in pDat = barcode class(pDat$samplename) class(filesParentHalf$barcode) filesParentHalf$barcode <- as.character(filesParentHalf$barcode) filesParentFinal <- filesParentHalf[filesParentHalf$barcode %in% pDat$samplename,] dim(filesParentFinal) filesParentFinal <- filesParentFinal[match(pDat$samplename, barcode),] all(filesParentFinal$barcode==pDat$samplename) identical(filesParentFinal$barcode,pDat$samplename) filesParentFINAL <- filesParentFinal[,c("barcode","multipleScans","NumOfScans"),with=FALSE] head(filesParentFINAL) all(filesParentFINAL$barcode==pDat$samplename) identical(filesParentFINAL$barcode,pDat$samplename) pDatFINAL <- cbind(pDat,filesParentFINAL) all(pDatFINAL$barcode==pDat$samplename) identical(pDatFINAL$barcode,pDat$samplename) identical(pDatFINAL$barcode,pDatFINAL$samplename) identical(filesParentFINAL$barcode,pDatFINAL$samplename) table(pDatFINAL$multipleScans) table(filesParentFinal$multipleScans) table(filesParentFINAL$multipleScans) table(pDatFINAL$NumOfScans) table(filesParentFinal$NumOfScans) table(filesParentFINAL$NumOfScans) # Rename pDatFINAL back to 'pDat' before merging it in as methylumi manifest rm(pDat) pDat <- pDatFINAL dim(pDat) # Call in the official 
iDATs obtained from Dawn and imported in Dec 2015 load("/net/rcnfs05/srv/export/baccarelli_lab/share_root/viva_450k/data/imported/raw_idat_all_official.RData") load("/net/rcnfs05/srv/export/baccarelli_lab/share_root/viva_450k/data/imported/raw_RGset_all_official.RData") dim(raw.idat.all) dim(raw.RGset.all) class(sampleNames(raw.idat.all)) class(sampleNames(raw.RGset.all)) class(pDat$samplename) # Restrict imported iDAT data to what is in pDat from Dawn raw.idat.official <- raw.idat.all[,sampleNames(raw.idat.all) %in% pDat$samplename] dim(raw.idat.official) raw.RGset.official <- raw.RGset.all[,sampleNames(raw.RGset.all) %in% pDat$samplename] dim(raw.RGset.official) rm(raw.idat.all) rm(raw.RGset.all) # prepare to merge in with datasets ########################################################## pDat <- pDat[match(sampleNames(raw.idat.official), samplename),] identical(sampleNames(raw.idat.official), pDat$samplename) all(sampleNames(raw.idat.official)==pDat$samplename) # bring them in as pData manifest1 <- as.data.frame(pDat[match(sampleNames(raw.idat.official), pDat$samplename),]) row.names(manifest1) <- manifest1$samplename # put them into the methylumi object if(exists("raw.idat.official") & identical(sampleNames(raw.idat.official), manifest1$samplename)){ pData(raw.idat.official) <- manifest1 } else { stop("REORDER YOUR DFCOVARS!!!")} save(raw.idat.official, file = "/net/rcnfs05/srv/export/baccarelli_lab/share_root/viva_450k/data/output/intermediate/raw_idat_V14_official.RData") rm(raw.idat.official) ###### pDat <- pDat[match(sampleNames(raw.RGset.official), samplename),] identical(sampleNames(raw.RGset.official), pDat$samplename) all(sampleNames(raw.RGset.official)==pDat$samplename) manifest2 <- as.data.frame(pDat[match(sampleNames(raw.RGset.official), pDat$samplename), ]) row.names(manifest2) <- manifest2$samplename # put them into the minfi object if(exists("raw.RGset.official") & identical(sampleNames(raw.RGset.official), manifest2$samplename)){ 
pData(raw.RGset.official) <- manifest2 } else { stop("REORDER YOUR DFCOVARS!!!")} save(raw.RGset.official, file = "/net/rcnfs05/srv/export/baccarelli_lab/share_root/viva_450k/data/output/intermediate/raw_RGset_V14_official.RData") rm(raw.RGset.official)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_plots.R \name{mplot_topcats} \alias{mplot_topcats} \title{Top Hit Ratios for Multi-Classification Models} \usage{ mplot_topcats(tag, score, multis, model_name = NA) } \arguments{ \item{tag}{Vector. Real known label.} \item{score}{Vector. Predicted value or model's result.} \item{multis}{Data.frame. Containing columns with each category probability or score (only used when more than 2 categories coexist).} \item{model_name}{Character. Model's name} } \description{ Calculate and plot a multi-class model's predictions accuracy based on top N predictions and distribution of probabilities. } \examples{ options("lares.font" = NA) # Temporal data(dfr) # Results for AutoML Predictions mplot_topcats(dfr$class3$tag, dfr$class3$score, multis = subset(dfr$class3, select = -c(tag, score)), model_name = "Titanic Class Model") } \seealso{ Other ML Visualization: \code{\link{mplot_conf}()}, \code{\link{mplot_cuts_error}()}, \code{\link{mplot_cuts}()}, \code{\link{mplot_density}()}, \code{\link{mplot_full}()}, \code{\link{mplot_gain}()}, \code{\link{mplot_importance}()}, \code{\link{mplot_lineal}()}, \code{\link{mplot_metrics}()}, \code{\link{mplot_response}()}, \code{\link{mplot_roc}()}, \code{\link{mplot_splits}()} } \concept{ML Visualization}
/man/mplot_topcats.Rd
no_license
alexandereric995/lares
R
false
true
1,366
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_plots.R \name{mplot_topcats} \alias{mplot_topcats} \title{Top Hit Ratios for Multi-Classification Models} \usage{ mplot_topcats(tag, score, multis, model_name = NA) } \arguments{ \item{tag}{Vector. Real known label.} \item{score}{Vector. Predicted value or model's result.} \item{multis}{Data.frame. Containing columns with each category probability or score (only used when more than 2 categories coexist).} \item{model_name}{Character. Model's name} } \description{ Calculate and plot a multi-class model's predictions accuracy based on top N predictions and distribution of probabilities. } \examples{ options("lares.font" = NA) # Temporal data(dfr) # Results for AutoML Predictions mplot_topcats(dfr$class3$tag, dfr$class3$score, multis = subset(dfr$class3, select = -c(tag, score)), model_name = "Titanic Class Model") } \seealso{ Other ML Visualization: \code{\link{mplot_conf}()}, \code{\link{mplot_cuts_error}()}, \code{\link{mplot_cuts}()}, \code{\link{mplot_density}()}, \code{\link{mplot_full}()}, \code{\link{mplot_gain}()}, \code{\link{mplot_importance}()}, \code{\link{mplot_lineal}()}, \code{\link{mplot_metrics}()}, \code{\link{mplot_response}()}, \code{\link{mplot_roc}()}, \code{\link{mplot_splits}()} } \concept{ML Visualization}
# Optimize a TSP tour with the ecr package on a random instance of
# 20 cities. Dedicated TSP solvers would of course do better; this
# example only demonstrates ecr's ability to operate on permutation
# representations, since a TSP tour is a permutation of the cities.
library(methods)
library(testthat)
library(devtools)
library(BBmisc)
library(tspmeta)
library(ggplot2)
load_all(".")

set.seed(1)

# Build a random Euclidean instance with 20 cities.
n.cities <- 20L
instance <- random_instance(size = n.cities)

# Fitness of a candidate tour is simply its total length (minimized).
tour.length.fun <- function(tour) {
  tour_length(x = TSP(instance$dists), order = as.integer(tour))
}

# Run a (mu, lambda) evolutionary algorithm on the permutation space.
result <- ecr(
  fitness.fun = tour.length.fun,
  n.objectives = 1L,
  representation = "permutation",
  perm = n.cities,
  mu = 50L,
  lambda = 50L,
  survival.strategy = "comma",
  n.elite = 25L,
  survival.selector = setupSimpleSelector(),
  recombinator = NULL,
  terminators = list(stopOnIters(1000L))
)
print(result)

# Visualize the best tour found.
print(autoplot(instance, opt_tour = result$best.x[[1L]]))
/inst/examples/tspmeta_example.R
no_license
niiwise/ecr2
R
false
false
1,028
r
# Optimize a TSP tour with the ecr package on a random instance of
# 20 cities. Dedicated TSP solvers would of course do better; this
# example only demonstrates ecr's ability to operate on permutation
# representations, since a TSP tour is a permutation of the cities.
library(methods)
library(testthat)
library(devtools)
library(BBmisc)
library(tspmeta)
library(ggplot2)
load_all(".")

set.seed(1)

# Build a random Euclidean instance with 20 cities.
n.cities <- 20L
instance <- random_instance(size = n.cities)

# Fitness of a candidate tour is simply its total length (minimized).
tour.length.fun <- function(tour) {
  tour_length(x = TSP(instance$dists), order = as.integer(tour))
}

# Run a (mu, lambda) evolutionary algorithm on the permutation space.
result <- ecr(
  fitness.fun = tour.length.fun,
  n.objectives = 1L,
  representation = "permutation",
  perm = n.cities,
  mu = 50L,
  lambda = 50L,
  survival.strategy = "comma",
  n.elite = 25L,
  survival.selector = setupSimpleSelector(),
  recombinator = NULL,
  terminators = list(stopOnIters(1000L))
)
print(result)

# Visualize the best tour found.
print(autoplot(instance, opt_tour = result$best.x[[1L]]))
# Exploratory Data Analysis, plot 2: line plot of Global Active Power
# over time for 2007-02-01 and 2007-02-02, written to plot2.png.
library(datasets)

# Read the full data set; "?" encodes missing values in this file.
power <- read.table("household_power_consumption.txt", sep = ";",
                    header = TRUE, na.strings = "?")

# Parse the date column and keep only the two days of interest.
power$Date <- as.Date(power$Date, "%d/%m/%Y")
power <- subset(power, Date >= "2007-02-01" & Date <= "2007-02-02")
row.names(power) <- NULL

# Combine date and time into a single POSIXct timestamp for the x-axis.
power$DateTime <- as.POSIXct(paste(power$Date, power$Time),
                             format = "%Y-%m-%d %H:%M:%S")

# Render the plot to a 480x480 PNG device.
png(filename = "plot2.png", width = 480, height = 480)
with(power, plot(DateTime, Global_active_power, type = "l",
                 xlab = "", ylab = "Global Active Power (kilowatts)"))
dev.off()
/plot2.R
no_license
tchenowe/ExploratoryDataAnalysisProj1
R
false
false
509
r
# Exploratory Data Analysis, plot 2: line plot of Global Active Power
# over time for 2007-02-01 and 2007-02-02, written to plot2.png.
library(datasets)

# Read the full data set; "?" encodes missing values in this file.
power <- read.table("household_power_consumption.txt", sep = ";",
                    header = TRUE, na.strings = "?")

# Parse the date column and keep only the two days of interest.
power$Date <- as.Date(power$Date, "%d/%m/%Y")
power <- subset(power, Date >= "2007-02-01" & Date <= "2007-02-02")
row.names(power) <- NULL

# Combine date and time into a single POSIXct timestamp for the x-axis.
power$DateTime <- as.POSIXct(paste(power$Date, power$Time),
                             format = "%Y-%m-%d %H:%M:%S")

# Render the plot to a 480x480 PNG device.
png(filename = "plot2.png", width = 480, height = 480)
with(power, plot(DateTime, Global_active_power, type = "l",
                 xlab = "", ylab = "Global Active Power (kilowatts)"))
dev.off()
# Swaggy Jenkins
#
# Jenkins API clients generated from Swagger / Open API specification
#
# OpenAPI spec version: 1.0.0
# Contact: blah@cliffano.com
# Generated by: https://openapi-generator.tech

#' PipelineRunImpllinks Class
#'
#' Generated model for the HAL-style `_links` section of a pipeline run.
#' Each field except `_class` holds a Link object (declared elsewhere in
#' this generated client) pointing at a related resource.
#'
#' @field nodes Link object for the run's nodes resource.
#' @field log Link object for the run's log resource.
#' @field self Link object for the run itself.
#' @field actions Link object for the run's actions resource.
#' @field steps Link object for the run's steps resource.
#' @field _class Character scalar; Jenkins class name of the payload.
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
PipelineRunImpllinks <- R6::R6Class(
  'PipelineRunImpllinks',
  public = list(
    `nodes` = NULL,
    `log` = NULL,
    `self` = NULL,
    `actions` = NULL,
    `steps` = NULL,
    `_class` = NULL,
    # Constructor: every argument is optional. Link-valued arguments must
    # be R6 objects; `_class` must be a length-1 character vector.
    initialize = function(`nodes`, `log`, `self`, `actions`, `steps`, `_class`){
      if (!missing(`nodes`)) {
        stopifnot(R6::is.R6(`nodes`))
        self$`nodes` <- `nodes`
      }
      if (!missing(`log`)) {
        stopifnot(R6::is.R6(`log`))
        self$`log` <- `log`
      }
      if (!missing(`self`)) {
        stopifnot(R6::is.R6(`self`))
        self$`self` <- `self`
      }
      if (!missing(`actions`)) {
        stopifnot(R6::is.R6(`actions`))
        self$`actions` <- `actions`
      }
      if (!missing(`steps`)) {
        stopifnot(R6::is.R6(`steps`))
        self$`steps` <- `steps`
      }
      if (!missing(`_class`)) {
        stopifnot(is.character(`_class`), length(`_class`) == 1)
        self$`_class` <- `_class`
      }
    },
    # Convert this object to a named list suitable for jsonlite
    # serialisation; NULL fields are omitted, Link fields are converted
    # recursively via their own toJSON().
    toJSON = function() {
      PipelineRunImpllinksObject <- list()
      if (!is.null(self$`nodes`)) {
        PipelineRunImpllinksObject[['nodes']] <- self$`nodes`$toJSON()
      }
      if (!is.null(self$`log`)) {
        PipelineRunImpllinksObject[['log']] <- self$`log`$toJSON()
      }
      if (!is.null(self$`self`)) {
        PipelineRunImpllinksObject[['self']] <- self$`self`$toJSON()
      }
      if (!is.null(self$`actions`)) {
        PipelineRunImpllinksObject[['actions']] <- self$`actions`$toJSON()
      }
      if (!is.null(self$`steps`)) {
        PipelineRunImpllinksObject[['steps']] <- self$`steps`$toJSON()
      }
      if (!is.null(self$`_class`)) {
        PipelineRunImpllinksObject[['_class']] <- self$`_class`
      }
      PipelineRunImpllinksObject
    },
    # Populate this instance from a JSON string: fields present in the
    # input are re-serialised and parsed into fresh Link instances; absent
    # fields are left untouched.
    fromJSON = function(PipelineRunImpllinksJson) {
      PipelineRunImpllinksObject <- jsonlite::fromJSON(PipelineRunImpllinksJson)
      if (!is.null(PipelineRunImpllinksObject$`nodes`)) {
        nodesObject <- Link$new()
        nodesObject$fromJSON(jsonlite::toJSON(PipelineRunImpllinksObject$nodes, auto_unbox = TRUE))
        self$`nodes` <- nodesObject
      }
      if (!is.null(PipelineRunImpllinksObject$`log`)) {
        logObject <- Link$new()
        logObject$fromJSON(jsonlite::toJSON(PipelineRunImpllinksObject$log, auto_unbox = TRUE))
        self$`log` <- logObject
      }
      if (!is.null(PipelineRunImpllinksObject$`self`)) {
        selfObject <- Link$new()
        selfObject$fromJSON(jsonlite::toJSON(PipelineRunImpllinksObject$self, auto_unbox = TRUE))
        self$`self` <- selfObject
      }
      if (!is.null(PipelineRunImpllinksObject$`actions`)) {
        actionsObject <- Link$new()
        actionsObject$fromJSON(jsonlite::toJSON(PipelineRunImpllinksObject$actions, auto_unbox = TRUE))
        self$`actions` <- actionsObject
      }
      if (!is.null(PipelineRunImpllinksObject$`steps`)) {
        stepsObject <- Link$new()
        stepsObject$fromJSON(jsonlite::toJSON(PipelineRunImpllinksObject$steps, auto_unbox = TRUE))
        self$`steps` <- stepsObject
      }
      if (!is.null(PipelineRunImpllinksObject$`_class`)) {
        self$`_class` <- PipelineRunImpllinksObject$`_class`
      }
    },
    # Hand-built JSON string of this object.
    # NOTE(review): `_class` is interpolated without surrounding quotes,
    # so the generated generator output yields invalid JSON for that
    # field — confirm/fix upstream in the generator template.
    toJSONString = function() {
      sprintf(
        '{ "nodes": %s, "log": %s, "self": %s, "actions": %s, "steps": %s, "_class": %s }',
        self$`nodes`$toJSON(),
        self$`log`$toJSON(),
        self$`self`$toJSON(),
        self$`actions`$toJSON(),
        self$`steps`$toJSON(),
        self$`_class`
      )
    },
    # Populate this instance from a JSON string, unconditionally assigning
    # every field (unlike fromJSON, missing input fields overwrite with
    # the result of parsing NULL).
    fromJSONString = function(PipelineRunImpllinksJson) {
      PipelineRunImpllinksObject <- jsonlite::fromJSON(PipelineRunImpllinksJson)
      LinkObject <- Link$new()
      self$`nodes` <- LinkObject$fromJSON(jsonlite::toJSON(PipelineRunImpllinksObject$nodes, auto_unbox = TRUE))
      LinkObject <- Link$new()
      self$`log` <- LinkObject$fromJSON(jsonlite::toJSON(PipelineRunImpllinksObject$log, auto_unbox = TRUE))
      LinkObject <- Link$new()
      self$`self` <- LinkObject$fromJSON(jsonlite::toJSON(PipelineRunImpllinksObject$self, auto_unbox = TRUE))
      LinkObject <- Link$new()
      self$`actions` <- LinkObject$fromJSON(jsonlite::toJSON(PipelineRunImpllinksObject$actions, auto_unbox = TRUE))
      LinkObject <- Link$new()
      self$`steps` <- LinkObject$fromJSON(jsonlite::toJSON(PipelineRunImpllinksObject$steps, auto_unbox = TRUE))
      self$`_class` <- PipelineRunImpllinksObject$`_class`
    }
  )
)
/clients/r/generated/R/PipelineRunImpllinks.r
permissive
rahulyhg/swaggy-jenkins
R
false
false
4,948
r
# Swaggy Jenkins
#
# Jenkins API clients generated from Swagger / Open API specification
#
# OpenAPI spec version: 1.0.0
# Contact: blah@cliffano.com
# Generated by: https://openapi-generator.tech

#' PipelineRunImpllinks Class
#'
#' Generated model for the HAL-style `_links` section of a pipeline run.
#' Each field except `_class` holds a Link object (declared elsewhere in
#' this generated client) pointing at a related resource.
#'
#' @field nodes Link object for the run's nodes resource.
#' @field log Link object for the run's log resource.
#' @field self Link object for the run itself.
#' @field actions Link object for the run's actions resource.
#' @field steps Link object for the run's steps resource.
#' @field _class Character scalar; Jenkins class name of the payload.
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
PipelineRunImpllinks <- R6::R6Class(
  'PipelineRunImpllinks',
  public = list(
    `nodes` = NULL,
    `log` = NULL,
    `self` = NULL,
    `actions` = NULL,
    `steps` = NULL,
    `_class` = NULL,
    # Constructor: every argument is optional. Link-valued arguments must
    # be R6 objects; `_class` must be a length-1 character vector.
    initialize = function(`nodes`, `log`, `self`, `actions`, `steps`, `_class`){
      if (!missing(`nodes`)) {
        stopifnot(R6::is.R6(`nodes`))
        self$`nodes` <- `nodes`
      }
      if (!missing(`log`)) {
        stopifnot(R6::is.R6(`log`))
        self$`log` <- `log`
      }
      if (!missing(`self`)) {
        stopifnot(R6::is.R6(`self`))
        self$`self` <- `self`
      }
      if (!missing(`actions`)) {
        stopifnot(R6::is.R6(`actions`))
        self$`actions` <- `actions`
      }
      if (!missing(`steps`)) {
        stopifnot(R6::is.R6(`steps`))
        self$`steps` <- `steps`
      }
      if (!missing(`_class`)) {
        stopifnot(is.character(`_class`), length(`_class`) == 1)
        self$`_class` <- `_class`
      }
    },
    # Convert this object to a named list suitable for jsonlite
    # serialisation; NULL fields are omitted, Link fields are converted
    # recursively via their own toJSON().
    toJSON = function() {
      PipelineRunImpllinksObject <- list()
      if (!is.null(self$`nodes`)) {
        PipelineRunImpllinksObject[['nodes']] <- self$`nodes`$toJSON()
      }
      if (!is.null(self$`log`)) {
        PipelineRunImpllinksObject[['log']] <- self$`log`$toJSON()
      }
      if (!is.null(self$`self`)) {
        PipelineRunImpllinksObject[['self']] <- self$`self`$toJSON()
      }
      if (!is.null(self$`actions`)) {
        PipelineRunImpllinksObject[['actions']] <- self$`actions`$toJSON()
      }
      if (!is.null(self$`steps`)) {
        PipelineRunImpllinksObject[['steps']] <- self$`steps`$toJSON()
      }
      if (!is.null(self$`_class`)) {
        PipelineRunImpllinksObject[['_class']] <- self$`_class`
      }
      PipelineRunImpllinksObject
    },
    # Populate this instance from a JSON string: fields present in the
    # input are re-serialised and parsed into fresh Link instances; absent
    # fields are left untouched.
    fromJSON = function(PipelineRunImpllinksJson) {
      PipelineRunImpllinksObject <- jsonlite::fromJSON(PipelineRunImpllinksJson)
      if (!is.null(PipelineRunImpllinksObject$`nodes`)) {
        nodesObject <- Link$new()
        nodesObject$fromJSON(jsonlite::toJSON(PipelineRunImpllinksObject$nodes, auto_unbox = TRUE))
        self$`nodes` <- nodesObject
      }
      if (!is.null(PipelineRunImpllinksObject$`log`)) {
        logObject <- Link$new()
        logObject$fromJSON(jsonlite::toJSON(PipelineRunImpllinksObject$log, auto_unbox = TRUE))
        self$`log` <- logObject
      }
      if (!is.null(PipelineRunImpllinksObject$`self`)) {
        selfObject <- Link$new()
        selfObject$fromJSON(jsonlite::toJSON(PipelineRunImpllinksObject$self, auto_unbox = TRUE))
        self$`self` <- selfObject
      }
      if (!is.null(PipelineRunImpllinksObject$`actions`)) {
        actionsObject <- Link$new()
        actionsObject$fromJSON(jsonlite::toJSON(PipelineRunImpllinksObject$actions, auto_unbox = TRUE))
        self$`actions` <- actionsObject
      }
      if (!is.null(PipelineRunImpllinksObject$`steps`)) {
        stepsObject <- Link$new()
        stepsObject$fromJSON(jsonlite::toJSON(PipelineRunImpllinksObject$steps, auto_unbox = TRUE))
        self$`steps` <- stepsObject
      }
      if (!is.null(PipelineRunImpllinksObject$`_class`)) {
        self$`_class` <- PipelineRunImpllinksObject$`_class`
      }
    },
    # Hand-built JSON string of this object.
    # NOTE(review): `_class` is interpolated without surrounding quotes,
    # so the generated generator output yields invalid JSON for that
    # field — confirm/fix upstream in the generator template.
    toJSONString = function() {
      sprintf(
        '{ "nodes": %s, "log": %s, "self": %s, "actions": %s, "steps": %s, "_class": %s }',
        self$`nodes`$toJSON(),
        self$`log`$toJSON(),
        self$`self`$toJSON(),
        self$`actions`$toJSON(),
        self$`steps`$toJSON(),
        self$`_class`
      )
    },
    # Populate this instance from a JSON string, unconditionally assigning
    # every field (unlike fromJSON, missing input fields overwrite with
    # the result of parsing NULL).
    fromJSONString = function(PipelineRunImpllinksJson) {
      PipelineRunImpllinksObject <- jsonlite::fromJSON(PipelineRunImpllinksJson)
      LinkObject <- Link$new()
      self$`nodes` <- LinkObject$fromJSON(jsonlite::toJSON(PipelineRunImpllinksObject$nodes, auto_unbox = TRUE))
      LinkObject <- Link$new()
      self$`log` <- LinkObject$fromJSON(jsonlite::toJSON(PipelineRunImpllinksObject$log, auto_unbox = TRUE))
      LinkObject <- Link$new()
      self$`self` <- LinkObject$fromJSON(jsonlite::toJSON(PipelineRunImpllinksObject$self, auto_unbox = TRUE))
      LinkObject <- Link$new()
      self$`actions` <- LinkObject$fromJSON(jsonlite::toJSON(PipelineRunImpllinksObject$actions, auto_unbox = TRUE))
      LinkObject <- Link$new()
      self$`steps` <- LinkObject$fromJSON(jsonlite::toJSON(PipelineRunImpllinksObject$steps, auto_unbox = TRUE))
      self$`_class` <- PipelineRunImpllinksObject$`_class`
    }
  )
)
# Download Procter & Gamble adjusted close prices and compute weekly and
# monthly simple returns, written out as CSV files.
#
# BUG FIX: the original script referenced undefined objects (`wPG`,
# `wr.PG`, `mPG`, `rm.PG`) where it had defined `w.PG`, `lwr.PG`, `m.PG`
# and `lmr.PG`, so every return computation failed with "object not
# found". The references below use the names actually defined.
library(tseries)

# Procter and gamble
# na.locf removes NA and carries forward the most recent non NA value
PG <- na.locf(get.hist.quote("PG", quote = "Adj", start = "2005-12-25",
                             retclass = "zoo"))

# Map any date to the following Friday, so prices aggregate to week-end dates.
nextfri.Date <- function(x) 7 * ceiling(as.numeric(x - 1) / 7) + as.Date(1)

# Week-end prices: last observation in each week.
w.PG <- aggregate(PG, nextfri.Date, tail, 1)

# Month-end prices: last observation in each month.
m.PG <- aggregate(PG, as.yearmon, tail, 1)

# Convert weekly prices into weekly returns
lwr.PG <- diff(log(w.PG))   # log returns (was: wPG, undefined)
swr.PG <- exp(lwr.PG) - 1   # back to simple returns (was: wr.PG, undefined)

# Convert monthly prices into monthly returns
lmr.PG <- diff(log(m.PG))   # log returns (was: mPG, undefined)
smr.PG <- exp(lmr.PG) - 1   # simple returns (was: rm.PG, undefined)

# Write output data to csv file
write.zoo(swr.PG, file = "swr_PG.csv", sep = ",",
          col.names = c("Dates", "Percent Return"))
write.zoo(smr.PG, file = "smr_PG.csv", sep = ",",
          col.names = c("Dates", "Percent Return"))
/finance/pull_yahoo.R
no_license
agawronski/datasciencecoursera
R
false
false
905
r
# Download Procter & Gamble adjusted close prices and compute weekly and
# monthly simple returns, written out as CSV files.
#
# BUG FIX: the original script referenced undefined objects (`wPG`,
# `wr.PG`, `mPG`, `rm.PG`) where it had defined `w.PG`, `lwr.PG`, `m.PG`
# and `lmr.PG`, so every return computation failed with "object not
# found". The references below use the names actually defined.
library(tseries)

# Procter and gamble
# na.locf removes NA and carries forward the most recent non NA value
PG <- na.locf(get.hist.quote("PG", quote = "Adj", start = "2005-12-25",
                             retclass = "zoo"))

# Map any date to the following Friday, so prices aggregate to week-end dates.
nextfri.Date <- function(x) 7 * ceiling(as.numeric(x - 1) / 7) + as.Date(1)

# Week-end prices: last observation in each week.
w.PG <- aggregate(PG, nextfri.Date, tail, 1)

# Month-end prices: last observation in each month.
m.PG <- aggregate(PG, as.yearmon, tail, 1)

# Convert weekly prices into weekly returns
lwr.PG <- diff(log(w.PG))   # log returns (was: wPG, undefined)
swr.PG <- exp(lwr.PG) - 1   # back to simple returns (was: wr.PG, undefined)

# Convert monthly prices into monthly returns
lmr.PG <- diff(log(m.PG))   # log returns (was: mPG, undefined)
smr.PG <- exp(lmr.PG) - 1   # simple returns (was: rm.PG, undefined)

# Write output data to csv file
write.zoo(swr.PG, file = "swr_PG.csv", sep = ",",
          col.names = c("Dates", "Percent Return"))
write.zoo(smr.PG, file = "smr_PG.csv", sep = ",",
          col.names = c("Dates", "Percent Return"))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/irf.R, R/plot.bvarirf.R \name{irf} \alias{irf} \alias{plot.bvarirf} \title{Impulse Response Function} \usage{ irf(object, impulse = NULL, response = NULL, n.ahead = 5, ci = 0.95, type = "feir", cumulative = FALSE, keep_draws = FALSE) \method{plot}{bvarirf}(x, ...) } \arguments{ \item{object}{an object of class \code{"bvar"}, usually, a result of a call to \code{\link{bvar}} or \code{\link{bvec_to_bvar}}.} \item{impulse}{name of the impulse variable.} \item{response}{name of the response variable.} \item{n.ahead}{number of steps ahead.} \item{ci}{a numeric between 0 and 1 specifying the probability mass covered by the credible intervals. Defaults to 0.95.} \item{type}{type of the impulse response. Possible choices are forecast error \code{"feir"} (default), orthogonalised \code{"oir"}, structural \code{"sir"}, generalised \code{"gir"}, and structural generalised \code{"sgir"} impulse responses.} \item{cumulative}{logical specifying whether a cumulative IRF should be calculated.} \item{keep_draws}{logical specifying whether the function should return all draws of the posterior impulse response function. Defaults to \code{FALSE} so that the median and the credible intervals of the posterior draws are returned.} \item{x}{an object of class "bvarirf", usually, a result of a call to \code{\link{irf}}.} \item{...}{further graphical parameters.} } \value{ A time-series object of class \code{"bvarirf"} and if \code{keep_draws = TRUE} a simple matrix. } \description{ Computes the impulse response coefficients of an object of class \code{"bvar"} for \code{n.ahead} steps. A plot function for objects of class "bvarirf". } \details{ The function produces different types of impulse responses for the VAR model \deqn{y_t = \sum_{i = 1}^{p} A_{i} y_{t-i} + A_0^{-1} u_t,} with \eqn{u_t \sim N(0, \Sigma)}. 
Forecast error impulse responses \eqn{\Phi_i} are obtained by recursions \deqn{\Phi_i = \sum_{j = 1}^{i} \Phi_{i-j} A_j, i = 1, 2,...,h} with \eqn{\Phi_0 = I_K}. Orthogonalised impulse responses \eqn{\Theta^o_i} are calculated as \eqn{\Theta^o_i = \Phi_i P}, where P is the lower triangular Choleski decomposition of \eqn{\Sigma}. \eqn{A_0} is assumed to be an identity matrix. Structural impulse responses \eqn{\Theta^s_i} are calculated as \eqn{\Theta^s_i = \Phi_i A_0^{-1}}. (Structural) Generalised impulse responses for variable \eqn{j}, i.e. \eqn{\Theta^g_{ji}} are calculated as \eqn{\Theta^g_{ji} = \sigma_{jj}^{-1/2} \Phi_i A_0^{-1} \Sigma e_j}, where \eqn{\sigma_{jj}} is the variance of the \eqn{j^{th}} diagonal element of \eqn{\Sigma} and \eqn{e_j} is a selection vector containing one in its \eqn{j^{th}} element and zero otherwise. If the \code{"bvar"} object does not contain draws of \eqn{A_0}, it is assumed to be an identity matrix. } \examples{ data("e1") e1 <- diff(log(e1)) data <- gen_var(e1, p = 2, deterministic = "const") y <- data$Y[, 1:73] x <- data$Z[, 1:73] set.seed(1234567) iter <- 500 # Number of iterations of the Gibbs sampler # Chosen number of iterations should be much higher, e.g. 30000. 
burnin <- 100 # Number of burn-in draws store <- iter - burnin t <- ncol(y) # Number of observations k <- nrow(y) # Number of endogenous variables m <- k * nrow(x) # Number of estimated coefficients # Set (uninformative) priors a_mu_prior <- matrix(0, m) # Vector of prior parameter means a_v_i_prior <- diag(0, m) # Inverse of the prior covariance matrix u_sigma_df_prior <- 0 # Prior degrees of freedom u_sigma_scale_prior <- diag(0, k) # Prior covariance matrix u_sigma_df_post <- t + u_sigma_df_prior # Posterior degrees of freedom # Initial values u_sigma_i <- diag(.00001, k) u_sigma <- solve(u_sigma_i) # Data containers for posterior draws draws_a <- matrix(NA, m, store) draws_sigma <- matrix(NA, k^2, store) # Start Gibbs sampler for (draw in 1:iter) { # Draw conditional mean parameters a <- post_normal(y, x, u_sigma_i, a_mu_prior, a_v_i_prior) # Draw variance-covariance matrix u <- y - matrix(a, k) \%*\% x # Obtain residuals u_sigma_scale_post <- solve(u_sigma_scale_prior + tcrossprod(u)) u_sigma_i <- matrix(rWishart(1, u_sigma_df_post, u_sigma_scale_post)[,, 1], k) u_sigma <- solve(u_sigma_i) # Invert Sigma_i to obtain Sigma # Store draws if (draw > burnin) { draws_a[, draw - burnin] <- a draws_sigma[, draw - burnin] <- u_sigma } } # Generate bvar object bvar_est <- bvar(y = y, x = x, A = draws_a[1:18,], C = draws_a[19:21, ], Sigma = draws_sigma) # Generate impulse response IR <- irf(bvar_est, impulse = "income", response = "cons", n.ahead = 8) # Plot plot(IR, main = "Forecast Error Impulse Response", xlab = "Period", ylab = "Response") data("e1") e1 <- diff(log(e1)) data <- gen_var(e1, p = 2, deterministic = "const") y <- data$Y[, 1:73] x <- data$Z[, 1:73] set.seed(1234567) iter <- 500 # Number of iterations of the Gibbs sampler # Chosen number of iterations should be much higher, e.g. 30000. 
burnin <- 100 # Number of burn-in draws store <- iter - burnin t <- ncol(y) # Number of observations k <- nrow(y) # Number of endogenous variables m <- k * nrow(x) # Number of estimated coefficients # Set (uninformative) priors a_mu_prior <- matrix(0, m) # Vector of prior parameter means a_v_i_prior <- diag(0, m) # Inverse of the prior covariance matrix u_sigma_df_prior <- 0 # Prior degrees of freedom u_sigma_scale_prior <- diag(0, k) # Prior covariance matrix u_sigma_df_post <- t + u_sigma_df_prior # Posterior degrees of freedom # Initial values u_sigma_i <- diag(.00001, k) u_sigma <- solve(u_sigma_i) # Data containers for posterior draws draws_a <- matrix(NA, m, store) draws_sigma <- matrix(NA, k^2, store) # Start Gibbs sampler for (draw in 1:iter) { # Draw conditional mean parameters a <- post_normal(y, x, u_sigma_i, a_mu_prior, a_v_i_prior) # Draw variance-covariance matrix u <- y - matrix(a, k) \%*\% x # Obtain residuals u_sigma_scale_post <- solve(u_sigma_scale_prior + tcrossprod(u)) u_sigma_i <- matrix(rWishart(1, u_sigma_df_post, u_sigma_scale_post)[,, 1], k) u_sigma <- solve(u_sigma_i) # Invert Sigma_i to obtain Sigma # Store draws if (draw > burnin) { draws_a[, draw - burnin] <- a draws_sigma[, draw - burnin] <- u_sigma } } # Generate bvar object bvar_est <- bvar(y = y, x = x, A = draws_a[1:18,], C = draws_a[19:21, ], Sigma = draws_sigma) # Generate impulse response IR <- irf(bvar_est, impulse = "income", response = "cons", n.ahead = 8) # Plot plot(IR, main = "Forecast Error Impulse Response", xlab = "Period", ylab = "Response") } \references{ Lütkepohl, H. (2007). \emph{New introduction to multiple time series analysis} (2nd ed.). Berlin: Springer. Pesaran, H. H., Shin, Y. (1998). Generalized impulse response analysis in linear multivariate models. \emph{Economics Letters, 58}, 17-29. }
/man/irf.Rd
no_license
skycaptainleo/bvartools
R
false
true
6,888
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/irf.R, R/plot.bvarirf.R \name{irf} \alias{irf} \alias{plot.bvarirf} \title{Impulse Response Function} \usage{ irf(object, impulse = NULL, response = NULL, n.ahead = 5, ci = 0.95, type = "feir", cumulative = FALSE, keep_draws = FALSE) \method{plot}{bvarirf}(x, ...) } \arguments{ \item{object}{an object of class \code{"bvar"}, usually, a result of a call to \code{\link{bvar}} or \code{\link{bvec_to_bvar}}.} \item{impulse}{name of the impulse variable.} \item{response}{name of the response variable.} \item{n.ahead}{number of steps ahead.} \item{ci}{a numeric between 0 and 1 specifying the probability mass covered by the credible intervals. Defaults to 0.95.} \item{type}{type of the impulse response. Possible choices are forecast error \code{"feir"} (default), orthogonalised \code{"oir"}, structural \code{"sir"}, generalised \code{"gir"}, and structural generalised \code{"sgir"} impulse responses.} \item{cumulative}{logical specifying whether a cumulative IRF should be calculated.} \item{keep_draws}{logical specifying whether the function should return all draws of the posterior impulse response function. Defaults to \code{FALSE} so that the median and the credible intervals of the posterior draws are returned.} \item{x}{an object of class "bvarirf", usually, a result of a call to \code{\link{irf}}.} \item{...}{further graphical parameters.} } \value{ A time-series object of class \code{"bvarirf"} and if \code{keep_draws = TRUE} a simple matrix. } \description{ Computes the impulse response coefficients of an object of class \code{"bvar"} for \code{n.ahead} steps. A plot function for objects of class "bvarirf". } \details{ The function produces different types of impulse responses for the VAR model \deqn{y_t = \sum_{i = 1}^{p} A_{i} y_{t-i} + A_0^{-1} u_t,} with \eqn{u_t \sim N(0, \Sigma)}. 
Forecast error impulse responses \eqn{\Phi_i} are obtained by recursions \deqn{\Phi_i = \sum_{j = 1}^{i} \Phi_{i-j} A_j, i = 1, 2,...,h} with \eqn{\Phi_0 = I_K}. Orthogonalised impulse responses \eqn{\Theta^o_i} are calculated as \eqn{\Theta^o_i = \Phi_i P}, where P is the lower triangular Choleski decomposition of \eqn{\Sigma}. \eqn{A_0} is assumed to be an identity matrix. Structural impulse responses \eqn{\Theta^s_i} are calculated as \eqn{\Theta^s_i = \Phi_i A_0^{-1}}. (Structural) Generalised impulse responses for variable \eqn{j}, i.e. \eqn{\Theta^g_{ji}} are calculated as \eqn{\Theta^g_{ji} = \sigma_{jj}^{-1/2} \Phi_i A_0^{-1} \Sigma e_j}, where \eqn{\sigma_{jj}} is the variance of the \eqn{j^{th}} diagonal element of \eqn{\Sigma} and \eqn{e_j} is a selection vector containing one in its \eqn{j^{th}} element and zero otherwise. If the \code{"bvar"} object does not contain draws of \eqn{A_0}, it is assumed to be an identity matrix. } \examples{ data("e1") e1 <- diff(log(e1)) data <- gen_var(e1, p = 2, deterministic = "const") y <- data$Y[, 1:73] x <- data$Z[, 1:73] set.seed(1234567) iter <- 500 # Number of iterations of the Gibbs sampler # Chosen number of iterations should be much higher, e.g. 30000. 
burnin <- 100 # Number of burn-in draws store <- iter - burnin t <- ncol(y) # Number of observations k <- nrow(y) # Number of endogenous variables m <- k * nrow(x) # Number of estimated coefficients # Set (uninformative) priors a_mu_prior <- matrix(0, m) # Vector of prior parameter means a_v_i_prior <- diag(0, m) # Inverse of the prior covariance matrix u_sigma_df_prior <- 0 # Prior degrees of freedom u_sigma_scale_prior <- diag(0, k) # Prior covariance matrix u_sigma_df_post <- t + u_sigma_df_prior # Posterior degrees of freedom # Initial values u_sigma_i <- diag(.00001, k) u_sigma <- solve(u_sigma_i) # Data containers for posterior draws draws_a <- matrix(NA, m, store) draws_sigma <- matrix(NA, k^2, store) # Start Gibbs sampler for (draw in 1:iter) { # Draw conditional mean parameters a <- post_normal(y, x, u_sigma_i, a_mu_prior, a_v_i_prior) # Draw variance-covariance matrix u <- y - matrix(a, k) \%*\% x # Obtain residuals u_sigma_scale_post <- solve(u_sigma_scale_prior + tcrossprod(u)) u_sigma_i <- matrix(rWishart(1, u_sigma_df_post, u_sigma_scale_post)[,, 1], k) u_sigma <- solve(u_sigma_i) # Invert Sigma_i to obtain Sigma # Store draws if (draw > burnin) { draws_a[, draw - burnin] <- a draws_sigma[, draw - burnin] <- u_sigma } } # Generate bvar object bvar_est <- bvar(y = y, x = x, A = draws_a[1:18,], C = draws_a[19:21, ], Sigma = draws_sigma) # Generate impulse response IR <- irf(bvar_est, impulse = "income", response = "cons", n.ahead = 8) # Plot plot(IR, main = "Forecast Error Impulse Response", xlab = "Period", ylab = "Response") data("e1") e1 <- diff(log(e1)) data <- gen_var(e1, p = 2, deterministic = "const") y <- data$Y[, 1:73] x <- data$Z[, 1:73] set.seed(1234567) iter <- 500 # Number of iterations of the Gibbs sampler # Chosen number of iterations should be much higher, e.g. 30000. 
burnin <- 100 # Number of burn-in draws store <- iter - burnin t <- ncol(y) # Number of observations k <- nrow(y) # Number of endogenous variables m <- k * nrow(x) # Number of estimated coefficients # Set (uninformative) priors a_mu_prior <- matrix(0, m) # Vector of prior parameter means a_v_i_prior <- diag(0, m) # Inverse of the prior covariance matrix u_sigma_df_prior <- 0 # Prior degrees of freedom u_sigma_scale_prior <- diag(0, k) # Prior covariance matrix u_sigma_df_post <- t + u_sigma_df_prior # Posterior degrees of freedom # Initial values u_sigma_i <- diag(.00001, k) u_sigma <- solve(u_sigma_i) # Data containers for posterior draws draws_a <- matrix(NA, m, store) draws_sigma <- matrix(NA, k^2, store) # Start Gibbs sampler for (draw in 1:iter) { # Draw conditional mean parameters a <- post_normal(y, x, u_sigma_i, a_mu_prior, a_v_i_prior) # Draw variance-covariance matrix u <- y - matrix(a, k) \%*\% x # Obtain residuals u_sigma_scale_post <- solve(u_sigma_scale_prior + tcrossprod(u)) u_sigma_i <- matrix(rWishart(1, u_sigma_df_post, u_sigma_scale_post)[,, 1], k) u_sigma <- solve(u_sigma_i) # Invert Sigma_i to obtain Sigma # Store draws if (draw > burnin) { draws_a[, draw - burnin] <- a draws_sigma[, draw - burnin] <- u_sigma } } # Generate bvar object bvar_est <- bvar(y = y, x = x, A = draws_a[1:18,], C = draws_a[19:21, ], Sigma = draws_sigma) # Generate impulse response IR <- irf(bvar_est, impulse = "income", response = "cons", n.ahead = 8) # Plot plot(IR, main = "Forecast Error Impulse Response", xlab = "Period", ylab = "Response") } \references{ Lütkepohl, H. (2007). \emph{New introduction to multiple time series analysis} (2nd ed.). Berlin: Springer. Pesaran, H. H., Shin, Y. (1998). Generalized impulse response analysis in linear multivariate models. \emph{Economics Letters, 58}, 17-29. }
# EDX Capstone
# ddalnekoff
# You Wanted A Hit?
# Artist Genre Grouping Work
# A note on this file: This includes work I started to attempt to identify and
# assign a "major" genre to each artist in order to add additional variables to
# the dataset for the capstone project.
# In the interest of time, I had to put this on shelf, but when the time
# permits I will re-visit this work to help further enrich our data to support
# a more accurate model.

if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
if(!require(request)) install.packages("request", repos = "http://cran.us.r-project.org")
if(!require(igraph)) install.packages("igraph", repos = "http://cran.us.r-project.org")
if(!require(kknn)) install.packages("kknn", repos = "http://cran.us.r-project.org")
if(!require(readr)) install.packages("readr", repos = "http://cran.us.r-project.org")
if(!require(gbm)) install.packages("gbm", repos = "http://cran.us.r-project.org")
# FIX: package names are case-sensitive; require(matrixstats) always failed,
# which forced a fresh install of matrixStats on every run.
if(!require(matrixStats)) install.packages("matrixStats", repos = "http://cran.us.r-project.org")
library(matrixStats)

# Load the capstone datasets.
data <- read_csv("~/R/capstone/data.csv")
data_artist <- read_csv("~/R/capstone/data_by_artist.csv")
data_genres <- read_csv("~/R/capstone/data_by_genres.csv")
data_year <- read_csv("~/R/capstone/data_by_year.csv")
data_w_genres <- read_csv("~/R/capstone/data_w_genres.csv")

#make a mapping of consolidated artist genre to song
#steps:
# "split_data" 1) get songs to a list of artist & artists
split_data <- stack(setNames(strsplit(data$artists,','), data$artists)) %>% distinct()
split_data %>% filter(values == 'Sergei Rachmaninoff')

# "artist_split_song_list" 2) get a distinct list of all songs and each
# individual artist (now duplicated by song)
# too much memory
temp_split_song_data <- unique(data, by = c("artists", "name")) %>% select(artists, name)
#this adds several hundred MB in data
#alternate approach
temp_split_song_data <- data %>% select(artists, name) %>% distinct() # no memory change
artist_split_song_list <- temp_split_song_data %>% select(artists, name) %>%
  mutate(ind = artists) %>% left_join(split_data, by = 'ind') %>% distinct() #1GB
artist_split_song_list <- artist_split_song_list %>%
  select(artists, name, values) %>% distinct() #no mem change
#need to redesign the above staging step to be more memory efficient...the
#initial duplicated list is too large
#doing as a single step won't work
#artist_split_song_list <- %>% select(artists, name) %>% %>% mutate(ind = artists) %>% left_join(split_data, by = 'ind' ) %>% unique(., by = c("artists","name","values")) %>% select(artists, name, values)
artist_split_song_list %>% filter(values == 'Sergei Rachmaninoff')
# FIX: do not remove artist_split_song_list here -- it is joined against again
# below (artist_split_song_genre_list / artists_split_song_genre_list), so the
# rm() made the later steps fail with "object not found".
#rm(artist_split_song_list)

#build the artist genre list
artist_genre <- data_w_genres %>% select(artists, genres) %>%
  mutate(artist_clean = artists) %>% distinct()
artist_genre %>% filter(artists == 'Wookiefoot') %>% select(artists, genres, artist_clean)

# 3) get artist and their genres
artist_genre_list <- stack(setNames(strsplit(artist_genre$genres,','), artist_genre$artists)) %>% distinct()
#clean the genre list: strip brackets and quotes left over from the list syntax
artist_genre_list <- artist_genre_list %>% mutate(values = gsub("\\[|\\]", "", values))
artist_genre_list <- artist_genre_list %>% mutate(values = gsub("\\'|\\'", "", values))
artist_genre_list <- artist_genre_list %>% mutate(genre = values, artist_clean = ind) %>%
  select(artist_clean, genre) %>% distinct()

# artist genres 4) join in the genres and for each artist break out the list
# of genres into unique elements
#add genres to the list of artist, artists, song
artist_genre_temp <- artist_genre_list %>% mutate(values = artist_clean) %>%
  select(values, genre) %>% distinct()
artist_split_song_genre_list <- artist_split_song_list %>%
  left_join(artist_genre_temp, by = "values") %>% distinct()

# now build a list of unique genres from the artists for each song
#select song, comma separated list of genres that is unique
artists_name_genres <- artist_split_song_genre_list %>% group_by(artists, name) %>%
  summarise(genres = toString(genre)) %>% ungroup() %>% distinct()
artists_genre_list <- stack(setNames(strsplit(artists_name_genres$genres,','),
                                     artists_name_genres$artists)) %>% distinct()
artists_genre_list <- artists_genre_list %>% mutate(ind = gsub("\\[|\\]", "", ind))
artists_genre_list <- artists_genre_list %>% mutate(ind = gsub("\\'|\\'", "", ind))
artists_genre_list <- artists_genre_list %>% mutate(genre = values, artists = ind) %>%
  select(artists, genre) %>% distinct()
artists_genre_list <- artists_genre_list %>%
  mutate(artists = str_squish(artists), genre = str_squish(genre))

#12/5 working here
#the goal is to categorize genres so there are not so many different values
head(artists_genre_list)
temp_genre_rank <- artists_genre_list %>% group_by(genre) %>% summarize(counts = n())
temp_genre_rank <- temp_genre_rank[order(-temp_genre_rank$counts),]
# Map the long tail of raw genres onto a small set of consolidated genres;
# unmatched genres fall through to 0 and are dropped below.
temp_genre_rank <- temp_genre_rank %>% mutate(newgenre =
  ifelse(genre == 'NA', '',
  ifelse(genre == '', '',
  ifelse(genre %like% 'classical', 'classical',
  ifelse(genre %like% 'rap', 'rap',
  ifelse(genre %like% 'hip hop', 'hip hop',
  ifelse(genre %like% 'pop', 'pop',
  ifelse(genre %like% 'adult standards', 'adult standards',
  ifelse(genre %like% 'romantic', 'romantic',
  ifelse(genre %like% 'urban', 'urban',
  ifelse(genre %like% 'r&b', 'r&b',
  ifelse(genre %like% 'rock', 'rock',
  ifelse(genre %like% 'latin', 'latin',
  ifelse(genre %like% 'opera', 'opera',
  ifelse(genre %like% 'edm', 'edm',
  ifelse(genre %like% 'jazz', 'jazz',
  ifelse(genre %like% 'rock', 'rock',
  ifelse(genre %like% 'lounge', 'lounge',
  ifelse(genre %like% 'tropic', 'tropic',
  ifelse(genre %like% 'country', 'country', 0))))))))))))))))))))
temp_genre_rank <- temp_genre_rank %>% filter(newgenre != 0)
artists_genre_list_new <- artists_genre_list %>%
  left_join(temp_genre_rank, by = "genre") %>%
  select(artists, newgenre) %>% distinct()

#resume
artists_genre_temp <- artists_genre_list_new %>% select(artists, newgenre) %>% distinct()
artist_split_song_list <- artist_split_song_list %>%
  mutate(artists = gsub("\\[|\\]", "", gsub("\\'|\\'", "", artists)))
artists_split_song_genre_list <- artist_split_song_list %>%
  left_join(artists_genre_temp, by = "artists") %>% distinct()
head(artist_split_song_list)
head(artists_genre_temp)

#ignore this section for now 12/4
#now join unique name artists back to genres
#working_data %>% filter(artists %like% 'Sergei') %>% select(name, artists)
#test <- working_data %>% left_join(artists_name_genres, by = c('name', 'artists'))
#test %>% filter(artists %like% 'Sergei') %>% select(name, artists, genres)

#other idea - add genres as features
#1) take artist genre, spread on genres, join back in
#not required for artists work
artist_genre_pivot <- artist_genre_list %>% dplyr::count(artist_clean, genre) %>%
  tidyr::spread(key = genre, value = n) %>% distinct()
artist_genre_pivot %>% filter(artist_clean %like% 'Sergei') %>% select(artist_clean, classical)

#can we group or optimize the genre list?
artists_genre_pivot <- artists_genre_list_new %>% dplyr::count(artists, newgenre) %>%
  tidyr::spread(key = newgenre, value = n) %>% distinct()
#clean up
artists_genre_pivot <- artists_genre_pivot %>% mutate(artists = gsub("\\'|\\'", "", artists))
artists_genre_pivot <- artists_genre_pivot %>% mutate(artists = gsub("\\[|\\]", "", artists))
#need to clean up NA and v1 column
artist_genre_pivot[is.na(artist_genre_pivot)] <- 0
artists_genre_pivot[is.na(artists_genre_pivot)] <- 0

#use nearZeroVar to look for cols to keep ( we can use this to remove the
#least common genres as there are over 4400 in the data)
#nearZeroVar recommends columns to remove due to near 0 variance
#nzv <- nearZeroVar(artist_genre_pivot)
#col_index <- setdiff(1:ncol(artist_genre_pivot), nzv)
#length(col_index)
# this came back with just 1,2 as the recommended keepers which won't work
#let's try to do this manually and take the top 5% of columns
column_sums <- colSums(artist_genre_pivot[,3:ncol(artist_genre_pivot)])
column_sums <- append(c(0,0), column_sums)
genres_to_keep <- append('artist_clean',
                         names(head(sort(column_sums, decreasing=TRUE), length(column_sums)*.05)))
columns_sums <- colSums(artists_genre_pivot[,3:ncol(artists_genre_pivot)])
columns_sums <- append(c(0,0), columns_sums)
#no longer needed after genre consolidation
#genres_to_keep_2 <- append('artists', names(head(sort(columns_sums, decreasing=TRUE), 5)))
#changed from top 5% of genres to top 50
#length(columns_sums)*.005) ))
#genres_to_keep_2

#new version of artist_genre_pivot that is only the top 5% of designated genres
artist_genre_pivot_reduced <- artist_genre_pivot %>% select(genres_to_keep)
# FIX: genres_to_keep_2 is no longer defined (its definition is commented out
# above), so this select() errored; the result was also overwritten two lines
# down, so the step is retired along with its input.
#artists_genre_pivot_reduced <- artists_genre_pivot %>% select(genres_to_keep_2) %>% distinct()
#remove the column NA
artists_genre_pivot_reduced <- artists_genre_pivot[,-2]
artists_genre_pivot_reduced <- artists_genre_pivot_reduced[,-2]
#convert data frame values to 1 and 0 for the genre attributes
artists_genre_pivot_reduced <- artists_genre_pivot_reduced %>%
  mutate_if(is.numeric, ~1 * (. > 0))

#now join this set of genre flags (based on artists) back to our working data
data <- data %>% mutate(artists = gsub("\\'|\\'", "", gsub("\\[|\\]", "", artists))) %>% distinct()
#quick cleanup to add artists without genres into the data
artists_genre_pivot_reduced <- data %>% select(artists) %>%
  left_join(artists_genre_pivot_reduced, by = "artists") %>% distinct()
artists_genre_pivot_reduced[is.na(artists_genre_pivot_reduced)] <- 0

#quick data checks
# NOTE(review): working_data is never created in this script -- presumably it
# comes from the main capstone script; confirm before running these checks.
working_data %>% filter(artists == "Sergei Rachmaninoff, James Levine, Berliner Philharmoniker") %>% select(artists)
artists_genre_pivot_reduced %>% filter(artists %like% "Sergei Rachmaninoff, James Levine, Berliner Philharmoniker") %>% select(artists)
working_data %>% filter(artists == "Dennis Day") %>% select(artists)
artists_genre_pivot_reduced %>% filter(artists %like% "Dennis Day") %>% select(artists)
/artist_genre_classification.R
no_license
dalnekoff/edx_1259x_capstone
R
false
false
12,526
r
# EDX Capstone
# ddalnekoff
# You Wanted A Hit?
# Artist Genre Grouping Work
#
# A note on this file: this includes work I started to attempt to identify and
# assign a "major" genre to each artist in order to add additional variables to
# the dataset for the capstone project. In the interest of time, I had to put
# this on the shelf, but when time permits I will re-visit this work to help
# further enrich our data to support a more accurate model.

# Package bootstrap: install if missing, attach via require().
if (!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if (!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if (!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
if (!require(request)) install.packages("request", repos = "http://cran.us.r-project.org")
if (!require(igraph)) install.packages("igraph", repos = "http://cran.us.r-project.org")
if (!require(kknn)) install.packages("kknn", repos = "http://cran.us.r-project.org")
if (!require(readr)) install.packages("readr", repos = "http://cran.us.r-project.org")
if (!require(gbm)) install.packages("gbm", repos = "http://cran.us.r-project.org")
# FIX: the original tested require(matrixstats) (lowercase). Package names are
# case-sensitive, so that always returned FALSE and re-installed matrixStats on
# every run.
if (!require(matrixStats)) install.packages("matrixStats", repos = "http://cran.us.r-project.org")
library(matrixStats)

# Source data (Spotify capstone extracts).
data <- read_csv("~/R/capstone/data.csv")
data_artist <- read_csv("~/R/capstone/data_by_artist.csv")
data_genres <- read_csv("~/R/capstone/data_by_genres.csv")
data_year <- read_csv("~/R/capstone/data_by_year.csv")
data_w_genres <- read_csv("~/R/capstone/data_w_genres.csv")

# Make a mapping of consolidated artist genre to song.
# Step 1 ("split_data"): explode each comma-separated artist credit into one
# row per (credit string = ind, individual artist = values).
split_data <- stack(setNames(strsplit(data$artists, ','), data$artists)) %>%
  distinct()
split_data %>% filter(values == 'Sergei Rachmaninoff')

# Step 2 ("artist_split_song_list"): distinct (credit, song) pairs, then join
# the exploded individual artists back in.
# FIX: the original first tried
#   temp_split_song_data <- unique(data, by = c("artists", "name")) %>% select(artists, name)
# 'by' is a data.table argument that unique.data.frame ignores, and the call
# duplicated several hundred MB; the author's own "alternate approach" below is
# equivalent and cheaper, so only it is kept.
temp_split_song_data <- data %>% select(artists, name) %>% distinct()

artist_split_song_list <- temp_split_song_data %>%
  select(artists, name) %>%
  mutate(ind = artists) %>%
  left_join(split_data, by = 'ind') %>%
  distinct()
artist_split_song_list <- artist_split_song_list %>%
  select(artists, name, values) %>%
  distinct()
# Need to redesign the above staging step to be more memory efficient — the
# initial duplicated list is too large; doing it as a single step won't work.

artist_split_song_list %>% filter(values == 'Sergei Rachmaninoff')
# FIX: the original called rm(artist_split_song_list) here, but the object is
# used again below (artist_split_song_genre_list), so a clean top-to-bottom run
# errored. The rm() has been dropped.

# Build the artist -> genres list from data_w_genres.
artist_genre <- data_w_genres %>%
  select(artists, genres) %>%
  mutate(artist_clean = artists) %>%
  distinct()
artist_genre %>% filter(artists == 'Wookiefoot') %>% select(artists, genres, artist_clean)

# Step 3: one row per (artist, genre), with list-literal punctuation stripped.
# (FIX: the original quote pattern "\\'|\\'" repeated the same alternative; a
#  plain "'" matches the same characters.)
artist_genre_list <- stack(setNames(strsplit(artist_genre$genres, ','), artist_genre$artists)) %>%
  distinct()
artist_genre_list <- artist_genre_list %>% mutate(values = gsub("\\[|\\]", "", values))
artist_genre_list <- artist_genre_list %>% mutate(values = gsub("'", "", values))
artist_genre_list <- artist_genre_list %>%
  mutate(genre = values, artist_clean = ind) %>%
  select(artist_clean, genre) %>%
  distinct()

# Step 4: join the genres in and, for each artist credit, break the genre list
# out into unique elements.
artist_genre_temp <- artist_genre_list %>%
  mutate(values = artist_clean) %>%
  select(values, genre) %>%
  distinct()
artist_split_song_genre_list <- artist_split_song_list %>%
  left_join(artist_genre_temp, by = "values") %>%
  distinct()

# Per (credit, song): a unique comma-separated genre list.
artists_name_genres <- artist_split_song_genre_list %>%
  group_by(artists, name) %>%
  summarise(genres = toString(genre)) %>%
  ungroup() %>%
  distinct()

# Explode back to one row per (credit, genre) and tidy the strings.
artists_genre_list <- stack(setNames(strsplit(artists_name_genres$genres, ','), artists_name_genres$artists)) %>%
  distinct()
artists_genre_list <- artists_genre_list %>% mutate(ind = gsub("\\[|\\]", "", ind))
artists_genre_list <- artists_genre_list %>% mutate(ind = gsub("'", "", ind))
artists_genre_list <- artists_genre_list %>%
  mutate(genre = values, artists = ind) %>%
  select(artists, genre) %>%
  distinct()
artists_genre_list <- artists_genre_list %>%
  mutate(artists = str_squish(artists), genre = str_squish(genre))

# 12/5 working here — the goal is to categorize genres so there are not so
# many different values.
head(artists_genre_list)

# Rank raw genres by frequency, then map each onto a small set of umbrella
# genres by keyword. First match wins, mirroring the original nested-ifelse
# order; unmatched genres get '0' and are filtered out.
# (FIX: replaced a 19-level nested ifelse() with case_when(); the original's
#  second, unreachable 'rock' branch has been removed. toString() renders
#  missing genres as the literal string "NA", hence the first test.)
temp_genre_rank <- artists_genre_list %>%
  group_by(genre) %>%
  summarize(counts = n())
temp_genre_rank <- temp_genre_rank[order(-temp_genre_rank$counts), ]
temp_genre_rank <- temp_genre_rank %>%
  mutate(newgenre = case_when(
    genre == 'NA' ~ '',
    genre == '' ~ '',
    genre %like% 'classical' ~ 'classical',
    genre %like% 'rap' ~ 'rap',
    genre %like% 'hip hop' ~ 'hip hop',
    genre %like% 'pop' ~ 'pop',
    genre %like% 'adult standards' ~ 'adult standards',
    genre %like% 'romantic' ~ 'romantic',
    genre %like% 'urban' ~ 'urban',
    genre %like% 'r&b' ~ 'r&b',
    genre %like% 'rock' ~ 'rock',
    genre %like% 'latin' ~ 'latin',
    genre %like% 'opera' ~ 'opera',
    genre %like% 'edm' ~ 'edm',
    genre %like% 'jazz' ~ 'jazz',
    genre %like% 'lounge' ~ 'lounge',
    genre %like% 'tropic' ~ 'tropic',
    genre %like% 'country' ~ 'country',
    TRUE ~ '0'
  ))
temp_genre_rank <- temp_genre_rank %>% filter(newgenre != '0')

artists_genre_list_new <- artists_genre_list %>%
  left_join(temp_genre_rank, by = "genre") %>%
  select(artists, newgenre) %>%
  distinct()

# Resume: re-attach the consolidated genres at the credit level.
artists_genre_temp <- artists_genre_list_new %>% select(artists, newgenre) %>% distinct()
artist_split_song_list <- artist_split_song_list %>%
  mutate(artists = gsub("\\[|\\]", "", gsub("'", "", artists)))
artists_split_song_genre_list <- artist_split_song_list %>%
  left_join(artists_genre_temp, by = "artists") %>%
  distinct()
head(artist_split_song_list)
head(artists_genre_temp)

# Ignore this section for now (12/4) — join unique name/artists back to genres:
# working_data %>% filter(artists %like% 'Sergei') %>% select(name, artists)
# test <- working_data %>% left_join(artists_name_genres, by = c('name', 'artists'))
# test %>% filter(artists %like% 'Sergei') %>% select(name, artists, genres)

# Other idea — add genres as features:
# 1) take artist genre, spread on genres, join back in (not required for the
#    consolidated artists work; kept for reference).
artist_genre_pivot <- artist_genre_list %>%
  dplyr::count(artist_clean, genre) %>%
  tidyr::spread(key = genre, value = n) %>%
  distinct()
artist_genre_pivot %>% filter(artist_clean %like% 'Sergei') %>% select(artist_clean, classical)

# Can we group or optimize the genre list? Pivot on the consolidated genres.
artists_genre_pivot <- artists_genre_list_new %>%
  dplyr::count(artists, newgenre) %>%
  tidyr::spread(key = newgenre, value = n) %>%
  distinct()
artists_genre_pivot <- artists_genre_pivot %>% mutate(artists = gsub("'", "", artists))
artists_genre_pivot <- artists_genre_pivot %>% mutate(artists = gsub("\\[|\\]", "", artists))

# spread() leaves NA where an artist has no count for a genre; replace with 0.
artist_genre_pivot[is.na(artist_genre_pivot)] <- 0
artists_genre_pivot[is.na(artists_genre_pivot)] <- 0

# Use nearZeroVar to look for cols to keep (over 4400 genres in the data):
# nzv <- nearZeroVar(artist_genre_pivot)
# col_index <- setdiff(1:ncol(artist_genre_pivot), nzv)
# length(col_index)
# This came back with just 1,2 as the recommended keepers, which won't work.

# Manual alternative: keep the top 5% most common raw genres. The two leading
# zeros pad for the id columns so names() lines up with the pivot columns.
column_sums <- colSums(artist_genre_pivot[, 3:ncol(artist_genre_pivot)])
column_sums <- append(c(0, 0), column_sums)
genres_to_keep <- append('artist_clean',
                         names(head(sort(column_sums, decreasing = TRUE),
                                    length(column_sums) * .05)))

columns_sums <- colSums(artists_genre_pivot[, 3:ncol(artists_genre_pivot)])
columns_sums <- append(c(0, 0), columns_sums)
# No longer needed after genre consolidation:
# genres_to_keep_2 <- append('artists', names(head(sort(columns_sums, decreasing=TRUE), 5)))

# New version of artist_genre_pivot that is only the top 5% of designated
# genres.
# (FIX: all_of() — passing a bare character vector to select() is deprecated.)
artist_genre_pivot_reduced <- artist_genre_pivot %>% select(all_of(genres_to_keep))

# FIX: the original also ran
#   artists_genre_pivot_reduced <- artists_genre_pivot %>% select(genres_to_keep_2) %>% distinct()
# but genres_to_keep_2 exists only in the commented-out line above, so that
# statement errors — and its result was overwritten immediately below anyway.

# Drop the NA / blank genre columns (columns 2 and 3 after the pivot).
artists_genre_pivot_reduced <- artists_genre_pivot[, -2]
artists_genre_pivot_reduced <- artists_genre_pivot_reduced[, -2]

# Convert the per-genre counts to 1/0 presence flags.
artists_genre_pivot_reduced <-
  artists_genre_pivot_reduced %>% mutate_if(is.numeric, ~ 1 * (. > 0))

# Now join this set of genre flags (based on artists) back to our working data.
data <- data %>%
  mutate(artists = gsub("'", "", gsub("\\[|\\]", "", artists))) %>%
  distinct()

# Quick cleanup to add artists without genres into the data (flags become 0).
artists_genre_pivot_reduced <- data %>%
  select(artists) %>%
  left_join(artists_genre_pivot_reduced, by = "artists") %>%
  distinct()
artists_genre_pivot_reduced[is.na(artists_genre_pivot_reduced)] <- 0

# Quick data checks.
# NOTE(review): working_data is not created anywhere in this file — presumably
# built in the main capstone script; confirm before running this standalone.
working_data %>% filter(artists == "Sergei Rachmaninoff, James Levine, Berliner Philharmoniker") %>% select(artists)
artists_genre_pivot_reduced %>% filter(artists %like% "Sergei Rachmaninoff, James Levine, Berliner Philharmoniker") %>% select(artists)
working_data %>% filter(artists == "Dennis Day") %>% select(artists)
artists_genre_pivot_reduced %>% filter(artists %like% "Dennis Day") %>% select(artists)
context("KP - Get player stats")

test_that("KP - Get player stats", {
  # Network-backed scrape of KenPom data: only run in local interactive checks.
  skip_on_cran()
  skip_on_ci()

  result <- kp_playerstats(metric = 'eFG', conf_only = FALSE, year = 2020)

  # Columns 1-3 identify the ranked player; columns 5-8 carry bio fields plus
  # the season year.
  expected_id_cols <- c(
    "rk", "player", "team"
  )
  expected_bio_cols <- c(
    "hgt", "wgt", "yr", "year"
  )

  expect_equal(colnames(result[1:3]), expected_id_cols)
  expect_equal(colnames(result[5:8]), expected_bio_cols)
  expect_s3_class(result, 'data.frame')
})
/tests/testthat/test-kp_playerstats.R
permissive
mrcaseb/hoopR
R
false
false
397
r
context("KP - Get player stats")

test_that("KP - Get player stats", {
  # This hits the live KenPom site, so skip everywhere except local runs.
  skip_on_cran()
  skip_on_ci()

  df <- kp_playerstats(metric = 'eFG', conf_only = FALSE, year = 2020)

  # Leading identifier columns of the scraped table.
  cols_front <- c(
    "rk", "player", "team"
  )
  # Player bio columns (height, weight, class) plus the season.
  cols_bio <- c(
    "hgt", "wgt", "yr", "year"
  )

  expect_equal(colnames(df[1:3]), cols_front)
  expect_equal(colnames(df[5:8]), cols_bio)
  expect_s3_class(df, 'data.frame')
})