blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
caaccd1427a9714ed1b824bb5d856a261402de57
1ed12913fb9b98db702389871ea761c46fdee116
/R/sum.td.data.frame.R
82d075c4775fd5137b0a7c8152177638d7ce7cbb
[]
no_license
ghuiber/teradataR
9053adf62e0151b320da4f9ca840d056adcdcad2
d097a9484f8cf53803f1ba26181970042bd146bb
refs/heads/master
2021-01-22T01:33:57.288324
2014-09-12T20:43:55
2014-09-12T20:43:55
null
0
0
null
null
null
null
UTF-8
R
false
false
872
r
sum.td.data.frame.R
#' Sum all columns of a td.data.frame.
#'
#' Issues one `SELECT SUM(...)` query per column against the Teradata table
#' backing `tdf` and returns the grand total across all columns.
#'
#' @param tdf A td.data.frame.
#' @param ... Ignored; present for compatibility with the generic `sum`.
#' @return A numeric scalar: the sum of every column's SUM().
sum.td.data.frame <- function(tdf, ...) {
  if (!is.td.data.frame(tdf))
    stop("'tdf' is not a td data frame")
  if (!length(names(tdf)))
    stop("td.data.frame contains no columns")

  # Fully qualified table reference (database.table) for the SQL text.
  obj <- .td.object(attr(tdf, "tableName"), attr(tdf, "database"))
  sumVal <- 0
  exprs <- attr(tdf, "expressions")

  # Optional WHERE clause carried on the td.data.frame.
  wc <- ""
  if (!is.null(attr(tdf, "whereClause")))
    wc <- paste(" WHERE ", attr(tdf, "whereClause"))

  for (i in seq_along(names(tdf))) {
    # Columns defined by an expression are expanded via .td.gencolumnexpr;
    # plain columns are quoted by name.
    if (as.character(i) %in% names(exprs))
      query <- gettextf("SELECT SUM(%s) FROM %s %s",
                        .td.gencolumnexpr(tdf[i]), obj, wc)
    else
      query <- gettextf("SELECT SUM(\"%s\") FROM %s %s",
                        names(tdf)[i], obj, wc)

    df <- try(tdQuery(query))
    # BUG FIX: the original tested is.null(attr(df, "class")), which is never
    # TRUE for a failed try() (try-errors carry class "try-error"), so query
    # failures slipped through to as.numeric() and produced NA sums. Detect
    # the failure explicitly instead.
    if (inherits(df, "try-error"))
      stop("Only defined on a td data frame with all numeric variables")

    sumVal <- sumVal + as.numeric(df)
  }
  return(sumVal)
}
cbdad5b6cc9d526d7b4a46081f8100d16c090537
454a2e5c87a170b9bcfe0fd2b11516b90dcc1b05
/man/est_params.Rd
ac38784787b60ba94d3eef82a07d6ab7cda9311c
[ "LicenseRef-scancode-generic-cla", "MIT" ]
permissive
test-mass-forker-org-1/CausalGrid
1bec395e2bb68d12cf3c1e4f87d15650b1adec15
1aba80502457c211dbfa2099fcef91f97a4fb74f
refs/heads/main
2023-06-03T04:20:03.314112
2021-06-23T18:43:50
2021-06-23T18:43:50
null
0
0
null
null
null
null
UTF-8
R
false
true
1,131
rd
est_params.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estimator_plans.R
\name{est_params}
\alias{est_params}
\alias{est_params.lm_est}
\alias{est_params.simple_est}
\alias{est_params.grid_rf}
\title{Estimate parameters}
\usage{
est_params(obj, y, d = NULL, X, sample = "est", ret_var = FALSE)

\method{est_params}{lm_est}(obj, y, d = NULL, X, sample = "est", ret_var = FALSE)

\method{est_params}{simple_est}(obj, y, d = NULL, X, sample = "est", ret_var = FALSE)

\method{est_params}{grid_rf}(obj, y, d = NULL, X, sample = "est", ret_var = FALSE)
}
\arguments{
\item{obj}{an EstimatorPlan object}

\item{y}{An N-vector}

\item{d}{An N-vector or NxM matrix (so that they can be estimated jointly)}

\item{X}{A NxK matrix or data.frame}

\item{sample}{One of: "trtr", "trcv", "est"}

\item{ret_var}{Return Variance in the return list}
}
\value{
\code{list(param_est=...)}, or the same list augmented with variance
estimates if \code{ret_var}
}
\description{
Estimate parameters on the data.
}
\section{Methods (by class)}{
\itemize{
\item \code{lm_est}: lm_est
\item \code{simple_est}: simple_est
\item \code{grid_rf}: grid_rf
}}
67aa618714f780babef96303ab4d14b2401f1d8e
bb4c63c25c9d546d530065591fb552cf562dce33
/man/Boot4PTCA.Rd
0c1d2ec60693f2cfebf70e78808bb985fa129dbe
[]
no_license
HerveAbdi/PTCA4CATA
98d6be1b0781f79d8f1ed591f4598883e4f7b764
23e8764e0e1f2806b4defe584aa876f0b91c5485
refs/heads/master
2022-07-27T07:23:39.066852
2022-07-14T21:28:33
2022-07-14T21:28:33
92,956,064
8
8
null
2022-05-04T21:45:54
2017-05-31T14:42:24
R
UTF-8
R
false
true
2,551
rd
Boot4PTCA.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/BootRatio.R \name{Boot4PTCA} \alias{Boot4PTCA} \title{Compute the Bootstrapped factors scores for the \eqn{I} and \eqn{J} sets from a Partial Triadic Correspondence analysis (PTCA).} \usage{ Boot4PTCA( ZeDataCube, fi, fj, eigs, nf2keep = 2, nBootIter = 100, compact = FALSE, eigen = FALSE, eigen.compact = TRUE ) } \arguments{ \item{ZeDataCube}{An \eqn{I} * \eqn{J} * \eqn{K} data cube (\eqn{K} are observations) The third dimension (i.e., \eqn{K}) is bootstrapped.} \item{fi}{The factor scores for \eqn{I} (rows) from the \code{epCA} program} \item{fj}{The factor scores for \eqn{J} (columns) from the epCA program} \item{eigs}{The eigenvalues from the epCA program} \item{nf2keep}{how many factors to keep, default to 2} \item{nBootIter}{How many Bootstrap samples, default to 100 \code{(RowsBoot = ZeBootCube_I, ColumnsBoot = ZeBootCube_J)}.} \item{compact}{(default = \code{FALSE}) if \code{TRUE} gives a compact version with only the results for the symmetric approach.} \item{eigen}{if \code{FALSE} compute also the bootstraped eigenvalues. NB It seems that the bootstrapped eigenvalues are biased (i.e., their mean is not equal to the corresponding eigenvalue). So this feature is experimental.} \item{eigen.compact}{when \code{TRUE} returns the whole matrix of bootstrapped eigenvalues. Default is \code{FALSE}. 
When \code{eigen} is \code{FALSE}, \code{eigen.compact} has no effect.}
}
\value{
With notation:
\eqn{I}: number of rows (of \code{ZeDataCube}),
\eqn{J}: number of columns (of \code{ZeDataCube}),
\eqn{L}: number of factors kept (i.e., \code{nf2keep}),
\eqn{B}: number of Bootstrap replicates (i.e., \code{nBootIter});
\code{Boot4PTCA} returns a list if compact \code{FALSE}:
1a) \code{RowsBoot} an \eqn{I} * \eqn{L} * \eqn{B} cube of
Bootstrapped coordinates for the \eqn{I}-set
1b) \code{RowsBoot.asym} an \eqn{I} * \eqn{L} * \eqn{B} cube of
Bootstrapped coordinates for the \eqn{I}-set (asymmetric projections);
2a) \code{ColumnsBoot} a \eqn{J} * \eqn{L} * \eqn{B} cube of
Bootstrapped coordinates for the \eqn{J}-set if compact is \code{FALSE}
2b) \code{ColumnsBoot.asym} a \eqn{J} * \eqn{L} * \eqn{B} cube of
Bootstrapped coordinates for the \eqn{J}-set.
}
\description{
\code{Boot4PTCA} bootstraps the \eqn{K}-th dimension of
a data cube and computes bootstrapped factor scores.
}
\examples{
\dontrun{
BootFactorsIJ <- Boot4PTCA(A.Cube.Of.Data,fi = fi, fj = fj, eigs = eigs)
}
}
\author{
Hervé Abdi
}
d9f95790236a7a956c009efee80ab116582d6f34
b1a12b171097fcb0b2a6f7a10e0ab7afdf41aac1
/R/AmChart.R
7b22285a29ecad1f718ead7ea3039e5c41770763
[]
no_license
myndworkz/rAmCharts
7e1d66002cbca9ef63e1d2af6b4e49a1ac7cd3c3
6ea352cab2c9bc5f647447e5e7d902d9cbec0931
refs/heads/master
2021-01-14T13:06:28.947936
2015-07-29T12:34:13
2015-07-29T12:34:13
39,955,321
1
0
null
2015-07-30T14:37:43
2015-07-30T14:37:43
null
UTF-8
R
false
false
11,629
r
AmChart.R
#' @include AmObject.R
NULL

#' @title AmChart
#' @description This class allows to define the amCharts parameters.
#' @details API for plotting AmChart with R.
#' @section Slots:
#' @slot \code{allLabels}: Object of class \code{"list"}. List of Labels properties.
#' See \code{\linkS4class{Label}}.
#' @slot \code{arrows}: Object of class \code{"list"}.
#' List of properties of \code{\linkS4class{GaugeArrow}}s.
#' @slot \code{axes}: Object of class \code{"list"}.
#' List of properties of \code{\linkS4class{GaugeArrow}}s.
#' @slot \code{balloon}: Object of class \code{"list"}.
#' List of an \code{\linkS4class{AmBalloon}} class properties.
#' Creates the balloons (tooltips) of the chart; it follows the mouse cursor
#' when you roll-over the data items. The framework generates the instances
#' automatically; you only need to adjust the appearance to your needs.
#' @slot \code{categoryAxis}: Object of class \code{"list"}.
#' List of a \code{\linkS4class{CategoryAxis}} properties.
#' Read-only. Chart creates category axis itself. If you want to change some
#' properties, you should get this axis from the chart and set properties to
#' this object.
#' @slot \code{categoryField}: Object of class \code{"character"}.
#' Category field name tells the chart the name of the field in your
#' dataProvider object which will be used for category axis values.
#' @slot \code{chartCursor}: Object of class \code{"list"}.
#' List of a \code{\linkS4class{ChartCursor}} class properties.
#' Properties of the chart's cursor.
#' @slot \code{chartScrollbar}: Object of class \code{"list"}.
#' List of a \code{\linkS4class{ChartScrollbar}} class properties.
#' Properties of chart's scrollbar.
#' @slot \code{creditsPosition}: Object of class \code{"character"},
#' specifying position of link to amCharts site.
#' Allowed values are: top-left, top-right, bottom-left and bottom-right.
#' @slot \code{dataProvider}: Object of class \code{"list"}, containing the data.
#' Use providing method toList* to convert a \code{data.frame}.
#' @slot \code{graphs}: Object of class \code{list}. List of AmGraphs properties.
#' See \code{\linkS4class{AmGraph}} class.
#' Creates the visualization of the data in following types: line, column,
#' step line, smoothed line, olhc and candlestick.
#' @slot \code{graph}: Object of class \code{list}. Properties of a single
#' AmGraph (used by charts that take one graph, e.g. gantt).
#' @slot \code{guides}: Object of class \code{list}. List of Guides properties.
#' See \code{\linkS4class{Guides}} class.
#' Instead of adding guides to the axes, you can push all of them to this array.
#' In case guide has category or date defined, it will automatically be
#' assigned to the category axis; otherwise to first value axis, unless you
#' specify a different valueAxes for the guide.
#' @slot \code{legend}: Object of class \code{"list"}.
#' List of an \code{\linkS4class{AmLegend}} class properties.
#' Properties of chart's legend.
#' @slot \code{listeners}: Object of class \code{"list"} of javascript
#' event listeners attached to the chart.
#' @slot \code{segmentsField}: Object of class \code{"character"}.
#' Segments field in the data provider (gantt charts).
#' @slot \code{subChartProperties}: Object of class \code{"list"} of
#' properties for a sub-chart (drill-down).
#' @slot \code{theme}: Object of class \code{"character"}, the amCharts theme.
#' @slot \code{titles}: Object of class \code{"list"}. List of Titles properties.
#' See \code{\linkS4class{Title}} class.
#' @slot \code{trendLines}: Object of class \code{"list"}.
#' List of \code{\linkS4class{trendLine}} objects added to a chart.
#' You can add trend lines to a chart using this list or access already
#' existing trend lines.
#' @slot \code{type}: Object of class \code{"character"}.
#' Possible types are: serial, pie, radar,
#' (types xy, radar, funnel, gauge, map, stock are in development).
#' @slot \code{valueAxes}: Object of class \code{"list"}. List of ValueAxes' properties.
#' See \code{\linkS4class{ValueAxis}} class.
#' Chart creates one value axis automatically, so if you need only one value
#' axis, you don't need to create it.
#' @slot \code{valueAxis}: Object of class \code{list}.
#' List of Value axis properties for Gantt chart.
#' Set its type to "date" if your data is date or time based.
#' @examples
#' new("AmChart")
#' \dontrun{
#' # see available methods
#' showMethods(class = "AmChart")
#' }
#' @author Dataknowledge
#' @seealso \code{\url{http://docs.amcharts.com/3/javascriptcharts}}
#' @family rAmChart classes
#' @export
setClass(
  Class = "AmChart",
  contains = "AmObject",
  representation = representation(
    allLabels = "list",
    arrows = "list",
    axes = "list",
    balloon = "list",
    categoryAxis = "list",
    categoryField = "character",
    chartCursor = "list",
    chartScrollbar = "list",
    creditsPosition = "character",
    dataProvider = "list",
    graphs = "list",
    graph = "list",
    guides = "list",
    legend = "list",
    listeners = "list",
    segmentsField = "character",
    subChartProperties = "list",
    theme = "character",
    titles = "list",
    trendLines = "list",
    type = "character",
    valueAxes = "list",
    valueAxis = "list"
  ),
  validity = function(object) {
    # Reject chart types that amCharts does not implement.
    if (length(object@type) > 0 &&
        !object@type %in% c("funnel", "gantt", "gauge", "pie",
                            "radar", "serial", "xy", "stock")) {
      stop("[AmChart]: error when changing the type, maybe it is not implemented yet")
    }
    # S4 validity methods should return TRUE on success (the original
    # returned NULL implicitly, which happens to pass but is unconventional).
    TRUE
  }
)

#' @title Initialize an AmChart
#' @description Method for initializing any S4 class provided by the package.
#' Every argument is optional; supplied ones are routed to the matching
#' setter, remaining named arguments go to \code{setProperties}.
#' @examples
#' new("AmChart", valueField = "value")
#' @family AmChart methods
#' @seealso \code{\linkS4class{AmChart}} S4 class
#' @export
setMethod(
  f = "initialize",
  signature = "AmChart",
  definition = function(.Object, allLabels, arrows, axes, balloon,
                        categoryAxis, categoryField, chartCursor,
                        chartScrollbar, creditsPosition, dataProvider,
                        graphs, graph, guides, legend, segmentsField, theme,
                        titles, trendLines, type, valueAxes, valueAxis,
                        pathToImages = "http://www.amcharts.com/lib/3/images/",
                        ...) {
    # Dispatch each supplied argument to its dedicated setter.
    if (!missing(allLabels)) .Object <- setAllLabels(.Object, allLabels)
    if (!missing(arrows)) .Object <- setArrows(.Object, arrows)
    if (!missing(axes)) .Object <- setAxes(.Object, axes)
    if (!missing(balloon)) .Object <- setBalloon(.Object, balloon)
    if (!missing(categoryAxis)) .Object <- setCategoryAxis(.Object, categoryAxis)
    if (!missing(categoryField)) .Object <- setCategoryField(.Object, categoryField)
    if (!missing(creditsPosition)) .Object <- setCreditsPosition(.Object, creditsPosition)
    if (!missing(chartCursor)) .Object <- setChartCursor(.Object, chartCursor)
    if (!missing(chartScrollbar)) .Object <- setChartScrollbar(.Object, chartScrollbar)
    if (!missing(dataProvider)) .Object <- setDataProvider(.Object, dataProvider)
    if (!missing(graphs)) .Object <- setGraphs(.Object, graphs)
    if (!missing(graph)) .Object <- setGraph(.Object, graph)
    if (!missing(guides)) .Object <- setGuides(.Object, guides)
    if (!missing(legend)) .Object <- setLegend(.Object, legend)
    if (!missing(segmentsField)) .Object@segmentsField <- segmentsField
    if (!missing(theme)) .Object@theme <- theme
    if (!missing(titles)) .Object <- setTitles(.Object, titles)
    if (!missing(trendLines)) .Object <- setTrendLines(.Object, trendLines)
    if (!missing(type)) .Object <- setType(.Object, type)
    if (!missing(valueAxes)) .Object <- setValueAxes(.Object, valueAxes)
    # BUG FIX: original wrote `if (!missing(valueAxis) > 0)`, which parses as
    # `(!missing(valueAxis)) > 0` and only works by accident (TRUE > 0).
    if (!missing(valueAxis)) .Object <- setValueAxis(.Object, valueAxis)
    # All remaining named arguments become generic chart properties.
    .Object <- setProperties(.Object, pathToImages = pathToImages, ...)
    validObject(.Object)
    return(.Object)
  }
)
#' @title List attributes of an AmChart
#' @description Collects every non-empty slot of an AmChart into a named
#' list, so the object can be serialized for the amCharts javascript API.
#' @details For certain attributes we do not verify if they are NULL,
#' see constructor.
#' @examples
#' library(pipeR)
#' amChart() %>>% setProperties(test = 1) %>>% listProperties
setMethod(
  f = "listProperties",
  signature = "AmChart",
  definition = function(.Object) {
    # Start from the properties collected by the parent (AmObject) method.
    properties <- callNextMethod()
    # Slot names in the exact order the original hand-written chain used
    # (note: creditsPosition deliberately precedes chartCursor).
    chartSlots <- c(
      "allLabels", "arrows", "axes", "balloon", "categoryAxis",
      "categoryField", "creditsPosition", "chartCursor", "chartScrollbar",
      "dataProvider", "graphs", "graph", "guides", "legend", "listeners",
      "segmentsField", "subChartProperties", "theme", "titles",
      "trendLines", "type", "valueAxes", "valueAxis"
    )
    # Append each populated slot under its own name; empty slots are skipped.
    for (slotName in chartSlots) {
      slotValue <- slot(.Object, slotName)
      if (length(slotValue) > 0) {
        properties <- c(properties, stats::setNames(list(slotValue), slotName))
      }
    }
    return(properties)
  }
)
f13df7eeddcdf535bfc43ac00af18dcd25061afc
29bccff3bbed4db64b69bdce06bd43e584ef79e1
/R/onLoad.R
210b835031c46a25079cdaf547b7efe8f67809c8
[]
no_license
FluvialLandscapeLab/datapuppy
55ac83d5d5ee067936092367ca8fe90a4956d898
d406d2e98510e3570e3e109538a1c86283797aae
refs/heads/master
2021-05-01T10:18:03.006980
2017-04-10T19:20:40
2017-04-10T19:20:40
43,246,577
0
0
null
2020-06-01T19:34:36
2015-09-27T12:58:55
R
UTF-8
R
false
false
2,353
r
onLoad.R
#' Package load hook.
#'
#' Registers datapuppy's default options when the package is loaded. The
#' defaults are applied with the standard load-hook pattern: an option is
#' only set if the user has not already defined it, so user configuration
#' set before loading the package is preserved (the original version
#' captured `options()` into an unused variable and then unconditionally
#' overwrote `datapuppy.connections`).
#'
#' A large block of commented-out option defaults for writing provenance to
#' XML (which the author had already marked as no longer useful) was removed;
#' see version control history if it is ever needed again.
#'
#' @param libname Path to the library the package is loaded from (unused).
#' @param pkgname Name of the package being loaded (unused).
#' @return Invisibly returns \code{NULL}, as expected of a load hook.
.onLoad <- function(libname, pkgname) {
  op <- options()
  op.datapuppy <- list(
    datapuppy.connections = c("MySQLConnection")
  )
  # Only set options that the user has not defined already.
  toset <- !(names(op.datapuppy) %in% names(op))
  if (any(toset)) {
    options(op.datapuppy[toset])
  }
  invisible()
}
0d66cac7edfebde3039d59797cfadc354c9f9569
31c1fc57b9e4778a58c9e21a4b015eb165ee57c4
/run_analysis.R
19ab92a1cfccc8171a38095b6e020f83c8774ddf
[]
no_license
student73891/gcd
9c93b0b7f7191e831ed0845f45d65064a9098b72
d23592b176de56b49c2c5e2897b24a19e7108155
refs/heads/master
2021-01-10T22:23:20.011005
2014-09-20T12:55:35
2014-09-20T12:55:35
null
0
0
null
null
null
null
UTF-8
R
false
false
3,801
r
run_analysis.R
# run_analysis.R: Getting and Cleaning Data 2014/09 Course Project
# ======================================================================

# Directory containing data files
dataDir <- "UCI HAR Dataset"

# Full name of a data file
# This function prepends dataDir to its arguments, and then joins
# everything with a "/" character, thus giving us a quick way to access
# data files. The name of the function, df, is short for "data file".
df <- function(...) paste(dataDir, ..., sep = "/")

# Read complete train or test set
# This function uses mapply to read in data from the subject, y, and X data
# files, which gives us a list, containing three data frames, with
# different numbers of columns, but with the same number of rows. The list
# is then converted to a data.frame using as.data.frame, which amounts to
# column-binding the list elements. The resulting data.frame has the
# following columns: subject, activity, feature1, ..., feature561 (but
# these column names are for convenience only, they are not the descriptive
# column names that the assignment asks for, which are set later). The call
# to read.table uses colClasses for increased speed. The argument set has
# to be one of "train" or "test".
readSet <- function(set) {
  files <- paste0(c("subject", "y", "X"), "_", set, ".txt")
  classes <- c("integer", "integer", "numeric")
  # `cls` rather than `c` to avoid shadowing base::c inside the helper.
  readFile <- function(f, cls) read.table(df(set, f), colClasses = cls)
  result <- as.data.frame(mapply(readFile, files, classes))
  colnames(result) <- c("subject", "activity",
                        paste0("feature", seq_len(ncol(result) - 2)))
  return(result)
}

# The processing starts here

# Features
# First we read feature names from the file features.txt. We then search
# for features we want to keep (those with -mean() or -std() in their
# name). Finally, we create descriptive column names by first
# removing (), then by translating the characters (,- to an underscore _,
# and finally by removing ). For example, tGravityAcc-arCoeff()-X,1 will
# become tGravityAcc_arCoeff_X_1, and angle(X,gravityMean) will become
# angle_X_gravityMean.
# (NOTE: the "For example..." sentence above had lost its leading comment
# marker, which made the script a syntax error; it has been restored.)
features <- read.table(df("features.txt"), as.is = TRUE)[, 2]
keep <- grep("-(mean|std)\\(\\)", features)
colNames <- gsub("\\)", "", gsub("[(,-]", "_", gsub("\\(\\)", "", features)))

# Read train and test sets
# Here we read in the train and test sets, immediately row-binding them,
# and keeping only the columns that we need, ie subject, activity, means
# and standard deviations. We also take care of column names by renaming
# all columns except the first two using the names generated above. The
# name din is short for "data input". This completes steps 1, 2 and 4 of
# the assignment.
din <- rbind(readSet("train"), readSet("test"))[, c(1, 2, keep + 2)]  # 10299 x 68
colnames(din)[-c(1, 2)] <- colNames[keep]

# Activity labels
# First we read activity labels from the file activity_labels.txt, then we
# convert din$activity to a factor, using the labels we have. This
# completes step 3.
activityLabels <- read.table(df("activity_labels.txt"), as.is = TRUE)[, 2]
din$activity <- factor(din$activity, levels = 1:6, labels = activityLabels)

# Aggregated data set
# Finally, we aggregate the data by computing the mean for each combination
# of subject and activity. The resulting data frame dout (which stands for
# "data output") is saved to the file aggregated.txt in the current
# directory. The file contains nice column names, and activities are saved
# as WALKING, WALKING_UPSTAIRS, ... and so on. The file can be read in
# using
#
#   read.table("aggregated.txt", header=TRUE)
#
# This completes step 5 of the assignment.
dout <- aggregate(din[, -c(1, 2)],
                  by = list(subject = din$subject, activity = din$activity),
                  mean)  # 180 x 68
write.table(dout, "aggregated.txt", quote = FALSE, row.names = FALSE)

# EOF
3510f9367f900998e4ff110a23b6403f317efe14
2b2ffa2e4274ab381b0c00efa5bc513c7c54a6b7
/beta/ui.R
e4de8d47fe3861a532af14a47d62c7b989e44be5
[]
no_license
vnsriniv/shinydiversity
6b4ef5d73c32692854e3e293087dd51d8c369f30
9519bb2e629a7658180ab8b93e0e71bea13cede5
refs/heads/master
2021-07-17T01:53:04.778250
2017-10-21T21:25:48
2017-10-21T21:25:48
null
0
0
null
null
null
null
UTF-8
R
false
false
688
r
ui.R
# Beta module UI function
#
# Builds the (currently skeletal) UI for the beta-diversity Shiny module.
# All referenced input IDs must be wrapped with a call to ns(),
# e.g. ns("someInputID"), so they live in this module's namespace.
betaUI <- function(id) {
  # Create a namespace function using the provided ID
  ns <- NS(id)

  # Define the UI for the app
  tagList(
    fluidPage(
      # Typo fix: was "Beta Diversiy" in the user-facing title.
      titlePanel("Beta Diversity"),
      # Sidebar layout
      sidebarLayout(
        # Sidebar panel
        sidebarPanel(
          # TODO: Sidebar panel UI logic here
        ),
        # Main panel
        mainPanel(
          # TODO: Main panel UI logic here
        )
      )
    )
  )
}
474f8f2d89dde9528a0f71460d3600559607d14e
b525b2183c597b7a3dedf0f37d4f712a392c2f77
/test-that/tests/test-missing-values.R
c31d2f72bbd0de3ed3959b550ccafdc9a3a1659d
[]
no_license
dakotalim/stat159labs
61f33997a1140ec50f2b11988308f46a0f2671b1
84e99137fb57926d575520bb65264e5caee66876
refs/heads/master
2021-01-17T10:27:11.635645
2016-10-31T20:16:22
2016-10-31T20:16:22
68,638,972
0
0
null
2016-10-24T20:26:38
2016-09-19T19:28:34
null
UTF-8
R
false
false
612
r
test-missing-values.R
# load the source code of the functions to be tested
source("../functions/missing-values.R")

# context with one test that groups expectations
context("Test for missing value")

test_that("missing value", {
  # Fixtures: vectors with 0, 1, 2, and 10 NA values respectively.
  x <- c(1, 2, 3, 4, 5)
  y <- c(1, 2, 3, 4, NA)
  z <- c(1, NA, 3, 4, NA)
  nas <- rep(NA, 10)
  # missing_values() should return the NA count as a single
  # non-negative number.
  expect_equal(missing_values(x), 0)
  expect_length(missing_values(x), 1)
  expect_gte(missing_values(x), 0)
  expect_equal(missing_values(y), 1)
  expect_length(missing_values(y), 1)
  expect_equal(missing_values(z), 2)
  expect_length(missing_values(z), 1)
  expect_equal(missing_values(nas), 10)
})
9162ed2a08d83d66d41b252383142e964ab7b728
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/trend/examples/bartels.test.Rd.R
4fdb088f1784c8e675a8e1ce61d10dc38f29fd4f
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
588
r
bartels.test.Rd.R
library(trend)

### Name: bartels.test
### Title: Bartels Test for Randomness
### Aliases: bartels.test
### Keywords: htest nonparametric ts univar

### ** Examples

# Example from Schoenwiese (1992, p. 113)
## Number of frost days in April at Munich from 1957 to 1968
# FIX: the frost assignment was commented out while bartels.test(frost)
# below was live, so the script errored on an undefined object. The
# assignment is restored as executable code, matching the trend package's
# documented example.
frost <- ts(data = c(9, 12, 4, 3, 0, 4, 2, 1, 4, 2, 9, 7), start = 1957)
bartels.test(frost)

## Example from Sachs (1997, p. 486)
x <- c(5, 6, 2, 3, 5, 6, 4, 3, 7, 8, 9, 7, 5, 3, 4, 7, 3, 5, 6, 7, 8, 9)
bartels.test(x)

## Example from Bartels (1982, p. 43)
x <- c(4, 7, 16, 14, 12, 3, 9, 13, 15, 10, 6, 5, 8, 2, 1, 11, 18, 17)
bartels.test(x)
69e8acdb6311eca57368a65876ed0e86eed48c8e
9bd5a5ab85e68040bc9a8694692ef3dfdea3d713
/R/RcppExports.R
1bdcdb21b4fcc8a1aaf30b71a536d5f9745f4f6c
[]
no_license
rfsaldanha/scanstatistics
578e9c8eaefa9ce4823dc5dac72e1168e4034b0d
652f027f40928b776e01373a191e05101bd8f841
refs/heads/master
2021-01-13T16:47:38.434417
2017-01-04T15:28:38
2017-01-04T15:28:38
null
0
0
null
null
null
null
UTF-8
R
false
false
6,686
r
RcppExports.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE: thin R wrappers around compiled C++ routines; the real
# implementations live in src/. Regenerate with compileAttributes()
# rather than editing here.

#' The log probability mass function of the Poisson distribution.
#'
#' @param x An integer.
#' @param lambda A positive scalar.
#' @return A log-probability.
#' @export
#' @keywords internal
poisson_lpmf <- function(x, lambda) {
    .Call('scanstatistics_poisson_lpmf', PACKAGE = 'scanstatistics', x, lambda)
}

#' Terms in the log-product (sum) of the EB-ZIP window statistic.
#'
#' Computes one or more terms in the log-product (sum) of the numerator or
#' denominator of the EB-ZIP window statistic (i.e. the one calculated for a
#' given space-time window W).
#' @param p Numeric vector of structural zero probabilities.
#' @param d Numeric vector of estimates of the structural zero indicators. Of
#'    same length as \code{p}.
#' @param mu Numeric vector of given/estimated Poisson expected value
#'    parameters. Of same length as \code{p}.
#' @param y Integer vector of observed counts. Of same length as \code{p}.
#' @param tol Scalar; probability p below this is considered equal to zero.
#' @return A numeric vector of same length as input vector \code{p}.
#' @keywords internal
zip_statistic_logfactor <- function(p, d, mu, y, tol = 1e-08) {
    .Call('scanstatistics_zip_statistic_logfactor', PACKAGE = 'scanstatistics', p, d, mu, y, tol)
}

#' Estimate the relative risk for an outbreak using a ZIP distribution.
#'
#' Scalar estimate of the relative risk \eqn{q} for zero-inflated Poisson data.
#' @param d A vector of indicator variables for whether the corresponding count
#'    in the argument \code{y} is an excess zero or not. Can also be estimates
#'    of these indicators.
#' @param mu A vector of given/estimated Poisson expected value parameters, of
#'    same length as \code{d}.
#' @param y An integer vector of the observed counts, of same length as
#'    \code{d}.
#' @return A scalar, the estimated relative risk.
#' @keywords internal
estimate_zip_relrisk <- function(d, mu, y) {
    .Call('scanstatistics_estimate_zip_relrisk', PACKAGE = 'scanstatistics', d, mu, y)
}

#' Estimate the indicators of excess zeros for a ZIP distribution.
#'
#' Given counts and (estimated) Poisson expected value parameters and excess
#' zero probabilities, this function estimates the indicator variable for an
#' excess zero, for each count.
#' @param p A numeric vector, each element being the given/estimated
#'    probability of an excess zero for the corresponding count in \code{y}.
#' @param mu A numeric vector, each element being the given/estimated Poisson
#'    expected value parameter for the corresponding count in \code{y}. Of same
#'    length as \code{p}.
#' @param y An integer vector containing the observed counts. Of same length as
#'    \code{p}.
#' @return A numeric vector, of same length as the input vector \code{p}.
#' @keywords internal
estimate_d <- function(p, mu, y) {
    .Call('scanstatistics_estimate_d', PACKAGE = 'scanstatistics', p, mu, y)
}

#' Estimates the ZIP relative risk and excess zero indicators for a window.
#'
#' For a single spatial or space-time window, this function uses the EM
#' algorithm to estimate the relative risk and the excess zero indicators for
#' counts assumed to be generated from a zero-inflated Poisson distribution.
#' @param p A numeric vector of the given/estimated excess zero probabilities
#'    corresponding to each count.
#' @param mu A numeric vector of the given/estimated Poisson expected value
#'    parameters corresponding to each count. Of same length as \code{p}.
#' @param y An integer vector of the observed counts, of same length as
#'    \code{p}.
#' @param tol A scalar between 0 and 1. It is the absolute tolerance criterion
#'    for the estimate of the excess zero indicator; convergence is reached when
#'    two successive elements in the sequence of estimates have an absolute
#'    difference less than \code{tol}.
#' @return A list with two elements:
#' \describe{
#'   \item{q}{Scalar estimate of the relative risk.}
#'   \item{dstar}{Estimates of the excess zero indicator variables.}
#' }
#' @keywords internal
zip_em_estimates <- function(p, mu, y, tol = 0.01) {
    .Call('scanstatistics_zip_em_estimates', PACKAGE = 'scanstatistics', p, mu, y, tol)
}

#' Calculate a term in the sum of the logarithm of the ZIP window statistic.
#'
#' This function calculates a term which appears in the sum of the logarithm of
#' the zero-inflated Poisson statistic for a given space-time window.
#' @param q Scalar; the relative risk.
#' @param p Numeric vector of excess zero probabilities.
#' @param dstar Numeric vector of estimates of the excess zero indicators, under
#'    the alternative hypothesis of an outbreak. Of same length as \code{p}.
#' @param ddagger Numeric vector of estimates of the excess zero indicators,
#'    under the null hypothesis of no outbreak. Of same length as \code{p}.
#' @param mu Numeric vector of given/estimated Poisson expected value
#'    parameters. Of same length as \code{p}.
#' @param y Integer vector of observed counts. Of same length as \code{p}.
#' @return A numeric vector of same length as input vector \code{p}.
#' @keywords internal
zip_statistic_term <- function(q, p, dstar, ddagger, mu, y) {
    .Call('scanstatistics_zip_statistic_term', PACKAGE = 'scanstatistics', q, p, dstar, ddagger, mu, y)
}

#' Calculate the ZIP statistic for a single space-time window.
#'
#' Calculate the single-window statistic for the zero-inflated Poisson
#' distribution using the EM algorithm.
#' @inheritParams zip_em_estimates
#' @param ... Named parameters passed to \code{\link{zip_em_estimates}}.
#' @return A scalar, the (logarithm of the) ZIP statistic.
#' @keywords internal
window_zip_statistic <- function(p, mu, y, tol = 0.01) {
    .Call('scanstatistics_window_zip_statistic', PACKAGE = 'scanstatistics', p, mu, y, tol)
}

#' Calculate the ZIP window statistic over all durations, for a given zone.
#'
#' This function calculates the zero-inflated Poisson statistic for a given
#' spatial zone, for all durations considered.
#' @param duration An integer vector.
#' @inheritParams zip_em_estimates
#' @return A list with two elements:
#' \describe{
#'   \item{duration}{Vector of integers from 1 to \code{maxdur}.}
#'   \item{statistic}{Numeric vector containing the ZIP statistics corresponding
#'      to each duration, for the given spatial zone.}
#' }
#' @keywords internal
calc_zipstat_over_duration <- function(duration, p, mu, y, maxdur, tol = 0.01) {
    .Call('scanstatistics_calc_zipstat_over_duration', PACKAGE = 'scanstatistics', duration, p, mu, y, maxdur, tol)
}
6870b755e4c66258e7e987563743a4f44d13c9b4
8f6111bea0b4e0af40adebebdb709fb16a680e56
/plot6.R
f6cb558a6c6b5b54bf97d32c84dbb41a1f66ace4
[]
no_license
Kbushu/Pollution
3fc8a2de996610486c7ba94e7984a856b82fe78c
b0303fdf9dd2a8c47a54f2df6a537c3101bcfaa7
refs/heads/master
2021-01-01T05:12:26.026239
2016-05-08T21:52:37
2016-05-08T21:52:37
58,314,527
0
0
null
null
null
null
UTF-8
R
false
false
1,342
r
plot6.R
# How have emissions from # motor vehicle sources changed from 1999–2008 in Baltimore City? # Read the data setwd("~/40 L&G/Coursera/ExDataAnalysis/Project2") if (!exists("NEI")) { # Emissions Data NEI <- readRDS(paste0(getwd() ,"/Data/summarySCC_PM25.rds")) } if (!exists("SCC")) { # Source Classification Code Table SCC <- readRDS(paste0(getwd() ,"/Data/Source_Classification_Code.rds")) } # Select the SCC codes for vehicle sources library(dplyr) vehicles <- SCC %>% filter(grepl("Mobile", x = SCC.Level.One ,ignore.case = TRUE)) %>% filter(grepl("vehicle", x = Short.Name, ignore.case = TRUE)) %>% select(SCC,Short.Name) #Convert to a vector to use in the filter vehicles <- vehicles %>% .$SCC # Start the png driver png(filename= paste0("plot6.png"), height=295, width=600, bg="white") # Prepare Data for plot df <- NEI %>% select(Emissions, year, fips, SCC) %>% filter(SCC %in% vehicles) %>% filter(fips == "24510") %>% group_by(year) %>% summarise(Total = sum(Emissions)) library(ggplot2) qplot(x = year, y = Total, data = df, geom = c("point", "line"), xlab="Year", ylab = "Tons of PM2.5", main = "Total Annual Vehicle Related Emissions \n Baltimore City, Maryland") # Export the plot dev.off()
da70ee8d3e6af6d241b8c3cbf5ac4e562687f61a
272ba64bc58529861e91e96e3a6f803c1fd85dc7
/PestNameGroups.R
84d9d0d4762b07bf3bd1a81a209510709dfdc614
[]
no_license
juliaceae/PSPMaster
376184e2d2dc8c0a3d6ac88fb03c882982a1ae8c
cb3261be4e4c283f49d56d224598a3e5d8a753fe
refs/heads/master
2020-04-06T07:02:05.533232
2016-08-24T17:53:26
2016-08-24T17:53:26
20,000,621
1
1
null
null
null
null
UTF-8
R
false
false
285
r
PestNameGroups.R
#JCrown #PSP #Grouping parmater names sort(unique(detects$Analyte)) #non-pesticides Non.Pest <- c("Conductivity", "Dissolved Oxygen", "Dissolved Oxygen, Saturation", "Temperature", "Total Solids", "Turbidity")
c451d488dea44a9654f2658c3ea5f204bbc19139
beaeb1d8298f35ac41af5d736b0065d390d8781e
/Problem 15.2.R
09e47cb8cc67e28999fac841bdb8224b8a5650ec
[]
no_license
HarshBhotika/Multivariate-Data-Analysis
014e0e4459f5eb3c8e5b77e67a3bfe1bd0266fea
b9a0bea6f05407b19930a3ad511dc51c00eefc50
refs/heads/master
2020-12-02T07:42:13.226631
2017-07-22T21:36:41
2017-07-22T21:36:41
96,715,108
1
0
null
null
null
null
UTF-8
R
false
false
616
r
Problem 15.2.R
# Code written by Harsh Bhotika depress <- read.csv(file.choose(), header = TRUE) View(depress) attach(depress) install.packages("psych") library(psych) install.packages("psy") library(psy) bind <- depress[,c("C1", "C2", "C3","C4", "C5", "C6", "C7", "C8", "C9", "C10", "C11", "C12", "C13" , "C14", "C15", "C16", "C17", "C18", "C19", "C20")] bind dataframe <- data.frame(bind) dataframe View(dataframe) factor1<- factanal(dataframe,factor=4, rotation = "varimax") factor1 factor2<- factanal(dataframe,factor=4, rotation = "promax") factor2 scree.plot(factor1$correlations)
50f75d6750dbfc77c87e1e3252dab23d953a4ce6
62a5ff47bc82332003d28a2c17bc8e94d17f4b69
/Documents/MA684/Work/hw6/hw6.R
61d0b06b9fa214eb21a2b5ad90bfa03ecbd7c310
[]
no_license
jiayuans/Applied-Multiple-Regression-and-Multivariable-Methods
13ce5039c174ae3a4c0228b0a1b345d95e8c42c1
dae2fe6d2a1ccb2a7c529bb210b330ea69399efe
refs/heads/master
2021-01-10T09:08:43.260841
2016-03-29T18:40:38
2016-03-29T18:40:38
null
0
0
null
null
null
null
UTF-8
R
false
false
597
r
hw6.R
setwd("/Users/jiayuan/Documents/MA684/hw6") homeprices <- read.csv("homeprices.csv",header=T) attach(homeprices) cor.test(price,size) cor.test(price,bedrooms) cor.test(price,age) cor.test(age,size) pcor.test(price,bedrooms,size) pcor.test(price,age,size) score <- read.csv("EnviroAttitudeTrial.csv",header=T) attach(score) head(score) reg <- lm(enviroscore1~enviroscore0+SexM+intervention) summary(reg) reg0i <- lm(enviroscore1~enviroscore0+SexM+intervention+enviroscore0*intervention) summary(reg0i) regSi <- lm(enviroscore1~enviroscore0+SexM+intervention+SexM*intervention) summary(regSi)
2170c4f632c0a2efc8ec5a3c608ef6837847f18b
4663b90ac7c885c7184c9b18a0a25e2c28005413
/simulation/case2/Functions/formulaConstruct.R
8685c0e4d94b43e8944d9a92ec480a5f6cfc75b1
[]
no_license
TingtingKayla/Stats_Robust_Causal_Estimation
c32a6bc6af32e1ea77ada65078233e6a8fbfdf6d
69174f1651777616edfa454dbafe5d873008e512
refs/heads/master
2023-04-17T14:58:37.143263
2021-04-30T00:54:13
2021-04-30T00:54:13
362,987,687
0
0
null
null
null
null
UTF-8
R
false
false
336
r
formulaConstruct.R
######formula construct formulaF=function(varList, y.name){ return ( as.formula(paste(y.name, "~ ", paste(c(varList), collapse = "+"))) ) } ###formula construct for generalized model formulaGAM=function(varList, y.name, spline){ return ( as.formula(paste(y.name, " ~ ", paste(c(varList), collapse = "+"), "+", spline)) ) }
f126ec6beb3f42c1f30524a4ce7c2b37491d812b
29585dff702209dd446c0ab52ceea046c58e384e
/dvfBm/R/dvFBM.R
f3ca5760298295bee9ff0ea207aec44bd86cafe5
[]
no_license
ingted/R-Examples
825440ce468ce608c4d73e2af4c0a0213b81c0fe
d0917dbaf698cb8bc0789db0c3ab07453016eab9
refs/heads/master
2020-04-14T12:29:22.336088
2016-07-21T14:01:14
2016-07-21T14:01:14
null
0
0
null
null
null
null
UTF-8
R
false
false
5,268
r
dvFBM.R
## ---------------------------------------------------------------------------
## Estimation of the Hurst exponent H of a fractional Brownian motion (fBm)
## sample path by discrete filtering: the series is filtered with dilated
## versions a^m of a base filter a, a scale statistic U is computed for each
## dilation m = M1..M2, and H is read off the slope of the log-log regression
## of U on m (see dvFBM below).
## ---------------------------------------------------------------------------

# Return the coefficients of the filter named `nm`:
#  * "iJ" gives the increments filter of order J (alternating-sign binomial
#    coefficients);
#  * any other name is looked up as a Daubechies / Symmlet / Coiflet wavelet
#    filter via the `wmtsa` package (loaded lazily here).
filt <- function(nm = "i2") {
  ## gives the coefficients of an increments type filter, a Daublet, Symmlet
  ## or Coiflet filter
  fact <- function(n) ifelse(n == 0, 1, prod(1:n))   # factorial (scalar n)
  cnk <- function(n, k) fact(n)/fact(k)/fact(n-k)    # binomial coefficient C(n,k)
  # "iJ" names: the order J is parsed from after the "i"; note the
  # assignment inside the condition stores it in `o`.
  if (!is.na(o <- as.numeric(strsplit(nm, "i")[[1]][2]))) {
    l <- o + 1
    a <- rep(0, l)
    for (k in 0:o) {
      a[k+1] <- cnk(o, k)*(-1)^(k)   # alternating-sign binomial weights
    }
  } else {
    # NOTE(review): require() fails silently when `wmtsa` is missing.
    require(wmtsa)
    a <- wavDaubechies(nm)$wav
  }
  a
}

# Dilated version a^m of filter `a`: keeps the original coefficients and
# inserts m-1 zeros between consecutive ones (length m*length(a) - 1).
# For m == 1 the filter is returned unchanged.
dilatation <- function(a = c(1,-2,1), m = 2) {
  ## provides the dilated version of a filter a
  if (m > 1) {
    la <- length(a)
    am <- rep(0, m * la - 1)
    am[seq(1, m * la - 1, by = m)] <- a
    am
  } else a
}

# Estimate the Hurst exponent H from an fBm sample path `fbm`.
#
# Arguments:
#   fbm    numeric vector: the observed path (required).
#   nma    filter name: "iJ" (increments of order J) or a wavelet name
#          from `listDaub` below.
#   M1,M2  range of dilation factors used in the log-log regression.
#   method scale statistic: second moment ("ST"), linear combination of
#          quantiles ("Q"), trimmed mean ("TM"), or the "B0-*" / "B1-*"
#          variants built from the statistics at dilations 2m and m.
#   par    method parameters: vecp/vecc (quantile probabilities and weights)
#          for the "*Q" methods; beta1/beta2 (lower/upper trimming
#          proportions) for the "*TM" methods.
#   llplot if TRUE, draw the log-log regression diagnostic plot.
#
# Returns the scalar estimate of H.
#
# NOTE(review): `method` is used as a scalar and never run through
# match.arg(); callers must pass a single method name, otherwise the
# `if (method %in% ...)` tests below receive a vector condition.
dvFBM <- function(fbm, nma = "i2", M1 = 1, M2 = 5, method = c("ST","Q","TM","B1-ST","B1-Q","B1-TM","B0-ST","B0-Q","B0-TM"), par = list(), llplot = FALSE) {
  if (missing(fbm)) stop("Missing data")
  # Wavelet filter names accepted by filt().
  listDaub <- c("d2", "d4", "d6", "d8","d10", "d12", "d14", "d16", "d18", "d20","s2","s4", "s6", "s8", "s10","s12", "s14", "s16", "s18", "s20", "l2","l4", "l6", "l14", "l18", "l20","c6", "c12", "c18", "c24", "c30")
  # Validate the filter name and fetch the filter coefficients.
  l <- strsplit(nma, "")[[1]][1]
  if (l == "i") {
    if (length(strsplit(nma, "i")[[1]]) > 2) stop("Bad entry name for the filter ")
    if (is.na(as.numeric(strsplit(nma, "i")[[1]][2]))) stop("Bad entry name for the filter")
    a <- filt(nma)
  } else {
    if (nma %in% listDaub) {
      a <- filt(nma)
    } else stop("Bad entry name for the filter")
  }
  # Method-specific parameters must be supplied through `par`.
  if (method %in% c("Q","B0-Q","B1-Q")) {
    if (is.null(par$vecp) | is.null(par$vecc)) stop('"par=list(vecp=...,vecc=...) is needed for methods "Q","B0-Q","B1-Q"')
  }
  if (method %in% c("TM","B0-TM","B1-TM")) {
    if (is.null(par$beta1) | is.null(par$beta2)) stop('par=list(beta1=...,beta2=...) 
is needed for methods "TM","B0-TM","B1-TM"')
  }
  if (!(method %in% c("ST","Q","TM","B1-ST","B1-Q","B1-TM","B0-ST","B0-Q","B0-TM"))) stop('Method should be one of "ST","Q","TM","B1-ST","B1-Q","B1-TM","B0-ST","B0-Q","B0-TM"')
  l <- length(a) - 1   # filter order (reuses `l`, shadowing the first letter above)
  n <- length(fbm)
  Unam <- NULL
  # Compute U for each dilation m = M1..M2. stats::filter(..., sides = 1)
  # applies the dilated filter one-sidedly; the leading NAs (or, for "TM",
  # the first m*l values) are dropped before taking the statistic.
  Unam <- switch(method,
    "ST" = {
      # empirical second moment of the filtered series
      for (m in M1:M2) {
        am <- dilatation(a, m)
        Vam <- filter(fbm, am, sides = 1)
        Vam <- Vam[!is.na(Vam)]
        Unam <- c(Unam, mean(Vam^2))
      }
      Unam
    },
    "Q" = {
      # linear combination of quantiles of the squared filtered series
      vecp <- par$vecp
      vecc <- par$vecc
      for (m in M1:M2) {
        am <- dilatation(a, m)
        Vam <- filter(fbm, dilatation(a, m), sides = 1)
        Vam <- Vam[!is.na(Vam)]
        Unam <- c(Unam, sum(vecc * quantile(Vam^2, vecp)))
      }
      Unam
    },
    "TM" = {
      # trimmed mean of the squared filtered series
      beta1 <- par$beta1
      beta2 <- par$beta2
      for (m in M1:M2) {
        am <- dilatation(a, m)
        Vam <- filter(fbm, am, sides = 1)
        Vam <- Vam[ - (1:(m*l))]   # drop the one-sided filter burn-in
        tmp <- sort(Vam^2)
        nn <- length(Vam)
        Unam <- c(Unam, mean(tmp[(trunc(nn*beta1)+1):(nn-trunc(nn*beta2))]))
      }
      Unam
    },
    "B1-ST" = {
      # difference of second moments at dilations 2m and m
      for (m in M1:M2) {
        Va2m <- filter(fbm, dilatation(a, 2*m), sides = 1)
        Va2m <- Va2m[!is.na(Va2m)]
        Vam <- filter(fbm, dilatation(a, m), sides = 1)
        Vam <- Vam[!is.na(Vam)]
        Unam <- c(Unam, abs(mean(Va2m^2)-mean(Vam^2)))
      }
      Unam
    },
    "B1-Q" = {
      # difference of quantile combinations at dilations 2m and m
      vecp <- par$vecp
      vecc <- par$vecc
      for (m in M1:M2) {
        Va2m <- filter(fbm, dilatation(a, 2*m), sides = 1)
        Va2m <- Va2m[!is.na(Va2m)]
        Vam <- filter(fbm, dilatation(a, m), sides = 1)
        Vam <- Vam[!is.na(Vam)]
        Unam <- c(Unam, abs(sum(vecc * quantile(Va2m^2, vecp)) - sum(vecc * quantile(Vam^2, vecp))))
      }
      Unam
    },
    "B1-TM" = {
      # difference of trimmed means at dilations 2m and m
      beta1 <- par$beta1
      beta2 <- par$beta2
      for (m in M1:M2) {
        Va2m <- filter(fbm, dilatation(a, 2*m), sides = 1)
        Va2m <- Va2m[!is.na(Va2m)]
        Vam <- filter(fbm, dilatation(a, m), sides = 1)
        Vam <- Vam[!is.na(Vam)]
        tmp <- sort(Vam^2)
        nn <- length(Vam)
        tmp2 <- sort(Va2m^2)
        nn2 <- length(Va2m)
        Unam <- c(Unam, abs(mean(tmp2[(trunc(nn2*beta1)+1):(nn2-trunc(nn2*beta2))]) - mean(tmp[(trunc(nn*beta1)+1):(nn-trunc(nn*beta2))])))
      }
      Unam
    },
    "B0-ST" = {
      # difference of per-dilation-normalised second moments
      for (m in M1:M2) {
        Va2m <- filter(fbm, dilatation(a, 2*m), sides = 1)
        Va2m <- Va2m[!is.na(Va2m)]
        Vam <- filter(fbm, dilatation(a, m), sides = 1)
        Vam <- Vam[!is.na(Vam)]
        Unam <- c(Unam, abs(mean(Va2m^2)/(2*m)-mean(Vam^2)/m))
      }
      Unam
    },
    "B0-Q" = {
      vecp <- par$vecp
      vecc <- par$vecc
      for (m in M1:M2) {
        Va2m <- filter(fbm, dilatation(a, 2*m), sides = 1)
        Va2m <- Va2m[!is.na(Va2m)]
        Vam <- filter(fbm, dilatation(a, m), sides = 1)
        Vam <- Vam[!is.na(Vam)]
        Unam <- c(Unam, abs(sum(vecc * quantile(Va2m^2, vecp))/(2*m) - sum(vecc * quantile(Vam^2, vecp))/m))
      }
      Unam
    },
    "B0-TM" = {
      beta1 <- par$beta1
      beta2 <- par$beta2
      for (m in M1:M2) {
        Va2m <- filter(fbm, dilatation(a, 2*m), sides = 1)
        Va2m <- Va2m[!is.na(Va2m)]
        Vam <- filter(fbm, dilatation(a, m), sides = 1)
        Vam <- Vam[!is.na(Vam)]
        tmp <- sort(Vam^2)
        nn <- length(Vam)
        tmp2 <- sort(Va2m^2)
        nn2 <- length(Va2m)
        Unam <- c(Unam, abs(mean(tmp2[(trunc(nn2*beta1)+1):(nn2-trunc(nn2*beta2))])/(2*m) - mean(tmp[(trunc(nn*beta1)+1):(nn-trunc(nn*beta2))])/m ))
      }
      Unam
    }
  )
  # Log-log regression of the statistic against the dilation factor. The
  # slope carries the information on H; the "B0-*" variants need the +1
  # correction below.
  reg <- lm(log(Unam)~log(M1:M2))
  opt <- rev(reg$coeff)   # opt[1] = slope, opt[2] = intercept
  if (method %in% c("B0-ST","B0-Q","B0-TM")) Hest <- (opt[1]+1)/2 else Hest <- opt[1]/2
  if (llplot) { plot(log(Unam)~log(M1:M2)); abline(reg)}
  Hest
}
29e9ffe38f4aaa0810fd62e934c6fa2675e44513
3d402d23d8d7148664c3ef47d350c4d31a6dbf1f
/Visualization_Scripts/Dual_Process_Signal_Detection_Parameter_Estimates_Bar_Graphs.R
27e4dfe9785dd07e8f354f4227831229dd18d692
[]
no_license
RuthAShaffer/Analyzing-the-mechanisms-that-underlie-the-testing-effect-across-age
2c0d7641e82f94ae63b19bd2ad9597c431be5752
23e4126f6fd1a70d8b4e847ffb0c527e8b8eed85
refs/heads/main
2023-01-09T17:12:29.486536
2020-11-01T15:42:23
2020-11-01T15:42:23
308,968,751
1
0
null
null
null
null
UTF-8
R
false
false
15,867
r
Dual_Process_Signal_Detection_Parameter_Estimates_Bar_Graphs.R
##### SCRIPT INFO #####
# This script loads summary data and creates visualizations of
# the testing effect in Dual-Process parameter estimates.
# Bar graphs: With and without individual data points overlain
# Author: Ruth A. Shaffer

##### SET WD TO FILE PATH ####
# NOTE(review): rstudioapi makes this script runnable only from within
# RStudio (the active-document path is used as the working directory).
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))

##### LOAD PACKAGES ####
library("ggplot2")
library("dplyr")
library("tidyr")

##################### IMPORT DATA #####################
##### IMPORT SUMMARY DATA ####
# One row per subject; includes group, delay, and four S2_* parameter
# estimate columns consumed below.
SUBJECT_LEVEL_ESTIMATES = as.data.frame(read.csv("../../DATA/R_SUMMARY_DATA/SUBJECT_LEVEL_ESTIMATES.csv",header=TRUE,sep=","))

##### VARIABLES: SUMMARY DATA ####
SUBJECT_LEVEL_ESTIMATES

##################### CREATE SESSION 2 GRAPH #####################
##### ORGANIZE DATA FOR PLOTTING ####
# data for working with
SESSION2_DATA = SUBJECT_LEVEL_ESTIMATES

# variables as factors (explicit level order fixes panel / legend order)
SESSION2_DATA = SESSION2_DATA %>%
  mutate(subid = factor(subid),
         group = factor(group,
                        levels = c("younger", "older"),
                        labels = c("Younger", "Older")),
         delay = factor(delay,
                        levels = c("delay0", "delay1"),
                        labels = c("No Delay", "1 Day Delay")))

# data into long format: one row per subject x parameter x prior condition.
# Column names encode both, e.g. "S2_REC_WITH_FA_TEST", so splitting on
# "_WITH_FA_" recovers parameter and prior_condition.
SESSION2_DATA_LONG = gather(SESSION2_DATA, temp_name, estimate,
                            c("S2_REC_WITH_FA_TEST","S2_REC_WITH_FA_NOTEST","S2_FAM_WITH_FA_TEST","S2_FAM_WITH_FA_NOTEST"),
                            factor_key=TRUE) %>%
  separate(temp_name, sep = "_WITH_FA_", c("parameter", "prior_condition"))

# refactor variables
SESSION2_DATA_LONG = SESSION2_DATA_LONG %>%
  mutate(parameter = factor(parameter,
                            levels = c("S2_REC", "S2_FAM"),
                            labels = c("Recollection", "Familiarity")),
         prior_condition = factor(prior_condition,
                                  levels = c("TEST", "NOTEST"),
                                  labels = c("Test", "No Test")))

# get summary stats (mean / SE) for graphing; SE = sd / sqrt(n non-missing)
SESSION2_DATA_LONG_SUMMARY = SESSION2_DATA_LONG %>%
  group_by(group,delay,parameter,prior_condition) %>%
  summarise(MEAN = mean(estimate,na.rm=TRUE),
            SE = sd(estimate,na.rm=TRUE)/sqrt(sum(!is.na(estimate))))

# add labels for plotting: panel letters A-D keyed to group x parameter
SESSION2_DATA_LONG_SUMMARY$label = ifelse(
  (SESSION2_DATA_LONG_SUMMARY$group == "Younger" & SESSION2_DATA_LONG_SUMMARY$parameter == "Recollection"),
  "A",
  ifelse(
    (SESSION2_DATA_LONG_SUMMARY$group == "Younger" & SESSION2_DATA_LONG_SUMMARY$parameter == "Familiarity"),
    "C",
    ifelse(
      (SESSION2_DATA_LONG_SUMMARY$group == "Older" & SESSION2_DATA_LONG_SUMMARY$parameter == "Recollection"),
      "B",
      "D"
    )
  )
)

##### GRAPH: RECOLLECTION AND FAMILIARITY WITH INDIVIDUAL DATA POINTS #####
# 2x2 faceted bar graph (parameter x age group) with per-subject points
# jittered over the bars and panel letters in the top-right corners.
SESSION2_REC_FAM_WITH_FA_WITH_DATA_POINTS = ggplot(SESSION2_DATA_LONG_SUMMARY, aes(x = delay, y = MEAN, group = prior_condition, fill = prior_condition)) +
  facet_grid(parameter ~ group)+
  geom_bar(position = position_dodge(),
           stat = "identity",
           alpha = .5,
           colour="black",
           size=1)+
  geom_text( size = 5,
             data = SESSION2_DATA_LONG_SUMMARY,
             mapping = aes(x = Inf, y = Inf, label = label),
             hjust = 1.5,
             vjust = 1.5 ) +
  geom_errorbar(aes(ymin=MEAN-SE, ymax=MEAN+SE),
                width=.2, # Width of the error bars
                size=1,
                position=position_dodge(.9)) +
  geom_point(data=SESSION2_DATA_LONG,
             aes(x = delay, y = estimate, group = prior_condition),
             alpha = .4,
             position=position_jitterdodge(dodge.width=0.9, jitter.width = 0.4))+
  scale_y_continuous(breaks=seq(0,1,.2))+
  xlab("Delay") +
  ylab("Final Test Parameter Estimates") +
  scale_fill_manual(name="Learning Condition", values=c("gray54","gray100")) +
  guides(fill = guide_legend(override.aes = list(shape = NA))) +
  theme_bw() +
  theme(axis.title.x = element_text(size = 12, margin = margin(t = 15), colour = "black"),
        axis.text.x = element_text(size=12, colour = "black"),
        axis.title.y = element_text(size = 12, margin = margin(r = 15), colour = "black"),
        axis.text.y = element_text(size=12, colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.ticks = element_line(size = 1),
        panel.border = element_rect(size = 2, linetype = "solid", colour = "black"),
        axis.line.x = element_blank(),
        axis.line.y = element_blank(),
        legend.title = element_text(size=12, colour = "black"),
        legend.text = element_text(size=12, colour = "black"),
        legend.position = c(1.17, .5),
        plot.margin=unit(c(.5,4,.5,.5),"cm"),
        strip.text = element_text(size = 15,colour = "black"),
        strip.background = element_blank())

##### SAVE GRAPH #####
ggsave("../../R_OUTPUT/PLOTS/SESSION2_REC_FAM_WITH_FA_WITH_DATA_POINTS.pdf", SESSION2_REC_FAM_WITH_FA_WITH_DATA_POINTS, width=8, height=9)

##### GRAPH: RECOLLECTION AND FAMILIARITY NO INDIVIDUAL DATA POINTS #####
# Same 2x2 layout as above but bars only, with a fixed 0-0.55 y range.
SESSION2_REC_FAM_WITH_FA = ggplot(SESSION2_DATA_LONG_SUMMARY, aes(x = delay, y = MEAN, group = prior_condition, fill = prior_condition)) +
  facet_grid(parameter ~ group)+
  geom_bar(position = position_dodge(),
           stat = "identity",
           alpha = .5,
           colour="black",
           size=1)+
  geom_text( size = 5,
             data = SESSION2_DATA_LONG_SUMMARY,
             mapping = aes(x = Inf, y = Inf, label = label),
             hjust = 1.5,
             vjust = 1.5 ) +
  geom_errorbar(aes(ymin=MEAN-SE, ymax=MEAN+SE),
                width=.2, # Width of the error bars
                size=1,
                position=position_dodge(.9)) +
  scale_y_continuous(limits = c(0,.55), expand = c(0,0)) +
  xlab("Delay") +
  ylab("Final Test Parameter Estimates") +
  scale_fill_manual(name="Learning Condition", values=c("gray54","gray100")) +
  guides(fill = guide_legend(override.aes = list(shape = NA))) +
  theme_bw() +
  theme(axis.title.x = element_text(size = 12, margin = margin(t = 15), colour = "black"),
        axis.text.x = element_text(size=12, colour = "black"),
        axis.title.y = element_text(size = 12, margin = margin(r = 15), colour = "black"),
        axis.text.y = element_text(size=12, colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.ticks = element_line(size = 1),
        panel.border = element_rect(size = 2, linetype = "solid", colour = "black"),
        axis.line.x = element_blank(),
        axis.line.y = element_blank(),
        legend.title = element_text(size=12, colour = "black"),
        legend.text = element_text(size=12, colour = "black"),
        legend.position = c(1.17, .5),
        plot.margin=unit(c(.5,4,.5,.5),"cm"),
        strip.text = element_text(size = 15,colour = "black"),
        strip.background = element_blank())

##### SAVE GRAPH #####
ggsave("../../R_OUTPUT/PLOTS/SESSION2_REC_FAM_WITH_FA.pdf", SESSION2_REC_FAM_WITH_FA, width=8, height=9)

##### GRAPH: YOUNGER + NO INDIVIDUAL DATA POINTS #####
# select younger adult data
YOUNGER_FOR_PLOT = SESSION2_DATA_LONG_SUMMARY
YOUNGER_FOR_PLOT = subset(YOUNGER_FOR_PLOT,group=="Younger")

SESSION2_REC_FAM_WITH_FA_YOUNGER = ggplot(YOUNGER_FOR_PLOT, aes(x = parameter, y = MEAN, group = prior_condition, fill = prior_condition)) +
  facet_grid(~delay)+
  geom_bar(position = position_dodge(),
           stat = "identity",
           alpha = .5,
           colour="black",
           size=1)+
  geom_errorbar(aes(ymin=MEAN-SE, ymax=MEAN+SE),
                width=.2, # Width of the error bars
                size=1,
                position=position_dodge(.9)) +
  scale_y_continuous(limits = c(0,.55), expand = c(0,0)) +
  xlab("Parameter") +
  ylab("Final Test Parameter Estimates") +
  scale_fill_manual(name="Learning Condition", values=c("gray54","gray100")) +
  guides(fill = guide_legend(override.aes = list(shape = NA))) +
  theme_bw() +
  theme(axis.title.x = element_text(size = 12, margin = margin(t = 15), colour = "black"),
        axis.text.x = element_text(size=12, colour = "black"),
        axis.title.y = element_text(size = 12, margin = margin(r = 15), colour = "black"),
        axis.text.y = element_text(size=12, colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.ticks = element_line(size = 1),
        panel.border = element_rect(size = 2, linetype = "solid", colour = "black"),
        axis.line.x = element_blank(),
        axis.line.y = element_blank(),
        legend.title = element_text(size=12, colour = "black"),
        legend.text = element_text(size=12, colour = "black"),
        legend.position = c(1.12, .5),
        plot.margin=unit(c(.5,4,.5,.5),"cm"),
        strip.text = element_text(size = 15,colour = "black"),
        strip.background = element_blank())

##### SAVE GRAPH #####
ggsave("../../R_OUTPUT/PLOTS/SESSION2_REC_FAM_WITH_FA_YOUNGER.pdf", SESSION2_REC_FAM_WITH_FA_YOUNGER, width=9, height=5.5)

##### GRAPH: OLDER + NO INDIVIDUAL DATA POINTS #####
# select older adult data
OLDER_FOR_PLOT = SESSION2_DATA_LONG_SUMMARY
OLDER_FOR_PLOT = subset(OLDER_FOR_PLOT,group=="Older")

SESSION2_REC_FAM_WITH_FA_OLDER = ggplot(OLDER_FOR_PLOT, aes(x = parameter, y = MEAN, group = prior_condition, fill = prior_condition)) +
  facet_grid(~delay)+
  geom_bar(position = position_dodge(),
           stat = "identity",
           alpha = .5,
           colour="black",
           size=1)+
  geom_errorbar(aes(ymin=MEAN-SE, ymax=MEAN+SE),
                width=.2, # Width of the error bars
                size=1,
                position=position_dodge(.9)) +
  scale_y_continuous(limits = c(0,.55), expand = c(0,0)) +
  xlab("Parameter") +
  ylab("Final Test Parameter Estimates") +
  scale_fill_manual(name="Learning Condition", values=c("gray54","gray100")) +
  guides(fill = guide_legend(override.aes = list(shape = NA))) +
  theme_bw() +
  theme(axis.title.x = element_text(size = 12, margin = margin(t = 15), colour = "black"),
        axis.text.x = element_text(size=12, colour = "black"),
        axis.title.y = element_text(size = 12, margin = margin(r = 15), colour = "black"),
        axis.text.y = element_text(size=12, colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.ticks = element_line(size = 1),
        panel.border = element_rect(size = 2, linetype = "solid", colour = "black"),
        axis.line.x = element_blank(),
        axis.line.y = element_blank(),
        legend.title = element_text(size=12, colour = "black"),
        legend.text = element_text(size=12, colour = "black"),
        legend.position = c(1.12, .5),
        plot.margin=unit(c(.5,4,.5,.5),"cm"),
        strip.text = element_text(size = 15,colour = "black"),
        strip.background = element_blank())

##### SAVE GRAPH #####
ggsave("../../R_OUTPUT/PLOTS/SESSION2_REC_FAM_WITH_FA_OLDER.pdf", SESSION2_REC_FAM_WITH_FA_OLDER, width=9, height=5.5)

##### GRAPH: RECOLLECTION + NO INDIVIDUAL DATA POINTS #####
# select recollection data
RECOLLECTION_FOR_PLOT = SESSION2_DATA_LONG_SUMMARY
RECOLLECTION_FOR_PLOT = subset(RECOLLECTION_FOR_PLOT,parameter=="Recollection")

SESSION2_REC_FAM_WITH_FA_RECOLLECTION_ONLY = ggplot(RECOLLECTION_FOR_PLOT, aes(x = group, y = MEAN, group = prior_condition, fill = prior_condition)) +
  facet_grid(~delay)+
  geom_bar(position = position_dodge(),
           stat = "identity",
           alpha = .5,
           colour="black",
           size=1)+
  geom_errorbar(aes(ymin=MEAN-SE, ymax=MEAN+SE),
                width=.2, # Width of the error bars
                size=1,
                position=position_dodge(.9)) +
  scale_y_continuous(limits = c(0,.55), expand = c(0,0)) +
  xlab("Age Group") +
  ylab("Final Test Parameter Estimates") +
  scale_fill_manual(name="Learning Condition", values=c("gray54","gray100")) +
  guides(fill = guide_legend(override.aes = list(shape = NA))) +
  theme_bw() +
  theme(axis.title.x = element_text(size = 12, margin = margin(t = 15), colour = "black"),
        axis.text.x = element_text(size=12, colour = "black"),
        axis.title.y = element_text(size = 12, margin = margin(r = 15), colour = "black"),
        axis.text.y = element_text(size=12, colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.ticks = element_line(size = 1),
        panel.border = element_rect(size = 2, linetype = "solid", colour = "black"),
        axis.line.x = element_blank(),
        axis.line.y = element_blank(),
        legend.title = element_text(size=12, colour = "black"),
        legend.text = element_text(size=12, colour = "black"),
        legend.position = c(1.12, .5),
        plot.margin=unit(c(.5,4,.5,.5),"cm"),
        strip.text = element_text(size = 15,colour = "black"),
        strip.background = element_blank())

##### SAVE GRAPH #####
ggsave("../../R_OUTPUT/PLOTS/SESSION2_REC_FAM_WITH_FA_RECOLLECTION_ONLY.pdf", SESSION2_REC_FAM_WITH_FA_RECOLLECTION_ONLY, width=9, height=5.5)

##### GRAPH: FAMILIARITY + NO INDIVIDUAL DATA POINTS #####
# select familiarity data
FAMILIARITY_FOR_PLOT = SESSION2_DATA_LONG_SUMMARY
FAMILIARITY_FOR_PLOT = subset(FAMILIARITY_FOR_PLOT,parameter=="Familiarity")

SESSION2_REC_FAM_WITH_FA_FAMILIARITY_ONLY = ggplot(FAMILIARITY_FOR_PLOT, aes(x = group, y = MEAN, group = prior_condition, fill = prior_condition)) +
  facet_grid(~delay)+
  geom_bar(position = position_dodge(),
           stat = "identity",
           alpha = .5,
           colour="black",
           size=1)+
  geom_errorbar(aes(ymin=MEAN-SE, ymax=MEAN+SE),
                width=.2, # Width of the error bars
                size=1,
                position=position_dodge(.9)) +
  scale_y_continuous(limits = c(0,.55), expand = c(0,0)) +
  xlab("Age Group") +
  ylab("Final Test Parameter Estimates") +
  scale_fill_manual(name="Learning Condition", values=c("gray54","gray100")) +
  guides(fill = guide_legend(override.aes = list(shape = NA))) +
  theme_bw() +
  theme(axis.title.x = element_text(size = 12, margin = margin(t = 15), colour = "black"),
        axis.text.x = element_text(size=12, colour = "black"),
        axis.title.y = element_text(size = 12, margin = margin(r = 15), colour = "black"),
        axis.text.y = element_text(size=12, colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.ticks = element_line(size = 1),
        panel.border = element_rect(size = 2, linetype = "solid", colour = "black"),
        axis.line.x = element_blank(),
        axis.line.y = element_blank(),
        legend.title = element_text(size=12, colour = "black"),
        legend.text = element_text(size=12, colour = "black"),
        legend.position = c(1.12, .5),
        plot.margin=unit(c(.5,4,.5,.5),"cm"),
        strip.text = element_text(size = 15,colour = "black"),
        strip.background = element_blank())

##### SAVE GRAPH #####
ggsave("../../R_OUTPUT/PLOTS/SESSION2_REC_FAM_WITH_FA_FAMILIARITY_ONLY.pdf", SESSION2_REC_FAM_WITH_FA_FAMILIARITY_ONLY, width=9, height=5.5)
a72d067f6ffebdb583b707d3a9b686feb2485e4b
1eb7b9f8cebd79d9d2e3a745e0975a37f7c8ea35
/chapter_9/chapter_9.r
5f9acfe3daf5b405817d572b7d3a7a2b667089ac
[]
no_license
raleighlittles/The_Pirates_Guide_to_R-Solutions
115bfdaad800eaeba3e2020065ecad8048ef5863
e023a6b3d54ef5d7042c2719a392f6ef45a31bef
refs/heads/master
2020-03-15T11:34:58.562961
2019-02-08T07:12:21
2019-02-08T07:12:21
132,123,340
0
0
null
null
null
null
UTF-8
R
false
false
98
r
chapter_9.r
# These are interactive R Studio dependent exercises that there are not really any solutions for.
058e997886dbe66e4295874a1e5d75f23cc54fe0
27f9da112786a84da2296cd94f2a3cc0e455f8cc
/cortest.R
3cf126b5236878a1be72b62c05a34e89f74e0569
[]
no_license
Dan609/stat
a980e4aac638d626132dcb72bba5ee8301796830
25851eb526bfcddaf058db08747132e94f334497
refs/heads/master
2021-06-17T21:42:45.725305
2021-03-01T07:40:48
2021-03-01T07:40:48
171,457,622
0
0
null
null
null
null
WINDOWS-1251
R
false
false
4,275
r
cortest.R
# Exploratory correlation exercises on mtcars / iris / step6.csv.
# Packages used throughout; loaded up front so every section below runs.
# (BUG FIX: the original script called ggplot() and corr.test() before
# their packages were attached anywhere in the file.)
library(ggplot2)
library(psych)

df <- mtcars
cor.test(x = df$mpg, y = df$hp)
fit <- cor.test(x = df$mpg, y = df$hp)
cor.test(~ mpg + hp, df)    # formula interface, same test
str(fit)
fit$p.value
plot(x = df$mpg, y = df$hp)

ggplot(df, aes(x = mpg, y = hp, col = factor(cyl)))+
  geom_point(size = 5)+
  facet_grid(. ~ am)

###########################################
df <- mtcars
df_numeric <- df[, c(1,3:7)]   # mpg, disp, hp, drat, wt, qsec
pairs(df_numeric)
cor(df_numeric)
fit <- corr.test(df_numeric)   # psych::corr.test: r, p, adjusted p
fit$r
fit$p
fit$adjust
df[sapply(df, is.numeric)]

####
# Pearson correlation between the first two columns of x; returns the
# estimate and the p-value.
corr.calc <- function(x){
  x <- cor.test(~ x[,1] + x[,2], x)
  return(c(x$estimate, x$p.value))
}
corr.calc( mtcars[, c(1,5)] )
corr.calc( iris[,1:2] )

### step6
# Computes the Pearson correlation coefficients between all pairs of
# numeric variables and returns the coefficient that is largest in
# absolute value.
step6 <- read.table("step6.csv", header=TRUE, sep=',' )
# P.S. sorry for writing code
filtered.cor <- function(x){
  y <- cor(x[sapply(x, is.numeric)])
  diag(y) <- 0   # ignore the trivial self-correlations
  return(y[which.max(abs(y))])
}
filtered.cor(iris)   # [1] 0.9628654
iris$Petal.Length <- -iris$Petal.Length # make the max-abs correlation negative
filtered.cor(iris)   # [1] - 0.9628654
filtered.cor(step6)  # [1] 0.235997

#######################
############ data
vec1 <- c(24.985, 8.425, 14.992, 18.096, 16.664, 2.695, 10.919, 12.912,
          0.926, -2.941, 27.019, 31.122, 10.999, 37.391, 0.069, 32.565,
          18.737, 12.03, 15.988, 29.278, 20.641, 17.138, 27.051, 36.6,
          23.38, 12.726, 28.429, 31.066, 41.038, 25.11, -4.407, 20.313,
          16.531, 25.782, 24.68, 18.422, 34.917, 22.477, 16.982, 18.531,
          20.138, 30.896, 32.664, 34.821, 11.421, 6.543, 39.009, 24.499,
          13.345, 5.28)
vec2 <- c(4.247, 3.272, 7.384, -3.743, 10.315, 19.066, -9.901, 6.418,
          7.287, 2.714, 5.895, 23.421, 12.151, 15.379, 13.808, 4.635,
          11.795, 9.409, -0.799, 22.509, 16.575, 6.88, 24.828, 21.983,
          13.111, 0.928, 12.409, 4.864, 6.04, 24.878, -5.797, -1.974,
          4.576, 8.737, 2.773, 18.012, 16.747, 6.928, 4.748, 18.557,
          8.633, 22.755, 5.759, 26.877, 13.31, 5.642, 14.142, 10.015,
          15.29, 19.842)
df <- c(vec1, vec2)

#################
test_data <- as.data.frame(list(
  col1 = c(0.4, -0.35, 0.82, -0.23, -1.04, 1.21, 0.38, -0.72, 0.26, 1.95,
           -0.21, 1.96, 1.54, -0.36, -0.78, 0.04, 0.58, -0.49, 0.53, -1.3,
           -1.06, -0.47, 0.13, 0.95, -1.24, -1.27, 1.55, -0.99, 1.19, -1.49),
  col2 = c(0.58, -0.28, -0.05, -1.58, -2.13, -0.79, 0.05, 0.82, -0.55, -0.64,
           0.35, -2.47, 1.31, 0.97, 1.37, 0.01, -0.34, 1.39, 1.53, -0.51,
           -1.85, 0.58, 0.21, -0.64, -0.52, 0.16, -0.56, 0.81, -1.14, 1.58)))

#################
# First attempt (kept for the record; each later definition overwrites the
# previous one). BUG FIX: the original text of this version was garbled
# beyond parsing (missing `$` and line breaks: `shapiro.test(xV1)vv2 ...`),
# which made the whole file unsourceable; reconstructed here. Note it
# returns the p-value, unlike the later versions which return the estimate.
smart_cor <- function(x) {
  vv1 <- shapiro.test(x$V1)
  vv2 <- shapiro.test(x$V2)
  if ((vv1$p.value < 0.05) | (vv2$p.value < 0.05)) {
    test <- cor.test(~ V1 + V2, x, method = c("spearman"))
  } else {
    test <- cor.test(x = x$V1, y = x$V2, method = c("pearson"))
  }
  return(test$p.value)
}

# In the `if`, the < 0.05 condition must be written out separately for
# each of a and b.
smart_cor <- function(x){
  a <- shapiro.test(x[,1])$p.value
  b <- shapiro.test(x[,2])$p.value
  # BUG FIX: the original condition was `a | b < 0.05`, which parses as
  # `a | (b < 0.05)` -- any nonzero p-value `a` made it TRUE.
  if (a < 0.05 | b < 0.05){
    no <- cor.test(~ x[ ,1] + x[ ,2], x, method = "spearman")
  } else{
    no <- cor.test(~ x[ ,1] + x[ ,2], x, method = "pearson")
  }
  return(no$estimate)
}

####
smart_cor <- function(x){
  a <- shapiro.test(x[,1])$p.value
  b <- shapiro.test(x[,2])$p.value
  if (a < 0.05 | b < 0.05) {
    return(c(cor.test(x[,1], x[,2], method="spearman" ))$estimate)
  } else {
    return(c(cor.test(x[,1], x[,2], method="pearson" ))$estimate)
  }
}

###
# Final version: Spearman if either column fails the Shapiro-Wilk
# normality test at 0.05, Pearson otherwise; returns the estimate.
smart_cor <- function(x){
  if (shapiro.test(x[,1])$p.value < 0.05 | shapiro.test(x[,2])$p.value < 0.05) {
    return(c(cor.test(x[,1], x[,2], method="spearman" ))$estimate)
  } else {
    return(c(cor.test(x[,1], x[,2], method="pearson" ))$estimate)
  }
}
1610641fd3f0f03dddfcc6283f5c43d4e852a8da
3fd90c9a4a7f2a35cd595f07bc32e579a6ce3734
/questionnaire/data/orm.R
c5657936da3aba59c5662c00f3014175975e357b
[]
no_license
kevinstadler/thesis
e6b6499e5b4328fe1dba370ad55dde8ca0a31daa
b02426c14d8d06e00da8db00273cb9184c84836c
refs/heads/master
2021-06-15T12:41:02.726374
2017-04-20T10:28:56
2017-04-20T10:28:56
45,398,580
3
0
null
null
null
null
UTF-8
R
false
false
4,831
r
orm.R
# utility functions for mixed effects ordered logistic regression
# for documentation, see orderedlogisticregression.pdf in this directory

# Format a vector of p-values for table output: non-positive values (bogus
# Chi2 tests with df=0) become NA, values below .001 become "< .001", and
# the rest are rounded to 3 decimals with the leading zero stripped.
# NOTE(review): the dotted name looks like an S3 method print/format
# dispatch target for class "p.value" but is used as a plain function here.
format.p.value <- function(p) {
  p[which(p <= 0)] <- NA # bogus Chi2 tests with df=0
  ifelse(p < .001, "< .001", sub("^0", "", round(p, 3)))
}

library(ordinal)
# TODO need to fix this...
# NOTE(review): depends on a global data frame `d`; this fitted clm model
# only donates its `call` to clmm2 fits below so stargazer accepts them.
dummymdl <- clm(self ~ age, data=d)

# Fit a clmm2 model, retrying up to `attempts` times when the fit raises a
# warning (other than the harmless rank-deficiency one, which is muffled).
# The call is rebuilt from this function's own arguments via match.call(),
# so clme(...) accepts exactly the clmm2(...) argument list. Defaults the
# data argument to the global `d` and always requests the Hessian.
clme <- function(..., attempts=10) {
  clmm2call <- match.call()
  clmm2call[[1]] <- quote(clmm2)   # turn the clme(...) call into clmm2(...)
  if (is.null(clmm2call$data)) clmm2call$data <- d
  clmm2call$Hess <- TRUE
  # clmm2call$control <- clmm2.control(innerCtrl="giveError")
  attempt <- 0
  warning <- TRUE
  while (!is.null(warning) && attempt < attempts) {
    warning <- NULL
    attempt <- attempt+1
    # Evaluate in the caller's frame; non-benign warnings are captured into
    # `warning` (triggering a retry) and muffled.
    mdl <- withCallingHandlers(eval.parent(clmm2call), warning=function(w) {
      if (w$message != "design appears to be rank-deficient, so dropping some coefs") {
        warning <<- w
        invokeRestart("muffleWarning")
      }
    })
  }
  if (!is.null(warning)) {
    stop(paste("clmm2 fit failed in all", attempt, "attempts:", warning))
  }
  # stargazer() supports clm but no clmm models
  mdl$call <- dummymdl$call
  mdl$alpha <- mdl$Alpha
  return(mdl)
}

library(stargazer, quietly=TRUE)

# l before #?
# Render one or more fitted models as a LaTeX regression table via
# stargazer; `of` is appended to the default title, `showstats` toggles the
# summary-statistics panel, remaining arguments mirror stargazer's.
ormtable <- function(..., of="", title=paste("Ordered logistic regression model (coefficients and standard errors)", of), table.layout="=!#-!t-!s=n", showstats=TRUE, omit.table.layout=if (!showstats) "s=", ord.intercepts=TRUE, table.placement="htbp", dep.var="") {
  stargazer(..., ord.intercepts=ord.intercepts, title=title, no.space=TRUE, star.cutoffs=c(0.05, 0.01, 0.001), table.layout=table.layout, omit.table.layout=omit.table.layout, table.placement=table.placement, dep.var.caption=dep.var)
}

# performs pairwise model comparison
# Likelihood-ratio comparison of nested clm2 models. Models are sorted by
# decreasing residual df (most parsimonious first); each is tested against
# the nearest preceding model with more residual df. Returns an
# "Anova"-classed data frame with -2LL, df, LR and p columns.
modelcomparison <- function(..., modellabels=NULL) {
  models <- list(...)
  if (length(models) < 2) stop('Require at least two models for comparison')
  if (is.null(modellabels)) modellabels <- 0:(length(models)-1)
  models <- models[order(sapply(models, function(x) x$df.residual), decreasing = TRUE)]
  if (any(!sapply(models, inherits, "clm2"))) stop('not all objects are of class "clm2"')
  ns <- sapply(models, function(x) length(x$fitted.values))
  if(any(ns != ns[1])) stop("models were not all fitted to the same size of dataset")
  # Response name, recovered from the location part of each model formula.
  rsp <- unique(sapply(models, function(x) {
    tmp <- attr(x$location, "terms")
    class(tmp) <- "formula"
    paste(tmp[2])
  } ))
  # Human-readable model descriptions: RHS of the location formula (with
  # " * " collapsed to ":"), plus scale / nominal parts when present.
  mds <- sapply(models, function(x) {
    modelname <- gsub(" * ", ":", deparse(attr(x$location, "terms")[[3]]), fixed=TRUE)
    # if (!is.null(x$stDev))
    #   modelname <- paste(modelname, " + (1|", names(varmdl$stDev), ")", sep="")
    if (!is.null(x$scale)) modelname <- paste(modelname, "|", attr(x$scale, "terms")[[2]])
    if (!is.null(x$nominal)) modelname <- paste(modelname, "|", attr(x$nominal, "terms")[[2]])
    modelname
  })
  lls <- sapply(models, function(x) -2*x$logLik)
  dfs <- sapply(models, function(x) x$df.residual)
  # find the next consecutive model with lower residual df
  baseline <- 1+length(dfs)-apply(outer(dfs, dfs, FUN=">"), 2, function(col) match(TRUE, rev(col)))
  df <- dfs[baseline] - dfs
  x2 <- lls[baseline] - lls
  pr <- c(NA, 1 - pchisq(x2[-1], df[-1]))
  out <- data.frame(Model = mds, Resid.df = dfs, '-2logLik' = lls, Test=ifelse(is.na(baseline), NA, paste(modellabels[baseline], "vs", modellabels)), Df = df, LRtest = x2, Prob = pr)
  rownames(out) <- paste("(", modellabels, ")", sep="")
  names(out) <- c("Model", "Res. df", "-2LL", "Test", "df", "LR", "P(>Chi)")
  class(out) <- c("Anova", "data.frame")
  attr(out, "heading") <- c("Likelihood ratio tests of cumulative link models\n", paste("Response:", rsp))
  out
}

# Render the output of modelcomparison() as a LaTeX table via stargazer,
# formatting the p-value column (assumed to be last) and dropping any BIC
# column.
modelcomparisontable <- function(anova, of="", title=paste("Pairwise model comparison", of), label=NULL, decmodelnumbers=TRUE, ...) {
  # if (decmodelnumbers)
  #   rownames(anova) <- 0:(nrow(anova)-1)
  # p value will be in the last column (hopefully)
  anova[,ncol(anova)] <- format.p.value(anova[,ncol(anova)])
  stargazer(anova[,!(names(anova) %in% c("BIC"))], summary=FALSE, title=title, label=label)
}

# Convenience wrapper: print the coefficient table for the models and then
# the pairwise comparison table (optionally against a null model), with
# cross-referencing LaTeX labels derived from `label`.
ormtables <- function(nullmdl, ..., label=NULL, of=NULL, title=paste("Ordered logistic regression model (coefficients and standard errors)", of), ord.intercepts=TRUE) {
  ormtable(..., showstats=FALSE, of=of, title=title, ord.intercepts=ord.intercepts, label=label)
  modelcomparisontable(do.call("modelcomparison", if (is.null(nullmdl)) list(...) else c(list(nullmdl), list(...))), decmodelnumbers=!is.null(nullmdl), title=paste("Pairwise comparison of the models in Table~\\ref{", label, "}.", sep=""), label=if (!is.null(label)) paste(label, "comparison", sep=""))
}
60e5091d413ae466937b9284dc374fc58cae5a6a
6659ca68795cac95cecd04ff12bea86fff02fd94
/Qfolder2/SPL_Q2_india_finalization.R
fcab18eebd6f4e97b4894f2f306c4f2babef4b93
[]
no_license
jaidikam/sps_ws1718
687c1f209442bff860fa8990abf31bbcb67dd00c
9e44d539df8df4e426bfb9c69c5aa79cf4f7cb60
refs/heads/master
2021-09-10T18:52:39.737879
2018-03-31T04:26:46
2018-03-31T04:26:46
106,296,792
0
0
null
null
null
null
UTF-8
R
false
false
2,635
r
SPL_Q2_india_finalization.R
#prepare rain and temp data: mean amount of rain and mean temperature for each quarter year rain = readRDS(".\\Qfolder2\\Q2_india_rain.rds") temp = readRDS(".\\Qfolder2\\Q2_india_temp.rds") rain$ISO3 = NULL rain$ISO2 = NULL rain$year = rain$X.Year rain$X.Year = NULL rain$month = rain$Month rain$Month = NULL temp$year = temp$X.Year temp$X.Year = NULL temp$month = temp$Month temp$Month = NULL raintemp = merge(rain,temp[c("tas","year","month")],by=c("month","year")) # Calculate average rain and temp per quarter # Define function for average rain and temp per quarter avgRainTempQuarter = function(ds,month,mp_year,pr,tas){ if (is.na(ds[[pr]]) ||is.na(ds[[tas]])) { message(paste("No missing values allowed!")) } else { ds$tas_q1 = 0 ds$tas_q2 = 0 ds$tas_q3 = 0 ds$tas_q4 = 0 ds$pr_q1 = 0 ds$pr_q2 = 0 ds$pr_q3 = 0 ds$pr_q4 = 0 for(z in min(ds[[mp_year]]):max(ds[[mp_year]])){ ds[ds[[mp_year]] == z ,]$pr_q1 = sum(ds[ds[[mp_year]] == z & ds[[month]] %in% c("1","2","3"),][[pr]])/3 ds[ds[[mp_year]] == z ,]$pr_q2 = sum(ds[ds[[mp_year]] == z & ds[[month]] %in% c("4","5","6"),][[pr]])/3 ds[ds[[mp_year]] == z ,]$pr_q3 = sum(ds[ds[[mp_year]] == z & ds[[month]] %in% c("7","8","9"),][[pr]])/3 ds[ds[[mp_year]] == z ,]$pr_q4 = sum(ds[ds[[mp_year]] == z & ds[[month]] %in% c("10","11","12"),][[pr]])/3 ds[ds[[mp_year]] == z ,]$tas_q1 = sum(ds[ds[[mp_year]] == z & ds[[month]] %in% c("1","2","3"),][[tas]])/3 ds[ds[[mp_year]] == z ,]$tas_q2 = sum(ds[ds[[mp_year]] == z & ds[[month]] %in% c("4","5","6"),][[tas]])/3 ds[ds[[mp_year]] == z ,]$tas_q3 = sum(ds[ds[[mp_year]] == z & ds[[month]] %in% c("7","8","9"),][[tas]])/3 ds[ds[[mp_year]] == z ,]$tas_q4 = sum(ds[ds[[mp_year]] == z & ds[[month]] %in% c("10","11","12"),][[tas]])/3 } return(ds) } } raintemp = avgRainTempQuarter(raintemp,"month","year","pr","tas") #load the india dataset and the remaining variables india_wip = readRDS(".\\Qfolder1\\Q1_india_wip.rds") rest = readRDS(".\\Qfolder2\\Q2_india_rest.rds") #merge india with wheater data 
india_wip = merge(india_wip,unique(raintemp[c("tas_q1","tas_q2","tas_q3","tas_q4","pr_q1","pr_q2","pr_q3","pr_q4","year")]),by=c("year")) #join the datasets india_fin = merge(india_wip, rest, by=c("prod_price")) #prod_price is unique, #therefore it can be used as a key for the merge #save dataset saveRDS(india_fin, (".\\Qfolder2\\Q2_india_fin.rds")) #cleanup rm(list = setdiff(ls(), lsf.str()))
088baa3b7e69eb6b92a82582f7ec0736bdaef439
961f1a2de9dd6875fb6a86b08dfc917a78f0933d
/my_attr_calc_func.R
b6fd10e0de2eabbaf9a14bc2cf4000043ebf2450
[]
no_license
alex7777777/my_funktion
c9ba83405f4e37438565d559b749ee2980cfca16
fb1741a910a975f5c2052b6de3ccfa1be39e4a4f
refs/heads/master
2020-05-29T13:48:15.868690
2020-01-09T22:10:13
2020-01-09T22:10:13
189,173,623
2
1
null
null
null
null
UTF-8
R
false
false
1,560
r
my_attr_calc_func.R
# ############################################################## # # Equal, max, last, first attribution / distribution computation # # ############################################################## # library(dplyr) # attribution_calc <- function(att_004, label_for_att = "No label") attribution_calc <- function(att_004) { att_005 <- att_004 %>% group_by(order_id) %>% mutate(sequence = row_number()) att_006 <- att_005 %>% arrange(order_id, desc(received_at)) %>% group_by(order_id) %>% mutate(sequence_last = row_number()) %>% arrange(order_id, received_at) lasttouchverteilung <- att_006 %>% filter(sequence_last == 1) %>% group_by(marketing_channel) %>% summarise(lasttouch=n()) # add firsttouchverteilung <- att_006 %>% filter(sequence == 1) %>% group_by(marketing_channel) %>% summarise(firsttouch=n()) gleichverteilung <- group_by(att_004,marketing_channel) %>% summarise(gleichverteilung=sum(key)) gleichverteilung$gleichverteilung <- round(gleichverteilung$gleichverteilung,digits=0) maximalattribution <- group_by(att_004,marketing_channel) %>% summarise(maximalattribution=n()) attribution_final <- left_join(gleichverteilung,maximalattribution,by="marketing_channel") %>% left_join(.,lasttouchverteilung, by="marketing_channel") %>% left_join(.,firsttouchverteilung, by="marketing_channel") attribution_final <- data.frame(attribution_final) # attribution_final$label <- label_for_att return(attribution_final) }
daeb5eb15f19ce5e80b4e948e685c7360e6c26bf
579820f0b9432b837738fd3506c9582bf9cd07da
/man/logitToProbs.Rd
fe1ec05c75a891c8de70c4a51018d14c2864bbf5
[]
no_license
albertbuchard/r-pipeline
ffcfee60439a773a915e69cc3a654ddca7447454
e1848f8b8f73b358066732345caa90d65cd889a3
refs/heads/master
2021-03-27T20:02:09.315730
2018-02-25T21:37:04
2018-02-25T21:37:04
66,743,305
3
0
null
null
null
null
UTF-8
R
false
true
266
rd
logitToProbs.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utilities.R \name{logitToProbs} \alias{logitToProbs} \title{logit to probs} \usage{ logitToProbs(logitValue) } \description{ Converts logit values from e.g a GLM logit, to probabilities }
27df2203441cc93a6f92f13e36137f802787fff7
ccf24bfede30e688075ba70ea40a509f9cf52c3f
/using_feature_selection_method.R
ab3c19e590c3501537401143a742224e5b6923e0
[]
no_license
akshay-vaidya/DengAI-Predicting-Dengue-Disease-Spread
0f0c6599fceeeb6d2b9d56e94c430dba15dd03cc
a6af18a6b6ace2a3196681926c65e2b2d82a888b
refs/heads/master
2021-01-12T03:38:24.779094
2018-05-31T22:09:12
2018-05-31T22:09:12
78,245,770
0
0
null
null
null
null
UTF-8
R
false
false
1,012
r
using_feature_selection_method.R
for (i in 1:dim(training)[2]) { means=colMeans(training[i],na.rm=TRUE) training[i][is.na(training[i])]= means } correlationMatrix <- cor(training) highlyCorrelated <- findCorrelation(correlationMatrix, cutoff=0.5) v<-data.frame(highlyCorrelated) newdata <- v[order(highlyCorrelated),] training<- training[,newdata] training_features1<- training[,newdata] training_features=cbind(training_features1,training["total_cases"]) test_features<- test[,newdata] (train.con <- train.kknn(total_cases~., data = training_features, kmax = 25, kernel = c("rectangular", "triangular", "epanechnikov", "gaussian", "rank", "optimal"))) prediction<-predict(train.con,test_features) prediction<-data.frame(prediction) test1<-dat_test["city"] test2<-cbind(test1,dat_test["weekofyear"],dat_test["year"]) test2<-cbind(test2,round(prediction["prediction"])) write.csv(test2, file = "F:\\Data driven competitions\\predict_dengue\\predictions_feature_Sel.csv", fileEncoding = "UTF-16LE")
958470d83c087b3e0562dc1d42ca82ad82519bbd
2a9722073f2c19d7d5c3cda608aadaf3f1e8eec3
/Codes/Simulation/power/HPC2011-11-15/Model1.r
c6f5ca65841d312b255ced11f32fd20a446b0f5c
[]
no_license
seahearman/Jung-Ying
b65e22a0ffd44db74324912a505c60d040939c5b
108425c2733437ba3c5745efcf474c3d35dea49a
refs/heads/master
2020-06-05T08:11:41.858711
2011-12-12T22:03:26
2011-12-12T22:03:26
2,438,827
0
0
null
null
null
null
UTF-8
R
false
false
12,866
r
Model1.r
library(stats) library(MASS) library(CompQuadForm) library(pls) rm(list= ls()) set.seed(4) #--------------------------- Load functions ---------------------------------- RealisticSimulation <- function(Sample_size,SNP_posi,Risk_Model, risk_effect){ #------------------------------ 1.1. Load the data ------------------------------------ Gene_RBJ <- read.table("RBJ.txt", header=FALSE) #load the SNP information of the Gene RBJ Gene_RBJ <- as.matrix(Gene_RBJ) Gene_GPRC5B <- read.table("GPRC5B.txt", header=FALSE) #load the SNP information of the Gene GPRC5B Gene_GPRC5B <- as.matrix(Gene_GPRC5B) N_snp_RBJ <- ncol(Gene_RBJ) #find how many SNPs in the RBJ gene N_snp_GPRC5B <- ncol(Gene_GPRC5B) #find how many SNPs in the GPRC5B gene N_sample_RBJ <- nrow(Gene_RBJ) #find how many individuals in the RBJ gene N_sample_GPRC5B <- nrow(Gene_GPRC5B) #find how many individuals in the GPRC5B gene #------------------------------ 1.2. Set the parameters ------------------------------- SNP11_posi <- SNP_posi[1] #locate the 1st causal SNP in RBJ SNP12_posi <- SNP_posi[2] #locate the 2nd causal SNP in RBJ SNP21_posi <- SNP_posi[3] #locate the 1st causal SNP in GPRC5B SNP22_posi <- SNP_posi[4] #locate the 2nd causal SNP in GPRC5B causal_posi <- c(SNP11_posi,SNP12_posi,SNP21_posi+N_snp_RBJ,SNP22_posi+N_snp_RBJ) #when we consider the position as two genes, we need to add the number of first gene which is RBJ here #------------------------------ 1.3 Genearte the genotype ----------------------------- Genotype <- array(0, dim=c(Sample_size, N_snp_RBJ+N_snp_GPRC5B)) # the final genotype output Phenotype <- array(0, dim=c(Sample_size, 1)) # the final phenotype output tempA <- round(runif(Sample_size,1,N_sample_RBJ)) # randomly pick sample size individuals from the Gene bank of RBJ GeneA <- Gene_RBJ[tempA,] tempB <- round(runif(Sample_size,1,N_sample_GPRC5B)) # randomly pick sample size individuals from the Gene bank of GPRC5B GeneB <- Gene_GPRC5B[tempB,] genotype <- cbind(GeneA,GeneB) Causal_SNP 
<- genotype[,causal_posi] if (Risk_Model==1){ Main_effect <- 0 Epi_effect <- risk_effect*(Causal_SNP[,1]*Causal_SNP[,3]+Causal_SNP[,2]*Causal_SNP[,4]) } if (Risk_Model==2){ Main_effect <- risk_effect*(Causal_SNP[,1]+Causal_SNP[,3]) Epi_effect <- 0 } if (Risk_Model==3){ Main_effect <- 0 Epi_effect <- risk_effect*(Causal_SNP[,1]*Causal_SNP[,2]+Causal_SNP[,3]*Causal_SNP[,4]) } #------------------------------ 1.4 Calculate the phenotype --------------------------- error_variance<- 1 error <- rnorm(Sample_size, 0,error_variance) Phenotype <- Main_effect + Epi_effect + error dim(Phenotype) <- c(Sample_size,1) Output <- cbind(Phenotype, genotype) X = array(1,dim=c(Sample_size,1)) geno1 <- genotype[,1:N_snp_RBJ] geno2 <- genotype[,-(1:N_snp_RBJ)] return(list(Y = Phenotype , X=X,geno1 = geno1,geno2 = geno2)) } TypicalIBS_genotype = function (geno){ #- If the data has a 2-allelic genotype format, i.e. there are only 2 allele for each #- marker, say A, a, and the genotype is recorded by a scale, that is, AA:0, Aa:1 and aa:2 #- if the genotype has more than 2 allelic data, we may want to calculated in other way #- Sepetating the genotype into 2 haplotype matrix ---- #--------------------------------------------------------- # this is the version can be applied to the 2-allelic data #--------------------------------------------------------- N_gene = ncol(geno) N = nrow(geno) h1A_ = array(0,dim=c(N,N_gene)) # h1A_ is used to indicate whether the 1st allele is A for gene h1_a = array(0,dim=c(N,N_gene)) # h1_a is used to indicate whether the 1st allele is a for gene h2A_ = array(0,dim=c(N,N_gene)) # h2A_ is used to indicate whether the 2nd allele is A for gene h2_a = array(0,dim=c(N,N_gene)) # h2_a is used to indicate whether the 2nd allele is a for gene h1_a = (geno>0)*1 # This is a small algorithm to generate the h1A_~h2_a from the genotype h2_a = geno-h1_a h1A_ = 1-h1_a h2A_ = 1-h2_a #----------------- Get the allele freq for each locus in each two gene------------- qA = 
array(1,dim=c(1,N_gene)) # calculate the freq of main allele for gene qa = array(1,dim=c(1,N_gene)) # calculate the freq of minor allele for gene wA =qA^(-0.75) # using the freq^(-3/4) as the weight wa =qa^(-0.75) S_original = array(0,dim=c(N,N)) # S by {0, 0.5, 1} value set # Following is small loop to generate the similarity matrix for gene1 and gene2 for (i in 1:N_gene){ temp1 = wA[i]*h1A_[,i] %*% t(h1A_[,i])+wa[i]*h1_a[,i] %*% t(h1_a[,i]) + wA[i]*h2A_[,i] %*% t(h2A_[,i])+wa[i]*h2_a[,i] %*% t(h2_a[,i]) temp2 = wA[i]*h1A_[,i] %*% t(h2A_[,i])+wa[i]*h1_a[,i] %*% t(h2_a[,i]) + wA[i]*h2A_[,i] %*% t(h1A_[,i])+wa[i]*h2_a[,i] %*% t(h1_a[,i]) S_original = S_original+ temp1*(temp1>=temp2)+temp2*(temp1<temp2) } S = S_original/(2*N_gene) eg = eigen(S, symmetric=T) evalue = eg$values le = evalue[1] # Biggest eigenvalue. if (le == 0){ le = 1 } tmpkey = (evalue / le) > 1e-7 ll = sum(tmpkey) # Rank of SSS RRRR = diag(evalue[tmpkey]) HHHH = eg$vectors[,tmpkey] return(list(S = S, R = RRRR, H = HHHH, L = ll)) } AverageIBS_genotype = function (geno){ #- If the data has a 2-allelic genotype format, i.e. there are only 2 allele for each #- marker, say A, a, and the genotype is recorded by a scale, that is, AA:0, Aa:1 and aa:2 #- if the genotype has more than 2 allelic data, we may want to calculated in other way #- Sepetating the genotype into 2 haplotype matrix ---- hA = geno # hA is used to how many A's in the allele, it equals to genotype ha = 2-geno # ha is used to how many a's in the allele, it equals to 2 - genotype N_gene = ncol(geno) # the number of markers S_temp = hA %*% t(hA) + ha %*% t(ha) S = S_temp/(4*N_gene) eg = eigen(S, symmetric=T) evalue = eg$values le = evalue[1] # Biggest eigenvalue. 
if (le == 0){ le = 1 } tmpkey = (evalue / le) > 1e-7 ll = sum(tmpkey) # Rank of SSS RRRR = diag(evalue[tmpkey]) HHHH = eg$vectors[,tmpkey] return(list(S = S, R = RRRR, H = HHHH, L = ll)) } JointTest = function(Y,X,Simi1,Simi2){ N = nrow(Y) # the number of individual S1 = Simi1$S S2 = Simi2$S S12 = S1 * S2 # The similarity matrix for the interaction Q = diag(N) - X %*% solve(t(X) %*% X) %*% t(X) sigma = as.numeric (t(Y) %*% Q %*% Y/(N-ncol(X))) P0 = 1 / sigma * Q T0 = 1 / (2*sigma^2) * t(Y) %*% Q %*% (S1 + S2 + S12) %*% Q %*% Y Weights_all = eigen(1 / (2 * sigma) * Q %*% (S1 + S2 + S12) %*% Q, symmetric=TRUE, only.value=TRUE) temp = Weights_all$values temp2 = sort(temp,decreasing=TRUE) dim(temp2) = c(N,1) big_enough = sum(temp>10^-3) # Get the number of big eigen values. here, the threshold for "big" is 10^-3 Weights = array(temp2[1:big_enough,1],dim=c(big_enough,1)) p = liu(T0, Weights, h = rep(1, length(Weights)), delta = rep(0, length(Weights))) return(P=p) } #-------------------------------------------------------------------------------------- # PCA | #-------------------------------------------------------------------------------------- PCA_analysis <- function(phenotype,geno1,geno2){ N <- nrow(phenotype) # the nmuber of individuals in the sample #---------- 2.1 get the 1st PCA component of geno1 and geno2)---------------------- gene1_PCA <- princomp(geno1,cor=FALSE, scores=TRUE) gene2_PCA <- princomp(geno2,cor=FALSE, scores=TRUE) Z1 <- gene1_PCA$score[,1] Z2 <- gene2_PCA$score[,1] dat1 <- data.frame(phenotype=phenotype,geno1,geno2,Z1*Z2) model_analysis <- glm(phenotype~.,data = dat1) # the alternative model ptest1 <- anova(model_analysis,test="Chisq") power1 <- 1-pchisq((max(ptest1[[4]])-min(ptest1[[4]])),df=max(ptest1[[3]])-min(ptest1[[3]])) return(power1) } #-------------------------------------------------------------------------------------- # PLS | #-------------------------------------------------------------------------------------- PLS_analysis <- 
function(phenotype,geno1,geno2){ N <- nrow(phenotype) # the nmuber of individuals in the sample #---------- 2.1 get the 1st PLS component regression geno1 on (phenotype, geno2)--- center_geno1 <- apply(geno1, 2, function(x)(x-mean(x))/sd(x)) center_geno2 <- apply(geno2, 2, function(x)(x-mean(x))/sd(x)) #------------------------ The following 4 lines are used to remove NaN ------------ bad <- sapply(center_geno1[1,], function(x) all(is.nan(x))) center_geno1 <- center_geno1[,!bad] bad <- sapply(center_geno2[1,], function(x) all(is.nan(x))) center_geno2 <- center_geno2[,!bad] Y <- (phenotype-mean(phenotype))/sd(phenotype) dat1 <- data.frame(phenotype=phenotype,center_geno2) fit1 <- glm(phenotype~.,data=dat1) mu <- fitted.values(fit1) dat2 <- data.frame(Gene1=I(center_geno1),Y_Gene2=I(cbind(center_geno2,Y))) pls1 <- plsr(Y_Gene2 ~ Gene1, data = dat2) In <- scores(pls1)[,1]*mu dat3 <- data.frame(phenotype=phenotype,geno1,geno2,In) fit3 <- glm(phenotype~.,data=dat3) ptest1 <- anova(fit3,test="Chisq") power1 <- 1-pchisq((max(ptest1[[4]])-min(ptest1[[4]])),df=max(ptest1[[3]])-min(ptest1[[3]])) return(power1) } LM_analysis <- function(Y,X,gene1,gene2){ Inter = NULL for (i in 1:(ncol(gene1)-1)){ for (j in i:(ncol(gene2))){ Inter = cbind(Inter,gene1[,i]*gene2[,j]) } } dat.all = data.frame(Y,X,gene1,gene2,Inter) fit0 = lm(Y~X,data = dat.all) fit.all = update(fit0, . ~ . 
+ gene1+gene2+Inter) p.all = anova(fit0,fit.all,test="F") return(p.all[2,6]) } #---------------------------- Main part ------------------------------------- N = 300 N.iter= 100 GeneA = array(c(1,3,7),dim=c(1,3)) LableA = array(c("HM","LC","LR"),dim=c(1,3)) GeneB = array(c(7,9,1,6,3),dim=c(1,5)) LableB = array(c("HC","NC","NM","LM","LR"),dim=c(1,5)) #--- Generate all the possible SNP combinations for Model 1 -- SNP_posi = NULL Lable = NULL for (gA1 in 1:2){ SA1 = GeneA[1,gA1] LA1 = LableA[1,gA1] for (gA2 in (gA1+1):3){ SA2 = GeneA[1,gA2] LA2 = LableA[1,gA2] for (gB1 in 1:4){ SB1 = GeneB[1,gB1] LB1 = LableB[1,gB1] for (gB2 in (gB1+1):5){ SB2 = GeneB[1,gB2] LB2 = LableB[1,gB2] SNP_posi = rbind(SNP_posi,array(c(SA1,SA2,SB1,SB2),dim=c(1,4))) Lable =rbind(Lable,paste(LA1,".",LB1,"+",LA2,".",LB2,sep="")) SNP_posi = rbind(SNP_posi,array(c(SA1,SA2,SB2,SB1),dim=c(1,4))) Lable =rbind(Lable,paste(LA1,".",LB2,"+",LA2,".",LB1,sep="")) } } } } NComb = nrow(SNP_posi) Power = array(0,dim=c(5,NComb)) #--- Power is used to calculate the power of the 5 methods for (round in 1:NComb){ cat(round,"\n") SNPs = SNP_posi[round,] for (i in 1:N.iter){ #- simulate the data ------ SData = RealisticSimulation(Sample_size=N,SNPs,Risk_Model=1, risk_effect=0.15) Y = SData$Y X = SData$X gene1 = SData$geno1 gene2 = SData$geno2 #-- Using the Average IBS ----- Simi1 = AverageIBS_genotype(gene1) Simi2 = AverageIBS_genotype(gene2) pvalue = JointTest(Y,X,Simi2,Simi1) if (pvalue<0.05) Power[1,round]=Power[1,round]+1/N.iter #-- Using the Typical IBS ----- Simi1 = TypicalIBS_genotype(gene1) Simi2 = TypicalIBS_genotype(gene2) pvalue = JointTest(Y,X,Simi2,Simi1) if (pvalue<0.05) Power[2,round]=Power[2,round]+1/N.iter #-- Using the PCA ------- pvalue = PCA_analysis(Y,gene1,gene2) if (pvalue<0.05) Power[3,round]=Power[3,round]+1/N.iter #-- Using the PLS ------- pvalue = PLS_analysis(Y,gene1,gene2) if (pvalue<0.05) Power[4,round]=Power[4,round]+1/N.iter #-- Using the LM-------- pvalue = 
LM_analysis(Y,X,gene1,gene2) if (pvalue<0.05) Power[5,round]=Power[5,round]+1/N.iter } #-- Output ---------- #- The output is based on the SNP positions --- } final.output = cbind(Lable,t(Power)) write.table(final.output,"Model1.txt",sep=" ",row.name=F,col.name=F)
34965a6c1b113d280a90f349f779329649ff03bf
96b4ed7146d04125292aaab53d01dea82bc5a9c6
/scripts/freq-table.R
18e3b5a867dd8f8e5ebf51b0a586871beb051356
[]
no_license
arundurvasula/missense
e2afcf80c90424be453306676e2ac303fdc45107
aac2f91a3ed25d7b27ef13512920610480c4c37c
refs/heads/master
2016-09-13T08:10:11.839719
2016-05-01T22:16:53
2016-05-01T22:16:53
57,383,501
0
0
null
null
null
null
UTF-8
R
false
false
2,847
r
freq-table.R
options(warn=-1) library(data.table) ## Missense, MAF > 0.1 AEN <- fread("results/AEN-maf-0.1.frq") names(AEN) <- c("CHROM", "POS", "N_ALLELES", "N_CHR", "MAJ_FREQ", "MIN_FREQ") CEM <- fread("results/CEM-maf-0.1.frq") names(CEM) <- c("CHROM", "POS", "N_ALLELES", "N_CHR", "MAJ_FREQ", "MIN_FREQ") STP <- fread("results/STP-maf-0.1.frq") names(STP) <- c("CHROM", "POS", "N_ALLELES", "N_CHR", "MAJ_FREQ", "MIN_FREQ") CLB <- fread("results/CLB-maf-0.1.frq") names(CLB) <- c("CHROM", "POS", "N_ALLELES", "N_CHR", "MAJ_FREQ", "MIN_FREQ") freqs <- data.table(AEN$MIN_FREQ, CEM$MIN_FREQ, STP$MIN_FREQ, CLB$MIN_FREQ) nchr <- data.table(AEN$N_CHR, CEM$N_CHR, STP$N_CHR, CLB$N_CHR) derived <- round(freqs * nchr) #write.table(freqs, "results/ancient.freqs-maf-0.1.csv", sep=",", col.names=F, row.names=F) write.table(nchr, "results/ancient.nchr-maf-0.1.csv", sep=",", col.names=F, row.names=F) write.table(derived, "results/ancient.derived-maf-0.1.csv", sep=",", col.names=F, row.names=F) ## Missense, MAF < 0.1 AEN <- fread("results/AEN-maf-lt-0.1.frq") names(AEN) <- c("CHROM", "POS", "N_ALLELES", "N_CHR", "MAJ_FREQ", "MIN_FREQ") CEM <- fread("results/CEM-maf-lt-0.1.frq") names(CEM) <- c("CHROM", "POS", "N_ALLELES", "N_CHR", "MAJ_FREQ", "MIN_FREQ") STP <- fread("results/STP-maf-lt-0.1.frq") names(STP) <- c("CHROM", "POS", "N_ALLELES", "N_CHR", "MAJ_FREQ", "MIN_FREQ") CLB <- fread("results/CLB-maf-lt-0.1.frq") names(CLB) <- c("CHROM", "POS", "N_ALLELES", "N_CHR", "MAJ_FREQ", "MIN_FREQ") freqs <- data.table(AEN$MIN_FREQ, CEM$MIN_FREQ, STP$MIN_FREQ, CLB$MIN_FREQ) nchr <- data.table(AEN$N_CHR, CEM$N_CHR, STP$N_CHR, CLB$N_CHR) derived <- round(freqs * nchr) #write.table(freqs, "results/ancient.freqs-maf-lt-0.1.csv", sep=",", col.names=F, row.names=F) write.table(nchr, "results/ancient.nchr-maf-lt-0.1.csv", sep=",", col.names=F, row.names=F) write.table(derived, "results/ancient.derived-maf-lt-0.1.csv", sep=",", col.names=F, row.names=F) ## Synonymous AEN <- fread("results/AEN-syn.frq") 
names(AEN) <- c("CHROM", "POS", "N_ALLELES", "N_CHR", "MAJ_FREQ", "MIN_FREQ") CEM <- fread("results/CEM-syn.frq") names(CEM) <- c("CHROM", "POS", "N_ALLELES", "N_CHR", "MAJ_FREQ", "MIN_FREQ") STP <- fread("results/STP-syn.frq") names(STP) <- c("CHROM", "POS", "N_ALLELES", "N_CHR", "MAJ_FREQ", "MIN_FREQ") CLB <- fread("results/CLB-syn.frq") names(CLB) <- c("CHROM", "POS", "N_ALLELES", "N_CHR", "MAJ_FREQ", "MIN_FREQ") freqs <- data.table(AEN$MIN_FREQ, CEM$MIN_FREQ, STP$MIN_FREQ, CLB$MIN_FREQ) nchr <- data.table(AEN$N_CHR, CEM$N_CHR, STP$N_CHR, CLB$N_CHR) derived <- round(freqs * nchr) #write.table(freqs, "results/ancient.freqs-syn.csv", sep=",", col.names=F, row.names=F) write.table(nchr, "results/ancient.nchr-syn.csv", sep=",", col.names=F, row.names=F) write.table(derived, "results/ancient.derived-syn.csv", sep=",", col.names=F, row.names=F)
2e940b811f6713e816fe76a4377cb6f42999a025
e06e3ae6e3a90dee08ce78d94a15e802dcdc4123
/static/post/intro-variational-autoencoder/Tuto_VAE.R
731d0e5973e6b358e46e4984628af0b6675cb991
[]
no_license
StateOfTheR/website
e71071958429e8db007accda019699ca87fdefd7
2001db349859db92d71062b464688b32837951e8
refs/heads/master
2023-09-04T06:18:22.462606
2023-08-23T15:44:24
2023-08-23T15:44:24
102,025,176
0
0
null
2023-08-23T15:44:25
2017-08-31T17:11:09
HTML
UTF-8
R
false
false
11,038
r
Tuto_VAE.R
## ----setup, message = FALSE--------------------------------------------------------------------------------------------------------------- knitr::opts_chunk$set(echo = TRUE) library(keras) library(ggplot2) library(dplyr) library(corrplot) # pour représenter la matrice des corrélations empiriques library(factoextra) # pour avoir des représentations graphiques pour l'ACP library(tensorflow) #library(reticulate) # use_miniconda("r-reticulate") if (tensorflow::tf$executing_eagerly()) { tensorflow::tf$compat$v1$disable_eager_execution() } K <- keras::backend() ## ---- eval = FALSE------------------------------------------------------------------------------------------------------------------------ ## library(tensorflow) ## tf$constant("Hellow Tensorflow") ## ----loading-the-data--------------------------------------------------------------------------------------------------------------------- wine <- read.csv("http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv", sep = ";") # Taille du jeux de données dim(wine) # Aperçu des premières lignes head(wine[,-12]) # Résumé summary(wine) ## ----corrplot----------------------------------------------------------------------------------------------------------------------------- wine %>% select_if(is.numeric) %>% cor() %>% # Calcul de la matrice de corrélation empirique corrplot::corrplot() # représentation graphique de la matrice ## ----scale-the-data----------------------------------------------------------------------------------------------------------------------- scaled_data <- wine[, -12] %>% select_if(is.numeric) %>% mutate_all(.funs = scale) # On applique à toutes les colonnes la fonction scale # La fonction scale centre et réduit un vecteur ## ----train-and-test-datasets, echo = TRUE, eval = FALSE----------------------------------------------------------------------------------- ## set.seed(seed = 123) ## dataset_size <- nrow(wine) ## train_size <- as.integer(dataset_size * 0.85) 
## # Training dataset ## train_dataset <- wine %>% ## group_by(quality) %>% ## sample_frac(0.85) ## # Creation of the test dataset ## test_dataset <- anti_join(as_tibble(wine), train_dataset) ## # Scale the data ## scaled_train <- train_dataset %>% ## select_if(is.numeric) %>% ## mutate_all(.funs = scale) ## # Attention : il faudrait aussi mettre à l'echelle le jeu de test, peut-etre plutôt avec les valeurs du jeu d'entrainement ## scaled_test <- test_dataset %>% ## select_if(is.numeric) %>% ## mutate_all(.funs = scale) ## ----resultat_acp------------------------------------------------------------------------------------------------------------------------- res.pca <- prcomp(scaled_data) head(res.pca$x) ## plot of the eigenvalues factoextra::fviz_eig(res.pca) # Plot -2D ggplot(as.data.frame(res.pca$x), aes(x = PC1, y = PC2, col =wine$quality)) + geom_point() # Plot - 3D #library(plotly) #pca_plotly <- plot_ly(as.data.frame(res.pca$x), x = ~PC1, y = ~PC2, z = ~PC3, color = ~wine$quality) %>% add_markers() #pca_plotly ## ----knn-prediction, eval = FALSE--------------------------------------------------------------------------------------------------------- ## library(class) ## y <- wine$quality ## neigh <- knn(scaled_data, scaled_data, cl = y, k = 3) ## # confusion matrix ## tab <- table(neigh, y) ## # accuracy ## sum(diag(tab)) / sum(rowSums(tab)) * 100 ## ## Using the 6 first PC ## neigh_reduced <- knn(res.pca$x[, 1:6], res.pca$x[, 1:6], cl = y, k = 3) ## tab <- table(neigh_reduced, y) ## sum(diag(tab)) / sum(rowSums(tab)) * 100 ## # si on voulait prédire des individus supplementaires : ## # Centre-reduire les individus supplementaires ## # ind.scaled <- scale(ind.sup, center = res.pca$center, scale = res.pca$scale) ## # ind.sup.coord <- predict(res.pca, newdata = ind.sup) ## ----autoencoder-1----------------------------------------------------------------------------------------------------------------------- # Ecriture condensée input_size <- 11 m <- 6 # nb de 
composantes ae_1 <- keras_model_sequential() ae_1 %>% layer_dense(units = m, input_shape = input_size, use_bias = TRUE, activation = "linear", name = "bottleneck") %>% layer_dense(units = input_size, use_bias = TRUE, activation = "linear") %>% summary(ae_1) ## ----autoencoder-2------------------------------------------------------------------------------------------------------------------------ # Ecriture en separant encodeur et decodeur # Encoder enc_input <- layer_input(shape = input_size) enc_output <- enc_input %>% layer_dense(units = m, activation = "linear") encoder <- keras_model(enc_input, enc_output) # Decoder dec_input <- layer_input(shape = m) dec_output <- dec_input %>% layer_dense(units = input_size, activation = "linear") decoder <- keras_model(dec_input, dec_output) # Autoencoder ae_2_input <- layer_input(shape = input_size) ae_2_output <- ae_2_input %>% encoder() %>% decoder() ae_2 <- keras_model(ae_2_input, ae_2_output) summary(ae_2) ## ----get-config--------------------------------------------------------------------------------------------------------------------------- # Encoder : m*n weights + m terms of bias # Decoder : m*n + n terms of bias get_config(ae_1) ## ----ae-1-compile------------------------------------------------------------------------------------------------------------------------- ae_1 %>% compile( loss = "mean_squared_error", optimizer = optimizer_sgd(learning_rate = 0.1) # stochastic gradient descent optimizer ) ## ----ae-1-fit----------------------------------------------------------------------------------------------------------------------------- epochs_nb <- 50L batch_size <- 10L scaled_train <- as.matrix(scaled_data) ae_1 %>% fit(x = scaled_train, y = scaled_train, epochs = epochs_nb, batch_size = batch_size) # evaluate the performance of the model mse.ae <- evaluate(ae_1, scaled_train, scaled_train) mse.ae ## 
----ae-poids----------------------------------------------------------------------------------------------------------------------------- poids <- get_weights(ae_1) # Encoders/ decoders weights w_encodeur <- poids[[1]] %>% print() w_decodeur <- poids[[3]] %>% print() ## ----------------------------------------------------------------------------------------------------------------------------------------- # ACP : unit Norm: the weights on a layer have unit norm. sum(w_decodeur^2) / m ## ----ae1-predict, eval = FALSE------------------------------------------------------------------------------------------------------------ ## ae_1predict <- ae_1 %>% predict(scaled_train) ## # Repasser en non standardisé pour comparer... ## varcol <- apply(wine[, -12], 2, var) ## meancol <- colMeans(wine[, -12]) ## ae_1predict_or <- sapply(c(1:11), FUN = function(x) ae_1predict[, x] * sqrt(varcol[x]) + meancol[x]) ## ----botteleneck-layer-------------------------------------------------------------------------------------------------------------------- # extract the bottleneck layer intermediate_layer_model <- keras_model(inputs = ae_1$input, outputs = get_layer(ae_1, "bottleneck")$output) intermediate_output <- predict(intermediate_layer_model, scaled_train) ggplot(data.frame(PC1 = intermediate_output[,1], PC2 = intermediate_output[,2]), aes(x = PC1, y = PC2, col = wine$quality)) + geom_point() ## ----comparison-------------------------------------------------------------------------------------------------------------------------- # PCA reconstruction pca.recon <- function(pca, x, k){ mu <- matrix(rep(res.pca$center, nrow(pca$x)), nrow = nrow(res.pca$x), byrow = T) recon <- res.pca$x[,1:k] %*% t(res.pca$rotation[,1:k]) + mu mse <- mean((recon - x)^2) return(list(x = recon, mse = mse)) } xhat <- rep(NA, 10) for(k in 1:10){ xhat[k] <- pca.recon(res.pca, scaled_train, k)$mse } ae.mse <- rep(NA, 5) input_size <- 11 #m <- 6 # nb components for(k in 1:10){ modelk <- 
keras_model_sequential() modelk %>% layer_dense(units = k, input_shape = input_size, use_bias = TRUE, activation = "linear", name = "bottleneck") %>% layer_dense(units = input_size, use_bias = TRUE, activation = "linear") modelk %>% compile( loss = "mean_squared_error", optimizer = optimizer_sgd(learning_rate = 0.1) ) modelk %>% fit( x = scaled_train, y = scaled_train, epochs = 50, verbose = 0, ) ae.mse[k] <- unname(evaluate(modelk, scaled_train, scaled_train)) } df <- data.frame(k = c(1:10, 1:10), mse = c(xhat, ae.mse), method = c(rep("acp", 10), rep("autoencodeur", 10))) ggplot(df, aes(x = k, y = mse, col = method)) + geom_line() ## ----useful functions for variational autoencoder------------------------------------------------------------------------------------------- # Warning keras: k_random_normal, k_shape etc. sampling <- function(arg) { z_mean <- arg[, 1:(latent_dim)] z_log_var <- arg[, (latent_dim + 1):(2 * latent_dim)] epsilon <- k_random_normal( shape = c(k_shape(z_mean)[[1]]), mean = 0., stddev = epsilon_std ) z_mean + k_exp(z_log_var / 2) * epsilon } # Loss function vae_loss <- function(x, x_decoded, k1 = 1, k2 = 0.01) { mse_loss <- k_sum(loss_mean_squared_error(x, x_decoded)) kl_loss <- -0.5 * k_sum(1 + z_log_var - k_square(z_mean) - k_exp(z_log_var)) k1*mse_loss + k2*kl_loss } ## ----variational autoencoder------------------------------------------------------------------------------------------------------------ set.seed(123) # Parameters -------------------------------------------------------------- batch_size <- 32 latent_dim <- 6L epochs_nb <- 50L epsilon_std <- 1.0 # Model definition -------------------------------------------------------- x <- layer_input(shape = c(input_size)) z_mean <- layer_dense(x, latent_dim) z_log_var <- layer_dense(x, latent_dim) # note that "output_shape" isn't necessary with the TensorFlow backend z <- layer_concatenate(list(z_mean, z_log_var)) %>% layer_lambda(sampling) # On instancie les couches séparément pour 
pouvoir les réutiliser plus tard decoder <- layer_dense(units = input_size, activation = "sigmoid") x_decoded <- decoder(z) # end-to-end autoencoder vae <- keras_model(x, x_decoded) # encoder, from inputs to latent space encoder <- keras_model(x, z_mean) # generator, from latent space to reconstructed inputs decoder_input <- layer_input(shape = latent_dim) x_decoded_2 <- decoder(decoder_input) generator <- keras_model(decoder_input, x_decoded_2) vae %>% compile(optimizer = "sgd", loss = vae_loss) vae %>% fit(x = scaled_train, y = scaled_train, epochs = epochs_nb) x_train_encoded <- predict(encoder, scaled_train, batch_size = batch_size) ## Representation in the latent space x_train_encoded %>% as_tibble() %>% ggplot(aes(x = V1, y = V2, colour = wine$quality)) + geom_point() ## ----session-info------------------------------------------------------------------------------------------------------------------------- reticulate::py_config() tensorflow::tf_config()
e5e8065ed9fc1c5912bfd8cbc31122e6e803cb9e
e2c7181ed4e32ad6375160811fc1e13a6c5c1752
/man/ls_catalog.Rd
79197c68a2751e57187472d3a7b85acec94e57e1
[]
no_license
mauricioromero86/teamlucc
c6bbef6beff5bb19ba068db500e6c6e483086507
b41fdd3135dd58c45a0a76c8c568768104267eaa
refs/heads/master
2020-12-24T11:33:13.644455
2015-09-10T14:14:44
2015-09-10T14:14:44
40,678,059
0
2
null
2015-08-13T19:33:49
2015-08-13T19:33:48
R
UTF-8
R
false
false
649
rd
ls_catalog.Rd
% Generated by roxygen2 (4.0.2): do not edit by hand \name{ls_catalog} \alias{ls_catalog} \title{Catalog a folder of Landsat images} \usage{ ls_catalog(in_folder) } \arguments{ \item{in_folder}{path to a folder of Landsat surface reflectance images (for example, as extracted by the \code{espa_extract} function).} } \value{ a \code{data.frame} with a list of the Landsat images found within in_folder } \description{ This function is used to produce a \code{data.frame} of Landsat images stored locally after download from the USGS. The images should be in a series of subfolders named following the naming scheme of \code{\link{espa_extract}}. }
0c7bc3d0adc18ce02a32153788235035d705dd39
bf17824361e9edd03025f2e5d932339649a3bc63
/setup.R
9229ffffefca26b0b73343e7389f5014f8cd3ce1
[]
no_license
gkrowner/NDA-Shinyapp
bc2940cf19a4c80cfababb412834b2f3280441d9
43d0d11213d0d010bc489e83b2b927a825f20567
refs/heads/master
2021-05-23T15:26:51.528006
2020-04-06T01:26:04
2020-04-06T01:26:04
253,360,841
0
0
null
null
null
null
UTF-8
R
false
false
1,049
r
setup.R
# setup
# ---------------------------------------------------------------------------
# Package setup for the Spotify top-10s Shiny dashboard.
# Loads every package the app needs; the one-off preprocessing that produced
# top10s.RData is kept below, commented out, for reference.
# ---------------------------------------------------------------------------
library(readr)
library(data.table)
library(igraph)
library(ggplot2)
library(shiny)
library(rsconnect)
library(stringr)
library(shinyjs)
library(shinycssloaders)
library(shinydashboard)
library(shinyWidgets)
library(shinythemes)
library(devtools)
library(dashboardthemes)
library(visNetwork)
library(fmsb)
library(plotly)
library(wordcloud2)

# Preprocessing
# NOTE(review): this block was apparently run once to clean the raw CSV
# (stripping characters that read.csv could not decode) and save the result
# as top10s.RData; the app then loads the .RData file directly instead of
# re-running this. Confirm top10s.RData is present before deploying.
#dt.spotify <- as.data.table(read.csv("top10s.csv")) # your repository
#dt.spotify[, 2:4] <- lapply(dt.spotify[, 2:4], as.character) # to remove those character that can't be recognised
#dt.spotify[, title := str_replace_all(title,"�", "")]
#dt.spotify[, artist := str_replace_all(artist,"�", "é")]
#colnames(dt.spotify)[-1] <- c("Title", "Artist", "Genre", "Year", "Beats_Per_Min",
#                              "Energy", "Danceability", "Loudness", "Liveness", "Valence",
#                              "Duration", "Acousticness", "Speechiness", "Popularity")
#save(dt.spotify, file = "top10s.RData")
# for encoding problem, it's better to directly load the top10s.RData file
92a9d5a827c82f91fb378efce721723b2603d408
f6e1ab6d9f234a031e05686d1d5c56252a702750
/man/update_contact_dates.Rd
850c1f8bf1188b94d59f746063aded4c2f039244
[]
no_license
NCEAS/awards-bot
8c4b943bba9539d73835a858485f1afb351260f4
c475c255f35b746c956517939f4e0bce69439084
refs/heads/main
2021-09-13T20:58:11.871112
2021-09-03T21:40:25
2021-09-03T21:40:25
139,081,603
5
7
null
2021-09-03T21:40:26
2018-06-29T00:23:07
R
UTF-8
R
false
true
851
rd
update_contact_dates.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/database_functions.R
\name{update_contact_dates}
\alias{update_contact_dates}
\title{Update contact dates in awards database}
\usage{
update_contact_dates(
  awards_db,
  annual_report_time,
  initial_aon_offset,
  aon_recurring_interval
)
}
\arguments{
\item{awards_db}{(data.frame) awards database}

\item{annual_report_time}{(numeric) time in months after 'start_date' to send the first annual report reminder}

\item{initial_aon_offset}{(numeric) time in months after 'start_date' to send the first aon data reminder}

\item{aon_recurring_interval}{(numeric) time in months to send aon data recurring reminders}
}
\description{
Wrapper function for set_first_annual_report_due_date,
update_annual_report_due_date, set_first_aon_data_due_date, and
update_aon_data_due_date
}
b26d3358f89151948b42e606f069e73f510b77ea
9717bf97b379d0520e8ae6b34c7b1f043db3c8d3
/ShinyTraining/ui.R
0935c071c89baccbcb72cac45bc919686cd09ca6
[]
no_license
mouthful-of-wasps/ShinyTraining
2459f5866a0d114bdab1e37f37ea4efe2911006a
036cec2b258762e281ed583c0e972e52d096db5a
refs/heads/main
2023-04-24T12:55:18.482799
2021-04-28T17:24:43
2021-04-28T17:24:43
null
0
0
null
null
null
null
UTF-8
R
false
false
4,208
r
ui.R
# ui.R -- Shiny dashboard layout for the Titanic survival app.
# NOTE(review): `sex_choices` and `embark` are not defined in this file;
# presumably they come from a global.R sourced before the UI is built -- verify.
library(shinydashboard)
library(shiny)
library(shinythemes)
library(tidyverse)
library(titanic)

ui <- shinydashboard::dashboardPage(
  dashboardHeader(title = "Titanic"),

  # Sidebar: one menu entry per tab in the dashboard body below.
  dashboardSidebar(
    collapsed = FALSE,
    sidebarMenu(
      id="dk",
      menuItem("Titanic survival info", tabName = "survival", icon = icon("chart-bar")),
      menuItem("Would you survive", tabName = "test", icon = icon("ship"))
    )
  ),

  dashboardBody(
    # Two summary value boxes shown above the tab content (rendered server-side).
    fluidRow(
      valueBoxOutput(width = 6, outputId = "rate"),
      valueBoxOutput(width = 6, outputId = "age"),
      # NOTE(review): trailing comma above leaves an empty argument in
      # fluidRow(); recent shiny tolerates this but it is fragile -- confirm.
    ),
    tabItems(

      # --- Tab 1: survival statistics by gender ---------------------------
      tabItem(
        tabName = "survival",
        fluidRow(
          tabBox(title = "", id = "age_gender", width = 12,
            tabPanel("",
              # Left column: controls (gender filter, percentage toggle).
              fluidRow(column(width = 4,
                box(title = "Choose gender", status = "primary", width = NULL, solidHeader = TRUE,
                  selectizeInput("sex_", "Gender:", c("both",sex_choices)),
                  checkboxInput(inputId = "percentages", label = "Show as percentages", value = FALSE),
                  submitButton(text = "Make changes")  # inputs only propagate on click
                ))),
              # Full-width plot of survivors by gender.
              fluidRow(
                column(width = 12,
                  box(title = "Survivors by gender", status = "primary", width = NULL,
                    plotOutput(outputId = "sex_graph")))
              ))
          )
        ),
      ),

      # --- Tab 2: per-passenger survival prediction -----------------------
      tabItem(
        tabName = "test",
        fluidRow(
          # NOTE(review): id "surivval" looks like a typo for "survival";
          # harmless only as long as nothing references this id.
          tabBox(title = "", id = "surivval", width = 12,
            tabPanel("",
              # Passenger details used as predictors for the GLM.
              fluidRow(column(width = 8,
                box(title = "Your info", status = "primary", width = 8, solidHeader = TRUE,
                  selectizeInput("gender_", "Gender:", c(sex_choices)),
                  numericInput(
                    inputId = "age_",
                    label = "Age:",
                    value = 50,
                    min = 1,
                    max = 200,
                    step = 1
                  ),
                  selectizeInput("embark_", "Port of embarkation:", c(embark)),
                  submitButton(text = "Check your chances of survival")
                )),
                # NOTE(review): trailing comma above leaves an empty argument
                # in fluidRow() -- confirm it is tolerated and remove.
              ),
              # Predicted survival probability plus a disclaimer.
              fluidRow(
                column(width = 12,
                  box(title = "", status = "primary", width = NULL,
                    h1(textOutput(outputId = "survival_chance")))),
                column(width = 12,
                  h5("*Note model is not validated and output is generated from a very simple binomial GLM"))
              ))
          )
        ),
      )
    )
  )
)
5099a6d2c8f1dfa7363ee670db02d70e06996b55
42843932c938ba616095879eb6559c3a7aef26d8
/ADP/Wisconsin Breast Cancer.R
6987c382cb4f49b8132b98cf8d19ca27a312f532
[]
no_license
leejungmu/first-repository
f2a4a5a494f62557712775de0b9801d80d63f9ec
bdc9ee2818b66684b16d6e91e71717aa170b180f
refs/heads/master
2023-02-04T05:24:04.054398
2023-01-26T06:38:47
2023-01-26T06:38:47
211,994,347
1
0
null
null
null
null
UHC
R
false
false
6,018
r
Wisconsin Breast Cancer.R
## ---------------------------------------------------------------------------
## Wisconsin Breast Cancer (WDBC): fit and compare four classifiers
## (logistic GLM, lasso via glmnet, rpart tree, random forest, gbm boosting)
## and evaluate them with ROC / AUC on a 60/20/20 train/validation/test split.
## ---------------------------------------------------------------------------

ls()             # list objects currently in the workspace
rm(list = ls())  # clear the workspace (kept from original; avoid in shared code)

library(dplyr)
library(ggplot2)
library(MASS)          # masks dplyr::select -- select() below is always namespaced
library(glmnet)
library(randomForest)
library(gbm)
library(rpart)
library(boot)
library(ROCR)
library(gridExtra)     # FIX: was library(gridE) -- not a package, aborted the script

## ---- Load and label the data ----------------------------------------------
# WDBC has an id column, a diagnosis column (B/M), then 10 features each
# reported as mean / standard error / worst value (30 feature columns total).
# as_tibble() replaces the deprecated tbl_df().
data <- as_tibble(read.table("D:/DataScience/first-repository/raw_data/Wisconsin_Breast_Cancer/wdbc.data",
                             strip.white = TRUE, sep = ",", header = FALSE))
feature_names <- c('radius', 'texture', 'perimeter', 'area', 'smoothness',
                   'compactness', 'concavity', 'concave_points', 'symmetry',
                   'fractal_dim')
names(data) <- c('id', 'class',
                 paste0('mean_', feature_names),
                 paste0('se_', feature_names),
                 paste0('worst_', feature_names))
glimpse(data)
data$class <- as.factor(data$class)
summary(data)

# Drop the id column and recode the response: B (benign) -> 0, M (malignant) -> 1.
data <- data %>% dplyr::select(-id)
data$class <- factor(ifelse(data$class == 'B', 0, 1))
glimpse(data)

## ---- Exploratory plots -----------------------------------------------------
# Upper-panel helper for pairs(): prints |correlation|, text size scaled by it.
panel.cor <- function(x, y, digits = 2, prefix = "", cex.cor, ...) {
  usr <- par("usr"); on.exit(par(usr))  # restore graphics state on exit
  par(usr = c(0, 1, 0, 1))
  r <- abs(cor(x, y))
  txt <- format(c(r, 0.123456789), digits = digits)[1]
  txt <- paste0(prefix, txt)
  if (missing(cex.cor)) cex.cor <- 0.8 / strwidth(txt)
  text(0.5, 0.5, txt, cex = cex.cor * r)
}

# Scatterplot matrix of the mean_* features (subsampled to at most 1000 rows).
pairs(data %>% dplyr::select(class, starts_with('mean_')) %>%
        sample_n(min(1000, nrow(data))),
      lower.panel = function(x, y) { points(x, y); abline(0, 1, col = 'red') },
      upper.panel = panel.cor)

library(ggplot2)
library(dplyr)
library(gridExtra)
p1 <- data %>% ggplot(aes(class)) + geom_bar()
p2 <- data %>% ggplot(aes(class, mean_concave_points)) +
  geom_jitter(col = 'gray') + geom_boxplot(alpha = .5)
p3 <- data %>% ggplot(aes(class, mean_radius)) +
  geom_jitter(col = 'gray') + geom_boxplot(alpha = .5)
p4 <- data %>% ggplot(aes(mean_concave_points, mean_radius)) +
  geom_jitter(col = 'gray') + geom_smooth()
grid.arrange(p1, p2, p3, p4, ncol = 2)

## ---- 60/20/20 train / validation / test split ------------------------------
set.seed(1223)
n <- nrow(data)
idx <- 1:n
training_idx <- sample(idx, n * .60)
idx <- setdiff(idx, training_idx)
validate_idx <- sample(idx, n * .20)
test_idx <- setdiff(idx, validate_idx)
training <- data[training_idx, ]
validation <- data[validate_idx, ]
test <- data[test_idx, ]

## ---- Logistic regression (GLM) ---------------------------------------------
data_lm_full <- glm(class ~ ., data = training, family = binomial)
summary(data_lm_full)
predict(data_lm_full, newdata = data[1:5, ], type = 'response')
y_obs <- as.numeric(as.character(validation$class))
yhat_lm <- predict(data_lm_full, newdata = validation, type = 'response')
pred_lm <- prediction(yhat_lm, y_obs)
performance(pred_lm, "auc")@y.values[[1]]

## ---- Lasso (glmnet) --------------------------------------------------------
xx <- model.matrix(class ~ . - 1, data)
x <- xx[training_idx, ]
y <- as.numeric(as.character(training$class))
glimpse(training)  # FIX: was a bare glimpse() call, which errors (x is required)
data_cvfit <- cv.glmnet(x, y, family = "binomial")
plot(data_cvfit)
coef(data_cvfit, s = c("lambda.1se"))
coef(data_cvfit, s = c("lambda.min"))
predict(data_cvfit, s = "lambda.min", newx = x[1:5, ], type = 'response')
yhat_glmnet <- predict(data_cvfit, s = "lambda.min", newx = xx[validate_idx, ], type = 'response')
yhat_glmnet <- yhat_glmnet[, 1]  # n x 1 matrix -> plain vector
pred_glmnet <- prediction(yhat_glmnet, y_obs)
performance(pred_glmnet, "auc")@y.values[[1]]

## ---- Classification tree (rpart) -------------------------------------------
data_tr <- rpart(class ~ ., data = training)
data_tr
printcp(data_tr)
summary(data_tr)
opar <- par(mfrow = c(1, 1), xpd = NA)
plot(data_tr)
text(data_tr, use.n = TRUE)
par(opar)
yhat_tr <- predict(data_tr, validation)
yhat_tr <- yhat_tr[, "1"]  # probability of class 1
pred_tr <- prediction(yhat_tr, y_obs)
performance(pred_tr, "auc")@y.values[[1]]

## ---- Random forest ---------------------------------------------------------
set.seed(1607)
data_rf <- randomForest(class ~ ., training)
data_rf
opar <- par(mfrow = c(1, 2))
plot(data_rf)
varImpPlot(data_rf)
par(opar)
yhat_rf <- predict(data_rf, newdata = validation, type = 'prob')[, '1']
pred_rf <- prediction(yhat_rf, y_obs)
performance(pred_rf, "auc")@y.values[[1]]

## ---- Gradient boosting (gbm) -----------------------------------------------
set.seed(1607)
# gbm with distribution = "bernoulli" wants a 0/1 numeric response, not a factor.
data_for_gbm <- training %>% mutate(class = as.numeric(as.character(class)))
data_gbm <- gbm(class ~ ., data = data_for_gbm, distribution = "bernoulli",
                n.trees = 5000, cv.folds = 3, verbose = TRUE)
(best_iter = gbm.perf(data_gbm, method = "cv"))
yhat_gbm <- predict(data_gbm, n.trees = best_iter, newdata = validation, type = 'response')
pred_gbm <- prediction(yhat_gbm, y_obs)
performance(pred_gbm, "auc")@y.values[[1]]

## ---- Final model selection and test-set error ------------------------------
# (section header translated from Korean: "final model selection and
#  test-set error computation")
data.frame(method = c('lm', 'glmnet', 'rf', 'gbm'),
           auc = c(performance(pred_lm, "auc")@y.values[[1]],
                   performance(pred_glmnet, "auc")@y.values[[1]],
                   performance(pred_rf, "auc")@y.values[[1]],
                   performance(pred_gbm, "auc")@y.values[[1]]))

# ROC curves for all four models on the validation set.
perf_lm <- performance(pred_lm, measure = "tpr", x.measure = "fpr")
perf_glmnet <- performance(pred_glmnet, measure = "tpr", x.measure = "fpr")
perf_rf <- performance(pred_rf, measure = "tpr", x.measure = "fpr")
perf_gbm <- performance(pred_gbm, measure = "tpr", x.measure = "fpr")
plot(perf_lm, col = 'black', main = "ROC Curve")
plot(perf_glmnet, add = TRUE, col = 'blue')
plot(perf_rf, add = TRUE, col = 'red')
plot(perf_gbm, add = TRUE, col = 'cyan')
abline(0, 1)
legend('bottomright', inset = .1, legend = c("GLM", "glmnet", "RF", "GBM"),
       col = c('black', 'blue', 'red', 'cyan'), lty = 1, lwd = 2)

# Test-set AUC for the two strongest candidates.
y_obs_test <- as.numeric(as.character(test$class))
yhat_glmnet_test <- predict(data_cvfit, s = 'lambda.min', newx = xx[test_idx, ], type = 'response')
yhat_glmnet_test <- yhat_glmnet_test[, 1]
pred_glmnet_test <- prediction(yhat_glmnet_test, y_obs_test)
performance(pred_glmnet_test, "auc")@y.values[[1]]
yhat_gbm_test <- predict(data_gbm, n.trees = best_iter, newdata = test, type = 'response')
pred_gbm_test <- prediction(yhat_gbm_test, y_obs_test)
performance(pred_gbm_test, "auc")@y.values[[1]]

# Pairwise agreement of the predicted probabilities across models.
pairs(data.frame(y_obs = y_obs, yhat_lm = yhat_lm, yhat_glmnet = c(yhat_glmnet),
                 yhat_rf = yhat_rf, yhat_gbm = yhat_gbm),
      lower.panel = function(x, y) { points(x, y); abline(0, 1, col = 'red') },
      upper.panel = panel.cor)
2d53331e707ca8df6181bd1f290048e59e198b46
fcc73891f59b531fbd543c1290a6810d57277294
/Ant_analysis_RScript.R
7ae84c8935318d965c98868a0bfe5befd48354e9
[]
no_license
texmiller/BTNP_ants
22162035463d0666d00cf51f6bc8f61df676d679
9b1ec175ceeaa6cfbe234387d13b2e5da997e28e
refs/heads/master
2022-11-15T00:34:25.644463
2022-11-04T19:56:14
2022-11-04T19:56:14
152,653,588
0
0
null
null
null
null
UTF-8
R
false
false
48,013
r
Ant_analysis_RScript.R
### Title: Ant analysis ### Author: Marion Donald (pulling from scripts by Gabriela Zambrano and Meghan Hager) ### Date Started: 23 July 2019 ### Purpose: Re-run analyses for species accumulation, richness, community composition, etc. ### Date Updated: 30 July 2019 library(tidyverse) library(readxl) library(dplyr) library(tidyr) library(vegan) library(ggplot2) library(BiodiversityR) library(extrafont) library(ggpubr) library(broom) library(gridExtra) library(cowplot) library(ggpubr) library(wesanderson) ## read in data data_MH <- read_excel("Meghan Hager Big Thicket Ant Data Updated 07-22-19 (1).xlsx") data_GZ_all <- read_excel("big_thicket_ants20142018.xlsx", sheet = "Pitfall Data") native_classification <- read.csv("ant_species_native_classification.csv") data_MH_2 <- data_MH %>% filter(`Collection Method` == "Pit") %>% ## select just the pitfall trap data select("Aphenogaster carolinensis":"Trachymyrmex septentrionalis") ## select just the species names ## since MH data doesn't have as many species names as GZ data, we need to add these columns in, in order to merge the two dfs ## get list of species from GZ data_GZ_species <- data_GZ_all %>% filter(Genus != "NA") %>% unite("species", c("Genus", "Species"), remove = T, sep = " ") %>% select(species) %>% distinct(species) %>% arrange(species) ## finding out that there are some data frame issues -- some of the species names are capitalized while others are not ## this is resulting in duplicates data_GZ_clean <- data_GZ_all %>% filter(Year == 2014 | ## pull out just the two years of Gabriela's data Year == 2015, Genus != "NA") %>% mutate(Species = ifelse(Species == "Carolinensis", "carolinensis", ## correct the species names (mispellings and uppercase-to-lowercase) ifelse(Species == "Depilis", "depilis", ifelse(Species == "Patagonicus", "patagonicus", ifelse(Species == "Castaneus", "castaneus", ifelse(Species == "pennsylanicus" | Species == "Pennsylvanicus" | Species == "pennsylvancius", "pennsylvanicus", 
ifelse(Species == "ashmeadii", "ashmeadi", ifelse(Species == "Rimosus", "rimosus", ifelse(Species == "Opacior", "opacior", ifelse(Species == "Coecus", "coecus", ifelse(Species == "Americana", "americana", ifelse(Species == "fasionensis", "faisonensis", ifelse(Species == "Fulva", "fulva", ifelse(Species == "Harpax", "harpax", ifelse(Species == "Dentata", "dentata", ifelse(Species == "Dentigula", "dentigula", ifelse(Species == "Metallescens", "metallescens", ifelse(Species == "Invicta", "invicta", ifelse(Species == "Molesta", "molesta", ifelse(Species == "Louisianae", "louisianae", Species))))))))))))))))))), Genus = ifelse(Genus == "Pachydondyla", "Pachycondyla", Genus)) data_GZ_clean_sp_check <- data_GZ_clean %>% filter(Abundance != "NA") %>% filter(Genus != "NA") %>% unite("species", c("Genus", "Species"), remove = T, sep = " ") %>% spread("species", "Abundance") %>% mutate_all(~replace(., is.na(.), 0)) %>% summarize_at(vars(`Aphaenogaster carolinensis`:`Trachymyrmex septentrionalis`), sum) %>% gather(species, Abundance) data_GZ_clean_sp <- data_GZ_clean_sp_check %>% ungroup() %>% select(species) %>% distinct(species) %>% arrange(species) ## check species names in MH data **** THERE ARE SPELLING MISTAKES HERE data_MH_clean <- data_MH %>% rename("Aphaenogaster carolinensis" = 'Aphenogaster carolinensis', "Aphaenogaster texana" = "Aphenogaster texana", "Hypoponera opaciceps" = "Hyponera opaciceps", "Cyphomyrmex rimosus" = "Cyphomyrmex rimosous") data_MH_sp <- data_MH_clean %>% gather("Aphaenogaster carolinensis":"Trachymyrmex septentrionalis", key = "Species", value = "Abundance") %>% select(Species) %>% distinct(Species) %>% arrange(Species) %>% rename(species = Species) full_sp_list <- full_join(data_GZ_clean_sp, data_MH_sp) ## this is the full species list across the two datasets (but including all data from 2014-2018) #### data set up for species accumulation curve ## transform to wide df for species accumulation curve data_GZ_wide <- data_GZ_clean %>% 
filter(Abundance != "NA") %>% select(Year, Site, Station, Month, Genus, Species, Abundance) %>% unite("species", c("Genus", "Species"), remove = T, sep = " ") %>% # group_by(species) %>% # mutate(grouped_id = row_number()) %>% spread(key = "species", value = "Abundance") %>% mutate_all(~replace(., is.na(.), 0)) %>% ## make all the NAs under the species names zeros (for this analysis) select(-Site:-Month) ## drop the identifiers and just keep the species names and Year ## find the species that are in MH that we need to add to this df in order to merge them missing_GZ <- anti_join(data_MH_sp, data_GZ_clean_sp) ## 5 species that are in MH that need to be added to GZ data_GZ_wide2 <- data_GZ_wide %>% mutate("Aphaenogaster texana" = 0, "Brachymyrmex patagonicus" = 0, "Nylanderia terricola" = 0, "Pheidole moerens" = 0, "Prenolepis imparis" = 0) missing_MH <- anti_join(data_GZ_clean_sp, data_MH_sp) ## 14 spp that are in GZ that need to be added to MH data_MH_2a <- data_MH_clean %>% mutate("Aphaenogaster rudis-fulva-texana complex" = 0, "Aphaenogaster treatae" = 0, "Brachymyrmex depilis" = 0, "Crematogaster ashmeadi" = 0, "Crematogaster lineolata" = 0, "Crematogaster minutissima" = 0, "Crematogaster sp" = 0, "Hypoponera opacior" = 0, "Labidus coecus" = 0, "Pheidole flavens complex" = 0, "Pheidole sp" = 0, "Pseudomyrmex pallidus" = 0, "Solenopsis nickersoni" = 0, "Tapinoma sessile" = 0) %>% select("Aphaenogaster carolinensis":"Tapinoma sessile", -Site_Code, -`Inside or Outside (Inside=1)`) %>% mutate(Year = 2015) data_spacc_all <- data_MH_2a %>% bind_rows(data_GZ_wide2) %>% mutate(`Pheidole flavens complex` = `Pheidole flavens complex` + `Pheidole moerens`) %>% ## combining P. moerens since it can't be reliably identified from P. flavens complex select(-`Pheidole moerens`) ## drop P. moerens now that it's been combined with P. 
flavens data_spacc_year <- data_spacc_all %>% select(Year) data_spacc <- data_spacc_all %>% select(-Year) # drop year, just keep the species by site matrix curve <- specaccum(data_spacc, method = "exact") pitfalls <- as.data.frame(curve$sites) richness <- as.data.frame(curve$richness) sd <- as.data.frame(curve$sd) spacc_df <- pitfalls %>% bind_cols(richness, sd) %>% dplyr::rename(sites = `curve$sites`, richness = `curve$richness`, sd = `curve$sd`) ggplot(spacc_df, aes(sites, richness))+ geom_point(alpha = .5)+ geom_ribbon(aes(x = sites, ymin = richness-sd, ymax = richness+sd), alpha = 0.2)+ theme_classic()+ labs(x = "Number of pitfall traps", y = "Species richness") ## Chao estimated (36 species observed, ~42 +/- 6 species predicted by Chao, 42 predicted by jack1 +/- 2) richness_est_df <- vegan::specpool(data_spacc) data_spacc_df <- as.data.frame(data_spacc) ## calculate species rank abundance using the rankabundance package from BiodiversityR (only accepts dataframes not tibbles) RankAbun.1 <- BiodiversityR::rankabundance(as.data.frame(data_spacc)) ## conver to df for ggplot rank_abu_df <- as.data.frame(RankAbun.1) %>% rownames_to_column(var = "species") #%>% ## native classification df to merge (native = 1, invasive = 0) native_info <- native_classification %>% rename(species = "Species") %>% mutate(species = as.character(species)) rank_abu_df2 <- rank_abu_df %>% full_join(native_info) %>% mutate(Native = as.character(Native)) ## Pheidole flavens complex as Native for rank abundance rank_abu_P.flav.native <- rank_abu_df2 %>% mutate(Native = ifelse(species == "Pheidole flavens complex", 1, Native), Classification = ifelse(Native == 1, "native", "non-native")) ## Rank abundance curve with Pheidole flavens complex as native (1) rank_abu_fig <- ggplot(rank_abu_P.flav.native, aes(x=rank, y = abundance, label=species))+ geom_point(aes(color = Classification, shape = Classification), size = 4)+ geom_line()+ theme_classic()+ labs(x = "Species rank", y = "Abundance")+ 
geom_text(aes(label=ifelse(abundance>100,as.character(species),'')),hjust=-0.08,vjust=0, check_overlap = F)+ scale_color_manual(values = c("#055864","#04C0DD", "gray49"))+ scale_shape_manual(values = c(16, 16))+ theme(text = element_text(family = "Times New Roman", size = 14)) ## Pheidole flavens complex as Native for rank abundance rank_abu_P.flav.non.native <- rank_abu_df2 %>% mutate(Native = ifelse(species == "Pheidole flavens complex", 0, Native), Classification = ifelse(Native == 1, "native", "non-native")) ## Rank abundance curve with Pheidole flavens complex as non-native (NN) (0) rank_abu_NN_fig <- ggplot(rank_abu_P.flav.non.native, aes(x=rank, y = abundance, label=species))+ geom_point(aes(color = Classification, shape = Classification), size = 4)+ geom_line()+ theme_classic()+ labs(x = "Species rank", y = "Abundance")+ geom_text(aes(label=ifelse(abundance>100,as.character(species),'')),hjust=-0.08,vjust=0, check_overlap = F)+ scale_color_manual(values = c("#055864","#04C0DD", "gray49"))+ scale_shape_manual(values = c(16, 16))+ theme(text = element_text(family = "Times New Roman", size = 14)) plot_grid(rank_abu_fig, rank_abu_NN_fig) ## ## diversity measures -- no difference in alpha diversity across the years shannon_div <- diversity(data_spacc, "shannon") shannon_div_df <- as.data.frame(shannon_div) %>% bind_cols(data_spacc_year) %>% mutate(Year = as.factor(Year)) ggplot(shannon_div_df, aes(x = Year, y = shannon_div))+ geom_boxplot()+ geom_jitter(width = 0.05, height = 0)+ theme_classic()+ labs(x = "Sampling year", y = "Shannon diversity index")+ stat_compare_means(method = "t.test") ## simpson div - no difference in alpha diversity across the years simpson_div <- diversity(data_spacc, "simpson") simpson_div_df <- as.data.frame(simpson_div) %>% bind_cols(data_spacc_year) %>% mutate(Year = as.factor(Year)) ggplot(simpson_div_df, aes(x = Year, y = simpson_div))+ geom_boxplot()+ geom_jitter(width = 0.05, height = 0)+ theme_classic()+ labs(x = "Sampling 
year", y = "Simpson diversity index")+ stat_compare_means(method = "t.test") ## get df with info on sample ID (from row number) and year to get proportion of non-native to native ants (and also do it as occurrence for sp) data_spacc_all_tidy <- data_spacc_all %>% rownames_to_column("ID") %>% gather(key = "species", value = "abundance", c(-Year, -ID)) %>% full_join(native_info) ## Pheidole flavens complex is classed as "2" -- convert to native (1) or invasive (0) and run the analyses for proportion of invasives data_spacc_all_tidy_PF_native <- data_spacc_all_tidy %>% mutate(Native = ifelse(species == "Pheidole flavens complex", 1, Native)) ##### P. flavens as native -- proportion non-native analysis data_spacc_all_tidy_abu <- data_spacc_all_tidy_PF_native %>% group_by(ID, Year, Native) %>% summarize(abundance = sum(abundance)) total_abu <- data_spacc_all_tidy_abu %>% ungroup() %>% group_by(ID, Year) %>% summarize(total_abundance = sum(abundance)) non_native_abu <- data_spacc_all_tidy_abu %>% filter(Native == 0) %>% rename(non_native_abu = abundance) %>% select(-Native) non_native_propotion_df <- total_abu %>% left_join(non_native_abu) %>% mutate(non_native_abu_prop = non_native_abu/total_abundance) %>% ungroup() %>% mutate(ID = as.numeric(ID)) %>% arrange(ID) %>% bind_cols(shannon_div_df, simpson_div_df) non_native_propotion_df_cat <- non_native_propotion_df %>% filter(total_abundance != 0) %>% mutate(category = ifelse(non_native_abu_prop < .33, "Low", ifelse(non_native_abu_prop > .66, "High", "Medium"))) %>% left_join(sp_richness, by = "ID") non_native_propotion_df_cat$category <- factor(non_native_propotion_df_cat$category, levels = c("Low", "Medium", "High")) my_comparisons <- list( c("High", "Medium"), c("Low", "Medium"), c("Low", "High") ) non_native_propotion_df_cat_tidy <- non_native_propotion_df_cat %>% select(ID, Year, non_native_abu_prop, shannon_div, simpson_div, total_sp, category) %>% gather(diversity_type, diversity_measure, -c(ID, Year, 
non_native_abu_prop, category)) %>% mutate(diversity_type = case_when(diversity_type == "shannon_div" ~ "Shannon diversity index", diversity_type == "simpson_div" ~ "Simpson diversity index", diversity_type == "total_sp" ~ "Species richness")) abundance_prop_alpha_fig <- ggplot(non_native_propotion_df_cat_tidy, aes(category, diversity_measure, fill = category))+ geom_boxplot(position =position_dodge())+ geom_point(position = position_jitterdodge(0.25), alpha = .75)+ stat_compare_means(comparisons = my_comparisons, aes(label = ..p.signif..))+ scale_fill_manual(values = wes_palette("GrandBudapest1"))+ scale_color_manual(values = wes_palette("GrandBudapest1"))+ labs(y = "Alpha diversity", x = "Proportion of non-native species (by abundance P.flavens as native)")+ theme_classic()+ theme(text = element_text(family = "Times New Roman", size = 14), legend.position = "bottom", legend.title = element_blank())+ facet_grid(diversity_type~., scales="free_y") # linear model lm_fit <- lm(shannon_div ~ non_native_abu_prop, data=non_native_propotion_df) summary(lm_fit) plot(lm_fit) ## residuals are not "random" -- they form an inverted U, linear model is not a great fit for these data tidy(lm_fit) glance(lm_fit) ## looks like there's a hump-shaped relationship between alpha diversity and proportion of non-native ants, ## not sure that the linear model is the best fit for these data ggplot(non_native_propotion_df, aes(non_native_abu_prop, shannon_div))+ geom_point()+ #geom_line(data = broom::augment(lm_fit), aes(x = non_native_abu_prop, y = .fitted))+ #facet_grid(~Year)+ theme_classic()+ labs(x = "Proportion of non-native ant abundance", y = "Shannon diversity index") ### for simpson div # linear model lm_fit_simp<- lm(simpson_div ~ non_native_abu_prop, data=non_native_propotion_df) #summary(lm_fit_simp) #plot(lm_fit_simp) #tidy(lm_fit_simp) #glance(lm_fit_simp) ## looks like there's a hump-shaped relationship between alpha diversity and proportion of non-native ants, ## not sure 
that the linear model is the best fit for these data ggplot(non_native_propotion_df, aes(non_native_abu_prop, simpson_div))+ geom_point()+ geom_line(data = broom::augment(lm_fit_simp), aes(x = non_native_abu_prop, y = .fitted))+ #facet_grid(~Year)+ theme_classic()+ labs(x = "Proportion of non-native ant abundance", y = "Shannon diversity index") ### do the same as above but this time by species occurrence (presence/absence) rather than abundance (Pheidole flavens complex as native) data_spacc_all_tidy_occ <- data_spacc_all_tidy_PF_native %>% filter(abundance != 0) %>% # sp has to be present to contribute filter(Native == 1) %>% group_by(ID, Year) %>% summarize(num_native_sp = n()) data_spacc_all_tidy_non_native <- data_spacc_all_tidy %>% filter(abundance != 0) %>% # sp has to be present to contribute filter(Native == 0) %>% group_by(ID, Year) %>% summarize(num_nonnative_sp = n()) occurrence_proportion_df <- data_spacc_all_tidy_occ %>% full_join(data_spacc_all_tidy_non_native) %>% mutate_all(~replace(., is.na(.), 0)) %>% mutate(total_sp = num_native_sp + num_nonnative_sp, prop_nonnative_sp = num_nonnative_sp/total_sp) %>% ungroup() %>% mutate(ID = as.numeric(ID)) %>% arrange(ID) %>% bind_cols(shannon_div_df, simpson_div_df) ### for shannon div # linear model lm_fit_shan_occ<- lm(shannon_div ~ prop_nonnative_sp, data=occurrence_proportion_df) #summary(lm_fit_shan_occ) #plot(lm_fit_shan_occ) #tidy(lm_fit_shan_occ) #glance(lm_fit_shan_occ) ## residuals plot looks more "random" -- linear model is more appropriate for these occurrence data than the abundance proportion ggplot(occurrence_proportion_df, aes(prop_nonnative_sp, shannon_div))+ geom_jitter()+ geom_line(data = broom::augment(lm_fit_shan_occ), aes(x = prop_nonnative_sp, y = .fitted))+ #facet_grid(~Year)+ theme_classic()+ labs(x = "Proportion of non-native ant species", y = "Shannon diversity index") ### for simpson div # linear model lm_fit_simp_occ<- lm(simpson_div ~ prop_nonnative_sp, 
data=occurrence_proportion_df) summary(lm_fit_simp_occ) #plot(lm_fit_simp_occ) #tidy(lm_fit_simp_occ) #glance(lm_fit_simp_occ) ## residuals plot looks more "random" -- linear model is more appropriate for these data than the abundance proportion ggplot(occurrence_proportion_df, aes(prop_nonnative_sp, simpson_div))+ geom_jitter()+ geom_smooth()+ geom_line(data = broom::augment(lm_fit_simp_occ), aes(x = prop_nonnative_sp, y = .fitted))+ #facet_grid(~Year)+ theme_classic()+ labs(x = "Proportion of non-native ant species", y = "Simpson diversity index") occurrence_proportion_df_cat <- occurrence_proportion_df %>% filter(total_sp != 0) %>% mutate(category = ifelse(prop_nonnative_sp < .33, "Low", ifelse(prop_nonnative_sp > .66, "High", "Medium"))) occurrence_proportion_df_cat$category <- factor(occurrence_proportion_df_cat$category, levels = c("Low", "Medium", "High")) my_comparisons <- list( c("High", "Medium"), c("Low", "Medium"), c("Low", "High") ) ## get species richness (total # sp per trap) to use as the species richness diversity measure by abundance later sp_richness<- occurrence_proportion_df_cat %>% select(total_sp, ID) occurrence_proportion_df_cat_tidy <- occurrence_proportion_df_cat %>% gather(diversity_type, diversity_measure, -c(ID, Year, num_native_sp, num_nonnative_sp, prop_nonnative_sp, Year1, Year2, category)) %>% mutate(diversity_type = case_when(diversity_type == "shannon_div" ~ "Shannon diversity index", diversity_type == "simpson_div" ~ "Simpson diversity index", diversity_type == "total_sp" ~ "Species richness")) occurrence_prop_sp_fig <- ggplot(occurrence_proportion_df_cat_tidy, aes(category, diversity_measure, fill = category))+ geom_boxplot(position =position_dodge())+ geom_point(position = position_jitterdodge(0.25), alpha = .75)+ stat_compare_means(comparisons = my_comparisons, aes(label = ..p.signif..))+ scale_fill_manual(values = wes_palette("GrandBudapest1"))+ scale_color_manual(values = wes_palette("GrandBudapest1"))+ labs(y = "Alpha 
diversity", x = "Proportion of non-native species (by occurrence with P.flavens as native)")+ theme_classic()+ theme(text = element_text(family = "Times New Roman", size = 14), legend.position = "bottom", legend.title = element_blank())+ facet_grid(diversity_type~., scales="free_y") ##### RUN THE ABOVE WITH P. flavens as non-native data_spacc_all_tidy_PF_non_native <- data_spacc_all_tidy %>% mutate(Native = ifelse(species == "Pheidole flavens complex", 0, Native)) data_spacc_all_tidy_abu_NN <- data_spacc_all_tidy_PF_non_native %>% group_by(ID, Year, Native) %>% summarize(abundance = sum(abundance)) total_abu_NN <- data_spacc_all_tidy_abu_NN %>% ungroup() %>% group_by(ID, Year) %>% summarize(total_abundance = sum(abundance)) non_native_abu_NN <- data_spacc_all_tidy_abu_NN %>% filter(Native == 0) %>% rename(non_native_abu = abundance) %>% select(-Native) non_native_propotion_df_NN <- total_abu_NN %>% left_join(non_native_abu) %>% mutate(non_native_abu_prop = non_native_abu/total_abundance) %>% ungroup() %>% mutate(ID = as.numeric(ID)) %>% arrange(ID) %>% bind_cols(shannon_div_df, simpson_div_df) non_native_propotion_df_NN_cat <- non_native_propotion_df_NN %>% filter(total_abundance != 0) %>% mutate(category = ifelse(non_native_abu_prop < .33, "Low", ifelse(non_native_abu_prop > .66, "High", "Medium"))) %>% left_join(sp_richness, by = "ID") non_native_propotion_df_NN_cat$category <- factor(non_native_propotion_df_NN_cat$category, levels = c("Low", "Medium", "High")) my_comparisons <- list( c("High", "Medium"), c("Low", "Medium"), c("Low", "High") ) non_native_propotion_df_NN_cat_tidy <- non_native_propotion_df_NN_cat %>% select(ID, Year, non_native_abu_prop, shannon_div, simpson_div, total_sp, category) %>% gather(diversity_type, diversity_measure, -c(ID, Year, non_native_abu_prop, category)) %>% mutate(diversity_type = case_when(diversity_type == "shannon_div" ~ "Shannon diversity index", diversity_type == "simpson_div" ~ "Simpson diversity index", diversity_type == 
"total_sp" ~ "Species richness")) abundance_prop_alpha_fig_NN <- ggplot(non_native_propotion_df_NN_cat_tidy, aes(category, diversity_measure, fill = category))+ geom_boxplot(position =position_dodge())+ geom_point(position = position_jitterdodge(0.25), alpha = .75)+ stat_compare_means(comparisons = my_comparisons, aes(label = ..p.signif..))+ scale_fill_manual(values = wes_palette("GrandBudapest1"))+ scale_color_manual(values = wes_palette("GrandBudapest1"))+ labs(y = "Alpha diversity", x = "Proportion of non-native species (by abundance with P.flavens as non-native)")+ theme_classic()+ theme(text = element_text(family = "Times New Roman", size = 14), legend.position = "bottom", legend.title = element_blank())+ facet_grid(diversity_type~., scales="free_y") plot_grid(occurrence_prop_sp_fig, abundance_prop_alpha_fig) # linear model lm_fit <- lm(shannon_div ~ non_native_abu_prop, data=non_native_propotion_df_NN) summary(lm_fit) #plot(lm_fit) ## residuals are not "random" -- they form an inverted U, linear model is not a great fit for these data #tidy(lm_fit) #glance(lm_fit) ## looks like there's a hump-shaped relationship between alpha diversity and proportion of non-native ants, ## not sure that the linear model is the best fit for these data ggplot(non_native_propotion_df, aes(non_native_abu_prop, shannon_div), color = "red")+ geom_jitter(data = non_native_propotion_df, color = "black")+ geom_point(data= non_native_propotion_df_NN, aes(color = "red"))+ geom_line(data = broom::augment(lm_fit), aes(x = non_native_abu_prop, y = .fitted))+ #facet_grid(~Year)+ theme_classic()+ labs(x = "Proportion of non-native ant abundance", y = "Shannon diversity index") ### for simpson div # linear model lm_fit_simp<- lm(simpson_div ~ non_native_abu_prop, data=non_native_propotion_df) summary(lm_fit_simp) plot(lm_fit_simp) tidy(lm_fit_simp) glance(lm_fit_simp) ## looks like there's a hump-shaped relationship between alpha diversity and proportion of non-native ants, ## not sure that 
the linear model is the best fit for these data ggplot(non_native_propotion_df, aes(non_native_abu_prop, simpson_div))+ geom_point()+ geom_line(data = broom::augment(lm_fit), aes(x = non_native_abu_prop, y = .fitted))+ #facet_grid(~Year)+ theme_classic()+ labs(x = "Proportion of non-native ant abundance", y = "Shannon diversity index") ### do the same as above but this time by species occurrence (presence/absence) rather than abundance (Pheidole flavens complex as native) data_spacc_all_tidy_occ_NN <- data_spacc_all_tidy_PF_non_native %>% filter(abundance != 0) %>% # sp has to be present to contribute filter(Native == 1) %>% group_by(ID, Year) %>% summarize(num_native_sp = n()) data_spacc_all_tidy_non_native <- data_spacc_all_tidy %>% filter(abundance != 0) %>% # sp has to be present to contribute filter(Native == 0) %>% group_by(ID, Year) %>% summarize(num_nonnative_sp = n()) occurrence_proportion_df_NN <- data_spacc_all_tidy_occ_NN %>% full_join(data_spacc_all_tidy_non_native) %>% mutate_all(~replace(., is.na(.), 0)) %>% mutate(total_sp = num_native_sp + num_nonnative_sp, prop_nonnative_sp = num_nonnative_sp/total_sp) %>% ungroup() %>% mutate(ID = as.numeric(ID)) %>% arrange(ID) %>% bind_cols(shannon_div_df, simpson_div_df) occurrence_proportion_df_cat_NN <- occurrence_proportion_df_NN %>% filter(total_sp != 0) %>% mutate(category = ifelse(prop_nonnative_sp < .33, "Low", ifelse(prop_nonnative_sp > .66, "High", "Medium"))) occurrence_proportion_df_cat_NN$category <- factor(occurrence_proportion_df_cat_NN$category, levels = c("Low", "Medium", "High")) my_comparisons <- list( c("High", "Medium"), c("Low", "Medium"), c("Low", "High") ) occurrence_proportion_df_cat_tidy_NN <- occurrence_proportion_df_cat_NN %>% gather(diversity_type, diversity_measure, -c(ID, Year, num_native_sp, num_nonnative_sp, prop_nonnative_sp, Year1, Year2, category)) %>% mutate(diversity_type = case_when(diversity_type == "shannon_div" ~ "Shannon diversity index", diversity_type == 
"simpson_div" ~ "Simpson diversity index", diversity_type == "total_sp" ~ "Species richness")) occurrence_prop_sp_fig_NN <- ggplot(occurrence_proportion_df_cat_tidy_NN, aes(category, diversity_measure, fill = category))+ geom_boxplot(position =position_dodge())+ geom_point(position = position_jitterdodge(0.25), alpha = .75)+ stat_compare_means(comparisons = my_comparisons, aes(label = ..p.signif..))+ scale_fill_manual(values = wes_palette("GrandBudapest1"))+ scale_color_manual(values = wes_palette("GrandBudapest1"))+ labs(y = "Alpha diversity", x = "Proportion of non-native species (by occurrence with P.flavens as non-native)")+ theme_classic()+ theme(text = element_text(family = "Times New Roman", size = 14), legend.position = "bottom", legend.title = element_blank())+ facet_grid(diversity_type~., scales="free_y") plot_grid(occurrence_prop_sp_fig, occurrence_prop_sp_fig_NN, abundance_prop_alpha_fig, abundance_prop_alpha_fig_NN, cols = 2, rows =2) #### ^^ Not much (if any) real difference between outcomes of treating P. 
flavens as native or non-native ### or by having the proportion of non-native species be based on occurrence or abundance ### for shannon div # linear model lm_fit_shan_occ<- lm(shannon_div ~ prop_nonnative_sp, data=occurrence_proportion_df) summary(lm_fit_shan_occ) plot(lm_fit_shan_occ) tidy(lm_fit_shan_occ) glance(lm_fit_shan_occ) ## residuals plot looks more "random" -- linear model is more appropriate for these occurrence data than the abundance proportion ggplot(occurrence_proportion_df, aes(prop_nonnative_sp, shannon_div))+ geom_point()+ geom_line(data = broom::augment(lm_fit_shan_occ), aes(x = prop_nonnative_sp, y = .fitted))+ #facet_grid(~Year)+ theme_classic()+ labs(x = "Proportion of non-native ant species", y = "Shannon diversity index") ### for simpson div # linear model lm_fit_simp_occ<- lm(simpson_div ~ prop_nonnative_sp, data=occurrence_proportion_df) summary(lm_fit_simp_occ) plot(lm_fit_simp_occ) tidy(lm_fit_simp_occ) glance(lm_fit_simp_occ) ## residuals plot looks more "random" -- linear model is still not v. 
appropriate for these data than the abundance proportion ggplot(occurrence_proportion_df, aes(prop_nonnative_sp, simpson_div))+ geom_point()+ geom_line(data = broom::augment(lm_fit_simp_occ), aes(x = prop_nonnative_sp, y = .fitted))+ #facet_grid(~Year)+ theme_classic()+ labs(x = "Proportion of non-native ant species", y = "Simpson diversity index") ## Next step is to pull just Gabriela's data and look at year to year trends + seasonality in species composition ## using ordination (nMDS or PCoA and PERMANOVA + permdist) data_NMDS <- data_GZ_wide %>% select(-Year) # Then calculate multivariate distances between samples using vegdist() with one of various distance metrics: dist_jac <- vegdist(data_NMDS, method = "jaccard", binary = TRUE) # Binary jaccard distance (presence/absence) dist_BC <- vegdist(data_NMDS, method = "bray")#, binary = TRUE) # Bray-Curtis distance # Then generate an MDS object using metaMDS(): mds <- metaMDS(dist_BC) ## stress is too low with the full community -- too many super rare species str(mds) ## NMDS isn't converging with all of the individual pitfalls -- likely because many of them have few but differenct ant spp ## try this again but now grouped by site within year and season ## drop super rare species (present in > 1% of of total abundance) -- this results in 11 species data_new <- data_GZ_clean %>% select(Year, Month, Genus, Species, Abundance) %>% filter(Year == 2014 | Year == 2015) %>% rownames_to_column() %>% unite(species, c("Genus","Species"), sep = " ", remove = F) %>% spread(key = "species", value = "Abundance") %>% mutate_all(~replace(., is.na(.), 0))%>% ## make all the NAs under the species names zeros (for this analysis) group_by(Year, Month) %>% summarize_at(vars(`Aphaenogaster carolinensis`:`Trachymyrmex septentrionalis`), sum) %>% gather(key = "species", value = "Abundance", c(-Year,-Month)) %>% ungroup() %>% group_by(species) %>% summarize(Abundance = sum(Abundance)) %>% mutate(percent_total = Abundance/1754) %>% 
filter(percent_total > 0.01) sp_list <- as.list(data_new$species) total_abu <- data_new %>% ungroup() %>% summarize(total_abundance = sum(Abundance)) data_GZ_clean_subset <- data_GZ_clean %>% unite(species, c("Genus", "Species"), sep =" ", remove = F) %>% filter(species %in% sp_list) %>% spread("species", "Abundance") %>% mutate_all(~replace(., is.na(.), 0)) %>% group_by(Year, Month, Site) %>% ## summarize by site since we have multiple pitfall traps per site ## and the individual pitfall level resolution is too fine-scale summarize_at(vars(`Aphaenogaster carolinensis`:`Solenopsis molesta`), sum) GZ_wide_NMDS_new <- data_GZ_clean_subset %>% ungroup() %>% select(-c(Year:Site)) # Then calculate multivariate distances between samples using vegdist() with one of various distance metrics: dist_jac <- vegdist(GZ_wide_NMDS_new, method = "jaccard", binary = TRUE) # Binary jaccard distance (presence/absence) dist_BC <- vegdist(GZ_wide_NMDS_new, method = "bray")#, binary = TRUE) # Bray-Curtis distance # Then generate an MDS object using metaMDS(): ### hooray!! 
it converges and stress is 0.173 mds <- metaMDS(dist_BC, trymax = 300) # Plot the mds using basic graphics plot(mds) # Make this into a ggplot: # Extract the coordinates: points <- as.data.frame(mds$points) points # Add metadata to the points: data_GZ_clean_subset_2 <- data_GZ_clean_subset %>% rownames_to_column() %>% mutate(Season = ifelse(Month == 5, "Spring", "Fall")) %>% unite(Season_yr, c(Season, Year), remove =F) ## get the proportion of non-native/total spp for this subset of data subset_prop_NN <- data_GZ_clean_subset_2 %>% gather("Species", "abundance", -c(rowname:Site, Season)) %>% group_by(Season_yr, Site) %>% left_join(native_classification) %>% filter(abundance >0) %>% summarize(species_total = length(abundance), native_total = sum(Native)) %>% mutate(non_native_total = species_total - native_total, prop_non_native = non_native_total/species_total, category = ifelse(prop_non_native < .33, "Low", ifelse(prop_non_native > .66, "High", "Medium"))) data_GZ_clean_subset_2 <- data_GZ_clean_subset_2 %>% left_join(subset_prop_NN, by = c("Season_yr", "Site")) points <- points %>% mutate(Sample = rownames(points), Year = data_GZ_clean_subset_2$Year[match(rownames(points), data_GZ_clean_subset_2$rowname)], Month = data_GZ_clean_subset_2$Month[match(rownames(points), data_GZ_clean_subset_2$rowname)], Season = data_GZ_clean_subset_2$Season[match(rownames(points), data_GZ_clean_subset_2$rowname)], Season_yr = data_GZ_clean_subset_2$Season_yr[match(rownames(points), data_GZ_clean_subset_2$rowname)], category = data_GZ_clean_subset_2$category[match(rownames(points), data_GZ_clean_subset_2$rowname)], prop_non_native = data_GZ_clean_subset_2$prop_non_native[match(rownames(points), data_GZ_clean_subset_2$rowname)]) points # Q4: Now that we have the MDS ordination data, how can we make it into a nice-looking ggplot? 
ggplot(data = points, aes(x = MDS1, y = MDS2))+ geom_point(aes(color = Season))+ stat_ellipse(aes(color = (Season)))+ scale_color_manual(values = c("darkgray", "black"))+ theme_classic() ggplot(data = points, aes(x = MDS1, y = MDS2))+ geom_point(aes(color = as.factor(Month)))+ stat_ellipse(aes(color = as.factor(Month)))+ labs(x = "nMDS1", y = "nMDS2")+ theme_classic() perm <- adonis(dist_BC ~ Season*Year, data = data_GZ_clean_subset_2, strata = data_GZ_clean_subset_2$Site) perm ## Composition in ant community affected by seasonality but not by year ## check for homogeneity in dispersion - if there is, it may invalidate the PERMANOVA sp. composition results group <- data_GZ_clean_subset_2$Month ## Calculate multivariate dispersions mod <- betadisper(dist_BC, group) mod ## Perform test anova(mod) ## Permutation test for F permutest(mod, pairwise = TRUE) ## Tukey's Honest Significant Differences (mod.HSD <- TukeyHSD(mod)) plot(mod.HSD) ## Plot the groups and distances to centroids on the ## first two PCoA axes plot(mod) ## Draw a boxplot of the distances to centroid for each group boxplot(mod) ## no difference in homogeneity of dispersion, can continue with the seasonality differences being due to compositional differences ## rather than being driven by differences in composition within sample groups (higher vs lower beta diversity) ############################# ################## using Meghan's data test for correlation between proportion of non-native ants and distance to edge of preserve ### This section is just for pitfall traps ## GIS dataframe created by Meghan GIS_df <- read_excel("BIG THICKET GIS DATA 4-6-2016.xlsx", sheet="Sheet1") GIS_select <- GIS_df %>% select(Site_Code, DistanceToEdgeOfPreserve) data_MH_pit <- data_MH_clean %>% filter(`Collection Method` == "Pit") %>% gather("Species", "abundance", -c(Site:`Collector (1=SES)`,Site_Code, `Inside or Outside (Inside=1)`)) %>% left_join(native_classification, by = "Species") %>% filter(abundance != 0) 
data_MH_pit_summary <- data_MH_pit %>% group_by(Site, `Specific Site Number`, Site_Code, Replicate) %>% summarize(total_species = length(Species), total_native = sum(Native), total_non_native = total_species-total_native, prop_non_native = total_non_native/total_species) %>% mutate(non_native_binom = ifelse(total_non_native >0, 1, 0)) %>% left_join(GIS_select) %>% mutate(Site_ID = `Specific Site Number`, dist_km = (DistanceToEdgeOfPreserve/1000)) ## convert to km since glmer is having trouble with the scale of the data library(lme4) ## Binomial model of presence of non-native ant sp in pitfall trap by distance to preserve edge (Random effect of SITE) ## could also do model fitting glmer_0 <- glmer(cbind(total_non_native, total_native) ~ (1|Site_ID), family = "binomial", data = data_MH_pit_summary) glmer_fit <- glmer(cbind(total_non_native, total_native) ~ dist_km + (1|Site_ID), family = "binomial", data = data_MH_pit_summary) summary(glmer_0) summary(glmer_fit) ## model with the fixed effect of distance has the lower AIC value and the majority of the weight AICtab(glmer_0, glmer_fit, weights = T) plot(x=data_MH_pit_summary$dist_km, y = data_MH_pit_summary$non_native_binom) MyData =data.frame(cbind(dist_km = seq(from = min(data_MH_pit_summary$dist_km), to = max(data_MH_pit_summary$dist_km, length.out = nrow(data_MH_pit_summary))), Site_ID = seq(from = min(data_MH_pit_summary$dist_km), to = max(data_MH_pit_summary$dist_km, length.out = nrow(data_MH_pit_summary))))) Pred <- predict(glmer_fit, newdata = MyData, type = "response", allow.new.levels=T) lines(MyData$dist_km, Pred) ## make this in ggplot pd <-with(data_MH_pit_summary, data.frame(cbind(dist_km = seq(min(data_MH_pit_summary$dist_km), max(data_MH_pit_summary$dist_km), length.out = nrow(data_MH_pit_summary))), Site_ID = seq(min(data_MH_pit_summary$dist_km), max(data_MH_pit_summary$dist_km), length.out = nrow(data_MH_pit_summary)))) pd1 <- cbind(pd, predict(glmer_fit, newdata=pd, type = "response", 
allow.new.levels = T)) %>% rename(fit_y = `predict(glmer_fit, newdata = pd, type = "response", allow.new.levels = T)`) ## binomial model fit to presence/absence of any non-native ant sp found in pitfall trap ggplot(data_MH_pit_summary, aes((dist_km), non_native_binom))+ geom_point(position = position_jitter(width =.02, height = 0.02), alpha = .6)+ geom_line(data = pd1, aes(x = dist_km_x, y = fit_y))+ theme_classic()+ labs(x = "Distance to edge of preserve (km)", y = "Probability of non-native ant species present") anova(glmer_fit, test="Chisq") ##### visualize and test for relationship between the to dominant non-native ants and distance to edge data_MH_pit_invasives <- data_MH_clean %>% filter(`Collection Method` == "Pit") %>% gather("Species", "abundance", -c(Site:`Collector (1=SES)`,Site_Code, `Inside or Outside (Inside=1)`)) %>% left_join(native_classification, by = "Species") %>% filter(abundance != 0, Species == "Nylanderia fulva" | Species == "Solenopsis invicta") %>% left_join(GIS_select) %>% mutate(dist_km = (DistanceToEdgeOfPreserve/1000)) ggplot(data_MH_pit_invasives,aes(dist_km, abundance))+ geom_point(aes(color = Species))+ theme_classic() ############################ same as above but for all collection methods ## GIS dataframe created by Meghan GIS_select <- GIS_df %>% select(Site_Code, DistanceToEdgeOfPreserve) data_MH_all <- data_MH_clean %>% #filter(`Collection Method` == "Pit") %>% gather("Species", "abundance", -c(Site:`Collector (1=SES)`,Site_Code, `Inside or Outside (Inside=1)`)) %>% left_join(native_classification, by = "Species") %>% filter(abundance != 0) ## have to drop species that weren't found in the trap, since this is based on sp occurrence ## that is pooled at the Replicate level within site codes data_MH_all_summary <- data_MH_all %>% group_by(Site, `Specific Site Number`, Site_Code, Replicate, `Collection Method`) %>% summarize(total_species = length(Species), total_native = sum(Native), total_non_native = 
total_species-total_native, prop_non_native = total_non_native/total_species) %>% mutate(non_native_binom = ifelse(total_non_native >0, 1, 0)) %>% left_join(GIS_select) %>% mutate(Site_ID = `Specific Site Number`, dist_km = (DistanceToEdgeOfPreserve/1000)) ## convert to km since glmer is having trouble with the scale of the data library(lme4) ## Binomial model of presence of non-native ant sp in all trap types by distance to preserve edge (Random effect of SITE) ## could also do model fitting glmer_all_0 <- glmer(cbind(total_non_native, total_native) ~ (1|Site_ID), family = "binomial", data = data_MH_all_summary) glmer_all_fit <- glmer(cbind(total_non_native, total_native) ~ dist_km + (1|Site_ID), family = "binomial", data = data_MH_all_summary) summary(glmer_0) summary(glmer_fit) ## model with the fixed effect of distance has the lower AIC value and the majority of the weight AICtab(glmer_0, glmer_fit, weights = T) plot(x=data_MH_all_summary$dist_km, y = data_MH_all_summary$non_native_binom) MyData =data.frame(cbind(dist_km = seq(from = min(data_MH_all_summary$dist_km), to = max(data_MH_all_summary$dist_km, length.out = nrow(data_MH_all_summary))), Site_ID = seq(from = min(data_MH_all_summary$dist_km), to = max(data_MH_all_summary$dist_km, length.out = nrow(data_MH_all_summary))))) Pred <- predict(glmer_fit, newdata = MyData, type = "response", allow.new.levels=T) lines(MyData$dist_km, Pred) ## make this in ggplot pd <-with(data_MH_all_summary, data.frame(cbind(dist_km = seq(min(data_MH_all_summary$dist_km), max(data_MH_all_summary$dist_km), length.out = nrow(data_MH_all_summary))), Site_ID = seq(min(data_MH_all_summary$dist_km), max(data_MH_all_summary$dist_km), length.out = nrow(data_MH_all_summary)))) pd1 <- cbind(pd, predict(glmer_fit, newdata=pd, type = "response", allow.new.levels = T)) %>% rename(fit_y = `predict(glmer_fit, newdata = pd, type = "response", allow.new.levels = T)`) ## binomial model fit to presence/absence of any non-native ant sp found in 
pitfall trap ggplot(data_MH_all_summary, aes((dist_km), non_native_binom), color = `Collection Method`)+ geom_point(aes(color = `Collection Method`),position = position_jitter(width =.02, height = 0.02), alpha = .6)+ geom_line(data = pd1, aes(x = dist_km_x, y = fit_y))+ theme_classic()+ labs(x = "Distance to edge of preserve (km)", y = "Probability of non-native ant species present") ## comfortable with the spread of the different collection methods across the distance to edge, ## and occurrence of non-native ant spp ggplot(data_MH_all_summary, aes((dist_km), non_native_binom))+ geom_point(position = position_jitter(width =.02, height = 0.02), alpha = .6)+ geom_line(data = pd1, aes(x = dist_km_x, y = fit_y))+ theme_classic()+ labs(x = "Distance to edge of preserve (km)", y = "Probability of non-native ant species present") anova(glmer_fit, test="Chisq") ##### visualize and test for relationship between the to dominant non-native ants and distance to edge data_MH_all_invasives <- data_MH_clean %>% #filter(`Collection Method` == "Pit") %>% All collection methods gather("Species", "abundance", -c(Site:`Collector (1=SES)`,Site_Code, `Inside or Outside (Inside=1)`)) %>% left_join(native_classification, by = "Species") %>% filter(#abundance != 0, ## keeping in samples for which we did not observe the two spp of interest Species == "Nylanderia fulva" | Species == "Solenopsis invicta") %>% left_join(GIS_select) %>% filter(DistanceToEdgeOfPreserve != "NA") %>% mutate(Site_ID = `Specific Site Number`, dist_km = (DistanceToEdgeOfPreserve/1000)) lmer_inv_0 <- lmer(abundance ~ (1|Site_ID), data = data_MH_all_invasives, REML=F) lmer_inv_fit <- lmer(abundance ~ dist_km + (1|Site_ID), data = data_MH_all_invasives, REML=F) summary(lmer_inv_0) summary(lmer_inv_fit) ## model with the fixed effect of distance has the lower AIC value and the majority of the weight AICtab(lmer_inv_0, lmer_inv_fit, weights = T) pd_inv <-with(data_MH_all_invasives, data.frame(cbind(dist_km = 
seq(min(data_MH_all_invasives$dist_km), max(data_MH_all_invasives$dist_km), length.out = nrow(data_MH_all_invasives_no_zero))), Site_ID = seq(min(data_MH_all_invasives$dist_km), max(data_MH_all_invasives$dist_km), length.out = nrow(data_MH_all_invasives_no_zero)))) pd1_inv <- cbind(pd_inv, predict(lmer_inv_fit, newdata=pd_inv, type = "response", allow.new.levels = T)) %>% rename(fit_y = `predict(lmer_inv_fit, newdata = pd_inv, type = "response", allow.new.levels = T)`) ## similar trend across collection methods -- OK to "pool" ggplot(data_MH_pit_invasives,aes(dist_km, abundance))+ geom_point(aes(color = Species, shape = `Collection Method`))+ geom_line(data = pd1_inv, aes(x = dist_km, y = fit_y))+ #geom_smooth(method = "lm", se=F) ## nearly identical fit to our estimate with random effects of site theme_classic() ggplot(data_MH_pit_invasives,aes(dist_km, abundance))+ geom_point(aes(color = Species))+ theme_classic()+ geom_smooth(method = "lm", se=F) ## combining across collection methods doesn't affect presence/absence probability of presence, ## but it does greatly increases the abundances ##### visualize and test for relationship between the to dominant non-native ants and distance to edge data_MH_all_invasives_no_zero <- data_MH_clean %>% #filter(`Collection Method` == "Pit") %>% All collection methods gather("Species", "abundance", -c(Site:`Collector (1=SES)`,Site_Code, `Inside or Outside (Inside=1)`)) %>% left_join(native_classification, by = "Species") %>% filter(abundance != 0, ## drop instances where we did not observe species Species == "Nylanderia fulva" | Species == "Solenopsis invicta") %>% left_join(GIS_select) %>% filter(DistanceToEdgeOfPreserve != "NA") %>% mutate(Site_ID = `Specific Site Number`, dist_km = (DistanceToEdgeOfPreserve/1000)) ## convert to km since glmer is having trouble with the scale of the data lmer_inv_0 <- lm(abundance ~ (1), data = data_MH_all_invasives_no_zero) lmer_inv_fit <- lm(abundance ~ dist_km, data = 
data_MH_all_invasives_no_zero) ##^^ getting an error message "boundary (singular)" means variance is basically = 0 -- not fitting well summary(lmer_inv_0) summary(lmer_inv_fit) ## model with the random effect has the lower AIC value and the majority of the weight -- go with modelt hat includes zeros AICtab(lmer_inv_0, lmer_inv_fit, weights = T) #anova(lmer_inv_0, lmer_inv_fit) plot(x=data_MH_all_invasives_no_zero$dist_km, y = data_MH_all_invasives_no_zero$abundance) MyData =data.frame(cbind(dist_km = seq(from = min(data_MH_all_invasives_no_zero$dist_km), to = max(data_MH_all_invasives_no_zero$dist_km, length.out = nrow(data_MH_all_summary))), Site_ID = seq(from = min(data_MH_all_invasives_no_zero$dist_km), to = max(data_MH_all_invasives_no_zero$dist_km, length.out = nrow(data_MH_all_summary))))) Pred <- predict(lmer_inv_fit, newdata = MyData, type = "response", allow.new.levels=T) lines(MyData$dist_km, Pred) ## make this in ggplot pd <-with(data_MH_all_invasives_no_zero, data.frame(cbind(dist_km = seq(min(data_MH_all_invasives_no_zero$dist_km), max(data_MH_all_invasives_no_zero$dist_km), length.out = nrow(data_MH_all_invasives_no_zero))), Site_ID = seq(min(data_MH_all_invasives_no_zero$dist_km), max(data_MH_all_invasives_no_zero$dist_km), length.out = nrow(data_MH_all_invasives_no_zero)))) pd1 <- cbind(pd, predict(glmer_fit, newdata=pd, type = "response", allow.new.levels = T)) %>% rename(fit_y = `predict(glmer_fit, newdata = pd, type = "response", allow.new.levels = T)`) ## similar trend across collection methods -- OK to "pool" ggplot(data_MH_pit_invasives_no_zero,aes(dist_km, abundance))+ geom_point(aes(color = Species, shape = `Collection Method`))+ theme_classic()+ geom_smooth(method = "lm", se=F)+ facet_grid(~`Collection Method`) ggplot(data_MH_pit_invasives_no_zero,aes(dist_km, abundance))+ geom_point(aes(color = Species))+ theme_classic()+ geom_smooth(method = "lm", se=F)
dbdfce9c0b0852cd770c91b56e7ec7870262444d
422f5b03f9d34dbcae13e3631276bfdacf4ddb5a
/man/save_as_csv.Rd
b91838042ccd81747fc0ab8240698b10043d24b1
[]
no_license
priscilleb/pack2
baf01ac01560380a99ed0165e6647b6c5ba64a05
4ecf2f81f795faf236f575c769bfba32e5dfe51c
refs/heads/master
2021-07-11T06:01:31.248835
2017-10-16T07:23:12
2017-10-16T07:23:12
106,695,707
0
0
null
null
null
null
UTF-8
R
false
true
516
rd
save_as_csv.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/save_as_csv.R \name{save_as_csv} \alias{save_as_csv} \title{Save as csv function} \usage{ save_as_csv(dataset, filename, row.names = FALSE, ...) } \arguments{ \item{dataset}{the dataset you work with} \item{filename}{the name of the csv file to write the dataset to} \item{row.names}{logical: should row names be written to the file?} \item{...}{further arguments passed on to the underlying write function} } \value{ a dataframe } \description{ Save as csv function } \examples{ \dontrun{ save_as_csv(dataset_name, myfile_with_a_csv_extension) } }
9ce3e533d379bd6e1a6813df20dcdb4ad6bc1fa4
3f68b93b391901166962be7557dc9fbca2b5a4b9
/Assignment2/CheckPoint2.R
787335425de99b8884291fba14e18a75d6ad30b0
[]
no_license
RodolfoViana/DataAnalysis
12ea14dcc843fe45363d210fbd27ba4ad826a8e3
515a8df17fad7d2a41ee4d2de802ca8f2efc2307
refs/heads/master
2021-01-18T21:11:36.781314
2016-04-27T14:31:53
2016-04-27T14:31:53
34,022,913
0
0
null
null
null
null
UTF-8
R
false
false
3,203
r
CheckPoint2.R
library(dplyr)
library(ggplot2)

# Brazilian Chamber of Deputies expense data (CEAP), current year.
file <- read.csv("Projetos/DataAnalysis/Assignment1/AnoAtual.csv")

# Keep only the fuel-and-lubricant expense records.
vlrCombustiveis <- file %>%
  filter(txtDescricao == "COMBUSTÍVEIS E LUBRIFICANTES.") %>%
  select(vlrLiquido, numSubCota)

# Central value of the fuel expenses.
# NOTE(review): the name/comment say "media" (mean) but median() is
# computed -- confirm which statistic is actually intended.
mediaVlrCombustiveis <- median(vlrCombustiveis$vlrLiquido)

# Probability density of the raw fuel expense values.
ggplot(vlrCombustiveis, aes(vlrLiquido)) +
  geom_density() +
  labs(x = "Valor liquido", y = "Probablidade") +
  theme_bw()

# Same density, zoomed into the [0, 1000] range.
ggplot(vlrCombustiveis, aes(vlrLiquido)) +
  xlim(0, 1000) +
  geom_density() +
  labs(x = "Valor liquido", y = "Probablidade") +
  theme_bw()

# Draw `num_samples` random samples of size `sample_size` (without
# replacement) from `dist` and return the vector of their means.
# Replaces six near-identical copy-pasted loops in the original; the
# result is built with vapply() instead of growing a vector in a loop.
sample_means <- function(dist, num_samples, sample_size) {
  vapply(
    seq_len(num_samples),
    function(i) mean(sample(dist, sample_size)),
    numeric(1)
  )
}

dist_original <- vlrCombustiveis$vlrLiquido
sample_size <- 100

# Density of the means of 10 samples of size 100 (Central Limit
# Theorem illustration: sample means concentrate around the true mean).
samples_means <- sample_means(dist_original, 10, sample_size)
ggplot(data.frame(samples_means), aes(samples_means)) +
  geom_density() +
  labs(x = "Valor liquido", y = "Probablidade") +
  theme_bw()

# Repeat while doubling the NUMBER of samples (not the observations per
# sample), plotting a histogram of the sample means at each iteration.
for (num_samples in c(20, 40, 80, 160, 320)) {
  samples_means <- sample_means(dist_original, num_samples, sample_size)
  # print() is required for ggplot objects inside a for loop.
  print(
    ggplot(data.frame(samples_means), aes(samples_means)) +
      geom_histogram()
  )
}
9554a6d7bafbd024b32da6b1942844c16e28d5ab
9f0d257aeced4a86589df64bc0dde2c3f67ec3db
/ExploratoryDataAnalysis/Kmeans-Clustering/kmeans.R
a5393d54383d81c21abb3089aaaddaa94abb83a1
[]
no_license
gomathipugazh/Data-Science-Coursera
399e9848288b230e7a41ebbf5e163f26e29e1eaa
ee74157abc61e5f1a93315d2a4fda6d293128388
refs/heads/master
2021-01-09T07:05:35.209982
2016-07-20T04:56:39
2016-07-20T04:56:39
63,537,762
0
0
null
null
null
null
UTF-8
R
false
false
4,424
r
kmeans.R
## Exploratory data analysis notes: k-means clustering, heatmaps, and
## SVD/PCA dimension reduction.
##
## Two motivations for these tools:
##  1. Statistic   - try to see the raw data in an understandable pattern.
##  2. Compression - represent the data as compactly as possible.

## ---- K-means clustering on simulated 2-d data ----
set.seed(1234)
par(mar = c(0, 0, 0, 0))
x <- rnorm(12, mean = rep(1:3, each = 4), sd = 0.2)
y <- rnorm(12, mean = rep(c(1, 2, 1), each = 4), sd = 0.2)
dataFrame <- data.frame(x, y)
kmeansobj <- kmeans(dataFrame, centers = 3)
names(kmeansobj)
kmeansobj$cluster

## Plot points colored by cluster, with cluster centers as crosses.
par(mar = rep(0.2, 4))
plot(x, y, col = kmeansobj$cluster, pch = 19, cex = 2)
points(kmeansobj$centers, col = 1:3, pch = 3, cex = 3, lwd = 3)

## ---- Heatmap-style view of the clustered data ----
set.seed(1234)
dataMatrix <- as.matrix(dataFrame)[sample(1:12), ]
kmeansobj2 <- kmeans(dataMatrix, centers = 3)
par(mfrow = c(1, 2), mar = c(2, 4, 0.1, 0.1))
image(t(dataMatrix)[, nrow(dataMatrix):1], yaxt = "n")
## BUG FIX: order the (shuffled) rows by the clustering fitted to the
## shuffled matrix (kmeansobj2), not by kmeansobj, whose cluster labels
## refer to the original, unshuffled row order. kmeansobj2 was computed
## but never used in the original code.
image(t(dataMatrix)[, order(kmeansobj2$cluster)], yaxt = "n")

## ---- Random matrix: no real pattern ----
set.seed(12345)
par(mar = rep(0.2, 4))
dataMatrix <- matrix(rnorm(400), nrow = 40)
image(1:10, 1:40, t(dataMatrix)[, nrow(dataMatrix):1])

## Clustering pure noise still produces a dendrogram -- but no real
## pattern emerges.
par(mar = rep(0.2, 4))
heatmap(dataMatrix)

## ---- Inject a column pattern into roughly half of the rows ----
set.seed(678910)
for (i in 1:40) {
  ## Flip a coin; on heads, add a mean shift to the last 5 columns of
  ## that row.
  coinflip <- rbinom(1, size = 1, prob = 0.5)
  if (coinflip) {
    dataMatrix[i, ] <- dataMatrix[i, ] + rep(c(0, 3), each = 5)
  }
}

## Now the pattern shows up in both the raw image and the heatmap.
par(mar = rep(0.2, 4))
image(1:10, 1:40, t(dataMatrix)[, nrow(dataMatrix):1])
par(mar = rep(0.2, 4))
heatmap(dataMatrix)

## ---- Row and column marginal patterns ----
hh <- hclust(dist(dataMatrix))
dataMatrixOrdered <- dataMatrix[hh$order, ]
par(mfrow = c(1, 3))
image(t(dataMatrixOrdered)[, nrow(dataMatrixOrdered):1])
plot(rowMeans(dataMatrixOrdered), 40:1, xlab = "Row Mean", ylab = "Row",
     pch = 19)
plot(colMeans(dataMatrixOrdered), xlab = "Column", ylab = "Column Mean",
     pch = 19)

## ---- SVD components: left (u) and right (v) singular vectors ----
svd1 <- svd(scale(dataMatrixOrdered))
par(mfrow = c(1, 3))
image(t(dataMatrixOrdered)[, nrow(dataMatrixOrdered):1])
plot(svd1$u[, 1], 40:1, xlab = "Row",
     ylab = "First left singular vector", pch = 19)
plot(svd1$v[, 1], xlab = "Column",
     ylab = "First right singular vector", pch = 19)

## ---- Variance explained by each singular value (d) ----
par(mfrow = c(1, 2))
plot(svd1$d, xlab = "Column", ylab = "Singular value", pch = 19)
plot(svd1$d^2 / sum(svd1$d^2), xlab = "Column",
     ylab = "Prop. of variance explained", pch = 19)

## ---- Relationship to principal components ----
## The first right singular vector equals the first PC rotation.
svd1 <- svd(scale(dataMatrixOrdered))
pca1 <- prcomp(dataMatrixOrdered, scale = TRUE)
plot(pca1$rotation[, 1], svd1$v[, 1], pch = 19,
     xlab = "Principal Component 1", ylab = "Right Singular Vector 1")
abline(c(0, 1))

## ---- Missing values: svd() cannot handle them ----
dataMatrix2 <- dataMatrixOrdered
## Randomly insert some missing data.
dataMatrix2[sample(1:100, size = 40, replace = FALSE)] <- NA
## Deliberate failure, wrapped in try() so the rest of the script still
## runs after demonstrating the error.
try(svd1 <- svd(scale(dataMatrix2)))

## ---- Impute missing values with k-nearest neighbours ----
## Install the Bioconductor "impute" package only when it is absent,
## instead of re-installing on every run.
## NOTE(review): biocLite is obsolete; current Bioconductor installs use
## BiocManager::install("impute").
if (!requireNamespace("impute", quietly = TRUE)) {
  source("https://bioconductor.org/biocLite.R")
  biocLite("impute")
}
library(impute) ## Available from http://bioconductor.org
dataMatrix2 <- dataMatrixOrdered
dataMatrix2[sample(1:100, size = 40, replace = FALSE)] <- NA
dataMatrix2 <- impute.knn(dataMatrix2)$data
svd1 <- svd(scale(dataMatrixOrdered))
svd2 <- svd(scale(dataMatrix2))
par(mfrow = c(1, 2))
plot(svd1$v[, 1], pch = 19)
plot(svd2$v[, 1], pch = 19)

## ---- Face data: low-rank approximation via SVD ----
library(jpeg)
load("face.rda") ## loads faceData into the workspace
image(t(faceData)[, nrow(faceData):1])
svd1 <- svd(scale(faceData))
plot(svd1$d^2 / sum(svd1$d^2), pch = 19, xlab = "Singular Vector",
     ylab = "Varience explained")

## %*% is matrix multiplication. With a single component svd1$d[1] is a
## scalar constant; with several components diag() builds the diagonal
## matrix of singular values.
approx1 <- svd1$u[, 1] %*% t(svd1$v[, 1]) * svd1$d[1]
approx5 <- svd1$u[, 1:5] %*% diag(svd1$d[1:5]) %*% t(svd1$v[, 1:5])
approx10 <- svd1$u[, 1:10] %*% diag(svd1$d[1:10]) %*% t(svd1$v[, 1:10])
par(mfrow = c(1, 4))
image(t(approx1)[, nrow(approx1):1], main = "(a)")
image(t(approx5)[, nrow(approx5):1], main = "(b)")
image(t(approx10)[, nrow(approx10):1], main = "(c)")
image(t(faceData)[, nrow(faceData):1], main = "(d)")

## ---- Original image data ----
xy <- matrix(readJPEG("amma.jpg"), nrow = 150, ncol = 150)
image(t(xy)[, nrow(xy):1])
## NOTE(review): the lines below are broken leftovers -- "amma.Rda" is
## read both as RDS and as a table (it cannot be both), and load()
## cannot read a JPEG. Disabled to keep the script runnable.
# readRDS("amma.Rda")
# read.table("amma.Rda")
# load("hand.jpg")
3f485f59414152610d2cf138ce9a695159cdaf0b
58b943c940514e5c08106b97da156404ba6345cf
/R/plot.treeshape.R
64538e4121a22a53f5d808c13493eb1090a2fe43
[]
no_license
bcm-uga/apTreeshape
1dd209ae8e4a0a30d22b906a99f98f459ef992f7
369236069399b25f462bc109db305cbdb4a6113c
refs/heads/master
2020-04-09T18:54:00.858207
2019-09-25T12:08:34
2019-09-25T12:08:34
124,239,795
2
0
null
null
null
null
UTF-8
R
false
false
955
r
plot.treeshape.R
"plot.treeshape" <- function(x, y, ...){ plotone <- function(tree, ...){ n <- nrow(tree$merge)+400 hc <- hclust(d = dist(runif(n), method = "euclidean"), method = "ward") hc$merge <- tree$merge hc$height <- 1:nrow(tree$merge) descendant <- smaller.clade.spectrum(tree)[,1] current <- 1 for (i in 2:(nrow(tree$merge)+1)) { if (sum(descendant==i)!=0) { descendant[descendant==i] <- current current <- current+1 } } for (i in 1:length(descendant)) { hc$height[length(descendant)-i+1]=descendant[i] } #return(hc$height) hc$labels <- tree$names hc <- as.dendrogram(hc) mar<-par()$mar par(mar=c(1,1,1,10)) plot(hc, horiz=TRUE, axes=FALSE, ...) par(mar=mar) } tree1<-x if (missing(y)){ plotone(tree1) #text(x=-2, y=1, label=tree1$names[1]) } else{ tree2<-y if (class(tree2)!='treeshape') { stop("invalid arguments") } layout(t(c(1,2))) plotone(tree1, ...) plotone(tree2, ...) layout(1) } }
a7625a8cee58023eabdaa5a462e869c0dfb9cd3d
ad944e3412abc220df1e41d23d7ade203e1af4db
/Arizona_expected_votes_2020.R
31fd8268fbf8f18f9198562c0e6a02e51522f356
[]
no_license
Cluestar/US-elections
b0d6563686b8427191bbaf4c7d82544ef39f215e
83c46f7b256318a010282f73cff07b1b646e9149
refs/heads/main
2023-01-09T10:45:34.642177
2020-11-16T04:19:43
2020-11-16T04:19:43
307,650,612
1
0
null
null
null
null
UTF-8
R
false
false
1,608
r
Arizona_expected_votes_2020.R
percentage <- c(82, 82, 83, 75, 89, 82, 99, 85, 97, 97, 89, 78) votes_trump <- c(69830, 38710, 24445, 6715, 4542, 84386, 29561, 231845, 323863, 254561, 31183, 97439) votes_biden <- c(22980,21684, 21374, 13979, 2050, 47164, 17843, 489165, 431656, 216512, 14282, 102322) counties <- c("Mohave", "Coconino", "Navajo", "Apache", "La Paz", "Yavapai", "Gila", "Graham", "Greenlee", "Yuma", "Maricopa", "Pinal", "Pima", "Santa Cruz", "Cochise") expected_biden <- votes_biden / percentage * 100 expected_trump <- votes_trump / percentage * 100 sumTrump <- cumsum(expected_trump) sumBiden <- cumsum(expected_biden) data <- data.frame( counties, votes_trump, votes_biden, expected_trump, expected_biden, sumTrump, sumBiden, percentage) TotalvotesTrump <- data[12, 6] TotalvotesBiden <- data[12, 7] Difference <- TotalvotesTrump - TotalvotesBiden data <- data.frame( counties, votes_trump, votes_biden, expected_trump, expected_biden, sumTrump, sumBiden, percentage, TotalvotesBiden, TotalvotesTrump, Difference) df1 <- data.frame(counties, expected_biden, votes_biden, expected_trump, votes_trump) df2 <- melt(df1, id.vars ='counties') ggplot(df2, aes(x=counties, y=value, fill=variable)) + geom_bar(stat = 'identity', position = 'dodge') + scale_fill_manual("Legend", values = c("expected_trump" = "red", "expected_biden" = "blue", "votes_biden" = "cyan", "votes_trump" = "pink"))
ee0d678e9c465d7ca805c949cf7f93349c13d03c
e6a0ec6265fd79fc78ef2bcc84f27e836fc51e4a
/universitySalariesR.R
50abe0a2241c1b7089773f52520e978157b4ff4b
[]
no_license
adarcang/syracuseUniversity
5f34450e25294956e314fd85283d2c15a3ff21b7
2f1b277bac85a912532939281a940b0ec6977b28
refs/heads/main
2023-08-08T06:09:56.439361
2021-08-30T16:41:56
2021-08-30T16:41:56
364,284,119
0
0
null
null
null
null
UTF-8
R
false
false
415
r
universitySalariesR.R
install.packages("sf") install.packages("tidyverse") install.packages("leaflet") install.packages("sp") install.packages("RODBC") install.packages("readxl") library(sf) library(tidyverse) library(leaflet) library(sp) library(RODBC) library(readxl) setwd("C:/Users/adarcangelo/Documents/GitHub/syracuseUniversity") salaries <- read_excel("University Salaries.xlsx") View(salaries) str(salaries) summary(salaries)
e0846a2ac09b223fb62e07cd8f9ae8a8547f737f
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
/CGGP/R/CGGP_fit_fs.R
c7b1138a83f4eb06f0504acd1fd4c5eff15a3141
[]
no_license
akhikolla/InformationHouse
4e45b11df18dee47519e917fcf0a869a77661fce
c0daab1e3f2827fd08aa5c31127fadae3f001948
refs/heads/master
2023-02-12T19:00:20.752555
2020-12-31T20:59:23
2020-12-31T20:59:23
325,589,503
9
2
null
null
null
null
UTF-8
R
false
false
23,834
r
CGGP_fit_fs.R
#' Update CGGP model given data #' #' This function will update the GP parameters for a CGGP design. #' #' @param CGGP Sparse grid objects #' @param Y Output values calculated at CGGP$design #' @param Xs Supplemental X matrix #' @param Ys Supplemental Y values #' @param theta0 Initial theta #' @param Ynew Values of `CGGP$design_unevaluated` #' @param separateoutputparameterdimensions If multiple output dimensions, #' should separate parameters be fit to each dimension? #' @param set_thetaMAP_to Value for thetaMAP to be set to #' @param HandlingSuppData How should supplementary data be handled? #' * Correct: full likelihood with grid and supplemental data #' * Only: only use supplemental data #' * Ignore: ignore supplemental data #' @param corr Will update correlation function, if left missing it will be #' same as last time. #' #' @importFrom stats optim rnorm runif nlminb #' #' @return Updated CGGP object fit to data given #' @export #' @family CGGP core functions #' #' @examples #' cg <- CGGPcreate(d=3, batchsize=100) #' y <- apply(cg$design, 1, function(x){x[1]+x[2]^2}) #' cg <- CGGPfit(CGGP=cg, Y=y) CGGPfit <- function(CGGP, Y, Xs=NULL,Ys=NULL, theta0 = pmax(pmin(CGGP$thetaMAP,0.8),-0.8), #gotta pull away from edges to get not corner solution HandlingSuppData=CGGP$HandlingSuppData, separateoutputparameterdimensions=is.matrix(CGGP$thetaMAP), set_thetaMAP_to, corr, Ynew) { # ========================================. # ==== Check inputs, get Y from Ynew ==== # ========================================. # If different correlation function is given, update it if (!missing(corr)) { message("Changing correlation function") CGGP <- CGGP_internal_set_corr(CGGP, corr) } # If Y or Ynew is matrix with 1 column, convert it to vector to avoid issues if (!missing(Y) && is.matrix(Y) && ncol(Y)==1) { Y <- c(Y) } if (!missing(Ynew) && is.matrix(Ynew) && ncol(Ynew)==1) { Ynew <- c(Ynew) } # If Ynew is given, it is only the points that were added last iteration. 
# Append it to previous Y if (!missing(Ynew)) { if (!missing(Y)) {stop("Don't give both Y and Ynew, only one")} if (is.null(CGGP$Y) || length(CGGP$Y)==0) { if (is.matrix(Ynew) && nrow(Ynew) != nrow(CGGP$design_unevaluated)) { stop("nrow(Ynew) doesn't match") } if (!is.matrix(Ynew) && length(Ynew) != nrow(CGGP$design_unevaluated)) { stop("length(Ynew) doesn't match") } Y <- Ynew } else if (is.matrix(CGGP$Y)) { if (!is.matrix(Ynew)) {stop("Ynew should be a matrix")} if (nrow(Ynew) != nrow(CGGP$design_unevaluated)) { stop("Ynew is wrong size") } Y <- rbind(CGGP$Y, Ynew) } else { # is numeric vector if (length(Ynew) != nrow(CGGP$design_unevaluated)) { stop("Ynew is wrong size") } Y <- c(CGGP$Y, Ynew) } } if ((is.matrix(Y) && nrow(Y) == nrow(CGGP$design)) || (length(Y) == nrow(CGGP$design))) { CGGP$design_unevaluated <- NULL } else { stop("CGGP$design and Y have different length") } # ====================================================================. # Do the pre-processing ==== # For cleanness: Y is always the user input, y is after transformation # ====================================================================. 
CGGP$Y = Y if (any(is.na(Y))) { message(paste0(sum(is.na(Y)), "/",length(Y)," Y values are NA, will impute them")) } if(is.null(Xs)){ # No supplemental data CGGP$supplemented = FALSE if(!is.matrix(Y)){ CGGP$mu = mean(Y[!is.na(Y)]) y = Y-CGGP$mu }else{ # Y is matrix, PCA no longer an option CGGP$mu = colMeans(Y) for(oplcv in 1:dim(Y)[2]){ CGGP$mu[oplcv] = mean(Y[!is.na(Y[,oplcv]),oplcv]) } y <- sweep(Y, 2, CGGP$mu) # Need to set CGGP$M somewhere so that it doesn't use transformation } CGGP$y = y } else{ # Has supp data, used for prediction but not for fitting params # stop("Not working for supp") CGGP$supplemented = TRUE CGGP$Xs = Xs CGGP$Ys = Ys if(!is.matrix(Y)){ CGGP$mu = mean(Ys[!is.na(Ys)]) y = Y-CGGP$mu ys = Ys-CGGP$mu } else{ # PCA no longer an option CGGP$mu = colMeans(Ys) # Could use Y, or colMeans(rbind(Y, Ys)), for(oplcv in 1:dim(Ys)[2]){ CGGP$mu[oplcv] = mean(Ys[!is.na(Ys[,oplcv]),oplcv]) } # or make sure Ys is big enough for this y <- sweep(Y, 2, CGGP$mu) ys <- sweep(Ys, 2, CGGP$mu) } CGGP$y = y CGGP$ys = ys } # nopd is numberofoutputparameterdimensions nopd <- if (separateoutputparameterdimensions && is.matrix(y)) { ncol(y) } else { 1 } # Fix theta0 if (nopd > 1) { if (is.vector(theta0)) { theta0 <- matrix(theta0, nrow=length(theta0), ncol=nopd, byrow=F) } } # Can get an error for theta0 if number of PCA dimensions has changed if (is.matrix(theta0) && (ncol(theta0) != nopd)) { stop(paste("theta0 should have", nopd, "columns")) } # =======================================================. # Fit parameters for each output parameter dimension ==== # =======================================================. 
for (opdlcv in 1:nopd) { # output parameter dimension y.thisloop <- if (nopd==1) {y} else {y[,opdlcv]} # All of y or single column if (!is.null(Ys)) {ys.thisloop <- if (nopd==1) {ys} else {ys[,opdlcv]}} else {ys.thisloop <- NULL} theta0.thisloop <- if (nopd==1) {theta0} else {theta0[,opdlcv]} if(any(is.na(y.thisloop))){ if (!missing(set_thetaMAP_to) && !is.null(set_thetaMAP_to)) { opt.out <- list(par = if (nopd>1) {set_thetaMAP_to[,opdlcv]} else {set_thetaMAP_to}) y.thisloop=CGGP_internal_imputesomegrid(CGGP,y.thisloop,opt.out) repeattimes = 1 }else{ y.orig = y.thisloop y.thisloop=CGGP_internal_imputesomegrid(CGGP,y.thisloop,theta0.thisloop) repeattimes = 10 } }else{ repeattimes = 1 } for(imputelcv in 1:repeattimes){ if(imputelcv > 1.5){ if(imputelcv < 2.5){ y.thisloop=CGGP_internal_imputesomegrid(CGGP,y.orig,thetaMAP) }else{ ystart = y.thisloop y.thisloop=CGGP_internal_imputesomegrid(CGGP,y.orig,thetaMAP,ystart=ystart) if(max(abs(y.thisloop-ystart))<10^(-10)*max(abs(ystart))){ break } } } # Find MAP theta if (!missing(set_thetaMAP_to) && !is.null(set_thetaMAP_to)) { opt.out <- list(par = if (nopd>1) {set_thetaMAP_to[,opdlcv]} else {set_thetaMAP_to}) } else if (is.null(CGGP$Xs)){ # No supp data, just optimize opt.out = nlminb( theta0.thisloop, objective = CGGP_internal_neglogpost, gradient = CGGP_internal_gneglogpost, y = y.thisloop, CGGP = CGGP, ys = ys.thisloop, Xs = Xs, HandlingSuppData=HandlingSuppData, control = list(rel.tol = 1e-4,iter.max = 500) ) } else { # W/ supp data, optimize on grid first, then with both # Only grid data b/c it's fast opt.out = nlminb( theta0.thisloop, objective = CGGP_internal_neglogpost, gradient = CGGP_internal_gneglogpost, y = y.thisloop, CGGP = CGGP, HandlingSuppData="Ignore", # Never supp data here, so set to Ignore # regardless of user setting lower=rep(-.9, CGGP$d), upper=rep( .9, CGGP$d), control = list(rel.tol = 1e-2,iter.max = 500) ) neglogpost_par <- CGGP_internal_neglogpost(theta=opt.out$par, CGGP=CGGP, y=y.thisloop, 
ys=ys.thisloop, Xs=Xs, HandlingSuppData=HandlingSuppData ) if (is.infinite(neglogpost_par)) { theta0_2 <- rep(0, CGGP$d) } else { theta0_2 <- opt.out$par } # Then use best point as initial point with supp data opt.out = nlminb( theta0_2, objective = CGGP_internal_neglogpost, gradient = CGGP_internal_gneglogpost, y = y.thisloop, ys = ys.thisloop, Xs = Xs, CGGP = CGGP, HandlingSuppData = HandlingSuppData, control = list(rel.tol = 1e-4,iter.max = 500) ) } # End supp data opt thetaMAP <- opt.out$par sigma2MAP <- CGGP_internal_calcsigma2anddsigma2(CGGP=CGGP, y=y.thisloop, theta=thetaMAP, return_lS=FALSE)$sigma2 if(imputelcv > 1.5){ if(all(abs(sigma2MAP0-sigma2MAP)<0.025*sigma2MAP)){ break } sigma2MAP0 = sigma2MAP }else{ # On first time through sigma2MAP0 = sigma2MAP } } # ===================================. # Update parameters and samples ==== # ===================================. # Set new theta # If one value, it gives it as matrix. Convert it to scalar if (length(sigma2MAP) == 1) {sigma2MAP <- sigma2MAP[1,1]} lik_stuff <- CGGP_internal_calc_cholS_lS_sigma2_pw(CGGP=CGGP, y.thisloop, theta=thetaMAP) cholSs = lik_stuff$cholS pw <- lik_stuff$pw totnumpara = length(thetaMAP) # H is the Hessian at thetaMAP with reverse transformation H = matrix(0,nrow=totnumpara,ncol=totnumpara) # Transform so instead of -1 to 1 it is -Inf to Inf. Mostly in -5 to 5. 
PSTn= log((1+thetaMAP)/(1-thetaMAP)) # Reverse transformation thetav=(exp(PSTn)-1)/(exp(PSTn)+1) # Grad of reverse transformation function grad0 = CGGP_internal_gneglogpost(thetav,CGGP,y.thisloop, Xs=Xs, ys=ys.thisloop, HandlingSuppData=HandlingSuppData) * (2*(exp(PSTn))/(exp(PSTn)+1)^2) dimensions_that_need_fixing <- c() for(ipara in 1:totnumpara){ rsad = rep(0,totnumpara) rsad[ipara] =10^(-3) PSTn= log((1+thetaMAP)/(1-thetaMAP)) + rsad thetav=(exp(PSTn)-1)/(exp(PSTn)+1) PSTn2= log((1+thetaMAP)/(1-thetaMAP)) - rsad thetav2=(exp(PSTn2)-1)/(exp(PSTn2)+1) # There can be issues if gneglogpost can't be calculated at +/- epsilon, # happens when theta is at the edge of allowing matrix to be Cholesky # decomposed. Check here for that, use one side approx if only one grad # can be calculated. If both fail, no clue what to do. g_plus <- (CGGP_internal_gneglogpost(thetav,CGGP,y.thisloop, Xs=Xs, ys=ys.thisloop, HandlingSuppData=HandlingSuppData) * (2*(exp(PSTn))/(exp(PSTn)+1)^2)-grad0 )*10^(3)/2 g_minus <- (CGGP_internal_gneglogpost(thetav2,CGGP,y.thisloop, Xs=Xs, ys=ys.thisloop, HandlingSuppData=HandlingSuppData) * (2*(exp(PSTn))/(exp(PSTn)+1)^2)-grad0 )*10^(3)/2 if (all(is.finite(g_plus)) && all(is.finite(g_minus))) { H[ipara,] <- g_plus - g_minus } else { dimensions_that_need_fixing <- c(dimensions_that_need_fixing, ipara) # message(c("At least one was not finite, ", g_plus, g_minus)) if (all(is.finite(g_plus))) { H[ipara,] <- 2 * g_plus } else if (all(is.finite(g_minus))) { H[ipara,] <- -2 * g_minus } else { # stop("Having to set one to NaN, will probably break stuff") H[ipara,] <- NaN } } } # For any dimensions that gave issues, set them to unit vector here # Shouldn't affect eigen stuff for other dimensions? 
for (ipara in dimensions_that_need_fixing) { message(paste(c("Had to fix dimensions ", dimensions_that_need_fixing, " in CGGP_fit"))) H[ipara,] <- H[,ipara] <- 0 H[ipara, ipara] <- 1 } Hmat = H/2+t(H)/2 A = eigen(Hmat) cHa = (A$vectors)%*%diag(pmin(sqrt(pmax(1/(A$values),10^(-16))),4))%*%t(A$vectors) # Get posterior samples using Laplace approximation PST= log((1+thetaMAP)/(1-thetaMAP)) + cHa%*%matrix(rnorm(CGGP$numPostSamples*length(thetaMAP),0,1), nrow=length(thetaMAP)) thetaPostSamples = (exp(PST)-1)/(exp(PST)+1) # Now if there were any bad dimensions, we need to set all those # thetaPostSamples for that dimension to be the MAP for (ipara in dimensions_that_need_fixing) { message(paste(c("Changed thetaPostSamples for dims ", dimensions_that_need_fixing))) thetaPostSamples[ipara,] <- thetaMAP[ipara] } if(CGGP$supplemented){ # Cs = matrix(1,dim(CGGP$Xs)[1],CGGP$ss) # for (dimlcv in 1:CGGP$d) { # Loop over dimensions # V = CGGP$CorrMat(CGGP$Xs[,dimlcv], CGGP$xb, # thetaMAP[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara]) # Cs = Cs*V[,CGGP$designindex[,dimlcv]] # } # # Sigma_t = matrix(1,dim(CGGP$Xs)[1],dim(CGGP$Xs)[1]) # for (dimlcv in 1:CGGP$d) { # Loop over dimensions # V = CGGP$CorrMat(CGGP$Xs[,dimlcv], CGGP$Xs[,dimlcv], # thetaMAP[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara]) # Sigma_t = Sigma_t*V # } # # MSE_s = list(matrix(0,dim(CGGP$Xs)[1],dim(CGGP$Xs)[1]), # (CGGP$d+1)*(CGGP$maxlevel+1)) # for (dimlcv in 1:CGGP$d) { # for (levellcv in 1:max(CGGP$uo[1:CGGP$uoCOUNT,dimlcv])) { # MSE_s[[(dimlcv)*CGGP$maxlevel+levellcv]] = # (-CGGP_internal_postvarmatcalc(CGGP$Xs[,dimlcv],CGGP$Xs[,dimlcv], # CGGP$xb[1:CGGP$sizest[levellcv]], # thetaMAP[(dimlcv-1)*CGGP$numpara + # 1:CGGP$numpara], # CorrMat=CGGP$CorrMat)) # } # } # # for (blocklcv in 1:CGGP$uoCOUNT) { # ME_s = matrix(1,nrow=dim(Xs)[1],ncol=dim(Xs)[1]) # for (dimlcv in 1:CGGP$d) { # levelnow = CGGP$uo[blocklcv,dimlcv] # ME_s = ME_s*MSE_s[[(dimlcv)*CGGP$maxlevel+levelnow]] # } # Sigma_t = Sigma_t-CGGP$w[blocklcv]*(ME_s) # } # 
yhats = Cs%*%pw # # # Sti_resid = solve(Sigma_t,ys.thisloop-yhats) # Sti = solve(Sigma_t) # sigma2MAP = (sigma2MAP*dim(CGGP$design)[1] + # colSums((ys.thisloop-yhats)*Sti_resid)) / ( # dim(CGGP$design)[1]+dim(Xs)[1]) # # pw_adj_y = t(Cs)%*%Sti_resid # pw_adj <- CGGP_internal_calcpw(CGGP=CGGP, y=pw_adj_y, theta=thetaMAP) # # pw_uppadj = pw-pw_adj # supppw = Sti_resid supp_values <- CGGP_internal_calc_supp_pw_sigma2_Sti( CGGP, thetaMAP=thetaMAP, ys.thisloop=ys.thisloop, pw=pw, sigma2MAP=sigma2MAP, only_sigma2MAP=FALSE) supppw <- supp_values$supppw sigma2MAP <- supp_values$sigma2MAP Sti <- supp_values$Sti pw_uppadj<- supp_values$pw_uppadj } # Add all new variables to CGGP that are needed if (nopd==1) { # Only 1 output parameter dim, so just set them CGGP$thetaMAP <- thetaMAP CGGP$sigma2MAP <- sigma2MAP CGGP$pw <- pw CGGP$thetaPostSamples <- thetaPostSamples CGGP$cholSs = cholSs if (CGGP$supplemented) { CGGP$pw_uppadj <- pw_uppadj CGGP$supppw <- supppw CGGP$Sti = Sti CGGP$sigma2MAP <- sigma2MAP } } else { # More than 1 opd, so need to set as columns of matrix if (opdlcv==1) { # First time, initialize matrix/array for all CGGP$thetaMAP <- matrix(NaN, length(thetaMAP), nopd) if (length(sigma2MAP) != 1) { stop("Error: sigma2map should be a 1x1 matrix.") } CGGP$sigma2MAP <- numeric(nopd) CGGP$pw <- matrix(NaN, length(pw), nopd) # thetaPostSamples is matrix, so this is 3dim array below CGGP$thetaPostSamples <- array(data = NaN, dim=c(dim(thetaPostSamples), nopd)) CGGP$cholSs <- vector("list", nopd) } CGGP$thetaMAP[,opdlcv] <- thetaMAP CGGP$sigma2MAP[opdlcv] <- sigma2MAP CGGP$pw[,opdlcv] <- pw CGGP$thetaPostSamples[,,opdlcv] <- thetaPostSamples # CGGP$cholSs[,,opdlcv] <- cholSs CGGP$cholSs[[opdlcv]] <- cholSs if (CGGP$supplemented) { if (opdlcv==1) { # First time initialize all CGGP$pw_uppadj <- matrix(NaN, nrow(pw_uppadj), nopd) CGGP$supppw <- matrix(NaN, nrow(supppw), nopd) # Sti is matrix, so this is 3 dim array CGGP$Sti = array(NaN, dim=c(dim(Sti), nopd)) } 
CGGP$pw_uppadj[,opdlcv] <- pw_uppadj CGGP$supppw[,opdlcv] <- supppw CGGP$Sti[,,opdlcv] = Sti } } } # Clear old sigma2_samples. They will be recalculated when needed. CGGP$sigma2_samples <- NULL return(CGGP) } #' Calculate posterior variance #' #' @param x1 Points at which to calculate MSE #' @param x2 Levels along dimension, vector??? #' @param xo No clue what this is #' @param theta Correlation parameters #' @param CorrMat Function that gives correlation matrix #' for vector of 1D points. #' @param returndPVMC Should dPVMC be returned? #' @param returndiagonly Should only the diagonal be returned? #' #' @return Variance posterior # @export #' @noRd #' #' @examples #' CGGP_internal_postvarmatcalc(c(.4,.52), c(0,.25,.5,.75,1), #' xo=c(.11), theta=c(.1,.2,.3), #' CorrMat=CGGP_internal_CorrMatCauchySQT) CGGP_internal_postvarmatcalc <- function(x1, x2, xo, theta, CorrMat, returndPVMC = FALSE, returndiagonly=FALSE) { if(!returndiagonly && !returndPVMC){ S = CorrMat(xo, xo, theta) n = length(xo) cholS = chol(S) C1o = CorrMat(x1, xo, theta) CoinvC1o = backsolve(cholS,backsolve(cholS,t(C1o), transpose = TRUE)) C2o = CorrMat(x2, xo, theta) Sigma_mat = - t(CoinvC1o)%*%t(C2o) return(Sigma_mat) } else { stop("Full postvarmatcalc function was removed #25082") # Only the chunk above was ever used in our code, # the full version where the other options can be used # was moved to scratch/scratch_postvarmatcalc_fullversion.R } } #' Calculate sigma2 for all theta samples #' #' @param CGGP CGGP object #' #' @return All sigma2 samples ## @export #' @noRd CGGP_internal_calc_sigma2_samples <- function(CGGP) { nopd <- if (is.matrix(CGGP$thetaPostSamples)) {1} else {dim(CGGP$thetaPostSamples)[3]} if (is.null(CGGP[["y"]]) || length(CGGP$y)==0) { # Only supp data if (nopd == 1 && length(CGGP$sigma2MAP)==1) { # 1 opd and 1 od # Single output dimension as.matrix( apply(CGGP$thetaPostSamples, 2, function(th) { CGGP_internal_calc_supp_only_supppw_sigma2_Sti( 
CGGP=CGGP,thetaMAP=th,ys.thisloop=CGGP$ys, only_sigma2MAP=TRUE )$sigma2 } ) ) } else if (nopd == 1) { # 1 opd but 2+ od # MV output but shared parameters, so sigma2 is vector t( apply(CGGP$thetaPostSamples, 2, function(th) { CGGP_internal_calc_supp_only_supppw_sigma2_Sti( CGGP=CGGP,thetaMAP=th,ys.thisloop=CGGP$ys, only_sigma2MAP=TRUE )$sigma2 } ) ) } else { # 2+ opd, so must be 2+ od # MV output with separate parameters, so need to loop over # both samples and output dimension outer(1:CGGP$numPostSamples, 1:nopd, Vectorize( function(samplenum, outputdim) { CGGP_internal_calc_supp_only_supppw_sigma2_Sti( CGGP=CGGP,thetaMAP=CGGP$thetaPostSamples[,samplenum,outputdim], ys.thisloop=CGGP$ys[,outputdim], only_sigma2MAP=TRUE )$sigma2 } ) ) } } else if (!CGGP$supplemented) { if (nopd == 1 && length(CGGP$sigma2MAP)==1) { # 1 opd and 1 od # Single output dimension as.matrix( apply(CGGP$thetaPostSamples, 2, function(th) { CGGP_internal_calcsigma2(CGGP, CGGP$y, th )$sigma2 } ) ) } else if (nopd == 1) { # 1 opd but 2+ od # MV output but shared parameters, so sigma2 is vector t( apply(CGGP$thetaPostSamples, 2, function(th) { CGGP_internal_calcsigma2(CGGP, CGGP$y, th )$sigma2 } ) ) } else { # 2+ opd, so must be 2+ od # MV output with separate parameters, so need to loop over # both samples and output dimension outer(1:CGGP$numPostSamples, 1:nopd, Vectorize(function(samplenum, outputdim) { CGGP_internal_calcsigma2( CGGP, CGGP$y[,outputdim], CGGP$thetaPostSamples[,samplenum,outputdim] )$sigma2 }) ) } } else { # There is supplementary data if (nopd == 1 && length(CGGP$sigma2MAP)==1) { # 1 opd and 1 od # Single output dimension as.matrix( apply(CGGP$thetaPostSamples, 2, function(th) { CGGP_internal_calc_supp_pw_sigma2_Sti(CGGP=CGGP, y.thisloop=CGGP$y, thetaMAP=th, ys.thisloop=CGGP$ys, only_sigma2MAP=TRUE )$sigma2 } ) ) } else if (nopd == 1) { # 1 opd but 2+ od # MV output but shared parameters, so sigma2 is vector t( apply(CGGP$thetaPostSamples, 2, function(th) { 
CGGP_internal_calc_supp_pw_sigma2_Sti(CGGP, y.thisloop=CGGP$y, thetaMAP=th, ys.thisloop=CGGP$ys, only_sigma2MAP=TRUE )$sigma2 } ) ) } else { # 2+ opd, so must be 2+ od # MV output with separate parameters, so need to loop over # both samples and output dimension outer(1:CGGP$numPostSamples, 1:nopd, Vectorize(function(samplenum, outputdim) { CGGP_internal_calc_supp_pw_sigma2_Sti( CGGP=CGGP, y.thisloop=CGGP$y[,outputdim], thetaMAP=CGGP$thetaPostSamples[,samplenum,outputdim], ys.thisloop=CGGP$ys[,outputdim], only_sigma2MAP=TRUE )$sigma2 }) ) } } }
2da44a20117c7c734a95099d95a79a05bac6fb79
7d720d52d8342900573a45e40ca539e0a9137557
/man/make_eml.Rd
4989cbe11d9b6da171318cd306090fe49b6838b7
[ "CC0-1.0" ]
permissive
jtallantumich/EMLassemblyline
d723953e181850735f85eafa7bf0eb46be107b51
98346e63ed7715ef1a5d2f14c3ca0e88291538db
refs/heads/master
2021-08-15T20:03:58.240272
2017-11-16T22:21:29
2017-11-16T22:21:29
null
0
0
null
null
null
null
UTF-8
R
false
true
1,405
rd
make_eml.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/make_eml.R \name{make_eml} \alias{make_eml} \title{Make EML} \usage{ make_eml(path) } \arguments{ \item{path}{A path to the dataset working directory containing the completed metadata templates, \emph{eml_configuration.R}, \emph{datasetname_datatablename_catvars.txt} (if categorical variables are present), and \emph{geographic_coverage.txt} (if reporting detailed geographic coverage).} } \value{ Validation results printed to the \emph{Console}. An EML metadata file written to the dataset working directory titled \emph{packageID.xml}. } \description{ Translate user supplied metadata into the EML schema, validate the schema, and write to file. } \details{ If validation fails, open the EML document in a .xml editor to identify the source of error. Often the error is small and quickly resolved with the aid of an editors schema congruence checker. } \seealso{ \code{\link{import_templates}} to import metadata templates to the dataset working directory. \code{\link{view_instructions}} for instructions on completing the template files. \code{\link{define_catvars}} to create the categorical variables table (if the attributes table contains categorical variables). \code{\link{extract_geocoverage}} to extract detailed geographic coordinates of sampling sites. }
7c1f9ad2d3f0b75199b67d271a31754bbb779da0
b06a918eb2c1a3b147a124dd204a41dbbf12ed46
/R/plot.FSA.R
e87f32f610c556e0cf77132ba3a86d231dbe440a
[]
no_license
joshuawlambert/rFSA
712cd31dfa0ba7641b20d9120227e328d4dc7c6b
b0986bb2534f550f6b6a4215d107254c370910d9
refs/heads/master
2021-07-13T14:13:30.222459
2021-06-30T16:49:59
2021-06-30T16:49:59
95,580,239
10
0
null
null
null
null
UTF-8
R
false
false
1,166
r
plot.FSA.R
#' Diagnostic Plots for FSA solutions #' #' @param x FSA object to see diagnostic plots on. #' @param ask logical; if TRUE, the user is asked before each plot. See help(plot.lm). #' @param easy logical; should diagnostic plots be presented in easy to read format? #' @param ... arguments to be passed to other functions. #' @return diagnostic plots to plot window. #' @importFrom graphics par plot #' @importFrom stats AIC anova as.formula cov fitted formula glm influence lm predict resid var #' @importFrom utils capture.output tail #' @export #' #' @examples #' #use mtcars package see help(mtcars) #' data(mtcars) #' colnames(mtcars) #' fit<-lmFSA(formula="mpg~hp*wt",data=mtcars,fixvar="hp", #' quad=FALSE,m=2,numrs=10,save_solutions=FALSE,cores=1) #' plot(x=fit) plot.FSA <- function(x,ask = F,easy = T,...) { stopifnot(inherits(x, "FSA")) fm <- fitmodels(x) if (length(fm) < 2) { dm <- length(fm) } else dm <- 2 if (easy == F) { par(mfrow = c(1,4)) } else{ par(mfrow = c(dm,4)) } for (i in 1:length(fm)) { one<-capture.output(pfit<-print(x)) plot(fm[[i]],ask = ask,main = rownames(pfit)[i]) } }
0b407e53f1f9019b58b68456597bcc81d08cebea
dd8132404e8c7b028cb13cba904c50aace01c6a7
/swt/src/lib/swt/src/vt$cel.r
a774634430bee65d1fc59780369beaed8f581481
[]
no_license
arnoldrobbins/gt-swt
d0784d058fab9b8b587f850aeccede0305d5b2f8
2922b9d14b396ccd8947d0a9a535a368bec1d6ae
refs/heads/master
2020-07-29T09:41:19.362530
2019-10-04T11:36:01
2019-10-04T11:36:01
209,741,739
15
0
null
null
null
null
UTF-8
R
false
false
236
r
vt$cel.r
# vt$cel --- send a clear to end-of-line sequence integer function vt$cel (dummy) integer dummy include SWT_COMMON if (Tc_clear_to_eol (1) == EOS) return (ERR) send_str (Tc_clear_to_eol) return (OK) end
ee7a10aa3445087dad45138e4bc105831ac666fb
9d95dc6f45bd74a2c3c84163d2b914c74f568573
/man/TrenchForecast.Rd
cabba63a416a7f7c5d1453b7c8405c57e75a333a
[]
no_license
cran/ltsa
a50b94d096c10a2c331332d6d4c6a141a66e3576
0a03bbb7cf19e479dc77592ed09621eeb8afb470
refs/heads/master
2021-01-21T21:54:35.303595
2015-12-21T08:55:04
2015-12-21T08:55:04
17,697,221
0
0
null
null
null
null
UTF-8
R
false
false
4,396
rd
TrenchForecast.Rd
\name{TrenchForecast} \alias{TrenchForecast} \title{ Minimum Mean Square Forecast} \description{ Given time series of length n+m, the forecasts for lead times k=1,...,L are computed starting with forecast origin at time t=n and continuing up to t=n+m. The input time series is of length n+m. For purely out-of-sample forecasts we may take n=length(z). Note that the parameter m is inferred using the fact that m=length(z)-n. } \usage{ TrenchForecast(z, r, zm, n, maxLead, UpdateAlgorithmQ = TRUE) } \arguments{ \item{z}{time series data, length n+m } \item{r}{autocovariances of length(z)+L-1 or until damped out } \item{zm}{mean parameter in model } \item{n}{forecast origin, n } \item{maxLead}{ =L, the maximum lead time } \item{UpdateAlgorithmQ}{ = TRUE, use efficient update method, otherwise if UpdateAlgorithmQ=FALSE, the direct inverse matrix is computed each time} } \details{ The minimum mean-square error forecast of z[N+k] given time series data z[1],...,z[N] is denoted by \eqn{z_N(k)}{z_N(k)}, where N is called the forecast origin and k is the lead time. This algorithm computes a table for \eqn{z_N(k), N=n,\dots,n+m; k=1,\ldots,m}{z_N(k),N=n,...,n+m; k=1,...,m} The minimum mean-square error forecast is simply the conditional expectation of \eqn{z_{N+k}}{z_{N+k}} given the time series up to including time \eqn{t=N}{t=N}. This conditional expectation works out to the same thing as the conditional expectation in an appropriate multivariate normal distribution -- even if no normality assumption is made. See McLeod, Yu, Krougly (2007, eqn. 8). Similar remarks hold for the variance of the forecast. An error message is given if length(r) < n + L -1. } \value{ A list with components \item{Forecasts }{matrix with m+1 rows and maxLead columns with the forecasts} \item{SDForecasts }{matrix with m+1 rows and maxLead columns with the sd of the forecasts} } \references{ McLeod, A.I., Yu, Hao, Krougly, Zinovi L. (2007). 
Algorithms for Linear Time Series Analysis, Journal of Statistical Software. } \author{ A.I. McLeod } \note{ An error message is given if r is not a pd sequence, that is, the Toeplitz matrix of r must be pd. This could occur if you were to approximate a GLP which is near the stationary boundary by a MA(Q) with Q not large enough. In the bootstrap simulation experiment reported in our paper McLeod, Yu and Krougly (2007) we initially approximated the FGN autocorrelations by setting them to zero after lag 553 but in this case the ARMA(2,1) forecasts were always better. When we used all required lags of the acvf then the FGN forecasts were better as we expected. From this experience, we don't recommend setting high-order acf lags to zero unless the values are in fact very small. } \seealso{ \code{\link{TrenchInverse}} } \examples{ #Example 1. Compare TrenchForecast and predict.Arima #general script, just change z, p, q, ML z<-sqrt(sunspot.year) n<-length(z) p<-9 q<-0 ML<-10 #for different data/model just reset above out<-arima(z, order=c(p,0,q)) Fp<-predict(out, n.ahead=ML) phi<-theta<-numeric(0) if (p>0) phi<-coef(out)[1:p] if (q>0) theta<-coef(out)[(p+1):(p+q)] zm<-coef(out)[p+q+1] sigma2<-out$sigma2 #r<-var(z)*ARMAacf(ar=phi, ma=theta, lag.max=n+ML-1) #When r is computed as above, it is not identical to below r<-sigma2*tacvfARMA(phi, theta, maxLag=n+ML-1) F<-TrenchForecast(z, r, zm, n, maxLead=ML) #the forecasts are identical using tacvfARMA # #Example 2. Compare AR(1) Forecasts. Show how #Forecasts from AR(1) are easily calculated directly. #We compare AR(1) forecasts and their sd's. 
#Define a function for the AR(1) case AR1Forecast <- function(z,phi,n,maxLead){ nz<-length(z) m<-nz-n zf<-vf<-matrix(numeric(maxLead*m),ncol=maxLead) zorigin<-z[n:nz] zf<-outer(zorigin,phi^(1:maxLead)) vf<-matrix(rep(1-phi^(2*(1:maxLead)),m+1),byrow=TRUE,ncol=maxLead)/(1-phi^2) list(zf=zf,sdf=sqrt(vf)) } #generate AR(1) series and compare the forecasts phi<-0.9 n<-200 m<-5 N<-n+m z<-arima.sim(list(ar=phi), n=N) maxLead<-3 nr<-N+maxLead-1 r<-(1/(1-phi^2))*phi^(0:nr) ansT1<-TrenchForecast(z,r,0,n,maxLead) ansT2<-TrenchForecast(z,r,0,n,maxLead,UpdateAlgorithmQ=FALSE) ansAR1<-AR1Forecast(z,phi,n,maxLead) } \keyword{ ts }
0559d3c340d9b6f290e3160af0ece08ff092cd71
87c30bb64856dc8940105447abcb827f7629c468
/R/iddplacebo.spaghetti.R
488a547329d3b5a74b289f3d30e6e8f2793efc48
[]
no_license
carlbona/idd
c28f2856b25c8d085768dd1c998bd576442aff2e
888ccd68976bd6ad1bfa3211a870fd39a5243763
refs/heads/master
2021-05-05T12:22:59.283961
2018-06-10T17:37:35
2018-06-10T17:37:35
118,240,758
0
0
null
null
null
null
UTF-8
R
false
false
3,057
r
iddplacebo.spaghetti.R
#' @title Plot the estimated effects from the placebo studies.
#' @description \code{iddplacebo.spaghetti} takes an output object after \code{iddplacebo} and plots the estimated placebo effects using \code{ggplot2}.
#'
#' @param x Name of the \code{iddplacebo} output object.
#' @param mult Multiplier for the rates. Defaults to 100000.
#' @param rm Remove placebo studies with poor pre-intervention fit? Default=TRUE.
#' @param alpha controls the shaded significance region on the plot. Defaults to 0.05.
#'
#' @return Returns a ggplot containing the estimated effects from the placebo studies along with the results from the main analysis.
#' @examples
#' \dontrun{
#' data(simpanel)
#' placebo.out <- iddplacebo(eventvar="y",
#' popvar="pop",
#' idvar="age",
#' timevar="time",
#' postvar="post",
#' treatvar="treat",
#' data=simpanel,
#' iter=50)
#' plot.out <- iddplacebo.spaghetti(placebo.out)
#' }
#' @export
#'
iddplacebo.spaghetti <- function(x, mult=100000, rm=TRUE, alpha=0.05) {
  # Bind the non-standard-evaluation column names used inside subset()/aes()
  # so R CMD check does not flag them as undefined globals.
  post=NULL
  realtr=NULL
  effect=NULL
  subsample=NULL
  time=NULL
  # Unpack the iddplacebo output: per-study fit results and effect estimates.
  z <- x$Resdata
  x <- x$Effects
  # Scale effects to the requested rate denominator (default per 100,000).
  x$effect <- x$effect*mult
  # First post-intervention time point; marks the intervention on the plot.
  vl <- min(subset(x, post==1)$time)
  # Split placebo runs (realtr == 0) from the real treated unit (realtr == 1).
  x0 <- subset(x, realtr==0)
  x1 <- subset(x, realtr==1)
  # Empirical CDF of the pre-intervention RMSE: pvec close to 1 means the
  # placebo's pre-period fit is among the worst.
  fun <- stats::ecdf(z$RMSE_T0)
  z$pvec <- fun(z$RMSE_T0)
  x0 <- merge(z, x0, by=c("subsample"))
  # Optionally drop placebo studies whose pre-intervention fit is in the
  # worst alpha tail of the RMSE distribution.
  if (rm == TRUE) {
    x0 <- x0[which(x0$pvec<=(1-alpha)),]
  }
  # Single plot definition (previously duplicated verbatim in both the
  # rm==TRUE and rm==FALSE branches): gray spaghetti lines for placebos,
  # a black line for the real treated unit, dashed reference lines at
  # effect 0 and at the intervention time.
  p <- ggplot2::ggplot(x0, ggplot2::aes(y=effect, x=time)) +
    ggplot2::geom_line(ggplot2::aes(group=subsample), colour="gray80") +
    ggplot2::geom_line(data=x1, ggplot2::aes(y=effect, x=time)) +
    ggplot2::theme_bw() +
    ggplot2::theme(panel.grid.major = ggplot2::element_blank(),
                   panel.grid.minor = ggplot2::element_blank(),
                   axis.line = ggplot2::element_line(colour = "black")) +
    ggplot2::geom_hline(yintercept=0, linetype=2) +
    ggplot2::geom_vline(xintercept=vl, linetype=2) +
    ggplot2::xlab("Time") +
    ggplot2::ylab("Effect estimate") +
    ggplot2::ggtitle("Placebo studies") +
    ggplot2::theme(plot.title = ggplot2::element_text(size=12))
  return(p)
}
df82699bb21cee0e4d62287076eb205cd6ff4c20
291cc209f6d19bee19563973eef13f59ca147178
/smartSaveTest/testcase.R
9e3fa30534982ee007bd44531f9dfc1e7b37326d
[]
no_license
dwarak98/smartSaveCSV
470d9642081b2fcebfd7a268da516b28f616cba8
82c0849e5a3d9b01279d30b94705e5f81a0c39e4
refs/heads/master
2023-04-17T01:49:51.228246
2021-04-22T10:54:59
2021-04-22T10:54:59
323,888,237
0
0
null
2021-01-01T17:26:39
2020-12-23T11:48:33
HTML
UTF-8
R
false
false
1,272
r
testcase.R
source("helperMethods.R") # setwd(dirname(rstudioapi::isAvailable()::getActiveDocumentContext()$path)) library(lubridate) library(smartSaveCSV) library(readr) library(pracma) getWindPenetration <- function(df) { df %>% dplyr::filter(variable %in% c("wind_pen_actual", "wind_pen_STF", "wind_pen_MTF")) } createBlankCSV <- function(name) { file.create(name) } df <- getForecastVsActual() %>% getWindPenetration() month_year <- format(as.Date(Sys.Date()), "%m_%Y") name <- paste("Wpen_Forecast_Vs_Actual_", month_year, ".csv", sep = "") day_of_the_month <- format(as.Date(Sys.Date()), "%d") path <- paste("Data/", name, sep = "") if (day_of_the_month == 1) { createBlankCSV(path) write.csv(df, path, row.names = FALSE) } existingdf <- read_csv(path, guess_max = 5001) datecols <- c("Interval", "GMTIntervalEnd") # datecols <- c("Interval") for (dateColName in datecols) { existingdf[[dateColName]] <- parse_date_time(x = existingdf[[dateColName]], orders = c("mdy HM", "ymd HM", "mdy HMS", "ymd HMS")) df[[dateColName]] <- parse_date_time(x = df[[dateColName]], orders = c("mdy HMS", "ymd HMS")) } appendeddf <- smartSaveCSV::smartAppend(df, existingdf, "value") smartSaveCSV(df, existingdf, path, "value")
93e642ada10cf814d165c85d56f13eab09e52668
9467c3e8fa02b25193fa6d5214a34a03039839f5
/man/tvar.Rd
4080378f48286d7a4b37435ac01cf727a208a908
[]
no_license
vishalbelsare/dsp
3fa14c61597a7db017f329d587b175e72a15b401
2cfdfbc12fca5a9907c0c1a83c2050922ca5b385
refs/heads/master
2023-08-03T12:37:07.990452
2023-07-09T22:44:47
2023-07-09T22:44:47
126,933,461
0
0
null
2023-07-10T06:42:44
2018-03-27T05:28:15
R
UTF-8
R
false
true
3,795
rd
tvar.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mcmc_samplers.R \name{tvar} \alias{tvar} \title{MCMC Sampler for Time-Varying Autoregression} \usage{ tvar( y, p_max = 1, include_intercept = FALSE, evol_error = "DHS", D = 2, useObsSV = FALSE, nsave = 1000, nburn = 1000, nskip = 4, mcmc_params = list("mu", "yhat", "beta", "evol_sigma_t2", "obs_sigma_t2", "dhs_phi", "dhs_mean"), computeDIC = TRUE, verbose = TRUE ) } \arguments{ \item{y}{the \code{T x 1} vector of time series observations} \item{p_max}{the maximum order of lag to include} \item{include_intercept}{logical; if TRUE, include a time-varying intercept} \item{evol_error}{the evolution error distribution; must be one of 'DHS' (dynamic horseshoe prior), 'HS' (horseshoe prior), 'BL' (Bayesian lasso), or 'NIG' (normal-inverse-gamma prior)} \item{D}{degree of differencing (D = 1 or D = 2)} \item{useObsSV}{logical; if TRUE, include a (normal) stochastic volatility model for the observation error variance} \item{nsave}{number of MCMC iterations to record} \item{nburn}{number of MCMC iterations to discard (burin-in)} \item{nskip}{number of MCMC iterations to skip between saving iterations, i.e., save every (nskip + 1)th draw} \item{mcmc_params}{named list of parameters for which we store the MCMC output; must be one or more of: \itemize{ \item "mu" (conditional mean) \item "yhat" (posterior predictive distribution) \item "beta" (dynamic regression coefficients) \item "evol_sigma_t2" (evolution error variance) \item "obs_sigma_t2" (observation error variance) \item "dhs_phi" (DHS AR(1) coefficient) \item "dhs_mean" (DHS AR(1) unconditional mean) }} \item{computeDIC}{logical; if TRUE, compute the deviance information criterion \code{DIC} and the effective number of parameters \code{p_d}} \item{verbose}{logical; should R report extra information on progress?} } \value{ A named list of the \code{nsave} MCMC samples for the parameters named in \code{mcmc_params} } \description{ Run the MCMC 
for a time-varying autoregression with a penalty on first (D=1) or second (D=2) differences of each dynamic autoregressive coefficient. The penalty is determined by the prior on the evolution errors, which include: \itemize{ \item the dynamic horseshoe prior ('DHS'); \item the static horseshoe prior ('HS'); \item the Bayesian lasso ('BL'); \item the normal stochastic volatility model ('SV'); \item the normal-inverse-gamma prior ('NIG'). } In each case, the evolution error is a scale mixture of Gaussians. Sampling is accomplished with a (parameter-expanded) Gibbs sampler, mostly relying on a dynamic linear model representation. } \note{ The data \code{y} may NOT contain NAs. The data \code{y} will be used to construct the predictor matrix (of lagged values), which is not permitted to include NAs. } \examples{ \dontrun{ # Example 1: simdata = simUnivariate(signalName = "doppler", T = 128, RSNR = 7, include_plot = TRUE) y = simdata$y # Note: in general should subtract off the sample mean p = 6 # Lag out = tvar(y, p_max = p, include_intercept = FALSE, evol_error = 'DHS', D = 1, mcmc_params = list('mu', 'yhat', 'beta', 'obs_sigma_t2')) for(j in 1:p) plot_fitted(rep(0, length(y) - p), mu = colMeans(out$beta[,,j]), postY = out$beta[,,j]) plot_fitted(y[-(1:p)], mu = colMeans(out$mu), postY = out$yhat, y_true = simdata$y_true[-(1:p)]) spec_TF = post_spec_dsp(post_ar_coefs = out$beta, post_sigma_e = sqrt(out$obs_sigma_t2[,1]), n.freq = 100) image(x = 1:(length(y)-p), y = spec_TF$freq, colMeans(log(spec_TF$post_spec)), xlab = 'Time', ylab = 'Freqency', main = 'Posterior Mean of Log-Spectrum') } }
2b48b248dd819ba59853f2021281e035794b745f
da003b80b72879594a248ad1e1400cfae3ac8d97
/patch/lda.svi/R/lda_svi.R
758b5d1e3ccfd95cc4c28a09c6a3ab9137df8481
[]
no_license
maximilian-aigner/reddit-activity
f5aff4e6fe55ce7489ebb30a0ddffd31d3d11b2b
81e88d5067fa33d060abbefb7f703143a7d14177
refs/heads/master
2022-11-27T20:49:14.628150
2020-08-10T12:06:17
2020-08-10T12:06:17
null
0
0
null
null
null
null
UTF-8
R
false
false
3,791
r
lda_svi.R
#' Fit a Latent Dirichlet Allocation model to a text corpus #' #' @param dtm This must be a DocumentTermMatrix (with term frequency weighting) from the tm package. #' @param passes The number of passes over the whole corpus - how many times we update the local variables for each document. #' @param batchsize The size of the minibatches. #' @param maxiter The maximum iterations for the "E step" for each document (the updating of the per-document parameters within each minibatch). The default of 100 follows the reference implementation in python by the authors. #' @param eta Dirichlet prior hyperparameter for the document-specific topic proportions. #' @param alpha Dirichlet prior hyperparameter for the topic-specific term proportions. #' @param kappa learning rate parameter. Lower values give greater weight to later iterations. For guaranteed convergence to a local optimum, kappa must lie in the interval (0.5,1]. #' @param tau_0 learning rate parameter. Higher values reduce the influence of early iterations. #' @param K The number of topics #' @param tidy_output if true, the parameter estimates are returned as 'long' data frames; otherwise they are returned as matrices. #' @importFrom methods is #' @details The implementation here is based on the python implementation by Matthew D. Hoffman accompanying the paper #' @return A named list of length two. The element named 'beta' gives the proportions for the terms within the topics, while the element named 'theta' gives the proportions for the topics within the documents. If the tidy_output argument is true these are data frames in 'long' format; otherwise they are matrices. #' @references Hoffman, M., Bach, FM., and Blei, DM. (2010) 'Online Learning for Latent Dirichlet Allocation', _Conference and Workshop on Neural Information Processing Systems_ #' #' #' Hoffman, M., Blei, DM., Wang, C, and Paisley, J. (2013) 'Stochastic Variational Inference', _Journal of Machine Learning Research_. 
Preprint: https://arxiv.org/abs/1206.7051_ #' #' @examples #' library(topicmodels) #' data(AssociatedPress) #' ap_lda_fit <- lda_svi(AssociatedPress,passes=1,K=50) #' #I use a single pass because CRAN requires examples to run quickly; #' #generally one would use more. 20 often seems to be sufficient as a rule of thumb, #' #but it might be worth experimenting with more or fewer #' @export lda_svi <- function(dtm,passes=10,batchsize=256,maxiter=100,K,eta=1/K,alpha=1/K,kappa=0.7,tau_0=1024,tidy_output=TRUE){ if (is(dtm,"DocumentTermMatrix")){ if (!any(attr(dtm,'weighting') %in% c('term frequency','tf'))){ stop('The DocumentTermMatrix object must use term frequency weighting') } } doc_ids <- dtm$i - 1#the c++ code expects 0-indexed ids docs <- dtm$dimnames$Docs term_ids <- dtm$j - 1#the c++ code expect 0-indexed ids terms <- dtm$dimnames$Terms counts <- dtm$v res_list <- lda_online_cpp(doc_ids,term_ids,counts,K,passes,batchsize,maxiter=maxiter,eta=eta,alpha=alpha,tau_0=tau_0,kappa=kappa) gamma <- res_list$Gamma lambda <- res_list$Lambda colnames(gamma) <- seq(1:ncol(gamma))#topic labels rownames(gamma) <- unique(docs) colnames(lambda) <- unique(terms) rownames(lambda) <- seq(1:nrow(lambda)) # convert variational parameters to model parameters # (this follows from equation 2 in the NIPS paper) # Noting that the expectation of a Dirichlet(a) rv is a/sum(a) theta <- gamma beta <- lambda if (tidy_output){ for (i in seq(1,nrow(gamma))){ theta[i,] <- theta[i,]/sum(theta[i,]) } for (i in seq(1,nrow(lambda))){ beta[i,] <- lambda[i,]/sum(lambda[i,]) } theta <- reshape2::melt(theta) beta <- reshape2::melt(beta) colnames(beta) <- c('topic','term','value') colnames(theta) <- c('document','topic','value') } list('theta'=theta,'beta'=beta) }
4636127583afa2a317d0335f866f257eb7fe2a2c
93f3a242a09dd7a1afcd3e56b927a5488d93ea27
/R/tar_knitr.R
fa66cc48d30ac3256f1eb2c859052a42fabb7b4b
[ "MIT" ]
permissive
kendonB/targets
23c638169d69d224735ac82cadbf58ef49c86aeb
b6fb33662c4306b0a7457d6e69acb6003a6b8dfb
refs/heads/master
2023-04-12T04:09:58.681935
2020-07-19T02:27:52
2020-07-19T02:27:52
280,758,764
0
0
null
2020-07-18T23:45:03
2020-07-18T23:45:02
null
UTF-8
R
false
false
5,558
r
tar_knitr.R
#' @title Include a knitr or R Markdown report in a pipeline.
#' @export
#' @description Register a knitr or R Markdown report as part of a dynamic
#'   target. Relies on tidy evaluation to insert a special expression into
#'   the target's command when the target is defined.
#' @details `tar_knitr()` tells `targets` to look for dependency targets
#'   in the active code chunks of the report. These dependencies
#'   must be mentioned as literal symbols in explicit calls to
#'   [tar_load()] and [tar_read()]. This mechanism not only rerenders
#'   the report automatically when the dependencies change, but also
#'   allows you to run the report by itself (outside the pipeline)
#'   as long as a `_targets/` data store already exists in the current
#'   working directory and contains the data.
#' @section Working directory:
#'   The current working directory (i.e. `getwd()`) must contain the
#'   `_targets/` data store not only when `tar_knitr()` is evaluated,
#'   but also when the actual report is run. The easiest way to
#'   deal with this is just to keep all your R Markdown source files
#'   at the root directory of the project. If you need to keep your report
#'   somewhere else, such as a subdirectory,
#'   consider setting `knit_root_dir = getwd()` in `rmarkdown::render()` or
#'   `knitr::opts_knit$set(root.dir = "your_project_root_directory")`
#'   in an early code chunk of the report itself.
#' @return A language object that represents the dependencies and
#'   return value of a `knitr` source dynamic file.
#' @param path Character of length 1, path to the `knitr` or
#'   R Markdown source file.
#' @examples
#' \dontrun{
#' tar_dir({
#' # Here is a path to an example R Markdown report that depends on
#' # targets data, data2, and analysis.
#' path <- system.file("example_knitr_report.Rmd", package = "targets")
#' # `tar_knitr()` defines a piece of code with the dependencies
#' # of the report as symbols so the code analyzer can detect them.
#' expr <- tar_knitr(path)
#' expr
#' # If you evaluate the expression, you get the path to the report,
#' # which is exactly what we want for dynamic files (`format = "file"`).
#' eval(expr, envir = list(analysis = 1, data = 1, data2 = 1))
#' # In your actual pipeline, write a dynamic file (`format = "file"`) target
#' # to run the report and return the paths to the source and output.
#' # (`!!tar_knitr("report.Rmd")` returns `"report.Rmd"`, and it requires
#' # `tidy_eval` to be `TRUE` in [tar_target()] (default).
#' file.copy(path, "report.Rmd")
#' tar_script({
#'   tar_options()
#'   tar_pipeline(
#'     tar_target(data, create_data()), # You define create_data().
#'     tar_target(analysis, analyze_data(data)), # You define analyze_data().
#'     tar_target(
#'       report, {
#'         rmarkdown::render("report.Rmd")
#'         c(!!tar_knitr("report.Rmd"), "report.html")
#'       },
#'       format = "file"
#'     )
#'   )
#' })
#' # In the graph below,
#' # notice how report depends on data and analysis
#' # because of the calls to tar_load() and tar_read() in the report
#' # and the use of !!tar_knitr() in the target.
#' tar_visnetwork()
#' })
#' }
tar_knitr <- function(path) {
  # Input validation: knitr must be installed and `path` must be a single
  # existing character path.
  assert_package("knitr", "tar_knitr() requires the knitr package.")
  assert_scalar(path, "tar_knitr() only takes one file at a time.")
  assert_chr(path, "path argument of tar_knitr() must be a character.")
  assert_path(path, paste("the path", path, "for tar_knitr() does not exist."))
  # Parse the report's tangled code chunks and collect, as symbols, the
  # target names mentioned in tar_load()/tar_read() calls.
  expr <- knitr_expr(path)
  deps <- rlang::syms(knitr_deps(expr))
  # Build `list(<dep symbols>, path = path)[["path"]]`: evaluating it yields
  # just the path (what a `format = "file"` target needs), while the
  # dependency symbols remain visible to the static code analyzer.
  out <- substitute(
    list(deps = deps, path = path)[["path"]],
    env = list(deps = deps, path = path)
  )
  # Round-trip through deparse/parse to return a clean language object.
  safe_parse(safe_deparse(out))
}

# Parse the tangled code of the report at `path` into an R expression,
# rethrowing parse failures as a validation error that names the report.
knitr_expr <- function(path) {
  tryCatch(
    parse(text = knitr_code(path)),
    error = function(e) {
      throw_validate(
        "Could not parse knitr report ",
        path,
        " to detect dependencies: ",
        conditionMessage(e)
      )
    }
  )
}

# Extract ("tangle") the report's code chunks as a character vector of R code.
# Output is captured in an in-memory text connection; the
# `knitr.purl.inline` option also extracts inline code.
knitr_code <- function(path) {
  handle <- basename(tempfile())
  connection <- textConnection(handle, open = "w", local = TRUE)
  on.exit(close(connection))
  withr::with_options(
    new = list(knitr.purl.inline = TRUE),
    code = knitr::knit(path, output = connection, tangle = TRUE, quiet = TRUE)
  )
  textConnectionValue(connection)
}

# Walk the parsed report code and return the unique target names recorded
# from tar_load()/tar_read() calls.
knitr_deps <- function(expr) {
  counter <- counter_init()
  walk_expr(expr, counter)
  counter_get_names(counter)
}

# Recursively traverse an R language object, dispatching calls to walk_call()
# and descending into function bodies, pairlists, and other recursive nodes.
walk_expr <- function(expr, counter) {
  if (!length(expr)) {
    return()
  } else if (is.call(expr)) {
    walk_call(expr, counter)
  } else if (typeof(expr) == "closure") {
    walk_expr(formals(expr), counter = counter)
    walk_expr(body(expr), counter = counter)
  } else if (is.pairlist(expr) || is.recursive(expr)) {
    lapply(expr, walk_expr, counter = counter)
  }
}

# If the call is tar_load()/tar_read() (bare or namespace-qualified with
# `targets::`/`targets:::`), record its target names; then keep walking the
# call's arguments for nested calls.
walk_call <- function(expr, counter) {
  name <- safe_deparse(expr[[1]], backtick = FALSE)
  if (name %in% paste0(c("", "targets::", "targets:::"), "tar_load")) {
    register_load(expr, counter)
  }
  if (name %in% paste0(c("", "targets::", "targets:::"), "tar_read")) {
    register_read(expr, counter)
  }
  lapply(expr, walk_expr, counter = counter)
}

# Record the literal symbols in the `names` argument of a tar_load() call.
# match.call() normalizes positional arguments to named ones first.
register_load <- function(expr, counter) {
  expr <- match.call(targets::tar_load, as.call(expr))
  names <- all.vars(expr$names, functions = FALSE, unique = TRUE)
  counter_set_names(counter, names)
}

# Record the literal symbols in the `name` argument of a tar_read() call.
register_read <- function(expr, counter) {
  expr <- match.call(targets::tar_read, as.call(expr))
  names <- all.vars(expr$name, functions = FALSE, unique = TRUE)
  counter_set_names(counter, names)
}
43ed068ec564ecf214872a9f8593db1756f8dc0c
44e7795c40f1fa82ccf60a57af22a320a3b0fcfd
/src/julendat/rmodules/gfNonNaStations.R
380ba503321c752e94cc1d58857a93d607f2fb25
[]
no_license
environmentalinformatics-marburg/julendat
3217bf3b871fa0f3b12d2979edcead809b47fe2f
3496c21f758bfc0a06bac8e7058a3204d6a3a42a
refs/heads/master
2016-08-07T07:37:13.384975
2014-12-02T18:07:37
2014-12-02T18:07:37
null
0
0
null
null
null
null
UTF-8
R
false
false
2,839
r
gfNonNaStations.R
gfNonNaStations <- function(data.indep, pos.na, prm.dep = "Ta_200", ...) {
################################################################################
##
## This program takes a known NA record from an incomplete monthly
## data set of a given plot as input argument and identifies all
## other plots that provide valid records at this particular date
##
## parameters are as follows:
##
## data.indep (list): List of data sets of independent plots.
## Must be composed of ki.data objects.
## pos.na (numeric): Gap in data set of dependent plot including
## starting point, endpoint, and length of the gap.
## prm.dep (character): Parameter under investigation.
## ... Further arguments to be passed
##
################################################################################
##
## Copyright (C) 2013 Florian Detsch, Tim Appelhans
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
## Please send any comments, suggestions, criticism, or (for our sake) bug
## reports to florian.detsch@geo.uni-marburg.de
##
################################################################################

  cat("\n",
      "Module : gfNonNaStations",
      "\n",
      "Author : Florian Detsch <florian.detsch@geo.uni-marburg.de>, Tim Appelhans <tim.appelhans@gmail.com>",
      "Version : 2013-01-08", "\n",
      "License : GNU GPLv3, see http://www.gnu.org/licenses/", "\n", "\n")

########## FUNCTION BODY #######################################################

  # Index positions of the gap (start/end inclusive); the gap length in
  # pos.na[3] is not needed here.
  gap <- pos.na[1]:pos.na[2]

  # For each independent plot, TRUE if none of the gap positions coincides
  # with an NA in the requested parameter, i.e. the plot provides valid
  # records for the whole gap. vapply replaces the original
  # lapply + intermediate-list + unlist chain and enforces a logical result.
  data.avl <- vapply(data.indep, function(plot.i) {
    na.pos <- which(is.na(plot.i@Parameter[[prm.dep]]))
    !any(gap %in% na.pos)
  }, logical(1))

  # Unique plot identifiers, in the same order as data.indep.
  plot.avl <- vapply(data.indep, function(plot.i) {
    plot.i@PlotId$Unique
  }, character(1))

  # Return data.frame containing plot names and information about availability
  # of the particular record. The unlist() wrappers are kept so the
  # auto-generated column names match the original implementation.
  return(data.frame(unlist(plot.avl), unlist(data.avl), stringsAsFactors=FALSE))
}
4ba5dd51c39811706ba960be7192643e05c160a7
a97353fe3ce43bc159c9ec3a81e1b927b8625e59
/source/PDTX_structure.R
e7086b0271e83b642c8d9ab19421772de73ef0a8
[]
no_license
bhklab/BreastPDTX
f5829f0933139d7237f82121c57a3965601c797c
4d7f4ea4ecaa57dc2d7db08e72d6ed8bbe7ebd57
refs/heads/master
2022-01-02T11:31:35.605584
2021-12-22T23:57:55
2021-12-22T23:57:55
153,497,800
0
1
null
null
null
null
UTF-8
R
false
false
4,762
r
PDTX_structure.R
# Builds a per-sample structure table (patient, sample type, passage numbers,
# replicate info) for the Breast Cancer PDTX cohort and writes it to CSV.
#
# NOTE(review): this script appears to merge two eras of code. The first
# section reads cell_annotation_all.csv (column `unique.cellid`), but the
# later sections read `samples$tmp` AFTER it is set to NULL and reference a
# column `samples$V1` that this input does not define (it matches the older
# "PDX SAMPLE LIST.csv" input described below). As written, the loops after
# the first write.csv() will fail; confirm intent before running end-to-end.

# Get sample names
## Run the following on mordor
## export RAW_DIR="/mnt/work1/users/bhklab/Data/Breast_Cancer_PDTX"
## ls $RAW_DIR | grep ".bam" | egrep -v '.bai|RRBS|Shall' | cut -f1 -d"."
## I like to copy the list and work on my local machine, but of course you can work on the cluster as well
## If you copy it to your local machine, name it "PDX SAMPLE LIST.csv"
library(stringr)
setwd("~/Desktop/BreastPDTX/data/results/sensitivity")
samples <- read.csv(file="~/Desktop/BreastPDTX/data/cell_annotation_all.csv", header=TRUE)
samples$X <- NULL; samples$BreastPDTX <- NULL

# Assign patient IDs
# Sample ids look like "<PATIENT>-<SUFFIX>"; the suffix encodes type/passage.
samples$MODEL <- sapply(strsplit(as.character(samples$unique.cellid), split="-"), function(x) x[1])
samples$tmp <- sapply(strsplit(as.character(samples$unique.cellid), split="-"), function(x) x[2])

# Assign sample types from letters in the suffix.
# Order matters: a suffix containing both X and C (e.g. "X2C1") ends as PDC
# because the later ifelse() overwrites earlier matches.
samples$TYPE <- ""
samples$TYPE <- ifelse(grepl("N", samples$tmp), "NORMAL", samples$TYPE)
samples$TYPE <- ifelse(grepl("T", samples$tmp), "TUMOR", samples$TYPE)
samples$TYPE <- ifelse(grepl("X", samples$tmp), "PDX", samples$TYPE)
samples$TYPE <- ifelse(grepl("C", samples$tmp), "PDC", samples$TYPE)

# Is the sample a replicate?
samples$REPLICATE <- ifelse(grepl("R", samples$tmp), "YES", "NO")
# NOTE(review): tmp is dropped here, but every loop below still reads
# samples$tmp — with tmp NULL those `if (grepl(...))` conditions have length
# zero and error. Presumably this NULL/write.csv pair belongs at the end.
samples$tmp <- NULL
rownames(samples) <- samples$unique.cellid
write.csv(samples, file="~/Desktop/BreastPDTX/data/cell.csv")

# Assign PDTC passage number (digits after "C" in the suffix)
samples$C <- ""
for (i in 1:nrow(samples)) {
  ss <- samples$tmp[i]
  if (grepl("C", ss)==FALSE) {
    samples$C[i] <- NA
  } else {
    samples$C[i] <- strsplit(ss, "C")[[1]][2]
  }
}
# A trailing "C" with no digits means the passage number is unknown.
for (i in 1:nrow(samples)) {
  ss <- samples$tmp[i]
  if (grepl("C$", ss)==TRUE) {
    samples$C[i] <- "PDC PASSAGE NUMBER NOT CLEAR"
  }
}
## Take care of normal and tumor samples
## You don't have to run this; I just like to be redundant
for (i in 1:nrow(samples)) {
  nn <- samples$tmp[i]
  if (grepl("N", nn)==TRUE) {
    samples$C[i] <- NA
  }
}
for (i in 1:nrow(samples)) {
  tt <- samples$tmp[i]
  if (grepl("T", tt)==TRUE) {
    samples$C[i] <- NA
  }
}

# Assign replicate number (digits after "R" in the suffix)
samples$REPLICATE_NUMBER <- ""
for (i in 1:nrow(samples)) {
  rr <- samples$tmp[i]
  if (grepl("R", rr)==TRUE) {
    samples$REPLICATE_NUMBER[i] <- strsplit(rr, "R")[[1]][2]
  }
}
# A trailing "R" with no digits means the replicate number is unknown.
for (i in 1:nrow(samples)) {
  rr <- samples$tmp[i]
  if (grepl("R$", rr)==TRUE) {
    samples$REPLICATE_NUMBER[i] <- "REPLICATE NUMBER NOT CLEAR"
  }
}

# Assign PDTX passage number (digits after "X", before any "C"/"R" suffix)
samples$X <- ""
for (i in 1:nrow(samples)) {
  x <- samples$tmp[i]
  if (grepl("X[0-9]+$", x)==TRUE) {
    samples$X[i] <- strsplit(x, "X")[[1]][2]
  }
}
for (i in 1:nrow(samples)) {
  xx <- samples$tmp[i]
  if (grepl("C", xx)==TRUE) {
    samples$X[i] <- gsub("X", "", strsplit(xx, "C")[[1]][1])
  }
}
for (i in 1:nrow(samples)) {
  yy <- samples$tmp[i]
  if (grepl("R", yy)==TRUE) {
    samples$X[i] <- gsub("X", "", strsplit(yy, "R")[[1]][1])
  }
}
## Take care of normal and tumor samples
for (i in 1:nrow(samples)) {
  n <- samples$tmp[i]
  if (grepl("N", n)==TRUE) {
    samples$X[i] <- NA
  }
}
for (i in 1:nrow(samples)) {
  t <- samples$tmp[i]
  if (grepl("T", t)==TRUE) {
    samples$X[i] <- NA
  }
}

# Patient CAMBMT1 is a special case, where 5 samples were taken from 5 different spots of 1 tumor, and from each of which a single PDTX was generated
# NOTE(review): these loops use samples$V1, which is not created anywhere in
# this script (the id column here is unique.cellid) — likely legacy code for
# the "PDX SAMPLE LIST.csv" input; verify before use.
samples$CC <- ""
for (i in 1:nrow(samples)) {
  cc <- samples$V1[i]
  if (grepl("CAMBMT1", cc)==TRUE) {
    samples$CC[i] <- strsplit(as.character(cc), split="-")[[1]][2]
  }
}
samples$XX <- ""
for (i in 1:nrow(samples)) {
  xx <- samples$V1[i]
  if (grepl("CAMBMT1", xx)==TRUE) {
    samples$XX[i] <- strsplit(as.character(xx), split="-")[[1]][3]
  }
}
## Assign corrected sample types
for (i in 1:nrow(samples)) {
  ct <- samples$XX[i]
  if (grepl("X", ct)==TRUE) {
    samples$TYPE[i] <- "PDX"
  }
}
## Assign PDTX passage number
for (i in 1:nrow(samples)) {
  ct <- samples$XX[i]
  if (grepl("X", ct)==TRUE) {
    samples$X[i] <- strsplit(ct, "")[[1]][2]
  }
}
## Assign tumor replicate number
samples$CAMBMT1_TUMOR_NUMBER <- ""
for (i in 1:nrow(samples)) {
  ct <- samples$CC[i]
  if (grepl("T", ct)==FALSE) {
    samples$CAMBMT1_TUMOR_NUMBER[i] <- NA
  } else {
    samples$CAMBMT1_TUMOR_NUMBER[i] <- strsplit(ct, "")[[1]][2]
  }
}

# Make the summary data frame and write it out.
PDTX_STRUCTURE <- cbind(as.vector(samples$V1), samples$MODEL, samples$TYPE, samples$CAMBMT1_TUMOR_NUMBER, samples$X, samples$C, samples$REPLICATE, samples$REPLICATE_NUMBER)
PDTX_STRUCTURE <- as.data.frame(PDTX_STRUCTURE)
colnames(PDTX_STRUCTURE) <- c("SAMPLE", "PATIENT", "TYPE", "CAMBMT1 TUMOR PASSAGE NUMBER", "PDX PASSAGE NUMBER", "PDC PASSAGE NUMBER", "IS REPLICATE?", "REPLICATE NUMBER")
write.csv(PDTX_STRUCTURE, file="BCaPE PDX STRUCTURE.csv")
7ffe9c097c073ba380dac275384d43f35af6462f
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/pmxTools/examples/calc_ss_3cmt_linear_oral_1_lag.Rd.R
a80b155585f06c750a6f737bda0fa0faade8c8c9
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
410
r
calc_ss_3cmt_linear_oral_1_lag.Rd.R
library(pmxTools) ### Name: calc_ss_3cmt_linear_oral_1_lag ### Title: Calculate C(t) for a 3-compartment linear model at steady-state ### with first-order oral dosing with a lag time ### Aliases: calc_ss_3cmt_linear_oral_1_lag ### ** Examples Ctrough <- calc_ss_3cmt_linear_oral_1_lag(t = 11.75, CL = 3.5, V1 = 20, V2 = 500, V3 = 200, Q2 = 0.5, Q3 = 0.05, ka = 1, dose = 100, tau=24, tlag = 1.5)
09f2e580dd562364d7cf9d001e3ccc5892659fe4
1301c6e3631c0308b0c8c0710ece40b846566db9
/inst/shiny/server.R
e9da1c6f0851df53d9e52881c2a56d1a4540ec29
[]
no_license
HQData/httkgui
ae4370c64c8350669953a37bec968778640f66c5
69646dd004306f77c786a67ef4baa8a677fa5d4c
refs/heads/master
2020-04-28T00:59:31.608231
2019-01-24T17:56:36
2019-01-24T17:56:36
null
0
0
null
null
null
null
UTF-8
R
false
false
23,794
r
server.R
library(ggplot2) library(dplyr) library(tidyr) # library(plotly) shiny::shinyServer(function(input, output, session) { source("plot_functions.R", local = TRUE) source("calculate_functions.R", local = TRUE) parameter_names <- c( "BW" = "Body Weight, kg.", "Clmetabolismc" = "Hepatic Clearance, L/h/kg BW.", "Fgutabs" = "Fraction of the oral dose absorbed, i.e. the fraction of the dose that enters the gutlumen.", "Funbound.plasma" = "Fraction of plasma that is not bound.", "Fhep.assay.correction" = "The fraction of chemical unbound in hepatocyte assay using the method of Kilford et al. (2008)", "hematocrit" = "Percent volume of red blood cells in the blood.", "kdermabs" = "Rate that chemical is transferred from the skin to the blood, 1/h.", "Kgut2pu" = "Ratio of concentration of chemical in gut tissue to unbound concentration in plasma.", "kgutabs" = "Rate that chemical enters the gut from gutlumen, 1/h.", "kinhabs" = "Rate that the chemical is transferred from the lungs to the blood, 1/h.", "Kkidney2pu" = "Ratio of concentration of chemical in kidney tissue to unbound concentration in plasma.", "Kliver2pu" = "Ratio of concentration of chemical in liver tissue to unbound concentration in plasma.", "Klung2pu" = "Ratio of concentration of chemical in lung tissue to unbound concentration in plasma.", "Krbc2pu" = "Ratio of concentration of chemical in red blood cells to unbound concentration in plasma.", "Krest2pu" = "Ratio of concentration of chemical in rest of body tissue to unbound concentration in plasma.", "million.cells.per.gliver" = "Millions cells per gram of liver tissue.", "MW" = "Molecular Weight, g/mol.", "Qcardiacc" = "Cardiac Output, L/h/kg BW^3/4.", "Qgfrc" = "Glomerular Filtration Rate, L/h/kg BW^3/4, volume of fluid filtered from kidney and excreted.", "Qgutf" = "Fraction of cardiac output flowing to the gut.", "Qkidneyf" = "Fraction of cardiac output flowing to the kidneys.", "Qliverf" = "Fraction of cardiac output flowing to the liver.", "Rblood2plasma" 
= "The ratio of the concentration of the chemical in the blood to the concentration in the plasma.", "Vartc" = "Volume of the arteries per kg body weight, L/kg BW.", "Vgutc" = "Volume of the gut per kg body weight, L/kg BW.", "Vkidneyc" = "Volume of the kidneys per kg body weight, L/kg BW.", "Vliverc" = "Volume of the liver per kg body weight, L/kg BW.", "Vlungc" = "Volume of the lungs per kg body weight, L/kg BW.", "Vrestc" = "Volume of the rest of the body per kg body weight, L/kg BW.", "Vvenc" = "Volume of the veins per kg body weight, L/kg BW.", "Vmax" = "Maximal velocity, []", "km" = "Michaelis constant" ) additional_parameters <- c( "KTS" = "KTS", "FR" = "FR", "Clint" = "Clint" ) observeEvent(input$use_add, { updateTabsetPanel(session, "main_panel", selected = ifelse(input$use_add == 1, "add compound", "inputs summary") ) }) # compound information (define population) -------------------------------- compound_summary <- reactive({ filter(chem.physical_and_invitro.data, Compound == input$compound) %>% select(Compound, CAS, SMILES.desalt, logP, pKa_Donor, pKa_Accept) }) output$compound_table <- renderTable({ compound_summary() }) # observeEvent(input$custom_params, { # if(!input$custom_params && exists("custom_param_values")) # rm(custom_param_values, envir=.GlobalEnv) # }) observeEvent(input$population_new_submit, { #custom_subpopulation is used just for user display #populations_list is created (for actual calculations) later on #on first click set them to empty! 
if(input$population_new_submit == 1) { custom_subpopulation <<- data.frame() populations_list <<- list() } #update the table that the user sees custom_subpopulation_newdata <- data.frame( "name"=input$population_new_name, "N"=input$population_new_N, "type"=input$population_new_vartype, "multiplier"=input$population_new_multiplier, "CV"=input$population_new_cv) custom_subpopulation <<- rbind(custom_subpopulation, custom_subpopulation_newdata) #update the list that guides the simulations newlist <- list( #fill based on the CV and means provided via custom parameters param_to_override = list( # "Average BW" = c("mean" = 75, "cv" = 0) ), param_to_vary_after = data.frame( "names" = c("Clmetabolismc", "CLmetabolism_gut", "CLmetabolism_kidney"), "cv" = input$population_new_cv, "multiplier" = input$population_new_multiplier), N = input$population_new_N, "name"=input$population_new_name) # if(input$population_new_vartype == "tk_physbio") # newlist$param_to_vary_before <- TRUE populations_list[[length(populations_list) + 1]] <<- newlist #clean the inputs updateTextInput(session, "population_new_name", value = paste("Population", length(populations_list) + 1)) updateNumericInput(session, "population_new_cv", value = .3) updateNumericInput(session, "population_new_N", value = 100) # updateNumericInput(session, "population_new_vartype", value = 0) updateNumericInput(session, "population_new_multiplier", value = 1) }) # custom_param_values <- data.frame("parameter"=c(), "description"=c(), "value"=c(), # "MC 2.5%"=c(), "MC mean"=c(), "MC 97.5%"=c()) output$custom_subpopulation_table <- renderTable({ if((input$population_new_submit > 0) && exists("custom_subpopulation")) return(custom_subpopulation) }) #experiemental data display: output$experimental_data_table <- renderTable({ experimental_data() }) output$model_visual <- renderImage({ list(src = "pbtk_model.png", alt = "PBTK model schematic", width = 300) }, deleteFile = FALSE) # model parameters --------- 
observeEvent(input$add_submit, { if(input$use_add) { # browser() my.new.data <- data.frame( 'Compound' = input$add_compound, 'CAS' = input$add_cas, 'MW' = input$add_mw, 'logp'= input$add_logp, 'funbound' = input$add_funbound, 'fgutabs' = input$add_fgutabs, 'clint' = input$add_clint, 'KTS' = input$add_kts, 'FR' = input$add_fr, 'vmax' = input$add_vmax, 'km' = input$add_km, 'pKa_donor' = input$add_pka_donor, 'pKa_accept' = input$add_pka_accept) nna.list <- as.list(na.omit(c( 'Compound' = 'Compound', 'CAS' = "CAS", 'MW' = ifelse(!input$add_mw_na, "MW", NA), 'logP' = ifelse(!input$add_logp_na, "logp", NA), 'Funbound.plasma' = ifelse(!input$add_funbound_na, "funbound", NA), 'Fgutabs' = ifelse(!input$add_fgutabs_na, "fgutabs", NA), 'Clint' = ifelse(!input$add_clint_na, "clint", NA), 'KTS' = ifelse(!input$add_kts_na, "KTS", NA), 'FR' = ifelse(!input$add_fr_na, "FR", NA), 'Vmax' = ifelse(!input$add_vmax_na, "vmax", NA), 'km' = ifelse(!input$add_km_na, "km", NA), 'pKa_Donor' = ifelse(!input$add_pka_donor_na, "pKa_donor", NA), 'pKa_Accept' = ifelse(!input$add_pka_accept_na, "pKa_accept", NA) ))) # 'logMA', 'Clint', 'Clint.pValue', 'Funbound.plasma', 'Fgutabs' chem.physical_and_invitro.data_new <<- add_chemtable(my.new.data, current.table=chem.physical_and_invitro.data, data.list=nna.list, species=input$species, reference=input$add_reference, overwrite = TRUE) } else { } }) observeEvent(input$custom_params, { if(!input$custom_params && exists("custom_param_values")) # browser() rm(custom_param_values, envir=.GlobalEnv) }) observeEvent(input$cparams_submit, { #add a row: newdata <- data.frame("parameter"=input$cparams_select, "description"=c(parameter_names, additional_parameters)[input$cparams_select], "value"=input$cparams_value, "mc.cv"=input$cparams_cv) # browser if(exists("custom_param_values")) { #remove the last existing value if it was in there custom_param_values <<- custom_param_values[custom_param_values$parameter != input$cparams_select,] custom_param_values <<- 
rbind(custom_param_values, newdata) } else { custom_param_values <<- newdata } #clean the inputs updateNumericInput(session, "cparams_value", value = 0) }) # custom_param_values <- data.frame("parameter"=c(), "description"=c(), "value"=c(), # "MC 2.5%"=c(), "MC mean"=c(), "MC 97.5%"=c()) output$custom_param_table <- renderDataTable({ input$cparams_submit if(exists("custom_param_values")) return(custom_param_values) }) mc_cv <- reactive(c(`Total Body Water` = input$cv.water, `Plasma Volume` = input$cv.plasma, `Cardiac Output` = input$cv.cardiac, `Average BW` = input$cv.bw, `Total Plasma Protein` = input$cv.tpp, `Plasma albumin` = input$cv.albumin, `Plasma a-1-AGP` = input$cv.a1agp, Hematocrit = input$cv.hematocrit, Urine = input$cv.urine, Bile = input$cv.bile, GFR = input$cv.gfr, `Average Body Temperature` = input$cv.abt )) #this returns one set of parameters parameters <- reactive({ param_list <- list("chem.cas"=NULL,"chem.name" = input$compound, "species" = input$species, "default.to.human" = F, "tissuelist" = list(liver=c("liver"), kidney=c("kidney"), lung=c("lung"), gut=c("gut")), "force.human.clint.fub" = F, "clint.pvalue.threshold" = 0.05 # monte.carlo=FALSE ) if(input$use_cas) { param_list$chem.cas <- input$cas param_list$chem.name <- NULL } if(input$use_add && input$add_submit) { chem.physical_and_invitro.data <<- chem.physical_and_invitro.data_new param_list$chem.name <- paste(toupper(substr(input$add_compound, 1, 1)), substr(input$add_compound, 2, nchar(input$add_compound)), sep="") } input$cparams_submit inits <- do.call(parameterize_pbtk, param_list) #update if user supplied custom values if(exists("custom_param_values") && nrow(custom_param_values) > 0) { #this part deals with values to update in inits ONLY: which_are_inits <- custom_param_values$parameter %in% names(parameter_names) which_are_additional <- custom_param_values$parameter %in% names(additional_parameters) if(any(which_are_additional)) { #forcing of FR, KTS, Clint torep <- 
custom_param_values$value[which_are_additional] names(torep) <- custom_param_values$parameter[which_are_additional] param_list$override.input <- torep inits <- do.call(parameterize_pbtk, param_list) #overwrite previous calc } torep <- custom_param_values$value[which_are_inits] names(torep) <- custom_param_values$parameter[which_are_inits] inits[names(torep)] <- torep } return(inits) }) #this will depend on results() object (so generate_population) as that's where inits live parameters_summary <- reactive({ ww <- parameters() if(input$output_type == "single" || (input$run == 0)) { parameter_df <- data.frame("parameter"=as.character(names(ww)), "description" = parameter_names[names(ww)], "value"=unlist(ww)) } else if(input$output_type == "mc") { if(is.null(results())) return(NULL) parameter_df <- data.frame("parameter"=names(ww), "description" = parameter_names[names(ww)], stringsAsFactors = F) #expand the data frame with uncertainty info: df <- lapply(results(), function(x) lapply(x[["inits"]], unlist) %>% do.call(rbind, .)) %>% do.call(rbind, .) 
%>% apply(2, function(x) {c("mean"=mean(x, na.rm=T), "lci"=quantile(x,.025, na.rm=T), "uci"=quantile(x,.975, na.rm=T))}) %>% t() %>% as.data.frame() parameter_df[["MC 2.5%"]] <- df[parameter_df$parameter,2] parameter_df[["MC mean"]] <- df[parameter_df$parameter,1] parameter_df[["MC 97.5%"]] <- df[parameter_df$parameter,3] } return(parameter_df) }) output$parameters_df <- renderTable({ parameters_summary() }) # calculation of results(single, monte carlo + summary df for MC) --------------------------------------- #results stored in a single reactive object: both MC and a single simulation results <- reactive({ dynamic_inputs <- list( compound = input$compound, species = input$species, cas = input$cas, use.cas = input$use_cas, output.units = input$solve.output.units, iv.dose = input$solve.iv.dose, tsteps = input$solve.tsteps, days = input$solve.days ) if(input$dose_type == "daily dose"){ dynamic_inputs$daily.dose = input$solve.daily.dose } if(input$dose_type == "per dose + doses/day"){ dynamic_inputs$dose = input$solve.dose dynamic_inputs$doses.per.day = input$solve.doses.per.day } if(input$output_type == "mc") { #mock eventReactive on input$run if(input$run == 0) return(NULL) if(input$population_new_submit < 1) { showModal(modalDialog(title = "No population defined", "Please specify at least one group in the Population Variability section")) return(NULL) } withProgress(message = "Generating results", min = 0, max = 1, { lapply(populations_list, function(x) { y <- do.call(generate_population, append(dynamic_inputs, x)) incProgress(1/length(populations_list)) return(y) }) }) } else if(input$output_type == "single") { do.call(generate_population, append(dynamic_inputs, list(N = 1))) } }) endpoints <- reactive({ # ww <- c("Cplasma", paste0("C", input$compartments), "Crest", "Ametabolized", "Atubules", "Agutlumen") ww <- c("Cplasma", paste0("C", c("lung", "kidney", "gut", "liver")), "Crest", "Ametabolized", "Atubules", "Agutlumen") names(ww) <- ww # names(ww) <- 
c("Plasma", input$compartments, "rest", "metabolized", "tubules", "gut lumen") # names(ww) <- c("Plasma", c("lung", "kidney", "gut", "liver"), "rest", "metabolized", "tubules", "gut lumen") return(ww) }) experimental_data <- reactive({ inFile <- input$experimental_data_input if(is.null(inFile)) return(NULL) tab <- read.csv(inFile$datapath) if(is.null(tab$variable)) tab$variable <- "Cplasma" tab }) results_mc_df_v2 <- reactive({ if(input$output_type == "single") return(NULL) if(is.null(results())) return(NULL) lci_value <- (1-input$display_ci)/2 uci_value <- 1 - (1-input$display_ci)/2 res <- results() withProgress( message = paste0("Calculating mean parameter values together with ", 100*input$display_ci, "% intervals"), { lapply(res, function(x) summarise_population(x, lci_value, uci_value)) }) }) # presentation of results ------------------------------------------------- output$choose_plot_ui <- renderUI({ selectInput("choose_plot", "Choose parameter to plot", endpoints()) }) output$choose_plot_type_ui <- renderUI({ input$run #refresh when we run! 
if(exists("populations_list")) if((length(populations_list) > 1) && (input$run > 0)) return(selectInput("choose_plot_type", "Type of display for subpopulations", c("Color different populations" = "group", "Facet (separate panels)" = "facet", "Both" = "both"))) }) output$results_plot_single <- renderPlot({ if(!is.null(input$choose_plot)) { if(input$output_type == "mc") { if(is.null(results_mc_df_v2()) || is.null(input$choose_plot)) return(NULL) #display options: fvar <- F; gvar <- F if(!is.null(input$choose_plot_type)) { if(input$choose_plot_type == "group") gvar <- T if(input$choose_plot_type == "facet") fvar <- T if(input$choose_plot_type == "both") { fvar <- T; gvar <- T } } tab <- filter(do.call(rbind, results_mc_df_v2()), variable == input$choose_plot) return(solution_autoplot(tab, facet = fvar, grouping = gvar, varname = input$choose_plot, observed = experimental_data())) } if(input$output_type == "single") { # res <- results_single()[["result"]][[1]] res <- results()[["result"]][[1]] cd <- which(colnames(res) == input$choose_plot) tab <- res[,c(1, cd)] %>% as.data.frame() %>% setNames(c("time", "mean")) return(solution_autoplot(tab, facet = F, grouping = F, varname = input$choose_plot, observed = experimental_data())) } } }) output$validation_results <- renderUI({ if(!is.null(experimental_data())) return(list( h3("Validation against experimental data"), plotOutput("results_plot_obspred") )) return(NULL) }) output$results_plot_obspred <- renderPlot({ if(!is.null(experimental_data())) { if(!is.null(input$choose_plot)) { if(input$output_type == "mc") { tab <- filter(do.call(rbind, results_mc_df_v2()), variable == input$choose_plot) obs <- filter(experimental_data(), variable == input$choose_plot) if(nrow(obs) == 0) return(NULL) return(plot_obspred(prediction = tab, observed = obs)) } if(input$output_type == "single") { # res <- results_single()[["result"]][[1]] res <- results()[["result"]][[1]] cd <- which(colnames(res) == input$choose_plot) tab <- res[,c(1, 
cd)] %>% as.data.frame() %>% setNames(c("time", "mean")) return(plot_obspred(prediction = tab, observed = experimental_data())) } } } }) # left panel observers ---------------------------------------------------- output$results_plot_ui <- renderUI({ if(input$output_type == "mc") plotOutput("results_plot") if(input$output_type == "single") plotOutput("results_plot", height=200, width= 600) }) results_numerical_df <- reactive({ if(input$output_type == "mc") { if(input$output_type == "single") varname <- "Ccompartment" if(input$output_type == "mc") varname <- "Cplasma" return(bind_rows(lapply(results(), function(x) { tt <- summarise_parameters(x, variable = varname, conf.int = input$display_ci) tt$parameter <- rownames(tt) tt[,c(5, 4, 1, 2, 3)] }))) } if(input$output_type == "single") { # if(!is.null(results_single())) { if(!is.null(results())) { return(data.frame("Cplasma half-life" = results()[["halflife"]][[1]], "Cplasma Cmax" = results()[["Cmax"]][[1]], "Cplasma AUC" = results()[["AUC"]][[1]])) } } }) output$results_numerical <- renderTable({ results_numerical_df() }, rownames = F, digits = 3) # reporting ----- output$fileDownload <- downloadHandler( filename = function() { paste("data-", Sys.Date(), ".csv", sep="") }, content = function(file) { if(input$output_type == "single") # data <- results_single() data <- results() if(input$output_type == "mc") data <- results_mc_df()["mean",,] # browser() write.csv(data, file) }, contentType='text/csv' ) output$report <- downloadHandler( # filename = "report.html", # filename = paste0("report.", input$report_format), filename = function() { paste('tkplate_report', sep = '.', switch( input$report_format, PDF = 'pdf', HTML = 'html', Word = 'docx' )) }, content = function(file) { # Copy the report file to a temporary directory before processing it, in # case we don't have write permissions to the current working dir (which # can happen when deployed). 
# if(input$report_format == "html"){ # tempReport <- file.path(tempdir(), "tkplate_report.Rmd") # file.copy("tkplate_report.Rmd", tempReport, overwrite = TRUE)} # if(input$report_format == "pdf"){ # tempReport <- file.path(tempdir(), "tkplate_report_pdf.Rmd") # file.copy("tkplate_report_pdf.Rmd", tempReport, overwrite = TRUE)} # if(input$report_format == "docx"){ # tempReport <- file.path(tempdir(), "tkplate_report_docx.Rmd") # file.copy("tkplate_report_docx.Rmd", tempReport, overwrite = TRUE)} src <- normalizePath('tkplate_report.Rmd') # temporarily switch to the temp dir, in case you do not have write # permission to the current working directory owd <- setwd(tempdir()) on.exit(setwd(owd)) file.copy(src, 'tkplate_report.Rmd', overwrite = TRUE) # Set up parameters to pass to Rmd document params <- list(name = input$compound, metabolic_route = input$compound_pathway, compound_chars = compound_summary(), pbtk_parameters = parameters_summary(), population_variability = custom_subpopulation, results = results_numerical_df(), plot = filter(do.call(rbind, results_mc_df_v2()), variable == input$choose_plot) %>% solution_autoplot(facet = F, grouping = T, varname = input$choose_plot) ) if(input$dose_type == "daily dose") params$dose <- paste("Single dose (mg/kg BW) of", input$solve.daily.dose) if(input$dose_type == "per dose + doses/day") params$dose <- paste(input$solve.doses.per.day, "doses per day;", input$solve.dose, "per dose/day (mg/kg BW)") # Knit the document, passing in the `params` list, and eval it in a # child of the global environment (this isolates the code in the document # from the code in this app). 
# rmarkdown::render(tempReport, output_file = file, # params = params, # envir = new.env(parent = globalenv()) # ) out <- rmarkdown::render('tkplate_report.Rmd', output_file = file, params = params, envir = new.env(parent = globalenv()), output_format = switch( input$report_format, PDF = rmarkdown::pdf_document(), HTML = rmarkdown::html_document(), Word = rmarkdown::word_document() )) file.rename(out, file) } ) })
0f39f6e3ca901ee1366550c363cda75db923f7f2
745e9ecc726fb37db47fbf81f77a10b9a374c05b
/man/getTableAddressing.Rd
0cde1d02117a5e47e75df8e1296db40b7bd5df24
[]
no_license
cran/ENA
c608af50058210c1ed3b3d485ffbc753e773da12
6f49d24bd908e4a7dbd39c50e0d64e609d1782a8
refs/heads/master
2021-01-02T22:37:40.847303
2014-01-20T00:00:00
2014-01-20T00:00:00
null
0
0
null
null
null
null
UTF-8
R
false
false
1,492
rd
getTableAddressing.Rd
\name{getTableAddressing} \alias{getTableAddressing} \title{Get the adjacency list addressing template.} \usage{ getTableAddressing(variableNames, truth) } \arguments{ \item{variableNames}{the names of all genes to include in the adjacency list} \item{truth}{The true adjacency matrix. Often will not be available, but is useful for debugging and testing.} } \value{ A data.frame representing the adjacency list of the matrix provided. } \description{ Useful if you want to store the networks in their condensed upper-diagonal form while still having the benefit of convenient addressing and/or if you are using a simulated dataset in which you know the truth and want to store all the values in a single data.frame. } \details{ Internal function used to get the addressing template for a data.frame to contain the adjacency list representation of a matrix. } \examples{ #Load in the sample Protein-Protein-Interaction data that comes with this package. data(PPI) #Simulate a dataset based on the 44-gene topology provided. sim <- simulateNetwork(net44) #Convert the true, underlying adjacency list to an adjacency matrix trueMat <- adj2mat(net44) #Reconstruct using GeneNet gn <- abs(buildGenenet(sim)) gn <- gn[upper.tri(gn)] wg <- abs(buildWgcna(sim)) wg <- wg[upper.tri(wg)] #Aggregate all results into a single data.frame data <- getTableAddressing(rownames(sim), trueMat) data <- cbind(data, gn, wg) } \author{ Jeffrey D. Allen \email{Jeffrey.Allen@UTSouthwestern.edu} }
d29904eb4e3aba1b4a3446b8d66df08477cb54d8
636c3ed63c399881041c586e9e70d718ae95d7ae
/environment/virfinder.R
8bd712b3212ca7620ba8fa2d9ca6134fa17c4c3c
[]
no_license
Puumanamana/nf-viral-contigs-identification
4e57b64a052abf39998f6e019042e0133d069817
82d965718e1fd4731835229206f1b9c1279ae174
refs/heads/master
2022-12-07T20:17:45.129287
2020-08-21T07:56:17
2020-08-21T07:56:17
289,207,290
0
0
null
null
null
null
UTF-8
R
false
false
2,088
r
virfinder.R
#!/usr/bin/env Rscript require(Biostrings) require(parallel) require(VirFinder) parVF.run <- function (seqFaIn) { seqFa <- strsplit(x=as.character(seqFaIn),split="",fixed=T)[[1]] data(VF.trainMod8mer) w <- VF.trainMod8mer predResult <- NULL featureOut <- countSeqFeatureCpp(seqFa, w) featureOut_kmerCount <- featureOut$kmerCount seqLength <- length(seqFa) if (seqLength < 1 * 1000) { lasso.mod <- attr(VF.trainMod8mer, "lasso.mod_0.5k") rmWordID <- attr(VF.trainMod8mer, "rmWordID_0.5k") nullDis <- attr(VF.trainMod8mer, "nullDis_0.5k") } else if (seqLength < 3 * 1000) { lasso.mod <- attr(VF.trainMod8mer, "lasso.mod_1k") rmWordID <- attr(VF.trainMod8mer, "rmWordID_1k") nullDis <- attr(VF.trainMod8mer, "nullDis_1k") } else { lasso.mod <- attr(VF.trainMod8mer, "lasso.mod_3k") rmWordID <- attr(VF.trainMod8mer, "rmWordID_3k") nullDis <- attr(VF.trainMod8mer, "nullDis_3k") } lasso.pred <- predict(lasso.mod, t(as.matrix(featureOut_kmerCount[-rmWordID])), type = "response") pvalue <- mean(nullDis > as.numeric(lasso.pred)) print(paste("len", seqLength, "score", round(lasso.pred, 4), "pvalue", round(pvalue, 4))) predResult <- rbind(predResult, c(seqLength, lasso.pred, pvalue)) colnames(predResult) <- c("length", "score", "pvalue") predResult_df <- as.data.frame(predResult) predResult_df$length <- as.numeric(as.character(predResult_df$length)) predResult_df$score <- as.numeric(as.character(predResult_df$score)) predResult_df$pvalue <- as.numeric(as.character(predResult_df$pvalue)) return(predResult_df) } environment(parVF.run) <- asNamespace('VirFinder') parVF.pred <- function(inFaFile, cores=8) { dnastringset <- readDNAStringSet(inFaFile) predResult_df <- do.call(rbind, mclapply(dnastringset, parVF.run, mc.preschedule=F, mc.cores=cores)) predResult_df$name <- rownames(predResult_df) predResult_df <- predResult_df[,c("name","length","score","pvalue")] predResult_df <- predResult_df[names(dnastringset),] return(predResult_df) } environment(parVF.pred) <- asNamespace('VirFinder')
fdd464916c72054860253b45463f801a1c627f41
60a1dcb82f87bc22703b05d0110f036c3dce07e0
/Metiers/Scripts/script_metiers.R
06332ed7ba0292e3f55bd698ef10e996ba9f97c4
[]
no_license
Manonifremer/RCGs
2e7f016debadd46940b114f68e19575f8561c188
f89061a3ef9880b5b4f21a6dc6b6407d3771914e
refs/heads/master
2023-08-25T07:56:26.252877
2021-10-14T15:12:16
2021-10-14T15:12:16
null
0
0
null
null
null
null
UTF-8
R
false
false
15,060
r
script_metiers.R
library(stringr) library(data.table) library(openxlsx) library(purrr) library(lubridate) rm(list=ls()) gc() # Import all functions for(f in list.files(path="./Scripts/Functions", full.names = T)){ source(f) } rm(f) # Load the input data data.file <- "data_input_example.csv" input.data <- loadInputData(data.file) rm(data.file) # Validate input data format validateInputDataFormat(input.data) # Load reference lists url <- "https://github.com/ices-eg/RCGs/raw/master/Metiers/Reference_lists/AreaRegionLookup.csv" area.list <- loadAreaList(url) url <- "https://github.com/ices-eg/RCGs/raw/master/Metiers/Reference_lists/Metier%20Subgroup%20Species%202020.xlsx" species.list <- loadSpeciesList(url) url <- "https://github.com/ices-eg/RCGs/raw/master/Metiers/Reference_lists/RDB_ISSG_Metier_list.csv" metier.list <- loadMetierList(url) url <- "https://github.com/ices-eg/RCGs/raw/master/Metiers/Reference_lists/Code-ERSGearType-v1.1.xlsx" gear.list <- loadGearList(url) rm(url) # Validate input data codes validateInputDataCodes(input.data, gear.list, area.list, species.list) # Prepare input data input.data[,EUR:=as.numeric(EUR)] input.data[,KG:=as.numeric(KG)] input.data[,c("selection_type","selection_mesh"):=data.table(str_split_fixed(selection,"_",2))] input.data[,selection_type:=ifelse(selection_type=="",NA,selection_type)] input.data[,selection_mesh:=ifelse(selection_mesh=="",NA,selection_mesh)] # Assign RCG names to the input data input.data <- merge(input.data, area.list, all.x = T, by = "area") # Assign species category to the input data input.data <- merge(input.data, species.list, all.x = T, by = "FAO_species") # Assign gear group and re-coded gear name to the input data input.data<-merge(input.data, gear.list, all.x = T, by.x = "gear", by.y = "gear_code") # Process input data #In the variable called sequence.def please include all columns that will constitute a fishing sequence #This variable will be used as a key for grouping operations sequence.def <- 
c("Country","year","vessel_id","vessel_length","trip_id","haul_id", "fishing_day","area","ices_rectangle","gear_level6","mesh","selection", "registered_target_assemblage") # Calculate group totals for each sequence input.data[,":="(seq_group_KG = sum(KG, na.rm = T), seq_group_EUR = sum(EUR, na.rm = T)), by=c(sequence.def,"species_group")] # Select a measure to determine the dominant group at a sequence level. If at least one species in a sequence has "value" in a measure column then # all species in that sequence get the same measure. input.data[,":="(seq_measure = getMeasure(measure)), by=sequence.def] # Determine the dominant group for each sequence input.data[seq_measure == "weight",":="(seq_dom_group = species_group[which.max(seq_group_KG)]), by=sequence.def] input.data[seq_measure == "value",":="(seq_dom_group = species_group[which.max(seq_group_EUR)]), by=sequence.def] input.data[,":="(seq_group_KG=NULL,seq_group_EUR=NULL,seq_measure=NULL)] # Apply DWS rules input.data[dws_group=="DWS",seq_DWS_kg:=sum(KG, na.rm = T), by=c(sequence.def, "dws_group")] input.data[,seq_total_kg:=sum(KG, na.rm = T), by=sequence.def] input.data[,seq_DWS_perc:=ifelse(is.na(seq_DWS_kg),0,seq_DWS_kg/seq_total_kg)*100] input.data[,seq_DWS_perc:=max(seq_DWS_perc),by=sequence.def] input.data[,DWS_gear_applicable:=grepl(RCG,DWS_for_RCG),by=.(RCG)] input.data[seq_DWS_perc>8 & DWS_gear_applicable,seq_dom_group:="DWS"] input.data[,":="(dws_group=NULL,DWS_for_RCG=NULL,seq_DWS_kg=NULL,seq_total_kg=NULL,seq_DWS_perc=NULL, DWS_gear_applicable=NULL)] # Assign metier level 6 input.data$metier_level_6<-NA input.data$metier_level_5<-NA input.data[,c("metier_level_6","metier_level_5"):=pmap_dfr(list(RCG, year, gear_level6, registered_target_assemblage, seq_dom_group, mesh, selection_type, selection_mesh), getMetier)] # Missing metier. 
Step 1: Search levels based on a dominant group of species input.data[,":="(month=month(dmy(fishing_day)), quarter=quarter(dmy(fishing_day)))] step.levels<-list(c("vessel_id","month","area","seq_dom_group","gear_group"), c("vessel_id","month","area","seq_dom_group"), c("vessel_id","quarter","area","seq_dom_group","gear_group"), c("vessel_id","quarter","area","seq_dom_group"), c("vessel_id","year","area","seq_dom_group","gear_group"), c("vessel_id","year","area","seq_dom_group"), c("vessel_id","month","seq_dom_group","gear_group"), c("vessel_id","month","seq_dom_group"), c("vessel_id","quarter","seq_dom_group","gear_group"), c("vessel_id","quarter","seq_dom_group"), c("vessel_id","year","seq_dom_group","gear_group"), c("vessel_id","year","seq_dom_group")) for(level in step.levels){ if(nrow(input.data[substr(metier_level_6,1,3)=="MIS"])>0){ input.data <- missingMetiersByLevel(input.data,level,sequence.def) } else {break} } # Missing metier. Step 2: Search levels based on gear/gear group step.levels<-list(c("vessel_id","month","area","gear_level6"), c("vessel_id","quarter","area","gear_level6"), c("vessel_id","year","area","gear_level6"), c("vessel_id","month","gear_level6"), c("vessel_id","quarter","gear_level6"), c("vessel_id","year","gear_level6"), c("vessel_id","month","area","gear_group"), c("vessel_id","quarter","area","gear_group"), c("vessel_id","year","area","gear_group"), c("vessel_id","month","gear_group"), c("vessel_id","quarter","gear_group"), c("vessel_id","year","gear_group")) for(level in step.levels){ if(nrow(input.data[substr(metier_level_6,1,3)=="MIS"])>0){ input.data <- missingMetiersByLevel(input.data,level,sequence.def) } else {break} } # Missing metier. 
Step 3: Search levels based on fleet register gear, vessel length group # and species group input.data[,vessel_length_group:=cut(vessel_length,breaks=c(0,10,12,18,24,40,Inf),right=F)] step.levels<-list(c("month","vessel_length_group","gear_FR","area","seq_dom_group"), c("month","gear_FR","area","seq_dom_group"), c("quarter","vessel_length_group","gear_FR","area","seq_dom_group"), c("quarter","gear_FR","area","seq_dom_group"), c("year","vessel_length_group","gear_FR","area","seq_dom_group"), c("year","gear_FR","area","seq_dom_group"), c("month","vessel_length_group","gear_FR","seq_dom_group"), c("month","gear_FR","seq_dom_group"), c("quarter","vessel_length_group","gear_FR","seq_dom_group"), c("quarter","gear_FR","seq_dom_group"), c("year","vessel_length_group","gear_FR","seq_dom_group"), c("year","gear_FR","seq_dom_group")) for(level in step.levels){ if(nrow(input.data[substr(metier_level_6,1,3)=="MIS"])>0){ input.data <- missingMetiersByLevel(input.data,level,sequence.def) } else {break} } # Missing metier. 
Step 4: Search levels based on fleet register gear, vessel length group step.levels<-list(c("month","vessel_length_group","gear_FR","area","gear_level6"), c("month","gear_FR","area","gear_level6"), c("quarter","vessel_length_group","gear_FR","area","gear_level6"), c("quarter","gear_FR","area","gear_level6"), c("year","vessel_length_group","gear_FR","area","gear_level6"), c("year","gear_FR","area","gear_level6"), c("month","vessel_length_group","gear_FR","gear_level6"), c("month","gear_FR","gear_level6"), c("quarter","vessel_length_group","gear_FR","gear_level6"), c("quarter","gear_FR","gear_level6"), c("year","vessel_length_group","gear_FR","gear_level6"), c("year","gear_FR","gear_level6"), c("month","vessel_length_group","gear_FR","area","gear_group"), c("month","gear_FR","area","gear_group"), c("quarter","vessel_length_group","gear_FR","area","gear_group"), c("quarter","gear_FR","area","gear_group"), c("year","vessel_length_group","gear_FR","area","gear_group"), c("year","gear_FR","area","gear_group"), c("month","vessel_length_group","gear_FR","gear_group"), c("month","gear_FR","gear_group"), c("quarter","vessel_length_group","gear_FR","gear_group"), c("quarter","gear_FR","gear_group"), c("year","vessel_length_group","gear_FR","gear_group"), c("year","gear_FR","gear_group")) for(level in step.levels){ if(nrow(input.data[substr(metier_level_6,1,3)=="MIS"])>0){ input.data <- missingMetiersByLevel(input.data,level,sequence.def) } else {break} } # Analyze vessel patterns # Specify the percentage threshold of the number of sequences below which # a metier will be considered rare rare.threshold <- 15 # Version 1 of the vessel pattern algorithm # input.data <- vesselPatterns(input.data,sequence.def,rare.threshold,gear.list) # Version 2 of the vessel pattern algorithm input.data<-rareMetiersLvl5(input.data,sequence.def,rare.threshold) # Vessel patterns. Step 1. 
step.levels<-list(c("vessel_id","month","area","seq_dom_group","gear_group"), c("vessel_id","month","area","seq_dom_group"), c("vessel_id","quarter","area","seq_dom_group","gear_group"), c("vessel_id","quarter","area","seq_dom_group"), c("vessel_id","year","area","seq_dom_group","gear_group"), c("vessel_id","year","area","seq_dom_group"), c("vessel_id","month","seq_dom_group","gear_group"), c("vessel_id","month","seq_dom_group"), c("vessel_id","quarter","seq_dom_group","gear_group"), c("vessel_id","quarter","seq_dom_group"), c("vessel_id","year","seq_dom_group","gear_group"), c("vessel_id","year","seq_dom_group")) for(level in step.levels){ if(nrow(input.data[metier_level_5_status=="rare" & is.na(metier_level_5_pattern)])>0){ input.data <- vesselPatternsByLevel(input.data,level,sequence.def) } else {break} } # Vessel patterns. Step 2. step.levels<-list(c("vessel_id","month","area","gear_level6"), c("vessel_id","quarter","area","gear_level6"), c("vessel_id","year","area","gear_level6"), c("vessel_id","month","gear_level6"), c("vessel_id","quarter","gear_level6"), c("vessel_id","year","gear_level6"), c("vessel_id","month","area","gear_group"), c("vessel_id","quarter","area","gear_group"), c("vessel_id","year","area","gear_group"), c("vessel_id","month","gear_group"), c("vessel_id","quarter","gear_group"), c("vessel_id","year","gear_group")) for(level in step.levels){ if(nrow(input.data[metier_level_5_status=="rare" & is.na(metier_level_5_pattern)])>0){ input.data <- vesselPatternsByLevel(input.data,level,sequence.def) } else {break} } # Metier level 6 assignment to metier level 5 which was assigned from pattern. 
input.data[,metier_level_6_pattern:=NA] step.levels<-list(c("vessel_id","month","area","metier_level_5"), c("vessel_id","quarter","area","metier_level_5"), c("vessel_id","year","area","metier_level_5"), c("vessel_id","month","metier_level_5"), c("vessel_id","quarter","metier_level_5"), c("vessel_id","year","metier_level_5")) for(level in step.levels){ if(nrow(input.data[metier_level_5_status=="rare" & !is.na(metier_level_5_pattern) & is.na(metier_level_6_pattern)])>0){ input.data <- metiersLvl6ForLvl5pattern(input.data,level,sequence.def) } else {break} } # Create new metier columns where rare metiers are replaced with the ones found in the pattern. input.data[,":="(metier_level_5_new=ifelse(is.na(metier_level_5_pattern), metier_level_5, metier_level_5_pattern), metier_level_6_new=ifelse(is.na(metier_level_6_pattern), metier_level_6, metier_level_6_pattern))] # Detailed metier level 6 assignment to general >0_0_0 cases. input.data[,detailed_metier_level_6:=ifelse(grepl("_>0_0_0",metier_level_6_new),NA,metier_level_6_new)] step.levels<-list(c("vessel_id","month","area","metier_level_5_new"), c("vessel_id","quarter","area","metier_level_5_new"), c("vessel_id","year","area","metier_level_5_new"), c("vessel_id","month","metier_level_5_new"), c("vessel_id","quarter","metier_level_5_new"), c("vessel_id","year","metier_level_5_new")) for(level in step.levels){ if(nrow(input.data[is.na(detailed_metier_level_6)])>0){ input.data <- detailedMetiersLvl6ForLvl5(input.data,level,sequence.def) } else {break} } # Save results print("Saving results ...") result<-input.data[order(vessel_id,trip_id,fishing_day,area,ices_rectangle,gear,mesh), .(Country,RCG,year,vessel_id,vessel_length,trip_id,haul_id,fishing_day,area,ices_rectangle,gear,gear_FR,mesh,selection,FAO_species, registered_target_assemblage,KG,EUR,metier_level_6,mis_met_level,mis_met_number_of_seq, metier_level_5,metier_level_5_status, metier_level_5_pattern,ves_pat_level,ves_pat_number_of_seq, 
metier_level_6_pattern,ves_pat_met6_level,ves_pat_met6_number_of_seq, metier_level_5_new,metier_level_6_new, detailed_metier_level_6,det_met6_level,det_met6_number_of_seq)] write.csv(result,"metier_results.csv", na = "") write.xlsx(file = "metier_results_summary.xlsx",result[,.(n_count=.N, KG_sum=sum(KG, na.rm=T), EUR_sum=sum(EUR, na.rm=T)), by=.(Country, RCG, metier_level_6_new)][order(Country, RCG, metier_level_6_new)])
f0be8ce0b0822be3f540f707b7ed6b8dd2a4d72d
c275a9710945be41621d3981d8c78fc7a6e74873
/R/coef.R
d7b2a40b3a5cf39c7273697a6819b3b7caf26dda
[]
no_license
mhu48/pudms
e60bbc1a359e191a28787d2debfa3ccb6394e40c
1564b34f6c241b9e02f814ac73d0cbd12bfc3112
refs/heads/master
2023-02-06T01:36:16.598004
2020-12-29T22:53:18
2020-12-29T22:53:18
null
0
0
null
null
null
null
UTF-8
R
false
false
143
r
coef.R
#' Extract coefficients from a fitted pudms model
#'
#' S3 method that delegates to \code{coef()} on the underlying fitted model
#' stored in \code{object$fitted}.
#'
#' @param object A \code{pudms.fit} object containing a \code{fitted} element.
#' @param ... Unused; present for S3 method signature compatibility.
#' @return The coefficients of the underlying fitted model.
#' @import PUlasso
#' @importFrom stats coef
#' @export
#' @method coef pudms.fit
coef.pudms.fit <- function(object, ...) {
  coef(object$fitted)
}
3b744a92c3c2bbf6376921b230306d2889b01000
0b1dd5509959785e1151563f401a91cc1220a07a
/tests/testthat/test-accounts.R
905dc144c154d6ae2f59c601a41644ef4d89b6d8
[ "MIT" ]
permissive
jfontestad/gdaxr
30a001a08b1e69b8f33fe9263465e140e6e17a2f
bdb7cbf695bf959825e3ab4e7332cacf243130a3
refs/heads/master
2022-01-25T13:43:46.544313
2018-01-23T02:23:07
2018-01-23T02:23:07
null
0
0
null
null
null
null
UTF-8
R
false
false
2,370
r
test-accounts.R
context("accounts") test_that("accounts are listed correctly", { accounts <- with_mock_API(get_accounts()) expect_identical(length(accounts), 2L) expect_identical(length(accounts[[1]]), 6L) expect_identical(length(accounts[[2]]), 6L) expect_identical(accounts[[1]][["id"]], "71452118-efc7-4cc4-8780-a5e22d4baa53") expect_identical(accounts[[1]][["currency"]], "BTC") expect_identical(accounts[[1]][["balance"]], "0.0000000000000000") expect_identical(accounts[[2]][["available"]], "79.2266348066930000") expect_identical(accounts[[2]][["hold"]], "1.0035025000000000") expect_identical(accounts[[2]][["profile_id"]], "75da88c5-05bf-4f54-bc85-5c775bd68254") }) test_that("single account is listed correctly", { id <- "a1b2c3d4" account <- with_mock_API(get_account(id)) expect_identical(length(account), 6L) expect_identical(account[["id"]], id) expect_identical(account[["currency"]], "USD") expect_identical(account[["balance"]], "1.100") expect_identical(account[["available"]], "1.00") expect_identical(account[["hold"]], "0.100") }) test_that("account history is listed correctly", { id <- "100" account_history <- with_mock_API(get_account_history(id)) hist1 <- account_history[[1]] expect_identical(length(account_history), 1L) expect_identical(hist1[["id"]], id) expect_identical(hist1[["created_at"]], "2014-11-07T08:19:27.028459Z") expect_identical(hist1[["amount"]], "0.001") expect_identical(hist1[["balance"]], "239.669") expect_identical(hist1[["type"]], "fee") expect_identical(hist1[["details"]], list( "order_id" = "d50ec984-77a8-460a-b958-66f114b0de9b", "trade_id" = "74", "product_id" = "BTC-USD" )) }) test_that("account holds are is listed correctly", { id <- "82dcd140-c3c7-4507-8de4-2c529cd1a28f" account_holds <- with_mock_API(get_account_holds(id)) hold1 <- account_holds[[1]] expect_identical(length(account_holds), 1L) expect_identical(hold1[["id"]], id) expect_identical(hold1[["account_id"]], "e0b3f39a-183d-453e-b754-0c13e5bab0b3") expect_identical(hold1[["created_at"]], 
"2014-11-06T10:34:47.123456Z") expect_identical(hold1[["updated_at"]], "2014-11-06T10:40:47.123456Z") expect_identical(hold1[["amount"]], "4.23") expect_identical(hold1[["type"]], "order") expect_identical(hold1[["ref"]], "0a205de4-dd35-4370-a285-fe8fc375a273") })
7ca997a5fb4c24563ac5430a3ac54fa9eb1e44f6
f8eb55c15aec611480ede47d4e15e5a6e472b4fa
/analysis/0114_tendencies_sp500.R
75c3cabb8efaad8a22b7e5156e26bd399707016b
[]
no_license
nmaggiulli/of-dollars-and-data
a4fa71d6a21ce5dc346f7558179080b8e459aaca
ae2501dfc0b72d292314c179c83d18d6d4a66ec3
refs/heads/master
2023-08-17T03:39:03.133003
2023-08-11T02:08:32
2023-08-11T02:08:32
77,659,168
397
32
null
null
null
null
UTF-8
R
false
false
2,019
r
0114_tendencies_sp500.R
cat("\014") # Clear your console rm(list = ls()) #clear your environment ########################## Load in header file ######################## # setwd("~/git/of_dollars_and_data") source(file.path(paste0(getwd(),"/header.R"))) ########################## Load in Libraries ########################## # library(scales) library(readxl) library(lubridate) library(ggrepel) library(tidyverse) folder_name <- "0114_tendencies" out_path <- paste0(exportdir, folder_name) dir.create(file.path(paste0(out_path)), showWarnings = FALSE) ########################## Start Program Here ######################### # #Bring in all data ret_yr <- readRDS(paste0(localdir, "0009_sp500_ret_pe.Rds")) %>% rename(index = price_plus_div) %>% mutate(yr = year(date), mt = month(date)) %>% filter(mt == 1) %>% mutate(ret = index/lag(index) - 1) %>% select(date, ret, yr, index) %>% filter(!is.na(ret), yr > 1940) avg_ret <- ret_yr %>% summarise(mean_ret = mean(ret)) %>% pull() print(avg_ret) file_path <- paste0(out_path, "/sp500_returns_by_yr.jpeg") source_string <- "Source: http://www.econ.yale.edu/~shiller/data.htm (OfDollarsAndData.com)" note_string <- paste0("Note: Real return includes reinvested dividends.") plot <- ggplot(ret_yr, aes(x=date, y=ret)) + geom_bar(stat="identity", fill = chart_standard_color) + geom_hline(yintercept = avg_ret, linetype = "dashed") + geom_hline(yintercept = 0, col = "black") + scale_y_continuous(label = percent) + of_dollars_and_data_theme + ggtitle("Market Returns Are Rarely Average") + labs(x="Date", y="Annual Return", caption = paste0(source_string, "\n", note_string)) # Save the plot ggsave(file_path, plot, width = 15, height = 12, units = "cm") # ############################ End ################################## #
c1568d0e5662e1a2dbd4c1a10130b7be51c52ac2
ba3882d86260c2204210f627f2ddfa7199cfa60d
/cachematrix.R
83194c3d2672c171a9a5e8c86f540f585519ef46
[]
no_license
speakwithalisp/ProgrammingAssignment2
70393061e367f736546ba82065b0abeb9ad8603b
cd325caec5564b1e0a2f7533c6c151eb2724029e
refs/heads/master
2020-04-06T04:40:01.367765
2015-04-26T18:05:50
2015-04-26T18:05:50
null
0
0
null
null
null
null
UTF-8
R
false
false
1,894
r
cachematrix.R
## Cache a matrix together with its (lazily computed) inverse, exploiting R's
## lexical scoping so the potentially expensive solve() call runs only once.

## makeCacheMatrix: wrap a matrix in a closure-backed "cache matrix" object.
## Returns a list of accessors:
##   set(y)          -- replace the stored matrix (invalidates the cached inverse)
##   get()           -- return the stored matrix
##   setInverse(inv) -- store a computed inverse
##   getInverse()    -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    i <<- NULL # any new matrix invalidates the cache
  }
  get <- function() x
  setInverse <- function(inverse) i <<- inverse
  getInverse <- function() i
  list(get = get, getInverse = getInverse, set = set, setInverse = setInverse)
}

## cacheSolve: return the inverse of a makeCacheMatrix object, computing it
## with solve() on the first call and serving the cached value afterwards.
## FIX: `...` is now forwarded to solve() (the original signature accepted
## `...` but silently dropped it), and the inverse -- not solve()'s input --
## is what gets cached and returned.
cacheSolve <- function(x, ...) {
  i <- x$getInverse()
  if (!is.null(i)) {
    message("Getting cached data")
    return(i)
  }
  i <- solve(x$get(), ...)
  x$setInverse(i)
  i
}
4d817fcd5a2096e84c14597afef5907bfacd37aa
7c9c2e23420a68537b2988ea08f25ac16db42a9c
/maps/special_avg_new_cases_per_100k_last_14_days.R
f83b82afce32c67934f50718791a61bc5078e1ea
[]
no_license
VinayArora404219/TN_COVID
eede36a1ccb8617c3f0d36199366b7e2082467bb
a9dd22a17eafa9e3511b1db658c04a83ef1ebfd0
refs/heads/master
2022-11-26T18:23:37.449134
2020-08-06T03:10:56
2020-08-06T03:10:56
285,459,297
0
0
null
2020-08-06T03:03:17
2020-08-06T03:03:16
null
UTF-8
R
false
false
2,698
r
special_avg_new_cases_per_100k_last_14_days.R
################################################################################
### Map of average new cases per 100k over the last 14 days by county,
### replicating the map at:
### https://www.tn.gov/health/cedep/ncov/data/epi-curves.html
################################################################################
scale <- 100000
scale_txt <- "100k"

# Mean daily new cases per county over the most recent complete 14-day window
# (drop the latest partial day, keep the 14 before it).
new_cases_last14 <- new_cases_tib %>%
  tail(n = 15) %>%
  head(n = 14) %>%
  select(-Date, -Total) %>%
  gather() %>%
  group_by(key) %>%
  summarize(new_cases_last14 = mean(value)) %>%
  rename(County = key)

# Join onto the county shapes/populations and classify against the threshold
this_map <- county_acs %>%
  mutate(NAME = str_replace(NAME, " County, Tennessee", "")) %>%
  mutate(NAME = if_else(NAME == "DeKalb", "Dekalb", NAME)) %>%
  select(GEOID, NAME, POP2018) %>%
  rename(County = NAME) %>%
  left_join(new_cases_last14, by = "County") %>%
  mutate(new_cases_percapita_last14 = scale * new_cases_last14 / POP2018) %>%
  mutate(color_code = case_when(
    new_cases_percapita_last14 <= 10 ~ "Below Threshold <= 10",
    new_cases_percapita_last14 > 10 ~ "Above Threshold > 10"
  ))

new_cases_percapita_last14_label <- (this_map$new_cases_percapita_last14) %>% round(1)
new_cases_percapita_last14_label[new_cases_percapita_last14_label == 0] <- ""

### Font color: white above half the max looks nicer, but it is currently
### overridden to plain black on the next line.
frac <- 0.5 * max(this_map$new_cases_percapita_last14)
this_map$textcolor <- if_else(this_map$new_cases_percapita_last14 > frac, "white", "black")
this_map$textcolor <- "black"

map_new_cases_percapita_last14 <-
  ggplot(this_map) +
  theme_void() +
  theme(plot.title = element_text(hjust = 0.5)) +
  theme(legend.title = element_blank()) +
  theme(legend.position = "bottom") +
  geom_sf(data = this_map$geometry, aes(fill = this_map$color_code),
          size = geom_thickness, color = "white") +
  geom_text(data = this_map, size = 3, color = "black",
            aes(x = county_centers$x, y = county_centers$y,
                label = new_cases_percapita_last14_label),
            nudge_x = this_map$nudge_x, nudge_y = this_map$nudge_y) +
  # Set the color scale manually
  scale_fill_manual(values = c("Below Threshold <= 10" = "#3e87b5",
                               "Above Threshold > 10" = "#ce703a")) +
  labs(title = paste("Average New Cases Per ", scale_txt, " Last 14 Days - ",
                     new_cases_tib %>% tail(n = 1) %>% pull("Date"), sep = ""))

print(map_new_cases_percapita_last14)
0a6e6f935b7812272d7818697d95b5eca36c1096
98812f2e5b9efd30c62883d30ad8a989eb34a156
/Sarit Polsky/PICLS/Pump/clean_and_calculate_all.R
97d9e66b25992f1532947d24c6bf876837522859
[]
no_license
childhealthbiostatscore/BDC-Code
8983b1cc0d1bceab9e7ccb7b8b4d885ba1b45fa8
eb48c29233fc3d956167bf2481a5c5ed827ebb72
refs/heads/master
2023-09-02T09:46:41.745174
2023-09-01T20:04:44
2023-09-01T20:04:44
168,424,966
0
0
null
null
null
null
UTF-8
R
false
false
7,648
r
clean_and_calculate_all.R
library(tidyverse)
library(readxl)
library(tools)
library(parsedate)
library(cgmanalysis)
library(pdftools)
source("/Users/pylell/Documents/GitHub/BDC-Code/Sarit Polsky/PICLS/Pump/pump_variables.R")
setwd("/Volumes/BDC/Projects/Sarit Polsky/PICLS")

# Split each raw Carelink export into a cleaned pump file and a sensor file,
# then summarize both. (The earlier 1-month/3-day date-window filtering that
# lived in this loop has been disabled and is omitted here.)
dir.create("./Data_Clean/Carelink Pump Files", showWarnings = FALSE)
dir.create("./Data_Clean/Carelink Sensor Files", showWarnings = FALSE)
files <- list.files("./Data_Raw/Final raw CGM files for analysis", full.names = TRUE)
for (f in files) {
  # ---- Pump section ----
  id <- sub(".csv*", "", basename(f))
  print(id)
  table <- read.csv(f, na.strings = "", header = FALSE)
  # Trim everything from the "Sensor" section onward
  end <- which(table[, 3] == "Sensor")
  if (length(end) > 0) {
    table <- table[-c((end - 1):nrow(table)), ]
  }
  # Promote the "Index" row to column names and drop the header rows above it
  start <- which(table[, 1] == "Index")
  if (length(start) > 0) {
    colnames(table) <- table[start[1], ]
    table <- table[-c(1:(start[1] + 2)), ]
  }
  # Remove closed-loop micro boluses
  closed_loop <- which(table$`Bolus Source` == "CLOSED_LOOP_MICRO_BOLUS")
  if (length(closed_loop) > 0) {
    table <- table[-closed_loop, ]
  }
  # Remove rows with missing date/time, then build a combined datetime column
  table <- table[!is.na(table$Date) & !is.na(table$Time), ]
  table$datetime <- paste(table$Date, table$Time)
  table$datetime <- parse_date(table$datetime, approx = FALSE)
  # Drop fully blank rows
  blank <- which(rowSums(is.na(table)) == ncol(table))
  if (length(blank) > 0) {
    table <- table[-blank, ]
  }
  # Write cleaned pump file
  if (nrow(table) > 0) {
    filename <- paste0("./Data_Clean/Carelink Pump Files/", id, ".csv")
    write.csv(table, file = filename, row.names = FALSE, na = "")
  }

  # ---- Sensor section ----
  table <- read.csv(f, na.strings = "", header = FALSE)
  # Locate the sensor, date, and time columns by their header labels
  sensor_cols <- which(unlist(lapply(table, function(c) {
    "Sensor Glucose (mg/dL)" %in% c
  })))
  date_col <- which(unlist(lapply(table, function(c) {
    "Date" %in% c
  })))
  time_col <- which(unlist(lapply(table, function(c) {
    "Time" %in% c
  })))
  if (length(sensor_cols) > 1) {
    table$sensorglucose <- rowSums(table[, sensor_cols], na.rm = TRUE)
  } else {
    table$sensorglucose <- table[, sensor_cols]
  }
  table$sensorglucose <- suppressWarnings(as.numeric(table$sensorglucose))
  table$timestamp <- parse_date(paste(table[, date_col], table[, time_col]), approx = FALSE)
  table$subjectid <- NA
  table <- table[, c("subjectid", "timestamp", "sensorglucose")]
  # Drop fully blank rows
  blank <- which(rowSums(is.na(table)) == ncol(table))
  if (length(blank) > 0) {
    table <- table[-blank, ]
  }
  if (sum(!is.na(table$sensorglucose)) > 0) {
    # Write cleaned sensor file
    filename <- paste0("./Data_Clean/Carelink Sensor Files/", id, ".csv")
    write.csv(table, file = filename, row.names = FALSE, na = "")
  }
}

# Analyze
pump_variables(indir = "./Data_Clean/Carelink Pump Files",
               outdir = "./Data_Clean", outname = "carelink_pump_summary")
cgmvariables("./Data_Clean/Carelink Sensor Files", "./Data_Clean",
             outputname = "carelink_sensor_summary", id_filename = TRUE)

# For every "Other" event marker, count rows with BWZ status "Delivered"
# within +/- 2 minutes of the marker.
# Reference files: 107A_1.11.20-2.9.20 (file 17) has boluses within 2 minutes;
# 100A_5.9.19-6.7.19 (file 6) has undelivered boluses.
cleaned_files <- list.files("./Data_Clean/Carelink Pump Files/", full.names = TRUE)
# count_* variables are per-file; total_* are cumulative over all files
total_no_bolus <- 0
total_one_bolus <- 0
total_more_than_one_bolus <- 0
for (f in cleaned_files[1:20]) {
  id <- sub(".csv*", "", basename(f))
  print(id)
  table <- read.csv(f, na.strings = "", header = TRUE)
  table <- table %>% filter(!is.na(datetime))
  table$datetime <- as.POSIXct(table$datetime)
  times_target_bolus <- table %>% filter(Event.Marker == "Other")
  count_no_bolus_cumulative <- 0
  count_one_bolus_cumulative <- 0
  count_more_than_one_bolus_cumulative <- 0
  # FIX: iterate row indices; the original `for (j in times_target_bolus)`
  # looped over the data frame's COLUMNS while indexing `$datetime[j]`.
  for (j in seq_len(nrow(times_target_bolus))) {
    # NOTE(review): assumes the POSIXct difference is in seconds, so <= 120
    # means within 2 minutes -- confirm the units on real data.
    try(pull_data <- table %>%
          filter(abs(as.numeric(datetime - as.POSIXct(times_target_bolus$datetime[j]))) <= 120))
    pull_data_bolus <- pull_data %>% filter(BWZ.Status == "Delivered")
    count_this_bolus <- nrow(pull_data_bolus)
    count_no_bolus <- ifelse(count_this_bolus == 0, 1, 0)
    # FIX: `ifelse(count_no_bolus==1){print(...)}` was a syntax error;
    # a plain if statement is what was intended.
    if (count_no_bolus == 1) {
      print(c("No bolus:", f, as.character(times_target_bolus$datetime[j])))
    }
    count_no_bolus_cumulative <- count_no_bolus_cumulative + count_no_bolus
    count_one_bolus <- ifelse(count_this_bolus == 1, 1, 0)
    count_one_bolus_cumulative <- count_one_bolus_cumulative + count_one_bolus
    count_more_than_one_bolus <- ifelse(count_this_bolus > 1, 1, 0)
    count_more_than_one_bolus_cumulative <- count_more_than_one_bolus_cumulative + count_more_than_one_bolus
  }
  total_no_bolus <- total_no_bolus + count_no_bolus_cumulative
  total_one_bolus <- total_one_bolus + count_one_bolus_cumulative
  total_more_than_one_bolus <- total_more_than_one_bolus + count_more_than_one_bolus_cumulative
}

# Alternative approach: collect the affected rows instead of just counting them.
cleaned_files <- list.files("./Data_Clean/Carelink Pump Files/", full.names = TRUE)
data_no_bolus <- NA
data_more_than_one_bolus <- 0
cumulative_none <- NA
cumulative_more_than_one <- NA
for (f in cleaned_files[1:20]) {
  id <- sub(".csv*", "", basename(f))
  print(id)
  table <- read.csv(f, na.strings = "", header = TRUE)
  table <- table %>% filter(!is.na(datetime))
  table$datetime <- as.POSIXct(table$datetime)
  times_target_bolus <- table %>% filter(Event.Marker == "Other")
  # FIX: row indices, not columns (same bug as above)
  for (j in seq_len(nrow(times_target_bolus))) {
    try(pull_data <- table %>%
          filter(abs(as.numeric(datetime - as.POSIXct(times_target_bolus$datetime[j]))) <= 120))
    pull_data_bolus <- pull_data %>% filter(BWZ.Status == "Delivered")
    # FIX: ifelse() cannot return a data frame (it returns only the first
    # element of the yes/no vectors); use if/else to keep whole frames.
    # NOTE(review): the original kept the ENTIRE times_target_bolus frame
    # here rather than row j -- preserved as-is, but verify the intent.
    temp_none <- if (nrow(pull_data_bolus) == 0) as.data.frame(times_target_bolus) else NA
    temp_more_than_one <- if (nrow(pull_data_bolus) > 1) pull_data_bolus else NA
    cumulative_none <- rbind(cumulative_none, temp_none)
    cumulative_more_than_one <- rbind(cumulative_more_than_one, temp_more_than_one)
  }
}
c21dc4286d1b8091483f908d721c00799cf663be
cfd42578c552ea9b863846fac0be37867cd3da90
/R_scripts/polysome profiling.R
8127b6e08c1dffbb2687b72c259a4b3a5453daba
[]
no_license
tsekara/TransRefiner
88a1ad1ec6d80c9ed3cd162ba4449de5a088f2e8
27629ed135124b381a453454c3e2b7502bb7dd96
refs/heads/master
2022-11-15T01:04:29.900762
2020-06-18T14:04:51
2020-06-18T14:04:51
256,861,717
0
0
null
null
null
null
UTF-8
R
false
false
8,027
r
polysome profiling.R
#!/usr/bin/Rscript
## Batch-process polysome-profile CSV traces: smooth each trace, find local
## minima, rescale (optionally), plot raw/smoothed curves to PDF, and write a
## per-region peak-area table.
## FIXES vs. original: removed a duplicate sprintf() call whose result was
## discarded, and a stray no-op literal `1` inside runmin().
input.dir <- "/home/tsekara/Downloads/Polysomefiles/"
filenames <- dir(input.dir, pattern = "*.csv")

for (i in 1:length(filenames)) {
  # name of input file to process
  input.file <- sprintf("%s%s", input.dir, filenames[i])
  show.raw <- TRUE       # show raw data in final plot
  show.smoothed <- TRUE  # show smoothed data in final plot
  smooth.window.size <- 11 # usually 11, set to 1 if already smooth
  rescale.data <- FALSE  # scale raw data to range 0..100%
  output.directory <- "/home/tsekara/Downloads/polysome_output/"
  ## local minima window size in minutes
  minima.window.size <- (10 / 60) # 10 seconds
  time.range <- c(0, 20)
  detector.range <- c(0, 100)

  ## command-line processing (overrides the defaults above when args given)
  argPos <- 1
  if (length(commandArgs(TRUE)) > 0) {
    ## ensures there is no default input file if command-line args are used
    input.file <- ""
    output.directory <- "./"
  }
  while (argPos <= length(commandArgs(TRUE))) {
    argument <- commandArgs(TRUE)[argPos]
    if (argument == "-input") {
      input.file <- commandArgs(TRUE)[argPos + 1]
      argPos <- argPos + 1
    } else if (argument == "-rescale") {
      rescale.data <- TRUE
    } else if (argument == "-noraw") {
      show.raw <- FALSE
    } else {
      cat("Command not understood:", argument)
    }
    argPos <- argPos + 1
  }
  # remove directory from input file name
  input.base <- sub("^.*/", "", input.file)
  # ensure output.directory has exactly one '/' at the end
  output.directory <- sub("/$", "", output.directory)
  output.directory <- sub("$", "/", output.directory)

  ## accessory functions

  ## getArea -- returns the area between two time points, as the sum of tiny
  ## trapezoids between data points (base, baseline-subtracted, and
  ## endpoint-corrected variants).
  getArea <- function(startTime, stopTime, input.df) {
    if (startTime > stopTime) {
      return(data.frame(start.time = startTime, end.time = stopTime,
                        base.area = NA, rect.area = NA, trap.area = NA))
    }
    sample.readings <- subset(input.df, Time >= startTime & Time <= stopTime)
    num.readings <- dim(sample.readings)[1]
    base.area <- sample.readings$widths * sample.readings$averages
    rect.area <- sample.readings$widths *
      (sample.readings$averages - min(sample.readings$averages))
    ## subtract triangle from rectangular base
    trap.area <- sum(rect.area) - sum(sample.readings$widths) *
      abs(sample.readings$averages[1] - sample.readings$averages[num.readings]) / 2
    return(data.frame(start.time = sample.readings$Time[1],
                      end.time = sample.readings$Time[num.readings],
                      base.area = sum(base.area),
                      rect.area = sum(rect.area),
                      trap.area = trap.area))
  }

  ## runmin -- determine local minima from a series of points with a given
  ## window size (window size is in minutes).
  runmin <- function(points, times, window.size) {
    cat("Hunting for local minima...")
    min.locs <- NULL
    next.loc <- 1
    unique.points <- 0
    for (pos in 1:length(points)) {
      current.time <- times[pos]
      current.window <- which((times >= (current.time - window.size)) &
                                (times <= (current.time + window.size)))
      min.points <- which(points[current.window] == min(points[current.window])) +
        min(current.window) - 1
      if (length(min.points) < length(current.window)) {
        if (points[pos] == min(points[current.window]) &&
            (points[current.window[1]] != points[pos]) &&
            (points[current.window[length(current.window)]] != points[pos])) {
          min.locs[next.loc] <- trunc(mean(min.points))
          if (length(unique(min.locs)) > unique.points) {
            unique.points <- length(unique(min.locs))
            cat("", unique.points)
          }
          next.loc <- next.loc + 1
        }
      }
    }
    cat(" done\n")
    return(unique(min.locs))
  }

  ## plotData -- draw raw and/or smoothed traces (plus local minima) into the
  ## currently open device; reads data.df / local.minima / ranges lexically.
  plotData <- function(raw = TRUE, smoothed = TRUE, adjust.time = FALSE) {
    x.label <- "Time"
    time.values <- data.df$Time
    if (adjust.time) {
      time.values <- data.df$AdjustedTime
      x.label <- "Time (adjusted)"
    }
    ## print a dummy plot so that the frame can be drawn first
    plot(x = time.values, y = data.df$Reading, type = "n",
         xlab = x.label, ylab = "Absorbance",
         xlim = time.range, ylim = detector.range)
    legend.names <- NULL
    if (raw) {
      ## actual point data -- raw
      points(x = time.values, y = data.df$Reading, type = "l", col = "red")
      legend.names <- c(legend.names, "raw data")
    }
    if (smoothed) {
      legend.names <- c(legend.names, "smoothed data", "local minima")
      ## actual point data -- smoothed
      points(x = time.values, y = data.df$smoothed, type = "l", col = "blue")
      ## local minima
      points(x = time.values[local.minima], y = data.df$smoothed[local.minima],
             type = "o", col = "magenta")
    }
    if (length(legend.names) > 1) {
      ## legend for plot
      legend("topright", legend = legend.names,
             fill = c("red", "blue", "magenta")[1:length(legend.names)],
             inset = 0.05, bg = "white")
    }
  }

  data.df <- read.csv(input.file, row.names = NULL)
  data.df <- subset(data.df, Time <= max(time.range))
  ## order by time to make points more predictable
  data.df <- data.df[order(data.df$Time), ]
  ## if using raw voltage readings, multiply by 100
  if (max(data.df$Reading) < 2) {
    data.df$Reading <- data.df$Reading * 100
  }
  ## if readings are in seconds, convert to minutes
  if (max(data.df$Time) > 120) {
    data.df$Time <- data.df$Time / 60
  }
  ## smooth out by running median
  data.df$smoothed <- runmed(data.df$Reading, smooth.window.size)
  ## find local minima with a window size of minima.window.size
  local.minima <- unique(c(1,
                           runmin(data.df$smoothed, data.df$Time, minima.window.size),
                           dim(data.df)[1]))
  ## normalise to absorbance relative to highest peak, lowest local minimum
  min.point <- min(data.df$Reading[local.minima])
  max.point <- max(data.df$Reading)
  if (rescale.data) {
    data.df$Reading <- (data.df$Reading - min.point) / (max.point - min.point) * 100
  }
  ## smooth data using new values
  data.df$smoothed <- runmed(data.df$Reading, smooth.window.size)
  smoothed.range <- range(data.df$smoothed)
  time.range <- range(data.df$Time)
  ## work out widths/heights of trapeziums between each point (baseline = 0)
  data.df$widths <- c(0, data.df$Time[-1] - data.df$Time[-dim(data.df)[1]])
  data.df$averages <- c(0, (data.df$smoothed[-1] + data.df$smoothed[-dim(data.df)[1]]) / 2)
  ## determine t0 for adjusted time:
  ## t0 is set to the start of the largest jump over 20 readings
  readings.diff <- c(data.df$Reading[-(1:20)], rep(0, 20)) - data.df$Reading
  maxjump.pos <- which(readings.diff == max(readings.diff))[1]
  time.t0 <- data.df$Time[maxjump.pos]
  data.df$AdjustedTime <- data.df$Time - time.t0

  pdf(paste(output.directory, input.base, ".pdf", sep = ""),
      width = 9.6, height = 5.4)
  par(mar = c(5.1, 5.1, 1, 1))
  ## draw plots -- raw data only, then raw+smoothed per options
  plotData(raw = TRUE, smoothed = FALSE, adjust.time = FALSE)
  plotData(raw = TRUE, smoothed = FALSE, adjust.time = TRUE)
  plotData(raw = show.raw, smoothed = show.smoothed, adjust.time = FALSE)

  ## create region labels, and produce area table
  areas.df <- NULL
  for (x in 2:length(local.minima)) {
    minima.window <- local.minima[x - 1]:local.minima[x]
    maxVal <- max(data.df$smoothed[minima.window])
    xpos <- data.df$Time[mean(which(data.df$smoothed[minima.window] == maxVal)) +
                           local.minima[x - 1] - 1]
    ypos <- maxVal + (max(smoothed.range) * 0.05)
    if (ypos > (max(smoothed.range))) {
      ypos <- (max(smoothed.range) * 0.95)
    }
    text(x = xpos, y = ypos, labels = (x - 1), cex = 0.5)
    areas.df <- rbind(areas.df,
                      cbind(region = x - 1,
                            getArea(data.df$Time[local.minima[x - 1]],
                                    data.df$Time[local.minima[x]], data.df)))
  }
  write.csv(areas.df,
            file = paste(output.directory, "areas_", input.base, sep = ""),
            row.names = FALSE)
  dummy <- dev.off()
}
142c1fdcc18b1ddabbee1ba72c0a04751edc80bf
29585dff702209dd446c0ab52ceea046c58e384e
/checkmate/tests/testthat/test_checkChoice.R
57ec130f7638c40e94741c8e584b684c763bc813
[]
no_license
ingted/R-Examples
825440ce468ce608c4d73e2af4c0a0213b81c0fe
d0917dbaf698cb8bc0789db0c3ab07453016eab9
refs/heads/master
2020-04-14T12:29:22.336088
2016-07-21T14:01:14
2016-07-21T14:01:14
null
0
0
null
null
null
null
UTF-8
R
false
false
611
r
test_checkChoice.R
context("checkChoice") test_that("checkChoice", { myobj = 1 expect_succ_all(Choice, myobj, 1:3) myobj = 0 expect_fail_all(Choice, myobj, 1:3) expect_false(testChoice(character(0), letters)) expect_false(testChoice(NULL, letters)) expect_false(testChoice(1, NULL)) expect_error(testChoice(list(1), as.list(iris)), "atomic") expect_true(testChoice(1L, 1:10)) expect_false(testChoice("ab", letters)) expect_false(testChoice(NA_integer_, 1:10)) expect_false(testChoice(1:2, 1:10)) expect_error(assertChoice(-1, 1:2), "element of") expect_error(assertChoice(1L, list()), "atomic") })
206a0351be8eff763d910f390db1d2c499dedd59
dbb8e3961b743e36de7ac45a6f5e73383006ea8c
/R/zzz.R
474977c544b29f1d11966f7b9ea85e261d8ccf7d
[ "MIT" ]
permissive
non87/SISeg
ccf26df7845f855b212ae900a14260d7419d98f6
b011ba35c5dad6d9c91b050bd60b5690917ff3f9
refs/heads/master
2023-01-21T18:22:14.865202
2020-12-01T22:11:18
2020-12-01T22:11:18
314,720,523
0
0
null
null
null
null
UTF-8
R
false
false
132
r
zzz.R
# Package load hook: build the framework/index/derivative lists inside the
# index_environment as soon as the namespace is loaded.
.onLoad <- function(libname, pkgname) {
  create_env_lists()
}
11098b548f68f58a1e89d0f984a52f988754e9ad
604058771cc0ef5dbaf48dc73dee14fefab0cdb8
/code/gff_to_gene_fasta.R
fa5a2698fc4ca16624434d766b5ad08ee555dc1f
[]
no_license
nemhauser-lab/brassica_rna_seq
c7e3c77a5f3f844649cca28cc67a5621c3fe8d73
92f428d75c08f51ff40af00a8af082d0e1d7e5f9
refs/heads/master
2020-06-23T05:49:01.313898
2019-08-02T16:54:19
2019-08-02T16:54:19
198,528,136
0
0
null
null
null
null
UTF-8
R
false
false
3,257
r
gff_to_gene_fasta.R
library(Biostrings)
library(GenomicRanges)
library(rprojroot)
library(plyr)
library(BSgenome)

# ---- Inputs: genome FASTA and gene GFF for Brassica rapa v2.5 ----
rootDir <- find_root(is_rstudio_project)
dirPath <- file.path(rootDir, "data", "Brapa2.5")
genomeFile <- file.path(dirPath, "BrapaV2.5_Chr.fa.gz")
BrapaGenome <- readDNAStringSet(genomeFile)
gffFile <- file.path(dirPath, "BrapaV2.5_Chr.gene.gff")
gffData <- ape::read.gff(gffFile)

# ---- Parse the GFF attributes field into separate columns ----
gffData$ID <- stringr::str_match(gffData$attributes, "ID=(.*?)(;.*)?$")[, 2]
gffData$Parent <- stringr::str_match(gffData$attributes, "Parent=(.*?)(;.*)?$")[, 2]
gffData$Name <- stringr::str_match(gffData$attributes, "Name=(.*?)(;.*)?$")[, 2]
gffData$Gene_ID <- stringr::str_match(gffData$ID, "BraA\\d{8}|BraSca\\d{6}")

# ---- Keep entries of the "gene" type that carry a recognised Gene_ID ----
geneData <- gffData[gffData$type %in% "gene", ]
geneIDs <- read.table(file.path(dirPath, "all_gene_ids.csv"), stringsAsFactors = FALSE)
ourGeneData <- subset(geneData, !is.na(Gene_ID))

# ---- Gene sequences ----
ourGenesGR <- GRanges(seqnames = ourGeneData$seqid,
                      ranges = IRanges(start = ourGeneData$start,
                                       end = ourGeneData$end,
                                       names = ourGeneData$Gene_ID),
                      strand = ourGeneData$strand)
ourGenesDSS <- getSeq(BrapaGenome, ourGenesGR)
writeXStringSet(ourGenesDSS, file.path(dirPath, "all_genes.fasta"))

# ---- Promoters: fixed-length window upstream of each gene ----
promoterLength <- 1000
df <- adply(.data = ourGeneData, .margins = 1, .fun = function(row) {
  strand <- as.character(row$strand)
  if (strand == "+") {
    # Clamp at chromosome start on the forward strand
    promoterStart <- max(row$start - promoterLength, 1)
    promoterEnd <- max(row$start - 1, 1)
  } else if (strand == "-") {
    # Clamp at chromosome end on the reverse strand
    promoterStart <- min(row$end + 1, width(BrapaGenome[row$seqid]))
    promoterEnd <- min(row$end + promoterLength, width(BrapaGenome[row$seqid]))
  }
  output <- cbind(row, data.frame(promoterStart, promoterEnd))
})
ourPromoterGR <- GRanges(seqnames = df$seqid,
                         ranges = IRanges(start = df$promoterStart,
                                          end = df$promoterEnd,
                                          names = df$Gene_ID),
                         strand = df$strand)
ourPromoterDSS <- getSeq(BrapaGenome, ourPromoterGR)
fname <- paste("all_genes_promoters_",
               as.character(round(promoterLength / 1000, 0)),
               "k.fasta", sep = "")
writeXStringSet(ourPromoterDSS, file.path(dirPath, fname))

# ==============================================================================
# Misc. stuff

# to read in a fasta:
dirPath <- file.path(rootDir, "data", "Brapa2.5")
Promoters <- readDNAStringSet(file.path(dirPath, "all_genes_promoters_1k.fasta"))

# temp df to test limits of string set indexing
df <- data.frame("seqid" = rep("A01", 3), "strand" = rep("+", 3),
                 "promoterStart" = c(1, 1, 33885991),
                 "promoterEnd" = c(1, 1, 33885992))
8a6d3269a29c3ff6f3cb59fdd89e3c3e0b11cc1a
468d9bb47216ebe06564f014d9b314c7da6be263
/01rscripts/02conventional_sl/02_functions.R
863d9fd86af1aac0854b99012cdf4706f16ee3bd
[]
no_license
toni-ml/sys_review_ml
1f5349daebec8f29d32cbc4e09c1fb177bf17335
149c23be6cdcc67e327701b8643b4a58428561d2
refs/heads/main
2023-04-17T01:11:44.042926
2021-04-29T15:02:46
2021-04-29T15:02:46
344,104,653
0
0
null
null
null
null
UTF-8
R
false
false
28,824
r
02_functions.R
#*******************************************************************************
########################## functions ##########################
#*******************************************************************************

#***************************************************************************************************************************************************************
#*********************** Timing
#***************************************************************************************************************************************************************

# Patched replacement for tictoc's internal toc.outmsg(): formats the elapsed
# time (toc - tic, in seconds) as HH:MM:SS via hms::as_hms() instead of raw
# seconds. `msg` is an optional user label prepended to the time.
# FIX: test length(msg) == 0 *before* is.na(msg) -- is.na(character(0)) yields
# logical(0), and `||` errors on a zero-length operand, so the original order
# could never reach the length guard.
toc.outmsg <- function (tic, toc, msg) {
  if (is.null(msg) || length(msg) == 0 || is.na(msg))
    outmsg <- paste(hms::as_hms(toc - tic), " processing time", sep = "")
  else
    outmsg <- paste(msg, ": ", hms::as_hms(toc - tic), " processing time", sep = "")
  # return the formatted message explicitly (tictoc::toc() prints it)
  outmsg
}
# Swap the patched message builder into the tictoc namespace
assignInNamespace(x = "toc.outmsg", value = toc.outmsg, ns = "tictoc")

#***************************************************************************************************************************************************************
#**************** seed function
#***************************************************************************************************************************************************************

# Build the `seeds` list for caret::trainControl() so that repeated k-fold CV
# is reproducible (also when resampling runs on parallel workers).
#   numbers: number of resamples per repeat (k)
#   repeats: number of CV repeats
#   tunes:   number of tuning-parameter combinations (NULL when none)
#   seed:    master seed from which all per-resample seeds are drawn
# Returns a list of numbers * repeats integer vectors (one per resample, each
# of length numbers + tunes) plus one final single seed for the last model fit.
setSeedsRepeatedCV <- function(numbers = 1, repeats = 1, tunes = NULL, seed = 42) {
  # list_length is the number of resamples; each element holds an integer
  # vector of numbers + tunes seeds (tunes = 0 when no tuning grid is used)
  list_length <- numbers * repeats
  if (is.null(list_length)) {
    seeds <- NULL
  } else {
    set.seed(seed = seed)
    seeds <- vector(mode = "list", length = list_length)
    seeds <- lapply(seeds, function(x)
      sample.int(n = 1000000,
                 size = numbers + ifelse(is.null(tunes), 0, tunes)))
    # one extra seed for the final model fit
    seeds[[length(seeds) + 1]] <- sample.int(n = 1000000, size = 1)
  }
  # return seeds
  seeds
}

#***************************************************************************************************************************************************************
#*********************** Cores
#***************************************************************************************************************************************************************

# Use 90 percent of the available cores; on machines with more than 32 cores,
# only 90 percent of half of them.
tidy_cores <- function(){
  case_when(detectCores() > 32 ~ floor(detectCores()/2 * 0.90),
            detectCores() <= 32 ~ floor(detectCores() * 0.90),
            TRUE ~ 1)
}

#***************************************************************************************************************************************************************
#****************** twoClassSummaryCustom
#***************************************************************************************************************************************************************

# Custom caret summaryFunction for binary classification with the class levels
# c("inclusion", "exclusion"); "inclusion" must be the first (positive) level.
# `data` is caret's evaluation frame with columns obs, pred and one class
# probability column per level; `lev`/`model` belong to caret's
# summaryFunction contract and are unused here.
# Probability-based metrics (ppv/npv/ce/f1Score) threshold P(inclusion) at
# ModelMetrics' default cutoff of 0.5.
# Returns a named numeric vector of evaluation metrics.
twoClassSummaryCustom <- function(data, lev = NULL, model = NULL) {
  lvls <- levels(data$obs)
  if (length(lvls) > 2) {
    stop(paste("Your outcome has", length(lvls), "levels. 
The twoClassSummary() function isn't appropriate."))
  }
  if (!all(levels(data[, "pred"]) == lvls)) {
    stop("levels of observed and predicted data do not match")
  }
  if (!all(lvls == c("inclusion", "exclusion")) == TRUE) {
    stop("please use inclusion as the reference and not exclusion")
    # hence: lvls[1] == "inclusion" and lvls[2] == "exclusion"
  }
  out <- c(
    # Brier score: mean squared difference between P(inclusion) and 0/1 truth
    mean((data[, "inclusion"] - ifelse(data$obs == lvls[2], 0, 1))^2),
    # FIX: accuracy must be computed from the predicted CLASS, not the raw
    # probability -- MLmetrics::Accuracy() compares y_pred == y_true exactly,
    # so passing probabilities as y_pred made "acc" collapse towards 0.
    MLmetrics::Accuracy(y_pred = ifelse(data[, "pred"] == lvls[1], 1, 0),
                        y_true = ifelse(data$obs == lvls[1], 1, 0)),
    ModelMetrics::auc(actual = ifelse(data$obs == lvls[2], 0, 1),
                      predicted = data[, lvls[1]]),
    caret::sensitivity(data[, "pred"], data[, "obs"], positive = lvls[1]),
    caret::specificity(data[, "pred"], data[, "obs"], negative = lvls[2]),
    ModelMetrics::ppv(actual = ifelse(data$obs == lvls[2], 0, 1),
                      predicted = data[, "inclusion"]),
    ModelMetrics::npv(actual = ifelse(data$obs == lvls[2], 0, 1),
                      predicted = data[, "inclusion"]),
    ModelMetrics::ce(actual = ifelse(data$obs == lvls[2], 0, 1),
                     predicted = data[, "inclusion"]),
    ModelMetrics::f1Score(actual = ifelse(data$obs == lvls[2], 0, 1),
                          predicted = data[, "inclusion"]),
    ModelMetrics::logLoss(actual = ifelse(data$obs == lvls[2], 0, 1),
                          predicted = data[, "inclusion"]),
    MLmetrics::Gini(y_pred = data[, lvls[1]],
                    y_true = ifelse(data$obs == lvls[1], 1, 0))
  )
  names(out) <- c("brier_score",
                  "acc",
                  "auc",
                  "sens",
                  "spec",
                  "pos_pred",
                  "neg_pred",
                  "cl_error",
                  "f1",
                  "entropy",
                  "gini"
  )
  out
}

#***************************************************************************************************************************************************************
#********************** tidy_best_model
#*******************************************************************************************************
#***************************************************************************************************************************************************************

# Filter collected evaluation results and return dat_id plus one metric column.
#   data:         evaluation results (one row per dataset configuration)
#   metric:       name of the metric column, as a string
#   rev_conf:     review ids to keep
#   ti_tiab_conf: text inputs to keep ("ti" title only, "tiab" title+abstract)
#   arrange:      TRUE  -> return all rows, sorted by the metric
#                 FALSE -> return only the row(s) with the best metric value
#   desc:         TRUE -> higher metric is better, FALSE -> lower is better
tidy_best_model <- function(data, metric, rev_conf = 1:4,
                            ti_tiab_conf = c("ti", "tiab"),
                            arrange = FALSE, desc = TRUE) {
  data %>%
    filter(
      rev_id %in% rev_conf,
      ti_tiab %in% ti_tiab_conf
    ) %>%
    select(dat_id, !!sym(metric)) %>%
    {if (arrange == TRUE) {
      if(desc == TRUE){
        arrange(., desc(!!sym(metric)))
      } else {
        arrange(., !!sym(metric))
      }
    } else {
      if(desc == TRUE){
        filter(., !!sym(metric) == max(!!sym(metric)))
      } else {
        filter(., !!sym(metric) == min(!!sym(metric)))
      }
    }
    }
}

#***************************************************************************************************************************************************************
#********************** store_eval_dat_null_modell
#***************************************************************************************************************************************************************

# Collect the evaluation results of one fitted NULL model into a one-row tibble.
#   i: row index into the global configuration grid `grid_f`
# Depends on globals defined in the calling script: `ml_model`, `ml_model_path`,
# `grid_f` and `times` (runtime bookkeeping per fitted model file).
# FIX: the fold-level net_benefit10 now uses the 10% threshold in the numerator
# (0.1/(1-0.1)); it previously used 0.05/(1-0.1), inconsistent with the
# train/test net-benefit formulas further below.
store_eval_dat_null_modell <- function(i){
  #+++++++++++++++++++++++
  # load data
  #+++++++++++++++++++++++
  fit_name <- paste0(ml_model, "_", grid_f$data_id_config[i])
  ls_model <- readRDS(paste0("00data/rdata/02conventional_sl/", ml_model_path,
                             "/", fit_name, ".RDS"))

  # fold-level metrics plus net benefit at the 5% and 10% thresholds
  # (net benefit = sens - (1 - spec) * pt/(1 - pt))
  results_temp <- ls_model$fit$resample %>%
    mutate(net_benefit05 = (sens - (1 - spec)*(0.05/(1-0.05))),
           net_benefit10 = (sens - (1 - spec)*(0.1/(1-0.1))))

  # lvls[1] == "inclusion", lvls[2] == "exclusion"
  lvls <- levels(ls_model$test_y_ref)

  # runtime bookkeeping for this model file (hoisted: was re-filtered per column)
  times_i <- times %>%
    filter(token == paste0(ml_model, "_", grid_f$data_id_config[i], ".RDS"))

  #+++++++++++++++++++++++
  # overall
  #+++++++++++++++++++++++
  list(
    dat_id = as.character(grid_f$data_id_config[i]),
    rev_id = as.character(grid_f$rev_id_config[i]),
    sampling = as.character(grid_f$sampling_runs_names_config[i]),
    ti_tiab = as.character(grid_f$input_names1_config[i]),
    stem_lemma = as.character(if_else(grid_f$input_names2_config[i] == "ss", "stem", "lemma")),
    weight = as.character(grid_f$weight_tf_tf_idf_names_config[i]),
    ngram = as.character(grid_f$ngram_config[i]),
    max_tokens = as.numeric(grid_f$max_init_tokens_config[i]),
    max_tokens_names = as.character(grid_f$max_init_tokens_names_config[i]),
    model_short = as.character(ml_model),
    model_type = as.character(ls_model$fit$modelType),
    model_method = as.character(ls_model$fit$method),
    tune_time = as.character(hms::as_hms(ls_model$fit$times$everything[[3]])),
    optim_metric = as.character(ls_model$fit$metric),
    cores = times_i %>% pull(cores),
    time_run = times_i %>% pull(time_run),
    time_load = times_i %>% pull(time_load),
    time_fit = times_i %>% pull(time_fit),
    time_pred_train = times_i %>% pull(time_pred_train),
    time_pred_test = times_i %>% pull(time_pred_test),
    time_save = times_i %>% pull(time_save),
    #+++++++++++++++++++++++
    # results of resamples (mean and sd over the CV folds)
    #+++++++++++++++++++++++
    kcross_brier_score = as.numeric(mean(results_temp$brier_score)),
    kcross_brier_score_sd = as.numeric(sd(results_temp$brier_score)),
    kcross_net_benefit05 = as.numeric(mean(results_temp$net_benefit05)),
    kcross_net_benefit05_sd = as.numeric(sd(results_temp$net_benefit05)),
    kcross_net_benefit10 = as.numeric(mean(results_temp$net_benefit10)),
    kcross_net_benefit10_sd = as.numeric(sd(results_temp$net_benefit10)),
    kcross_acc = as.numeric(mean(results_temp$acc)),
    kcross_acc_sd = as.numeric(sd(results_temp$acc)),
    kcross_auc = as.numeric(mean(results_temp$auc)),
    kcross_auc_sd = as.numeric(sd(results_temp$auc)),
    kcross_sens = as.numeric(mean(results_temp$sens)),
    kcross_sens_sd = as.numeric(sd(results_temp$sens)),
    kcross_spec = as.numeric(mean(results_temp$spec)),
    kcross_spec_sd = as.numeric(sd(results_temp$spec)),
    kcross_pos_pred = as.numeric(mean(results_temp$pos_pred)),
    kcross_pos_pred_sd = as.numeric(sd(results_temp$pos_pred)),
    kcross_neg_pred = as.numeric(mean(results_temp$neg_pred)),
    kcross_neg_pred_sd = as.numeric(sd(results_temp$neg_pred)),
    kcross_cl_error = as.numeric(mean(results_temp$cl_error)),
    kcross_cl_error_sd = as.numeric(sd(results_temp$cl_error)),
    kcross_f1 = as.numeric(mean(results_temp$f1)),
    kcross_f1_sd = as.numeric(sd(results_temp$f1)),
    kcross_entropy = as.numeric(mean(results_temp$entropy)),
    kcross_entropy_sd = as.numeric(sd(results_temp$entropy)),
    kcross_gini = as.numeric(mean(results_temp$gini)),
    kcross_gini_sd = as.numeric(sd(results_temp$gini)),
    #+++++++++++++++++++++++
    # train_cm (confusion matrix + probability metrics on the training split)
    #+++++++++++++++++++++++
    train_brier_score = as.numeric(mean((ls_model$train_pred_prob[["inclusion"]] - ifelse(ls_model$train_y_ref == lvls[2], 0, 1))^2)),
    train_auc = as.numeric(ModelMetrics::auc(actual = ifelse(ls_model$train_y_ref == lvls[2], 0, 1), predicted = ls_model$train_pred_prob[, lvls[1]])),
    train_cl_error = as.numeric(ModelMetrics::ce(actual = ifelse(ls_model$train_y_ref == lvls[2], 0, 1), predicted = ls_model$train_pred_prob[, "inclusion"])),
    train_entropy = as.numeric(ModelMetrics::logLoss(actual = ifelse(ls_model$train_y_ref == lvls[2], 0, 1), predicted = ls_model$train_pred_prob[, "inclusion"])),
    train_gini = as.numeric(MLmetrics::Gini(y_pred = ls_model$train_pred_prob[, lvls[1]], y_true = ifelse(ls_model$train_y_ref == lvls[1], 1, 0))),
    # NOTE: `$tabl` partially matches the confusionMatrix element `$table`
    train_cm_ref_incl_pred_incl = as.numeric(ls_model$train_cm$tabl[[1]]),
    train_cm_ref_incl_pred_excl = as.numeric(ls_model$train_cm$tabl[[2]]),
    train_cm_ref_excl_pred_incl = as.numeric(ls_model$train_cm$tabl[[3]]),
    train_cm_ref_excl_pred_excl = as.numeric(ls_model$train_cm$tabl[[4]]),
    train_accuracy = as.numeric(ls_model$train_cm$overall[[1]]),
    train_kappa = as.numeric(ls_model$train_cm$overall[[2]]),
    train_accuracy_lower = as.numeric(ls_model$train_cm$overall[[3]]),
    train_accuracy_upper = as.numeric(ls_model$train_cm$overall[[4]]),
    train_no_information_rate = as.numeric(ls_model$train_cm$overall[[5]]),
    train_accuracy_pvalue = as.numeric(ls_model$train_cm$overall[[6]]),
    train_mcnemar_pvalue = as.numeric(ls_model$train_cm$overall[[7]]),
    train_sens = as.numeric(ls_model$train_cm$byClass[[1]]),
    train_spec = as.numeric(ls_model$train_cm$byClass[[2]]),
    train_net_benefit05 = as.numeric(ls_model$train_cm$byClass[[1]] - ((1-ls_model$train_cm$byClass[[2]])*(0.05/(1-0.05)))),
    train_net_benefit10 = as.numeric(ls_model$train_cm$byClass[[1]] - ((1-ls_model$train_cm$byClass[[2]])*(0.1/(1-0.1)))),
    train_pos_pred_val = as.numeric(ls_model$train_cm$byClass[[3]]),
    train_neg_pred_val = as.numeric(ls_model$train_cm$byClass[[4]]),
    train_precision = as.numeric(ls_model$train_cm$byClass[[5]]),
    train_recall = as.numeric(ls_model$train_cm$byClass[[6]]),
    train_f1 = as.numeric(ls_model$train_cm$byClass[[7]]),
    train_prevalence = as.numeric(ls_model$train_cm$byClass[[8]]),
    train_detect_rate = as.numeric(ls_model$train_cm$byClass[[9]]),
    train_detect_prev = as.numeric(ls_model$train_cm$byClass[[10]]),
    train_balanced_acc = as.numeric(ls_model$train_cm$byClass[[11]]),
    #+++++++++++++++++++++++
    # test_cm (same metrics on the held-out test split)
    #+++++++++++++++++++++++
    test_brier_score = as.numeric(mean((ls_model$test_pred_prob[["inclusion"]] - ifelse(ls_model$test_y_ref == lvls[2], 0, 1))^2)),
    test_auc = as.numeric(ModelMetrics::auc(actual = ifelse(ls_model$test_y_ref == lvls[2], 0, 1), predicted = ls_model$test_pred_prob[, lvls[1]])),
    test_cl_error = as.numeric(ModelMetrics::ce(actual = ifelse(ls_model$test_y_ref == lvls[2], 0, 1), predicted = ls_model$test_pred_prob[, "inclusion"])),
    test_entropy = as.numeric(ModelMetrics::logLoss(actual = ifelse(ls_model$test_y_ref == lvls[2], 0, 1), predicted = ls_model$test_pred_prob[, "inclusion"])),
    test_gini = as.numeric(MLmetrics::Gini(y_pred = ls_model$test_pred_prob[, lvls[1]], y_true = ifelse(ls_model$test_y_ref == lvls[1], 1, 0))),
    test_cm_ref_incl_pred_incl = as.numeric(ls_model$test_cm$tabl[[1]]),
    test_cm_ref_incl_pred_excl = as.numeric(ls_model$test_cm$tabl[[2]]),
    test_cm_ref_excl_pred_incl = as.numeric(ls_model$test_cm$tabl[[3]]),
    test_cm_ref_excl_pred_excl = as.numeric(ls_model$test_cm$tabl[[4]]),
    test_accuracy = as.numeric(ls_model$test_cm$overall[[1]]),
    test_kappa = as.numeric(ls_model$test_cm$overall[[2]]),
    test_accuracy_lower = as.numeric(ls_model$test_cm$overall[[3]]),
    test_accuracy_upper = as.numeric(ls_model$test_cm$overall[[4]]),
    test_no_information_rate = as.numeric(ls_model$test_cm$overall[[5]]),
    test_accuracy_pvalue = as.numeric(ls_model$test_cm$overall[[6]]),
    test_mcnemar_pvalue = as.numeric(ls_model$test_cm$overall[[7]]),
    test_sens = as.numeric(ls_model$test_cm$byClass[[1]]),
    test_spec = as.numeric(ls_model$test_cm$byClass[[2]]),
    test_net_benefit05 = as.numeric(ls_model$test_cm$byClass[[1]] - ((1-ls_model$test_cm$byClass[[2]])*(0.05/(1-0.05)))),
    # NOTE(review): "est_net_benefit10" looks like a typo for
    # "test_net_benefit10"; the name is kept so downstream column references
    # do not break -- confirm before renaming.
    est_net_benefit10 = as.numeric(ls_model$test_cm$byClass[[1]] - ((1-ls_model$test_cm$byClass[[2]])*(0.1/(1-0.1)))),
    test_pos_pred_val = as.numeric(ls_model$test_cm$byClass[[3]]),
    test_neg_pred_val = as.numeric(ls_model$test_cm$byClass[[4]]),
    test_precision = as.numeric(ls_model$test_cm$byClass[[5]]),
    test_recall = as.numeric(ls_model$test_cm$byClass[[6]]),
    test_f1 = as.numeric(ls_model$test_cm$byClass[[7]]),
    test_prevalence = as.numeric(ls_model$test_cm$byClass[[8]]),
    test_detect_rate = as.numeric(ls_model$test_cm$byClass[[9]]),
    test_detect_prev = as.numeric(ls_model$test_cm$byClass[[10]]),
    test_balanced_acc = as.numeric(ls_model$test_cm$byClass[[11]])
  ) %>%
    bind_rows()
}
#***************************************************************************************************************************************************************
#********************** store_eval_dat
#*******************************************************************************************************
#***************************************************************************************************************************************************************

# Collect the evaluation results of one fitted model into a one-row data frame,
# including skill scores relative to the matching null model.
#   i: row index into the global configuration grid `grid_f`
# Depends on globals defined in the calling script: `ml_model`, `ml_model_path`,
# `fd_data` (base folder of the fitted models), `grid_f` and `times`.
# FIX: the fold-level net_benefit10 now uses the 10% threshold in the numerator
# (0.1/(1-0.1)); it previously used 0.05/(1-0.1), inconsistent with the
# train/test net-benefit formulas further below.
store_eval_dat <- function(i){
  #+++++++++++++++++++++++
  # load data
  #+++++++++++++++++++++++
  fit_name <- paste0(ml_model, "_", grid_f$data_id_config[i])
  fit_name_null <- paste0("null_model_", grid_f$data_id_config[i])
  ls_model <- readRDS(paste0(fd_data, ml_model_path, "/", fit_name, ".RDS"))
  ls_model_null <- readRDS(paste0("00data/rdata/02conventional_sl/00_null_model/",
                                  fit_name_null, ".RDS"))

  # fold-level metrics joined with the null model's Brier score per fold:
  #   brier_skill_score = relative improvement over the null model
  #   brier_comp_score  = absolute improvement over the null model
  #   net benefit       = sens - (1 - spec) * pt/(1 - pt) at pt = 5% and 10%
  results_temp <- ls_model$fit$resample %>%
    left_join(
      ls_model_null$fit$resample %>%
        select(Resample, brier_score_ref = brier_score),
      by = "Resample") %>%
    mutate(brier_skill_score = (brier_score_ref - brier_score)/brier_score_ref,
           brier_comp_score = brier_score_ref - brier_score,
           net_benefit05 = (sens - (1 - spec)*(0.05/(1-0.05))),
           net_benefit10 = (sens - (1 - spec)*(0.1/(1-0.1))))

  # lvls[1] == "inclusion", lvls[2] == "exclusion"
  lvls <- levels(ls_model$test_y_ref)

  # runtime bookkeeping for this model file (hoisted: was re-filtered per column)
  times_i <- times %>%
    filter(token == paste0(ml_model, "_", grid_f$data_id_config[i], ".RDS"))

  #+++++++++++++++++++++++
  # overall
  #+++++++++++++++++++++++
  ls_temp_return <- list(
    dat_id = as.character(grid_f$data_id_config[i]),
    rev_id = as.character(grid_f$rev_id_config[i]),
    sampling = as.character(grid_f$sampling_runs_names_config[i]),
    ti_tiab = as.character(grid_f$input_names1_config[i]),
    stem_lemma = as.character(if_else(grid_f$input_names2_config[i] == "ss", "stem", "lemma")),
    weight = as.character(grid_f$weight_tf_tf_idf_names_config[i]),
    ngram = as.character(grid_f$ngram_config[i]),
    max_tokens = as.numeric(grid_f$max_init_tokens_config[i]),
    max_tokens_names = as.character(grid_f$max_init_tokens_names_config[i]),
    model_short = as.character(ml_model),
    model_type = as.character(ls_model$fit$modelType),
    model_method = as.character(ls_model$fit$method),
    # bestTune is a one-row data frame; as.data.frame() below splices its
    # columns into the result (names are normalised before returning)
    best_tune = ls_model$fit$bestTune,
    tune_time = as.character(hms::as_hms(ls_model$fit$times$everything[[3]])),
    optim_metric = as.character(ls_model$fit$metric),
    cores = times_i %>% pull(cores),
    time_run = times_i %>% pull(time_run),
    time_load = times_i %>% pull(time_load),
    time_fit = times_i %>% pull(time_fit),
    time_pred_train = times_i %>% pull(time_pred_train),
    time_pred_test = times_i %>% pull(time_pred_test),
    time_save = times_i %>% pull(time_save),
    #+++++++++++++++++++++++
    # results of resamples (mean and sd over the CV folds)
    #+++++++++++++++++++++++
    kcross_brier_score = as.numeric(mean(results_temp$brier_score)),
    kcross_brier_score_sd = as.numeric(sd(results_temp$brier_score)),
    kcross_brier_score_ref = as.numeric(mean(results_temp$brier_score_ref)),
    kcross_brier_score_ref_sd = as.numeric(sd(results_temp$brier_score_ref)),
    kcross_brier_skill_score = as.numeric(mean(results_temp$brier_skill_score)),
    kcross_brier_skill_score_sd = as.numeric(sd(results_temp$brier_skill_score)),
    kcross_brier_comp_score = as.numeric(mean(results_temp$brier_comp_score)),
    kcross_brier_comp_score_sd = as.numeric(sd(results_temp$brier_comp_score)),
    kcross_net_benefit05 = as.numeric(mean(results_temp$net_benefit05)),
    kcross_net_benefit05_sd = as.numeric(sd(results_temp$net_benefit05)),
    kcross_net_benefit10 = as.numeric(mean(results_temp$net_benefit10)),
    kcross_net_benefit10_sd = as.numeric(sd(results_temp$net_benefit10)),
    kcross_acc = as.numeric(mean(results_temp$acc)),
    kcross_acc_sd = as.numeric(sd(results_temp$acc)),
    kcross_auc = as.numeric(mean(results_temp$auc)),
    kcross_auc_sd = as.numeric(sd(results_temp$auc)),
    kcross_sens = as.numeric(mean(results_temp$sens)),
    kcross_sens_sd = as.numeric(sd(results_temp$sens)),
    kcross_spec = as.numeric(mean(results_temp$spec)),
    kcross_spec_sd = as.numeric(sd(results_temp$spec)),
    kcross_pos_pred = as.numeric(mean(results_temp$pos_pred)),
    kcross_pos_pred_sd = as.numeric(sd(results_temp$pos_pred)),
    kcross_neg_pred = as.numeric(mean(results_temp$neg_pred)),
    kcross_neg_pred_sd = as.numeric(sd(results_temp$neg_pred)),
    kcross_cl_error = as.numeric(mean(results_temp$cl_error)),
    kcross_cl_error_sd = as.numeric(sd(results_temp$cl_error)),
    kcross_f1 = as.numeric(mean(results_temp$f1)),
    kcross_f1_sd = as.numeric(sd(results_temp$f1)),
    kcross_entropy = as.numeric(mean(results_temp$entropy)),
    kcross_entropy_sd = as.numeric(sd(results_temp$entropy)),
    kcross_gini = as.numeric(mean(results_temp$gini)),
    kcross_gini_sd = as.numeric(sd(results_temp$gini)),
    #+++++++++++++++++++++++
    # train_cm (confusion matrix + probability metrics on the training split)
    #+++++++++++++++++++++++
    train_brier_score = as.numeric(mean((ls_model$train_pred_prob[["inclusion"]] - ifelse(ls_model$train_y_ref == lvls[2], 0, 1))^2)),
    train_brier_score_ref = as.numeric(mean((ls_model_null$train_pred_prob[["inclusion"]] - ifelse(ls_model_null$train_y_ref == lvls[2], 0, 1))^2)),
    train_brier_skill_score = as.numeric(1 - mean((ls_model$train_pred_prob[["inclusion"]] - ifelse(ls_model$train_y_ref == lvls[2], 0, 1))^2)/mean((ls_model_null$train_pred_prob[["inclusion"]] - ifelse(ls_model_null$train_y_ref == lvls[2], 0, 1))^2)),
    train_brier_comp_score = as.numeric(mean((ls_model_null$train_pred_prob[["inclusion"]] - ifelse(ls_model_null$train_y_ref == lvls[2], 0, 1))^2) - mean((ls_model$train_pred_prob[["inclusion"]] - ifelse(ls_model$train_y_ref == lvls[2], 0, 1))^2)),
    train_auc = as.numeric(ModelMetrics::auc(actual = ifelse(ls_model$train_y_ref == lvls[2], 0, 1), predicted = ls_model$train_pred_prob[, lvls[1]])),
    train_cl_error = as.numeric(ModelMetrics::ce(actual = ifelse(ls_model$train_y_ref == lvls[2], 0, 1), predicted = ls_model$train_pred_prob[, "inclusion"])),
    train_entropy = as.numeric(ModelMetrics::logLoss(actual = ifelse(ls_model$train_y_ref == lvls[2], 0, 1), predicted = ls_model$train_pred_prob[, "inclusion"])),
    train_gini = as.numeric(MLmetrics::Gini(y_pred = ls_model$train_pred_prob[, lvls[1]], y_true = ifelse(ls_model$train_y_ref == lvls[1], 1, 0))),
    # NOTE: `$tabl` partially matches the confusionMatrix element `$table`
    train_cm_ref_incl_pred_incl = as.numeric(ls_model$train_cm$tabl[[1]]),
    train_cm_ref_incl_pred_excl = as.numeric(ls_model$train_cm$tabl[[2]]),
    train_cm_ref_excl_pred_incl = as.numeric(ls_model$train_cm$tabl[[3]]),
    train_cm_ref_excl_pred_excl = as.numeric(ls_model$train_cm$tabl[[4]]),
    train_accuracy = as.numeric(ls_model$train_cm$overall[[1]]),
    train_kappa = as.numeric(ls_model$train_cm$overall[[2]]),
    train_accuracy_lower = as.numeric(ls_model$train_cm$overall[[3]]),
    train_accuracy_upper = as.numeric(ls_model$train_cm$overall[[4]]),
    train_no_information_rate = as.numeric(ls_model$train_cm$overall[[5]]),
    train_accuracy_pvalue = as.numeric(ls_model$train_cm$overall[[6]]),
    train_mcnemar_pvalue = as.numeric(ls_model$train_cm$overall[[7]]),
    train_sens = as.numeric(ls_model$train_cm$byClass[[1]]),
    train_spec = as.numeric(ls_model$train_cm$byClass[[2]]),
    train_net_benefit05 = as.numeric(ls_model$train_cm$byClass[[1]] - ((1-ls_model$train_cm$byClass[[2]])*(0.05/(1-0.05)))),
    train_net_benefit10 = as.numeric(ls_model$train_cm$byClass[[1]] - ((1-ls_model$train_cm$byClass[[2]])*(0.1/(1-0.1)))),
    train_pos_pred_val = as.numeric(ls_model$train_cm$byClass[[3]]),
    train_neg_pred_val = as.numeric(ls_model$train_cm$byClass[[4]]),
    train_precision = as.numeric(ls_model$train_cm$byClass[[5]]),
    train_recall = as.numeric(ls_model$train_cm$byClass[[6]]),
    train_f1 = as.numeric(ls_model$train_cm$byClass[[7]]),
    train_prevalence = as.numeric(ls_model$train_cm$byClass[[8]]),
    train_detect_rate = as.numeric(ls_model$train_cm$byClass[[9]]),
    train_detect_prev = as.numeric(ls_model$train_cm$byClass[[10]]),
    train_balanced_acc = as.numeric(ls_model$train_cm$byClass[[11]]),
    #+++++++++++++++++++++++
    # test_cm (same metrics on the held-out test split)
    #+++++++++++++++++++++++
    test_brier_score = as.numeric(mean((ls_model$test_pred_prob[["inclusion"]] - ifelse(ls_model$test_y_ref == lvls[2], 0, 1))^2)),
    test_brier_score_ref = as.numeric(mean((ls_model_null$test_pred_prob[["inclusion"]] - ifelse(ls_model_null$test_y_ref == lvls[2], 0, 1))^2)),
    test_brier_skill_score = as.numeric(1 - mean((ls_model$test_pred_prob[["inclusion"]] - ifelse(ls_model$test_y_ref == lvls[2], 0, 1))^2)/mean((ls_model_null$test_pred_prob[["inclusion"]] - ifelse(ls_model_null$test_y_ref == lvls[2], 0, 1))^2)),
    test_brier_comp_score = as.numeric(mean((ls_model_null$test_pred_prob[["inclusion"]] - ifelse(ls_model_null$test_y_ref == lvls[2], 0, 1))^2) - mean((ls_model$test_pred_prob[["inclusion"]] - ifelse(ls_model$test_y_ref == lvls[2], 0, 1))^2)),
    test_auc = as.numeric(ModelMetrics::auc(actual = ifelse(ls_model$test_y_ref == lvls[2], 0, 1), predicted = ls_model$test_pred_prob[, lvls[1]])),
    test_cl_error = as.numeric(ModelMetrics::ce(actual = ifelse(ls_model$test_y_ref == lvls[2], 0, 1), predicted = ls_model$test_pred_prob[, "inclusion"])),
    test_entropy = as.numeric(ModelMetrics::logLoss(actual = ifelse(ls_model$test_y_ref == lvls[2], 0, 1), predicted = ls_model$test_pred_prob[, "inclusion"])),
    test_gini = as.numeric(MLmetrics::Gini(y_pred = ls_model$test_pred_prob[, lvls[1]], y_true = ifelse(ls_model$test_y_ref == lvls[1], 1, 0))),
    test_cm_ref_incl_pred_incl = as.numeric(ls_model$test_cm$tabl[[1]]),
    test_cm_ref_incl_pred_excl = as.numeric(ls_model$test_cm$tabl[[2]]),
    test_cm_ref_excl_pred_incl = as.numeric(ls_model$test_cm$tabl[[3]]),
    test_cm_ref_excl_pred_excl = as.numeric(ls_model$test_cm$tabl[[4]]),
    test_accuracy = as.numeric(ls_model$test_cm$overall[[1]]),
    test_kappa = as.numeric(ls_model$test_cm$overall[[2]]),
    test_accuracy_lower = as.numeric(ls_model$test_cm$overall[[3]]),
    test_accuracy_upper = as.numeric(ls_model$test_cm$overall[[4]]),
    test_no_information_rate = as.numeric(ls_model$test_cm$overall[[5]]),
    test_accuracy_pvalue = as.numeric(ls_model$test_cm$overall[[6]]),
    test_mcnemar_pvalue = as.numeric(ls_model$test_cm$overall[[7]]),
    test_sens = as.numeric(ls_model$test_cm$byClass[[1]]),
    test_spec = as.numeric(ls_model$test_cm$byClass[[2]]),
    test_net_benefit05 = as.numeric(ls_model$test_cm$byClass[[1]] - ((1-ls_model$test_cm$byClass[[2]])*(0.05/(1-0.05)))),
    # NOTE(review): "est_net_benefit10" looks like a typo for
    # "test_net_benefit10"; the name is kept so downstream column references
    # do not break -- confirm before renaming.
    est_net_benefit10 = as.numeric(ls_model$test_cm$byClass[[1]] - ((1-ls_model$test_cm$byClass[[2]])*(0.1/(1-0.1)))),
    test_pos_pred_val = as.numeric(ls_model$test_cm$byClass[[3]]),
    test_neg_pred_val = as.numeric(ls_model$test_cm$byClass[[4]]),
    test_precision = as.numeric(ls_model$test_cm$byClass[[5]]),
    test_recall = as.numeric(ls_model$test_cm$byClass[[6]]),
    test_f1 = as.numeric(ls_model$test_cm$byClass[[7]]),
    test_prevalence = as.numeric(ls_model$test_cm$byClass[[8]]),
    test_detect_rate = as.numeric(ls_model$test_cm$byClass[[9]]),
    test_detect_prev = as.numeric(ls_model$test_cm$byClass[[10]]),
    test_balanced_acc = as.numeric(ls_model$test_cm$byClass[[11]])
  ) %>%
    as.data.frame()

  # as.data.frame() joins nested data-frame columns with ".", so the bestTune
  # parameters come out prefixed with "tune."; normalise to "_".
  # (note: pattern "tune." is a regex where '.' matches any character)
  names(ls_temp_return) <- str_replace_all(names(ls_temp_return),
                                           pattern = "tune.",
                                           replacement = "tune_")
  return(ls_temp_return)
}
eb630333c9efb772f70fee8eae4e387a3c86909d
29f8f3ee59c366ea408633d183614bc39b49b26d
/UEM_CCS/[CCS] suicide_geo_socioeconomical.R
5468ec6eaeba2e6aa181e37569a27fd8fe17f2a7
[]
no_license
souzajvp/analytical_codes
92db345dc75f128c2f25fb7b28f0891139ffea98
dcc49662253ba1dbd4f54b8c4caea40232632783
refs/heads/master
2023-05-23T06:06:12.058469
2021-06-07T18:11:00
2021-06-07T18:11:00
null
0
0
null
null
null
null
UTF-8
R
false
false
16,843
r
[CCS] suicide_geo_socioeconomical.R
#####################################################################################
# BASIC R STATISTICS TEMPLATE
#####################################################################################
#####################################################################################
# SETTING ENVIRONMENT
#####################################################################################
# PACKAGE INSTALLATION CODES (run once, then keep commented out)
#install.packages("Hmisc")
#install.packages("car")
#install.packages("psych")
#install.packages("nortest")
#install.packages("ggplot2")
#install.packages("pastecs")
#install.packages("repmis")
#install.packages("mvnormtest")
#install.packages("polycor")
# PACKAGE LOADING CODE
# Load the packages needed for the analysis
#library(Hmisc) # All packages must be installed with install.packages() first
#lapply(c("Hmisc","car","psych","nortest","ggplot2","pastecs","repmis","mvnormtest","polycor","lavaan","nFactors","qgraph","semTools"), library, character.only=T)
library(reshape)   # melt()/cast() reshaping
library(ggplot2)   # plotting
library(gdata)     # remove.vars()
library(repmis)    # optional Dropbox data import
library(qgraph)    # network graphs
#####################################################################################
# IMPORTING DATA
#####################################################################################
# LOADING DATA FROM A .CSV FILE
#data<-read.csv("/Users/rpietro/Desktop/MDD_BIPD_Baseline.csv",sep=",")
# The quoted string is the path to the directory on your computer where the data is stored
# Import data from Dropbox, in .csv format
# Instructions here http://goo.gl/Ofa7gQ
#data <- repmis::source_DropboxData("moral_competence_leo_thesis.csv","fdilgafkauxzpz2",sep = ",",header = TRUE)
# NOTE(review): machine-specific absolute path -- adjust before running elsewhere
data<-read.csv("/home/joao/Dropbox/datasets/luciano papers/suicide_gis.csv",sep=',')
#data <- repmis::source_DropboxData("suicide_gis.csv","i99ndw3so6ur7m6",sep = ",",header = TRUE)
#############################################################################
# DATA MANAGEMENT
#############################################################################
# Inspect the imported data
str(data)
# Working copy without the municipality geo-code id
data_1<-remove.vars(data,c("CD_GEOCODM"))
# Socio-economic indicators plus suicide mortality ratios (SMR; age bands
# 15-19 / 20-24 / 25-29) for the 2000 and 2010 census years.
# NOTE(review): column case differs between years (Chil_lab00 vs Chil_Lab10)
# -- presumably as spelled in the CSV; confirm against the data file.
data_2000<-with(data,data.frame(school2000,Inform2000,Agric2000,Income00,Unemploy00,Chil_lab00,IDHM00,SMR151908,SMR202408,SMR252908))
data_2010<-with(data,data.frame(school2010,Inform2010,Agric2010,Income10,Unemploy10,Chil_Lab10,IDHM10,SMR151908,SMR202408,SMR252908))
#creating data set to melt
#age_data<-with(data,data.frame(LAG151998,LAG151908,LAG202498,LAG202408,LAG252998,LAG252908,CD_GEOCODM))
#melting data to interpolate columns and rows
#test_data<-remove.vars(data,c("NM_MUNICIP"))
#age_data<-melt(test_data,id=c("CD_GEOCODM"))
#age_data$value<-as.numeric(as.character(age_data$value))
#creating factor variable to categorize time series (2000 and 2012)
#age_data$time_serie<-rep(c(rep("2000",length(data$LAG151998)),rep("2000",length(data$LAG151998)),rep("2000",length(data$LAG151998)),rep("2012",length(data$LAG151908)),rep("2012",length(data$LAG151908)),rep("2012",length(data$LAG151908))),6)
#creating factor variable to categorize group ages
#age_data$age_group<-rep(c(rep("15-19",length(data$LAG151998)),rep("20-24",length(data$LAG151998)),rep("25-29",length(data$LAG151998)),rep("15-19",length(data$LAG151908)),rep("20-24",length(data$LAG151908)),rep("25-29",length(data$LAG151908))),6)
#age_data$age_group<-as.factor(age_data$age_group)
#age_data$time_serie<-as.factor(age_data$time_serie)
#age_data$socio_variable<-as.factor(c(rep(c("LAG"),399*6),rep(c("LINF"),399*6),rep(c("LSCH"),399*6),rep(c("LINC"),399*6),rep(c("LUNE"),399*6),rep(c("LCHI"),399*6)))
#############################################################################
# CLUSTERING ANALYSIS
#############################################################################
# Scree-style plot of the within-group sum of squares for 1..nc k-means
# solutions; used to pick the number of clusters.
wssplot <- function(data, nc=15, seed=1234){
  wss <- (nrow(data)-1)*sum(apply(data,2,var))
  for (i in 2:nc){
    set.seed(seed)
    wss[i] <- sum(kmeans(data, centers=i)$withinss)}
  plot(1:nc, wss, type="b", xlab="Number of Clusters",
       ylab="Within groups sum of squares")}
#1 standardize data (socio-economic indicators only, columns 1-7)
cluster_2000<-scale(data_2000[,1:7])
#2 determine number of clusters
wssplot(cluster_2000)
library(NbClust)
set.seed(1234)
nc <- NbClust(cluster_2000, min.nc=2, max.nc=15, method="kmeans")
table(nc$Best.n[1,])
barplot(table(nc$Best.n[1,]),
        xlab="Numer of Clusters", ylab="Number of Criteria",
        main="Number of Clusters Chosen by 26 Criteria")
#3 K-means cluster analysis
set.seed(1234)
fit.km <- kmeans(cluster_2000, 3, nstart=25)
fit.km$size
fit.km$centers
aggregate(data_2000[,1:7], by=list(cluster=fit.km$cluster), mean)
#ct.km <- table(wine$Type, fit.km$cluster)
#library(flexclust)
#randIndex(ct.km)
# NOTE(review): these k-means group labels are overwritten by the
# hierarchical-cluster labels assigned below -- confirm which is intended.
data_2000$groups<-fit.km$cluster
data_2000$groups<-car::recode(data_2000$groups, "1='Pattern1';2='Pattern2';3='Pattern3'")
# Hierarchical clustering (2000): Euclidean distance on the scaled indicators
d <- dist(scale(data_2000[,1:7]), method = "euclidean") # distance matrix
# NOTE(review): method="ward" is the pre-R-3.1 name; current R expects
# "ward.D" or "ward.D2" -- confirm which criterion is intended.
fit <- hclust(d, method="ward")
plot(fit) # display dendrogram
data_2000$groups <- cutree(fit, k=4) # cut tree into 4 clusters
# draw dendrogram with red borders around the 4 clusters
rect.hclust(fit, k=4, border="red")
# NOTE(review): with k=4 only clusters 1-3 are relabelled; cluster 4 keeps its
# numeric label -- verify this is intended.
data_2000$groups<-car::recode(data_2000$groups, "1='Pattern1';2='Pattern2';3='Pattern3'")
# Mean/SD of each indicator per cluster (2000)
data_graph<-remove.vars(data_2000,c("SMR151908","SMR202408","SMR252908"))
descriptive_graph<-melt(data_graph,id=c("groups"))
timemeans <- cast(descriptive_graph, groups~variable, mean)
timesd <- cast(descriptive_graph, groups~variable, sd)
# Hierarchical clustering (2010), cut at k=3
d <- dist(scale(data_2010[,1:7]), method = "euclidean") # distance matrix
fit <- hclust(d, method="ward")
plot(fit) # display dendrogram
data_2010$groups <- cutree(fit, k=3) # cut tree into 3 clusters
# draw dendrogram with red borders around the 3 clusters
rect.hclust(fit, k=3, border="red")
data_2010$groups<-car::recode(data_2010$groups, "1='Pattern2';2='Pattern1';3='Pattern3'")
# Mean/SD of each indicator per cluster (2010)
data_graph<-remove.vars(data_2010,c("SMR151908","SMR202408","SMR252908"))
descriptive_graph<-melt(data_graph,id=c("groups"))
timemeans <- cast(descriptive_graph, groups~variable, mean)
timesd <- cast(descriptive_graph, groups~variable, sd)
#descriptive_graph_2<-melt(timemeans,id=c("groups"))
#############################################################################
# ENVIRONMENT RISK
#############################################################################
# Logistic regressions: suicide-rate group (per age band and period) on the
# socio-economic cluster pattern.
# NOTE(review): logistic.display() comes from epicalc/epiDisplay, which is not
# loaded above -- confirm the package is attached elsewhere.
logmodel2000_1<-glm(Group1519_9802 ~ as.factor(data_2000$groups),family=binomial, data=data)
summary(logmodel2000_1)
logistic.display(logmodel2000_1)
#anova(reglogGEU)
#exp(coef(model1_death)) # exponentiated coefficients
#exp(confint(model1_death)) # 95% CI for exponentiated coefficients
#predict(model1_death, type="response") # predicted values
#residuals(model1_death, type="deviance") # residuals
logmodel2000_2<-glm(Group2024_9802 ~ as.factor(data_2000$groups),family=binomial, data=data)
summary(logmodel2000_2)
logistic.display(logmodel2000_2)
#anova(reglogGEU)
#exp(coef(model1_death)) # exponentiated coefficients
#exp(confint(model1_death)) # 95% CI for exponentiated coefficients
#predict(model1_death, type="response") # predicted values
#residuals(model1_death, type="deviance") # residuals
logmodel2000_3<-glm(Group2529_9802 ~ as.factor(data_2000$groups),family=binomial, data=data)
summary(logmodel2000_3)
logistic.display(logmodel2000_3)
#anova(reglogGEU)
#exp(coef(model1_death)) # exponentiated coefficients
#exp(confint(model1_death)) # 95% CI for exponentiated coefficients
#predict(model1_death, type="response") # predicted values
#residuals(model1_death, type="deviance") # residuals
# Same models against the 2010 clusters
logmodel2010_1<-glm(Group1519_0812 ~ as.factor(data_2010$groups),family=binomial, data=data)
summary(logmodel2010_1)
logistic.display(logmodel2010_1)
#anova(reglogGEU)
#exp(coef(model1_death)) # exponentiated coefficients
#exp(confint(model1_death)) # 95% CI for exponentiated coefficients
#predict(model1_death, type="response") # predicted values
#residuals(model1_death, type="deviance") # residuals
logmodel2010_2<-glm(Group2024_0812 ~ as.factor(data_2010$groups),family=binomial, data=data)
summary(logmodel2010_2) logistic.display(logmodel2010_2) #anova(reglogGEU) #exp(coef(model1_death)) # exponentiated coefficients #exp(confint(model1_death)) # 95% CI for exponentiated coefficients #predict(model1_death, type="response") # predicted values #residuals(model1_death, type="deviance") # residuals logmodel2010_3<-glm(Group2529__0812 ~ as.factor(data_2010$groups),family =binomial, data=data) summary(logmodel2010_3) logistic.display(logmodel2010_3) #anova(reglogGEU) #exp(coef(model1_death)) # exponentiated coefficients #exp(confint(model1_death)) # 95% CI for exponentiated coefficients #predict(model1_death, type="response") # predicted values #residuals(model1_death, type="deviance") # residuals ######################################################### #PLOTTING ######################################################### ### PLOTING MODEL #plot_odds<-function(x, title = NULL){ tmp_1<-data.frame(rbind(exp(coef(logmodel2000_1)),exp(coef(logmodel2000_2)),exp(coef(logmodel2000_3))),rbind(exp(confint(logmodel2000_1)),exp(confint(logmodel2000_2)),exp(confint(logmodel2000_3)))) #odds<-tmp[-1,] names(tmp_1)<-c('OR', 'lower', 'upper') tmp_1$vars<-c('15-19','20-24','25-29') tmp_1$facet<-c('2000 Time Series') #ticks<-c(seq(.1, 1, by =.1), seq(0, 10, by =1), seq(10, 100, by =10)) #plot_odds<-function(x, title = NULL){ tmp_2<-data.frame(rbind(exp(coef(logmodel2010_1)),exp(coef(logmodel2010_2)),exp(coef(logmodel2010_3))),rbind(exp(confint(logmodel2010_1)),exp(confint(logmodel2010_2)),exp(confint(logmodel2010_3)))) #odds<-tmp[-1,] names(tmp_2)<-c('OR', 'lower', 'upper') tmp_2$vars<-c('15-19','20-24','25-29') tmp_2$facet<-c('2010 Time Series') #ticks<-c(seq(.1, 1, by =.1), seq(0, 10, by =1), seq(10, 100, by =10)) #plot_odds<-function(x, title = NULL){ tmp_3<-data.frame(rbind(exp(coef(x_1C)),exp(coef(x_2C)),exp(coef(x_3C))),rbind(exp(confint(x_1C)),exp(confint(x_2C)),exp(confint(x_3C)))) #odds<-tmp[-1,] names(tmp_3)<-c('OR', 'lower', 'upper') tmp_3$vars<-c('24 hours','1 
week','Overall') tmp_3$facet<-c('Violence') #ticks<-c(seq(.1, 1, by =.1), seq(0, 10, by =1), seq(10, 100, by =10)) odds<-rbind(tmp_1,tmp_2) ggplot(odds, aes(y= OR, x = reorder(vars, OR))) + geom_point() + geom_errorbar(aes(ymin=lower, ymax=upper), width=.2) + #scale_y_log10(breaks=ticks, labels = ticks) + geom_hline(yintercept = 1, linetype=2) + coord_flip() + labs(title = "", x = 'Variables', y = 'OR') + theme_bw() + facet_wrap(~ facet,ncol=1) ############################################################################# #NETWORK APPROACH ############################################################################# str(data_2000) network_data<-cor(data_2000) qsggr<-list(Outcome=c(8,9,10),Predictors=c(1,2,3,4,5,6,7)) nodeNames=c("Education","Informality","Agriculture","Income","Unemployment","Child Labor","IDH","Suicidality 15-19","Suicadality 20-24","Suicidality 25-29") qsgg3<-qgraph(network_data,layout="spring",vsize=6,esize=20,graph="glasso",sampleSize=nrow(data_2000),legend.cex = 0.5,GLratio=1.5) qsgg2<-qgraph(network_data,layout="spring",vsize=6,esize=20,graph="pcor",threshold="holm",sampleSize=nrow(data_2000),legend.cex = 0.5,GLratio=1.5) qsgg1<-qgraph(network_data,layout="spring",vsize=6,esize=20,legend.cex = 0.5,GLratio=1.5) Lqsg<-averageLayout(qsgg1,qsgg2,qsgg3) qsgG1<-qgraph(network_data,layout=Lqsg,nodeNames=nodeNames,vsize=6,esize=20,legend.cex = 0.3,cut = 0.3, maximum = 1, minimum = 0, esize = 20,vsize = 5, repulsion = 0.8,groups=qsggr,gray=TRUE,color=c("gray80","gray50"),legend=T) qsgG2<-qgraph(network_data,layout=Lqsg,nodeNames=nodeNames,vsize=6,esize=20,graph="pcor",legend.cex = 0.3,cut = 0.1, maximum = 1, minimum = 0, esize = 20,vsize = 5, repulsion = 0.8,groups=qsggr,gray=TRUE,color=c("gray80","gray50"),legend=F) qsgG3<-qgraph(network_data,layout=Lqsg,nodeNames=nodeNames,vsize=6,esize=20,graph="glasso",sampleSize=nrow(data_2000),legend.cex = 0.3,cut = 0.1, maximum = 1, minimum = 0, esize = 20,vsize = 5, repulsion = 
0.8,groups=qsggr,gray=F,color=c("gray80","white")) x<-centrality(qsgG3) data_line_plot<-data.frame(x$ShortestPathLengths[1:7,8:10]) names_line_plot<-c("Education","Informality","Agriculture","Income","Unemployment","Child Labor","IDH")#,"Suicidality 15-19","Suicadality 20-24","Suicidality 25-29") #Some organiztion needed for the codes value<-with(data_line_plot,c(X1,X2,X3)) Profile<-c(rep(c("Suicidality 15-19"),7),rep(c("Suicadality 20-24"),7),rep(c("Suicadality 25-29"),7)) Themes<-rep(names_line_plot,3) data_plot<-data.frame(value,Profile,Themes) #using GGPLOT TO PLOT THE LINES (Still fixing variable names) ggplot(data=data_plot, aes(y=value, x=Themes, group=Profile,color=Profile)) + geom_line(size=1.5) + geom_point(size=3,fill="white") + ylab("") + xlab("") + theme_bw()+ scale_colour_manual(values=c("#999999","darkred","black"))+theme(axis.text.x = element_text(angle= 270, hjust = 0, colour = "black",size=14)) centralityPlot(qsgG3) clusteringPlot(qsgG3) g<-as.igraph(qsgG3) walktrap.community() data_2000$suicide<-rowMeans(data_2000[8:10]) directed_network<-with(data_2000,data.frame(school2000,Inform2000,Agric2000,Income00,Unemploy00,Chil_lab00,IDHM00,suicide)) library("pcalg") #data(gmI) suffStat <- list(C = cor(directed_network), n = nrow(directed_network)) pc.fit <- pc(suffStat, indepTest=gaussCItest, p = ncol(directed_network), alpha = 0.01) qgraph(pc.fit) ############################################################################# #MULTIPLE REGRESSION OR PREDICTIVE MODEL APPROACH ############################################################################# library(bnlearn) library(Rgraphviz) data_2000$suicide<-rowMeans(data_2000[8:10]) directed_network<-with(data_2000,data.frame(school2000,Inform2000,Agric2000,Income00,Unemploy00,Chil_lab00,IDHM00,suicide)) ## Manual graph construction varnames=c("Education","Informality","Agriculture","Income","Unemployment","Child Labor","IDH","Suicide") ag=empty.graph(varnames) ## Automated graph contruction #data(asia) 
names(directed_network) = varnames ########################### # Learning RB Structure ########################### # Growth Shrink Algorithmm <<<<<<<<<<<<<<<< rb_gs = gs(directed_network) graphviz.plot(rb_gs) # IAMB Algorithm <<<<<<<<<<<<<<<< rb_ia = iamb(directed_network) graphviz.plot(rb_ia) # Hill Climbing Algorithm <<<<<<<<<<<<<<<< rb_hc = hc(directed_network) graphviz.plot(rb_hc) # Learning CPT for each node in the graph fitted = bn.fit(rb_hc, directed_network) print(fitted$SoB) print(fitted) fitted gs(directed_network, debug = TRUE) score(rb_hc, data = directed_network) nparams(rb_hc) # plot the network learned by gs(). #res = set.arc(rb_hc) strength = arc.strength(rb_hc, directed_network) strength.plot(rb_hc, strength) # add another (non-significant) arc and plot the network again. res = set.arc(res, "A", "C") strength = arc.strength(res, learning.test, criterion = "x2") strength.plot(rb_hc) averaged.network(strength, nodes, threshold) ############################################################################# #ANCOVA ############################################################################# #merging both time series points education variables #age_data$education<-with(data,rep(c(M_SCH00,M_SCH10),3)) #ANOVA 1 #fitting anova model anova1<-subset(age_data,age_data$socio_variable=="LAG") fit <- aov(value ~ time_serie*age_group, data=anova1) summary(fit) #fiding predictions patterns pred1 <- predict(fit) #ANOVA 2 #fitting anova model anova2<-subset(age_data,age_data$socio_variable=="LINF") fit <- aov(value ~ time_serie*age_group, data=anova2) summary(fit) #fiding predictions patterns pred2 <- predict(fit) #ANOVA 3 #fitting anova model anova3<-subset(age_data,age_data$socio_variable=="LCHI") fit <- aov(value ~ time_serie*age_group, data=anova3) summary(fit) #fiding predictions patterns pred3 <- predict(fit) #ANOVA 4 #fitting anova model anova4<-subset(age_data,age_data$socio_variable=="LINC") fit <- aov(value ~ time_serie*age_group, data=anova4) summary(fit) 
#fiding predictions patterns pred4 <- predict(fit) #ANOVA 5 #fitting anova model anova5<-subset(age_data,age_data$socio_variable=="LSCH") fit <- aov(value ~ time_serie*age_group, data=anova5) summary(fit) #fiding predictions patterns pred5 <- predict(fit) #ANOVA 6 #fitting anova model anova6<-subset(age_data,age_data$socio_variable=="LUNE") fit <- aov(value ~ time_serie*age_group, data=anova6) summary(fit) #fiding predictions patterns pred6 <- predict(fit) pred<-c(pred1,pred2,pred3,pred4,pred5,pred6) #plotting prediction values ggplot(data = cbind(age_data, pred), aes(age_group, value, color=time_serie, group=time_serie)) + geom_line(aes(y=pred)) + facet_grid(. ~ socio_variable)
909abc15f679a9af46a3659aff1c4e19a46654ac
521bb895b5edc039fcccd2f535f3efe0f9fc788a
/R/prediction_arima0.R
7cfd14cb6146d73b8ed15e02c6624c6d2b8408cc
[]
no_license
cran/prediction
f44e5446d260fbf06d2b8c64ea076b6c58381b17
b3dbe3f40752da53c95e03a05fc9e8dba91ddc9d
refs/heads/master
2021-01-11T06:53:35.254761
2019-06-17T18:40:03
2019-06-17T18:40:03
72,375,519
0
0
null
null
null
null
UTF-8
R
false
false
71
r
prediction_arima0.R
#' @rdname prediction
#' @export
# S3 method for objects of class "arima0": reuses prediction.ar unchanged,
# i.e. arima0 fits are handled exactly like ar fits by this package.
prediction.arima0 <- prediction.ar
7adfe54310d9ce7a59f48918a88326bd2074bed4
50711e687a44aeb126149528f2e5deec6e261cfb
/correlation_deconvolution_cells.R
cef35e00133f780d49d26fa2255a64f4ee83cc49
[]
no_license
BlackburnLab/immuneCellEnrichment
83ba534213e7a7de6a21b6854149b182b3a56545
3f23106995dca2a7037b8f21f1499f438ef194a3
refs/heads/master
2023-04-02T13:22:44.139834
2021-03-14T18:58:49
2021-03-14T18:58:49
352,647,806
0
0
null
null
null
null
UTF-8
R
false
false
15,669
r
correlation_deconvolution_cells.R
# Compare CIBERSORT-deconvolved immune-cell fractions between challenge arms
# (Baseline / BCG / PPD / Saline) across three TB cohorts: previous TB
# (prevTB), recurrent TB (rectb) and latent TB infection (LTBI).
# NOTE(review): hard-coded setwd() ties the script to one machine; attach()
# calls below mask the search path — explicit df$col access would be safer.
setwd("C:/Users/Javan_Okendo/Desktop/cybersort/decon_statistical_test")
library(dplyr)

# --- Previous-TB cohort -----------------------------------------------------
prevTB <- read.csv("prevTB.csv",header = T,sep = ',')
head(prevTB)
attach(prevTB)
# Show the g levels
levels(prevTB$g)
#Previous TB
# Box plots
# Plot weight by g and color by g
library("ggpubr")
colnames(prevTB)
#B.cell naive
par(mfrow=c(4,4)) # To plot two images side-by-side
# Boxplot of neutrophil fraction by challenge group.
ggboxplot(prevTB, x = "Group", y = "Neutrophils", color = "Group",
          palette = c("#00AFBB", "#E7B800", "#FC4E07","#202020"),
          order = c("Baseline","BCG","PPD","Saline"),
          ylab = "Neutrophils", xlab = "Challenge gs",title = "Neutrophils")
# Mean +/- SE line plot with jittered points for regulatory T cells.
# NOTE(review): title says "Mast.cells.resting" but y is the Treg column —
# looks like a copy-paste leftover; confirm intended title.
ggline(prevTB, x = "Group", y = "T.cells.regulatory..Tregs.",
       add = c("mean_se", "jitter"),
       order = c("Baseline","BCG","PPD","Saline"),
       ylab = "T.cells.regulatory..Tregs.", xlab = "Challenge g",title = "Mast.cells.resting")
#computation of Kruskal-Wallis test
kruskal.test(Macrophages.M0 ~ Group, data = prevTB)
#Multiple pairwise-comparison between gs
# Benjamini-Hochberg adjusted pairwise Wilcoxon tests between groups.
pairwise.wilcox.test(prevTB$Macrophages.M0, prevTB$Group, p.adjust.method = "BH")

# --- Recurrent-TB cohort ----------------------------------------------------
#Recurrent TB patient g
rectb <- read.csv("reccurentTB_cibersort_results.csv",header = T, sep = ',')
attach(rectb)
colnames(rectb)
# this will be done interactively for the 22 immune cells per g
ggboxplot(rectb, x = "g", y = "Neutrophils", color = "g",
          palette = c("#00AFBB", "#E7B800", "#FC4E07","#202020"),
          order = c("Baseline","BCG","PPD","Saline"),
          ylab = "Neutrophils", xlab = "Challenge gs",title = "Neutrophils")

# --- LTBI cohort ------------------------------------------------------------
#Latent TB infection
LTBI <- read.csv("LTBI_cibersort_results.csv",header = T,sep = ',')
attach(LTBI)
colnames(LTBI)
# this will be done interactively for the 22 immune cells per g
ggboxplot(LTBI, x = "challeng_group", y = "B.cells.naive", color = "challeng_group",
          palette = c("#00AFBB", "#E7B800", "#FC4E07","#202020"),
          order = c("Baseline","BCG","PPD","Saline"),
          ylab = "B.cells.naive", xlab = "Challenge gs",title = "B.cells.naive")
#=======================================================================================================
# Boxplots of CIBERSORT cell fractions for the LTBI cohort, one plot per
# immune-cell column, grouped by challenge arm (Baseline/BCG/PPD/Saline).
#
# The original code repeated the same ggboxplot() call 21 times, once per
# cell column; the Eosinophils copy carried a stale ylab =
# "Mast.cells.activated" from copy-paste.  A single loop removes the
# duplication and makes every y-axis label match the plotted column.
ltbi_cells <- c(
  "B.cells.memory", "Plasma.cells", "T.cells.CD8", "T.cells.CD4.naive",
  "T.cells.CD4.memory.resting", "T.cells.CD4.memory.activated",
  "T.cells.follicular.helper", "T.cells.regulatory..Tregs.",
  "T.cells.gamma.delta", "NK.cells.resting", "NK.cells.activated",
  "Monocytes", "Macrophages.M0", "Macrophages.M1", "Macrophages.M2",
  "Dendritic.cells.resting", "Dendritic.cells.activated",
  "Mast.cells.resting", "Mast.cells.activated", "Eosinophils", "Neutrophils"
)
for (cell in ltbi_cells) {
  # print() is required so ggplot objects render from inside a loop
  print(ggboxplot(LTBI, x = "challeng_group", y = cell,
                  color = "challeng_group",
                  palette = c("#00AFBB", "#E7B800", "#FC4E07", "#202020"),
                  order = c("Baseline", "BCG", "PPD", "Saline"),
                  ylab = cell, xlab = "Challenge gs", title = cell))
}
#Assessing the distribution of data
# Histograms of every CIBERSORT cell fraction in each cohort, as an informal
# normality check before the non-parametric tests that follow.
#
# The original listed 22 hist() calls three times over (prevTB, rectb, LTBI);
# the nested loop below draws the same 66 histograms (same main titles, same
# cohort order) without the triplication.
par(mfrow = c(5, 5))
cell_cols <- c(
  "B.cells.naive", "B.cells.memory", "Plasma.cells", "T.cells.CD8",
  "T.cells.CD4.naive", "T.cells.CD4.memory.resting",
  "T.cells.CD4.memory.activated", "T.cells.follicular.helper",
  "T.cells.regulatory..Tregs.", "T.cells.gamma.delta", "NK.cells.resting",
  "NK.cells.activated", "Monocytes", "Macrophages.M0", "Macrophages.M1",
  "Macrophages.M2", "Dendritic.cells.resting", "Dendritic.cells.activated",
  "Mast.cells.resting", "Mast.cells.activated", "Eosinophils", "Neutrophils"
)
# Previous TB, recurrent TB, then LTBI — the original plotting order.
for (cohort in list(prevTB, rectb, LTBI)) {
  for (cell in cell_cols) {
    hist(cohort[[cell]], main = cell)
  }
}
#statistical analysis of the immune cell profiles from different patient groups
# Group-wise tests on the deconvolved fractions: pairwise Wilcoxon (BH
# adjusted) plus one-way ANOVA with Tukey HSD post-hoc on the LTBI cohort.
# NOTE(review): three attach() calls stack identically named columns on the
# search path — explicit df$col access (as used below) avoids the ambiguity.
#Data to be used
attach(prevTB)
attach(rectb)
attach(LTBI)
#computation of Kruskal-Wallis test
colnames(LTBI)
#Multiple pairwise-comparison between gs
pairwise.wilcox.test(LTBI$Mast.cells.activated, LTBI$challeng_group, p.adjust.method = "BH")
#Post-hoc statistical test
# One-way ANOVA of naive B-cell fraction by challenge arm, then Tukey HSD.
aov.ex1 = aov(LTBI$B.cells.naive~LTBI$challeng_group)
summary(aov.ex1,intercept = T)
TukeyHSD(aov.ex1, conf.level=.95)
# Same post-hoc comparison, plotted, for the M0 macrophage fraction.
plot(TukeyHSD(aov(LTBI$Macrophages.M0~LTBI$challeng_group), conf.level=.95))
87608c0649cf59e4def416717d9b7daf409d5c64
726abd47c598f785f90342266c00d514c75b9478
/src/10-supplement-imputation-effect.R
a2406c41fe9ca42cd24fa0d1c0e864b545aef2de
[]
no_license
yangxhcaf/surrogacy-among-biodiversity-dimensions
50c4e248c7842e71df08929af6c4fe1a77b3c499
f69bd5eced3c0258e36baa43371e6b3d51511c1a
refs/heads/master
2021-09-24T16:18:17.351197
2018-10-11T18:15:58
2018-10-11T18:15:58
null
0
0
null
null
null
null
UTF-8
R
false
false
2,816
r
10-supplement-imputation-effect.R
##############################################################
##### -- Good surrogacy among biodiversity dimensions -- #####
##############################################################
################### SUPPLEMENTARY ANALYSIS ###################

#########################################################
##### -- Testing for influence of imputed values -- #####
#########################################################
# For each taxon (mammals, reptiles, birds) this script fits a PGLS
# intercept-only model of log body mass with lambda estimated by ML — i.e. a
# phylogenetic-signal estimate — twice: on the full trait/tree data and on
# the data with imputed species dropped, then saves both fits as RDS.
# NOTE(review): the trait/tree objects (traits_*_complete, tree_*_complete,
# traits_imputed_*, traits_*_noImputed) are not created here — presumably
# built by earlier scripts in src/; confirm they are in the workspace.

##### Load library
library(caper)

##### Quantify phylogenetic signal of traits with and without imputed taxa

#### Mammals
# Full data: match traits to tree by species name, dropping non-overlap.
mamm_comparative_data_full <- comparative.data(data = traits_mamm_complete,
                                               phy = tree_mamm_complete,
                                               names.col = "species",
                                               na.omit = TRUE)
# Prune the imputed species from the tree, then rebuild the comparative data.
tree_mamm_noImputed <- drop.tip(tree_mamm_complete, traits_imputed_mamm$species)
mamm_comparative_data_noImputed <- comparative.data(data = traits_mamm_noImputed,
                                                    phy = tree_mamm_noImputed,
                                                    names.col = "species",
                                                    na.omit = TRUE)
# Intercept-only PGLS with ML lambda = phylogenetic-signal estimate.
mamm_full_phylosig <- pgls(body_mass_log ~ 1, mamm_comparative_data_full, lambda='ML')
saveRDS(mamm_full_phylosig, "rds/metrics/mamm_full_phylosig.rds")
mamm_noImputed_phylosig <- pgls(body_mass_log ~ 1, mamm_comparative_data_noImputed, lambda='ML')
saveRDS(mamm_noImputed_phylosig, "rds/metrics/mamm_noImputed_phylosig.rds")

#### Reptiles
##### Quantify phylogenetic signal of traits with and without imputed taxa
rept_comparative_data_full <- comparative.data(data = traits_rept_complete,
                                               phy = tree_rept_complete,
                                               names.col = "species",
                                               na.omit = TRUE)
tree_rept_noImputed <- drop.tip(tree_rept_complete, traits_imputed_rept$species)
rept_comparative_data_noImputed <- comparative.data(data = traits_rept_noImputed,
                                                    phy = tree_rept_noImputed,
                                                    names.col = "species",
                                                    na.omit = TRUE)
rept_full_phylosig <- pgls(body_mass_log ~ 1, rept_comparative_data_full, lambda='ML')
saveRDS(rept_full_phylosig, "rds/metrics/rept_full_phylosig.rds")
rept_noImputed_phylosig <- pgls(body_mass_log ~ 1, rept_comparative_data_noImputed, lambda='ML')
saveRDS(rept_noImputed_phylosig,
        "rds/metrics/rept_noImputed_phylosig.rds")

#### Birds
##### Quantify phylogenetic signal of traits with and without imputed taxa
bird_comparative_data_full <- comparative.data(data = traits_bird_complete,
                                               phy = tree_bird_complete,
                                               names.col = "species",
                                               na.omit = TRUE)
tree_bird_noImputed <- drop.tip(tree_bird_complete, traits_imputed_bird$species)
bird_comparative_data_noImputed <- comparative.data(data = traits_bird_noImputed,
                                                    phy = tree_bird_noImputed,
                                                    names.col = "species",
                                                    na.omit = TRUE)
bird_full_phylosig <- pgls(body_mass_log ~ 1, bird_comparative_data_full, lambda='ML')
saveRDS(bird_full_phylosig, "rds/metrics/bird_full_phylosig.rds")
bird_noImputed_phylosig <- pgls(body_mass_log ~ 1, bird_comparative_data_noImputed, lambda='ML')
saveRDS(bird_noImputed_phylosig, "rds/metrics/bird_noImputed_phylosig.rds")
8b008238ab591ef5b27e646b172f0aaf089fd72a
54bf9bc76aaa7e1fec5961efb12bfb636fa90a2e
/Archive/NIMBioS.code/shinyparameters.old/shiny_parameters/ui.R
bc0bd4b21659359fde63244ae24826f527c7f5c0
[]
no_license
christianparobek/skeleSim
25d63dc3eeee6d8218d19e0f011229cfb843d053
0d61409497283ac1db129379b479639261695f83
refs/heads/master
2020-03-28T05:36:45.448623
2020-02-26T21:55:51
2020-02-26T21:55:51
32,469,895
3
9
null
2017-11-22T16:30:16
2015-03-18T16:16:29
HTML
UTF-8
R
false
false
4,674
r
ui.R
# skeleSim Shiny UI: a navbar app with pages for loading/saving parameter
# files, an intro questionnaire, general/scenario configuration and
# simulator-specific settings.  Most widgets are uiOutput() placeholders
# rendered server-side.  `ssClass` is expected to come from setup.R.
source("setup.R")
shinyUI(
  navbarPage(
    "skelesim",
    # --- File page: load/save parameter objects, launch runs ---------------
    tabPanel(
      "File",
      navlistPanel(
        tabPanel(
          "Load parameters",
          fileInput("fileParams", h4("Choose .Rdata File")),
          uiOutput("uiSelectParamObj"),
          textOutput("txtSelectedTitle")
        ),
        tabPanel(
          "Save parameters",
          textInput("txtTitle", label = h4("Project Title"), value = ssClass@title),
          h4("Save parameter file"),
          uiOutput("uiBtnSaveParams"),
          textOutput("txtSaveStatus")
        ),
        tabPanel(
          "Run Simulator",
          uiOutput("btnRun"),
          textOutput("txtRunStatus")
        )
      )
    ),
    # --- Intro questionnaire: checkboxes steering simulator choice ---------
    tabPanel("Intro questions",
      sidebarLayout(
        sidebarPanel(
          # textInput("simname", "Simulation Name:", "Sim Parameters #1"),
          checkboxInput("snps", label = "Do you have SNP data?", value = FALSE),
          checkboxInput("non.diploid", label = "Is your data other than diploid?", value = FALSE),
          checkboxInput("marker.num", label = "Do you want to simulate many markers?", value = FALSE),
          checkboxInput("pop.size", label = "Do you have large population sizes?", value = FALSE),
          checkboxInput("complex.hist", label = "Do you have a complex history to simulate?", value=FALSE),
          checkboxInput("deep.time", label = "Are you looking at deep time frames", value = FALSE),
          checkboxInput("demography", label = "Do you want to include demography?", value = FALSE),
          checkboxInput("management", label = "Does your question involve management decisions?", value = FALSE),
          checkboxInput("completion.time", label = "Do you need a short completion time", value = FALSE),
          checkboxInput("computer", label = "Do you have large computer capacity?", value = FALSE),
          # for the file uploader
          fileInput("file", label = h3("OR choose file to upload"))
        ),
        mainPanel(
          includeMarkdown("helpfiles/help-questions.md"),
          h3(textOutput("simname", container = span)),
          tableOutput("values")
        )
      )),
    # --- General configuration: server-rendered inputs ---------------------
    tabPanel("General Config.",
      sidebarLayout(
        sidebarPanel(
          uiOutput("titleUI"),
          uiOutput("quietUI"),
          uiOutput("coalescentUI"),
          uiOutput("repsUI"),
          uiOutput("wdUI")
        ),
        mainPanel()
      )),
    # --- Scenario configuration: migration matrix/graph --------------------
    tabPanel("Scenario Config.",
      sidebarLayout(
        sidebarPanel(
          uiOutput("scenarioNumberUI"),
          uiOutput("numpopsUI"),
          uiOutput("numlociUI"),
          uiOutput("mutrateUI"),
          br(),
          actionButton("repopulateMig","Rewrite migration matrix"),
          uiOutput("migmodelUI"),
          uiOutput("migrateUI"),
          uiOutput("rows"),
          uiOutput("cols"),
          uiOutput("distanceFun")
        ),
        mainPanel(
          tabsetPanel(
            tabPanel("Migration matrix",
                     includeMarkdown("helpfiles/help-migration.md"),
                     tableOutput("migmat")),
            tabPanel("Migration graph",
                     plotOutput("networkPlot"))
            # disabled debug tab, kept for reference:
            # ,
            # tabPanel("debug",
            #          textOutput("scenDebug"))
          ))
      )),
    # --- Simulator-specific settings; history table only for coalescent ----
    tabPanel("Specific simulation config.",
      sidebarLayout(
        sidebarPanel(
          uiOutput("infsitesUI"),
          uiOutput("simhistUI")
        ),
        mainPanel(
          conditionalPanel(
            condition = "input.coalescent == true",
            tabsetPanel(
              tabPanel("Simcoal history specification",tableOutput("simhistTbl")),
              tabPanel("Graphical view",
                       includeMarkdown("helpfiles/help-history.md"),
                       plotOutput("simhistPlot", click= "histplotClick",
                                  dblclick = "histplotDblclick"))
            )
          )
        ))
  )
)
1409f015e6ee209f760f37449e8f961840bbc326
bc4a4a37217b325d1b913d8dc750210c91c41db3
/Cluster analysis/Cluster_detection.R
40b6a3ec28d1abed5d939882ffed21425c670672
[]
no_license
jshannon75/ACFB_analysis
b2490aefa6f7be8c139338672bbc61eced895774
d9da0c669b67eca3a5ac11049f7b420c0f854817
refs/heads/master
2021-06-10T18:02:31.870545
2017-02-08T16:15:16
2017-02-08T16:15:16
null
0
0
null
null
null
null
UTF-8
R
false
false
844
r
Cluster_detection.R
# Cluster detection on SampleData1: non-metric MDS to 2-D, then k-means and
# hierarchical clustering on the MDS coordinates.
library(MASS)
library(ggplot2)
library(plotly)

#Create fit from sample data
item.dist <- dist(SampleData1)
fit <- isoMDS(item.dist, k = 2)
fit

# plot solution
x <- fit$points[, 1]
y <- fit$points[, 2]
plot(x, y)
# Fix: the original labelled points from `SampleData`, but the distances were
# computed from `SampleData1`; label from the same object (apparent typo —
# confirm no separate `SampleData` object was intended).
text(x, y, labels = row.names(SampleData1), cex = .7)

#kmeans
dist.sample <- data.frame(cbind(x, y))
cluster.sample <- kmeans(dist.sample, 5)
cluster.sample$cluster <- as.factor(cluster.sample$cluster)
# Renamed from `plot` so the base plotting function is not shadowed below.
km.plot <- ggplot(dist.sample, aes(x, y, color = cluster.sample$cluster)) +
  geom_point(size = 10)
ggplotly(km.plot)

#hierarchical cluster
mds.dist <- dist(dist.sample)
hc <- hclust(mds.dist)
plot(hc)
groups <- cutree(hc, k = 8)

#Print HCA clusters (function from Practical Data Science in R)
# Prints, for each of the k clusters, its label and the Type/Score rows of
# the clustered data.
#   labels - integer cluster assignment per row of `data`
#   k      - number of clusters to print
#   data   - data frame holding the clustered rows; defaults to the global
#            SampleData1, preserving the original (hard-coded) behaviour.
print_clusters <- function(labels, k, data = SampleData1) {
  for (i in seq_len(k)) {
    print(paste("cluster", i))
    print(data[labels == i, c("Type", "Score")])
  }
}
print_clusters(groups, 8)
4fc93503086506785d7f5ca123b30c00135ab674
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
/A_github/sources/authors/2774/plotly/data.R
6defe37548678c31b1e44e6f2644b7a931022ea2
[]
no_license
Irbis3/crantasticScrapper
6b6d7596344115343cfd934d3902b85fbfdd7295
7ec91721565ae7c9e2d0e098598ed86e29375567
refs/heads/master
2020-03-09T04:03:51.955742
2018-04-16T09:41:39
2018-04-16T09:41:39
128,578,890
5
0
null
null
null
null
UTF-8
R
false
false
344
r
data.R
#' Wind data #' #' Description TBD. #' #' @format A data frame with three variables: `r`, `t`, #' `nms`. "wind" #' Mic data #' #' Description TBD. #' #' @format A data frame with three variables: `r`, `t`, #' `nms`. "mic" #' Hobbs data #' #' Description TBD. #' #' @format A data frame with three variables: `r`, `t`, #' `nms`. "hobbs"
b6d3e30880fe0ba5ad90fe4fb173e4ad720f9161
6a85c3b1ce095a59a241585251c79e56a9709519
/HW4/PCA code.R
a72da136bbb7354308dd6adc444d4e27305932d5
[]
no_license
ApoorvaSrinivasa/Applied-Machine-Learning
a0402e12d2f94d6cb033af56dc7f9bc6b6831c8a
71e9e23bfe6af221d3b85b8860c32bba9d6d5fef
refs/heads/master
2020-04-23T19:28:15.284833
2019-09-01T06:33:54
2019-09-01T06:33:54
171,405,132
1
0
null
null
null
null
UTF-8
R
false
false
7,719
r
PCA code.R
#Homework 4 setwd("C:/JuliaT/Univ of I/CS 498 AML/Homework 4") #downloaded dataset from https://www.cs.toronto.edu/~kriz/cifar.html. ##########ref for reading the files - https://stackoverflow.com/questions/32113942/importing-cifar-10-data-set-to-r ############################################# Read Data ############################################################## # Read binary file and convert to integer vectors labels = read.table("cifar-10-batches-bin/batches.meta.txt") images.rgb = list() images.lab = list() num.images = 10000 # Set to 10000 to retrieve all images per file to memory flat_list = list() # Cycle through all 5 binary files for (f in 1:5) { to.read = file(paste("cifar-10-batches-bin/data_batch_", f, ".bin", sep=""), "rb") for(i in 1:num.images) { l = readBin(to.read, integer(), size=1, n=1, endian="big") r = as.integer(readBin(to.read, raw(), size=1, n=1024, endian="big")) g = as.integer(readBin(to.read, raw(), size=1, n=1024, endian="big")) b = as.integer(readBin(to.read, raw(), size=1, n=1024, endian="big")) index = num.images * (f-1) + i images.rgb[[index]] = data.frame(r, g, b) images.lab[[index]] = l+1 } close(to.read) remove(l,r,g,b,f,i,index, to.read) } # add the test data to.read = file("cifar-10-batches-bin/test_batch.bin", "rb") for(i in 1:num.images) { l = readBin(to.read, integer(), size=1, n=1, endian="big") r = as.integer(readBin(to.read, raw(), size=1, n=1024, endian="big")) g = as.integer(readBin(to.read, raw(), size=1, n=1024, endian="big")) b = as.integer(readBin(to.read, raw(), size=1, n=1024, endian="big")) index = 50000 + i images.rgb[[index]] = data.frame(r, g, b) images.lab[[index]] = l+1 } close(to.read) remove(l,r,g,b,i,index, to.read) ############################################ Flatten Iamges ######################################################################## numSamples = length(images.rgb) #flatten the dataframes to vectors for(i in 1:numSamples){ test = images.rgb[[i]] test = c(t(test)) flat_list[[i]] = 
as.numeric(test) } #matrix for images only images_mat = matrix(unlist(flat_list), byrow=TRUE, nrow=length(flat_list) ) #matrix for the labels only images_mat_l = matrix(unlist(images.lab), byrow=TRUE, nrow=length(images.lab) ) #image + label images_mat_full = cbind(images_mat, images_mat_l) ########################################### Part A ################################################################################### #num of classes numClass = length(unique(images_mat_full[, 3073])) #to store the mean images store_means = matrix(NA, nrow = numClass, ncol = ncol(images_mat)) #store_means = list() for(i in 1:numClass) { #subsets by labels label = subset(images_mat_full,images_mat_full[,3073] == i) #get only the pixels label_px = label[,-c(3073)] image_mean = apply(label_px,2,mean) store_means[i, ] = image_mean } #convert store_means and labels to proper format for image display input mean_label_list = list() mean_rgb_list = list() img_df = data.frame(nrow = 1024, ncol = 3) #prepare for input for(i in 1:10){ mean_label_list[[i]] = i img_df = data.frame(r = (store_means[i, 1:1024]), g = (store_means[i, 1025:2048]), b = (store_means[i, 2049:3072])) mean_rgb_list[[i]] = img_df } #function for image display showImage = function(index) { img = mean_rgb_list[[index]] img_r = matrix(img$r, ncol=32, byrow = TRUE) img_g = matrix(img$g, ncol=32, byrow = TRUE) img_b = matrix(img$b, ncol=32, byrow = TRUE) img_col = rgb(img_r, img_g, img_b, maxColorValue = 255) dim(img_col) = dim(img_r) #display library(grid) grid.raster(img_col, interpolate=FALSE) remove(img, img_r, img_g, img_b, img_col) # plus label labels[[1]][mean_label_list[[index]]] } showImage(10) #pca SSE = rep(0, 10) for (i in 1:numClass){ #subsets by labels label = subset(images_mat_full,images_mat_full[,3073] == i) #get only the pixels label_px = label[,-c(3073)] pca_data = prcomp(label_px, center=TRUE) #reconstruct X_hat = pca_data$x[,1:20] %*% t(pca_data$rotation[,1:20]) X_hat = scale(X_hat, center = 
-store_means[i,], scale = FALSE) error = mean(rowSums((label_px-X_hat)^2)) SSE[i] = error } #barplot barplot(SSE, main="SSE Plot Part A", col="lightblue", names.arg=c("airplane","automobile","bird","cat","deer","dog","frog","horse","ship","truck")) ########################################### Part B ################################################################################### #cacl euclidean distance using dist function euclidean = as.matrix(dist(store_means), method = "euclidean") D_2 = euclidean*euclidean #to a file write.table(D_2,"partb_distances.csv", row.names=FALSE, col.names = FALSE, sep = ',') #MDS part B ones_mat = matrix(1, nrow = numClass, ncol = numClass) I = diag(numClass) A = I - 1/numClass * (ones_mat %*% t(ones_mat)) W = - 1/2 * A %*% D_2 %*% t(A) lamda = eigen(W) eigen_values = sqrt(sort(lamda$values, decreasing=TRUE)[1:2]) #Construct U eigen_vectors = lamda$vectors[,1:2] Y = eigen_vectors * eigen_values colnames(Y) <- c("component1","component2") rownames(Y) <- c("airplane","automobile","bird","cat","deer","dog","frog","horse","ship","truck") plot(Y[, 1], Y[, 2], xlab = "Component 1", ylab = "Component 2", col = "darkorange", pch = 16, cex = 2, main = "Scatter plot - part B" ) text(Y[, 1], Y[, 2], labels = rownames(Y), col = "blue", cex= 1, pos=3) ########################################### Part C ################################################################################# calcXhat = function(data_mat, eigen_vec, mean_vec, num_comp) { x_mat = matrix(0, nrow = nrow(data_mat), ncol=ncol(data_mat)) for(i in 1:nrow(data_mat)) { for(j in 1:num_comp){ x_mat[i,] = x_mat[i,] + t(eigen_vec[,j])%*%(data_mat[i,]-mean_vec)%*%t(eigen_vec[,j]) } x_mat[i,] = mean_vec + x_mat[i,] } return (x_mat) } D_c = matrix(0, nrow = numClass, ncol = numClass) for (i in 1:numClass){ mean_a = store_means[i,] #subsets by labels label = subset(images_mat_full,images_mat_full[,3073] == i) #get only the pixels data_a = label[,-c(3073)] pca_data_a = prcomp(data_a, 
center=TRUE) for (j in 1:numClass){ mean_b = store_means[j,] label_b = subset(images_mat_full,images_mat_full[,3073] == j) #get only the pixels data_b = label_b[,-c(3073)] pca_data_b = prcomp(data_b, center=TRUE) X_hat_a = calcXhat(as.matrix(data_a), pca_data_b$rotation, mean_a, 20) error_a = mean(rowSums((data_a - X_hat_a)^2)) X_hat_b = calcXhat(as.matrix(data_b), pca_data_a$rotation, mean_b, 20) error_b = mean(rowSums((data_b - X_hat_b)^2)) D_c[i,j] = (error_a + error_b)/2 } } write.table(D_c,"partc_distances.csv", row.names=FALSE, col.names = FALSE, sep = ',') #MDS part C #we have A already W_c = - 1/2 * A %*% D_c %*% t(A) lamda_c = eigen(W_c) eigen_values_c = sqrt(sort(lamda_c$values, decreasing=TRUE)[1:2]) #Construct U eigen_vectors_c = lamda_c$vectors[,1:2] Y_c = eigen_vectors_c * eigen_values_c colnames(Y_c) <- c("component1","component2") rownames(Y_c) <- c("airplane","automobile","bird","cat","deer","dog","frog","horse","ship","truck") plot(Y_c[, 1], Y_c[, 2], xlab = "Component 1", ylab = "Component 2", col = "green", pch = 16, cex = 2, main = "Scatter plot - part C" ) text(Y_c[, 1], Y_c[, 2], labels = rownames(Y_c), col = "blue", cex= 1, pos=3)
cd1bc975d47030825bd39feff382aace545b2c5f
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/ypr/tests/test-sr.R
c807a5702506c26407127167867e32f4e8acd915
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
1,235
r
test-sr.R
context("sr") test_that("sr", { bh <- ypr_sr(ypr_population()) ri <- ypr_sr(ypr_population(BH = 0L)) expect_is(bh, "data.frame") expect_identical(names(bh), c("alpha", "beta", "Rk", "phi", "phiF", "R0", "R0F", "S0", "S0F")) expect_identical(names(ri), names(bh)) expect_equal(bh$alpha, 0.0006708921, check.attributes = FALSE) expect_equal(bh$beta, bh$alpha, check.attributes = FALSE) expect_equal(bh$Rk, ypr_population()$Rk, check.attributes = FALSE) expect_equal(bh$R0F, 0.1343839, check.attributes = FALSE, tolerance = 1e-07) expect_equal(bh$phi, 4471.658, check.attributes = FALSE, tolerance = 1e-07) expect_equal(bh$phiF, 1721.956, check.attributes = FALSE, tolerance = 1e-06) expect_equal(bh$S0, 1.583806, check.attributes = FALSE, tolerance = 1e-06) expect_equal(bh$S0F, 0.1746688, check.attributes = FALSE, tolerance = 1e-06) expect_identical(unname(bh$R0F), bh(bh$R0F * bh$phiF, bh$alpha, bh$beta)) expect_equal(unname(ri$R0F), ri(ri$R0F * ri$phiF, ri$alpha, ri$beta)) expect_identical(ri[c("alpha", "phi", "phiF")], bh[c("alpha", "phi", "phiF")]) expect_equal(ri$beta, 0.0002468074, check.attributes = FALSE) expect_equal(ri$R0F, 0.3395686, check.attributes = FALSE, tolerance = 1e-07) })
f7536602737cc32e485b0692ca5c88b967ea9d81
c55ad53d28079d4f75eefd3845dcbed1efd470d3
/reverse/app.R
a390de64fd2a6290c8bb241c145a06faebaa5006
[ "MIT", "CC0-1.0" ]
permissive
boltomli/MyShinyApps
8d2860b3d17739c0686d0b4087efbaaae80727e4
59b7edc132d8e97f7cf66958d8eaf5b7acc6da3f
refs/heads/master
2021-01-01T03:54:27.519946
2020-07-10T14:57:15
2020-07-10T14:57:15
56,049,788
6
0
null
null
null
null
UTF-8
R
false
false
2,631
r
app.R
library(tuneR) library(XML) library(httr) library(jsonlite) library(shiny) ui <- fluidPage( titlePanel("Say something and play it in reverse"), sidebarLayout( sidebarPanel( textInput("text", "text to convert:", "文本"), tags$hr(), helpText("View", a("source code on GitHub", href="https://github.com/boltomli/MyShinyApps", target="_blank")), ), mainPanel( uiOutput("funny_result"), ) ) ) server <- function(input, output) { output$funny_result <- renderUI({ text <- input$text ssml <- newXMLDoc() ns <- c(xml = "http://www.w3.org/2000/xmlns") speak <- newXMLNode("speak", namespace = ns) addAttributes(speak, "version" = "1.0", "xml:lang" = "zh-cn") voice <- newXMLNode("voice", namespace = ns) addAttributes(voice, "xml:lang" = "zh-cn", "xml:gender" = "Female", "name" = "Microsoft Server Speech Text to Speech Voice (zh-CN, HuihuiRUS)") textNode <- newXMLTextNode(text = text) addChildren(voice, textNode) addChildren(speak, voice) addChildren(ssml, speak) issueTokenUri <- config::get("token_url") key <- config::get("subscription_key") tokenResult <- POST(issueTokenUri, add_headers("Ocp-Apim-Subscription-Key" = key), body = "") token <- content(tokenResult, as = "text") ttsUri <- config::get("tts_url") synthesisResult <- POST(ttsUri, content_type("application/ssml+xml"), add_headers( "X-Microsoft-OutputFormat" = "riff-16khz-16bit-mono-pcm", "Authorization" = paste("Bearer ", token), "X-Search-AppId" = "07D3234E49CE426DAA29772419F436CA", "X-Search-ClientID" = "1ECFAE91408841A480F00935DC390960" ), body = toString.XMLNode(ssml)) synthesis <- content(synthesisResult, as = "raw") pcmfile <- file("temp.wav", "wb") writeBin(con = pcmfile, object = synthesis) close(pcmfile) pcmObj <- readWave('temp.wav') pcmObj@left <- rev(pcmObj@left) writeWave(pcmObj, 'www/reversed.wav') tags$audio(src = 'reversed.wav', type ="audio/wav", controls = T, autoplay = F) }) } shinyApp(ui = ui, server = server)
ef39d9c9181ee4d64832f0bee38d1c2e790adf9c
f5c18b46de92ca5399f8ff0a2d4cb01fa92412b5
/utilities/topic_modeling/numTopic_Optimize.R
c11bdd1e16216d9ee468374fd73802eaf7a08d83
[]
no_license
observermedia/content-analysis
f70829d19e6285f8fe97748e4fb58c699ca95e61
3acb2b825e101fee1de1fca0fe9859563c574867
refs/heads/master
2021-01-20T12:10:53.985665
2016-12-20T23:46:43
2016-12-20T23:46:43
70,710,998
1
0
null
null
null
null
UTF-8
R
false
false
2,428
r
numTopic_Optimize.R
require(rjson) #require(RJSONIO) require(NLP) require(tm) require(topicmodels) require(SnowballC) require(LDAvis) json_file <- fromJSON(file='clean_data2.json') json_file <- lapply(json_file,function(x){ x[sapply(x,is.null)]<-NA unlist(x) }) #bind together as dataframe and remove NA's df <- as.data.frame(do.call('rbind',json_file)) df<-na.omit(df) df$content <- as.character(df$content) #remove json_file rm(json_file) #Want to sample b/c to taxing to run on full data set sample_size <- 1000 df<-df[sample(nrow(df), sample_size), ] #TODO: Fix so it reads in more than 1 document to corpus #remove anything between [ ] df$content <-gsub("\\[[^\\]]*\\]", "",df$content,perl = TRUE) #remove html tags from content df$content <- gsub("<.*?>", "", df$content, perl = TRUE) #remove newline character df$content <- gsub("[\r\n]", "", df$content, perl = TRUE) #wtf.. I need to encode the corpus to UTF-8-MAC? .. Doesnt work on my ubuntu machine.. but works on mac content<- VCorpus(VectorSource(df$content)) #Currently runs on 2 cores, specificy mc.cores param to adjust content <- tm_map(content,content_transformer(function(x) iconv(x, to='UTF-8-MAC', sub='byte')), mc.cores=1) content.clean <- tm_map(content, stripWhitespace) #strip whitespace content.clean <- tm_map(content.clean,removeNumbers) #remove numbers content.clean <- tm_map(content.clean,removePunctuation) #remove punctuation content.clean <- tm_map(content.clean,content_transformer(tolower)) #convert to lower case content.clean <- tm_map(content.clean,removeWords,stopwords("english")) #removestop words content.clean <- tm_map(content.clean,stemDocument) #stem words #create dtm dtm <- DocumentTermMatrix(content.clean, control = list(weighting = weightTf)) rowsums <- apply(dtm,1,sum) empty.rows <- dtm[rowsums == 0, ]$dimnames[1][[1]] dtm.new <- dtm[rowsums > 0, ] content.new <- content[-as.numeric(empty.rows)] df.new<-df[-as.numeric(empty.rows),] max_nb_toics <- 40 result <- FindTopicsNumber( dtm.new, topics = seq(from = 2, 
to = max_nb_toics, by = 1), metrics = c("Griffiths2004", "CaoJuan2009", "Arun2010", "Deveaud2014"), method = "Gibbs", control = list(seed = 77), mc.cores = 2, verbose = TRUE ) #Plot and view results to make decision FindTopicsNumber_plot(result)
4f8527c5b9a4419ddc565d2aeae72ac5c499c78c
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/broman/examples/fac2num.Rd.R
b9e0329fac21812cdb3112dadeab34b2a699ddcd
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
176
r
fac2num.Rd.R
library(broman) ### Name: fac2num ### Title: Convert a factor to numeric ### Aliases: fac2num ### ** Examples x <- factor(c(3, 4, 9, 4, 9), levels=c(3,4,9)) fac2num(x)
0769dd4ee17010061d9422a7a4dc6abd54d62c3a
f606dc05a68407496aa3dccd0f56cf147d4bad82
/rprog_data_specdata/complete.R
6a0991cf1b0b5f4d1f567b8d542ea5ff94188fbe
[]
no_license
Osirisis/RProgramming
4fe68662b0779928bc683d00d773d900c1cb6cd6
95c94e78255e7dfa0e748eec93f1e2d3758e99da
refs/heads/master
2021-01-09T20:16:47.580362
2016-06-11T18:53:59
2016-06-11T18:53:59
60,922,725
0
0
null
null
null
null
UTF-8
R
false
false
599
r
complete.R
complete <- function(directory, id = 1:332) { myid <- id for(i in 1:length(id)) { if(id[i]<10) { myid[i] <- paste("00",id[i], sep="") }else if(id[i]<100) { myid[i] <- paste("0",id[i], sep="") } } myoutput <- matrix(data=NA, nrow = length(id), ncol = 2) dimnames(myoutput) <- list(1:length(id), c("id","nobs")) j = 0 for (i in myid) { mydata <- read.csv(paste("./", directory, "/", i, ".csv", sep = "")) cleandata <- na.omit(mydata) j = j + 1 myoutput[j, 1] <- as.numeric(i) myoutput[j, 2] <- nrow(cleandata) } print(myoutput) }
a18d544115e6a6cdc528c64dcb12d85c99f09427
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
/fuzzedpackages/PO.EN/man/example.data.Rd
4a89137678dd95f200109bee5ccac95244c2d4ed
[]
no_license
akhikolla/testpackages
62ccaeed866e2194652b65e7360987b3b20df7e7
01259c3543febc89955ea5b79f3a08d3afe57e95
refs/heads/master
2023-02-18T03:50:28.288006
2021-01-18T13:23:32
2021-01-18T13:23:32
329,981,898
7
1
null
null
null
null
UTF-8
R
false
true
939
rd
example.data.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{example.data} \alias{example.data} \title{Example datasets} \format{ The \code{example.data$train.data} and \code{example.data$test.data} are dataframes with 220 and 100 observations and 146 variables. \describe{ \item{response}{A binary response vector} \item{features}{Standardized 145 DeepSEA features} } } \usage{ data(example.data) } \description{ This data list, \code{example.data}, includes three datasets generated based on Saturation mutagenesis results (M. Kircher, et al.,2019) and the DeepSEA features (Zhou & Troyanskaya, 2015). The training and testing datasets in the data list include binary response vectors, which are truncations of the P values of tissue K562 from the Saturation mutagenesis results, and reduced versions of the DeepSEA features for a faster computational demonstration. } \keyword{datasets}
6a94ddad9e2d59b953fc1d030c4183930ecbd4fe
26f19812adb9b17b75bfd26fa4212809b6422937
/Old_RF_Code/4_RandomForest.R
0be71c0ee481eb76011eec7bf63a365fafb1c0c5
[]
no_license
nskaff/gleon_predict_salt
ebcfdf4de30a3fc4e5ee3a03cd762d3fba10e475
946d30c818c47cfd8ba90852eebf295e6161239f
refs/heads/master
2022-05-02T12:13:58.398669
2022-03-09T18:55:02
2022-03-09T18:55:02
152,502,719
0
1
null
2020-06-18T19:26:21
2018-10-10T23:30:19
HTML
UTF-8
R
false
false
19,564
r
4_RandomForest.R
# title: "WI_chloride_randomforest" # author: "Hilary Dugan" # date: "4/5/2019" library(forestFloor) library(randomForest) library(ranger) library(tidyverse) library(scales) library(sf) library(LAGOSNE) library(caret) library(lubridate) library(parallel) library(devtools) # Load data datin = read_csv("LAGOS_prediction/data3_LAGOS_ChlorideCovariates.csv") df <- unclass(datin) ## Tidy data # total data (80k, after 1990 69k, after taking mean of measurements from same day 35k, filter out deep measurements 35k) dat <- datin %>% dplyr::filter(Chloride < 10000 & Chloride >=0) %>% dplyr::mutate(Chloride = ifelse(Chloride == 0, 0.0001, Chloride)) %>% # dplyr::filter(!(coastdist < 4)) %>% dplyr::filter(ActivityDepthHeightMeasure.MeasureValue < 10 | is.na(ActivityDepthHeightMeasure.MeasureValue)) %>% # dplyr::filter(!is.na(maxdepth)) %>% dplyr::filter(ActivityStartDate > as.Date('1990-01-01')) %>% # dplyr::mutate(iws_forest = iws_nlcd2011_pct_41 + iws_nlcd2011_pct_42 +iws_nlcd2011_pct_43) %>% # dplyr::mutate(iws_ag = iws_nlcd2011_pct_81 + iws_nlcd2011_pct_82) %>% # dplyr::mutate(iws_develop = iws_nlcd2011_pct_24 + iws_nlcd2011_pct_23 + iws_nlcd2011_pct_22 + iws_nlcd2011_pct_21) %>% # dplyr::mutate(buffer500m_forest = buffer500m_nlcd2011_pct_41 + buffer500m_nlcd2011_pct_42 +buffer500m_nlcd2011_pct_43) %>% # dplyr::mutate(buffer500m_ag = buffer500m_nlcd2011_pct_81 + buffer500m_nlcd2011_pct_82) %>% # dplyr::mutate(buffer500m_develop = buffer500m_nlcd2011_pct_24 + buffer500m_nlcd2011_pct_23 + buffer500m_nlcd2011_pct_22 + buffer500m_nlcd2011_pct_21) %>% dplyr::group_by(ActivityStartDate, lagoslakeid) %>% dplyr::summarise_if(is.numeric,list(mean) )%>% dplyr::left_join(distinct(dplyr::select(datin,lagoslakeid,lakeconnection,gnis_name,state_zoneid,State))) # group_by(State) %>% # filter(n() > 10) %>% ungroup() dat = data.frame(dat) log01 <- function(x){log(x + 0.001)} # log of columns dat_rf <- dat %>% # filter_all(all_vars(!is.na(.))) %>% 
mutate_at(vars(Chloride,lake_area_ha,iws_ha,wlconnections_allwetlands_count:rdDist_Roads),log01) %>% mutate(cov_strat = case_when(Chloride > 5.5 ~ "vhigh", Chloride > 3 & Chloride <= 5.5 ~ "high", Chloride <= 3 ~ "low")) %>% # mutate(cov_strat=ifelse(Chloride>5.9, "vhigh", ifelse(Chloride>4,"high", "low"))) %>% mutate(month = month(ActivityStartDate)) %>% filter(!is.na(iws_nlcd2011_pct_22)) %>% filter(!is.na(TonsPerMile)) %>% mutate(id=row_number()) sapply(dat_rf, function(x) sum(is.na(x))) # See if there are NA values ## Random Forest model rf_cov <- dat_rf %>% dplyr::select(month,lake_area_ha,iws_ha, iws_nlcd2011_pct_0:iws_roaddensity_density_mperha, buffer500m_nlcd2011_pct_0:TonsPerMile) sapply(rf_cov, function(x) sum(is.na(x))) # See if there are NA values ##ranger version of random forest #generating a matrix of in-bag and out of bag observations ntree=500 random_lake_samps <- lapply(1:ntree, function(i){ #print(i) unique_lakes<-unique(dat_rf$lagoslakeid) #sample 10% of unique sites lake_samp<- sample(unique_lakes, size =.9*length(unique_lakes), replace=F) samp = as.integer(dat_rf$lagoslakeid %in% lake_samp) # #take a full bootstrap sample of the in-sample lakes. 
Leaving this with replace F but can be adjusted later # expand_samp<-sample(as.numeric(row.names(dat_rf))[dat_rf$lagoslakeid %in% lake_samp ], replace=F ) # # #counting the number of bootstrap samples for each observation # samp_count<-plyr::count(expand_samp) # # #joining the in-bag sample with the out of bag sample index # df<-full_join(samp_count, data.frame(x=as.numeric(row.names(dat_rf))[!(row.names(dat_rf) %in% samp_count$x)]), by="x") # # #ordering by row number # samp<-as.numeric(df[order(as.numeric(df$x)),"freq"]) # # #converting NA to 0 for withheld lakes # samp[is.na(samp)]<-0 return(samp) } ) rf_model<-ranger(dependent.variable.name='Chloride',data=data.frame(Chloride=dat_rf$Chloride,rf_cov),inbag=random_lake_samps, num.trees=ntree, importance = "permutation", keep.inbag = T, mtry=20) rf_model rf_model$predictions #variable importance v<-as.numeric(rf_model$variable.importance) w<-as.character((names(rf_model$variable.importance))) DF<-data.frame(w=w,v=as.numeric(v)) DF$w<-factor(DF$w, levels =DF[order(as.numeric(DF$v)),"w"] ) ggplot(DF[order(as.numeric(DF$v)),][(nrow(DF)-20):nrow(DF),], aes(x=w, y=v,fill=v))+ geom_bar(stat="identity", position="dodge")+ coord_flip()+ ylab("Variable Importance")+ xlab("")+ ggtitle("Information Value Summary")+ guides(fill=F) ##feature contributions for forestfloor source("ranger_RFadaptor.R") ff_rf_model<-ranger_RFadaptor(rf_model,dat_rf$Chloride) ffra = forestFloor(ff_rf_model,rf_cov,calc_np = TRUE) #color by most important feature Col = fcol(ffra ,1) plot(ffra, plot_seq=c(1,2,4,6,8),plot_GOF=F, limitY=F, col=Col,orderByImportance = T) # rf_model<-randomForest(y=dat_rf$Chloride, # x = rf_cov, # keep.inbag = T, # importance = T, # ntree = 500, # sampsize = 2885, # mtry=10) # rf_model$importance # importance(rf_model) # names(importance(rf_model)[rev(order(importance(rf_model)[,1])),1]) # names(rf_model$importance[rev(order(rf_model$importance[,1])),1]) # # varImpPlot(rf_model) # # mtry=length(rf_cov)) # # # Select 
important variables # rf_cov <- dat_rf %>% dplyr::select( # rdDist_Interstate, # buffer500m_ag, # buffer500m_forest, # buffer500m_develop, # winterseverity) ###all lagos data for later predictions allLagos = read_csv('LAGOS_prediction/data5_LAGOS_allLakes.csv') %>% # dplyr::filter(!is.na(maxdepth)) %>% # dplyr::mutate(iws_forest = iws_nlcd2011_pct_41 + iws_nlcd2011_pct_42 +iws_nlcd2011_pct_43) %>% # dplyr::mutate(iws_ag = iws_nlcd2011_pct_81 + iws_nlcd2011_pct_82) %>% # dplyr::mutate(iws_develop = iws_nlcd2011_pct_24 + iws_nlcd2011_pct_23 + iws_nlcd2011_pct_22 + iws_nlcd2011_pct_21) %>% # dplyr::mutate(buffer500m_forest = buffer500m_nlcd2011_pct_41 + buffer500m_nlcd2011_pct_42 + buffer500m_nlcd2011_pct_43) %>% # dplyr::mutate(buffer500m_ag = buffer500m_nlcd2011_pct_81 + buffer500m_nlcd2011_pct_82) %>% # dplyr::mutate(buffer500m_develop = buffer500m_nlcd2011_pct_24 + buffer500m_nlcd2011_pct_23 + buffer500m_nlcd2011_pct_22 + buffer500m_nlcd2011_pct_21) %>% mutate(month = 8) %>% filter(state_zoneid != 'OUT_OF_COUNTY_STATE') #%>% # filter(!lagoslakeid %in% dat$lagoslakeid) allLagos <- allLagos %>% # filter_all(all_vars(!is.na(.))) %>% mutate_at(vars(lake_area_ha,iws_ha,iws_nlcd2011_ha_0:rdDist_Roads),log01) %>% filter(!is.na(iws_nlcd2011_pct_22)) sapply(allLagos, function(x) sum(is.na(x))) # See if there are NA values allLagos.rf <- allLagos %>% dplyr::select(colnames(rf_cov)) # #######Don't run unless you've got some time#### # ##grid search to select random forest hyperparameters # control <- trainControl(method="oob", number=10, search="random") # rf_random <- train(y=dat_rf$Chloride, x = rf_cov, method="rf",tuneLength=15, trControl=control) # #best r2 model has mtry=4, but I like maximizing mtry to better interpret interactions in forestFloor plots/control for variables that are used for splits earlier in the trees # #small grid search for the right sampsize. 
Lower sampsize decorrelates trees a bit, which is important when you maximize mtry but less so if mtry is 4 # sampsize<-c(100,200,500,1000,length(dat_rf$Chloride)) # samp_df<-data.frame() # # for (i in 1:length(sampsize) ){ # rf_sampsize<-randomForest(y=dat_rf$Chloride, # x = rf_cov, # keep.inbag = T, # importance = T, # ntree=500, # sampsize = sampsize[i], # mtry=length(rf_cov)) # #mtry=7) # samp_df<-rbind(samp_df, # data.frame(sampsize=sampsize[i], # r2=rf_sampsize$rsq[length(rf_sampsize$rsq)], # mse=rf_sampsize$mse[length(rf_sampsize$mse)]) # ) # } # samp_df #sampsize around the max is pretty good, though it's not the worst idea to reduce this value to decorrelate each tree ###### getPreds <- function(i){ # train_lakes <- sample(unique_lakes, size = ceiling(length(unique_lakes)*lake_prop) ) singleLake = dat_rf %>% # filter(lagoslakeid %in% train_lakes) %>% group_by(lagoslakeid) %>% summarise(id = sample(id,size = 1)) %>% sample_frac(lake_prop) # a = dat_rf %>% select(lagoslakeid,cov_strat) %>% slice(1:20) #https://stackoverflow.com/questions/51671856/dplyr-sample-n-by-group-with-unique-size-argument-per-group dat_rf_strat <- data.frame(dat_rf %>% # mutate(id=row_number()) %>% filter(id %in% singleLake$id) %>% mutate(frq = case_when(cov_strat=="vhigh" ~ ceiling(vhighval_prop*n()), cov_strat=="high" ~ ceiling(highval_prop*n()), cov_strat=="low" ~ ceiling(lowval_prop*n()))) %>% group_by(cov_strat) %>% nest() %>% mutate(v = purrr:map(data, ~sample_n(data.frame(.), unique(.$frq), replace=T))) %>% unnest(v)) rf_cov_strat <- dat_rf_strat %>% dplyr::select(colnames(rf_cov)) rf_model_lst <- randomForest(y=dat_rf_strat$Chloride, x = rf_cov_strat, keep.inbag = T, importance = T, ntree = 10, replace=F, # sampsize=length(dat_rf_strat$Chloride), ytest=dat_rf$Chloride, xtest=rf_cov, # mtry=10, norm.votes=F, keep.forest=TRUE) # importanceOut = importance(rf_model_lst)[rev(order(importance(rf_model_lst)[,1])),1] importanceOut = importance(rf_model_lst)[,1] #predictions for all 
lagos lagos_pred_Aug<-predict(rf_model_lst, newdata=allLagos.rf) #removing insample predictions rf_model_lst$test$predicted[dat_rf_strat$id] <- NA return(list(rfModelPred = rf_model_lst$test$predicted, lagosPredAug = lagos_pred_Aug, # rfModel = rf_model_lst, importance = importanceOut)) } detectCores() # unique_lakes <- unique(dat_rf$lagoslakeid) lake_prop <- 0.95 ntree <- 200 # 0.2, 0.75, 1.5 = r2 0.93 (cutoff is <3, 3-5.5, >5.5) vhighval_prop <- 1 highval_prop <- 0.2 lowval_prop <- 0.3 rf_model_lst_preds <- mclapply(X = 1:ntree, getPreds, mc.cores = 3, mc.preschedule = FALSE) # 3243 was the maximum number of ntrees that could be stored in memory # save(rf_model_lst_preds5000, file="data/rf_model_lst_preds5000.RData") #calculating oob predictions for training data # b = lapply(rf_model_lst_preds, function(x) x[['rfModelPred']]) # a = do.call("cbind",b) a = do.call("cbind", lapply(rf_model_lst_preds, function(x) x[['rfModelPred']])) #This works oob_preds_mean = as.numeric(apply(a, 1, FUN=mean, na.rm=T)) importOut = do.call("cbind", lapply(rf_model_lst_preds, function(x) x[['importance']])) #This works oob_import = data.frame(cov = colnames(rf_cov), importance = as.numeric(apply(importOut, 1, FUN=mean, na.rm=T))) %>% arrange(desc(importance)) # oob_preds_50 = as.numeric(apply(a, 1, FUN=quantile, na.rm=T, probs = 0.50, names = F)) # oob_preds_5 = as.numeric(apply(a, 1, FUN=quantile, na.rm=T, probs = 0.05, names = F)) # oob_preds_95 = as.numeric(apply(a, 1, FUN=quantile, na.rm=T, probs = 0.95, names = F)) # hist(oob_preds_95-oob_preds_5) # ### Examine outliers ### dat_rf$pred = oob_preds_mean dat_rf$diff = dat_rf$pred - dat_rf$Chloride # test = dat_rf %>% filter(pred > 5) # ggplot(test, aes(x = Chloride, y = pred, color = maxdepth)) + geom_point(alpha = 0.5) + # xlab('Observed Chloride') + ylab('Predicted Chloride') + # geom_abline() + # labs(title = paste0('Cor = ',cor(oob_preds_mean, dat_rf$Chloride, use = "complete.obs") ^ 2)) + # scale_colour_viridis_c() # 
dat_rf$oob_preds_mean = oob_preds_mean ggplot(dat_rf, aes(x = Chloride, y = oob_preds_mean, color = log(maxdepth))) + geom_point(alpha = 0.6) + ylim(-3.1,8) + xlim(-3.1,8) + xlab(bquote('Observed Chloride'~(mg~L^-1))) + ylab(bquote('Predicted Chloride'~(mg~L^-1))) + geom_abline(linetype = 2) + labs(title = paste0('Cor = ',round(cor(oob_preds_mean, dat_rf$Chloride, use = "complete.obs") ^ 2,2))) + scale_colour_viridis_c() + theme_bw() ggsave('LAGOS_prediction/Figure_modelCor.png',width = 7,height = 5) #calculating predictions for all lagos alllagos_preds_Aug = do.call("cbind", lapply(rf_model_lst_preds, function(x) x[['lagosPredAug']])) #This works alllagos_preds_Aug = rowMeans(alllagos_preds_Aug, na.rm = T) allLagos$predictionAug = alllagos_preds_Aug # write_csv(alllagos_preds_Aug,'output_data_allLagosPredictions.csv') ##### dat_rf.sum = dat_rf %>% dplyr::mutate(predicted = as.numeric(oob_preds_mean)) %>% group_by(lagoslakeid) %>% summarise(meanCl = mean(Chloride), min = min(Chloride), max = max(Chloride), pred = mean(predicted), lakeconn = first(lakeconnection), lat = first(nhd_lat), long = first(nhd_long), count = n()) %>% arrange(meanCl) %>% mutate(id = as.numeric(rownames(.))) %>% mutate(residuals = pred-meanCl) ggplot(dat_rf.sum, aes(x = id, y = meanCl, color = log(count))) + geom_point(alpha = 0.6) + geom_point(aes(y = pred), color = 'red3', alpha = 0.6) + geom_linerange(aes(ymin = min, ymax = max), alpha = 0.6) + ylab('Range observed Chloride concentrations') library(lme4) fitsO <- lm(pred ~ meanCl, data=dat_rf.sum) fitsO = data.frame(r2 = paste0('r2 = ',round(summary(fitsO)$r.squared,2)), meanCl = 7, pred = 0.1) fits <- lme4::lmList(pred ~ meanCl | lakeconn, data=dat_rf.sum) fits1 = data.frame(r2 = paste0('r2 = ',round(summary(fits)$r.squared,2)), lakeconn = unique(fits@groups), meanCl = 7, pred = 0.1) ggplot(dat_rf.sum, aes(x = meanCl, y = pred, color = log(count))) + geom_point(alpha = 0.6) + geom_point(aes(y = pred, color = log(count)), alpha = 0.6) + 
geom_errorbarh(aes(xmin = min, xmax = max), alpha = 0.6) + xlab(bquote('Observed Chloride'~(mg~L^-1))) + ylab(bquote('Mean Predicted Chloride'~(mg~L^-1))) + scale_color_viridis_c() + geom_abline(linetype = 'dashed') + theme_bw() + geom_text(data = fitsO, aes(label = r2),hjust = 1,vjust = -1, color = 'black') + coord_fixed(ratio = 1) ggsave('LAGOS_prediction/Figure_modelCorMean_Range.png',width = 7,height = 5) ggplot(dat_rf.sum) + geom_hline(yintercept = 0, linetype = 2) + geom_point(aes(x=count, y = residuals), color = 'red3', alpha = 0.6) + ylab(bquote('Log Mean Residual Chloride'~(mg~L^-1))) + xlab('Number of Observations') + theme_bw() ggsave('LAGOS_prediction/Figure_ModelResiduals.png',width = 7,height = 5) p1 = ggplot(dat_rf.sum, aes(x = exp(meanCl), y = exp(pred))) + geom_point() + geom_abline(linetype = 'dashed') + ylab(bquote('Predicted Chloride'~(mg~L^-1))) + xlab(bquote('Observed Chloride'~(mg~L^-1))) + labs(title = paste0('Modeled chloride (n = ',nrow(dat_rf),')')) + scale_y_continuous(trans = log2_trans()) + scale_x_continuous(trans = log2_trans()) + geom_text(data = fitsO, aes(label = r2),hjust = 1,vjust = -1, color = 'black') + theme_bw() + theme(legend.justification = c(0, 1), legend.position = c(0.02, 0.97),legend.box.background = element_rect(colour = "black")) + scale_color_viridis_c(name = "% Forest") p1 ggsave(plot = p1,'LAGOS_prediction/Figure_modelCorMean.png',width = 6,height = 5) p2 = ggplot(dat_rf.sum, aes(x = exp(meanCl), y = exp(pred))) + geom_point() + geom_abline(linetype = 'dashed') + ylab(bquote('Predicted Chloride'~(mg~L^-1))) + xlab(bquote('Observed Chloride'~(mg~L^-1))) + facet_wrap(~lakeconn) + labs(title = paste0('Modeled chloride (n =',nrow(dat_rf),')')) + scale_y_continuous(trans = log2_trans()) + scale_x_continuous(trans = log2_trans()) + geom_text(data = fits1, aes(label = r2),hjust = 1,vjust = -1, color = 'black') + theme_bw() + theme(legend.justification = c(0, 1), legend.position = c(0.02, 0.97),legend.box.background = 
element_rect(colour = "black")) + scale_color_viridis_c(name = "% Forest") p2 ggsave(plot = p2,'LAGOS_prediction/Figure_modelCorMean_LakeType.png',width = 6,height = 5) ############# ############# ############# ############# ############# ############# ## Prediction for LAGOS #### # Plot prediction histogram ggplot() + geom_density(data = allLagos, aes(x = exp(predictionAug), fill = "r"), alpha = 0.3) + geom_density(data = dat_rf.sum, aes(x = exp(meanCl), fill = "b"), alpha = 0.3) + scale_colour_manual(name ="", values = c("r" = "red", "b" = "blue"), labels=c("b" = "Observed", "r" = "Predicted")) + scale_fill_manual(name ="", values = c("r" = "red", "b" = "blue"), labels=c("b" = "Observed", "r" = "Predicted")) + scale_x_continuous(trans='log10') + ylab('Density') + xlab(bquote('Chloride'~(mg~L^-1))) + ggtitle("Predicted Chloride Concentrations in Lagos") + theme_bw() + geom_vline(xintercept = c(230,860),linetype = 2) + annotate(geom='text',label = 'Cl = 230, EPA Chronic chloride toxicity',x = 190, y = 0.4, angle = 90) + annotate(geom='text',label = 'Cl = 860, EPA Acute chloride toxicity',x = 720, y = 0.4, angle = 90) ggsave('LAGOS_prediction/Figure_LAGOSpredictions.png',width = 7,height = 5) ############# ############# ############# ############# ############# ############# ############# plotting residuals over space ############# library(tigris) states <- states(cb = TRUE) states_sf<- st_as_sf(states) # LAGOS region ggplot(data=dat_rf.sum) + geom_sf(data=states_sf[states_sf$NAME %in% c('New York','Vermont','New Hampshire','Maine','Rhode Island', 'Iowa','Missouri','Illinois','Ohio','Indiana','Pennsylvania','New Jersey', 'Massachusetts','Connecticut','Wisconsin','Minnesota','Michigan'),], fill="white")+ geom_point(aes(x=long, y=lat, col=abs(residuals), size = abs(residuals)), alpha=.5 )+ scale_color_viridis_c(option="magma")+ theme_bw() # ggsave(filename = 'LAGOS_prediction/Figure_RF_modelResiduals.png',width = 7, height = 5) b = allLagos %>% 
select(lagoslakeid:lakeconnection,predictionAug) %>% filter(predictionAug > log(50)) %>% mutate(cols = case_when(exp(predictionAug) < 100 ~ 1, exp(predictionAug) >= 100 & exp(predictionAug) <260 ~ 2, exp(predictionAug) > 260 ~ 3)) %>% mutate(expCl = exp(predictionAug)) %>% st_as_sf(coords = c('nhd_long','nhd_lat'),crs = 4326) library(mapview) library(viridisLite) m = b %>% mapview(zcol = "expCl", col.regions = magma(7),layer.name = 'Predicted Chloride (mg/L)') m mapshot(m, url = paste0(getwd(), "/html/map.html"))
0ba22a920d0d06fdeaf4ec423338ffba0c11e13d
83aa4e1f4fc491ac7dac2874a94aab9c43648507
/cachematrix.R
3280f34a7e748066dd69bb7b249cd541e119c8e6
[]
no_license
mrojze/ProgrammingAssignment2
2c6fc8a5a7da001d666ec70e112124c100b5d16c
3c646e6bbb6dae80b16d57dcfa8bf74574724608
refs/heads/master
2021-01-21T02:03:14.505672
2015-08-20T20:18:49
2015-08-20T20:18:49
41,099,790
0
0
null
2015-08-20T14:18:53
2015-08-20T14:18:53
null
UTF-8
R
false
false
1,286
r
cachematrix.R
## Matrix-inverse caching utilities.
##
## Typical use:
##   cm <- makeCacheMatrix(some_matrix)   # wrap the matrix
##   cacheSolve(cm)                       # computes and caches the inverse
##   cacheSolve(cm)                       # returns the cached inverse

## makeCacheMatrix: wrap a matrix in an object that can cache its inverse.
## The returned list exposes four closures sharing the same environment:
##   set      -- replace the stored matrix (invalidates the cache)
##   get      -- return the stored matrix
##   setsolve -- store a computed inverse in the cache
##   getsolve -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
        cached <- NULL
        list(
                set = function(y) {
                        x <<- y
                        # Any previously cached inverse is stale now.
                        cached <<- NULL
                },
                get = function() x,
                setsolve = function(solve) cached <<- solve,
                getsolve = function() cached
        )
}

## cacheSolve: return the inverse of the matrix wrapped by makeCacheMatrix.
## On the first call the inverse is computed with solve() and stored; later
## calls (with the matrix unchanged) return the cached value instead.
##
## x   -- an object produced by makeCacheMatrix
## ... -- extra arguments forwarded to solve()
cacheSolve <- function(x, ...) {
        inv <- x$getsolve()
        if (is.null(inv)) {
                # Cache miss: compute, store, and return the inverse.
                inv <- solve(x$get(), ...)
                x$setsolve(inv)
                return(inv)
        }
        message("getting cache data")
        inv
}
214512d4e4c61f92e4db7ef2d0d55cef26bc8e50
fc5a73575695f17d71214918f026923a1ee2f81f
/R/JJF.R
83ed9b4397ae65b6cdbe823972e247807bcf9f8a
[]
no_license
ssgantayat/brinla
01c99c2dcf4b8abeb2ce600f22750e42f503a60e
338abb581fb92d23bee9e7bd4fcebd82aaac993a
refs/heads/master
2022-12-16T16:37:44.945687
2020-09-22T17:24:46
2020-09-22T17:24:46
null
0
0
null
null
null
null
UTF-8
R
false
false
12,498
r
JJF.R
#' Convert precision to SD #' #' @author Julian Faraway, \email{jjf23@bath.ac.uk} #' @param prec a precision density #' @param internal logical indicating whether this is an internal representation #' #' @return an SD density #' @export bri.hyper.sd = function(prec,internal=FALSE){ if(internal){ inla.tmarginal(function(x) 1/sqrt(exp(x)),prec) }else{ inla.tmarginal(function(x) 1/sqrt(x), prec) } } #' Compute a summary from a density #' #' @author Julian Faraway, \email{jjf23@bath.ac.uk} #' @param dens a density #' #' @return numerical summary #' @export bri.density.summary = function(dens){ m = inla.emarginal(function(xx) c(xx, xx^2), dens) q = inla.qmarginal(c(0.025, 0.5, 0.975), dens) s = sqrt(max(0, m[2] - m[1]^2)) md = inla.mmarginal(dens) c(mean = m[1], sd = s, q0.025 = q[1], q0.5 = q[2], q0.975 = q[3],mode=md) } #' Convert precisions to SD in INLA hyperparameter summary #' #' @author Julian Faraway, \email{jjf23@bath.ac.uk} #' @param inla model object #' #' @return summary of hyperparameters on SD scale (where appropriate) #' @export bri.hyperpar.summary = function(r){ irp = r$internal.marginals.hyperpar hrp = r$marginals.hyperpar hypnames = names(irp) iip = grep("precision",hypnames) for(i in 1:length(irp)){ if(i %in% iip){ irp[[i]] = bri.hyper.sd(irp[[i]],internal=TRUE) }else{ irp[[i]] = hrp[[i]] hypnames[i] = names(hrp)[i] } } ts = t(sapply(irp,bri.density.summary)) hypnames = sub("Log precision","SD",hypnames) row.names(ts) = hypnames ts } #' Plot the hyperparameter posteriors #' #' @author Julian Faraway, \email{jjf23@bath.ac.uk} #' @param r an INLA model object #' @param together TRUE if densities to be plotted on a single panel #' #' @return data frame containing the densities #' @export bri.hyperpar.plot = function(r,together=TRUE){ if (!require("ggplot2")) stop("Function requires ggplot2 package. 
Please install this first.") irp = r$internal.marginals.hyperpar hrp = r$marginals.hyperpar hypnames = names(irp) iip = grep("precision",hypnames) for(i in 1:length(irp)){ if(i %in% iip){ irp[[i]] = bri.hyper.sd(irp[[i]],internal=TRUE) }else{ irp[[i]] = hrp[[i]] hypnames[i] = names(hrp)[i] } } hypnames = sub("Log precision","SD",hypnames) hypnames = sub("the Gaussian observations","error",hypnames) names(irp) = hypnames cf = data.frame(do.call(rbind,irp)) cf$parameter = rep(hypnames,times=sapply(irp,nrow)) if(together){ p=ggplot(cf,aes(x=x,y=y,linetype=parameter))+geom_line()+ylab("density")+xlab("") print(p) }else{ p=ggplot(cf,aes(x=x,y=y))+geom_line()+facet_wrap(~parameter,scales="free")+ylab("density")+xlab("") print(p) } invisible(cf) } #' Plot the posterior densities of the random effects #' #' @author Julian Faraway, \email{jjf23@bath.ac.uk} #' @param r inla model object #' #' @return a data frame with the densities and group labels #' @export bri.random.plot = function(r){ if (!require("ggplot2")) stop("Function requires ggplot2 package. Please install this first.") reff <- r$marginals.random irp = reff[[1]] cf = data.frame(do.call(rbind,irp)) cf$group = rep(as.character(1:length(irp)),times=sapply(irp,nrow)) p=ggplot(cf,aes(x=x,y=y,linetype=group))+geom_line()+ylab("density")+xlab("") print(p) invisible(cf) } #' Plot posterior densities of the fixed effects #' #' @author Julian Faraway, \email{jjf23@bath.ac.uk} #' @param r an inla model object #' #' @return a data frame containing the densities and parameter labels (invisible) #' @export bri.fixed.plot = function(r, together=FALSE){ if (!require("ggplot2")) stop("Function requires ggplot2 package. 
Please install this first.") rmf = r$marginals.fixed cf = data.frame(do.call(rbind, rmf)) cf$parameter = rep(names(rmf),times=sapply(rmf,nrow)) if(together){ p=ggplot(cf,aes(x=x,y=y,linetype=parameter))+geom_line()+geom_vline(xintercept=0)+ylab("density") print(p) }else{ p = ggplot(cf,aes(x=x,y=y))+geom_line()+ facet_wrap(~ parameter, scales="free")+geom_vline(xintercept=0)+ylab("density") print(p) } invisible(cf) } #' Gaussian Process Regression in 1D #' #' @author Julian Faraway, \email{jjf23@bath.ac.uk} #' @param x the predictor vector #' @param y the response vector #' @param pcprior limites for the penalised complexity prior (optional). If specified should be a vector #' of the form c(r,s) where P(range < r = 0.05) and P(SD(y) > s = 0.05) #' @param nbasis - number of basis functions for the spline (default is 25) #' @param degree - degree for splines (default is 2) - allowable possibilities are 0, 1 or 2. #' @param alpha - controls shape of the GP kernel (default is 2) - 0 < alpha <=2 is possible #' @param xout - grid on which posterior will be calculated (default is x) #' @param sigma0 - prior mean for the signal SD (default is SD(y)) #' @param rho0 - prior mean for the range #' #' @return list consisting of xout, the posterior mean, the lower 95\% credibility band, #' the upper 95\% credibility band and the INLA object containing the fit #' @export bri.gpr <- function(x, y, pcprior, nbasis=25, degree=2, alpha=2, xout=x, sigma0=sd(y), rho0 = 0.25*(max(x) - min(x))){ if (!all(is.finite(c(x, y)))) stop("missing or infinite values in inputs are not allowed") mesh <- inla.mesh.1d(seq(min(xout),max(xout),length.out = nbasis),degree = degree) nu <- alpha - 1/2 kappa0 <- sqrt(8 * nu)/rho0 tau0 <- 1 / (4 * kappa0^3 * sigma0^2)^0.5 if(missing(pcprior)){ spde <- inla.spde2.matern(mesh, alpha=alpha, constr = FALSE, B.tau = cbind(log(tau0), 1, 0), B.kappa = cbind(log(kappa0), 0, 1), theta.prior.prec = 1e-4) }else{ spde <- 
inla.spde2.pcmatern(mesh,alpha=alpha,prior.range=c(pcprior[1],0.05),prior.sigma=c(pcprior[2],0.05)) } A <- inla.spde.make.A(mesh, loc=x) Ap <- inla.spde.make.A(mesh, loc=xout) index <- inla.spde.make.index("sinc", n.spde = spde$n.spde) st.est <- inla.stack(data=list(y=y), A=list(A), effects=list(index), tag="est") st.pred <- inla.stack(data=list(y=NA), A=list(Ap), effects=list(index), tag="pred") sestpred <- inla.stack(st.est,st.pred) formula <- y ~ -1 + f(sinc, model=spde) data <- inla.stack.data(sestpred) result <- inla(formula, data=data, family="normal", control.predictor= list(A=inla.stack.A(sestpred),compute=TRUE)) ii <- inla.stack.index(sestpred, tag='pred')$data list(xout=xout, mean=result$summary.fitted.values$mean[ii], lcb=result$summary.fitted.values$"0.025quant"[ii], ucb=result$summary.fitted.values$"0.975quant"[ii], inlaobj=result) } #' Smoothness bands for Gaussian Process Regression #' #' @author Julian Faraway, \email{jjf23@bath.ac.uk} #' @param x the predictor vector #' @param y the response vector #' @param nbasis - number of basis functions for the spline (default is 25) #' @param degree - degree for splines (default is 2) - allowable possibilities are 0, 1 or 2. 
#' @param alpha - controls shape of the GP kernel (default is 2) - 0 < alpha <=2 is possible #' @param xout - grid on which posterior will be calculated (default is x) #' @param sigma0 - prior mean for the signal SD (default is SD(y)) #' @param rho0 - prior mean for the range #' #' @return list consisting of xout, the posterior mean, the smoother 95\% credibility band, #' the rougher 95\% credibility band #' @export bri.smoothband <- function(x, y, nbasis=25, degree=2, alpha=2, xout=x, sigma0=sd(y), rho0 = 0.25*(max(x) - min(x))){ if (!all(is.finite(c(x, y)))) stop("missing or infinite values in inputs are not allowed") mesh <- inla.mesh.1d(seq(min(xout),max(xout),length.out = nbasis),degree = degree) nu <- alpha - 1/2 kappa0 <- sqrt(8 * nu)/rho0 tau0 <- 1 / (4 * kappa0^3 * sigma0^2)^0.5 spde <- inla.spde2.matern(mesh, alpha=alpha, constr = FALSE, B.tau = cbind(log(tau0), 1, 0), B.kappa = cbind(log(kappa0), 0, 1), theta.prior.prec = 1e-4) A <- inla.spde.make.A(mesh, loc=x) Ap <- inla.spde.make.A(mesh, loc=xout) index <- inla.spde.make.index("sinc", n.spde = spde$n.spde) st.est <- inla.stack(data=list(y=y), A=list(A), effects=list(index), tag="est") st.pred <- inla.stack(data=list(y=NA), A=list(Ap), effects=list(index), tag="pred") sestpred <- inla.stack(st.est,st.pred) formula <- y ~ -1 + f(sinc, model=spde) data <- inla.stack.data(sestpred) result <- inla(formula, data=data, family="normal", control.predictor= list(A=inla.stack.A(sestpred),compute=TRUE)) mres <- inla.spde.result(result,"sinc",spde) kappa0 <- exp(mres$summary.log.kappa['0.025quant'])[,] sigma02 <- exp(mres$summary.log.variance.nominal['0.5quant'])[,] tau0 <- 1 / (4 * kappa0^3 * sigma02)^0.5 spde <- inla.spde2.matern(mesh, alpha=alpha, constr = FALSE, B.tau = cbind(log(tau0)), B.kappa = cbind(log(kappa0))) formula <- y ~ -1 + f(sinc, model=spde) resulta <- inla(formula, data=data, family="normal", control.predictor= list(A=inla.stack.A(sestpred),compute=TRUE)) kappa0 <- 
exp(mres$summary.log.kappa['0.975quant'])[,] sigma02 <- exp(mres$summary.log.variance.nominal['0.5quant'])[,] tau0 <- 1 / (4 * kappa0^3 * sigma02)^0.5 spde <- inla.spde2.matern(mesh, alpha=alpha, constr = FALSE, B.tau = cbind(log(tau0)), B.kappa = cbind(log(kappa0))) formula <- y ~ -1 + f(sinc, model=spde) resultb <- inla(formula, data=data, family="normal", control.predictor= list(A=inla.stack.A(sestpred),compute=TRUE)) ii <- inla.stack.index(sestpred, tag='pred')$data list(xout=xout, mean=result$summary.fitted.values$mean[ii], scb=resulta$summary.fitted.values$mean[ii], rcb=resultb$summary.fitted.values$mean[ii]) } #' Non-stationary smoothing for Gaussian Process Regression in 1D #' #' @author Julian Faraway, \email{jjf23@bath.ac.uk} #' @param x the predictor vector #' @param y the response vector #' @param nbasis - number of basis functions for the spline (default is 25) #' @param sbasis - number of basis functions for the smoothing of sigma and rho #' @param degree - degree for splines (default is 2) - allowable possibilities are 0, 1 or 2. 
#' @param alpha - controls shape of the GP kernel (default is 2) - 0 < alpha <=2 is possible #' @param xout - grid on which posterior will be calculated (default is x) #' #' @return list consisting of xout, the posterior mean, the lower 95\% credibility band, #' the upper 95\% credibility band and the INLA object containing the fit #' @export bri.nonstat <- function(x, y, nbasis=25, sbasis=5, degree=2, alpha=2, xout=x, sigma0=sd(y), rho0 = 0.25*(max(x) - min(x))){ if (!all(is.finite(c(x, y)))) stop("missing or infinite values in inputs are not allowed") mesh <- inla.mesh.1d(seq(min(xout),max(xout),length.out = nbasis),degree = degree) basis.T <-as.matrix(inla.mesh.basis(mesh, type="b.spline", n=sbasis, degree=2)) basis.K <-as.matrix(inla.mesh.basis(mesh, type="b.spline", n=sbasis, degree=2)) spde <- inla.spde2.matern(mesh, alpha=alpha, B.tau = cbind(basis.T[-1,],0), B.kappa = cbind(0,basis.K[-1,]/2), theta.prior.prec = 1e-4) A <- inla.spde.make.A(mesh, loc=x) Ap <- inla.spde.make.A(mesh, loc=xout) index <- inla.spde.make.index("sinc", n.spde = spde$n.spde) st.est <- inla.stack(data=list(y=y), A=list(A), effects=list(index), tag="est") st.pred <- inla.stack(data=list(y=NA), A=list(Ap), effects=list(index), tag="pred") sestpred <- inla.stack(st.est,st.pred) formula <- y ~ -1 + f(sinc, model=spde) data <- inla.stack.data(sestpred) result <- inla(formula, data=data, family="normal", control.predictor= list(A=inla.stack.A(sestpred),compute=TRUE)) ii <- inla.stack.index(sestpred, tag='pred')$data list(xout=xout, mean=result$summary.fitted.values$mean[ii], lcb=result$summary.fitted.values$"0.025quant"[ii], ucb=result$summary.fitted.values$"0.975quant"[ii], inlaobj=result) }
ae63541da90454548fbc1f22651f6ad61aef21cd
8a02b3940327bbe2fab42006e634fdd094ae527c
/R/tw_buy_phone.R
5adca6858a2f63c53fd8a37d374d1ebfc576e2ab
[]
no_license
carlganz/twilio
05bb5840e488d0cbcebe59be1d461031f38ea3ef
ee0cbc5c24689694763b29e98cc476992cee90f0
refs/heads/master
2021-07-05T08:22:05.666016
2018-05-18T17:47:00
2018-05-18T17:47:00
100,397,144
1
0
null
2017-08-15T16:29:55
2017-08-15T16:29:55
null
UTF-8
R
false
false
964
r
tw_buy_phone.R
#' Add a new phone number to your account
#'
#' Purchases the given phone number through the Twilio REST API and
#' registers it with the account identified by the configured SID.
#' To send messages with Twilio you must purchase phone numbers.
#' @param phone_number String representing phone number to purchase. See notes.
#' @note Phone numbers must be in E.164 format. '*' represents a wildcard digit.
#' @export
tw_buy_phone_number <- function(phone_number) {
  # Build the IncomingPhoneNumbers endpoint for this account.
  endpoint <- modify_url(
    "https://api.twilio.com/",
    path = paste("2010-04-01", "Accounts", get_sid(),
                 "IncomingPhoneNumbers.json", sep = "/")
  )
  response <- POST(
    endpoint,
    user_agent("https://github.com/seankross/twilio"),
    authenticate(get_sid(), get_token()),
    body = list(PhoneNumber = phone_number)
  )
  # The Twilio API is expected to respond with JSON; anything else is an error.
  if (http_type(response) != "application/json") {
    stop("Twilio API did not return JSON.", call. = FALSE)
  }
  parsed <- fromJSON(content(response, "text", encoding = "UTF-8"),
                     simplifyVector = FALSE)
  # Raise a descriptive error for non-success HTTP statuses.
  check_status(response)
  twilio_phone(parsed)
}
8bddf983044e9e93ad13e9bb50fc714be5f4e333
b95e1106b931eccb03ca8629b267c21b6cb85c16
/R/cumvar.R
e60b631710fa467a1768e9cef2c59677d7f4859a
[]
no_license
cran/cumstats
4e578a3b272ed8d586e544675ac2e418557cca1a
27d91fda01018554bc91b1e03a79aa77dadad1f8
refs/heads/master
2021-01-11T20:20:09.203723
2017-01-16T07:13:00
2017-01-16T07:13:00
79,092,717
0
0
null
null
null
null
UTF-8
R
false
false
80
r
cumvar.R
#' Cumulative sample variance
#'
#' For each position i of `x`, returns var(x[1:i]). The first element is
#' NA because the sample variance of a single observation is undefined.
#'
#' @param x A numeric vector.
#' @return A numeric vector the same length as `x`.
#'
#' Uses vapply() instead of sapply() so the return type is stable:
#' an empty input yields numeric(0) rather than an empty list.
cumvar <- function(x) vapply(seq_along(x), function(k) var(x[seq_len(k)]), numeric(1))
998cbc13b4b2790d1368fa10965aadb3e616406a
a9e600682610ec4be2fe709316f1e430d1e664f7
/server.R
9213dcca62ff49d90fb69f3691616ae76616254f
[]
no_license
mkmitchell/shinyebird
101b29cda83601911b9a78e7645f4aae0adfc8a5
e297e598e51262ac7f696d1af9880c28088cfe44
refs/heads/master
2021-01-22T20:26:24.932402
2018-02-13T15:14:23
2018-02-13T15:14:23
85,324,894
0
0
null
null
null
null
UTF-8
R
false
false
7,386
r
server.R
library(ggplot2) library(scales) library(diptest) library(shiny) library(TTR) library(splines) ################################################################################### # Silvermans test # # http://www-bcf.usc.edu/~gourab/code-bmt/tables/table-2/ # silverman.test <-function(x,k,M=999,adjust=FALSE,digits=6) ################################################################################### silverman.test <-function(x,k,M=999,adjust=FALSE,digits=6){ # x: data # k: number of modes to be tested # M: number of bootstrap replications #check if seed is available (as done in boot package) #if so save it seedAvailable = exists(x=".Random.seed",envir=.GlobalEnv,inherits=FALSE) if(seedAvailable) saved_seed = .Random.seed else{ rnorm(1) saved_seed = .Random.seed } #temp function for bootstrapping y.obs <- function(x,h,sig=sd(x)){ mean(x) + (x-mean(x)+h*rnorm(length(x),0,1))/((1+h^2/sig^2)^(1/2)) #(x+h*rnorm(length(x),0,1))/((1+h^2/sig^2)^(1/2)) } #temp function for density calculation nor.kernel <- function(x,h){ density(x,bw=h,kernel ="gaussian")$y } #start of the test h0 <- h.crit(x, k) n <- 0 for (i in 1:M) { x.boot <- sort(y.obs(sample(x, replace=TRUE),h0)) mod.temp <- nr.modes(nor.kernel(x.boot,h0)) if (mod.temp > k){ n <- n+1 } } p <- n/M ptemp=p if(adjust==TRUE){ if(k==1){ #asymptotic levels of silvermantest by Hall/York x=c(0,0.005,0.010,0.020,0.030,0.040,0.050,0.06,0.07,0.08,0.09,0.1,0.11,0.12,0.13,0.14,0.15,0.16,0.17,0.18,0.19,0.2,0.25,0.30,0.35,0.40,0.50) y=c(0,0,0,0.002,0.004,0.006,0.010,0.012,0.016,0.021,0.025,0.032,0.038,0.043,0.050,0.057,0.062,0.07,0.079,0.088,0.094,0.102,0.149,0.202,0.252,0.308,0.423) sp = interpSpline(x,y) #adjusting the p-value if(p<0.005) p=0 else{ p = predict(sp,p)$y p = round(p,digits) } } else{ print("The option to adjust the p-value is valid only for k=1") } } #return(list(saved_seed=saved_seed,p_value=p)) #test_obj = new("Silvermantest", data=x, p_value = p,saved_seed=saved_seed,k=k) return(p) } h.crit <- function(x,k,prec=6){ 
#temp function nor.kernel <- function(x,h){ density(x,bw=h,kernel ="gaussian")$y } digits=prec prec=10^(-prec) x <- sort(x) minh <- min(diff(x)) #minimal possible h maxh <- diff(range(x))/2 #maximal possible h a <- maxh b <- minh zaehler=0 while (abs(b-a)>prec){ m <- nr.modes(nor.kernel(x,a)) b <- a if (m > k){ minh <- a a <- (a + maxh)/2 } else { maxh <- a a <- (a - minh)/2 } } a=round(a,digits) if(nr.modes( nor.kernel(x,a) ) <= k){ #subtract until more than k modes while(nr.modes( nor.kernel(x,a) ) <= k){ a = a - prec } a=a+prec } if(nr.modes( nor.kernel(x,a) ) > k){ #add until nr. of moodes correct while(nr.modes( nor.kernel(x,a) ) > k){ a = a + prec } } a } nr.modes <- function(y){ d1 <- diff(y) signs <- diff(d1/abs(d1)) length(signs[signs==-2]) } ################################################################################### # End Silvermans test http://www-bcf.usc.edu/~gourab/code-bmt/tables/table-2/ shinyServer(function(input, output) { workspace = "/data" observeEvent(input$do, { print(as.numeric(input$do)) }) ebird = reactive({ withProgress(message = 'Loading:', detail='eBird data incoming', value = 0, { # Input ArcGIS Model csv file infile = input$species inbird = paste(infile,".csv",sep="") print(inbird) ############################################################################ # Read in ebird data temp = read.csv(paste(workspace,inbird,sep="/"), sep=",", header=TRUE, quote = "", stringsAsFactors = FALSE, na.strings=c("")) temp = temp[!is.na(temp$BCRNAME),] incProgress(0.6, detail = "Finished pulling in eBird. 
Making fancy") # Reorder months temp$Month = factor(temp$Month, levels=c(9, 10, 11, 12, 1, 2, 3, 4)) temp$Week = factor(temp$Week, levels=c(31:53,1:17)) #Set X as na temp$OBSERVATION.COUNT = ifelse(temp$OBSERVATION.COUNT == "X", 1, temp$OBSERVATION.COUNT) temp$OBSERVATION.COUNT = as.numeric(temp$OBSERVATION.COUNT) temp$BCRNUMNAME = paste(temp$BCR.CODE, temp$BCRNAME, sep="_") test = c() getFactor = function(x) { for (m in 9:12){ if (m %in% c(9,11)) { maxVal = 30 } else { maxVal = 31 } for (d in 1:maxVal){ test = append(test, paste(m,d,sep="/")) } } for (m in 1:4){ if (m == 2) { maxVal = 28 } else if (m == 4) { maxVal = 30 } else { maxVal = 31 } for (d in 1:maxVal){ test = append(test, paste(m,d,sep="/")) } } return(test) } temp$MonthDay = factor(temp$MonthDay,levels=(getFactor())) }) temp }) output$selectedSpecies = renderUI({ df = ebird() items = unique(df$BCRNUMNAME) selectInput("bcr", "BCR:", items) }) output$whichSpecies = renderText({ input$species }) computeSummary = reactive({ df = ebird() df = subset(df, df$BCRNUMNAME == input$bcr) aggMean = aggregate(df$OBSERVATION.COUNT, list(Week=df$MonthDay, BCR=df$BCR.CODE), mean) plot(x=aggMean$Week, y=aggMean$x, main=paste("Figure 1. Observation count mean by BCR plotted over wintering period for ", input$species, sep=""), ylab="Average count", xlab="Date", cex.lab=1.5 ) lines(x=aggMean$Week, y=aggMean$x, col="red") }) computeSmooth = reactive({ df = ebird() df = subset(df, df$BCRNUMNAME == input$bcr) aggMean = aggregate(df$OBSERVATION.COUNT, list(Week=df$MonthDay, BCR=df$BCR.CODE), mean) ss = smooth.spline(x=aggMean$Week, y=aggMean$x, spar=0.7, keep.data = TRUE) ss$x = aggMean$Week plot(x=ss$x, y=ss$y, type="l", main=paste("Figure 2. 
Smoothed Observation count mean by BCR plotted over wintering period for ", input$species, sep=""), ylab="Average count", xlab="Date", cex.lab=1.5 ) lines(x=ss$x,y=ss$y, col="red") }) computePVal = reactive({ df = ebird() df = subset(df, df$BCRNUMNAME == input$bcr) testsetup = aggregate(df$OBSERVATION.COUNT, list(Week=df$MonthDay, BCR=df$BCR.CODE, BCRNUMNAME = df$BCRNUMNAME), mean) #testsmooth = SMA(testsetup[, "x"], 3) ss = smooth.spline(x=testsetup$Week, y=testsetup$x, spar=0.7, keep.data = TRUE) test = dip.test(ss$yin) bcr_name = unique(testsetup$BCRNUMNAME) paste("P-value:", test$p.value[[1]]," / Silverman: ", silverman.test(ss$y, 2, M=999, adjust=FALSE)," / BCR:", bcr_name, sep=" ") }) output$statsTable = renderPlot({ if(input$do == 0) return(NULL) computeSummary() }) output$smoothTable = renderPlot({ if(input$do == 0) return(NULL) computeSmooth() }) output$pVal = renderText({ if(input$do == 0) return(NULL) computePVal() }) })
e2fdd64594d4a071fbd210ed56f28b083f64ea7d
dbddd8c5408cfa93016dc70af545ab9eedbcba6c
/man/interpretR-package.Rd
5fad0675478bc8513e14ff3b92ed793fa3232a24
[]
no_license
cran/interpretR
26f8499e4a1c260a13eb8a447ac747e7a9a46f0a
77b723f34d864c341a3d0c4f73fe4c5a14e9c2a6
refs/heads/master
2023-09-01T02:21:13.517431
2023-08-19T22:22:31
2023-08-19T23:30:48
26,950,586
0
0
null
null
null
null
UTF-8
R
false
false
776
rd
interpretR-package.Rd
\name{interpretR-package} \alias{interpretR-package} \alias{interpretR} \docType{package} \title{ Partial Dependence Plots and Permutation-Based Performance Measures } \description{ Compute permutation-based performance measures (for binary classification) and create partial dependence plots (cross-validated classification and regression models). Currently only binary classification and regression models estimated with the package \code{randomForest} are supported. Binary classification models estimated with \code{ada} are also supported. } \author{ Authors: Michel Ballings, and Dirk Van den Poel, Maintainer: \email{Michel.Ballings@GMail.com} } \seealso{ \code{\link[interpretR:parDepPlot]{parDepPlot}}, \code{\link[interpretR:variableImportance]{variableImportance}} }
43153de5314915a6205c343363e61ef291ce4ba7
38ac9c4ed4f42bb582e19f0fe28dba81a77f0eea
/src/getIndex.R
c189743b5842d03df575c31f1f01b6f2aae17138
[]
no_license
colbyw5/sec_scrape
69770b7e8bc7d73ab64c9dec18c3768c62599d19
310f4a5ed17fb050dc14099965d21c18a15ba225
refs/heads/master
2020-12-19T23:31:21.395063
2020-01-24T01:42:38
2020-01-24T01:42:38
235,885,512
3
0
null
null
null
null
UTF-8
R
false
false
3,448
r
getIndex.R
#' Clean text from SEC Edgar file
#'
#' Lower-cases the input, replaces every character that is neither
#' alphanumeric nor ':' with a space, and collapses repeated whitespace.
#'
#' @param text text to be cleaned
#' @return text in lower case without symbols or extra whitespace
require(tidyverse)

cleanText <- function(text) {
  text <- tolower(text)
  # Keep letters, digits and ':' only; everything else becomes a space.
  text <- str_replace_all(text, "[^:[:alnum:]]", " ")
  # Squeeze internal runs of whitespace and trim both ends.
  text <- str_squish(text)
  return(text)
}

#' Read raw index file from SEC Edgar, remove markings and header
#'
#' Unzips a downloaded master index file, strips characters that break the
#' pipe-delimited parse, drops the header, and returns the filings table.
#'
#' @param path_to_file path to local gzipped master index file
#' @return data frame with columns cik, company.name, form.type,
#'   date.filed, edgar.link
cleanData <- function(path_to_file){

  # Unzip the downloaded index into the temp working directory
  # (removes the original .gz file).
  R.utils::gunzip(path_to_file, destname = "./temp_filing_dir/master.txt", temporary = FALSE, skip = FALSE, overwrite = TRUE, remove = TRUE)

  # Strip single quotes so the later scan() on '|' does not fail when a
  # company name contains an apostrophe.
  raw_data <- gsub("'", "", readLines("./temp_filing_dir/master.txt"))

  # Locate the dashed separator that marks the end of the file header.
  # NOTE(review): grep() may return more than one index if the dashed line
  # appears multiple times; scan(skip = ...) then receives a vector --
  # confirm the index files always contain exactly one separator.
  header_end <- grep("--------------------------------------------------------", raw_data, useBytes = TRUE)

  # Write the cleaned text back so scan() reads the sanitized version.
  writeLines(raw_data, "./temp_filing_dir/master.txt")

  # Parse the five pipe-delimited columns, skipping everything up to and
  # including the header separator.
  scraped_data <- scan("./temp_filing_dir/master.txt", what = list("", "", "", "", ""), flush = F, skip = header_end, sep = "|", quiet = T)

  # Replace punctuation in company names with spaces.
  company_name <- gsub("[[:punct:]]", " ", scraped_data[[2]], perl = T)

  # Assemble the final data set; edgar.link is the absolute URL to the filing.
  final_data <- data.frame(cik = scraped_data[[1]],
                           company.name = company_name,
                           form.type = scraped_data[[3]],
                           date.filed = scraped_data[[4]],
                           edgar.link = paste("https://www.sec.gov/Archives/", scraped_data[[5]], sep = ""))

  return(final_data)
}

#' Get index of filings from SEC Edgar
#'
#' Downloads the quarterly master index files from SEC Edgar for the given
#' years, parses them, and returns the filings matching the requested form
#' types. Uses a temporary directory that is deleted on completion.
#'
#' @param years range of years for index
#' @param quarter selection from 1-4 for quarters of index
#' @param form_types vector of form types to be included in the index
#' @return data frame of the filing index
getIndex <- function(years, quarter, form_types){

  # Create temporary directory for the downloaded filings.
  dir.create("./temp_filing_dir")

  # One entry per (year, quarter), later bound into a single data frame.
  filing_index <- list()

  for (year in years){
    # NOTE(review): the `quarter` argument is immediately shadowed by this
    # loop variable, so all four quarters are always fetched regardless of
    # the value passed in -- confirm whether that is intended.
    for (quarter in 1:4){

      # Construct the URL of the quarterly master index.
      file_link <- paste("https://www.sec.gov/Archives/edgar/full-index/", year, "/QTR", quarter, "/master.gz", sep = "")

      # Download the gzipped index file.
      utils::download.file(file_link, destfile = "./temp_filing_dir/master.idx", quiet = TRUE)

      # Unzip and parse the index into a data frame.
      filing_data <- cleanData(path_to_file = "./temp_filing_dir/master.idx")

      # Keep only the requested form types.
      filing_index[[paste(year, quarter, sep = "_")]] <- filing_data[filing_data$form.type %in% form_types,]
    }
  }

  # Delete the temporary directory and its contents.
  unlink("./temp_filing_dir", recursive = TRUE)

  # Close any file connections left open by the download/parse steps.
  closeAllConnections()

  # Combine all quarterly tables into one data frame.
  inc_links <- bind_rows(filing_index)
  return(inc_links)
}
6a8e708ea7a9779ec55779c862d7294eb9c82c98
9b232487e19fc860f6161f4294fecefe516edad6
/arimaSAC.R
25686d67ccded031c95e97141c882a1952b3d2c6
[]
no_license
ragAgar/forecast
15b9caad9616636006505d7d7877f8f40b24bf6c
82393026cdd0a3b4ef68325b2d4e6af67ee2d48b
refs/heads/master
2021-01-21T05:23:06.875923
2017-02-26T03:33:34
2017-02-26T03:33:34
83,180,151
0
0
null
null
null
null
UTF-8
R
false
false
4,689
r
arimaSAC.R
arimaSAC<-function(p,d,q,y,s="MONTH",jissoku,trace="T",method="MAPE",h=2){ if(s=="MONTH"){s<-12} if(s=="WEEK"){s<-52} th<-length(jissoku) traintemp<-y[1:(length(y)-(h+1))] evaluationtemp<-rbind(y[(length(y)-(h-1)):length(y)],jissoku) pred.value<-array(rep(0,(th))) for(h1 in 1:th){ best<-0 evaluation<-evaluationtemp[-(1:h1)] train<-rbind(traintemp,evaluationtemp[1:h1]) for(s1 in 0:s){ if(s==12){if(!((s1==0)|(s1==6)|(s1==12))==T){next}} if(s==52){if(!((s1==0)|(s1==26)|(s1==52))==T){next}} # print(s1) for (i in 0:p){ for (j in 0:d){ for(k in 0:q){ for(P in 0:p){ for(D in 0:d){ for(Q in 0:q){ res<-try(model<-arima(train,order=c(i,j,k),seasonal=list(order=c(P,D,Q),period=s1)),silent=T) if((class(res)=="try-error")&(trace=="T")){next} fc.arima <- forecast(model,h=h) pred.arima <- as.vector(fc.arima$mean) if(min(pred.arima)<0){next}#負の予想があるモデルははじく difference <- (pred.arima[1:(h-1)])-(evaluation[1:(h-1)]) difference.positive <- sum(difference[difference >= 0]) difference.negative <- -(sum(difference[difference <= 0])) absolute<-abs(difference) differenceabs<-sum(absolute) ratio.positive <- (difference.positive)/(differenceabs) ratio.negative <- (difference.negative)/(differenceabs) mape=(sum((absolute/evaluation[1:(h-1)])))*100/length(absolute)  #                cat(i,j,k,P,D,Q,s1,mape,"\n") if(best==0){ best<-mape besti<-i bestj<-j bestk<-k bestP<-P bestD<-D bestQ<-Q bests<-s1 } else if((!(best==0))&(best>=mape)){ best<-mape besti<-i bestj<-j bestk<-k bestP<-P bestD<-D bestQ<-Q bests<-s1 } }}}}}}} cat("\n","The best model is (",besti,bestj,bestk,")(",bestP,bestD,bestQ,")[",bests,"]","\n") bestmodel<-arima(train,order=c(besti,bestj,bestk),seasonal=list(order=c(bestP,bestD,bestQ),period=bests)) fc.arima <- forecast(bestmodel,h=h) pred.arima <- as.vector(fc.arima$mean) difference <- (pred.arima[1:(h-1)])-(evaluation[1:(h-1)]) difference.positive <- sum(difference[difference >= 0]) difference.negative <- -(sum(difference[difference <= 0])) absolute<-abs(difference) 
differenceabs<-sum(absolute) ratio.positive <- (difference.positive)/(differenceabs) ratio.negative <- (difference.negative)/(differenceabs) pred.value[h1]<-pred.arima[h] cat(" The value we predict is",evaluation[1:(h-1)],"\n","Our predicted value is ",pred.arima,"\n") cat(" MAPE = ",best,"\n") cat(" MIC(+,-) = ",ratio.positive,",",ratio.negative,"\n","\n") cat(" We computed ",h1,"models. We have to compute additional ",(th-h1),"model !!","\n","\n") } jissoku<-as.vector(jissoku) difference<-pred.value-jissoku difference.positive <- sum(difference[difference >= 0]) difference.negative <- -(sum(difference[difference <= 0])) absolute<-abs(difference) differenceabs<-sum(absolute) ratio.positive <- (difference.positive)/(differenceabs) ratio.negative <- (difference.negative)/(differenceabs) bestmape= sum(absolute/jissoku)*100/length(jissoku) cat("####################################################","\n") cat(" The total MAPE = ",bestmape,"\n") cat(" MIC(+,-) = ",ratio.positive,",",ratio.negative,"\n") cat(" MAPE(+,-) = ",bestmape*ratio.positive,bestmape*ratio.negative,"\n") cat("####################################################","\n") cat(" JISSOKU Value is", round(as.vector(jissoku),1),"\n","PREDICT Value is") return(round(pred.value,1)) }
08a8de8eadd226f9af37191f3d6f938a7788488d
ebe0a4b323f2e17dca808583da2363e4d6efbd7e
/R/time_arr_to_mat.R
da15b8ea29978558dcec7f9e6e870af1b4faf2d3
[]
no_license
aaronolsen/linkR
7aeeb924b209cdf8d3e58f488d7d04af505d899a
6a9a40888fda73171372aee85f9b410068f0169e
refs/heads/master
2021-01-10T22:50:07.336303
2019-06-13T14:45:28
2019-06-13T14:45:28
70,348,665
6
1
null
null
null
null
UTF-8
R
false
false
240
r
time_arr_to_mat.R
time_arr_to_mat <- function(arr, times){ # Convert coordinate array to matrix mat <- arr2mat(arr) # At time column mat <- cbind(mat, times) # Add time column label colnames(mat) <- c(colnames(mat)[1:(ncol(mat)-1)], 'time') mat }
aff4a9444f35e73f66f4d9a24f086ca796f6b239
1f53587b329fe58250bb804a9cffdb34e2b95bef
/3 курс/R project/sample variance.R
b82e2bea6a4f660bc0248c814fbc47a5ab5a699f
[]
no_license
vladborisovjs/Polytech
b43519bdea3482c9d7d680e378292b9d7e9ebc35
60fb31362243558d1c323a52015ad4042668abc1
refs/heads/master
2020-04-02T18:23:35.995868
2018-10-25T16:03:27
2018-10-25T16:03:27
null
0
0
null
null
null
null
UTF-8
R
false
false
670
r
sample variance.R
S1<-0 S2<-0 S3<-0 S4<-0 S5<-0 S6<-0 M<-0.001 for(i in 1:1000){ S1[i]<-var(rnorm(5,0,1)) S2[i]<-var(rnorm(10,0,1)) S3[i]<-var(rnorm(15,0,1)) S4[i]<-var(rnorm(20,0,1)) S5[i]<-var(rnorm(40,0,1)) S6[i]<-var(rnorm(100,0,1)) } firstmid1<-sum(S1)*M firstmid2<-sum(S2)*M firstmid3<-sum(S3)*M firstmid4<-sum(S4)*M firstmid5<-sum(S5)*M firstmid6<-sum(S6)*M secondmid1<-sum(S1)^2*M^2 secondmid2<-sum(S2)^2*M^2 secondmid3<-sum(S3)^2*M^2 secondmid4<-sum(S4)^2*M^2 secondmid5<-sum(S5)^2*M^2 secondmid6<-sum(S6)^2*M^2 DS1<-secondmid1-firstmid1 DS2<-secondmid2-firstmid2 DS3<-secondmid3-firstmid3 DS4<-secondmid4-firstmid4 DS5<-secondmid5-firstmid5 DS6<-secondmid6-firstmid6
c340fa850284ee4d36ba1be06aecbe926636433d
95e08bcbcaae32468488c534cce201730e65a920
/Week 05/Pre-class-week-05_Rscript.R
7439ee456025c2f07ab6699fb85f675cea910558
[]
no_license
PHP-2560/pre-class-work-2018-kvoorhies
ea174886df1a377bd767911aefd9902978544230
627c6e7ca0605c9bd5c0dccc0c424c77012028d2
refs/heads/master
2020-03-28T23:56:38.673338
2018-12-05T03:38:22
2018-12-05T03:38:22
149,316,992
0
0
null
null
null
null
UTF-8
R
false
false
2,612
r
Pre-class-week-05_Rscript.R
# pre-class Make sure you commit this often with meaningful messages. ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) ``` Standardizing a variable means subtracting the mean, and then dividing by the standard deviation. Let’s use a loop to standardize the numeric columns in the [Western Collaborative Group Study](https://clinicaltrials.gov/ct2/show/NCT00005174). This study began in 1960 with 3154 men ages 39-59, who were employed in one of 11 California based companies. They were followed until 1969 during this time, 257 of these men developed coronary heart disease (CHD). You can read this data in with the code below. You can access this dataset with the following code: ```{R} suppressMessages(library(foreign)) wcgs <- read.dta("https://drive.google.com/uc?export=download&id=0B8CsRLdwqzbzYWxfN3ExQllBQkU") ``` The data has the following variables: WCGS has the following variables: ----------------------------------------------------------- Name Description ------- ------------------------------------------- id Subject identification number age Age in years height Height in inches weight Weight in lbs. sbp Systolic blood pressure in mm dbp Diastolic blood pressure in mm Hg chol Fasting serum cholesterol in mm behpat Behavior 1 A1 2 A2 3 B3 4 B4 ncigs Cigarettes per day dibpat Behavior 1 type A 2 type B chd69 Coronary heart disease 1 Yes 0 no typechd Type of CHD 1 myocardial infarction or death 2 silent myocardial infarction 3 angina perctoris time169 Time of CHD event or end of follow-up arcus Arcus senilis 0 absent 1 present bmi Body Mass Index ----------------------------------------------------------- ### Question 1: Standardize Function A. Create a function called standardize.me() that takes a numeric vector as an argument, and returns the standardized version of the vector. B. Assign all the numeric columns of the original WCGS dataset to a new dataset called WCGS.new. C. 
Using a loop and your new function, standardize all the variables in the WCGS.new dataset. D. What should the mean and standard deviation of all your new standardized variables be? Test your prediction by running a loop. #A standardize.me<-function(x){ if(is.numeric(x)){ } else x } ### Question 2: Looping to Calculate A. Using a loop, calculate the mean weight of the subjects, separated by the type of CHD they have. B. Now do the same thing, but without using a loop.
e233386acb17cb1d8c6cda01bfd642e24d941702
89523e086f4021e1498107956a9cedac404b9ef1
/R/escalator.R
8abbd91ca62b8acb49260412342648cae1c9ec4e
[]
no_license
christianbitter/rOCologneKVB
73f7845f27cc8d5d0d80c4645cb0d06a3ffa3ef6
81148087858cc2c258e310275ecad60ee6702a36
refs/heads/master
2021-05-22T01:46:16.759048
2020-05-30T04:20:57
2020-05-30T04:20:57
252,913,503
0
0
null
null
null
null
UTF-8
R
false
false
2,019
r
escalator.R
#'@author christian bitter #'@name escalator #'@title KVB - Escalator #'@description returns the escalator information from the KVB Open Data Portal #'@return an sf object of the provided data (EPSG 4326). #'@param as_spatial should data be returned as sf (default False) #'@examples #'escalator_sf <- escalator() #'@export escalator <- function(as_spatial = F) { url <- "https://data.webservice-kvb.koeln/service/opendata/fahrtreppen/json"; json_content <- base_request(url = url, rq_type = "json"); # now into a spatial structure data_df <- json_content$features; data_df <- extract_geom(data_df); data_df <- extract_prop(data_df); data_df <- data_df %>% dplyr::mutate(Haltestellenbereich = as.numeric(Haltestellenbereich)); .data <- data_df; if (as_spatial) { epsg_code <- 4326; data_sf <- sf::st_as_sf(.data, coords = c("x", "y")) sf::st_crs(data_sf) <- epsg_code; .data <- data_sf; } return(.data); } #'@author christian bitter #'@name escalator #'@title KVB - Escalator #'@param as_spatial should data be returned as sf (default False) #'@description returns the escalator incident information from the KVB Open Data Portal #'@return an sf object of the provided data (EPSG 4326). #'@examples #'escalator_incident_sf <- escalator_incident() #'@export escalator_incident <- function(as_spatial = F) { url <- "https://data.webservice-kvb.koeln/service/opendata/fahrtreppenstoerung/json"; json_content <- base_request(url = url, rq_type = "json"); # now into a spatial structure data_df <- json_content$features; data_df <- extract_geom(data_df); data_df <- extract_prop(data_df); data_df <- data_df %>% dplyr::mutate(time = lubridate::ymd_hms(data_df$timestamp), Haltestellenbereich = as.numeric(Haltestellenbereich)); .data <- data_df; if (as_spatial) { epsg_code <- 4326; data_sf <- sf::st_as_sf(.data, coords = c("x", "y")) sf::st_crs(data_sf) <- epsg_code; .data <- data_sf; } return(.data); }
120b73833e092d72a93937e79ded0ce564212f43
06b0f34938f510e063147fb96eebf83f2d0276e9
/plot1.r
f3c5bf5cd6145d7c160dbdbe2a6ea5ba95bb829c
[]
no_license
erictengland/Exploratory-Data-Analysis-Project-1
53d9d56e25b0a13bbc08c9db2a3f5cd88c423157
3c6763df3cc9c7019165ecd5d6e6880d2335ad6a
refs/heads/master
2021-01-01T15:50:57.540343
2015-06-07T23:31:28
2015-06-07T23:31:28
37,035,101
0
0
null
null
null
null
UTF-8
R
false
false
1,145
r
plot1.r
# Data downloading and processing. temp <- tempfile() download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",temp) data <- read.csv(unz(temp, "household_power_consumption.txt"),sep=";") unlink(temp) rm(temp) subset1<-data[data$Date=="1/2/2007",] subset2<-data[data$Date=="2/2/2007",] subset<-rbind(subset1,subset2) rm(data) texttonum <- function(x){return(as.double(as.character(x)))} subset[,3:9]<-lapply(subset[,3:9], texttonum) texttodate <-function(x){return(as.Date(x,"%d/%m/%Y"))} subset$PosixDate<-sapply(subset[,1],texttodate) x<-paste(subset[,1],subset[,2]) datetime<-list() for (i in 1:2880) { datetime[[i]]<-(strptime(x[i], "%d/%m/%Y %H:%M:%S")) } # End of data processing #Begin actual graphics work png(filename="plot1.png") hist(as.numeric(as.character(subset$Global_a)), main="Global Active Power", xlab="Global Active Power (kilowatts)", col="red", xlim=c(0,6), breaks<-c(0:16)/2, right=FALSE, xaxt="n",yaxt="n", mai=c(2,2,2,2), ylim=c(0,1400) ) axis(side=1,at=c(0,2,4,6)) axis(side=2,at=c(0,200,400,600,800,1000,1200)) dev.off()
045cd08a2f86365aeb7cd0fa27c67c6d0444e59d
01b1302af51d339f7c8827a620c4a5fb26c890f1
/core/standardizeDPSNames.r
56ec2cb2d40531dddbde6bf802a51e9dbc882ca0
[]
no_license
ihmeuw/gf
64ab90fb5a5c49694bde1596f4b20fcf107a76e3
29e0c530b86867d5edd85104f4fe7dcb1ed0f1ee
refs/heads/develop
2021-08-15T02:16:59.086173
2021-08-03T19:52:31
2021-08-03T19:52:31
109,062,373
3
6
null
2019-03-21T01:48:02
2017-10-31T23:17:16
R
UTF-8
R
false
false
3,379
r
standardizeDPSNames.r
# --------------------------------------------------------
# Function that standardizes DPS names in DRC
#
# Inputs:
# nameVector - a vector of class 'character' containing names of DPS's to be standardized
#
# Outputs:
# cleanVector - a vector of class 'character' containing corresponding standard DPS names to nameVector
#
# The current working directory should be the root of this repo
# (the lookup table is read from './core/dps_renaming_file.csv').
#
# NOTE:
# The GADM shapefile used for DPS-level maps is not included in the
# alternate-spellings csv, so its known spellings are rewritten "manually"
# below before the table lookup. - Audrey
# --------------------------------------------------------
standardizeDPSNames = function(nameVector=NULL) {
  # test inputs
  if (is.null(nameVector)) stop('You must pass a vector of DPS names')
  # NOTE(review): class(x)!='character' is fragile; is.character() preferred
  if (class(nameVector)!='character') stop('nameVector must be a character vector')

  # for GADM shapefile: make changes to nameVector that will account for if
  # the input nameVector is from the GADM shapefile
  nameVector <- gsub("Bas-Uélé", "Bas-Uele", nameVector)
  # grepl() catches any spelling/encoding of "Équateur" whose accented first
  # letter may be mangled
  nameVector[grepl("quateur", nameVector)] <- "Equateur" # This needs to be updated somehow because it won't save the correct characters
  nameVector <- gsub("Haut-Uélé", "Haut-Uele", nameVector)
  nameVector <- gsub("Kasaï", "Kasai", nameVector)
  nameVector <- gsub("Kasaï-Central", "Kasai Central", nameVector)
  nameVector <- gsub("Kasaï-Oriental", "Kasai Oriental", nameVector)
  nameVector <- gsub("Maï-Ndombe", "Mai-Ndombe", nameVector)

  # load spreadsheet connecting all known names to standardized names
  require(data.table)
  altNamesFile = './core/dps_renaming_file.csv'
  alternateNames = fread(altNamesFile)

  # prep data table: one row per (standard name, alternate name) pair
  alternateNames = unique(alternateNames[, c('dps','dps_snis','dps_pnlp', 'dps_sv'), with=FALSE])
  alternateNames = melt(alternateNames, id.vars='dps', value.name='alternate_name')

  # make sure standard names are also an option as an input
  tmp = unique(alternateNames[, 'dps', with=FALSE])
  tmp[, alternate_name:=dps]
  alternateNames = rbind(alternateNames, tmp, fill=TRUE)

  # clean up alternate names: lower-case, strip accents, hyphenate spaces
  alternateNames[, alternate_name:=tolower(alternate_name)]
  alternateNames[, alternate_name:=iconv(alternate_name, to='ASCII//TRANSLIT')]
  alternateNames[, alternate_name:=gsub(' ', '-', alternate_name)]
  # make sure that bas-congo is changed to kongo-central
  alternateNames[, alternate_name:=gsub("bas-congo", "kongo-central", alternate_name)]

  # clean up input vector with the same normalization as the lookup table
  nameVector = tolower(nameVector)
  nameVector = iconv(nameVector, to='ASCII//TRANSLIT')
  nameVector = gsub(' ', '-', nameVector)
  # make sure that bas-congo is changed to kongo-central
  nameVector <- gsub("bas-congo", "kongo-central", nameVector)

  # convert input vector to standardized names via positional lookup
  idx = match(nameVector, alternateNames$alternate_name)
  cleanVector = alternateNames[idx]$dps

  # test that it worked: warn about unmatched inputs (these come back NA)
  idx = which(is.na(cleanVector))
  if (any(is.na(cleanVector))) warning(paste0('DPS \'', paste(unique(nameVector[idx]), collapse=', '), '\' not in alternate names list (', altNamesFile, ')'))
  if (length(nameVector)!=length(cleanVector)) stop('Something went wrong. The input vector is a different length than the output vector!')

  # return cleaned names
  return(cleanVector)
}
62f87d430fb4ef25ea3f1d08824deb96806a6668
0479b5e809beae1d18a9c6b603305d674fd5b12e
/tests/testthat/test_prepare_chi.R
37e604552d33b85b730ee67f53d8abfadb9fce75
[]
no_license
huerqiang/GeoTcgaData
ecbd292e37df065ae4697c7dd07027c1e665853d
cc85914f2a17177164c7ae426f8f0f09f91e98c1
refs/heads/master
2023-04-12T10:04:20.034688
2023-04-04T05:57:04
2023-04-04T05:57:04
206,305,770
6
0
null
null
null
null
UTF-8
R
false
false
762
r
test_prepare_chi.R
test_that("can parse example prepare_chi", { cnv <- matrix(c( -1.09150, -1.47120, -0.87050, -0.50880, -0.50880, 2.0, 2.0, 2.0, 2.0, 2.0, 2.601962, 2.621332, 2.621332, 2.621332, 2.621332, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0 ), nrow = 5) cnv <- as.data.frame(cnv) rownames(cnv) <- c("AJAP1", "FHAD1", "CLCNKB", "CROCCP2", "AL137798.3") colnames(cnv) <- c( "TCGA-DD-A4NS-10A-01D-A30U-01", "TCGA-ED-A82E-01A-11D-A34Y-01", "TCGA-WQ-A9G7-01A-11D-A36W-01", "TCGA-DD-AADN-01A-11D-A40Q-01", "TCGA-ZS-A9CD-10A-01D-A36Z-01", "TCGA-DD-A1EB-11A-11D-A12Y-01" ) cnv_chi_file <- prepare_chi(cnv) expect_true("normalCNV" %in% colnames(cnv_chi_file)) })
740173269c7712c26a41bf6a646d9ba39bf65c2d
ff9eb712be2af2fa24b28ecc75341b741d5e0b01
/R/gofTestCensored.R
c4107a3a4743ed2171d6bd30e0747cd97cdb5a0a
[]
no_license
alexkowa/EnvStats
715c35c196832480ee304af1034ce286e40e46c2
166e5445d252aa77e50b2b0316f79dee6d070d14
refs/heads/master
2023-06-26T19:27:24.446592
2023-06-14T05:48:07
2023-06-14T05:48:07
140,378,542
21
6
null
2023-05-10T10:27:08
2018-07-10T04:49:22
R
UTF-8
R
false
false
5,327
r
gofTestCensored.R
# Goodness-of-fit test for censored data (EnvStats).
#
# Validates the inputs, decides whether the data are singly or multiply
# censored, assembles the name of the matching internal test function
# (e.g. "sfMultiplyCensoredGofTest") and dispatches to it via do.call().
#
# Arguments:
#   x              numeric vector of observations
#   censored       logical (or 0/1 numeric) vector flagging censored values
#   censoring.side "left" or "right"
#   test           "sw" (Shapiro-Wilk), "sf" (Shapiro-Francia) or "ppcc"
#   distribution   abbreviation of the hypothesized distribution
#   est.arg.list   extra arguments passed on for parameter estimation
#   prob.method    plotting-position method (multiply censored data only)
#   plot.pos.con   plotting-position constant, a scalar in [0, 1]
#   keep.data      if FALSE, drop the data/censored components of the result
#   data.name, censoring.name  labels for the result (default: deparsed args)
#
# Returns: the list produced by the dispatched test function, annotated
# with data.name, censoring.name and bad.obs.
gofTestCensored <- function (x, censored, censoring.side = "left", test = "sf",
    distribution = "norm", est.arg.list = NULL, prob.method = "hirsch-stedinger",
    plot.pos.con = 0.375, keep.data = TRUE, data.name = NULL, censoring.name = NULL)
{
    # Normalize the distribution argument to its EnvStats abbreviation.
    distribution <- check.distribution.args(distribution, check.params = FALSE)$dist.abb
    if (!is.vector(x, mode = "numeric"))
        stop("'x' must be a numeric vector")
    # Capture the caller's expression as a label before 'x' is modified.
    if (is.null(data.name))
        data.name <- deparse(substitute(x))
    if (!is.vector(censored, mode = "numeric") & !is.vector(censored, mode = "logical"))
        stop("'censored' must be a logical or numeric vector")
    if (length(censored) != length(x))
        stop("'censored' must be the same length as 'x'")
    # A numeric censoring indicator may only contain 0/1 (plus NA/NaN/Inf).
    if (is.numeric(censored)) {
        index <- is.finite(censored)
        if (!all(is.element(censored[index], 0:1)))
            stop(paste("When 'censored' is a numeric vector, all non-missing values of",
                "'censored' must be 0 (not censored) or 1 (censored)."))
    }
    if (is.null(censoring.name))
        censoring.name <- deparse(substitute(censored))
    censoring.side <- match.arg(censoring.side, c("left", "right"))
    test <- match.arg(test, c("sw", "sf", "ppcc"))
    # Internal function names use "ppccNorm" for the PPCC test.
    if (test == "ppcc")
        test <- "ppccNorm"
    # Drop observations that are non-finite in either x or censored,
    # counting them in bad.obs (note: the assignment inside the condition
    # defines both 'bad.obs' and the keep-mask 'ok').
    if ((bad.obs <- sum(!(ok <- is.finite(x) & is.finite(as.numeric(censored))))) >
        0) {
        is.not.finite.warning(x)
        is.not.finite.warning(as.numeric(censored))
        x <- x[ok]
        censored <- censored[ok]
        warning(paste(bad.obs, "observations with NA/NaN/Inf in 'x' and/or 'censored' removed."))
    }
    if (is.numeric(censored))
        censored <- as.logical(censored)
    n.cen <- sum(censored)
    # With no censored values, fall back to the ordinary gofTest().
    if (n.cen == 0) {
        warning(paste("No censored values indicated by 'censored',",
            "so the function 'gofTest' was called."))
        ret.list <- gofTest(x = x, test = test, distribution = distribution,
            est.arg.list = est.arg.list)
        ret.list$data.name <- data.name
        ret.list$bad.obs <- bad.obs
        return(ret.list)
    }
    x.no.cen <- x[!censored]
    if (length(unique(x.no.cen)) < 2)
        stop("'x' must contain at least 2 non-missing, uncensored, distinct values.")
    if (any(distribution == c("lnorm", "lnormAlt")) && any(x <=
        0))
        stop("All non-missing values of 'x' must be positive for a lognormal distribution")
    # The data are treated as singly censored only when there is a single
    # censoring level lying entirely below (left) / above (right) the
    # uncensored values; otherwise they are multiply censored.
    multiple <- TRUE
    T.vec <- unique(x[censored])
    if (length(T.vec) == 1) {
        if (censoring.side == "left") {
            if (T.vec <= min(x.no.cen))
                multiple <- FALSE
        }
        else {
            if (T.vec >= max(x.no.cen))
                multiple <- FALSE
        }
    }
    if (multiple) {
        # Restrictions that only apply to multiply censored data.
        if (test == "sw")
            stop(paste("Shapiro-Wilk test not available for multiply censored data.",
                "Set test='sf' or test='ppcc'."))
        prob.method <- match.arg(prob.method, c("hirsch-stedinger",
            "michael-schucany", "modified kaplan-meier", "nelson"))
        if (censoring.side == "left" & prob.method == "nelson")
            stop("Nelson Method not available when censoring.side='left'")
        if (censoring.side == "right" & prob.method == "modified kaplan-meier")
            stop("Modified Kaplan-Meier Method not available when censoring.side='right'")
        if (!is.vector(plot.pos.con, mode = "numeric") || length(plot.pos.con) !=
            1 || plot.pos.con < 0 || plot.pos.con > 1)
            stop("'plot.pos.con' must be a numeric scalar between 0 and 1")
        censoring.type <- "MultiplyCensored"
    }
    else {
        censoring.type <- "SinglyCensored"
    }
    # Build the internal test-function name, e.g. "sfMultiplyCensoredGofTest".
    test.name <- paste(test, censoring.type, "GofTest", sep = "")
    # Distributions other than (log)normal go through the "General" variant,
    # which requires a continuous distribution with a censored-data
    # estimation function e<dist> exported by EnvStats.
    if (!(distribution %in% c("norm", "lnorm", "lnormAlt"))) {
        efcn <- paste("e", distribution, sep = "")
        if (EnvStats::Distribution.df[distribution, "Type"] !=
            "Continuous" || !exists(efcn, where = "package:EnvStats"))
            stop(paste("When the argument distribution is not equal to",
                "'norm', 'lnorm', or 'lnormAlt',", "it must indicate a continuous distribution, and",
                "there must exist an associated function", "to estimate the parameters in the presence of censored data.",
                "See the help file for 'EnvStats::Distribution.df' for more information."))
        test.name <- paste(test, censoring.type, "GeneralGofTest",
            sep = "")
    }
    # Assemble the argument list and dispatch to the internal test function.
    arg.list <- list(x = x, censored = censored, censoring.side = censoring.side,
        distribution = distribution, est.arg.list = est.arg.list)
    if (multiple)
        arg.list <- c(arg.list, list(prob.method = prob.method,
            plot.pos.con = plot.pos.con))
    ret.list <- do.call(test.name, args = arg.list)
    if (!keep.data) {
        ret.list <- ret.list[!(names(ret.list) %in% c("data",
            "censored"))]
        oldClass(ret.list) <- "gofCensored"
    }
    ret.list$data.name <- data.name
    ret.list$censoring.name <- censoring.name
    # Only report bad.obs when observations were actually removed.
    if (any(bad.obs > 0))
        ret.list$bad.obs <- bad.obs
    else ret.list$bad.obs <- NULL
    ret.list
}
5cb47a0a43a0387372d8a02366c40f683f64728e
da023fd8498e4216bea90093c858fc339cebc9e3
/04_HPC_abundance_analysis.R
bdd809b7c8f56d63be908b16236a76983a16dcd6
[ "MIT" ]
permissive
bowlerbear/ptarmiganUpscaling
92e48762edba81dcf488300bb8cb67e87a418209
3283d62795edecb672613b87c1c5b92df69e2222
refs/heads/master
2023-01-09T03:22:42.668585
2022-12-23T11:09:58
2022-12-23T11:09:58
128,047,997
0
0
null
null
null
null
UTF-8
R
false
false
18,943
r
04_HPC_abundance_analysis.R
#script to analysis line transect data on the HPC library(tidyverse) library(sp) library(rgeos) library(raster) library(maptools) #specify top level folder #myfolder <- "Data" #on local PC myfolder <- "/data/idiv_ess/ptarmiganUpscaling" #HPC ### ptarmigan data ############################################################### #read in data frame allData <- readRDS(paste(myfolder,"allData.rds",sep="/")) #subset to years of interest - 2008 onwards since many not visited in 2007 allData <- subset(allData,Year>2007 & Year<2018) #remove hyphens for help with subsetting allData$Fylkesnavn <- gsub("-"," ",allData$Fylkesnavn) allData$Fylkesnavn[which(allData$Rapporteringsniva=="Indre Troms")] <- "Troms" #mistake with 1405 - transect length allData$LengdeTaksert[which(allData$LinjeID==1405&allData$LengdeTaksert==1100)] <- 11000 ### remove outliers (see section below) ############################# #LinjeID 1925 has twice as high counts as all others allData <- subset(allData, LinjeID!=1925) #remove LinjeID 131?? 
-only 503 m long - smallest transect #drop lines visited in less than 5 years - see below allData <- subset(allData, !LinjeID %in% c(935,874,876,882,884,936,2317,2328,2338,878,886,1250,1569,2331,2339)) ### plot data ####################################################### #get lines as a spatial object # library(sf) # library(tmap) # # Lines_spatial <- readRDS("data/Lines_spatial.rds") # Lines_spatial <- subset(Lines_spatial, LinjeID %in% allData$LinjeID) # Lines_spatial <-st_as_sf(Lines_spatial) # Lines_spatial <- st_transform(Lines_spatial, st_crs(NorwayOrigProj)) # # occ_tmap <- tm_shape(NorwayOrigProj) + # tm_borders() + # tm_shape(Lines_spatial)+ # tm_lines(col="skyblue4",lwd=2) # occ_tmap # # saveRDS(occ_tmap,"plots/transects.RDS") ### aggregate data to the lines ###################################### #Get statistics per year and line tlDF <- allData %>% dplyr::group_by(LinjeID,Year) %>% dplyr::summarise(nuGroups = length(totalIndiv[!is.na(totalIndiv)]), totalsInfo = sum(totalIndiv,na.rm=T), groupSize = mean(totalIndiv,na.rm=T), length = mean(LengdeTaksert,na.rm=T)) sum(tlDF$totalsInfo,na.rm=T) #insert NA when there is no transect but evidence of a survey tlDF$length[is.na(tlDF$length)] <- 0 tlDF$nuGroups[tlDF$length==0 ] <- NA tlDF$totalsInfo[tlDF$length==0] <- NA tlDF$groupSize[tlDF$length==0] <- NA summary(tlDF) sum(tlDF$length==0) #### outlier check ################################################## #row/siteIndex 423 is an outlier/LinejeID 1925 # summaryData <- tlDF %>% # group_by(LinjeID) %>% # summarise(med = median(totalsInfo,na.rm=T), # medDensity = median(totalsInfo/length,na.rm=T), # transectlength=mean(length,na.rm=T), # nuObsYears = length(unique(Year[!totalsInfo==0 & !is.na(totalsInfo)])), # nuYears = length(unique(Year[!is.na(totalsInfo)])), # propObsYears = nuObsYears/nuYears) %>% # arrange(desc(med)) # # qplot(summaryData$transectlength,summaryData$medDensity) # qplot(summaryData$nuYears,summaryData$medDensity) # # 
subset(summaryData,propObsYears<0.3) # # tlDF %>% # group_by(LinjeID) %>% # summarise(nuYears = sum(!is.na(totalsInfo))) %>% # arrange(nuYears) %>% # filter(nuYears <5) # #15 line ### get environ data ################################################# bufferData <- readRDS(paste(myfolder, "varDF_allEnvironData_buffers_idiv.rds",sep="/")) bufferData <- subset(bufferData, !LinjeID %in% c(935,874,876,882,884,936,2317,2328,2338,878,886,1250,1569,2331,2339,1925)) tlDF <- subset(tlDF, LinjeID %in% bufferData$LinjeID) siteInfo_ArtsDaten <- readRDS(paste(myfolder, "siteInfo_ArtsDaten.rds",sep="/")) ### make siteInfo ###################################### tlDF$siteIndex <- as.numeric(as.factor(tlDF$LinjeID)) siteInfo <- unique(tlDF[,c("LinjeID","siteIndex")]) siteInfo <- arrange(siteInfo,siteIndex) siteInfo$adm <- bufferData$adm[match(siteInfo$LinjeID,bufferData$LinjeID)] siteInfo$admN <- as.numeric(as.factor(siteInfo$adm)) ### make arrays ################################################### #cast into arrays groupInfo <- reshape2::acast(tlDF,siteIndex~Year,value.var="nuGroups") totalsInfo <- reshape2::acast(tlDF,siteIndex~Year,value.var="totalsInfo") groupSizes <- reshape2::acast(tlDF,siteIndex~Year,value.var="groupSize") transectLengths <- reshape2::acast(tlDF,siteIndex~Year,value.var="length") sum(as.numeric(totalsInfo),na.rm=T) ### get observed max density ###################################### #transectArea <- (transectLengths/1000 * 0.1 * 2) #summary(totalsInfo/transectArea) ### transect lengths ############################################## #where there is a NA for transect length - put the mean for the line #just for imputation purposes meanTL = apply(transectLengths,1,function(x)median(x[x!=0])) for(i in 1:nrow(transectLengths)){ for(j in 1:ncol(transectLengths)){ transectLengths[i,j] <- ifelse(transectLengths[i,j]==0, meanTL[i], transectLengths[i,j]) } } #check alignment with other datasets all(row.names(groupInfo)==siteInfo$siteIndex) ### site abundances 
############################################## # siteSummary <- tlDF %>% # filter(!is.na(totalsInfo)) %>% # group_by(siteIndex) %>% # summarise(nuZeros = sum(totalsInfo==0),meanCount = mean(totalsInfo)) # table(siteSummary$nuZeros) # summary(siteSummary$meanCount) #all above zero # temp <- allData %>% # filter(!is.na(totalIndiv)) %>% # group_by(LinjeID,Year) %>% # summarise(nuIndiv=sum(totalIndiv)) %>% # group_by(LinjeID) %>% # summarise(med = median(nuIndiv)) # # summary(temp$med) ### detection data ################################################ allDetections <- subset(allData, !is.na(totalIndiv) & totalIndiv!=0 & LinjeID %in% bufferData$LinjeID) allDetections$yearIndex <- as.numeric(factor(allDetections$Year)) allDetections$siteIndex <- siteInfo$siteIndex[match(allDetections$LinjeID, siteInfo$LinjeID)] allDetections$admN <- siteInfo$admN[match(allDetections$LinjeID, siteInfo$LinjeID)] #add on admN index to full data frame as same indices siteInfo_ArtsDaten$admNgrouped <- siteInfo$admN[match(siteInfo_ArtsDaten$admGrouped, siteInfo$adm)] #predict possible ESW for all transects - impute for mean value when it is missing meanGS = apply(groupSizes,1,function(x)median(x[!is.na(x)])) for(i in 1:nrow(groupSizes)){ for(j in 1:ncol(groupSizes)){ groupSizes[i,j] <- ifelse(is.na(groupSizes[i,j]), meanGS[i], groupSizes[i,j]) } } ### line-transect index ######################################## #remember: some lines are given the same siteIndex (when they overlap in the same grid) #get mapping from lines to grids siteIndex_linetransects <- readRDS(paste(myfolder,"siteIndex_linetransects.rds",sep="/")) siteIndex_linetransects <- siteIndex_linetransects %>% ungroup() %>% filter(LinjeID %in% siteInfo$LinjeID) siteIndex_linetransects$siteIndex_All <- as.numeric(as.factor(siteIndex_linetransects$siteIndex_All)) summary(siteIndex_linetransects$siteIndex_All) #302 grids are sampled #map indices to siteInfo for line transects siteInfo$siteIndex_All <- 
siteIndex_linetransects$siteIndex_All[match(siteInfo$LinjeID, siteIndex_linetransects$LinjeID)] summary(siteInfo$siteIndex_All) #map indices to siteInfo_ArtsDaten for grid data siteInfo_ArtsDaten$siteIndex_All <- siteIndex_linetransects$siteIndex_All[match(siteInfo_ArtsDaten$grid,siteIndex_linetransects$grid)] summary(siteInfo_ArtsDaten$siteIndex_All) siteInfo_ArtsDaten <- plyr::arrange(siteInfo_ArtsDaten,siteIndex_All) #fill in unsampled ones with indices nuMissing <- sum(is.na(siteInfo_ArtsDaten$siteIndex_All)) maxIndex <- max(siteIndex_linetransects$siteIndex_All) siteInfo_ArtsDaten$siteIndex_All[is.na(siteInfo_ArtsDaten$siteIndex_All)] <- (maxIndex+1):(maxIndex + nuMissing) summary(siteInfo_ArtsDaten$siteIndex_All) #arrange siteInfo <- siteInfo %>% arrange(siteIndex) siteInfo_ArtsDaten <- siteInfo_ArtsDaten %>% arrange(siteIndex_All) ### make bugs objects ########################################### bugs.data <- list(#For the state model nsite = length(unique(siteInfo$siteIndex)), nsiteAll = length(unique(siteInfo_ArtsDaten$siteIndex_All)), nyear = length(unique(allData$Year)), nadm = length(unique(siteInfo$admN)), site = siteInfo$siteIndex, siteAll = siteInfo$siteIndex_All, adm = siteInfo$admN, pred.adm = siteInfo_ArtsDaten$admNgrouped, NuIndivs = totalsInfo, TransectLength = transectLengths, #For the distance model W = 200, ndetections = nrow(allDetections), y = allDetections$LinjeAvstand, ln_GroupSize = log(allDetections$totalIndiv+1), GroupSizes = groupSizes, detectionYear = allDetections$yearIndex, detectionSite = allDetections$siteIndex, detectionAdm = allDetections$admN, zeros.dist = rep(0,nrow(allDetections))) names(bugs.data) ### get environ data ######################################### all(bufferData$LinjeID==siteInfo$LinjeID) myVars <- c("bio1", "bio5","y","bio6","MountainBirchForest", "Bog","ODF", "Meadows","OSF","Mire","SnowBeds", "tree_line_position","tree_line","distCoast","elevation") # scale them bufferData <- bufferData[,c("LinjeID",myVars)] 
bufferMeans <- as.numeric(apply(bufferData,2,mean)) bufferSD <- as.numeric(apply(bufferData,2,sd)) for(i in 2:ncol(bufferData)){ bufferData[,i] <- (bufferData[,i] - bufferMeans[i])/bufferSD[i] } #also for the siteInfo_ArtsDaten with same scaling siteInfo_ArtsDaten <- siteInfo_ArtsDaten[,c("grid",myVars)] for(i in 2:(ncol(siteInfo_ArtsDaten)-1)){#dont scale elevation siteInfo_ArtsDaten[,i] <- (siteInfo_ArtsDaten[,i] - bufferMeans[i])/bufferSD[i] } #saveRDS(siteInfo_ArtsDaten, file="data/siteInfo_AbundanceModels.rds") ### choose model ############################################## modelTaskID <- read.delim(paste(myfolder,"modelTaskID_distanceModel.txt",sep="/"),as.is=T) #get task id task.id = as.integer(Sys.getenv("SLURM_ARRAY_TASK_ID", "1")) #get model for this task mymodel <- modelTaskID$Model[which(modelTaskID$TaskID==task.id)] ### standard model ########################################### #variables selected based on first simple analyses if(mymodel == "linetransectModel_variables.txt"){ #add new variables to the bugs data bugs.data$occDM <- model.matrix(~ bufferData$bio6 + bufferData$bio5 + bufferData$tree_line + I(bufferData$tree_line^2))[,-1] #predictions to full grid bugs.data$predDM <- model.matrix(~ siteInfo_ArtsDaten$bio6 + siteInfo_ArtsDaten$bio5 + siteInfo_ArtsDaten$tree_line + I(siteInfo_ArtsDaten$tree_line^2))[,-1] ### indicator model selection ################################ #all linear and select quadratic } else { bugs.data$occDM <- model.matrix(~ bufferData$bio6 + bufferData$bio5 + bufferData$y + bufferData$distCoast + bufferData$tree_line + bufferData$MountainBirchForest + bufferData$Bog + bufferData$ODF + bufferData$Meadows + bufferData$OSF + bufferData$Mire + bufferData$SnowBeds + I(bufferData$bio6^2) + I(bufferData$bio5^2) + I(bufferData$y^2) + I(bufferData$distCoast^2) + I(bufferData$tree_line^2) + I(bufferData$MountainBirchForest^2) + I(bufferData$Bog^2) + I(bufferData$ODF^2) + I(bufferData$Meadows^2) + I(bufferData$OSF^2) + 
I(bufferData$Mire^2) + I(bufferData$SnowBeds^2))[,-1] # #predictions to full grid bugs.data$predDM <- model.matrix(~ siteInfo_ArtsDaten$bio6 + siteInfo_ArtsDaten$bio5 + siteInfo_ArtsDaten$y + siteInfo_ArtsDaten$distCoast + siteInfo_ArtsDaten$tree_line + siteInfo_ArtsDaten$MountainBirchForest + siteInfo_ArtsDaten$Bog + siteInfo_ArtsDaten$ODF + siteInfo_ArtsDaten$Meadows + siteInfo_ArtsDaten$OSF + siteInfo_ArtsDaten$Mire + siteInfo_ArtsDaten$SnowBeds + I(siteInfo_ArtsDaten$bio6^2) + I(siteInfo_ArtsDaten$bio5^2) + I(siteInfo_ArtsDaten$y^2) + I(siteInfo_ArtsDaten$distCoast^2) + I(siteInfo_ArtsDaten$tree_line^2) + I(siteInfo_ArtsDaten$MountainBirchForest^2) + I(siteInfo_ArtsDaten$Bog^2) + I(siteInfo_ArtsDaten$ODF^2) + I(siteInfo_ArtsDaten$Meadows^2) + I(siteInfo_ArtsDaten$OSF^2) + I(siteInfo_ArtsDaten$Mire^2) + I(siteInfo_ArtsDaten$SnowBeds^2))[,-1] } # myvars <- c("y",'bio6',"bio6_2","distCoast","distCoast_2", # "bio5","bio5_2","tree_line","tree_line_2","OSF","SnowBeds") # # myvars <- c("bio6","bio5","distCoast","tree_line","MountainBirchForest", # "Bog","ODF","Meadows","OSF","Mire","SnowBeds", # "bio6_2","bio5_2","distCoast_2","tree_line_2","MountainBirchForest_2", # "Bog_2","ODF_2","Meadows_2","OSF_2","Mire_2","SnowBeds_2") bugs.data$n.covs <- ncol(bugs.data$occDM) bugs.data$n.preds <- dim(bugs.data$predDM)[1] #saveRDS(bugs.data, file="data/bugs.data_linetransects.rds") ### fit model ################################################# library(rjags) library(jagsUI) params <- c("int.d","beta","g","r", "b.group.size","meanESW", "meanDensity","Density.p","exp.j", "bpv","MAD","expNuIndivs") #choose model - already done above now #modelfile <- paste(myfolder,"linetransectModel_variables.txt",sep="/") #modelfile <- paste(myfolder,"linetransectModel_variables_LASSO.txt",sep="/") #modelfile <- paste(myfolder,"linetransectModel_variables_ModelSelection.txt",sep="/") modelfile <- paste(myfolder,mymodel,sep="/") #n.cores = as.integer(Sys.getenv("NSLOTS", "1")) n.cores = 
as.integer(Sys.getenv("SLURM_CPUS_PER_TASK", "1")) #n.cores = 3 n.iterations = 50000 Sys.time() out1 <- jags(bugs.data, inits=NULL, params, modelfile, n.thin=50, n.chains=n.cores, n.burnin=n.iterations/2, n.iter=n.iterations, parallel=T) saveRDS(out1$summary,file=paste0("outSummary_linetransectModel_variables_",task.id,".rds")) print("Done main model") Sys.time() ### summary ################################################### temp <- data.frame(out1$summary) temp$Param <- row.names(temp) #look at MAD for each year for(i in 1:bugs.data$nyear){ mypattern <- paste0(",",i,"]") temp_train <- subset(temp, grepl("expNuIndivs", temp$Param)) temp_train <- subset(temp_train, grepl(mypattern, temp_train$Param))$mean data_train <- bugs.data$NuIndivs[,i] message(paste("Results in year", i, sep=" ")) print(summary(abs(data_train[!is.na(data_train)] - temp_train[!is.na(data_train)]))) print(cor(data_train[!is.na(data_train)],temp_train[!is.na(data_train)])) } print("Simple stats done now") ### samples ####################################################### library(ggmcmc) ggd <- ggs(out1$samples) out1_dataset <- subset(ggd,grepl("expNuIndivs",ggd$Parameter)) out1_dataset <- subset(out1_dataset,grepl(",6]",out1_dataset$Parameter)) out1_dataset$index <- as.numeric(interaction(out1_dataset$Iteration,out1_dataset$Chain)) #get actual NuIndiv totalsInfo_mid <- bugs.data$NuIndivs[,6] #get difference between this value and the simulated values mad_dataset <- as.numeric() rmse_dataset <- as.numeric() n.index <- max(out1_dataset$index) useData <- !is.na(totalsInfo_mid) for(i in 1:n.index){ mad_dataset[i] <- mean(abs(totalsInfo_mid[useData] - out1_dataset$value[out1_dataset$index==i][useData])) rmse_dataset[i] <- sqrt(mean((totalsInfo_mid[useData] - out1_dataset$value[out1_dataset$index==i][useData])^2)) } summary(mad_dataset) summary(rmse_dataset) saveRDS(summary(mad_dataset),file=paste0("MAD_linetransectModel_variables_",task.id,".rds")) 
saveRDS(summary(rmse_dataset),file=paste0("RMSE_linetransectModel_variables_",task.id,".rds")) print("Done model assessment") ### get site and year predictions ############################ out2 <- update(out1, parameters.to.save = c("Density.pt"),n.iter=10000, n.thin=50) #summary saveRDS(out2$summary,file=paste0("Density.pt_Summary_linetransectModel_variables_",task.id,".rds")) #and samples ggd <- ggs(out2$samples) saveRDS(ggd,file=paste0("Density.pt_linetransectModel_variables_",task.id,".rds")) print("end") ### end #######################################################
c0eb7fa85dd0bd03ca6555ceffc36771bee47bf7
dedfaa39da7f7597167da32979f8a76a9061eae6
/R/print_expoapp.R
3a1bf9702595217724a8f01f9f4e364d0bc592bc
[]
no_license
daviddonaire/RExpoApp
756f370f9c02498f67f1649c6097c0d72c429b65
4b6775883a315deb33bd4461017a7314844ad58d
refs/heads/master
2020-12-12T06:17:34.195075
2020-04-18T02:33:20
2020-04-18T02:33:20
234,061,969
0
0
null
null
null
null
UTF-8
R
false
false
4,023
r
print_expoapp.R
#' Write Html of ExpoApp Quality Data Analysis Report #' #' It is the function to generate the html with the data analysis of the ExpoApp data. #' @param result It is a list object with the times, settings, notes, gps_plot, pa_plot, and nolocation. #' Times is a data.table with the information of the quality of the data. #' Settings are the software details of the phone and settings of the ExpoApp session. #' Notes is the logcat from ExpoApp with all the infromation about battery of the phone. #' Gps_plot is a mapview object with the gps information from ExpoApp. #' PA_plot is a ggplot object with the METs time-series from ExpoApp. #' Nolocation is the temporal completeness of the geolocation of the Expoapp session. #' @param output_dir The folder where we want to store the html file with the quality analysis of ExpoApp session. #' @param open_html A logical variable (TRUE/FALSE) indicating if we want to open the Quality Analysis Report in the browser. #' @param save_html A logical variable (TRUE/FALSE) indicating if we want to save the Quality Analysis Report. #' @param ... optional arguments to the function. 
#' #' @return value print_expoapp <- function(result, output_dir = NULL, open_html = TRUE, save_html = TRUE,...){ expoapp_text1 <- c("#' ---","#' title: ExpoApp Quality Data Analysis","#' author: David Donaire-Gonzalez","#' date: January 8th, 2019", "#' output:","#' html_document:","#' toc: true","#' highlight: zenburn","#' ---"," ","#' ## Phone & ExpoApp Settings","#'", " ",'#+ results="asis",echo=FALSE, size="tiny" ') expoapp_text2 <- c("\n#'"," ","#' ## Evaluation of Data Completeness:","#' Recorded, Wearing, and with All, GPS and NETWORK location", "#'"," ",'#+ results="asis",echo=FALSE, size="tiny" ',"knitr::kable(result$times)","#'"," ","#' ## ExpoApp Physical Activity Plot", "#'"," ","#+ fig.width=9, fig.height=4,echo=FALSE ","result$pa_plot ","#'"," ","#' ## ExpoApp Map ","#'"," ","#+ fig.width=9, fig.height=6, echo=FALSE ", "result$gps_plot@map ","#'"," ","#' ## Minutes with accelerometer but no location","#' (off: smartphone turn off) ","#'"," ","#+ results='asis',echo=FALSE ","knitr::kable(result$nolocation) ", "#'"," "," ","#' ## ExpoApp logcat ","#'"," ","#+ results='asis',echo=FALSE ") expoapp_text3 <- c("\n#'"," ","#+ echo=FALSE ","#https://rmarkdown.rstudio.com/articles_report_from_r_script.html ", "#http://brooksandrew.github.io/simpleblog/articles/render-reports-directly-from-R-scripts/ ", "#https://kbroman.org/knitr_knutshell/pages/Rmarkdown.html ", "#https://stackoverflow.com/questions/48370425/pass-code-to-input-of-rmarkdown-render-function-instead-of-a-file ", " ",'#rmarkdown::render("C:/ACU/1_Projects/iMAP/Scripts/ExpoApp/Test_Improving_R_mobile_tools.R") ',"#'"," ") id <- result$settings$characteristics[result$settings$V1 == "ID"] Rname <- paste0("Expoapp_Report_",id,".R") Hname <- paste0("Expoapp_Report_",id,".html") tmp <- file.path(output_dir,Rname) cat(paste0(expoapp_text1,collapse="\n"),file=tmp) cat(sapply(apply(result$settings,1,paste0,collapse=": "),function(x)paste("\n#'",x," ")),file=tmp,append=T) 
cat(paste0(expoapp_text2,collapse="\n"),file=tmp,append=T) cat(sapply(apply(result$notes,1,paste0,collapse=" "),function(x)paste("\n#'",x," ")),file=tmp,append=T) cat(paste0(expoapp_text3,collapse="\n"),file=tmp,append=T) if(rmarkdown::pandoc_available()==FALSE){ stop("Error: pandoc 2.7.2 is not installed. Use the following link to installed. \n https://github.com/jgm/pandoc/releases/download/2.7.2/pandoc-2.7.2-windows-x86_64.msi") } rmarkdown::render(tmp,output_dir = output_dir) unlink(tmp) if(open_html == TRUE){ browseURL(file.path(output_dir,Hname)) } if(save_html == FALSE){ unlink(file.path(output_dir,Hname)) } }