content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
context("Check read_glatos_workbook")
# Actual result
#get path to example receiver location file
wb_file <- system.file("extdata",
"walleye_workbook.xlsm", package="glatos")
wb <- read_glatos_workbook(wb_file)
# Test using testthat library
test_that("metadata element gives expected result", {
# Check if expected and actual results are the same
expect_equal(wb[["metadata"]][1:5], walleye_workbook[["metadata"]][1:5])
})
test_that("receivers element gives expected result", {
# Check if expected and actual results are the same
expect_equal(wb["receivers"], walleye_workbook["receivers"])
})
test_that("animals gives expected result", {
# Check if expected and actual results are the same
expect_equal(wb["animals"], walleye_workbook["animals"])
}) | /tests/testthat/test-read_glatos_workbook.r | no_license | jsta/glatos | R | false | false | 769 | r | context("Check read_glatos_workbook")
# Actual result
# Get the path to the example workbook shipped with the glatos package;
# expected values come from the packaged `walleye_workbook` object.
wb_file <- system.file("extdata",
                       "walleye_workbook.xlsm", package = "glatos")
wb <- read_glatos_workbook(wb_file)

# Test using testthat library
test_that("metadata element gives expected result", {
  # Check if expected and actual results are the same
  expect_equal(wb[["metadata"]][1:5], walleye_workbook[["metadata"]][1:5])
})

test_that("receivers element gives expected result", {
  # Use `[[` (not `[`) so the elements themselves are compared rather than
  # length-1 named lists; this matches the metadata test above and gives
  # clearer failure output.
  expect_equal(wb[["receivers"]], walleye_workbook[["receivers"]])
})

test_that("animals gives expected result", {
  # Check if expected and actual results are the same
  expect_equal(wb[["animals"]], walleye_workbook[["animals"]])
})
#' @title The Earthquake Dataset
#'
#' @description This dataset contains the frequencies of all earthquakes of a given magnitude
#' (reported to one decimal place) for magnitudes from 4.5 to 9.1 that occurred
#' between January 1, 1964 and December 31, 2012.
#'
#' @details We use this dataset as a real data example to illustrate goodness-of-fit tests
#' of NB and Poisson regression models (univariate response).
#'
#' @usage data(earthquake)
#' @format A 45 by 2 matrix, with column names "Magnitude" and "Frequency"
#' @name earthquake
#'
#' @docType data
#'
#' @references Composite Earthquake Catalog, Advanced National Seismic System, Northern
#' California Earthquake Data Center (NCEDC), \url{http://quake.geo.berkeley.edu/cnss/}.
#'
#' See \url{https://github.com/gu-mi/NBGOF/wiki/} for more details.
#'
#' @keywords datasets
#'
#' @examples
#'
#' ## Load the dataset into R session:
#' library(NBGOF)
#' data(earthquake)
#'
#' ## basic descriptions of the dataset:
#' head(earthquake)
#' range(earthquake$Magnitude) # 4.5 9.1
#' range(earthquake$Frequency) # 1 33280
#'
#' ## GOF test of NB2, NBP and Poisson models:
#' y = earthquake$Frequency
#' x = as.matrix(cbind(rep(1,length(y)), earthquake$Magnitude))
#' gof.nb2 = nb.gof.v(y, x, sim=999, model="NB2")
#' gof.nbp = nb.gof.v(y, x, sim=999, model="NBP", method="ML")
#' x2 = x[,2]
#' gof.poi = nb.gof.v(y, x2, sim=999, model="Poisson")
#'
#' ## Empirical Probability Plots:
#' # pdf(file=file.path(path1, "eqk-nb-models.pdf"), width=8, height=4)
#' par(mfrow=c(1,2))
#' print(EPPlot(gof.nb2, envelope=0.95, data.note="Earthquake Dataset"))
#' print(EPPlot(gof.nbp, envelope=0.95, data.note="Earthquake Dataset"))
#' # dev.off()
#'
NULL
| /R/earthquake.R | no_license | jjlcathy/NBGOF | R | false | false | 1,719 | r |
#' @title The Earthquake Dataset
#'
#' @description This dataset contains the frequencies of all earthquakes of a given magnitude
#' (reported to one decimal place) for magnitudes from 4.5 to 9.1 that occurred
#' between January 1, 1964 and December 31, 2012.
#'
#' @details We use this dataset as a real data example to illustrate goodness-of-fit tests
#' of NB and Poisson regression models (univariate response).
#'
#' @usage data(earthquake)
#' @format A 45 by 2 matrix, with column names "Magnitude" and "Frequency"
#' @name earthquake
#'
#' @docType data
#'
#' @references Composite Earthquake Catalog, Advanced National Seismic System, Northern
#' California Earthquake Data Center (NCEDC), \url{http://quake.geo.berkeley.edu/cnss/}.
#'
#' See \url{https://github.com/gu-mi/NBGOF/wiki/} for more details.
#'
#' @keywords datasets
#'
#' @examples
#'
#' ## Load the dataset into R session:
#' library(NBGOF)
#' data(earthquake)
#'
#' ## basic descriptions of the dataset:
#' head(earthquake)
#' range(earthquake$Magnitude) # 4.5 9.1
#' range(earthquake$Frequency) # 1 33280
#'
#' ## GOF test of NB2, NBP and Poisson models:
#' y = earthquake$Frequency
#' x = as.matrix(cbind(rep(1,length(y)), earthquake$Magnitude))
#' gof.nb2 = nb.gof.v(y, x, sim=999, model="NB2")
#' gof.nbp = nb.gof.v(y, x, sim=999, model="NBP", method="ML")
#' x2 = x[,2]
#' gof.poi = nb.gof.v(y, x2, sim=999, model="Poisson")
#'
#' ## Empirical Probability Plots:
#' # pdf(file=file.path(path1, "eqk-nb-models.pdf"), width=8, height=4)
#' par(mfrow=c(1,2))
#' print(EPPlot(gof.nb2, envelope=0.95, data.note="Earthquake Dataset"))
#' print(EPPlot(gof.nbp, envelope=0.95, data.note="Earthquake Dataset"))
#' # dev.off()
#'
NULL
|
[Chorus:]
They only give you flowers when you can't smell them
Well I’m a give you trees when you can not tell them
You did a bit for the team we won't forget that
Caught me any nights we would talk yea chit chat
Oh man you man’s all real
Ke Ke KE ke Keep it real
Oh man you man’s all real
Ke Ke KE ke Keep it real
Oh man you man’s all real
Ke Ke KE ke Keep it real
[Verse 2:]
You see a vision and a better life
The eyes or a dreamer until a poison
Got between us like a pound of venus
Every man is accountable for his own demons
Deep in the shadows of my mind i can hear the screaming
[Verse 3:]
Yeah you love to hear the story again
Too bad you will only shine so glory be towards my name
See you in the future tomorrow's mouse give me a pen
If your heart stop beating i go back in time
Reverse the sin in the hour glass
When you counting my crime
I wanted by my own cerebellum
Love unconditional but i could never tell her
Remember gum fire echo in my mind bending
I find it hard to even talk about it now bredrin
You sacrificed your life and paid the price for a legend
Facts it's really real what I’m saying yea so real I’m saying
Oh Lorenzo i wish i could apologize to make it simple
But life twisted life changing simple
Now facing these lines and everything we been through
Got me pouring my heart out over this instrumentals
Your mom gave me the number so i spoke with them
Always got that unibrow eyebrow i joked with them
Pakistan G i admire you if i get the label right
You know I’m a hire you
[Chorus]
[Verse 2:]
Yo i keep waking up thought i was done dreaming
Is it my que that i heard it again later that evening
I know that voice but that’s nobody but uptop
I thought he was in grain sing sing don’t stop
The next morning i caught him by the window walking
A child when i heard a No like i just peaked track
I got love for this nigga so I’m going to live
I pull to the house got to show him my strength
Everyday we reminisce about mommy you know the clip too
Laughing about the day he fell and got a chip tooth
Only nigga i know respected by everybody
And he ain't even have to sell crack to buss a shotty
The ore in the nigga pure flow in your blood
And you got a reputation until you lay in the mud
You trash hoe you can’t take his place
So i had a son too tatoo on your face
He was real and believe strong like everything we breathe on
Them moma heard the voice like the jeans long
When we were in that jail together
Which we would have kept it real for ever
Oh you my brother from another mother
You know nigga i love you for ever time will tell
Trying to seal the letter I’m gone
Your mom gave me the number so i spoke with them
Always got that unibrow eyebrow i joked with them
Pakistan G i admire when i get the label right
You know I’m a hire you
[Chorus] | /scraper/rap_eng/texts/Capone/U.M.A.R | no_license | clarnomargurite591/rapper_ml | R | false | false | 2,810 | r | [Chorus:]
They only give you flowers when you can't smell them
Well I’m a give you trees when you can not tell them
You did a bit for the team we won't forget that
Caught me any nights we would talk yea chit chat
Oh man you man’s all real
Ke Ke KE ke Keep it real
Oh man you man’s all real
Ke Ke KE ke Keep it real
Oh man you man’s all real
Ke Ke KE ke Keep it real
[Verse 2:]
You see a vision and a better life
The eyes or a dreamer until a poison
Got between us like a pound of venus
Every man is accountable for his own demons
Deep in the shadows of my mind i can hear the screaming
[Verse 3:]
Yeah you love to hear the story again
Too bad you will only shine so glory be towards my name
See you in the future tomorrow's mouse give me a pen
If your heart stop beating i go back in time
Reverse the sin in the hour glass
When you counting my crime
I wanted by my own cerebellum
Love unconditional but i could never tell her
Remember gum fire echo in my mind bending
I find it hard to even talk about it now bredrin
You sacrificed your life and paid the price for a legend
Facts it's really real what I’m saying yea so real I’m saying
Oh Lorenzo i wish i could apologize to make it simple
But life twisted life changing simple
Now facing these lines and everything we been through
Got me pouring my heart out over this instrumentals
Your mom gave me the number so i spoke with them
Always got that unibrow eyebrow i joked with them
Pakistan G i admire you if i get the label right
You know I’m a hire you
[Chorus]
[Verse 2:]
Yo i keep waking up thought i was done dreaming
Is it my que that i heard it again later that evening
I know that voice but that’s nobody but uptop
I thought he was in grain sing sing don’t stop
The next morning i caught him by the window walking
A child when i heard a No like i just peaked track
I got love for this nigga so I’m going to live
I pull to the house got to show him my strength
Everyday we reminisce about mommy you know the clip too
Laughing about the day he fell and got a chip tooth
Only nigga i know respected by everybody
And he ain't even have to sell crack to buss a shotty
The ore in the nigga pure flow in your blood
And you got a reputation until you lay in the mud
You trash hoe you can’t take his place
So i had a son too tatoo on your face
He was real and believe strong like everything we breathe on
Them moma heard the voice like the jeans long
When we were in that jail together
Which we would have kept it real for ever
Oh you my brother from another mother
You know nigga i love you for ever time will tell
Trying to seal the letter I’m gone
Your mom gave me the number so i spoke with them
Always got that unibrow eyebrow i joked with them
Pakistan G i admire when i get the label right
You know I’m a hire you
[Chorus] |
#' ENVISIONQuery.
#'
#' Package-level documentation for the ENVISIONQuery package.
#'
#' \tabular{ll}{
#' Package: \tab ENVISIONQuery\cr
#' Type: \tab Package\cr
#' Version: \tab 1.1.4 \cr
#' Date: \tab 2011-07-17 \cr
#' License: \tab GPL-2\cr
#' LazyLoad: \tab yes\cr
#' }
#'
#'
#' @name ENVISIONQuery-package
#' @aliases ENVISIONQuery-package
#' @docType package
#' @author Alex Lisovich, Roger Day
#' @keywords package
# NULL (not `{}`) is the roxygen2 convention for the placeholder object
# that package-level documentation attaches to.
NULL
| /OtherPackages/ENVISIONQuery/R/ENVISIONQuery-package.R | no_license | rikenbit/PubMedQuery | R | false | false | 378 | r | #' ENVISIONQuery.
#'
#' \tabular{ll}{
#' Package: \tab ENVISIONQuery\cr
#' Type: \tab Package\cr
#' Version: \tab 1.1.4 \cr
#' Date: \tab 2011-07-17 \cr
#' License: \tab GPL-2\cr
#' LazyLoad: \tab yes\cr
#' }
#'
#'
#' @name ENVISIONQuery-package
#' @aliases ENVISIONQuery-package
#' @docType package
#' @author Alex Lisovich, Roger Day
#' @keywords package
{}
|
# Auto-generated fuzz-regression fixture: exercises the internal
# meteor:::ET0_ThornthwaiteWilmott with extreme and non-finite inputs
# (-Inf, NaN, denormals) to check it does not crash.
# Argument names suggest doy = day of year, latitude, temp -- the values
# here are deliberately nonsensical; semantics TODO confirm against meteor.
testlist <- list(doy = c(-Inf, 0), latitude = c(-6.93132091139805e-107, 1.86807199754812e+112, -Inf, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, NaN, -1.51345790188863e+21, 1.44942408802595e-285, -1.72131968218895e+83, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) | /meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615832986-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 421 | r | testlist <- list(doy = c(-Inf, 0), latitude = c(-6.93132091139805e-107, 1.86807199754812e+112, -Inf, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, NaN, -1.51345790188863e+21, 1.44942408802595e-285, -1.72131968218895e+83, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) |
\name{omega.tot}
\alias{omega.tot}
\title{Compute McDonald's Omega Total}
\usage{
omega.tot(x, factors = 1, missing = "complete")
}
\arguments{
\item{x}{Can be either a data matrix or a covariance
matrix}
\item{missing}{How to handle missing values; defaults to
\code{"complete"}.}
\item{factors}{The number of latent factors.}
}
\value{
\item{omega.tot}{Omega total reliability estimate.}
}
\description{
McDonald proposed Omega Total as a method for estimating
reliability for a test with multiple factors.
}
\examples{
omega.tot(Rosenberg, factors=1)
}
\author{
Tyler Hunt \email{tyler@psychoanalytix.com}
}
\references{
McDonald, R. P. (1999). Test Theory: A Unified Treatment.
Psychology Press.
}
| /man/omega.tot.Rd | no_license | JackStat/Lambda4 | R | false | false | 704 | rd | \name{omega.tot}
\alias{omega.tot}
\title{Compute McDonald's Omega Total}
\usage{
omega.tot(x, factors = 1, missing = "complete")
}
\arguments{
\item{x}{Can be either a data matrix or a covariance
matrix}
\item{missing}{How to handle missing values; defaults to
\code{"complete"}.}
\item{factors}{The number of latent factors.}
}
\value{
\item{omega.tot}{Omega total reliability estimate.}
}
\description{
McDonald proposed Omega Total as a method for estimating
reliability for a test with multiple factors.
}
\examples{
omega.tot(Rosenberg, factors=1)
}
\author{
Tyler Hunt \email{tyler@psychoanalytix.com}
}
\references{
McDonald, R. P. (1999). Test Theory: A Unified Treatment.
Psychology Press.
}
|
#' Compare predictive models, created on your data
#'
#' @description This step allows one to create test models on your data
#' and helps determine which performs best.
#' @docType class
#' @usage LinearMixedModelDevelopment(object, type, df,
#' grainCol, personCol, predictedCol, impute, debug)
#' @import caret
#' @import doParallel
#' @import e1071
#' @import grpreg
#' @import lme4
#' @import pROC
#' @importFrom R6 R6Class
#' @import ranger
#' @import ROCR
#' @import RODBC
#' @param object of SupervisedModelDevelopmentParams class for $new() constructor
#' @param type The type of model (either 'regression' or 'classification')
#' @param df Dataframe whose columns are used for calc.
#' @param grainCol The data frame's ID column pertaining to the grain
#' @param personCol The data frame's ID column pertaining to the person/patient
#' @param predictedCol Column that you want to predict. If you're doing
#' classification then this should be Y/N.
#' @param impute Set all-column imputation to F or T.
#' This uses mean replacement for numeric columns
#' and most frequent for factorized columns.
#' F leads to removal of rows containing NULLs.
#' @param debug Provides the user extended output to the console, in order
#' to monitor the calculations throughout. Use T or F.
#' @references \url{http://healthcare.ai/}
#' @seealso \code{\link{healthcareai}}
#' @examples
#'
#' ### Built-in example; Doing classification
#' library(healthcareai)
#' library(lme4)
#'
#' df <- sleepstudy
#'
#' str(df)
#'
#' # Create binary column for classification
#' df$ReactionFLG <- ifelse(df$Reaction > 300, "Y", "N")
#' df$Reaction <- NULL
#'
#' set.seed(42)
#' p <- SupervisedModelDevelopmentParams$new()
#' p$df <- df
#' p$type <- "classification"
#' p$impute <- TRUE
#' p$personCol <- "Subject" # Think of this as PatientID
#' p$predictedCol <- "ReactionFLG"
#' p$debug <- FALSE
#' p$cores <- 1
#'
#' # Create Mixed Model
#' lmm <- LinearMixedModelDevelopment$new(p)
#' lmm$run()
#'
#' ### Doing regression
#' library(healthcareai)
#'
#' # SQL query and connection goes here - see SelectData function.
#'
#' df <- sleepstudy
#'
#' # Add GrainID, which is equivalent to PatientEncounterID
#' df$GrainID <- seq.int(nrow(df))
#'
#' str(df)
#'
#' set.seed(42)
#' p <- SupervisedModelDevelopmentParams$new()
#' p$df <- df
#' p$type <- "regression"
#' p$impute <- TRUE
#' p$grainCol <- "GrainID" # Think of this as PatientEnounterID
#' p$personCol <- "Subject" # Think of this as PatientID
#' p$predictedCol <- "Reaction"
#' p$debug <- FALSE
#' p$cores <- 1
#'
#' # Create Mixed Model
#' lmm <- LinearMixedModelDevelopment$new(p)
#' lmm$run()
#'
#' #### Example using csv data ####
#' library(healthcareai)
#' # setwd('C:/Your/script/location') # Needed if using YOUR CSV file
#' ptm <- proc.time()
#'
#' # Can delete this line in your work
#' csvfile <- system.file("extdata", "HCRDiabetesClinical.csv", package = "healthcareai")
#' #Replace csvfile with "path/to/yourfile"
#' df <- read.csv(file = csvfile, header = TRUE, na.strings = c("NULL", "NA", ""))
#'
#' head(df)
#'
#' df$InTestWindowFLG <- NULL
#'
#' set.seed(42)
#'
#' p <- SupervisedModelDevelopmentParams$new()
#' p$df <- df
#' p$type <- "classification"
#' p$impute <- TRUE
#' p$grainCol <- "PatientEncounterID"
#' p$personCol <- "PatientID"
#' p$predictedCol <- "ThirtyDayReadmitFLG"
#' p$debug <- FALSE
#' p$cores <- 1
#'
#' # Create Mixed Model
#' lmm <- LinearMixedModelDevelopment$new(p)
#' lmm$run()
#'
#' set.seed(42)
#' # Run Lasso
#' # Lasso <- LassoDevelopment$new(p)
#' # Lasso$run()
#' cat(proc.time() - ptm, '\n')
#'
#' \donttest{
#' #### This example is specific to Windows and is not tested.
#' #### Example using SQL Server data ####
#' # This example requires that you alter your connection string / query
#' # to read in your own data
#'
#' ptm <- proc.time()
#' library(healthcareai)
#'
#' connection.string <- "
#' driver={SQL Server};
#' server=localhost;
#' database=SAM;
#' trusted_connection=true
#' "
#'
#' query <- "
#' SELECT
#' [PatientEncounterID]
#' ,[PatientID]
#' ,[SystolicBPNBR]
#' ,[LDLNBR]
#' ,[A1CNBR]
#' ,[GenderFLG]
#' ,[ThirtyDayReadmitFLG]
#' ,[InTestWindowFLG]
#' FROM [SAM].[dbo].[HCRDiabetesClinical]
#' --no WHERE clause, because we want train AND test
#' "
#'
#' df <- selectData(connection.string, query)
#' head(df)
#'
#' df$InTestWindowFLG <- NULL
#'
#' set.seed(42)
#'
#' p <- SupervisedModelDevelopmentParams$new()
#' p$df <- df
#' p$type <- "classification"
#' p$impute <- TRUE
#' p$grainCol <- "PatientEncounterID"
#' p$personCol <- "PatientID"
#' p$predictedCol <- "ThirtyDayReadmitFLG"
#' p$debug <- FALSE
#' p$cores <- 1
#'
#' # Create Mixed Model
#' lmm <- LinearMixedModelDevelopment$new(p)
#' lmm$run()
#'
#' # Remove person col, since RF can't use it
#' df$personCol <- NULL
#' p$df <- df
#' p$personCol <- NULL
#'
#' set.seed(42)
#' # Run Random Forest
#' rf <- RandomForestDevelopment$new(p)
#' rf$run()
#'
#' # Plot ROC
#' rocs <- list(lmm$getROC(), rf$getROC())
#' names <- c("Linear Mixed Model", "Random Forest")
#' legendLoc <- "bottomright"
#' plotROCs(rocs, names, legendLoc)
#'
#' # Plot PR Curve
#' rocs <- list(lmm$getPRCurve(), rf$getPRCurve())
#' names <- c("Linear Mixed Model", "Random Forest")
#' legendLoc <- "bottomleft"
#' plotPRCurve(rocs, names, legendLoc)
#'
#' cat(proc.time() - ptm, '\n')
#' }
#'
#' @export
# R6 class: develops (trains and evaluates) a linear mixed-effects model --
# a random intercept per person/patient plus fixed effects for all other
# predictors -- for either binary classification or regression.
# Data preparation (dfTrain/dfTest, imputation) comes from the
# SupervisedModelDevelopment base class.
LinearMixedModelDevelopment <- R6Class("LinearMixedModelDevelopment",

  # Inheritance
  inherit = SupervisedModelDevelopment,

  # Private members
  private = list(

    # Mixed model-specific datasets
    trainTest = NA,    # dfTrain and dfTest re-combined (LMM does its own split)
    lmmTrain = NA,     # all but the last row of each person group
    lmmTest = NA,      # the last row of each person group

    # Fit LMM
    fitLmm = NA,       # lme4 fit: glmer (classification) or lmer (regression)
    fitLogit = NA,     # plain glm fit, kept for row-wise variable importance
    predictions = NA,  # predictions on lmmTest

    # Performance metrics
    ROCPlot = NA,
    PRCurvePlot = NA,
    AUROC = NA,
    AUPR = NA,
    RMSE = NA,
    MAE = NA,

    # functions
    # Persist both fits as .rda files.
    # Side effect: writes rmodel_var_import_LMM.rda and
    # rmodel_probability_LMM.rda into the current working directory.
    saveModel = function() {
      if (isTRUE(self$params$debug)) {
        cat('Saving model...', '\n')
      }
      # Save model
      #NOTE: save(private$fitLogit, ...) directly, did not work!
      # (save() stores objects by name, so copy to plain locals first)
      fitLogit <- private$fitLogit
      fitObj <- private$fitLmm
      save(fitLogit, file = "rmodel_var_import_LMM.rda")
      save(fitObj, file = "rmodel_probability_LMM.rda")
    },

    # Fit an ordinary GLM (no random effects) on dfTrain; used only for
    # row-wise variable importance, not for prediction.
    fitGeneralizedLinearModel = function() {
      if (isTRUE(self$params$debug)) {
        cat('generating fitLogit...', '\n')
      }
      if (self$params$type == 'classification') {
        cat('fitting GLM', '\n')
        # NOTE(review): `metric` and `trControl` are caret::train()
        # arguments, not stats::glm() arguments -- here they fall into
        # glm's `...`; confirm they are intended and actually honored.
        private$fitLogit <- glm(
          as.formula(paste(self$params$predictedCol, '.', sep = " ~ ")),
          data = private$dfTrain,
          family = binomial(link = "logit"),
          metric = "ROC",
          control = list(maxit = 10000),
          trControl = trainControl(classProbs = TRUE, summaryFunction = twoClassSummary)
        )
      } else if (self$params$type == 'regression') {
        # NOTE(review): `metric` is likewise a caret argument -- see above.
        private$fitLogit <- glm(
          as.formula(paste(self$params$predictedCol, '.', sep = " ~ ")),
          data = private$dfTrain,
          metric = "RMSE",
          control = list(maxit = 10000)
        )
      }
    }
  ),

  # Public members
  public = list(

    # Constructor
    # p: new SupervisedModelDevelopmentParams class object,
    # i.e. p = SupervisedModelDevelopmentParams$new()
    initialize = function(p) {
      super$initialize(p)
    },

    # Return the raw predictions produced by performPrediction().
    getPredictions = function(){
      return(private$predictions)
    },

    # Start of functions
    # Re-combine the base class's random train/test split, then re-split
    # longitudinally: for each person, all rows but the last become
    # training data and the last row is held out for testing.
    buildDataset = function(){
      # TODO Soon: Prepare data according to InTestWindow column, in case
      # user wants to predict for row that's not the last in the person group
      # Combine test/train (which was randomly generated in base class)
      private$trainTest <- rbind(private$dfTrain, private$dfTest)
      if (isTRUE(self$params$debug)) {
        cat('Re-combined train/test for MM specific use', '\n')
        cat(str(private$trainTest), '\n')
      }
      # TODO Later: figure out why ordering in sql query is better auc than internal
      # ordering. http://stackoverflow.com/a/1296745/5636012
      # If ordering using with, access PersonID col via df[[PersonID]]
      # Split out test/train by taking last row of each PersonID for test set
      # TODO Soon: do this split using the InTestWindowCol
      # NOTE(review): `1:.N - 1` parses as `(1:.N) - 1`, i.e. 0:(.N - 1);
      # subsetting drops the 0 index, so this selects rows 1..(N - 1) and a
      # single-row person contributes no training rows -- confirm intended.
      private$lmmTrain <- data.table::setDT(private$trainTest)[, .SD[1:.N - 1], by = eval(self$params$personCol)]
      private$lmmTest <- data.table::setDT(private$trainTest)[, .SD[.N], by = eval(self$params$personCol)]
      if (isTRUE(self$params$debug)) {
        cat('Mixed model-specific training set after creation', '\n')
        cat(str(private$lmmTrain), '\n')
        cat('Mixed model-specific test set after creation', '\n')
        cat(str(private$lmmTest), '\n')
      }
    },

    # Override: build model
    # Linear Mixed model (random intercept with fixed mean)
    # Builds the formula "<target> ~ <fixed cols> + (1|<personCol>)" and
    # fits glmer (classification) or lmer (regression) on lmmTrain.
    buildModel = function() {
      # Start build formula by grabbing column names
      colList <- colnames(private$lmmTrain)
      # Remove target col from list
      colList <- colList[colList != self$params$predictedCol]
      # Remove grain col from list
      colList <- colList[colList != self$params$grainCol]
      # Remove random-effects col from list
      fixedColsTemp <- colList[colList != self$params$personCol]
      # Collapse columns in list into a large string of cols
      fixedCols <- paste(fixedColsTemp, "+ ", collapse = "")
      formula <- paste0(self$params$predictedCol, " ~ ",
                        fixedCols,
                        "(1|", self$params$personCol, ")")
      if (isTRUE(self$params$debug)) {
        cat('Formula to be used:', '\n')
        cat(formula, '\n')
        cat('Training the general linear mixed-model...', '\n')
        cat('Using random intercept with fixed mean...', '\n')
      }
      if (self$params$type == 'classification') {
        private$fitLmm <- glmer(formula = formula,
                                data = private$lmmTrain,
                                family = binomial(link = 'logit'))
      } else if (self$params$type == 'regression') {
        private$fitLmm <- lmer(formula = formula,
                               data = private$lmmTrain)
      }
    },

    # Perform prediction
    # Predicts on lmmTest; `allow.new.levels = TRUE` lets persons unseen
    # during training be scored (they fall back to the fixed effects).
    performPrediction = function() {
      if (self$params$type == 'classification') {
        # type = "response" returns probabilities rather than logits
        private$predictions <- predict(object = private$fitLmm,
                                       newdata = private$lmmTest,
                                       allow.new.levels = TRUE,
                                       type = "response")
        if (isTRUE(self$params$debug)) {
          cat(paste0('Predictions generated: ', nrow(private$predictions)), '\n')
          cat('First 10 raw classification probability predictions', '\n')
          cat(round(private$predictions[1:10], 2), '\n')
        }
      }
      else if (self$params$type == 'regression') {
        private$predictions <- predict(object = private$fitLmm,
                                       newdata = private$lmmTest,
                                       allow.new.levels = TRUE)
        if (isTRUE(self$params$debug)) {
          cat(paste0('Predictions generated: ', '\n',
                     length(private$predictions)))
          cat('First 10 raw regression predictions (with row # first)', '\n')
          cat(round(private$predictions[1:10], 2), '\n')
        }
      }
    },

    # Generate performance metrics
    # Delegates to calculatePerformance() and caches its six results for
    # the getter methods below. Invisibly returns the lme4 fit.
    generatePerformanceMetrics = function() {
      ytest <- as.numeric(private$lmmTest[[self$params$predictedCol]])
      calcObjList <- calculatePerformance(private$predictions,
                                          ytest,
                                          self$params$type)
      # Make these objects available for plotting and unit tests
      private$ROCPlot <- calcObjList[[1]]
      private$PRCurvePlot <- calcObjList[[2]]
      private$AUROC <- calcObjList[[3]]
      private$AUPR <- calcObjList[[4]]
      private$RMSE <- calcObjList[[5]]
      private$MAE <- calcObjList[[6]]
      return(invisible(private$fitLmm))
    },

    # Override: run Linear Mixed algorithm
    # End-to-end pipeline: split -> fit LMM -> fit GLM -> save -> predict
    # -> score.
    run = function() {
      self$buildDataset()
      # Build Model
      self$buildModel()
      # fit GLM for row-wise variable importance
      private$fitGeneralizedLinearModel()
      # save model
      private$saveModel()
      # Perform prediction
      self$performPrediction()
      # Generate performance metrics
      self$generatePerformanceMetrics()
    },

    # ROC plot object; NULL (with a console message) unless the predicted
    # column is binary.
    getROC = function() {
      if (!isBinary(self$params$df[[self$params$predictedCol]])) {
        cat("ROC is not created because the column you're predicting is not binary", '\n')
        return(NULL)
      }
      return(private$ROCPlot)
    },

    # Precision-recall plot object; NULL unless the predicted column is binary.
    getPRCurve = function() {
      if (!isBinary(self$params$df[[self$params$predictedCol]])) {
        cat("PR Curve is not created because the column you're predicting is not binary", '\n')
        return(NULL)
      }
      return(private$PRCurvePlot)
    },

    # Area under the ROC curve (classification).
    getAUROC = function() {
      return(private$AUROC)
    },

    # Root mean squared error (regression).
    getRMSE = function() {
      return(private$RMSE)
    },

    # Mean absolute error (regression).
    getMAE = function() {
      return(private$MAE)
    },

    # NOTE(review): private$perf is never assigned in this class --
    # presumably set elsewhere (e.g. the base class); confirm before use.
    getPerf = function() {
      return(private$perf)
    },

    # Deprecated; kept for backward compatibility. See ?generateAUC.
    getCutOffs = function() {
      warning("`getCutOffs` is deprecated. Please use `generateAUC` instead. See
?generateAUC", call. = FALSE)
    }
  )
)
| /R/linear-mixed-model-development.R | permissive | Hanlei-Zhu/healthcareai-r | R | false | false | 13,476 | r | #' Compare predictive models, created on your data
#'
#' @description This step allows one to create test models on your data
#' and helps determine which performs best.
#' @docType class
#' @usage LinearMixedModelDevelopment(object, type, df,
#' grainCol, personCol, predictedCol, impute, debug)
#' @import caret
#' @import doParallel
#' @import e1071
#' @import grpreg
#' @import lme4
#' @import pROC
#' @importFrom R6 R6Class
#' @import ranger
#' @import ROCR
#' @import RODBC
#' @param object of SuperviseModelParameters class for $new() constructor
#' @param type The type of model (either 'regression' or 'classification')
#' @param df Dataframe whose columns are used for calc.
#' @param grainCol The data frame's ID column pertaining to the grain
#' @param personCol The data frame's ID column pertaining to the person/patient
#' @param predictedCol Column that you want to predict. If you're doing
#' classification then this should be Y/N.
#' @param impute Set all-column imputation to F or T.
#' This uses mean replacement for numeric columns
#' and most frequent for factorized columns.
#' F leads to removal of rows containing NULLs.
#' @param debug Provides the user extended output to the console, in order
#' to monitor the calculations throughout. Use T or F.
#' @references \url{http://healthcare.ai/}
#' @seealso \code{\link{healthcareai}}
#' @examples
#'
#' ### Built-in example; Doing classification
#' library(healthcareai)
#' library(lme4)
#'
#' df <- sleepstudy
#'
#' str(df)
#'
#' # Create binary column for classification
#' df$ReactionFLG <- ifelse(df$Reaction > 300, "Y", "N")
#' df$Reaction <- NULL
#'
#' set.seed(42)
#' p <- SupervisedModelDevelopmentParams$new()
#' p$df <- df
#' p$type <- "classification"
#' p$impute <- TRUE
#' p$personCol <- "Subject" # Think of this as PatientID
#' p$predictedCol <- "ReactionFLG"
#' p$debug <- FALSE
#' p$cores <- 1
#'
#' # Create Mixed Model
#' lmm <- LinearMixedModelDevelopment$new(p)
#' lmm$run()
#'
#' ### Doing regression
#' library(healthcareai)
#'
#' # SQL query and connection goes here - see SelectData function.
#'
#' df <- sleepstudy
#'
#' # Add GrainID, which is equivalent to PatientEncounterID
#' df$GrainID <- seq.int(nrow(df))
#'
#' str(df)
#'
#' set.seed(42)
#' p <- SupervisedModelDevelopmentParams$new()
#' p$df <- df
#' p$type <- "regression"
#' p$impute <- TRUE
#' p$grainCol <- "GrainID" # Think of this as PatientEnounterID
#' p$personCol <- "Subject" # Think of this as PatientID
#' p$predictedCol <- "Reaction"
#' p$debug <- FALSE
#' p$cores <- 1
#'
#' # Create Mixed Model
#' lmm <- LinearMixedModelDevelopment$new(p)
#' lmm$run()
#'
#' #### Example using csv data ####
#' library(healthcareai)
#' # setwd('C:/Your/script/location') # Needed if using YOUR CSV file
#' ptm <- proc.time()
#'
#' # Can delete this line in your work
#' csvfile <- system.file("extdata", "HCRDiabetesClinical.csv", package = "healthcareai")
#' #Replace csvfile with "path/to/yourfile"
#' df <- read.csv(file = csvfile, header = TRUE, na.strings = c("NULL", "NA", ""))
#'
#' head(df)
#'
#' df$InTestWindowFLG <- NULL
#'
#' set.seed(42)
#'
#' p <- SupervisedModelDevelopmentParams$new()
#' p$df <- df
#' p$type <- "classification"
#' p$impute <- TRUE
#' p$grainCol <- "PatientEncounterID"
#' p$personCol <- "PatientID"
#' p$predictedCol <- "ThirtyDayReadmitFLG"
#' p$debug <- FALSE
#' p$cores <- 1
#'
#' # Create Mixed Model
#' lmm <- LinearMixedModelDevelopment$new(p)
#' lmm$run()
#'
#' set.seed(42)
#' # Run Lasso
#' # Lasso <- LassoDevelopment$new(p)
#' # Lasso$run()
#' cat(proc.time() - ptm, '\n')
#'
#' \donttest{
#' #### This example is specific to Windows and is not tested.
#' #### Example using SQL Server data ####
#' # This example requires that you alter your connection string / query
#' # to read in your own data
#'
#' ptm <- proc.time()
#' library(healthcareai)
#'
#' connection.string <- "
#' driver={SQL Server};
#' server=localhost;
#' database=SAM;
#' trusted_connection=true
#' "
#'
#' query <- "
#' SELECT
#' [PatientEncounterID]
#' ,[PatientID]
#' ,[SystolicBPNBR]
#' ,[LDLNBR]
#' ,[A1CNBR]
#' ,[GenderFLG]
#' ,[ThirtyDayReadmitFLG]
#' ,[InTestWindowFLG]
#' FROM [SAM].[dbo].[HCRDiabetesClinical]
#' --no WHERE clause, because we want train AND test
#' "
#'
#' df <- selectData(connection.string, query)
#' head(df)
#'
#' df$InTestWindowFLG <- NULL
#'
#' set.seed(42)
#'
#' p <- SupervisedModelDevelopmentParams$new()
#' p$df <- df
#' p$type <- "classification"
#' p$impute <- TRUE
#' p$grainCol <- "PatientEncounterID"
#' p$personCol <- "PatientID"
#' p$predictedCol <- "ThirtyDayReadmitFLG"
#' p$debug <- FALSE
#' p$cores <- 1
#'
#' # Create Mixed Model
#' lmm <- LinearMixedModelDevelopment$new(p)
#' lmm$run()
#'
#' # Remove person col, since RF can't use it
#' df$personCol <- NULL
#' p$df <- df
#' p$personCol <- NULL
#'
#' set.seed(42)
#' # Run Random Forest
#' rf <- RandomForestDevelopment$new(p)
#' rf$run()
#'
#' # Plot ROC
#' rocs <- list(lmm$getROC(), rf$getROC())
#' names <- c("Linear Mixed Model", "Random Forest")
#' legendLoc <- "bottomright"
#' plotROCs(rocs, names, legendLoc)
#'
#' # Plot PR Curve
#' rocs <- list(lmm$getPRCurve(), rf$getPRCurve())
#' names <- c("Linear Mixed Model", "Random Forest")
#' legendLoc <- "bottomleft"
#' plotPRCurve(rocs, names, legendLoc)
#'
#' cat(proc.time() - ptm, '\n')
#' }
#'
#' @export
LinearMixedModelDevelopment <- R6Class("LinearMixedModelDevelopment",
  # Linear mixed-model development class: fits a random intercept per person
  # (formula "... + (1|personCol)") for longitudinal data, using lme4::glmer
  # for classification and lme4::lmer for regression.
  # Inheritance
  inherit = SupervisedModelDevelopment,
  # Private members
  private = list(
    # Mixed model-specific datasets
    trainTest = NA,
    lmmTrain = NA,
    lmmTest = NA,
    # Fit LMM
    fitLmm = NA,
    fitLogit = NA,
    predictions = NA,
    # Performance metrics
    ROCPlot = NA,
    PRCurvePlot = NA,
    AUROC = NA,
    AUPR = NA,
    RMSE = NA,
    MAE = NA,
    # functions
    # Persist both fitted models as .rda files in the working directory:
    # the GLM (variable importance) and the mixed model (probabilities).
    saveModel = function() {
      if (isTRUE(self$params$debug)) {
        cat('Saving model...', '\n')
      }
      # Save model
      #NOTE: save(private$fitLogit, ...) directly, did not work!
      # Copy into plain locals first so save() records usable object names.
      fitLogit <- private$fitLogit
      fitObj <- private$fitLmm
      save(fitLogit, file = "rmodel_var_import_LMM.rda")
      save(fitObj, file = "rmodel_probability_LMM.rda")
    },
    # Fit an ordinary (fixed-effects-only) GLM on the training data; used
    # downstream for row-wise variable importance.
    fitGeneralizedLinearModel = function() {
      if (isTRUE(self$params$debug)) {
        cat('generating fitLogit...', '\n')
      }
      if (self$params$type == 'classification') {
        cat('fitting GLM', '\n')
        # NOTE(review): 'metric' and 'trControl' are caret::train()-style
        # arguments; stats::glm() does not define them, so they fall into
        # '...' -- confirm they have the intended effect here.
        private$fitLogit <- glm(
          as.formula(paste(self$params$predictedCol, '.', sep = " ~ ")),
          data = private$dfTrain,
          family = binomial(link = "logit"),
          metric = "ROC",
          control = list(maxit = 10000),
          trControl = trainControl(classProbs = TRUE, summaryFunction = twoClassSummary)
        )
      } else if (self$params$type == 'regression') {
        # Gaussian GLM (default family); 'metric' is again a caret-style arg.
        private$fitLogit <- glm(
          as.formula(paste(self$params$predictedCol, '.', sep = " ~ ")),
          data = private$dfTrain,
          metric = "RMSE",
          control = list(maxit = 10000)
        )
      }
    }
  ),
  # Public members
  public = list(
    # Constructor
    # p: new SuperviseModelParameters class object,
    # i.e. p = SuperviseModelParameters$new()
    initialize = function(p) {
      super$initialize(p)
    },
    # Return the raw predictions produced by performPrediction().
    getPredictions = function(){
      return(private$predictions)
    },
    # Start of functions
    # Rebuild the train/test split so that, per person, the last row goes
    # to the test set and all earlier rows go to the training set.
    buildDataset = function(){
      # TODO Soon: Prepare data according to InTestWindow column, in case
      # user wants to predict for row that's not the last in the person group
      # Combine test/train (which was randomly generated in base class)
      private$trainTest <- rbind(private$dfTrain,private$dfTest)
      if (isTRUE(self$params$debug)) {
        cat('Re-combined train/test for MM specific use', '\n')
        cat(str(private$trainTest), '\n')
      }
      # TODO Later: figure out why ordering in sql query is better auc than internal
      # ordering. http://stackoverflow.com/a/1296745/5636012
      # If ordering using with, access PersonID col via df[[PersonID]]
      # Split out test/train by taking last row of each PersonID for test set
      # TODO Soon: do this split using the InTestWindowCol
      private$lmmTrain <- data.table::setDT(private$trainTest)[, .SD[1:.N - 1], by = eval(self$params$personCol)]
      private$lmmTest <- data.table::setDT(private$trainTest)[, .SD[.N], by = eval(self$params$personCol)]
      if (isTRUE(self$params$debug)) {
        cat('Mixed model-specific training set after creation', '\n')
        cat(str(private$lmmTrain), '\n')
        cat('Mixed model-specific test set after creation', '\n')
        cat(str(private$lmmTest), '\n')
      }
    },
    # Override: build model
    # Linear Mixed model (random intercept with fixed mean)
    buildModel = function() {
      # Start build formula by grabbing column names
      colList <- colnames(private$lmmTrain)
      # Remove target col from list
      colList <- colList[colList != self$params$predictedCol]
      # Remove grain col from list
      colList <- colList[colList != self$params$grainCol]
      # Remove random-effects col from list
      fixedColsTemp <- colList[colList != self$params$personCol]
      # Collapse columns in list into a large string of cols
      fixedCols <- paste(fixedColsTemp, "+ ", collapse = "")
      # Fixed effects for every remaining column; random intercept per person.
      formula <- paste0(self$params$predictedCol, " ~ ",
                        fixedCols,
                        "(1|", self$params$personCol, ")")
      if (isTRUE(self$params$debug)) {
        cat('Formula to be used:', '\n')
        cat(formula, '\n')
        cat('Training the general linear mixed-model...', '\n')
        cat('Using random intercept with fixed mean...', '\n')
      }
      if (self$params$type == 'classification') {
        private$fitLmm <- glmer(formula = formula,
                                data = private$lmmTrain,
                                family = binomial(link = 'logit'))
      } else if (self$params$type == 'regression') {
        private$fitLmm <- lmer(formula = formula,
                               data = private$lmmTrain)
      }
    },
    # Perform prediction
    # Predict on the held-out last-row-per-person test set; unseen persons
    # are allowed (allow.new.levels = TRUE).
    performPrediction = function() {
      if (self$params$type == 'classification') {
        private$predictions <- predict(object = private$fitLmm,
                                       newdata = private$lmmTest,
                                       allow.new.levels = TRUE,
                                       type = "response")
        if (isTRUE(self$params$debug)) {
          # NOTE(review): predictions is a vector here, so nrow() returns
          # NULL and the count prints empty -- length() was likely intended.
          cat(paste0('Predictions generated: ', nrow(private$predictions)), '\n')
          cat('First 10 raw classification probability predictions', '\n')
          cat(round(private$predictions[1:10],2), '\n')
        }
      }
      else if (self$params$type == 'regression') {
        private$predictions <- predict(object = private$fitLmm,
                                       newdata = private$lmmTest,
                                       allow.new.levels = TRUE)
        if (isTRUE(self$params$debug)) {
          cat(paste0('Predictions generated: ', '\n',
                     length(private$predictions)))
          cat('First 10 raw regression predictions (with row # first)', '\n')
          cat(round(private$predictions[1:10],2), '\n')
        }
      }
    },
    # Generate performance metrics
    # Compare predictions against the test-set truth and cache ROC/PR plots
    # plus scalar metrics for the getters below.
    generatePerformanceMetrics = function() {
      ytest <- as.numeric(private$lmmTest[[self$params$predictedCol]])
      calcObjList <- calculatePerformance(private$predictions,
                                          ytest,
                                          self$params$type)
      # Make these objects available for plotting and unit tests
      private$ROCPlot <- calcObjList[[1]]
      private$PRCurvePlot <- calcObjList[[2]]
      private$AUROC <- calcObjList[[3]]
      private$AUPR <- calcObjList[[4]]
      private$RMSE <- calcObjList[[5]]
      private$MAE <- calcObjList[[6]]
      return(invisible(private$fitLmm))
    },
    # Override: run Linear Mixed algorithm
    run = function() {
      self$buildDataset()
      # Build Model
      self$buildModel()
      # fit GLM for row-wise variable importance
      private$fitGeneralizedLinearModel()
      # save model
      private$saveModel()
      # Perform prediction
      self$performPrediction()
      # Generate performance metrics
      self$generatePerformanceMetrics()
    },
    # ROC plot accessor; only meaningful for binary targets.
    getROC = function() {
      if (!isBinary(self$params$df[[self$params$predictedCol]])) {
        cat("ROC is not created because the column you're predicting is not binary", '\n')
        return(NULL)
      }
      return(private$ROCPlot)
    },
    # Precision-recall curve accessor; only meaningful for binary targets.
    getPRCurve = function() {
      if (!isBinary(self$params$df[[self$params$predictedCol]])) {
        cat("PR Curve is not created because the column you're predicting is not binary", '\n')
        return(NULL)
      }
      return(private$PRCurvePlot)
    },
    getAUROC = function() {
      return(private$AUROC)
    },
    getRMSE = function() {
      return(private$RMSE)
    },
    getMAE = function() {
      return(private$MAE)
    },
    getPerf = function() {
      # NOTE(review): 'perf' is not declared in this class's private list;
      # presumably it is set by the parent class -- confirm, otherwise this
      # returns NULL.
      return(private$perf)
    },
    # Deprecated; retained for backward compatibility.
    getCutOffs = function() {
      warning("`getCutOffs` is deprecated. Please use `generateAUC` instead. See
              ?generateAUC", call. = FALSE)
    }
  )
)
|
/Projekt MOW/Projekt MOW/script2.R | no_license | Konradszk/MOW-AE | R | false | false | 20,205 | r | ||
# Exploratory "generative art" built from the arsenal::mockstudy data set.
library(ggplot2)
library(arsenal)
library(GGally)
library(network)
library(ggnet)
# mockstudy ships with the arsenal package loaded above.
data <- mockstudy
# Layered scatter/ellipse/2-D density plot with all axes and chrome removed.
ggplot(data, aes(x = age, y = ast, size=alk.phos, colour = case))+
  geom_point(alpha=0.35) +
  stat_ellipse()+
  stat_density_2d(aes())+
  scale_color_continuous(guide=FALSE)+
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black"),legend.position="none")+
  theme_void()+guides(fill=FALSE)
library(ggraph)
library(igraph)
library(tidyverse)
library(viridis)
# NOTE(review): the dangling `pckng <-` continues onto the next line, so
# both pckng and verti end up bound to the same two-column subset.
pckng <-
  verti <- data[,c("case","age")]
# NOTE(review): `edges` is never defined in this script, so this call
# fails as written -- confirm the intended edge list.
mg <- graph_from_data_frame(edges, vertices = verti)
# Recycled per-star outline colours for the base-graphics stars below.
palette(rainbow(12, s = 0.6, v = 0.75))
# Shuffle mockstudy's rows four times and bind the copies side by side.
datar <- cbind(data[sample(nrow(data)),], data[sample(nrow(data)),], data[sample(nrow(data)),],data[sample(nrow(data)),])
stars(datar[1:56,c(15:30)], full=TRUE,
      labels = NULL, lwd=1.1, lty=6, frame.plot = TRUE)
# stars2: a lightly patched copy of graphics::stars().
# The single functional change versus the base version is the extra
# `col.lines` argument, used as the `border` colour when each star's
# outline polygon is drawn, so every observation can get its own line
# colour. All other arguments behave as documented in ?graphics::stars.
# Returns the star locations, invisibly.
stars2 <- function (x, full = TRUE, scale = TRUE, radius = TRUE, labels = dimnames(x)[[1L]], col.lines = NULL,
                    locations = NULL, nrow = NULL, ncol = NULL, len = 1, key.loc = NULL,
                    key.labels = dimnames(x)[[2L]], key.xpd = TRUE, xlim = NULL,
                    ylim = NULL, flip.labels = NULL, draw.segments = FALSE, col.segments = 1L:n.seg,
                    col.stars = NA, axes = FALSE, frame.plot = axes,
                    main = NULL, sub = NULL, xlab = "", ylab = "", cex = 0.8,
                    lwd = 0.25, lty = par("lty"), xpd = FALSE, mar = pmin(par("mar"),
                        1.1 + c(2 * axes + (xlab != ""), 2 * axes + (ylab !=
                            ""), 1, 0)), add = FALSE, plot = TRUE, ...)
{
    # --- input validation -------------------------------------------------
    if (is.data.frame(x))
        x <- data.matrix(x)
    else if (!is.matrix(x))
        stop("'x' must be a matrix or a data frame")
    if (!is.numeric(x))
        stop("data in 'x' must be numeric")
    n.loc <- nrow(x)   # one star per row
    n.seg <- ncol(x)   # one ray/segment per column
    # --- star placement ---------------------------------------------------
    if (is.null(locations)) {
        # Lay the stars out on a grid, deriving nrow/ncol as needed.
        if (is.null(nrow))
            nrow <- ceiling(if (!is.numeric(ncol)) sqrt(n.loc) else n.loc/ncol)
        if (is.null(ncol))
            ncol <- ceiling(n.loc/nrow)
        if (nrow * ncol < n.loc)
            stop("'nrow * ncol' is less than the number of observations")
        ff <- if (!is.null(labels))
            2.3
        else 2.1
        locations <- expand.grid(ff * 1L:ncol, ff * nrow:1)[1L:n.loc,
            ]
        if (!is.null(labels) && (missing(flip.labels) || !is.logical(flip.labels)))
            flip.labels <- ncol * mean(nchar(labels, type = "c")) >
                30
    }
    else {
        if (is.numeric(locations) && length(locations) == 2) {
            # A single location: draw all stars on top of each other.
            locations <- cbind(rep.int(locations[1L], n.loc),
                rep.int(locations[2L], n.loc))
            if (!missing(labels) && n.loc > 1)
                warning("labels do not make sense for a single location")
            else labels <- NULL
        }
        else {
            if (is.data.frame(locations))
                locations <- data.matrix(locations)
            if (!is.matrix(locations) || ncol(locations) != 2)
                stop("'locations' must be a 2-column matrix.")
            if (n.loc != nrow(locations))
                stop("number of rows of 'locations' and 'x' must be equal.")
        }
        if (missing(flip.labels) || !is.logical(flip.labels))
            flip.labels <- FALSE
    }
    xloc <- locations[, 1]
    yloc <- locations[, 2]
    # Ray angles: full circle, or half circle for the non-full variants.
    angles <- if (full)
        seq.int(0, 2 * pi, length.out = n.seg + 1)[-(n.seg +
            1)]
    else if (draw.segments)
        seq.int(0, pi, length.out = n.seg + 1)[-(n.seg + 1)]
    else seq.int(0, pi, length.out = n.seg)
    if (length(angles) != n.seg)
        stop("length of 'angles' must equal 'ncol(x)'")
    # Rescale every column to [0, 1] so the rays are comparable.
    if (scale) {
        x <- apply(x, 2L, function(x) (x - min(x, na.rm = TRUE))/diff(range(x,
            na.rm = TRUE)))
    }
    x[is.na(x)] <- 0
    mx <- max(x <- x * len)
    if (is.null(xlim))
        xlim <- range(xloc) + c(-mx, mx)
    if (is.null(ylim))
        ylim <- range(yloc) + c(-mx, mx)
    deg <- pi/180
    # Restore graphics state and flush held output when we leave.
    op <- par(mar = mar, xpd = xpd)
    on.exit(par(op))
    dev.hold()
    on.exit(dev.flush(), add = TRUE)
    if (plot && !add)
        plot(0, type = "n", ..., xlim = xlim, ylim = ylim, main = main,
            sub = sub, xlab = xlab, ylab = ylab, asp = 1, axes = axes)
    if (!plot)
        return(locations)
    # Cartesian coordinates of every ray tip (n.loc x n.seg).
    s.x <- xloc + x * rep.int(cos(angles), rep.int(n.loc, n.seg))
    s.y <- yloc + x * rep.int(sin(angles), rep.int(n.loc, n.seg))
    if (draw.segments) {
        # Segment-diagram variant: one filled wedge per variable.
        aangl <- c(angles, if (full) 2 * pi else pi)
        for (i in 1L:n.loc) {
            px <- py <- numeric()
            for (j in 1L:n.seg) {
                k <- seq.int(from = aangl[j], to = aangl[j +
                    1], by = 1 * deg)
                px <- c(px, xloc[i], s.x[i, j], x[i, j] * cos(k) +
                    xloc[i], NA)
                py <- c(py, yloc[i], s.y[i, j], x[i, j] * sin(k) +
                    yloc[i], NA)
            }
            polygon(px, py, col = col.segments, lwd = lwd, lty = lty)
        }
    }
    else {
        for (i in 1L:n.loc) {
            # Star outline with per-star border colour (the patch).
            # The original copy issued this polygon() call twice in a row,
            # drawing every outline twice; once is sufficient.
            polygon(s.x[i, ], s.y[i, ], lwd = lwd, lty = lty,
                border = col.lines[i], col = col.stars[i])
            if (radius)
                segments(rep.int(xloc[i], n.seg), rep.int(yloc[i],
                    n.seg), s.x[i, ], s.y[i, ], lwd = lwd, lty = lty)
        }
    }
    if (!is.null(labels)) {
        # Place labels below each star, optionally alternating height.
        y.off <- mx * (if (full)
            1
        else 0.1)
        if (flip.labels)
            y.off <- y.off + cex * par("cxy")[2L] * ((1L:n.loc)%%2 -
                if (full)
                    0.4
                else 0)
        text(xloc, yloc - y.off, labels, cex = cex, adj = c(0.5,
            1))
    }
    if (!is.null(key.loc)) {
        # Draw the unit key star plus variable labels around it.
        par(xpd = key.xpd)
        key.x <- len * cos(angles) + key.loc[1L]
        key.y <- len * sin(angles) + key.loc[2L]
        if (draw.segments) {
            px <- py <- numeric()
            for (j in 1L:n.seg) {
                k <- seq.int(from = aangl[j], to = aangl[j +
                    1], by = 1 * deg)
                px <- c(px, key.loc[1L], key.x[j], len * cos(k) +
                    key.loc[1L], NA)
                py <- c(py, key.loc[2L], key.y[j], len * sin(k) +
                    key.loc[2L], NA)
            }
            polygon(px, py, col = col.segments, lwd = lwd, lty = lty)
        }
        else {
            polygon(key.x, key.y, lwd = lwd, lty = lty)
            if (radius)
                segments(rep.int(key.loc[1L], n.seg), rep.int(key.loc[2L],
                    n.seg), key.x, key.y, lwd = lwd, lty = lty)
        }
        lab.angl <- angles + if (draw.segments)
            (angles[2L] - angles[1L])/2
        else 0
        label.x <- 1.1 * len * cos(lab.angl) + key.loc[1L]
        label.y <- 1.1 * len * sin(lab.angl) + key.loc[2L]
        for (k in 1L:n.seg) {
            # Pick text justification so labels sit outside the key star.
            text.adj <- c(if (lab.angl[k] < 90 * deg || lab.angl[k] >
                270 * deg) 0 else if (lab.angl[k] > 90 * deg &&
                lab.angl[k] < 270 * deg) 1 else 0.5, if (lab.angl[k] <=
                90 * deg) (1 - lab.angl[k]/(90 * deg))/2 else if (lab.angl[k] <=
                270 * deg) (lab.angl[k] - 90 * deg)/(180 * deg) else 1 -
                (lab.angl[k] - 270 * deg)/(180 * deg))
            text(label.x[k], label.y[k], labels = key.labels[k],
                cex = cex, adj = text.adj)
        }
    }
    if (frame.plot)
        box(...)
    invisible(locations)
}
# Overlapping stars at the origin with a key star legend (mtcars demo).
stars2(mtcars[, 1:7], locations = c(0,0), radius = FALSE,key.loc=c(0,0),
       main="Motor Trend Cars", lty = 2,col.lines = 1:nrow(mtcars))
# Gridded stars from the shuffled mockstudy matrix, one outline colour per row.
stars2(datar[1:42,c(1:38)], full=TRUE,
       labels = NULL, lwd=1.1, lty=6, frame.plot = TRUE,
       col.lines = 1:nrow(datar))
# Subset of mockstudy columns; shuffled and column-bound four times to
# widen the matrix fed to stars2 below.
fad <- data[,c("case","age","fu.time","hgb","bmi","alk.phos","ast","age.ord")]
fada <- cbind(fad[sample(nrow(fad)),], fad[sample(nrow(fad)),], fad[sample(nrow(fad)),], fad[sample(nrow(fad)),])
# Standard-normal density values, used only as histogram input below.
x <- seq(-4, 4, length=200)
y <- dnorm(x, mean=0, sd=1)
# Render the composite to a high-resolution TIFF on a cornsilk background.
tiff("Plot2.tif", width = 5, height = 4, units = 'in', res = 1000)
par(bg = 'cornsilk')
#plot(x,y, type = "l",xaxt='n', ann=FALSE,frame.plot=FALSE, labels=NULL, axes=FALSE,
#     lty = 2, lwd=0.55)
# Bare histogram of the density values: no axes, annotations, or frame.
hist(y,xaxt='n', ann=FALSE,frame.plot=FALSE, labels=NULL, axes=FALSE,
     lty = 2, lwd=0.55)
#par(new=TRUE, bg = 'cornsilk')
# Overlay the stars on the same plotting region without clearing it.
par(new=TRUE)
stars2(fada[1:42,c(1:18)], full=TRUE,
       labels = NULL, lwd=1.1, lty=6, frame.plot = FALSE,
       col.lines = 1:nrow(fada))
dev.off()
| /graph_art.R | no_license | delashu/aRt | R | false | false | 8,973 | r | library(ggplot2)
library(arsenal)
library(GGally)
library(network)
library(ggnet)
# mockstudy ships with the arsenal package loaded above.
data <- mockstudy
# Layered scatter/ellipse/2-D density plot with all axes and chrome removed.
ggplot(data, aes(x = age, y = ast, size=alk.phos, colour = case))+
  geom_point(alpha=0.35) +
  stat_ellipse()+
  stat_density_2d(aes())+
  scale_color_continuous(guide=FALSE)+
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black"),legend.position="none")+
  theme_void()+guides(fill=FALSE)
library(ggraph)
library(igraph)
library(tidyverse)
library(viridis)
# NOTE(review): the dangling `pckng <-` continues onto the next line, so
# both pckng and verti end up bound to the same two-column subset.
pckng <-
  verti <- data[,c("case","age")]
# NOTE(review): `edges` is never defined in this script, so this call
# fails as written -- confirm the intended edge list.
mg <- graph_from_data_frame(edges, vertices = verti)
# Recycled per-star outline colours for the base-graphics stars below.
palette(rainbow(12, s = 0.6, v = 0.75))
# Shuffle mockstudy's rows four times and bind the copies side by side.
datar <- cbind(data[sample(nrow(data)),], data[sample(nrow(data)),], data[sample(nrow(data)),],data[sample(nrow(data)),])
stars(datar[1:56,c(15:30)], full=TRUE,
      labels = NULL, lwd=1.1, lty=6, frame.plot = TRUE)
# stars2: a lightly patched copy of graphics::stars().
# The single functional change versus the base version is the extra
# `col.lines` argument, used as the `border` colour when each star's
# outline polygon is drawn, so every observation can get its own line
# colour. All other arguments behave as documented in ?graphics::stars.
# Returns the star locations, invisibly.
stars2 <- function (x, full = TRUE, scale = TRUE, radius = TRUE, labels = dimnames(x)[[1L]], col.lines = NULL,
                    locations = NULL, nrow = NULL, ncol = NULL, len = 1, key.loc = NULL,
                    key.labels = dimnames(x)[[2L]], key.xpd = TRUE, xlim = NULL,
                    ylim = NULL, flip.labels = NULL, draw.segments = FALSE, col.segments = 1L:n.seg,
                    col.stars = NA, axes = FALSE, frame.plot = axes,
                    main = NULL, sub = NULL, xlab = "", ylab = "", cex = 0.8,
                    lwd = 0.25, lty = par("lty"), xpd = FALSE, mar = pmin(par("mar"),
                        1.1 + c(2 * axes + (xlab != ""), 2 * axes + (ylab !=
                            ""), 1, 0)), add = FALSE, plot = TRUE, ...)
{
    # --- input validation -------------------------------------------------
    if (is.data.frame(x))
        x <- data.matrix(x)
    else if (!is.matrix(x))
        stop("'x' must be a matrix or a data frame")
    if (!is.numeric(x))
        stop("data in 'x' must be numeric")
    n.loc <- nrow(x)   # one star per row
    n.seg <- ncol(x)   # one ray/segment per column
    # --- star placement ---------------------------------------------------
    if (is.null(locations)) {
        # Lay the stars out on a grid, deriving nrow/ncol as needed.
        if (is.null(nrow))
            nrow <- ceiling(if (!is.numeric(ncol)) sqrt(n.loc) else n.loc/ncol)
        if (is.null(ncol))
            ncol <- ceiling(n.loc/nrow)
        if (nrow * ncol < n.loc)
            stop("'nrow * ncol' is less than the number of observations")
        ff <- if (!is.null(labels))
            2.3
        else 2.1
        locations <- expand.grid(ff * 1L:ncol, ff * nrow:1)[1L:n.loc,
            ]
        if (!is.null(labels) && (missing(flip.labels) || !is.logical(flip.labels)))
            flip.labels <- ncol * mean(nchar(labels, type = "c")) >
                30
    }
    else {
        if (is.numeric(locations) && length(locations) == 2) {
            # A single location: draw all stars on top of each other.
            locations <- cbind(rep.int(locations[1L], n.loc),
                rep.int(locations[2L], n.loc))
            if (!missing(labels) && n.loc > 1)
                warning("labels do not make sense for a single location")
            else labels <- NULL
        }
        else {
            if (is.data.frame(locations))
                locations <- data.matrix(locations)
            if (!is.matrix(locations) || ncol(locations) != 2)
                stop("'locations' must be a 2-column matrix.")
            if (n.loc != nrow(locations))
                stop("number of rows of 'locations' and 'x' must be equal.")
        }
        if (missing(flip.labels) || !is.logical(flip.labels))
            flip.labels <- FALSE
    }
    xloc <- locations[, 1]
    yloc <- locations[, 2]
    # Ray angles: full circle, or half circle for the non-full variants.
    angles <- if (full)
        seq.int(0, 2 * pi, length.out = n.seg + 1)[-(n.seg +
            1)]
    else if (draw.segments)
        seq.int(0, pi, length.out = n.seg + 1)[-(n.seg + 1)]
    else seq.int(0, pi, length.out = n.seg)
    if (length(angles) != n.seg)
        stop("length of 'angles' must equal 'ncol(x)'")
    # Rescale every column to [0, 1] so the rays are comparable.
    if (scale) {
        x <- apply(x, 2L, function(x) (x - min(x, na.rm = TRUE))/diff(range(x,
            na.rm = TRUE)))
    }
    x[is.na(x)] <- 0
    mx <- max(x <- x * len)
    if (is.null(xlim))
        xlim <- range(xloc) + c(-mx, mx)
    if (is.null(ylim))
        ylim <- range(yloc) + c(-mx, mx)
    deg <- pi/180
    # Restore graphics state and flush held output when we leave.
    op <- par(mar = mar, xpd = xpd)
    on.exit(par(op))
    dev.hold()
    on.exit(dev.flush(), add = TRUE)
    if (plot && !add)
        plot(0, type = "n", ..., xlim = xlim, ylim = ylim, main = main,
            sub = sub, xlab = xlab, ylab = ylab, asp = 1, axes = axes)
    if (!plot)
        return(locations)
    # Cartesian coordinates of every ray tip (n.loc x n.seg).
    s.x <- xloc + x * rep.int(cos(angles), rep.int(n.loc, n.seg))
    s.y <- yloc + x * rep.int(sin(angles), rep.int(n.loc, n.seg))
    if (draw.segments) {
        # Segment-diagram variant: one filled wedge per variable.
        aangl <- c(angles, if (full) 2 * pi else pi)
        for (i in 1L:n.loc) {
            px <- py <- numeric()
            for (j in 1L:n.seg) {
                k <- seq.int(from = aangl[j], to = aangl[j +
                    1], by = 1 * deg)
                px <- c(px, xloc[i], s.x[i, j], x[i, j] * cos(k) +
                    xloc[i], NA)
                py <- c(py, yloc[i], s.y[i, j], x[i, j] * sin(k) +
                    yloc[i], NA)
            }
            polygon(px, py, col = col.segments, lwd = lwd, lty = lty)
        }
    }
    else {
        for (i in 1L:n.loc) {
            # Star outline with per-star border colour (the patch).
            # The original copy issued this polygon() call twice in a row,
            # drawing every outline twice; once is sufficient.
            polygon(s.x[i, ], s.y[i, ], lwd = lwd, lty = lty,
                border = col.lines[i], col = col.stars[i])
            if (radius)
                segments(rep.int(xloc[i], n.seg), rep.int(yloc[i],
                    n.seg), s.x[i, ], s.y[i, ], lwd = lwd, lty = lty)
        }
    }
    if (!is.null(labels)) {
        # Place labels below each star, optionally alternating height.
        y.off <- mx * (if (full)
            1
        else 0.1)
        if (flip.labels)
            y.off <- y.off + cex * par("cxy")[2L] * ((1L:n.loc)%%2 -
                if (full)
                    0.4
                else 0)
        text(xloc, yloc - y.off, labels, cex = cex, adj = c(0.5,
            1))
    }
    if (!is.null(key.loc)) {
        # Draw the unit key star plus variable labels around it.
        par(xpd = key.xpd)
        key.x <- len * cos(angles) + key.loc[1L]
        key.y <- len * sin(angles) + key.loc[2L]
        if (draw.segments) {
            px <- py <- numeric()
            for (j in 1L:n.seg) {
                k <- seq.int(from = aangl[j], to = aangl[j +
                    1], by = 1 * deg)
                px <- c(px, key.loc[1L], key.x[j], len * cos(k) +
                    key.loc[1L], NA)
                py <- c(py, key.loc[2L], key.y[j], len * sin(k) +
                    key.loc[2L], NA)
            }
            polygon(px, py, col = col.segments, lwd = lwd, lty = lty)
        }
        else {
            polygon(key.x, key.y, lwd = lwd, lty = lty)
            if (radius)
                segments(rep.int(key.loc[1L], n.seg), rep.int(key.loc[2L],
                    n.seg), key.x, key.y, lwd = lwd, lty = lty)
        }
        lab.angl <- angles + if (draw.segments)
            (angles[2L] - angles[1L])/2
        else 0
        label.x <- 1.1 * len * cos(lab.angl) + key.loc[1L]
        label.y <- 1.1 * len * sin(lab.angl) + key.loc[2L]
        for (k in 1L:n.seg) {
            # Pick text justification so labels sit outside the key star.
            text.adj <- c(if (lab.angl[k] < 90 * deg || lab.angl[k] >
                270 * deg) 0 else if (lab.angl[k] > 90 * deg &&
                lab.angl[k] < 270 * deg) 1 else 0.5, if (lab.angl[k] <=
                90 * deg) (1 - lab.angl[k]/(90 * deg))/2 else if (lab.angl[k] <=
                270 * deg) (lab.angl[k] - 90 * deg)/(180 * deg) else 1 -
                (lab.angl[k] - 270 * deg)/(180 * deg))
            text(label.x[k], label.y[k], labels = key.labels[k],
                cex = cex, adj = text.adj)
        }
    }
    if (frame.plot)
        box(...)
    invisible(locations)
}
# Overlapping stars at the origin with a key star legend (mtcars demo).
stars2(mtcars[, 1:7], locations = c(0,0), radius = FALSE,key.loc=c(0,0),
       main="Motor Trend Cars", lty = 2,col.lines = 1:nrow(mtcars))
# Gridded stars from the shuffled mockstudy matrix, one outline colour per row.
stars2(datar[1:42,c(1:38)], full=TRUE,
       labels = NULL, lwd=1.1, lty=6, frame.plot = TRUE,
       col.lines = 1:nrow(datar))
# Subset of mockstudy columns; shuffled and column-bound four times to
# widen the matrix fed to stars2 below.
fad <- data[,c("case","age","fu.time","hgb","bmi","alk.phos","ast","age.ord")]
fada <- cbind(fad[sample(nrow(fad)),], fad[sample(nrow(fad)),], fad[sample(nrow(fad)),], fad[sample(nrow(fad)),])
# Standard-normal density values, used only as histogram input below.
x <- seq(-4, 4, length=200)
y <- dnorm(x, mean=0, sd=1)
# Render the composite to a high-resolution TIFF on a cornsilk background.
tiff("Plot2.tif", width = 5, height = 4, units = 'in', res = 1000)
par(bg = 'cornsilk')
#plot(x,y, type = "l",xaxt='n', ann=FALSE,frame.plot=FALSE, labels=NULL, axes=FALSE,
#     lty = 2, lwd=0.55)
# Bare histogram of the density values: no axes, annotations, or frame.
hist(y,xaxt='n', ann=FALSE,frame.plot=FALSE, labels=NULL, axes=FALSE,
     lty = 2, lwd=0.55)
#par(new=TRUE, bg = 'cornsilk')
# Overlay the stars on the same plotting region without clearing it.
par(new=TRUE)
stars2(fada[1:42,c(1:18)], full=TRUE,
       labels = NULL, lwd=1.1, lty=6, frame.plot = FALSE,
       col.lines = 1:nrow(fada))
dev.off()
|
# Fit a two-factor linear model of IC50 on cancer type and drug, then
# append the model summary and residual sum of squares to liver.txt.

# `header = TRUE` spelled out: the original `head = T` relied on partial
# argument matching and on the reassignable constant T.
mydata <- read.table('../../TrainingSet/FullSet/AvgRank/liver.csv', header = TRUE, sep = ",")

# Both predictors enter as unordered factors (fixed effects only).
model <- lm(IC50 ~ factor(Cancer) + factor(Drug), data = mydata)

# Residual sum of squares; c() collapses the 1x1 crossprod matrix to a scalar.
sse <- c(crossprod(model$residuals))

# Divert console output to the log file, then restore it.
sink('./liver.txt', append = TRUE)
print(summary(model))
print(sse)
sink()
| /Model/ANOVA/liver.R | no_license | esbgkannan/QSMART | R | false | false | 253 | r | mydata = read.table('../../TrainingSet/FullSet/AvgRank/liver.csv',head=T,sep=",")
# Fit a two-factor linear model of IC50 on cancer type and drug, then
# append the model summary and residual sum of squares to liver.txt.
model <- lm(IC50 ~ factor(Cancer) + factor(Drug), data = mydata)
# Residual sum of squares; c() collapses the 1x1 crossprod matrix to a scalar.
sse <- c(crossprod(model$residuals))
sink('./liver.txt', append = TRUE)  # divert console output to the log file
print(summary(model))
print(sse)
sink()                              # restore normal console output
|
\name{post_RA_3bm}
\alias{post_RA_3bm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Posterior reference analysis based on a data frame using 3 benchmarks
%% ~~function to do ... ~~
}
\description{
Computes a table of Hellinger distances between marginal posterior distributions
for different parameters in the NNHM
induced by the actual heterogeneity priors specified in \code{tau.prior} and
posterior benchmarks proposed in the Supplementary Material of Ott et al. (2021).
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
post_RA_3bm(df, tau.prior=list(function(x) dhalfnormal(x, scale=1)),
H.dist.method = "integral",
m_inf=NA, M_inf=NA, rlmc0=0.0001, rlmc1=0.9999,
mu.mean=0, mu.sd=4)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{df}{
data frame with one column "y" containing the (transformed) effect estimates for the individual studies and
one column "sigma" containing the standard errors of these estimates.
%% ~~Describe \code{x} here~~
}
\item{tau.prior}{
list of prior specifications, which are \emph{either} functions returning the probability densities of the actual priors of interest for the heterogeneity parameter tau \emph{or} character strings specifying priors implemented in the \code{bayesmeta} function. See the documentation of the argument \code{tau.prior} of the \code{bayesmeta} function for details.
}
%\item{tau.prior}{
% list of functions returning the probability densities of the actual priors of interest for the heterogeneity parameter tau.
%}
\item{H.dist.method}{
method for computation of Hellinger distances between marginal posterior densities. Either \code{"integral"} for integral-based computation or \code{"moment"} for approximate moment-based calculation using a normal approximation. Defaults to \code{"integral"}.
}
\item{m_inf}{
parameter value \eqn{m=m_{inf}} of the SGC(\eqn{m}) prior,
such that the median relative latent model complexity (RLMC) is close to 0.
If set to \code{NA} (the default), this parameter is computed
using the function \code{m_inf_sgc}, such that the median RLMC is
approximately equal to \code{rlmc0}.
}
\item{M_inf}{
parameter value \eqn{M=M_{inf}} of the SIGC(\eqn{M}) prior,
such that the median relative latent model complexity (RLMC) is close to 1.
If set to \code{NA} (the default), this parameter is computed
using the function \code{M_inf_sigc}, such that the median RLMC is
approximately equal to \code{rlmc1}.
}
\item{rlmc0}{
RLMC target value for the SGC(\eqn{m_{inf}}) benchmark prior (typically close to 0).
Is required only if \code{m_inf=NA}.
}
\item{rlmc1}{
RLMC target value for the SIGC(\eqn{M_{inf}}) benchmark prior (typically close to 1).
Is required only if \code{M_inf=NA}.
}
\item{mu.mean}{
mean of the normal prior for the effect mu.
}
\item{mu.sd}{
standard deviation of the normal prior for the effect mu.
}
}
\details{
The three posterior benchmarks used are
introduced in the Supplementary Material of Ott et al. (2021, Sections 2.2.1 and 2.5, see also Section 3.4 in Ott et al. (2021) for Jeffreys reference prior),
where they are denoted by
\eqn{po_{m_{inf}}(\Psi)}, \eqn{po_{J}(\Psi)} and \eqn{po_{M_{inf}}(\Psi)}.
Here, \eqn{\Psi \in \{ \mu, \tau, \theta_1, ..., \theta_k, \theta_{new} \}}
denotes the parameter of interest in the NNHM,
where \eqn{\theta_{i}} is the random effect in the \eqn{i}th study and
\eqn{\theta_{new}} the predicted effect for a new study.
Note that Jeffreys reference posterior \eqn{po_{J}} is proper if
there are at least two studies in the meta-analysis data set.
It is based on the improper Jeffreys reference prior,
which is minimally informative given the data.
If integral-based computation (\code{H.dist.method = "integral"}) of Hellinger distances is selected (the default), numerical integration is applied to obtain the Hellinger distance between the two marginal posterior densities (by using the function \code{H}).
If moment-based computation (\code{H.dist.method = "moment"}) is selected, the marginal densities are first approximated by normal densities with the same means and standard deviations and then the Hellinger distance between these normal densities can be obtained by an analytical formula (implemented in the function \code{H_normal}).
The default values for \code{mu.mean} and \code{mu.sd}
are suitable for effects mu on the log odds (ratio) scale.
%% ~~ If necessary, more details than the description above ~~
}
\value{
A list with two elements:
The first element named "table" is a matrix containing the Hellinger distance estimates and the
second element called "par" is a named vector giving the parameter values
of the benchmark priors.
The vector "par" has the following three components:
\code{m_inf}, \code{M_inf} and \code{C}.
The matrix "table" contains the Hellinger distance estimates between marginal posteriors
and has \eqn{3} columns and \eqn{n*(k+3)} rows,
where \eqn{n}=length(\code{tau.prior}) is the number of actual heterogeneity priors specified
and \eqn{k} is the number of studies in the meta-analysis data set
(so that there are \eqn{k+3} parameters of interest).
The columns of the matrix give the following Hellinger distance estimates
between two marginal posteriors (for the parameter of interest \eqn{\Psi} varying with rows)
induced by the following two heterogeneity priors
(from left to right):
\item{H(po_{m_inf}, po_act) }{ benchmark prior SGC(m_inf) and actual prior}
\item{H(po_J, po_act) }{ Jeffreys reference prior \eqn{\pi_J} and actual prior}
\item{H(po_{M_inf}, po_act) }{ benchmark prior SIGC(M_inf) and actual prior}
The actual heterogeneity prior and the parameter of interest \eqn{\Psi} vary
with the rows in the following order:
\item{mu, pri_act_1 }{ \eqn{\Psi=\mu} and first actual prior in \code{tau.prior}}
\item{mu, pri_act_2 }{ \eqn{\Psi=\mu} and second actual prior in \code{tau.prior}}
\item{... }{ }
\item{mu, pri_act_n }{ \eqn{\Psi=\mu} and nth actual prior in \code{tau.prior}}
\item{tau, pri_act_1 }{ \eqn{\Psi=\tau} and first actual prior in \code{tau.prior}}
\item{... }{ }
\item{tau, pri_act_n }{ \eqn{\Psi=\tau} and nth actual prior}
\item{theta_1, pri_act_1 }{ \eqn{\Psi=\theta_1} and first actual prior}
\item{... }{ }
\item{theta_k, pri_act_n }{ \eqn{\Psi=\theta_k} and nth actual prior}
\item{theta_new, pri_act_1 }{ \eqn{\Psi=\theta_{new}} and first actual prior}
\item{... }{ }
\item{theta_new, pri_act_n }{ \eqn{\Psi=\theta_{new}} and nth actual prior}
}
\references{
Ott, M., Plummer, M., Roos, M. Supplementary Material:
How vague is vague? How informative is informative? Reference analysis for
Bayesian meta-analysis. Revised for \emph{Statistics in Medicine}. 2021.
Ott, M., Plummer, M., Roos, M. How vague is vague? How informative is
informative? Reference analysis for
Bayesian meta-analysis. Manuscript revised for \emph{Statistics in Medicine}. 2021.
}
\section{Warnings }{
A warning message will be displayed if one of the parameters \code{m_inf} or \code{M_inf}
has a value larger than 5*10^6, since this may lead to numerical problems
in the \code{bayesmeta} function
used for computation of the marginal posteriors.
If the integral-based method is used to compute Hellinger distances (\code{H.dist.method = "integral"}),
numerical problems may occur in some cases, which may lead to implausible outputs.
Therefore, we generally recommend double-checking the results of the integral-based method using the moment-based method (\code{H.dist.method = "moment"}) - especially if the former results are implausible. If large differences between the two methods are observed, we recommend relying on the moment-based method unless a normal approximation of the involved densities is inappropriate.
}
\seealso{
\code{\link{post_RA_fits}},
\code{\link{pri_RA_5bm}}
}
\examples{
# for auricular acupuncture (AA) data set with two
# actual half-normal heterogeneity priors
data(aa)
\donttest{# it takes several seconds to run this function
post_RA_3bm(df=aa, tau.prior=list(function(t)dhalfnormal(t, scale=0.5),
function(t)dhalfnormal(t, scale=1)))
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory. | /man/post_RA_3bm.Rd | no_license | cran/ra4bayesmeta | R | false | false | 8,328 | rd | \name{post_RA_3bm}
\alias{post_RA_3bm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Posterior reference analysis based on a data frame using 3 benchmarks
%% ~~function to do ... ~~
}
\description{
Computes a table of Hellinger distances between marginal posterior distributions
for different parameters in the NNHM
induced by the actual heterogeneity priors specified in \code{tau.prior} and
posterior benchmarks proposed in the Supplementary Material of Ott et al. (2021).
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
post_RA_3bm(df, tau.prior=list(function(x) dhalfnormal(x, scale=1)),
H.dist.method = "integral",
m_inf=NA, M_inf=NA, rlmc0=0.0001, rlmc1=0.9999,
mu.mean=0, mu.sd=4)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{df}{
data frame with one column "y" containing the (transformed) effect estimates for the individual studies and
one column "sigma" containing the standard errors of these estimates.
%% ~~Describe \code{x} here~~
}
\item{tau.prior}{
list of prior specifications, which are \emph{either} functions returning the probability densities of the actual priors of interest for the heterogeneity parameter tau \emph{or} character strings specifying priors implemented in the \code{bayesmeta} function. See the documentation of the argument \code{tau.prior} of the \code{bayesmeta} function for details.
}
%\item{tau.prior}{
% list of functions returning the probability densities of the actual priors of interest for the heterogeneity parameter tau.
%}
\item{H.dist.method}{
method for computation of Hellinger distances between marginal posterior densities. Either \code{"integral"} for integral-based computation or \code{"moment"} for approximate moment-based calculation using a normal approximation. Defaults to \code{"integral"}.
}
\item{m_inf}{
parameter value \eqn{m=m_{inf}} of the SGC(\eqn{m}) prior,
such that the median relative latent model complexity (RLMC) is close to 0.
If set to \code{NA} (the default), this parameter is computed
using the function \code{m_inf_sgc}, such that the median RLMC is
approximately equal to \code{rlmc0}.
}
\item{M_inf}{
parameter value \eqn{M=M_{inf}} of the SIGC(\eqn{M}) prior,
such that the median relative latent model complexity (RLMC) is close to 1.
If set to \code{NA} (the default), this parameter is computed
using the function \code{M_inf_sigc}, such that the median RLMC is
approximately equal to \code{rlmc1}.
}
\item{rlmc0}{
RLMC target value for the SGC(\eqn{m_{inf}}) benchmark prior (typically close to 0).
Is required only if \code{m_inf=NA}.
}
\item{rlmc1}{
RLMC target value for the SIGC(\eqn{M_{inf}}) benchmark prior (typically close to 1).
Is required only if \code{M_inf=NA}.
}
\item{mu.mean}{
mean of the normal prior for the effect mu.
}
\item{mu.sd}{
standard deviation of the normal prior for the effect mu.
}
}
\details{
The three posterior benchmarks used are
introduced in the Supplementary Material of Ott et al. (2021, Sections 2.2.1 and 2.5, see also Section 3.4 in Ott et al. (2021) for Jeffreys reference prior),
where they are denoted by
\eqn{po_{m_{inf}}(\Psi)}, \eqn{po_{J}(\Psi)} and \eqn{po_{M_{inf}}(\Psi)}.
Here, \eqn{\Psi \in \{ \mu, \tau, \theta_1, ..., \theta_k, \theta_{new} \}}
denotes the parameter of interest in the NNHM,
where \eqn{\theta_{i}} is the random effect in the \eqn{i}th study and
\eqn{\theta_{new}} the predicted effect for a new study.
Note that Jeffreys reference posterior \eqn{po_{J}} is proper if
there are at least two studies in the meta-analysis data set.
It is based on the improper Jeffreys reference prior,
which is minimally informative given the data.
If integral-based computation (\code{H.dist.method = "integral"}) of Hellinger distances is selected (the default), numerical integration is applied to obtain the Hellinger distance between the two marginal posterior densities (by using the function \code{H}).
If moment-based computation (\code{H.dist.method = "moment"}) is selected, the marginal densities are first approximated by normal densities with the same means and standard deviations and then the Hellinger distance between these normal densities can be obtained by an analytical formula (implemented in the function \code{H_normal}).
The default values for \code{mu.mean} and \code{mu.sd}
are suitable for effects mu on the log odds (ratio) scale.
%% ~~ If necessary, more details than the description above ~~
}
\value{
A list with two elements:
The first element named "table" is a matrix containing the Hellinger distance estimates and the
second element called "par" is a named vector giving the parameter values
of the benchmark priors.
The vector "par" has the following three components:
\code{m_inf}, \code{M_inf} and \code{C}.
The matrix "table" contains the Hellinger distance estimates between marginal posteriors
and has \eqn{3} columns and \eqn{n*(k+3)} rows,
where \eqn{n}=length(\code{tau.prior}) is the number of actual heterogeneity priors specified
and \eqn{k} is the number of studies in the meta-analysis data set
(so that there are \eqn{k+3} parameters of interest).
The columns of the matrix give the following Hellinger distance estimates
between two marginal posteriors (for the parameter of interest \eqn{\Psi} varying with rows)
induced by the following two heterogeneity priors
(from left to right):
\item{H(po_{m_inf}, po_act) }{ benchmark prior SGC(m_inf) and actual prior}
\item{H(po_J, po_act) }{ Jeffreys reference prior \eqn{\pi_J} and actual prior}
\item{H(po_{M_inf}, po_act) }{ benchmark prior SIGC(M_inf) and actual prior}
The actual heterogeneity prior and the parameter of interest \eqn{\Psi} vary
with the rows in the following order:
\item{mu, pri_act_1 }{ \eqn{\Psi=\mu} and first actual prior in \code{tau.prior}}
\item{mu, pri_act_2 }{ \eqn{\Psi=\mu} and second actual prior in \code{tau.prior}}
\item{... }{ }
\item{mu, pri_act_n }{ \eqn{\Psi=\mu} and nth actual prior in \code{tau.prior}}
\item{tau, pri_act_1 }{ \eqn{\Psi=\tau} and first actual prior in \code{tau.prior}}
\item{... }{ }
\item{tau, pri_act_n }{ \eqn{\Psi=\tau} and nth actual prior}
\item{theta_1, pri_act_1 }{ \eqn{\Psi=\theta_1} and first actual prior}
\item{... }{ }
\item{theta_k, pri_act_n }{ \eqn{\Psi=\theta_k} and nth actual prior}
\item{theta_new, pri_act_1 }{ \eqn{\Psi=\theta_{new}} and first actual prior}
\item{... }{ }
\item{theta_new, pri_act_n }{ \eqn{\Psi=\theta_{new}} and nth actual prior}
}
\references{
Ott, M., Plummer, M., Roos, M. Supplementary Material:
How vague is vague? How informative is informative? Reference analysis for
Bayesian meta-analysis. Revised for \emph{Statistics in Medicine}. 2021.
Ott, M., Plummer, M., Roos, M. How vague is vague? How informative is
informative? Reference analysis for
Bayesian meta-analysis. Manuscript revised for \emph{Statistics in Medicine}. 2021.
}
\section{Warnings }{
A warning message will be displayed if one of the parameters \code{m_inf} or \code{M_inf}
has a value larger than 5*10^6, since this may lead to numerical problems
in the \code{bayesmeta} function
used for computation of the marginal posteriors.
If the integral-based method is used to compute Hellinger distances (\code{H.dist.method = "integral"}),
numerical problems may occur in some cases, which may lead to implausible outputs.
Therefore, we generally recommend double-checking the results of the integral-based method using the moment-based method (\code{H.dist.method = "moment"}) - especially if the former results are implausible. If large differences between the two methods are observed, we recommend relying on the moment-based method unless a normal approximation of the involved densities is inappropriate.
}
\seealso{
\code{\link{post_RA_fits}},
\code{\link{pri_RA_5bm}}
}
\examples{
# for auricular acupuncture (AA) data set with two
# actual half-normal heterogeneity priors
data(aa)
\donttest{# it takes several seconds to run this function
post_RA_3bm(df=aa, tau.prior=list(function(t)dhalfnormal(t, scale=0.5),
function(t)dhalfnormal(t, scale=1)))
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory. |
testlist <- list(id = NULL, id = NULL, booklet_id = c(8168473L, 2127314835L, 171177770L, 1848891177L, -1815221204L, 601253144L, -804651186L, 2094281728L, 860713787L, -971707632L, -1475044502L, 870040598L, -1182814578L, -1415711445L, 1901326755L, -1882837573L, 1340545259L, 1156041943L, 823641812L, -1106109928L, -1048157941L), person_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result) | /dexterMST/inst/testfiles/is_person_booklet_sorted/AFL_is_person_booklet_sorted/is_person_booklet_sorted_valgrind_files/1615939020-test.R | no_license | akhikolla/updatedatatype-list1 | R | false | false | 825 | r | testlist <- list(id = NULL, id = NULL, booklet_id = c(8168473L, 2127314835L, 171177770L, 1848891177L, -1815221204L, 601253144L, -804651186L, 2094281728L, 860713787L, -971707632L, -1475044502L, 870040598L, -1182814578L, -1415711445L, 1901326755L, -1882837573L, 1340545259L, 1156041943L, 823641812L, -1106109928L, -1048157941L), person_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result) |
#' Search for clinical trials satisfying a query
#'
#' Searches for results and returns a \code{data.frame} with basic study information.
#'
#' @param query Search pattern as a string; a vector of key-value pairs is
#'   interpreted as an advanced search and is therefore combined with '&'
#' @param count Limit the results to a specified integer. Set to NULL to include all results.
#'
#' @export
#'
#' @return A \code{data.frame} with the following columns: NCT study identifier, url, study title, status, condition summary, and date last changed
#'
#' @examples
#' # count trials satisfying 'heart disease AND stroke AND California'
#' \dontrun{clinicaltrials_search(query = 'heart disease AND stroke AND California')}
#'
#' # advanced search for open, interventional trials involving melanoma
#' \dontrun{clinicaltrials_search(query = c('recr=Open', 'type=Intr', 'cond=melanoma'))}
#'
#' # limit to 10 results
#' \dontrun{clinicaltrials_search(query = "colon cancer", count = 10)}
#'
#'
clinicaltrials_search <-
  function(query = NULL, count = 20)
{
  query_url <- "http://clinicaltrials.gov/ct2/results?"
  # paste_query() (package helper) collapses a key-value vector into a
  # single '&'-joined query string.
  query <- paste_query(query)
  # count defaults to 20; NULL means "return everything", so substitute a
  # bound safely above the registry size (~175k trials as of 18-Sept-2014).
  if (is.null(count)) count <- 1e6
  # Validate eagerly.  The previous check, is.integer(as.integer(count)),
  # was always TRUE (as.integer() yields NA_integer_ on bad input), which
  # let non-numeric counts through and produced a malformed "&count=NA" URL.
  count <- suppressWarnings(as.integer(count))
  if (is.na(count)) stop("Count must be a number")
  count_str <- paste0("&count=", count)
  search_result <- httr::GET(paste0(query_url, query, "&displayxml=true", count_str))
  # Spell out status_code(); the old `$status` only worked via partial
  # matching of the response's `status_code` field.
  if (httr::status_code(search_result) != 200) stop(httr::http_status(search_result)$message)
  parsed_result <- XML::xmlParse(httr::content(search_result, as = "text"))
  result_list <- XML::xmlToList(parsed_result)
  # The parsed list stores the match count in its `.attrs` entry.
  if (result_list$.attrs == "0") stop("Search returned 0 results")
  # Convert each <clinical_study> node to a one-row data.frame; bookkeeping
  # entries are dropped (frame_studylist returns NULL for them).
  result_frame <- do.call("rbind",
                          lapply(seq_along(result_list),
                                 function(i) frame_studylist(result_list[i])))
  result_frame$order <- NULL
  result_frame$status..attrs <- NULL
  rownames(result_frame) <- result_frame$nct_id
  result_frame
}
# result list processing
# Convert one entry of the parsed XML result list into a one-row data.frame.
# Bookkeeping entries ("query", ".attrs") yield NULL so that the caller's
# do.call(rbind, ...) silently drops them.
frame_studylist <- function(listitem) {
  entry_name <- names(listitem)
  if (entry_name %in% c("query", ".attrs")) {
    return(NULL)
  }
  as.data.frame(listitem[[1]], stringsAsFactors = FALSE)
}
| /R/clinicaltrials_search.R | permissive | meyera/rclinicaltrials | R | false | false | 2,287 | r | #' Search for clinical trials satisfying a query
#'
#' Searches for results and returns a \code{data.frame} with basic study information.
#'
#' @param query Search pattern as a string; a vector of key-value pairs is
#' interpreted as an advanced search and is therefore combined with '&'
#' @param count Limit the results to a specified integer. Set to NULL to include all results.
#'
#' @export
#'
#' @return A \code{data.frame} with the following columns: NCT study identifier, url, study title, status, condition summary, and date last changed
#'
#' @examples
#' # count trials satisfying 'heart disease AND stroke AND California'
#' \dontrun{clinicaltrials_search(query = 'heart disease AND stroke AND California')}
#'
#' # advanced search for open, interventional trials involving melanoma
#' \dontrun{clinicaltrials_search(query = c('recr=Open', 'type=Intr', 'cond=melanoma'))}
#'
#' # limit to 10 results
#' \dontrun{clinicaltrials_search(query = "colon cancer", count = 10)}
#'
#'
clinicaltrials_search <-
function(query = NULL, count = 20)
{
query_url <- "http://clinicaltrials.gov/ct2/results?"
query <- paste_query(query)
# count by default is 20, change to a very large number if count = NULL
if(is.null(count)) count <- 1e6 # there are currently 174862 trials as of 18-Sept-2014
if(!is.integer(as.integer(count))) stop("Count must be a number")
count_str <- paste0("&count=", as.integer(count))
search_result <- httr::GET(paste0(query_url, query, "&displayxml=true", count_str))
if(search_result$status != 200) stop(httr::http_status(search_result)$message)
parsed_result <- XML::xmlParse(httr::content(search_result, as = "text"))
result_list <- XML::xmlToList(parsed_result)
if(result_list$.attrs == "0") stop("Search returned 0 results")
#convert to data.frame
result_frame <- do.call("rbind", lapply(1:length(result_list), function(i) frame_studylist(result_list[i])))
result_frame$order <- NULL
result_frame$status..attrs <- NULL
rownames(result_frame) <- result_frame$nct_id
result_frame
}
# result list processing
frame_studylist <- function(listitem){
if(names(listitem) %in% c("query", ".attrs")) return(NULL)
as.data.frame(listitem[[1]], stringsAsFactors = FALSE)
}
|
# Ask the server for the compute-time quota.
#
# time: which counter to read -- "timeMax" or "timeLeft" (presumably total
#   vs. remaining; only the first element is used).
# Returns the requested counter as an integer.
util_getComputeTime <- function(time = c("timeMax", "timeLeft")) {
  util_validate()
  connection <- getOption("clientConnection")
  request <- .jnew("com.portfolioeffect.quant.client.RequestToServer")
  quota <- request$getComputeTimeLeft(connection)
  quota$getValueInt(time[1])
}
# Convert numeric POSIX timestamps to date strings via the Java DateTimeUtil
# helper; non-numeric input is passed through unchanged.
util_POSIXTimeToDate <- function(time) {
  if (!is.numeric(time)) {
    return(time)
  }
  converter <- .jnew("com.portfolioeffect.quant.client.util.DateTimeUtil")
  .jcall(converter, returnSig = "[S", method = "POSIXTimeToDateStr", .jlong(time))
}
# Convert date strings to POSIX timestamps via the Java DateTimeUtil helper;
# numeric input is assumed to already be a timestamp and is returned as-is.
util_dateToPOSIXTime <- function(time) {
  if (is.numeric(time)) {
    return(time)
  }
  converter <- .jnew("com.portfolioeffect.quant.client.util.DateTimeUtil")
  # Calibration step: 1414818000000 is the expected conversion of the
  # reference instant below, so the difference appears to correct for the
  # JVM's local time-zone offset -- TODO confirm against the Java side.
  offset <- 1414818000000 - .jcall(converter, returnSig = "[J", method = "toPOSIXTime", "2014-11-01 01:00:00")
  .jcall(converter, returnSig = "[J", method = "toPOSIXTime", time) + offset
}
# Save a screenshot to `path` after waiting `delaySec` seconds (giving any
# freshly opened plot window time to finish rendering).
util_screenshot<-function(path, delaySec=15){
Sys.sleep(delaySec)
J("com.portfolioeffect.quant.client.util.Screenshots")$takeScreenshot(path)
}
# util_setCredentials<-function(username,password,apiKey,hostname="quant07.portfolioeffect.com"){
# way<-switch(Sys.info()[['sysname']],
# Windows= {paste(Sys.getenv("APPDATA"),"\\ice9",sep="")},
# Linux = {paste(Sys.getenv("HOME"),"/.ice9",sep="")},
# Darwin = {paste(Sys.getenv("HOME"),"/.ice9",sep="")})
# way<-gsub("\\","/",way,fixed = T)
# way<-paste0(way, "/data")
# if (!file.exists(way)){
# dir.create(way,recursive=TRUE)
# }
# unlink(paste0(way,"/login.RData"), recursive = TRUE, force = FALSE)
# login<-c(username,password,apiKey,hostname)
# save(login, file=paste0(way, "/login.RData"))
# rm(login)
# if(!is.null(options('clientConnection')$clientConnection)){
# clientConnection=getOption('clientConnection')
# if(is.null(clientConnection)){
# options("clientConnection"=.jnew("com.portfolioeffect.quant.client.ClientConnection"))
# }
# .jcall(clientConnection,returnSig="V", method="setUsername",username)
# .jcall(clientConnection,returnSig="V", method="setPassword",password)
# .jcall(clientConnection,returnSig="V", method="setApiKey",apiKey)
# .jcall(clientConnection,returnSig="V", method="setHost",hostname)
# resultTemp=.jcall(clientConnection,returnSig="Lcom/portfolioeffect/quant/client/result/Metric;", method="restart")
# util_checkErrors(resultTemp)
# }
# util_validate()
# clientConnection=getOption('clientConnection')
# resultTemp=.jcall(clientConnection,returnSig="Lcom/portfolioeffect/quant/client/result/Metric;", method="restart")
# util_checkErrors(resultTemp)
# }
# Persist API credentials to the per-user settings directory and
# (re)configure the shared Java client connection.
#
# username/password/apiKey: PortfolioEffect account credentials.
# hostname: quant server to connect to.
#
# Side effects: writes <settings>/data/login.RData (replacing any previous
# login file) and restarts the connection stored in the 'clientConnection'
# option.
util_setCredentials <- function(username, password, apiKey,
                                hostname = "quant07.portfolioeffect.com") {
  # Per-OS settings directory (APPDATA on Windows, ~/.ice9 elsewhere).
  way <- switch(Sys.info()[["sysname"]],
                Windows = {paste(Sys.getenv("APPDATA"), "\\ice9", sep = "")},
                Linux = {paste(Sys.getenv("HOME"), "/.ice9", sep = "")},
                Darwin = {paste(Sys.getenv("HOME"), "/.ice9", sep = "")})
  way <- gsub("\\", "/", way, fixed = TRUE)
  way <- paste0(way, "/data")
  if (!file.exists(way)) {
    dir.create(way, recursive = TRUE)
  }
  # Replace any previously stored credentials.
  unlink(paste0(way, "/login.RData"), recursive = TRUE, force = FALSE)
  login <- c(username, password, apiKey, hostname)
  save(login, file = paste0(way, "/login.RData"))
  rm(login)
  # Stop a live connection (if any) so it can be re-created with the new
  # credentials.  (The original's inner is.null() branch was unreachable.)
  clientConnection <- getOption("clientConnection")
  if (!is.null(clientConnection)) {
    clientConnection$stop()
  }
  # util_validate() creates the 'clientConnection' option when missing and
  # loads the credentials just saved.
  util_validate()
  # Re-fetch the connection: the original kept a stale NULL reference here,
  # which made the very first call (no pre-existing connection) fail on the
  # .jcall below.
  clientConnection <- getOption("clientConnection")
  .jcall(clientConnection, returnSig = "V", method = "setUsername", username)
  .jcall(clientConnection, returnSig = "V", method = "setPassword", password)
  .jcall(clientConnection, returnSig = "V", method = "setApiKey", apiKey)
  .jcall(clientConnection, returnSig = "V", method = "setHost", hostname)
  resultTemp <- clientConnection$restart()
  util_checkErrors(resultTemp)
}
# Delete the saved login file (credentials) from the per-user settings
# directory, creating the directory first if it does not exist yet.
util_cleanCredentials <- function() {
  base_dir <- switch(Sys.info()[["sysname"]],
                     Windows = {paste(Sys.getenv("APPDATA"), "\\ice9", sep = "")},
                     Linux = {paste(Sys.getenv("HOME"), "/.ice9", sep = "")},
                     Darwin = {paste(Sys.getenv("HOME"), "/.ice9", sep = "")})
  data_dir <- paste0(gsub("\\", "/", base_dir, fixed = TRUE), "/data")
  if (!file.exists(data_dir)) {
    dir.create(data_dir, recursive = TRUE)
  }
  unlink(paste0(data_dir, "/login.RData"), recursive = TRUE, force = FALSE)
}
# Validate caller-supplied arguments and ensure an initialised, credentialed
# 'clientConnection' option exists.
#
# argList: named list of arguments to check; unknown names are ignored.
#   Violations abort via stopMessage() with a descriptive error code.
# Side effects: may create the 'clientConnection' option (a Java
# ClientConnection) and re-applies the credentials saved by
# util_setCredentials().
util_validate <- function(argList = NULL) {
  # Scalar numeric arguments share three rules: numeric class, length one,
  # and (except 'portfolioValue') non-negativity.
  numericArgs <- c("lag", "confidenceInterval", "thresholdReturn",
                   "confidenceIntervalA", "confidenceIntervalB",
                   "pValueLeft", "pValueRight", "nPoints", "portfolioValue")
  for (name in names(argList)) {
    if (name == "portfolio" && !is(argList[[name]], "portfolio")) {
      stopMessage("NOT_PORTFOLIO_CLASS")
    }
    if (name == "optimizer" && !is(argList[[name]], "optimizer")) {
      stopMessage("NOT_OPTIMIZER_CLASS")
    }
    # 'constraintMertic' is spelled as used throughout the package (sic).
    if (name %in% c("symbol", "symbol1", "symbol2", "metricType",
                    "constraintType", "constraintMertic", "goal",
                    "direction") && !is(argList[[name]], "character")) {
      stopMessage("OBJECT_NOT_CHARACTER_CLASS", names = name)
    }
    if (name %in% numericArgs) {
      if (!is(argList[[name]], "numeric")) {
        stopMessage("OBJECT_NOT_NUMERIC_CLASS", names = name)
      }
      if (length(argList[[name]]) != 1) {
        stopMessage("OBJECT_NOT_SINGLE_NUMBER", names = name)
      }
      if (name != "portfolioValue" && argList[[name]] < 0) {
        stopMessage("OBJECT_NOT_POSITIVE_NUMBER", names = name)
      }
    }
  }
  # Locate the credentials file written by util_setCredentials().
  way <- switch(Sys.info()[["sysname"]],
                Windows = {paste(Sys.getenv("APPDATA"), "\\ice9", sep = "")},
                Linux = {paste(Sys.getenv("HOME"), "/.ice9", sep = "")},
                Darwin = {paste(Sys.getenv("HOME"), "/.ice9", sep = "")})
  way <- gsub("\\", "/", way, fixed = TRUE)
  way <- paste0(way, "/data/login.RData")
  if (!file.exists(way)) {
    stopMessage("FILE_CREDENTIALS_NO_EXISTS")
  }
  # Create the shared connection exactly once, then (re)load the saved
  # credentials.  (The original duplicated the load/configure sequence in
  # both branches of an if/else over the same condition.)
  if (is.null(getOption("clientConnection"))) {
    options("clientConnection" = .jnew("com.portfolioeffect.quant.client.ClientConnection"))
  }
  clientConnection <- getOption("clientConnection")
  login <- NULL  # populated by load(); declared for clarity/R CMD check
  load(way)      # restores login = c(username, password, apiKey, hostname)
  .jcall(clientConnection, returnSig = "V", method = "setUsername", login[1])
  .jcall(clientConnection, returnSig = "V", method = "setPassword", login[2])
  .jcall(clientConnection, returnSig = "V", method = "setApiKey", login[3])
  .jcall(clientConnection, returnSig = "V", method = "setHost", login[4])
}
# Abort with a formatted R error when the Java result object reports one.
#
# The '#' padding frames the 15-character " ERROR MESSAGE " header so the
# banner roughly matches the message width.  The original used min(..., 0),
# which always yields zero hashes and printed a bare, unframed header;
# max(..., 0) restores the intended banner.
util_checkErrors <- function(result) {
  if (getErrorStatus(result)) {
    message_text <- getErrorMessage(result)
    n <- nchar(message_text)
    side <- paste(array("#", dim = max(abs(round(((n - 15) / 2) + 0.01)), 0)), collapse = "")
    bottom <- paste(array("#", dim = max(n + (n - 15) %% 2, 0)), collapse = "")
    header <- paste(side, " ERROR MESSAGE ", side, sep = "")
    stop(paste("", header, message_text, bottom, sep = "\n"), call. = FALSE)
  }
}
# Print the connection's status string when it is non-empty.
printStatus <- function() {
  connection <- getOption("clientConnection")
  status <- .jcall(connection, returnSig = "S", method = "getStatus")
  if (status != "") {
    print(status)
  }
}
# Unpack a Java result object into R values.
#
# data: Java object exposing getDataType()/typed getters per named series.
# metricClass: if TRUE, skip getDataNames() and read the fixed
#   "time"/"value" pair that metric results carry.
#
# Returns the embedded Portfolio object when a PORTFOLIO entry is present;
# otherwise the named series bound together column-wise.
getResult<-function(data,metricClass=F){
# Fail fast if the server flagged an error on this result.
util_checkErrors(data)
if(metricClass){
dataNames<-c("time","value")
}else{
dataNames<-.jcall(data,returnSig="[S", method="getDataNames")
}
result=NULL
for(dataName in dataNames){
# Dispatch on the declared element type; the commented-out branches are
# type codes kept for reference but not currently handled.
dataType<-.jcall(data,returnSig="S", method="getDataType",dataName)
resultTemp<-switch(dataType,
# NULL =,
DOUBLE =.jcall(data,returnSig="D", method="getDouble", dataName),
DOUBLE_VECTOR =.jcall(data,returnSig="[D", method="getDoubleArray", dataName),
DOUBLE_MATRIX =.jcall(data,returnSig="[[D", method="getDoubleMatrix", dataName, simplify=TRUE),
INT_VECTOR =.jcall(data,returnSig="[I", method="getIntArray", dataName),
# INT_MATRIX =.jcall(data,returnSig="[[I", method="getDoubleMatrix", dataName, simplify=TRUE),
LONG_VECTOR = .jcall(data,returnSig="[J", method="getLongArray", dataName),
# LONG_MATRIX =.jcall(data,returnSig="[[J", method="getDoubleMatrix", dataName, simplify=TRUE),
# FLOAT_VECTOR =.jcall(data,returnSig="[D", method="getFloatArray", dataName),
# FLOAT_MATRIX =.jcall(data,returnSig="[[D", method="getDoubleMatrix", dataName, simplify=TRUE),
# STRING =.jcall(data,returnSig="[D", method="getDoubleArray", dataName),
STRING_VECTOR =.jcall(data,returnSig="[S", method="getStringArray", dataName),
PORTFOLIO =.jcall(data,returnSig="Lcom/portfolioeffect/quant/client/portfolio/Portfolio;", method="getPortfolio", "portfolio"))
# A PORTFOLIO entry replaces the whole result; everything else accumulates
# column-wise.
if(dataType=="PORTFOLIO"){
result=resultTemp
}else{
result=cbind(result,resultTemp)
}
}
# Only multi-row results get column labels (a scalar keeps no names).
if(NROW(result)>1){
colnames(result)<-dataNames
}
return(result)
}
# TRUE when the Java result object carries an error flag.
getErrorStatus<-function(result){
.jcall(result,returnSig="Z", method="hasError")
}
# Error description attached to the Java result object.
getErrorMessage<-function(result){
.jcall(result,returnSig="S", method="getErrorMessage")
}
# The "time" series of the result as a vector of Java longs (POSIX ms
# timestamps, presumably -- see util_POSIXTimeToDate).
getTimeMilliSec<-function(result){
.jcall(result,returnSig="[J", method="getLongArray", "time")
}
# Last element of the "value" series as a long.
getResultValuesLong<-function(result){
.jcall(result,returnSig="J", method="getLastLong", "value")
}
# Full "value" series as a double vector.
getResultValuesDoubleArray<-function(result){
.jcall(result,returnSig="[D", method="getDoubleArray", "value")
}
# Two-column matrix pairing the result's "time" series with its "value"
# series, labelled "Time" and "Value".
getResultValuesDoubleArrayWithTime <- function(result) {
  times <- .jcall(result, returnSig = "[J", method = "getLongArray", "time")
  values <- .jcall(result, returnSig = "[D", method = "getDoubleArray", "value")
  out <- cbind(times, values)
  colnames(out) <- c("Time", "Value")
  out
}
# Matrix of per-position values from the result's "value" series, prefixed
# with a "Time" column and labelled with the portfolio's position names.
getResultValuesDouble2DArray <- function(portfolio, result) {
  times <- .jcall(result, returnSig = "[J", method = "getLongArray", "time")
  values <- .jcall(result, returnSig = "[[D", method = "getDoubleMatrix", "value", simplify = TRUE)
  out <- cbind(times, values)
  colnames(out) <- c("Time", position_list(portfolio))
  out
}
# Extract the embedded Portfolio Java object from a result.
getResultValuesPortfolio<-function(result){
.jcall(result,returnSig="Lcom/portfolioeffect/quant/client/portfolio/Portfolio;", method="getPortfolio", "portfolio")
}
| /R/util.R | no_license | IanMadlenya/PortfolioEffectHFT | R | false | false | 10,683 | r | util_getComputeTime<-function(time=c("timeMax","timeLeft")){
util_validate()
clientConnection=getOption('clientConnection')
requestToServer=.jnew("com.portfolioeffect.quant.client.RequestToServer")
result=requestToServer$getComputeTimeLeft(clientConnection)
result=result$getValueInt(time[1])
return(result)
}
util_POSIXTimeToDate<-function(time){
if(is.numeric(time)){
DateTimeUtil=.jnew("com.portfolioeffect.quant.client.util.DateTimeUtil")
time=.jcall(DateTimeUtil,returnSig='[S',method="POSIXTimeToDateStr",.jlong(time))
}
time
}
util_dateToPOSIXTime<-function(time){
if(!is.numeric(time)){
DateTimeUtil=.jnew("com.portfolioeffect.quant.client.util.DateTimeUtil")
check=1414818000000-.jcall(DateTimeUtil,returnSig='[J',method="toPOSIXTime",'2014-11-01 01:00:00')
time=.jcall(DateTimeUtil,returnSig='[J',method="toPOSIXTime",time)+check
}
time
}
util_screenshot<-function(path, delaySec=15){
Sys.sleep(delaySec)
J("com.portfolioeffect.quant.client.util.Screenshots")$takeScreenshot(path)
}
# util_setCredentials<-function(username,password,apiKey,hostname="quant07.portfolioeffect.com"){
# way<-switch(Sys.info()[['sysname']],
# Windows= {paste(Sys.getenv("APPDATA"),"\\ice9",sep="")},
# Linux = {paste(Sys.getenv("HOME"),"/.ice9",sep="")},
# Darwin = {paste(Sys.getenv("HOME"),"/.ice9",sep="")})
# way<-gsub("\\","/",way,fixed = T)
# way<-paste0(way, "/data")
# if (!file.exists(way)){
# dir.create(way,recursive=TRUE)
# }
# unlink(paste0(way,"/login.RData"), recursive = TRUE, force = FALSE)
# login<-c(username,password,apiKey,hostname)
# save(login, file=paste0(way, "/login.RData"))
# rm(login)
# if(!is.null(options('clientConnection')$clientConnection)){
# clientConnection=getOption('clientConnection')
# if(is.null(clientConnection)){
# options("clientConnection"=.jnew("com.portfolioeffect.quant.client.ClientConnection"))
# }
# .jcall(clientConnection,returnSig="V", method="setUsername",username)
# .jcall(clientConnection,returnSig="V", method="setPassword",password)
# .jcall(clientConnection,returnSig="V", method="setApiKey",apiKey)
# .jcall(clientConnection,returnSig="V", method="setHost",hostname)
# resultTemp=.jcall(clientConnection,returnSig="Lcom/portfolioeffect/quant/client/result/Metric;", method="restart")
# util_checkErrors(resultTemp)
# }
# util_validate()
# clientConnection=getOption('clientConnection')
# resultTemp=.jcall(clientConnection,returnSig="Lcom/portfolioeffect/quant/client/result/Metric;", method="restart")
# util_checkErrors(resultTemp)
# }
# Persist API credentials to <settings>/data/login.RData and reconfigure
# the shared Java connection stored in the 'clientConnection' option.
util_setCredentials<-function(username,password,apiKey,hostname="quant07.portfolioeffect.com"){
# Per-OS settings directory (APPDATA on Windows, ~/.ice9 elsewhere).
way<-switch(Sys.info()[['sysname']],
Windows= {paste(Sys.getenv("APPDATA"),"\\ice9",sep="")},
Linux = {paste(Sys.getenv("HOME"),"/.ice9",sep="")},
Darwin = {paste(Sys.getenv("HOME"),"/.ice9",sep="")})
way<-gsub("\\","/",way,fixed = T)
way<-paste0(way, "/data")
if (!file.exists(way)){
dir.create(way,recursive=TRUE)
}
# Replace any previously saved credentials.
unlink(paste0(way,"/login.RData"), recursive = TRUE, force = FALSE)
login<-c(username,password,apiKey,hostname)
save(login, file=paste0(way, "/login.RData"))
rm(login)
# Stop a live connection so it can be restarted with the new credentials.
if(!is.null(options('clientConnection')$clientConnection)){
clientConnection=getOption('clientConnection')
# NOTE(review): this is.null() branch is unreachable -- the outer test
# already proved the option non-NULL.
if(is.null(clientConnection)){
options("clientConnection"=.jnew("com.portfolioeffect.quant.client.ClientConnection"))
clientConnection=getOption('clientConnection')
}else{
clientConnection$stop()
}
}else{
# NOTE(review): clientConnection stays NULL on this path; util_validate()
# creates the option below, but this local variable is never refreshed, so
# the .jcall sequence should fail on a first-ever call -- confirm.
clientConnection=getOption('clientConnection')
}
util_validate()
.jcall(clientConnection,returnSig="V", method="setUsername",username)
.jcall(clientConnection,returnSig="V", method="setPassword",password)
.jcall(clientConnection,returnSig="V", method="setApiKey",apiKey)
.jcall(clientConnection,returnSig="V", method="setHost",hostname)
resultTemp=clientConnection$restart()
util_checkErrors(resultTemp)
}
util_cleanCredentials<-function(){
way<-switch(Sys.info()[['sysname']],
Windows= {paste(Sys.getenv("APPDATA"),"\\ice9",sep="")},
Linux = {paste(Sys.getenv("HOME"),"/.ice9",sep="")},
Darwin = {paste(Sys.getenv("HOME"),"/.ice9",sep="")})
way<-gsub("\\","/",way,fixed = T)
way<-paste0(way, "/data")
if (!file.exists(way)){
dir.create(way,recursive=TRUE)
}
unlink(paste0(way,"/login.RData"), recursive = TRUE, force = FALSE)
}
util_validate<- function(argList=NULL) {
names<-names(argList)
for(name in names){
if((name %in% c('portfolio'))&(!is(argList[[name]],'portfolio'))){
stopMessage("NOT_PORTFOLIO_CLASS")
}
if((name %in% c('optimizer'))&(!is(argList[[name]],'optimizer'))){
stopMessage("NOT_OPTIMIZER_CLASS")
}
if((name %in% c('symbol','symbol1','symbol2','metricType','constraintType','constraintMertic','goal','direction'))&(!is(argList[[name]],'character'))){
stopMessage("OBJECT_NOT_CHARACTER_CLASS",names=name)
}
if((name %in% c('lag','confidenceInterval','thresholdReturn','confidenceIntervalA','confidenceIntervalB','pValueLeft','pValueRight','nPoints','portfolioValue'))&(!is(argList[[name]],'numeric'))){
stopMessage("OBJECT_NOT_NUMERIC_CLASS",names=name)
}
if((name %in% c('lag','confidenceInterval','thresholdReturn','confidenceIntervalA','confidenceIntervalB','pValueLeft','pValueRight','nPoints','portfolioValue'))&(!(length(argList[[name]])==1))){
stopMessage("OBJECT_NOT_SINGLE_NUMBER",names=name)
}
if(name %in% c('lag','confidenceInterval','thresholdReturn','confidenceIntervalA','confidenceIntervalB','pValueLeft','pValueRight','nPoints')){
if((argList[[name]]<0)){
stopMessage("OBJECT_NOT_POSITIVE_NUMBER",names=name)
}
}
}
way<-switch(Sys.info()[['sysname']],
Windows= {paste(Sys.getenv("APPDATA"),"\\ice9",sep="")},
Linux = {paste(Sys.getenv("HOME"),"/.ice9",sep="")},
Darwin = {paste(Sys.getenv("HOME"),"/.ice9",sep="")})
way<-gsub("\\","/",way,fixed = T)
way<-paste0(way, "/data/login.RData")
if(!file.exists(way)){
stopMessage('FILE_CREDENTIALS_NO_EXISTS')
}
if(is.null(options('clientConnection')$clientConnection)){
options("clientConnection"=.jnew("com.portfolioeffect.quant.client.ClientConnection"))
clientConnection=getOption('clientConnection')
login<-NULL;
load(way)
.jcall(clientConnection,returnSig="V", method="setUsername",login[1])
.jcall(clientConnection,returnSig="V", method="setPassword",login[2])
.jcall(clientConnection,returnSig="V", method="setApiKey",login[3])
.jcall(clientConnection,returnSig="V", method="setHost",login[4])
}else{
clientConnection=getOption('clientConnection')
if(is.null(clientConnection)){
options("clientConnection"=.jnew("com.portfolioeffect.quant.client.ClientConnection"))
clientConnection=getOption('clientConnection')}
login<-NULL;
load(way)
.jcall(clientConnection,returnSig="V", method="setUsername",login[1])
.jcall(clientConnection,returnSig="V", method="setPassword",login[2])
.jcall(clientConnection,returnSig="V", method="setApiKey",login[3])
.jcall(clientConnection,returnSig="V", method="setHost",login[4])
}
}
# Abort with a formatted R error when the Java result object reports one.
util_checkErrors<-function(result){
if(getErrorStatus(result)){
Message=getErrorMessage(result)
n=nchar(Message)
# NOTE(review): min(..., 0) always evaluates to 0, so both padding strings
# are empty and the banner degenerates to a bare " ERROR MESSAGE " header;
# max() was probably intended -- confirm.
c=paste(array("#",dim=min(abs(round(((n-15)/2)+0.01)),0)),collapse="")
cc=paste(array("#",dim=min(n+(n-15)%%2,0)),collapse="")
k=paste(c," ERROR MESSAGE ",c,sep="")
stop(paste("",k,Message,cc,sep="\n"),call.=FALSE)
}
}
printStatus<-function(){
clientConnection=getOption('clientConnection')
temp<-.jcall(clientConnection,returnSig="S", method="getStatus")
if(temp!=""){print(temp)}
}
getResult<-function(data,metricClass=F){
util_checkErrors(data)
if(metricClass){
dataNames<-c("time","value")
}else{
dataNames<-.jcall(data,returnSig="[S", method="getDataNames")
}
result=NULL
for(dataName in dataNames){
dataType<-.jcall(data,returnSig="S", method="getDataType",dataName)
resultTemp<-switch(dataType,
# NULL =,
DOUBLE =.jcall(data,returnSig="D", method="getDouble", dataName),
DOUBLE_VECTOR =.jcall(data,returnSig="[D", method="getDoubleArray", dataName),
DOUBLE_MATRIX =.jcall(data,returnSig="[[D", method="getDoubleMatrix", dataName, simplify=TRUE),
INT_VECTOR =.jcall(data,returnSig="[I", method="getIntArray", dataName),
# INT_MATRIX =.jcall(data,returnSig="[[I", method="getDoubleMatrix", dataName, simplify=TRUE),
LONG_VECTOR = .jcall(data,returnSig="[J", method="getLongArray", dataName),
# LONG_MATRIX =.jcall(data,returnSig="[[J", method="getDoubleMatrix", dataName, simplify=TRUE),
# FLOAT_VECTOR =.jcall(data,returnSig="[D", method="getFloatArray", dataName),
# FLOAT_MATRIX =.jcall(data,returnSig="[[D", method="getDoubleMatrix", dataName, simplify=TRUE),
# STRING =.jcall(data,returnSig="[D", method="getDoubleArray", dataName),
STRING_VECTOR =.jcall(data,returnSig="[S", method="getStringArray", dataName),
PORTFOLIO =.jcall(data,returnSig="Lcom/portfolioeffect/quant/client/portfolio/Portfolio;", method="getPortfolio", "portfolio"))
if(dataType=="PORTFOLIO"){
result=resultTemp
}else{
result=cbind(result,resultTemp)
}
}
if(NROW(result)>1){
colnames(result)<-dataNames
}
return(result)
}
getErrorStatus<-function(result){
.jcall(result,returnSig="Z", method="hasError")
}
getErrorMessage<-function(result){
.jcall(result,returnSig="S", method="getErrorMessage")
}
getTimeMilliSec<-function(result){
.jcall(result,returnSig="[J", method="getLongArray", "time")
}
getResultValuesLong<-function(result){
.jcall(result,returnSig="J", method="getLastLong", "value")
}
getResultValuesDoubleArray<-function(result){
.jcall(result,returnSig="[D", method="getDoubleArray", "value")
}
getResultValuesDoubleArrayWithTime<-function(result){
result<-cbind(.jcall(result,returnSig="[J", method="getLongArray", "time"),.jcall(result,returnSig="[D", method="getDoubleArray", "value"))
colnames(result)<-c("Time","Value")
return(result)
}
# Time column plus one value column per portfolio position: "value" is a
# double matrix ("[[D"); value columns are named via position_list().
getResultValuesDouble2DArray <- function(portfolio, result) {
  time_ms <- .jcall(result, returnSig = "[J", method = "getLongArray", "time")
  value_mat <- .jcall(result, returnSig = "[[D", method = "getDoubleMatrix", "value", simplify = TRUE)
  out <- cbind(time_ms, value_mat)
  colnames(out) <- c("Time", position_list(portfolio))
  out
}
# Unwrap the nested Portfolio Java object from a result container.
getResultValuesPortfolio <- function(result) {
  sig <- "Lcom/portfolioeffect/quant/client/portfolio/Portfolio;"
  .jcall(result, returnSig = sig, method = "getPortfolio", "portfolio")
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/metadata.R
\docType{methods}
\name{show,metadata-method}
\alias{show,metadata-method}
\title{Display a metadata object}
\usage{
\S4method{show}{metadata}(object)
}
\arguments{
\item{object}{The metadata object to be displayed}
}
\description{
Display a metadata object
}
| /man/show.metadata.Rd | no_license | jacaronda/loadflex | R | false | false | 358 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/metadata.R
\docType{methods}
\name{show,metadata-method}
\alias{show,metadata-method}
\title{Display a metadata object}
\usage{
\S4method{show}{metadata}(object)
}
\arguments{
\item{object}{The metadata object to be displayed}
}
\description{
Display a metadata object
}
|
if(!exists("NEI")){
NEI <- readRDS("./data/summarySCC_PM25.rds")
}
if(!exists("SCC")){
SCC <- readRDS("./data/Source_Classification_Code.rds")
}
# merge the two data sets
if(!exists("NEISCC")){
NEISCC <- merge(NEI, SCC, by="SCC")
}
library(ggplot2)
# Across the United States, how have emissions from coal combustion-related sources changed from 1999-2008?
# fetch all NEIxSCC records with Short.Name (SCC) Coal
coalMatches <- grepl("coal", NEISCC$Short.Name, ignore.case=TRUE)
subsetNEISCC <- NEISCC[coalMatches, ]
aggregatedTotalByYear <- aggregate(Emissions ~ year, subsetNEISCC, sum)
png("Assign2Plot4.png", width=640, height=480)
g <- ggplot(aggregatedTotalByYear, aes(factor(year), Emissions))
g <- g + geom_bar(stat="identity") +
xlab("year") +
ylab(expression('Total PM'[2.5]*" Emissions")) +
ggtitle('Total Emissions from coal sources from 1999 to 2008')
print(g)
dev.off() | /Question4.R | no_license | jschlich/Exploratory-Data-Analysis-Assignment2 | R | false | false | 967 | r | if(!exists("NEI")){
NEI <- readRDS("./data/summarySCC_PM25.rds")
}
if(!exists("SCC")){
SCC <- readRDS("./data/Source_Classification_Code.rds")
}
# merge the two data sets
if(!exists("NEISCC")){
NEISCC <- merge(NEI, SCC, by="SCC")
}
library(ggplot2)
# Across the United States, how have emissions from coal combustion-related sources changed from 1999-2008?
# fetch all NEIxSCC records with Short.Name (SCC) Coal
coalMatches <- grepl("coal", NEISCC$Short.Name, ignore.case=TRUE)
subsetNEISCC <- NEISCC[coalMatches, ]
aggregatedTotalByYear <- aggregate(Emissions ~ year, subsetNEISCC, sum)
png("Assign2Plot4.png", width=640, height=480)
g <- ggplot(aggregatedTotalByYear, aes(factor(year), Emissions))
g <- g + geom_bar(stat="identity") +
xlab("year") +
ylab(expression('Total PM'[2.5]*" Emissions")) +
ggtitle('Total Emissions from coal sources from 1999 to 2008')
print(g)
dev.off() |
# Fit an elastic-net model (alpha = 0.02) to the autonomic-ganglia training
# set with 10-fold cross-validation, and append the fitted path to a log file.
library(glmnet)

# Column 1 holds the response; columns 4..ncol hold the predictor features.
# NOTE: the original used `head=T`, which only worked through partial
# argument matching against `header`; spelled out here for robustness.
mydata <- read.table("../../../../TrainingSet/FullSet/Classifier/autonomic_ganglia.csv",
                     header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

# Fixed seed so the cross-validation fold assignment is reproducible.
set.seed(123)
glm <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.02,
                 family = "gaussian", standardize = FALSE)

# Append the fitted glmnet path (df / %dev / lambda table) to the run log.
sink("./autonomic_ganglia_012.txt", append = TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Classifier/autonomic_ganglia/autonomic_ganglia_012.R | no_license | esbgkannan/QSMART | R | false | false | 372 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Classifier/autonomic_ganglia.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.02,family="gaussian",standardize=FALSE)
sink('./autonomic_ganglia_012.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
library(shiny)
shinyUI(pageWithSidebar(
headerPanel("Sticky!"),
sidebarPanel(
selectInput("gender", "Gender:", c("Female", "Male")),
selectInput("face", "Expression:", c("Happy", "Sad", "Surprised", "Annoyed")),
selectInput("arms", "Arms:", c("down", "nuetral", "up", "hip", "wave"))
),
mainPanel(
plotOutput("stick", width="400px")
)
)) | /inst/sticky/ui.R | no_license | mtmorgan/Elbo | R | false | false | 400 | r | library(shiny)
shinyUI(pageWithSidebar(
headerPanel("Sticky!"),
sidebarPanel(
selectInput("gender", "Gender:", c("Female", "Male")),
selectInput("face", "Expression:", c("Happy", "Sad", "Surprised", "Annoyed")),
selectInput("arms", "Arms:", c("down", "nuetral", "up", "hip", "wave"))
),
mainPanel(
plotOutput("stick", width="400px")
)
)) |
# Yige Wu @WashU Sep 2021
# Dot plot of tumor-cell surface-marker fold changes across four data types:
# snRNA-seq, snATAC-seq gene activity, bulk RNA-seq and bulk proteomics.

# set up libraries and output directory -----------------------------------
## set working directory
dir_base <- "~/Library/CloudStorage/Box-Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/"
setwd(dir_base)
source("./ccRCC_snRNA_analysis/load_pkgs.R")
source("./ccRCC_snRNA_analysis/functions.R")
source("./ccRCC_snRNA_analysis/variables.R")
source("./ccRCC_snRNA_analysis/plotting.R")
## set run id (date-stamped version tag used in the output directory name)
version_tmp <- 1
run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp)
## set output directory
dir_out <- paste0(makeOutDir(), run_id, "/")
dir.create(dir_out)

# input dependencies ------------------------------------------------------
## surface-marker genes (tumor-vs-PT DEGs overlapped with tumor-vs-other DEGs)
genes_process_df <- fread(data.table = F, input = "./Resources/Analysis_Results/findmarkers/tumor_specific_markers/overlap_tumor_vs_pt_DEGs_w_tumor_vs_other_DEGs/20210824.v1/ccRCC_markers.Surface.20210824.v1.tsv")
## input ATAC fold changes
geneactivity_fc_df <- fread(data.table = F, input = "./Resources/snATAC_Processed_Data/Gene_Activity/FoldChange_ATACGeneActivity_Tumor_vs_AllOtherCells.20210924.tsv")

# specify parameters ---------------------------------------------------
## process genes to plot: drop two manually curated exclusion lists
genes_filter <- genes_process_df$Gene
genes_filter <- genes_filter[!(genes_filter %in% c("PIK3CB", "ARHGEF28", "PTGER3", "PARD3", "GNG12", "EFNA5", "SPIRE1", "LIFR", "PKP4", "SORBS1", "PTPRM", "FBXO16", "PAM"))]
genes_filter <- genes_filter[!(genes_filter %in% c("DPP6", "CPNE8", "EFNA5", "MGLL", "SPIRE1", "SPIRE1", "PLCB1", "OSMR", "SORBS1", "ANO6", "EPB41", "PAM"))]
## display names for the four data types (also used as legend labels)
dataname_snrna <- "Tumor cells vs. non-tumor cells (snRNA-seq)"
dataname_snatac <- "Tumor cells vs. non-tumor cells (snATAC-seq)"
dataname_bulk_rna <- "Tumors vs. NATs (bulk RNA-seq)"
dataname_bulk_protein <- "Tumors vs. NATs (bulk proteomics)"

# make plot data ----------------------------------------------------------
## attach the snATAC gene-activity fold change to each gene
genes_process_df <- merge(x = genes_process_df,
                          y = geneactivity_fc_df %>%
                            rename(log2FC.snATAC = avg_log2FC), by = c("Gene"), all.x = T)
## keep the filtered genes, ordered by decreasing snRNA fold change
plotdata_wide_df <- genes_process_df %>%
  filter(Gene %in% genes_filter) %>%
  select(Gene, avg_log2FC.mean.TumorcellsvsNontumor, log2FC.bulkRNA, log2FC.bulkpro, log2FC.snATAC) %>%
  arrange(desc(avg_log2FC.mean.TumorcellsvsNontumor))
## long format: one row per gene x data type, with fold change on linear scale
plotdata_df <- melt(plotdata_wide_df)
plotdata_df <- plotdata_df %>%
  mutate(data_type = ifelse(variable == "avg_log2FC.mean.TumorcellsvsNontumor", dataname_snrna,
                            ifelse(variable == "log2FC.snATAC", dataname_snatac,
                                   ifelse(variable == "log2FC.bulkRNA", dataname_bulk_rna, dataname_bulk_protein)))) %>%
  mutate(foldchange = 2^value) %>%
  mutate(y_plot = Gene)
summary(plotdata_df$foldchange)
## cap fold changes at 10 so extreme values do not stretch the axis
plotdata_df <- plotdata_df %>%
  mutate(x_plot = ifelse(foldchange >= 10, 10, foldchange))
plotdata_df$y_plot <- factor(x = plotdata_df$Gene, levels = plotdata_wide_df$Gene)
plotdata_df$data_type <- factor(x = plotdata_df$data_type, levels = c(dataname_snrna, dataname_snatac, dataname_bulk_rna, dataname_bulk_protein))
## make colors
## (removed a stray display.brewer.all() debug call that opened an on-screen
## device just to preview palettes)
colors_datatype <- brewer.pal(n = 5, name = "Set1")[c(1, 3, 4, 5)]
names(colors_datatype) <- c(dataname_snrna, dataname_bulk_rna, dataname_bulk_protein, dataname_snatac)

# plot --------------------------------------------------------------------
## dot plot: one row per gene, dodged dots per data type; snRNA dots outlined
p <- ggplot()
p <- p + geom_dotplot(data = plotdata_df, mapping = aes(x = y_plot, y = x_plot, fill = data_type, color = (data_type == dataname_snrna)),
                      binaxis='y', stackdir='center', position=position_dodge(0.6), alpha = 0.7)
p <- p + scale_fill_manual(values = colors_datatype)
p <- p + scale_color_manual(values = c("TRUE" = "black", "FALSE" = NA))
# p <- p + geom_hline(yintercept = 1, linetype = 2, alpha = 0.5)
p <- p + theme_classic(base_size = 12)
p <- p + coord_flip()
p <- p + scale_y_continuous(breaks = seq(0, 10, 2))
p <- p + ylab("Fold change")
p <- p + theme(panel.grid.major.y = element_line(size=.1, color="black" ))
p <- p + theme(axis.text.y = element_text(size = 12, color = "black"), axis.title.y = element_blank())
p <- p + theme(axis.text.x = element_text(size = 12, color = "black"), axis.line.x = element_line(arrow = grid::arrow(length = unit(0.3, "cm"), ends = "last")))
p <- p + theme(legend.position = "top")
p <- p + guides(fill = guide_legend(override.aes = list(size=4), nrow = 4, title = NULL, label.theme = element_text(size = 12)))
## write both raster and vector outputs
file2write <- paste0(dir_out, "Foldchanges", ".png")
png(file2write, width = 600, height = 900, res = 150)
print(p)
dev.off()
file2write <- paste0(dir_out, "Foldchanges", ".pdf")
pdf(file2write, width = 4.25, height = 7, useDingbats = F)
print(p)
dev.off()
| /visualize_expression/bubbleplot/bubbleplot_tumorcell_markers_manual_filtered_foldchanges_wATAC.R | no_license | ding-lab/ccRCC_snRNA_analysis | R | false | false | 4,789 | r | # Yige Wu @WashU Sep 2021
# set up libraries and output directory -----------------------------------
## set working directory
dir_base = "~/Library/CloudStorage/Box-Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/"
setwd(dir_base)
source("./ccRCC_snRNA_analysis/load_pkgs.R")
source("./ccRCC_snRNA_analysis/functions.R")
source("./ccRCC_snRNA_analysis/variables.R")
source("./ccRCC_snRNA_analysis/plotting.R")
## set run id
version_tmp <- 1
run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp)
## set output directory
dir_out <- paste0(makeOutDir(), run_id, "/")
dir.create(dir_out)
# input dependencies ------------------------------------------------------
genes_process_df <- fread(data.table = F, input = "./Resources/Analysis_Results/findmarkers/tumor_specific_markers/overlap_tumor_vs_pt_DEGs_w_tumor_vs_other_DEGs/20210824.v1/ccRCC_markers.Surface.20210824.v1.tsv")
## input ATAC fold changes
geneactivity_fc_df <- fread(data.table = F, input = "./Resources/snATAC_Processed_Data/Gene_Activity/FoldChange_ATACGeneActivity_Tumor_vs_AllOtherCells.20210924.tsv")
# specify parameters ---------------------------------------------------
## process genes to plot
genes_filter <- genes_process_df$Gene
genes_filter <- genes_filter[!(genes_filter %in% c("PIK3CB", "ARHGEF28", "PTGER3", "PARD3", "GNG12", "EFNA5", "SPIRE1", "LIFR", "PKP4", "SORBS1", "PTPRM", "FBXO16", "PAM"))]
genes_filter <- genes_filter[!(genes_filter %in% c("DPP6", "CPNE8", "EFNA5", "MGLL", "SPIRE1", "SPIRE1", "PLCB1", "OSMR", "SORBS1", "ANO6", "EPB41", "PAM"))]
## make
# dataname_snrna <- "Tumor cells vs. non-tumor cells\n(snRNA-seq)"
dataname_snrna <- "Tumor cells vs. non-tumor cells (snRNA-seq)"
dataname_snatac <- "Tumor cells vs. non-tumor cells (snATAC-seq)"
dataname_bulk_rna <- "Tumors vs. NATs (bulk RNA-seq)"
dataname_bulk_protein <- "Tumors vs. NATs (bulk proteomics)"
# make plot data ----------------------------------------------------------
genes_process_df <- merge(x = genes_process_df,
y = geneactivity_fc_df %>%
rename(log2FC.snATAC = avg_log2FC), by = c("Gene"), all.x = T)
plotdata_wide_df <- genes_process_df %>%
filter(Gene %in% genes_filter) %>%
select(Gene, avg_log2FC.mean.TumorcellsvsNontumor, log2FC.bulkRNA, log2FC.bulkpro, log2FC.snATAC) %>%
arrange(desc(avg_log2FC.mean.TumorcellsvsNontumor))
plotdata_df <- melt(plotdata_wide_df)
plotdata_df <- plotdata_df %>%
mutate(data_type = ifelse(variable == "avg_log2FC.mean.TumorcellsvsNontumor", dataname_snrna,
ifelse(variable == "log2FC.snATAC", dataname_snatac,
ifelse(variable == "log2FC.bulkRNA", dataname_bulk_rna, dataname_bulk_protein)))) %>%
mutate(foldchange = 2^value) %>%
mutate(y_plot = Gene)
summary(plotdata_df$foldchange)
plotdata_df <- plotdata_df %>%
mutate(x_plot = ifelse(foldchange >= 10, 10, foldchange))
plotdata_df$y_plot <- factor(x = plotdata_df$Gene, levels = plotdata_wide_df$Gene)
plotdata_df$data_type <- factor(x = plotdata_df$data_type, levels = c(dataname_snrna, dataname_snatac, dataname_bulk_rna, dataname_bulk_protein))
## make colors
display.brewer.all()
colors_datatype <- brewer.pal(n = 5, name = "Set1")[c(1, 3, 4, 5)]
names(colors_datatype) <- c(dataname_snrna, dataname_bulk_rna, dataname_bulk_protein, dataname_snatac)
# plot --------------------------------------------------------------------
p <- ggplot()
p <- p + geom_dotplot(data = plotdata_df, mapping = aes(x = y_plot, y = x_plot, fill = data_type, color = (data_type == dataname_snrna)),
binaxis='y', stackdir='center', position=position_dodge(0.6), alpha = 0.7)
p <- p + scale_fill_manual(values = colors_datatype)
p <- p + scale_color_manual(values = c("TRUE" = "black", "FALSE" = NA))
# p <- p + geom_hline(yintercept = 1, linetype = 2, alpha = 0.5)
p <- p + theme_classic(base_size = 12)
p <- p + coord_flip()
p <- p + scale_y_continuous(breaks = seq(0, 10, 2))
p <- p + ylab("Fold change")
p <- p + theme(panel.grid.major.y = element_line(size=.1, color="black" ))
p <- p + theme(axis.text.y = element_text(size = 12, color = "black"), axis.title.y = element_blank())
p <- p + theme(axis.text.x = element_text(size = 12, color = "black"), axis.line.x = element_line(arrow = grid::arrow(length = unit(0.3, "cm"), ends = "last")))
p <- p + theme(legend.position = "top")
p <- p + guides(fill = guide_legend(override.aes = list(size=4), nrow = 4, title = NULL, label.theme = element_text(size = 12)))
file2write <- paste0(dir_out, "Foldchanges", ".png")
png(file2write, width = 600, height = 900, res = 150)
print(p)
dev.off()
file2write <- paste0(dir_out, "Foldchanges", ".pdf")
pdf(file2write, width = 4.25, height = 7, useDingbats = F)
print(p)
dev.off()
|
#!/applications/R/R-4.0.0/bin/Rscript
# For each feat:
# 1. calculate a measure of among-read agreement in methylation state (e.g., Fleiss' kappa)
# Get reads that overlap each featName
# Overlaps between feature ranges and per-read methylation-call positions on
# one strand, ignoring strand during the overlap test. Returns the Hits
# object from GenomicRanges::findOverlaps().
# NOTE: a feature that overlaps no call positions simply yields no hits;
# callers must handle that case (relevant for smaller genome bin sizes).
fOverlapsStrand <- function(chr_featGR, chr_tabGR_str) {
  findOverlaps(
    query = chr_featGR,
    subject = chr_tabGR_str,
    type = "any",
    select = "all",
    ignore.strand = TRUE
  )
}
# Function to calculate among-read agreement for a given feature x
# For one feature (row featNum of chr_featGR), compute among-read agreement in
# methylation state across the reads overlapping that feature, together with
# summary methylation and site-to-site stochasticity statistics. Returns a
# one-row data.frame; all *_str statistic columns are NaN when the feature has
# no overlapping calls or fails the min/max read and cytosine thresholds.
#
# Arguments:
#   fOverlaps_str  Hits object from fOverlapsStrand() (features x calls)
#   chr_tabGR_str  GRanges of per-read methylation calls with `read` and
#                  `call` metadata columns
#   chr_featGR     GRanges of features with name/score/DNA_RNA/ltr_identity/
#                  feature_width metadata columns
#   featNum        index of the feature to process
#
# NOTE(review): relies on globals NAmax, min_Cs, max_Cs, min_reads and
# max_reads being defined by the calling script — confirm before reuse.
makeDFx_strand <- function(fOverlaps_str, chr_tabGR_str, chr_featGR, featNum) {
  # Calls (cytosine positions x reads) overlapping this feature
  chr_tabGR_str_x <- chr_tabGR_str[subjectHits(fOverlaps_str[queryHits(fOverlaps_str) == featNum])]
  if(length(chr_tabGR_str_x) > 0) {
    chr_tabGR_str_x <- sortSeqlevels(chr_tabGR_str_x)
    chr_tabGR_str_x <- sort(chr_tabGR_str_x, by = ~ read + start)
    df_str_x <- data.frame(pos = start(chr_tabGR_str_x),
                           read = chr_tabGR_str_x$read,
                           call = chr_tabGR_str_x$call)
    # Reshape to a positions-by-reads table of methylation calls
    pwider_str_x <- as.data.frame(tidyr::pivot_wider(data = df_str_x,
                                                     names_from = read,
#                                                    names_prefix = "read_",
                                                     values_from = call))
    pwider_str_x <- pwider_str_x[ with(data = pwider_str_x, expr = order(pos)), ]
    # Use positions as row names and drop the position column
    rownames(pwider_str_x) <- pwider_str_x[,1]
    pwider_str_x <- pwider_str_x[ , -1, drop = F]
    # kappam.fleiss() uses only rows (cytosines) with complete information
    # across all columns (reads)
    # Therefore, remove columns (reads) containing >= NAmax proportion NAs to
    # to retain more cytosines in the data.frame for kappa calculation
    mask_cols <- apply(pwider_str_x, MARGIN = 2, FUN = function(col) sum(is.na(col)) >= nrow(pwider_str_x) * NAmax)
    # Report proportion of columns (reads) to be retained
    prop_reads_retained_str_x <- sum(!(mask_cols)) / ncol(pwider_str_x)
    # Conditionally remove columns (reads) containing >= NAmax proportion NAs
    if(sum(mask_cols) > 0) {
      pwider_str_x <- pwider_str_x[ , !(mask_cols), drop = F]
    }
    # Report number of columns (reads) to be retained for kappa and other calculations
    num_reads_retained_str_x <- ncol(pwider_str_x)
    # Identify rows (cytosines) containing any NAs across the retained columns (reads),
    # as these will not be used by kappam.fleiss()
    mask_rows <- apply(pwider_str_x, MARGIN = 1, FUN = function(row) sum(is.na(row)) > 0)
    # Report proportion of rows (cytosines) to be retained
    prop_Cs_retained_str_x <- sum(!(mask_rows)) / nrow(pwider_str_x)
    # Keep rows (cytosines) with NAs for other calculations
    stocha_pwider_str_x <- pwider_str_x
    # Conditionally remove rows (cytosines) containing any NAs
    if(sum(mask_rows) > 0) {
      pwider_str_x <- pwider_str_x[ !(mask_rows), , drop = F]
    }
    # Report number of rows (cytosines) to be retained for kappa and other calculations
    num_Cs_retained_str_x <- nrow(pwider_str_x)
    stocha_num_Cs_retained_str_x <- nrow(stocha_pwider_str_x)
    # Calculate mean methylation for region (grand mean over all calls)
    mean_mC_pwider_str_x <- mean(as.matrix(stocha_pwider_str_x), na.rm = T)
    # Calculate mean and standard deviation of per-read mean methylation
    mean_read_mC_pwider_str_x <- mean(colMeans(stocha_pwider_str_x, na.rm = T), na.rm = T)
    sd_read_mC_pwider_str_x <- sd(colMeans(stocha_pwider_str_x, na.rm = T), na.rm = T)
    # Calculate Fleiss' kappa, but only when the complete-case table passes
    # the read-count and cytosine-count thresholds
    if(nrow(pwider_str_x) >= min_Cs && nrow(pwider_str_x) <= max_Cs &&
       ncol(pwider_str_x) >= min_reads && ncol(pwider_str_x) <= max_reads) {
      # Calculate Fleiss' kappa (reads = raters, cytosines = subjects)
      fkappa_pwider_str_x <- irr::kappam.fleiss(pwider_str_x, detail = F)
      # Sanity checks
      stopifnot(fkappa_pwider_str_x$raters == num_reads_retained_str_x)
      stopifnot(fkappa_pwider_str_x$subjects == num_Cs_retained_str_x)
      fkappa_pwider_str_x_kappa <- fkappa_pwider_str_x$value
      fkappa_pwider_str_x_pval <- fkappa_pwider_str_x$p.value
      fkappa_pwider_str_x_zstat <- fkappa_pwider_str_x$statistic
      fkappa_pwider_str_x_reads <- fkappa_pwider_str_x$raters
      fkappa_pwider_str_x_Cs <- fkappa_pwider_str_x$subjects
    } else {
      fkappa_pwider_str_x_kappa <- NaN
      fkappa_pwider_str_x_pval <- NaN
      fkappa_pwider_str_x_zstat <- NaN
      fkappa_pwider_str_x_reads <- NaN
      fkappa_pwider_str_x_Cs <- NaN
    }
    # Calculate Krippendorff's alpha, an inter-rater reliability or agreement metric that can handle incomplete data,
    # and for which "computed reliabilities are comparable across any numbers of coders [raters], values, ... and unequal sample sizes.";
    # https://en.wikipedia.org/wiki/Krippendorff%27s_alpha
    # Also calculate site-to-site stochasticity
    if(nrow(stocha_pwider_str_x) >= min_Cs && nrow(stocha_pwider_str_x) <= max_Cs &&
       ncol(stocha_pwider_str_x) >= min_reads && ncol(stocha_pwider_str_x) <= max_reads) {
      # Calculate Krippendorff's alpha (kripp.alpha expects raters in rows,
      # hence the transpose)
      kalpha_pwider_str_x <- irr::kripp.alpha(t(stocha_pwider_str_x), method = "nominal")
      # Sanity checks
      stopifnot(kalpha_pwider_str_x$raters == num_reads_retained_str_x)
      stopifnot(kalpha_pwider_str_x$subjects == stocha_num_Cs_retained_str_x)
      kalpha_pwider_str_x_alpha <- kalpha_pwider_str_x$value
      kalpha_pwider_str_x_nmatchval <- kalpha_pwider_str_x$nmatchval
      # Calculate absolute differences between methylation statuses of neighbouring Cs within each read
      absdiff_pwider_str_x <- abs(diff(as.matrix(stocha_pwider_str_x)))
      # Calculate the mean absolute difference for each read
      colMeans_absdiff_pwider_str_x <- colMeans(absdiff_pwider_str_x, na.rm = T)
      # Across all reads overlapping a given feature, calculate the mean and median of mean absolute differences
      mean_stocha_pwider_str_x <- mean(colMeans_absdiff_pwider_str_x, na.rm = T)
      median_stocha_pwider_str_x <- median(colMeans_absdiff_pwider_str_x, na.rm = T)
      # Across all reads overlapping a given feature, calculate the sd of mean absolute differences
      sd_stocha_pwider_str_x <- sd(colMeans_absdiff_pwider_str_x, na.rm = T)
      # Report number of rows (cytosines) retained for other calculations
      stocha_pwider_str_x_Cs <- nrow(stocha_pwider_str_x)
      # Calculate autocorrelations (lags 1..10) between methylation statuses of
      # neighbouring Cs within each read; NAs are passed through
      acf_pwider_str_x_list <- apply(stocha_pwider_str_x, MARGIN = 2,
                                     FUN = function(col) acf(col, lag.max = 10, plot = F, na.action = na.pass))
      # For each read, summarise its lag>=1 autocorrelations (min/max/mean/
      # median), then average across reads; reads whose acf sums to 0 are
      # treated as missing
      mean_min_acf_pwider_str_x <- mean(sapply(seq_along(acf_pwider_str_x_list), function(col) {
        if(sum(acf_pwider_str_x_list[[col]]$acf, na.rm = T) != 0) {
          min(as.vector(acf_pwider_str_x_list[[col]]$acf)[-1], na.rm = T)
        } else {
          NA
        }
      }), na.rm = T)
      mean_max_acf_pwider_str_x <- mean(sapply(seq_along(acf_pwider_str_x_list), function(col) {
        if(sum(acf_pwider_str_x_list[[col]]$acf, na.rm = T) != 0) {
          max(as.vector(acf_pwider_str_x_list[[col]]$acf)[-1], na.rm = T)
        } else {
          NA
        }
      }), na.rm = T)
      mean_mean_acf_pwider_str_x <- mean(sapply(seq_along(acf_pwider_str_x_list), function(col) {
        if(sum(acf_pwider_str_x_list[[col]]$acf, na.rm = T) != 0) {
          mean(as.vector(acf_pwider_str_x_list[[col]]$acf)[-1], na.rm = T)
        } else {
          NA
        }
      }), na.rm = T)
      mean_median_acf_pwider_str_x <- mean(sapply(seq_along(acf_pwider_str_x_list), function(col) {
        if(sum(acf_pwider_str_x_list[[col]]$acf, na.rm = T) != 0) {
          median(as.vector(acf_pwider_str_x_list[[col]]$acf)[-1], na.rm = T)
        } else {
          NA
        }
      }), na.rm = T)
    } else {
      kalpha_pwider_str_x_alpha <- NaN
      kalpha_pwider_str_x_nmatchval <- NaN
      mean_stocha_pwider_str_x <- NaN
      median_stocha_pwider_str_x <- NaN
      sd_stocha_pwider_str_x <- NaN
      stocha_pwider_str_x_Cs <- NaN
      mean_min_acf_pwider_str_x <- NaN
      mean_max_acf_pwider_str_x <- NaN
      mean_mean_acf_pwider_str_x <- NaN
      mean_median_acf_pwider_str_x <- NaN
    }
    # Assemble the one-row result: feature coordinates/metadata plus the
    # agreement and stochasticity statistics computed above
    fk_df_str_win_x <- data.frame(chr = seqnames(chr_featGR[featNum]),
                                  start = start(chr_featGR[featNum]),
                                  end = end(chr_featGR[featNum]),
                                  midpoint = round((start(chr_featGR[featNum])+end(chr_featGR[featNum]))/2),
                                  strand = strand(chr_featGR[featNum]),
                                  name = chr_featGR[featNum]$name,
                                  score = chr_featGR[featNum]$score,
                                  DNA_RNA = chr_featGR[featNum]$DNA_RNA,
                                  ltr_identity = chr_featGR[featNum]$ltr_identity,
                                  feature_width = chr_featGR[featNum]$feature_width,
                                  mean_mC_str = mean_mC_pwider_str_x,
                                  mean_read_mC_str = mean_read_mC_pwider_str_x,
                                  sd_read_mC_str = sd_read_mC_pwider_str_x,
                                  fk_kappa_str = fkappa_pwider_str_x_kappa,
                                  fk_pval_str = fkappa_pwider_str_x_pval,
                                  fk_zstat_str = fkappa_pwider_str_x_zstat,
                                  fk_reads_str = fkappa_pwider_str_x_reads,
                                  fk_Cs_str = fkappa_pwider_str_x_Cs,
                                  ka_alpha_str = kalpha_pwider_str_x_alpha,
                                  ka_nmatchval_str = kalpha_pwider_str_x_nmatchval,
                                  mean_stocha_str = mean_stocha_pwider_str_x,
                                  median_stocha_str = median_stocha_pwider_str_x,
                                  sd_stocha_str = sd_stocha_pwider_str_x,
                                  stocha_Cs_str = stocha_pwider_str_x_Cs,
                                  mean_min_acf_str = mean_min_acf_pwider_str_x,
                                  mean_max_acf_str = mean_max_acf_pwider_str_x,
                                  mean_mean_acf_str = mean_mean_acf_pwider_str_x,
                                  mean_median_acf_str = mean_median_acf_pwider_str_x
                                 )
  } else {
    # No overlapping calls: emit the feature metadata with NaN statistics
    fk_df_str_win_x <- data.frame(chr = seqnames(chr_featGR[featNum]),
                                  start = start(chr_featGR[featNum]),
                                  end = end(chr_featGR[featNum]),
                                  midpoint = round((start(chr_featGR[featNum])+end(chr_featGR[featNum]))/2),
                                  strand = strand(chr_featGR[featNum]),
                                  name = chr_featGR[featNum]$name,
                                  score = chr_featGR[featNum]$score,
                                  DNA_RNA = chr_featGR[featNum]$DNA_RNA,
                                  ltr_identity = chr_featGR[featNum]$ltr_identity,
                                  feature_width = chr_featGR[featNum]$feature_width,
                                  mean_mC_str = NaN,
                                  mean_read_mC_str = NaN,
                                  sd_read_mC_str = NaN,
                                  fk_kappa_str = NaN,
                                  fk_pval_str = NaN,
                                  fk_zstat_str = NaN,
                                  fk_reads_str = NaN,
                                  fk_Cs_str = NaN,
                                  ka_alpha_str = NaN,
                                  ka_nmatchval_str = NaN,
                                  mean_stocha_str = NaN,
                                  median_stocha_str = NaN,
                                  sd_stocha_str = NaN,
                                  stocha_Cs_str = NaN,
                                  mean_min_acf_str = NaN,
                                  mean_max_acf_str = NaN,
                                  mean_mean_acf_str = NaN,
                                  mean_median_acf_str = NaN
                                 )
  }
  fk_df_str_win_x
}
| /t2t-col.20210610/deepsignal_DNAmeth/per_read_analysis/among_read_variation/feature_among_read_variation_scoring_func_TEs_function.R | no_license | ajtock/nanopore | R | false | false | 12,619 | r | #!/applications/R/R-4.0.0/bin/Rscript
# For each feat:
# 1. calculate a measure of among-read agreement in methylation state (e.g., Fleiss' kappa)
# Get reads that overlap each featName
fOverlapsStrand <- function(chr_featGR, chr_tabGR_str) {
## Note: findOverlaps() approach does not work where a window does not overlap
## any positions in chr_tabGR, which can occur with smaller genomeBinSize
# Identify overlapping windows and midpoint coordinates
fOverlaps_str <- findOverlaps(query = chr_featGR,
subject = chr_tabGR_str,
type = "any",
select = "all",
ignore.strand = T)
fOverlaps_str
}
# Function to calculate among-read agreement for a given feature x
makeDFx_strand <- function(fOverlaps_str, chr_tabGR_str, chr_featGR, featNum) {
chr_tabGR_str_x <- chr_tabGR_str[subjectHits(fOverlaps_str[queryHits(fOverlaps_str) == featNum])]
if(length(chr_tabGR_str_x) > 0) {
chr_tabGR_str_x <- sortSeqlevels(chr_tabGR_str_x)
chr_tabGR_str_x <- sort(chr_tabGR_str_x, by = ~ read + start)
df_str_x <- data.frame(pos = start(chr_tabGR_str_x),
read = chr_tabGR_str_x$read,
call = chr_tabGR_str_x$call)
pwider_str_x <- as.data.frame(tidyr::pivot_wider(data = df_str_x,
names_from = read,
# names_prefix = "read_",
values_from = call))
pwider_str_x <- pwider_str_x[ with(data = pwider_str_x, expr = order(pos)), ]
rownames(pwider_str_x) <- pwider_str_x[,1]
pwider_str_x <- pwider_str_x[ , -1, drop = F]
# kappam.fleiss() uses only rows (cytosines) with complete information
# across all columns (reads)
# Therefore, remove columns (reads) containing >= NAmax proportion NAs to
# to retain more cytosines in the data.frame for kappa calculation
mask_cols <- apply(pwider_str_x, MARGIN = 2, FUN = function(col) sum(is.na(col)) >= nrow(pwider_str_x) * NAmax)
# Report proportion of columns (reads) to be retained
prop_reads_retained_str_x <- sum(!(mask_cols)) / ncol(pwider_str_x)
# Conditionally remove columns (reads) containing >= NAmax proportion NAs
if(sum(mask_cols) > 0) {
pwider_str_x <- pwider_str_x[ , !(mask_cols), drop = F]
}
# Report number of columns (reads) to be retained for kappa and other calculations
num_reads_retained_str_x <- ncol(pwider_str_x)
# Identify rows (cytosines) containing any NAs across the retained columns (reads),
# as these will not be used by kappam.fleiss()
mask_rows <- apply(pwider_str_x, MARGIN = 1, FUN = function(row) sum(is.na(row)) > 0)
# Report proportion of rows (cytosines) to be retained
prop_Cs_retained_str_x <- sum(!(mask_rows)) / nrow(pwider_str_x)
# Keep rows (cytosines) with NAs for other calculations
stocha_pwider_str_x <- pwider_str_x
# Conditionally remove rows (cytosines) containing any NAs
if(sum(mask_rows) > 0) {
pwider_str_x <- pwider_str_x[ !(mask_rows), , drop = F]
}
# Report number of rows (cytosines) to be retained for kappa and other calculations
num_Cs_retained_str_x <- nrow(pwider_str_x)
stocha_num_Cs_retained_str_x <- nrow(stocha_pwider_str_x)
# Calculate mean methylation for region
mean_mC_pwider_str_x <- mean(as.matrix(stocha_pwider_str_x), na.rm = T)
# Calculate mean and standard deviation of per-read mean methylation
mean_read_mC_pwider_str_x <- mean(colMeans(stocha_pwider_str_x, na.rm = T), na.rm = T)
sd_read_mC_pwider_str_x <- sd(colMeans(stocha_pwider_str_x, na.rm = T), na.rm = T)
# Calculate Fleiss' kappa
if(nrow(pwider_str_x) >= min_Cs && nrow(pwider_str_x) <= max_Cs &&
ncol(pwider_str_x) >= min_reads && ncol(pwider_str_x) <= max_reads) {
# Calculate Fleiss' kappa
fkappa_pwider_str_x <- irr::kappam.fleiss(pwider_str_x, detail = F)
# Sanity checks
stopifnot(fkappa_pwider_str_x$raters == num_reads_retained_str_x)
stopifnot(fkappa_pwider_str_x$subjects == num_Cs_retained_str_x)
fkappa_pwider_str_x_kappa <- fkappa_pwider_str_x$value
fkappa_pwider_str_x_pval <- fkappa_pwider_str_x$p.value
fkappa_pwider_str_x_zstat <- fkappa_pwider_str_x$statistic
fkappa_pwider_str_x_reads <- fkappa_pwider_str_x$raters
fkappa_pwider_str_x_Cs <- fkappa_pwider_str_x$subjects
} else {
fkappa_pwider_str_x_kappa <- NaN
fkappa_pwider_str_x_pval <- NaN
fkappa_pwider_str_x_zstat <- NaN
fkappa_pwider_str_x_reads <- NaN
fkappa_pwider_str_x_Cs <- NaN
}
# Calculate Krippendorff's alpha, an inter-rater reliability or agreement metric that can handle incomplete data,
# and for which "computed reliabilities are comparable across any numbers of coders [raters], values, ... and unequal sample sizes.";
# https://en.wikipedia.org/wiki/Krippendorff%27s_alpha
# Also calculate site-to-site stochasticity
if(nrow(stocha_pwider_str_x) >= min_Cs && nrow(stocha_pwider_str_x) <= max_Cs &&
ncol(stocha_pwider_str_x) >= min_reads && ncol(stocha_pwider_str_x) <= max_reads) {
# Calculate Krippendorff's alpha
kalpha_pwider_str_x <- irr::kripp.alpha(t(stocha_pwider_str_x), method = "nominal")
# Sanity checks
stopifnot(kalpha_pwider_str_x$raters == num_reads_retained_str_x)
stopifnot(kalpha_pwider_str_x$subjects == stocha_num_Cs_retained_str_x)
kalpha_pwider_str_x_alpha <- kalpha_pwider_str_x$value
kalpha_pwider_str_x_nmatchval <- kalpha_pwider_str_x$nmatchval
# Calculate absolute differences between methylation statuses of neighbouring Cs within each read
absdiff_pwider_str_x <- abs(diff(as.matrix(stocha_pwider_str_x)))
# Calculate the mean absolute difference for each read
colMeans_absdiff_pwider_str_x <- colMeans(absdiff_pwider_str_x, na.rm = T)
# Across all reads overlapping a given feature, calculate the mean and median of mean absolute differences
mean_stocha_pwider_str_x <- mean(colMeans_absdiff_pwider_str_x, na.rm = T)
median_stocha_pwider_str_x <- median(colMeans_absdiff_pwider_str_x, na.rm = T)
# Across all reads overlapping a given feature, calculate the sd of mean absolute differences
sd_stocha_pwider_str_x <- sd(colMeans_absdiff_pwider_str_x, na.rm = T)
# Report number of rows (cytosines) retained for other calculations
stocha_pwider_str_x_Cs <- nrow(stocha_pwider_str_x)
# Calculate autocorrelations between methylation statuses of neighbouring Cs within each read
acf_pwider_str_x_list <- apply(stocha_pwider_str_x, MARGIN = 2,
FUN = function(col) acf(col, lag.max = 10, plot = F, na.action = na.pass))
mean_min_acf_pwider_str_x <- mean(sapply(seq_along(acf_pwider_str_x_list), function(col) {
if(sum(acf_pwider_str_x_list[[col]]$acf, na.rm = T) != 0) {
min(as.vector(acf_pwider_str_x_list[[col]]$acf)[-1], na.rm = T)
} else {
NA
}
}), na.rm = T)
mean_max_acf_pwider_str_x <- mean(sapply(seq_along(acf_pwider_str_x_list), function(col) {
if(sum(acf_pwider_str_x_list[[col]]$acf, na.rm = T) != 0) {
max(as.vector(acf_pwider_str_x_list[[col]]$acf)[-1], na.rm = T)
} else {
NA
}
}), na.rm = T)
mean_mean_acf_pwider_str_x <- mean(sapply(seq_along(acf_pwider_str_x_list), function(col) {
if(sum(acf_pwider_str_x_list[[col]]$acf, na.rm = T) != 0) {
mean(as.vector(acf_pwider_str_x_list[[col]]$acf)[-1], na.rm = T)
} else {
NA
}
}), na.rm = T)
mean_median_acf_pwider_str_x <- mean(sapply(seq_along(acf_pwider_str_x_list), function(col) {
if(sum(acf_pwider_str_x_list[[col]]$acf, na.rm = T) != 0) {
median(as.vector(acf_pwider_str_x_list[[col]]$acf)[-1], na.rm = T)
} else {
NA
}
}), na.rm = T)
} else {
kalpha_pwider_str_x_alpha <- NaN
kalpha_pwider_str_x_nmatchval <- NaN
mean_stocha_pwider_str_x <- NaN
median_stocha_pwider_str_x <- NaN
sd_stocha_pwider_str_x <- NaN
stocha_pwider_str_x_Cs <- NaN
mean_min_acf_pwider_str_x <- NaN
mean_max_acf_pwider_str_x <- NaN
mean_mean_acf_pwider_str_x <- NaN
mean_median_acf_pwider_str_x <- NaN
}
fk_df_str_win_x <- data.frame(chr = seqnames(chr_featGR[featNum]),
start = start(chr_featGR[featNum]),
end = end(chr_featGR[featNum]),
midpoint = round((start(chr_featGR[featNum])+end(chr_featGR[featNum]))/2),
strand = strand(chr_featGR[featNum]),
name = chr_featGR[featNum]$name,
score = chr_featGR[featNum]$score,
DNA_RNA = chr_featGR[featNum]$DNA_RNA,
ltr_identity = chr_featGR[featNum]$ltr_identity,
feature_width = chr_featGR[featNum]$feature_width,
mean_mC_str = mean_mC_pwider_str_x,
mean_read_mC_str = mean_read_mC_pwider_str_x,
sd_read_mC_str = sd_read_mC_pwider_str_x,
fk_kappa_str = fkappa_pwider_str_x_kappa,
fk_pval_str = fkappa_pwider_str_x_pval,
fk_zstat_str = fkappa_pwider_str_x_zstat,
fk_reads_str = fkappa_pwider_str_x_reads,
fk_Cs_str = fkappa_pwider_str_x_Cs,
ka_alpha_str = kalpha_pwider_str_x_alpha,
ka_nmatchval_str = kalpha_pwider_str_x_nmatchval,
mean_stocha_str = mean_stocha_pwider_str_x,
median_stocha_str = median_stocha_pwider_str_x,
sd_stocha_str = sd_stocha_pwider_str_x,
stocha_Cs_str = stocha_pwider_str_x_Cs,
mean_min_acf_str = mean_min_acf_pwider_str_x,
mean_max_acf_str = mean_max_acf_pwider_str_x,
mean_mean_acf_str = mean_mean_acf_pwider_str_x,
mean_median_acf_str = mean_median_acf_pwider_str_x
)
} else {
fk_df_str_win_x <- data.frame(chr = seqnames(chr_featGR[featNum]),
start = start(chr_featGR[featNum]),
end = end(chr_featGR[featNum]),
midpoint = round((start(chr_featGR[featNum])+end(chr_featGR[featNum]))/2),
strand = strand(chr_featGR[featNum]),
name = chr_featGR[featNum]$name,
score = chr_featGR[featNum]$score,
DNA_RNA = chr_featGR[featNum]$DNA_RNA,
ltr_identity = chr_featGR[featNum]$ltr_identity,
feature_width = chr_featGR[featNum]$feature_width,
mean_mC_str = NaN,
mean_read_mC_str = NaN,
sd_read_mC_str = NaN,
fk_kappa_str = NaN,
fk_pval_str = NaN,
fk_zstat_str = NaN,
fk_reads_str = NaN,
fk_Cs_str = NaN,
ka_alpha_str = NaN,
ka_nmatchval_str = NaN,
mean_stocha_str = NaN,
median_stocha_str = NaN,
sd_stocha_str = NaN,
stocha_Cs_str = NaN,
mean_min_acf_str = NaN,
mean_max_acf_str = NaN,
mean_mean_acf_str = NaN,
mean_median_acf_str = NaN
)
}
fk_df_str_win_x
}
|
# Hard-coded project root; setwd() so the relative data paths below resolve.
# NOTE(review): machine-specific path — consider parameterising.
path <- "/Users/pba/github/simulacao/FortranRIntegration"
setwd(path)

library(RCurl)
library(rjson)
library(FortranRIntegration)
library(shiny)
library(ggplot2)
library(reshape)
library(scales)

# Load the local weather / irrigation / soil / plant inputs from the
# package data directory.
data_dir <- paste(path, "/data/", sep = "")
w <- getWeatherDataFromTxt(data_dir)
i <- getIrrigDataFromTxt(data_dir)
soil <- getSoilDataFromTxt(data_dir)
plant <- getPlantDataFromTxt(data_dir)

# Collect the INMET station list from the SISALERT REST API.
# NOTE(review): ssl.verifypeer = FALSE disables TLS certificate checking;
# kept to preserve behaviour, but worth revisiting.
dataCurl <- getURLContent("http://dev.sisalert.com.br/apirest/api/v1/stations/src/INMET/lobs/true", ssl.verifypeer = FALSE)
jsonStation <- fromJSON(dataCurl)

# Flatten the JSON station records into one row per station.
station_ids <- sapply(jsonStation, function(x) x[[1]])
station_names <- sapply(jsonStation, function(x) x[[2]])
station_codes <- sapply(jsonStation, function(x) x$metaData$weather[[3]])
station_maxdt <- as.character(sapply(jsonStation, function(x) x$lobs$datetime))
station_tz <- as.character(sapply(jsonStation, function(x) x$location$zoneName))
stations <- data.frame("id" = station_ids, "name" = station_names,
                       "code" = station_codes, "maxdatetime" = station_maxdt,
                       "timezone" = station_tz)

# Keep only the four stations of interest.
dataStation <- subset(stations, id=='564f796a16af35ca3decd181' | id=='564f796916af35ca3decd180' | id=='564f7b0316af35ca3decd292' | id=='564f7ce316af35ca3decd3e1')

# Rename the first two columns and coerce them back to character.
colnames(dataStation)[1] <- "ID"
colnames(dataStation)[2] <- "Name"
dataStation$Name <- as.character(dataStation$Name)
dataStation$ID <- as.character(dataStation$ID)

# Observation-model URL per station, covering Nov 2013 through Dec 2014.
dataStation$url <-
  paste0(
    "http://dev.sisalert.com.br/apirest/api/v1/data/station/model/",
    dataStation$ID, "/range/11-01-2013/12-31-2014")

# Soil-type lookup used by the UI selector.
dataSoils <- data.frame(c(1, 2, 3, 4), c("Arenoso", "Medio", "Argiloso", "Default"))
colnames(dataSoils) <- c("ID", "Name")
dataSoils$Name <- as.character(dataSoils$Name)
#
#
# Functions functionModelVanGenucthen and functionSolos were provided by the SOILS GROUP
#
# Van Genuchten (1980) model parameters for a sandy soil with a clay content of 152 g kg-1;
# data available in Carducci et al. (2011)
# Van Genuchten (1980) soil water retention curve.
# thpmp: residual (wilting-point) water content; thsat: saturated water
# content; a, n: curve shape parameters; solo: matric potential
# (kPa, per the calls made in functionSolos - TODO confirm units).
functionModelVanGenucthen <- function(thpmp, thsat, a, n, solo) {
  shape_exp <- 1 - 1 / n
  thpmp + (thsat - thpmp) / (1 + (a * solo)^n)^shape_exp
}
# Derive the soil hydrological parameters for one of the supported soil
# types using the Van Genuchten (1980) retention model.
#
# tipoSolo: 1 = sandy (152 g kg-1 clay), 2 = medium (420 g kg-1 clay),
#           3 = clayey (716 g kg-1 clay). Any other value returns NULL,
#           matching the original behaviour.
#
# Returns a one-row data.frame with columns:
#   CN   - runoff curve number
#   DP   - soil profile depth (cm)
#   STp  - saturated water content (theta at 0 kPa)
#   FCp  - field capacity (theta at 10 kPa)
#   WPp  - permanent wilting point (theta at 1500 kPa), rounded to 3 digits
#   DRNp - daily drainage fraction (STp - FCp)
#   SWC  - available water in the profile (mm)
functionSolos <- function(tipoSolo){
  # Per-soil-type parameters: curve number plus Van Genuchten constants.
  # Replaces three near-identical copy/paste branches.
  soil_params <- list(
    `1` = list(CN = 64, thpmp = 0.046, thsat = 0.331, a = 0.719, n = 1.841),
    `2` = list(CN = 76, thpmp = 0.190, thsat = 0.645, a = 1.398, n = 1.398),
    `3` = list(CN = 84, thpmp = 0.250, thsat = 0.806, a = 1.786, n = 1.599)
  )
  p <- soil_params[[as.character(tipoSolo)]]
  if (is.null(p)) {
    # Unknown soil type: the original fell through all branches and
    # returned NULL; keep that contract.
    return(NULL)
  }
  # Van Genuchten retention curve for this soil (local copy so the
  # function is self-contained).
  vg <- function(potential) {
    p$thpmp + (p$thsat - p$thpmp) /
      (1 + (p$a * potential)^p$n)^(1 - 1 / p$n)
  }
  # Soil profile depth (cm) - identical for all supported types.
  DP <- 200
  STp <- vg(0)                  # saturation (0 kPa)
  FCp <- vg(10)                 # field capacity (10 kPa)
  WPp <- round(vg(1500), digits = 3)  # wilting point (1500 kPa)
  DRNp <- STp - FCp             # daily drainage fraction
  SWC <- DRNp * DP * 10         # available water depth (mm)
  data.frame(CN = p$CN, DP, STp, FCp, WPp, DRNp, SWC)
}
#
#
# Helper functions for the meteorological plots
# Multi-panel plot helper
# Arrange several (gg)plots on a single page.
#
# ...      : individual plot objects
# plotlist : optional list of additional plots, appended after ...
# file     : unused; kept only for backward compatibility with callers
# cols     : number of columns when no explicit layout is supplied
# layout   : optional matrix; cells containing i mark where plot i goes
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  plots <- c(list(...), plotlist)
  numPlots <- length(plots)   # was `=` at statement level; use `<-`
  if (is.null(layout)) {
    # Default layout fills the grid column by column (matrix() default).
    layout <- matrix(seq_len(cols * ceiling(numPlots / cols)),
                     ncol = cols, nrow = ceiling(numPlots / cols))
  }
  if (numPlots == 1) {
    print(plots[[1]])
  } else {
    # Explicit grid:: calls instead of library(grid) inside the function,
    # which attached the package as a hidden side effect.
    grid::grid.newpage()
    grid::pushViewport(grid::viewport(
      layout = grid::grid.layout(nrow(layout), ncol(layout))))
    for (i in seq_len(numPlots)) {
      # Row/column position(s) of plot i in the layout matrix.
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]],
            vp = grid::viewport(layout.pos.row = matchidx$row,
                                layout.pos.col = matchidx$col))
    }
  }
}
# Convert a factor with numeric levels into a numeric vector
as.numeric.factor <- function(x) {as.numeric(levels(x))[x]}
#################
# Shiny Server
#################
# Server logic: renders the sidebar UI, and on "Rodar Modelo" fetches the
# selected station's weather data, runs the crop model, and wires up all
# result tables and plots.
shinyServer(function(input, output, session) {
  # Sidebar: station selector, soil-type selector, run button and a
  # message area used for validation feedback.
  output$ui <- renderUI({
    sidebarPanel(
      h4("Instruções"),
      p("Clique em Rodar Modelo após Escolher a Estação e o Tipo de Solo."),
      br(),
      selectInput(inputId = "cbxStations",
                  label = "Escolher a estação",
                  choices = dataStation$Name,
                  selectize = TRUE),
      selectInput(inputId = "cbxSoils",
                  label = "Escolher o tipo de solo",
                  choices = dataSoils$Name,
                  selectize = TRUE),
      actionButton("goSimulation", "Rodar Modelo"),
      uiOutput("mesg")
    )})
  # Data-endpoint URL of the currently selected station (pre-built in
  # dataStation$url during app setup).
  getSelectionKey <- function() {
    #return(as.character(input$cbxStations))
    return(as.character(subset(dataStation, Name == input$cbxStations)$url))
  }
  # Soil parameter set for the selected soil type. Type 4 ("Default")
  # uses the soil data loaded from disk; types 1-3 come from the
  # Van Genuchten model via functionSolos().
  getSelectionSoil <- function() {
    soilId <- as.integer(subset(dataSoils, Name == input$cbxSoils)$ID)
    if (soilId == 4) {
      return(soil)
    } else {
      return(functionSolos(soilId))
    }
  }
  # Run the simulation when the button is pressed (clears any previous
  # validation message first).
  observeEvent(input$goSimulation, {
    output$mesg <- renderText("")
    rodarSimulacao(getSelectionKey(), getSelectionSoil())
  })
  # Fetch weather data from `url`, run the crop model with `soils`, and
  # register every output table/plot. The model writes its results to
  # plant.out / sw.out / WBAL.OUT in the working directory.
  rodarSimulacao <- function(url, soils){
    #url <- paste("http://dev.sisalert.com.br/apirest/api/v1/data/station/model/",
    # input$cbxStations[1], "/range/01-01-2013/01-01-2016", sep="")
    dataCurl <- getURLContent(url, ssl.verifypeer = FALSE)
    jsonWeather <- fromJSON(dataCurl)
    weatherData <- do.call(rbind, lapply(jsonWeather, function(x) data.frame(x)))
    # Stations need at least a year of records; fewer than 365 rows would
    # make the simulation fail.
    if (dim(weatherData)[1] > 365) {
      ## Sort chronologically and keep only the columns the model expects.
      weatherData$date <- as.character(weatherData$date)
      weatherData <- weatherData[order(weatherData$date,decreasing = FALSE),]
      row.names(weatherData) <- NULL
      weatherData <- data.frame('date'=weatherData$date,'srad'=weatherData$srad,'tmax'=weatherData$tmax,'tmin'=weatherData$tmin,'rain'=weatherData$rain,'par'=weatherData$par)
      ##
      runSimulation(weather = weatherData,plant = plant,soil = soils,irrig = i,doyp = 1, frop = 1)
      # Plant growth results table (Portuguese display column names).
      output$tableResultsPlant <- renderDataTable({
        plantOut <- read.table("plant.out",skip = 9)
        # change the colnames for plantOut
        colnames(plantOut) <- c("Dia do Ano", "Número de Folhas", "Acum.Temp. Reprod. (oC)", "Peso da Planta (g/m2)",
                                "Peso do Docel (g/m2)", "Peso da Raiz (g/m2)", "Peso da Fruta (g/m2)",
                                "Ind. Area Foliar (m2/m2)")
        data <- plantOut
      })
      # Soil water balance results table.
      output$tableResultsSoil <- renderDataTable({
        swOut <- read.table("sw.out",skip = 6)
        # change the colnames for swOut
        colnames(swOut) <- c("Dia do ano", "Rad. Solar(MJ/m2)", "Temp. Max(oC)", "Temp. Min(oC)", "Chuva(mm)",
                             "Irrig.(mm)", "Escoamento(mm)", "Infil.(mm)", "Drenagem(mm)", "Evapo. Transp(mm)",
                             "Evapo. Solo(mm)", "Evapo. Planta(mm)", "Agua no solo(mm)", "Agua no solo(mm3/mm3)",
                             "Estresse hídrico", "Excesso de estresse hídrico")
        data <- swOut
      })
      # Final water balance summary (description : value pairs).
      output$tableFinal <- renderDataTable({
        wbalOut <- read.table("WBAL.OUT",skip = 4, sep = ":")
        # change the colnames for wbalOut
        colnames(wbalOut) <- c("Descrição", "Valores")
        data <- wbalOut
      })
      output$tableResultsStation <- renderDataTable({
        data <- dataStation
      })
      # Leaf area index (IAF) over the season.
      output$distPlot <- renderPlot({
        plantOut <- read.table("plant.out",skip = 9)
        colnames(plantOut) <- c("doy", "Número de Folhas", "Acum.Temp. Reprod. (oC)", "Peso da Planta (g/m2)",
                                "Peso do Docel (g/m2)", "Peso da Raiz (g/m2)", "Peso da Fruta (g/m2)",
                                "iaf")
        ggplot(data = plantOut, aes(x = doy)) +
          geom_line(aes(y = iaf)) +
          labs (title="Índice de Area Foliar X Dia", x="Dia do Ano", y="Ind. Area Foliar (m2/m2)")
      })
      # Weather overview: PAR, rainfall, solar radiation, max/min temperature.
      output$plotMeteorologico <- renderPlot({
        dataCurlP <- getURLContent(getSelectionKey(), ssl.verifypeer = FALSE)
        jW <- fromJSON(dataCurlP)
        wD <- do.call(rbind, lapply(jW, function(x) data.frame(x)))
        # NOTE(review): "%y%j" parses a two-digit year plus day-of-year;
        # confirm this matches the API's date format.
        wD$date <- strptime(wD$date,"%y%j")
        wD$par <- as.numeric.factor(wD$par)
        wD$rain <- as.numeric.factor(wD$rain)
        wD$srad <- as.numeric.factor(wD$srad)
        wD$tmax <- as.numeric.factor(wD$tmax)
        wD$tmin <- as.numeric.factor(wD$tmin)
        p1 <- ggplot(wD, aes(x=date, y=par)) +
          geom_line(color="red") + scale_x_datetime() +
          ggtitle("PAR") + ylab("(MJ/m²) * 2 =~ mol[photon]/m²-day") + xlab("Data")
        p2 <- ggplot(wD, aes(x=date, y=rain)) +
          geom_line(color="red") + scale_x_datetime() + geom_point(colour="red", size=3, shape=21, fill="white") +
          ggtitle("Precipitação") + ylab("mm") + xlab("Data")
        # Third plot
        p3 <- ggplot(wD, aes(x=date, y=srad)) +
          geom_line(color="red") + scale_x_datetime() + geom_point(colour="red", size=3, shape=21, fill="white") +
          ggtitle("Radiação Solar") + ylab("W/m²") + xlab("Data")
        # Fourth plot
        p4 <- ggplot(wD, aes(x=date, y=tmax)) +
          geom_line(color="red") + scale_x_datetime() + geom_point(colour="red", size=3, shape=21, fill="white") +
          ggtitle("Temperatura Máxima") + ylab("ºC") + xlab("Data")
        p5 <- ggplot(wD, aes(x=date, y=tmin)) +
          geom_line(color="red") + scale_x_datetime() + geom_point(colour="red", size=3, shape=21, fill="white") +
          ggtitle("Temperatura Mínima") + ylab("ºC") + xlab("Data")
        multiplot(p1, p2, p3, p4, p5, cols=2)
      })
    } else {
      output$mesg <- renderUI(p("Estação não possui dados suficientes para uma simulação!", style = "color:red"))
    }
  }
})
| /server.R | no_license | borellaster/simulacao | R | false | false | 12,572 | r | path <- "/Users/pba/github/simulacao/FortranRIntegration"
setwd(path)
library(RCurl)
library(rjson)
library(FortranRIntegration)
library(shiny)
library(ggplot2)
library(reshape)
library(scales)
#load the data
w <- getWeatherDataFromTxt(paste(path,"/data/", sep = ""))
i <- getIrrigDataFromTxt(paste(path,"/data/", sep = ""))
soil <- getSoilDataFromTxt(paste(path,"/data/", sep = ""))
plant <- getPlantDataFromTxt(paste(path,"/data/", sep = ""))
#collect stations
dataCurl <- getURLContent("http://dev.sisalert.com.br/apirest/api/v1/stations/src/INMET/lobs/true", ssl.verifypeer = FALSE)
jsonStation <- fromJSON(dataCurl)
stations <- data.frame("id"=sapply(jsonStation, function(x) x[[1]]), "name"=sapply(jsonStation, function(x) x[[2]]), "code"=sapply(jsonStation, function(x) x$metaData$weather[[3]]), "maxdatetime"=as.character(sapply(jsonStation, function(x) x$lobs$datetime)),"timezone"=as.character(sapply(jsonStation, function(x) x$location$zoneName)))
dataStation <- subset(stations, id=='564f796a16af35ca3decd181' | id=='564f796916af35ca3decd180' | id=='564f7b0316af35ca3decd292' | id=='564f7ce316af35ca3decd3e1')
#change de colnames
colnames(dataStation)[1] <- "ID"
colnames(dataStation)[2] <- "Name"
dataStation$Name <- as.character(dataStation$Name)
dataStation$ID <- as.character(dataStation$ID)
dataStation$url <-
paste0(
"http://dev.sisalert.com.br/apirest/api/v1/data/station/model/",
dataStation$ID, "/range/11-01-2013/12-31-2014")
# Tipo de solo, para seleção
dataSoils <- data.frame( c(1, 2, 3, 4), c("Arenoso", "Medio", "Argiloso", "Default") )
colnames(dataSoils) <- c("ID", "Name")
dataSoils$Name <- as.character(dataSoils$Name)
#
#
# Funções functionModelVanGenucthen e functionSolos foram fornecidas pelo GRUPO DE SOLOS
#
#parametros do modelo de Van Genucthen (1980) para solo arenoso com teor de argila de 152 g kg-1
#dados disponiveis em Carducci et al., (2011)
functionModelVanGenucthen<-function(thpmp,thsat,a,n,solo) {
result<-thpmp+((thsat-thpmp)/(1+(a*solo)^n)^(1-1/n))
return(result)
}
functionSolos <- function(tipoSolo){
####### solo arenoso com teor de argila de 152 g kg-1###############################
if (tipoSolo == 1) {
#Solos arenosos profundos com pouco silte e argila;
CN<-64
# DP--> atribuindo a profundidade do perfil do solo (cm)
DP<-200
# STp--> encontrando a umidade do solo saturado (thsat=0 kPa)
STp<-functionModelVanGenucthen(0.046,0.331,0.719,1.841,0.0)
# FCp--> encontrando a umidade do solo na capacidade de campo (thCC=10 kPa)
FCp<-functionModelVanGenucthen(0.046,0.331,0.719,1.841,10)
# WPp--> encontrando a umidade do solo no ponto de murcha permanente (thPMP=1500 kPa)
WPp<-functionModelVanGenucthen(0.046,0.331,0.719,1.841,1500)
WPp<- round(WPp, digits = 3)
# DNRp--> encontrando o percentual de drenagem diaria (thsat-thCC)
DRNp<-STp-FCp
# SWC--> encontrando a lamina de agua disponivel no perfil de solo considerado (mm)
SWC<-(STp-FCp)*DP*10
SWC
# Retorna um vetor contendo o TipoSolo,CN, DP, STp, FCp, WPp, DRNp, SWC
dadosSolo<-data.frame(CN, DP, STp, FCp, WPp, DRNp, SWC)
return(dadosSolo)
}
#######solo medio com teor de argila de 420 g kg-1###############################
if (tipoSolo == 2) {
#Solos arenosos profundos com pouco silte e argila;
CN<-76
# DP--> atribuindo a profundidade do perfil do solo (cm)
DP<-200
# STp--> encontrando a umidade do solo saturado (thsat=0 kPa)
STp<-functionModelVanGenucthen(0.190,0.645,1.398,1.398,0.0)
# FCp--> encontrando a umidade do solo na capacidade de campo (thCC=10 kPa)
FCp<-functionModelVanGenucthen(0.190,0.645,1.398,1.398,10)
# WPp--> encontrando a umidade do solo no ponto de murcha permanente (thPMP=1500 kPa)
WPp<-functionModelVanGenucthen(0.190,0.645,1.398,1.398,1500)
WPp<- round(WPp, digits = 3)
# DNRp--> encontrando o percentual de drenagem diaria (thsat-thCC)
DRNp<-STp-FCp
# SWC--> encontrando a lamina de agua disponivel no perfil de solo considerado (mm)
SWC<-(STp-FCp)*DP*10
# Retorna um vetor contendo o TipoSolo,CN, DP, STp, FCp, WPp, DRNp, SWC
dadosSolo<-data.frame(CN, DP, STp, FCp, WPp, DRNp, SWC)
return(dadosSolo)
}
#######solo argiloso com teor de argila de 716 g kg-1#############################
if (tipoSolo == 3) {
#Solos arenosos profundos com pouco silte e argila;
CN<-84
# DP--> atribuindo a profundidade do perfil do solo (cm)
DP<-200
thpmp=0.250
thsat=0.806
a=1.786
n=1.599
# STp--> encontrando a umidade do solo saturado (thsat=0 kPa)
STp<-functionModelVanGenucthen(0.250,0.806,1.786,1.599,0.0)
# FCp--> encontrando a umidade do solo na capacidade de campo (thCC=10 kPa)
FCp<-functionModelVanGenucthen(0.250,0.806,1.786,1.599,10)
# WPp--> encontrando a umidade do solo no ponto de murcha permanente (thPMP=1500 kPa)
WPp<-functionModelVanGenucthen(0.250,0.806,1.786,1.599,1500)
WPp<- round(WPp, digits = 3)
# DNRp--> encontrando o percentual de drenagem diaria (thsat-thCC)
DRNp<-STp-FCp
# SWC--> encontrando a lamina de agua disponivel no perfil de solo considerado (mm)
SWC<-(STp-FCp)*DP*10
# Retorna um vetor contendo o TipoSolo,CN, DP, STp, FCp, WPp, DRNp, SWC
dadosSolo<-data.frame(CN, DP, STp, FCp, WPp, DRNp, SWC)
return(dadosSolo)
}
}
#
#
# Funções gráfico do Meterologico
# Função de PLOT multiplo
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
library(grid)
plots <- c(list(...), plotlist)
numPlots = length(plots)
if (is.null(layout)) {
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots==1) {
print(plots[[1]])
} else {
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
for (i in 1:numPlots) {
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,layout.pos.col = matchidx$col))
}
}
}
# Função para transformar fator em numeric
as.numeric.factor <- function(x) {as.numeric(levels(x))[x]}
#################
# Shiny Server
#################
shinyServer(function(input, output, session) {
output$ui <- renderUI({
sidebarPanel(
h4("Instruções"),
p("Clique em Rodar Modelo após Escolher a Estação e o Tipo de Solo."),
br(),
selectInput(inputId = "cbxStations",
label = "Escolher a estação",
choices = dataStation$Name,
selectize = TRUE),
selectInput(inputId = "cbxSoils",
label = "Escolher o tipo de solo",
choices = dataSoils$Name,
selectize = TRUE),
actionButton("goSimulation", "Rodar Modelo"),
uiOutput("mesg")
)})
getSelectionKey <- function() {
#return(as.character(input$cbxStations))
return(as.character(subset(dataStation, Name == input$cbxStations)$url))
}
getSelectionSoil <- function() {
soilId <- as.integer(subset(dataSoils, Name == input$cbxSoils)$ID)
if (soilId == 4) {
return(soil)
} else {
return(functionSolos(soilId))
}
}
observeEvent(input$goSimulation, {
output$mesg <- renderText("")
rodarSimulacao(getSelectionKey(), getSelectionSoil())
})
rodarSimulacao <- function(url, soils){
#url <- paste("http://dev.sisalert.com.br/apirest/api/v1/data/station/model/",
# input$cbxStations[1], "/range/01-01-2013/01-01-2016", sep="")
dataCurl <- getURLContent(url, ssl.verifypeer = FALSE)
jsonWeather <- fromJSON(dataCurl)
weatherData <- do.call(rbind, lapply(jsonWeather, function(x) data.frame(x)))
# As estações que não possem dados suficientes para uma simulação, ou 365 dias de dados da erro
if (dim(weatherData)[1] > 365) {
##
weatherData$date <- as.character(weatherData$date)
weatherData <- weatherData[order(weatherData$date,decreasing = FALSE),]
row.names(weatherData) <- NULL
weatherData <- data.frame('date'=weatherData$date,'srad'=weatherData$srad,'tmax'=weatherData$tmax,'tmin'=weatherData$tmin,'rain'=weatherData$rain,'par'=weatherData$par)
##
runSimulation(weather = weatherData,plant = plant,soil = soils,irrig = i,doyp = 1, frop = 1)
output$tableResultsPlant <- renderDataTable({
plantOut <- read.table("plant.out",skip = 9)
#change the colnames for plantOut
colnames(plantOut) <- c("Dia do Ano", "Número de Folhas", "Acum.Temp. Reprod. (oC)", "Peso da Planta (g/m2)",
"Peso do Docel (g/m2)", "Peso da Raiz (g/m2)", "Peso da Fruta (g/m2)",
"Ind. Area Foliar (m2/m2)")
data <- plantOut
})
output$tableResultsSoil <- renderDataTable({
swOut <- read.table("sw.out",skip = 6)
#change the colnames for swOut
colnames(swOut) <- c("Dia do ano", "Rad. Solar(MJ/m2)", "Temp. Max(oC)", "Temp. Min(oC)", "Chuva(mm)",
"Irrig.(mm)", "Escoamento(mm)", "Infil.(mm)", "Drenagem(mm)", "Evapo. Transp(mm)",
"Evapo. Solo(mm)", "Evapo. Planta(mm)", "Agua no solo(mm)", "Agua no solo(mm3/mm3)",
"Estresse hídrico", "Excesso de estresse hídrico")
data <- swOut
})
output$tableFinal <- renderDataTable({
wbalOut <- read.table("WBAL.OUT",skip = 4, sep = ":")
#change the colnames for wbalOut
colnames(wbalOut) <- c("Descrição", "Valores")
data <- wbalOut
})
output$tableResultsStation <- renderDataTable({
data <- dataStation
})
output$distPlot <- renderPlot({
plantOut <- read.table("plant.out",skip = 9)
colnames(plantOut) <- c("doy", "Número de Folhas", "Acum.Temp. Reprod. (oC)", "Peso da Planta (g/m2)",
"Peso do Docel (g/m2)", "Peso da Raiz (g/m2)", "Peso da Fruta (g/m2)",
"iaf")
ggplot(data = plantOut, aes(x = doy)) +
geom_line(aes(y = iaf)) +
labs (title="Índice de Area Foliar X Dia", x="Dia do Ano", y="Ind. Area Foliar (m2/m2)")
})
output$plotMeteorologico <- renderPlot({
dataCurlP <- getURLContent(getSelectionKey(), ssl.verifypeer = FALSE)
jW <- fromJSON(dataCurlP)
wD <- do.call(rbind, lapply(jW, function(x) data.frame(x)))
wD$date <- strptime(wD$date,"%y%j")
wD$par <- as.numeric.factor(wD$par)
wD$rain <- as.numeric.factor(wD$rain)
wD$srad <- as.numeric.factor(wD$srad)
wD$tmax <- as.numeric.factor(wD$tmax)
wD$tmin <- as.numeric.factor(wD$tmin)
p1 <- ggplot(wD, aes(x=date, y=par)) +
geom_line(color="red") + scale_x_datetime() +
ggtitle("PAR") + ylab("(MJ/m²) * 2 =~ mol[photon]/m²-day") + xlab("Data")
p2 <- ggplot(wD, aes(x=date, y=rain)) +
geom_line(color="red") + scale_x_datetime() + geom_point(colour="red", size=3, shape=21, fill="white") +
ggtitle("Precipitação") + ylab("mm") + xlab("Data")
# Third plot
p3 <- ggplot(wD, aes(x=date, y=srad)) +
geom_line(color="red") + scale_x_datetime() + geom_point(colour="red", size=3, shape=21, fill="white") +
ggtitle("Radiação Solar") + ylab("W/m²") + xlab("Data")
# Fourth plot
p4 <- ggplot(wD, aes(x=date, y=tmax)) +
geom_line(color="red") + scale_x_datetime() + geom_point(colour="red", size=3, shape=21, fill="white") +
ggtitle("Temperatura Máxima") + ylab("ºC") + xlab("Data")
p5 <- ggplot(wD, aes(x=date, y=tmin)) +
geom_line(color="red") + scale_x_datetime() + geom_point(colour="red", size=3, shape=21, fill="white") +
ggtitle("Temperatura Mínima") + ylab("ºC") + xlab("Data")
multiplot(p1, p2, p3, p4, p5, cols=2)
})
} else {
output$mesg <- renderUI(p("Estação não possui dados suficientes para uma simulação!", style = "color:red"))
}
}
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\name{overwritefun}
\alias{overwritefun}
\title{overwrite a function in its namespace
useful for dev purposes.}
\usage{
overwritefun(newfun, oldfun, package, envir = globalenv())
}
\description{
overwrite a function in its namespace
useful for dev purposes.
}
| /man/overwritefun.Rd | no_license | kevinmhadi/khtools | R | false | true | 350 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\name{overwritefun}
\alias{overwritefun}
\title{overwrite a function in its namespace
useful for dev purposes.}
\usage{
overwritefun(newfun, oldfun, package, envir = globalenv())
}
\description{
overwrite a function in its namespace
useful for dev purposes.
}
|
#' create_pca_plot
#'
#' \code{create_pca_plot} Takes a data.frame and creates an x-y PCA scatterplot
#'
#' The plot data data.frame should contain the columns for the x, y and fill
#' variables and optionally ones for the shape variable and text labels.
#' The text label column should be named sample_names.
#'
#' @param plot_data data.frame - plot data
#' @param x_component character - column plotted on the x axis (default 'PC1')
#' @param y_component character - column plotted on the y axis (default 'PC2')
#' @param fill_palette character - a named character vector of colours for the fill aesthetic
#' @param shape_palette character - a named character vector of shapes for the shape aesthetic
#' @param current_limits list - manually set axis limits; NA entries fall back to the data
#' @param input shiny input list - supplies fill_var, fill_levels_checkgroup,
#'   shape_var, shape_levels_checkgroup, sample_names and point_size
#' @param session Shiny session_object
#' @param ... passed on to scatterplot_two_components
#'
#' @return ggplot2 object, or NULL when a requested column is missing or no
#'   rows survive subsetting
#'
#' @examples
#' create_pca_plot( plot_data, 'PC1', 'PC2', fill_palette, shape_palette, input, session )
#'
#' @export
#'
create_pca_plot <- function(plot_data, x_component = 'PC1', y_component = 'PC2',
                            fill_palette, shape_palette, current_limits, input, session, ...) {
  if (session$userData[['debug']]) {
    cat("Function: create_pca_plot\n")
  }
  # Aesthetic settings chosen by the user in the app
  fill_var <- input$fill_var
  shape_var <- input$shape_var
  fill_levels <- input$fill_levels_checkgroup
  shape_levels <- input$shape_levels_checkgroup
  # Guard clauses: bail out when the requested columns are absent
  if (!any(colnames(plot_data) == fill_var)) {
    return(NULL)
  }
  if (shape_var != 'None' &
      !any(colnames(plot_data) == shape_var)) {
    return(NULL)
  }
  # Restrict the data to the selected fill/shape levels
  plot_data <- subset_plot_data(plot_data, fill_var, fill_levels,
                                shape_var, shape_levels, session)
  if (session$userData[['debug']]) {
    cat('Plot data after subset:\n')
    print(head(plot_data))
    print(fill_palette)
    print(nrow(plot_data))
  }
  if (nrow(plot_data) == 0) {
    cat('Plot data is empty, returning NULL\n')
    return(NULL)
  }
  # Base scatterplot
  pca_plot <-
    scatterplot_two_components(plot_data,
                               x_component, y_component,
                               fill_var, fill_palette,
                               shape_var, shape_palette,
                               sample_names = input$sample_names,
                               point_size = input$point_size, ... )
  # Apply axis limits (manual limits win; NAs fall back to the data)
  limits <- get_limits(current_limits, plot_data, x_component, y_component, session)
  pca_plot <- pca_plot +
    xlim(c(limits[['xmin']], limits[['xmax']])) +
    ylim(c(limits[['ymin']], limits[['ymax']])) +
    coord_cartesian(clip = "off")
  return(pca_plot)
}
# Merge user-supplied axis limits with limits derived from the data.
# Each component of current_limits (xmin/xmax/ymin/ymax) that is NA - or
# NULL - falls back to the corresponding value computed from plot_data by
# calculate_limits().
get_limits <- function(current_limits, plot_data, x_component, y_component, session){
  if (session$userData[['debug']]) {
    cat("Function: get_limits\n")
  }
  data_limits <- calculate_limits(plot_data, x_component, y_component, session)
  new_limits <- list()
  for (limit_name in c('xmin', 'xmax', 'ymin', 'ymax')) {
    current <- current_limits[[limit_name]]
    # if/else instead of scalar ifelse(): clearer, and a NULL entry no
    # longer yields a zero-length result (it now falls back to the data).
    if (is.null(current) || is.na(current)) {
      new_limits[[limit_name]] <- data_limits[[limit_name]]
    } else {
      new_limits[[limit_name]] <- current
    }
  }
  if (session$userData[['debug']]) {
    print(current_limits)
    print(data_limits)
    print(new_limits)
  }
  return(new_limits)
}
#' calculate_limits
#'
#' \code{calculate_limits} Derive default X/Y axis limits from the plot data
#'
#' Limits are the floor of each column's minimum and the ceiling of its
#' maximum plus 0.5 (leaving headroom for point labels).
#'
#' @param plot_data data.frame - plot data
#' @param x_component character - name of the column plotted on the x axis
#' @param y_component character - name of the column plotted on the y axis
#' @param session Shiny session_object (unused; kept for a consistent signature)
#'
#' @return list with components xmin, xmax, ymin and ymax
#'
#' @examples
#' calculate_limits( plot_data, 'PC1', 'PC2', session )
#'
#' @export
#'
calculate_limits <- function(plot_data, x_component, y_component, session) {
  x_values <- plot_data[[x_component]]
  y_values <- plot_data[[y_component]]
  list(xmin = floor(min(x_values)),
       xmax = ceiling(max(x_values) + 0.5),
       ymin = floor(min(y_values)),
       ymax = ceiling(max(y_values) + 0.5))
}
#' scatterplot_two_components
#'
#' \code{scatterplot_two_components} Takes a data.frame and creates an x-y scatterplot
#'
#' The plot data data.frame should contain the columns for the x, y and fill
#' variables and optionally ones for the shape variable and text labels.
#' It must also contain a logical `highlight` column and, when labelling,
#' `sample_name` / `sample_label` columns.
#'
#' @param plot_data data.frame - plot data
#' @param x_component character - name of the column to plot on the x axis
#' @param y_component character - name of the column to plot on the y axis
#' @param fill_var character - name of the column to use as the fill aesthetic
#' @param fill_palette character - either a named vector of colours (discrete
#'   fill) or one of 'viridis'/'diverging' for a continuous fill
#' @param shape_var character - name of the column to use as the shape aesthetic ('None' to disable)
#' @param shape_palette character - a named vector of shapes for the shape aesthetic
#' @param sample_names logical - whether text labels should be added to label the points
#' @param point_size numeric - size of the plotted points
#' @param ... passed on to the continuous fill scale (viridis)
#'
#' @return ggplot2 object
#'
#' @examples
#' scatterplot_two_components( plot_data, 'PC1', 'PC2', 'Gene', fill_palette,
#'                              'Genotype', shape_palette, sample_names )
#'
#' @export
#'
scatterplot_two_components <-
  function(plot_data, x_component, y_component,
           fill_var, fill_palette,
           shape_var, shape_palette,
           sample_names = TRUE,
           point_size = 4, ...) {
    plot <- ggplot(data = plot_data,
                   aes(x = !!rlang::sym(x_component),
                       y = !!rlang::sym(y_component),
                       colour = highlight))
    if (shape_var == 'None') {
      # fixed circle glyph; fill carries the grouping
      plot <- plot +
        geom_point(aes(fill = !!rlang::sym(fill_var),
                       stroke = highlight),
                   size = point_size, shape = 21)
    } else {
      plot <- plot +
        geom_point(aes(fill = !!rlang::sym(fill_var),
                       shape = !!rlang::sym(shape_var),
                       stroke = highlight),
                   size = point_size) +
        scale_shape_manual(values = shape_palette,
                           guide = guide_legend(order = 2),
                           na.translate = FALSE)
    }
    # colour/stroke scales outline highlighted points in red with a
    # thicker stroke; neither appears in the legend
    plot <- plot +
      scale_colour_manual(values = c("FALSE" = 'black', "TRUE" = 'firebrick3'),
                          guide = "none") +
      scale_discrete_manual(
        aesthetics = "stroke",
        values = c(`FALSE` = 1, `TRUE` = 2),
        guide = "none"
      )
    # BUG FIX: inherits() instead of class(x) == 'factor'. The old
    # comparison produced a length-2 condition for ordered factors
    # (class c("ordered", "factor")), which is an error in R >= 4.2.
    if (inherits(plot_data[[fill_var]], 'factor')) {
      # discrete fill with the supplied palette
      plot <- plot +
        scale_fill_manual(
          values = fill_palette,
          guide = guide_legend(override.aes = list(shape = 21),
                               order = 1)
        )
    } else {
      # continuous fill: fill_palette names the scale to use
      if (fill_palette == 'viridis') {
        plot <- plot + scale_fill_viridis(...)
      } else if (fill_palette == 'diverging') {
        plot <- plot +
          scale_fill_gradient2(low = '#2166ac', mid = 'white', high = '#b2182b',
                               midpoint = 0)
      }
    }
    # label every point, or - when sample_names is FALSE - only the
    # highlighted ones
    if (sample_names) {
      plot <- plot + geom_text_repel(aes(label = sample_name),
                                     hjust = 0, vjust = 0,
                                     nudge_x = 0.5, nudge_y = 0.5,
                                     size=4, show.legend=FALSE)
    } else if (sum(plot_data$highlight, na.rm = TRUE) > 0) {
      plot <- plot + geom_label_repel(aes(label = sample_label),
                                      hjust = 0, vjust = 0,
                                      nudge_x = 0.5, nudge_y = 0.5,
                                      size=4, show.legend=FALSE)
    }
    # change theme
    plot <- plot + theme_minimal()
    return(plot)
  }
#' subset_plot_data
#'
#' \code{subset_plot_data} Takes a data.frame and subsets it to the supplied levels of up to 2 variables
#'
#' Rows are returned grouped in the order of the supplied levels. Both
#' filters are applied cumulatively: the shape-level filter operates on the
#' result of the fill-level filter.
#'
#' @param plot_data data.frame - plot data
#' @param fill_var character - name of the column to use as the fill aesthetic
#' @param fill_levels character - levels of fill_var to keep (NULL keeps all)
#' @param shape_var character - name of the column to use as the shape aesthetic ('None' to disable)
#' @param shape_levels character - levels of shape_var to keep (NULL keeps all)
#' @param session Shiny session_object
#'
#' @return data.frame
#'
#' @examples
#' subset_plot_data( plot_data, 'Gene', c('Gene1', 'Gene2'),
#'                     'Genotype', c('wt', 'hom') )
#'
#' @export
#'
subset_plot_data <- function(plot_data, fill_var, fill_levels,
                             shape_var, shape_levels, session) {
  if (session$userData[['debug']]) {
    cat("Function: subset_plot_data\n")
    cat('Plot data:\n')
    print(head(plot_data))
    cat(sprintf('Fill variable: %s\n', fill_var))
    cat(sprintf('Fill levels: %s\n', fill_levels))
    cat(sprintf('Shape variable: %s\n', shape_var))
    cat(sprintf('Shape levels: %s\n', shape_levels))
  }
  # return original data if both sets of levels are NULL
  # (&& instead of elementwise & - these are scalar conditions)
  if (is.null(fill_levels) && is.null(shape_levels)) {
    cat('Both fill levels and shape levels are NULL. Returning original data\n')
    return(plot_data)
  }
  # subset by fill levels first (row order follows the level order)
  if (is.null(fill_levels)) {
    plot_data_subset <- plot_data
  } else {
    fill_variable <- rlang::sym(fill_var)
    plot_data_subset <-
      do.call(rbind, lapply(fill_levels,
              function(level){ dplyr::filter(plot_data, !!fill_variable == level) } ) )
  }
  # then subset by shape levels
  if (shape_var != 'None' && !is.null(shape_levels)) {
    shape_variable <- rlang::sym(shape_var)
    # BUG FIX: filter the fill-subset data (plot_data_subset), not the
    # original plot_data - previously any shape-level selection silently
    # discarded the fill-level subsetting done above.
    plot_data_subset <-
      do.call(rbind, lapply(shape_levels,
              function(level){ dplyr::filter(plot_data_subset, !!shape_variable == level) } ) )
  }
  return(plot_data_subset)
}
#'
#' \code{create_pca_plot} Takes a data.frame and creates an x-y PCA scatterplot
#'
#' The plot data data.frame should contain the columns for the x, y and fill variables
#' and optionally ones for the shape variable and text labels.
#' The text label column should be named sample_names.
#'
#' @param plot_data data.frame - plot data
#' @param x_component character - name of the column to plot on the x axis
#' @param y_component character - name of the column to plot on the y axis
#' @param fill_palette character - a named character vectors of colours for the fill aesthetic
#' @param shape_palette character - a named character vectors of colours for the shape aesthetic
#' @param input shiny input list - contains fill_var, fill_levels, shape_var, shape_levels and sample_names
#' @param session Shiny session_object
#'
#' @return ggplot2 object
#'
#' @examples
#' create_pca_plot( plot_data, 'PC1', 'PC2', fill_palette, shape_palette, input, session )
#'
#' @export
#'
create_pca_plot <- function(plot_data, x_component = 'PC1', y_component = 'PC2',
                            fill_palette, shape_palette, current_limits, input, session, ...) {
  # Build a PCA scatterplot for the current Shiny inputs: subset the data to
  # the selected fill/shape levels, draw the points, then apply axis limits.
  # Returns NULL when the requested aesthetic columns are missing or the
  # subset is empty; otherwise returns a ggplot object.
  if (session$userData[['debug']]) {
    cat("Function: create_pca_plot\n")
  }
  # Aesthetic choices come straight from the Shiny inputs.
  fill_var <- input$fill_var
  shape_var <- input$shape_var
  fill_levels <- input$fill_levels_checkgroup
  shape_levels <- input$shape_levels_checkgroup
  # Guard: both aesthetic variables must exist as columns of the plot data
  # (shape is only checked when a shape variable has been selected).
  if (!any(colnames(plot_data) == fill_var) ||
      (shape_var != 'None' && !any(colnames(plot_data) == shape_var))) {
    return(NULL)
  }
  plot_data <- subset_plot_data(plot_data, fill_var, fill_levels,
                                shape_var, shape_levels, session)
  if (session$userData[['debug']]) {
    cat('Plot data after subset:\n')
    print(head(plot_data))
    print(fill_palette)
    print(nrow(plot_data))
  }
  if (nrow(plot_data) == 0) {
    cat('Plot data is empty, returning NULL\n')
    return(NULL)
  }
  # Draw the scatterplot itself; extra arguments are forwarded on.
  pca_plot <-
    scatterplot_two_components(plot_data,
                               x_component, y_component,
                               fill_var, fill_palette,
                               shape_var, shape_palette,
                               sample_names = input$sample_names,
                               point_size = input$point_size, ... )
  # Axis limits: manually-set (non-NA) limits win over data-derived ones.
  axis_limits <- get_limits(current_limits, plot_data, x_component, y_component, session)
  pca_plot <- pca_plot +
    xlim(c(axis_limits[['xmin']], axis_limits[['xmax']])) +
    ylim(c(axis_limits[['ymin']], axis_limits[['ymax']])) +
    coord_cartesian(clip = "off")
  return(pca_plot)
}
#' get_limits
#'
#' \code{get_limits} Combine manually-set axis limits with limits derived
#' from the data. Any component of \code{current_limits} that is NA is
#' replaced by the corresponding data-derived limit from
#' \code{calculate_limits}.
#'
#' @param current_limits list - components xmin, xmax, ymin, ymax; NA where unset
#' @param plot_data data.frame - plot data
#' @param x_component character - name of the column plotted on the x axis
#' @param y_component character - name of the column plotted on the y axis
#' @param session Shiny session object (used for the debug flag)
#'
#' @return list with components xmin, xmax, ymin and ymax
#'
get_limits <- function(current_limits, plot_data, x_component, y_component, session){
  if (session$userData[['debug']]) {
    cat("Function: get_limits\n")
  }
  data_limits <- calculate_limits(plot_data, x_component, y_component, session)
  new_limits <- list()
  for (i in c('xmin', 'xmax', 'ymin', 'ymax')) {
    # plain if/else rather than ifelse(): the condition is scalar and
    # ifelse() would needlessly strip attributes from the chosen value
    if (is.na(current_limits[[i]])) {
      # no manual limit set - fall back to the limit computed from the data
      new_limits[[i]] <- data_limits[[i]]
    } else {
      new_limits[[i]] <- current_limits[[i]]
    }
  }
  if (session$userData[['debug']]) {
    print(current_limits)
    print(data_limits)
    print(new_limits)
  }
  return(new_limits)
}
#' calculate_limits
#'
#' \code{calculate_limits} Calculate X/Y limits of the current plot data
#'
#' The plot data data.frame should contain the columns for the x, y and fill variables
#'
#' @param plot_data data.frame - plot data
#' @param x_component character - name of the column to plot on the x axis
#' @param y_component character - name of the column to plot on the y axis
#' @param session Shiny session_object
#'
#' @return ggplot2 object
#'
#' @examples
#' calculate_limits( plot_data, 'PC1', 'PC2', session )
#'
#' @export
#'
calculate_limits <- function(plot_data, x_component, y_component, session) {
  # Derive axis limits from the data: round down/up to whole numbers and
  # add half a unit of headroom on the upper side of each axis.
  x_values <- plot_data[[x_component]]
  y_values <- plot_data[[y_component]]
  list(
    xmin = floor(min(x_values)),
    xmax = ceiling(max(x_values) + 0.5),
    ymin = floor(min(y_values)),
    ymax = ceiling(max(y_values) + 0.5)
  )
}
#' scatterplot_two_components
#'
#' \code{scatterplot_two_components} Takes a data.frame and creates an x-y scatterplot
#'
#' The plot data data.frame should contain the columns for the x, y and fill variables
#' and optionally ones for the shape variable and text labels.
#' The text label column should be named sample_names.
#'
#' @param plot_data data.frame - plot data
#' @param x_component character - name of the column to plot on the x axis
#' @param y_component character - name of the column to plot on the y axis
#' @param fill_var character - name of the column to use as the fill aesthetic
#' @param fill_palette character - a named character vectors of colours for the fill aesthetic
#' @param shape_var character - name of the column to use as the shape aesthetic
#' @param shape_palette character - a named character vectors of colours for the shape aesthetic
#' @param samples_names logical - whether text labels should be added to label the points
#'
#' @return ggplot2 object
#'
#' @examples
#' scatterplot_two_components( plot_data, 'PC1', 'PC2', 'Gene', fill_palette,
#' 'Genotype', shape_palette, sample_names )
#'
#' @export
#'
scatterplot_two_components <-
  function(plot_data, x_component, y_component,
           fill_var, fill_palette,
           shape_var, shape_palette,
           sample_names = TRUE,
           point_size = 4, ...) {
    # Base plot: the colour aesthetic is driven by the logical `highlight`
    # column so highlighted points get a different outline colour and stroke.
    plot <- ggplot(data = plot_data,
                   aes(x = !!rlang::sym(x_component),
                       y = !!rlang::sym(y_component),
                       colour = highlight))
    if (shape_var == 'None') {
      # fixed shape 21 (filled circle) so the fill aesthetic is visible
      plot <- plot +
        geom_point(aes(fill = !!rlang::sym(fill_var),
                       stroke = highlight),
                   size = point_size, shape = 21)
    } else {
      plot <- plot +
        geom_point(aes(fill = !!rlang::sym(fill_var),
                       shape = !!rlang::sym(shape_var),
                       stroke = highlight),
                   size = point_size) +
        scale_shape_manual(values = shape_palette,
                           guide = guide_legend(order = 2),
                           na.translate = FALSE)
    }
    # colour/stroke scales used only for highlighting; legends suppressed
    plot <- plot +
      scale_colour_manual(values = c("FALSE" = 'black', "TRUE" = 'firebrick3'),
                          guide = "none") +
      scale_discrete_manual(
        aesthetics = "stroke",
        values = c(`FALSE` = 1, `TRUE` = 2),
        guide = "none"
      )
    # is.factor() rather than class(x) == 'factor': class() returns a
    # length-2 vector for ordered factors, which would make this scalar
    # condition fail (an error in an if() on R >= 4.2)
    if (is.factor(plot_data[[fill_var]])) {
      # discrete fill scale from the supplied named palette
      plot <- plot +
        scale_fill_manual(
          values = fill_palette,
          guide = guide_legend(override.aes = list(shape = 21),
                               order = 1)
        )
    } else {
      # continuous fill variable: fill_palette is expected to be the string
      # 'viridis' or 'diverging' in this branch
      if(fill_palette == 'viridis'){
        plot <- plot + scale_fill_viridis(...)
      } else if (fill_palette == 'diverging') {
        plot <- plot +
          scale_fill_gradient2(low = '#2166ac', mid = 'white', high = '#b2182b',
                               midpoint = 0)
      }
    }
    # text labels: all samples when sample_names is TRUE, otherwise only the
    # highlighted points (if any) get labels
    if (sample_names) {
      plot <- plot + geom_text_repel(aes(label = sample_name),
                                     hjust = 0, vjust = 0,
                                     nudge_x = 0.5, nudge_y = 0.5,
                                     size=4, show.legend=FALSE)
    } else if (sum(plot_data$highlight, na.rm = TRUE) > 0) {
      plot <- plot + geom_label_repel(aes(label = sample_label),
                                      hjust = 0, vjust = 0,
                                      nudge_x = 0.5, nudge_y = 0.5,
                                      size=4, show.legend=FALSE)
    }
    # change theme
    plot <- plot + theme_minimal()
    return(plot)
  }
#' subset_plot_data
#'
#' \code{subset_plot_data} Takes a data.frame and subsets it to the supplied levels of up to 2 variables
#'
#' @param plot_data data.frame - plot data
#' @param fill_var character - name of the column to use as the fill aesthetic
#' @param fill_levels character - a named character vectors of colours for the fill aesthetic
#' @param shape_var character - name of the column to use as the shape aesthetic
#' @param shape_levels character - a named character vectors of colours for the shape aesthetic
#' @param session Shiny session_object
#'
#' @return data.frame
#'
#' @examples
#' subset_plot_data( plot_data, 'Gene', c('Gene1', 'Gene2'),
#' 'Genotype', c('wt', 'hom') )
#'
#' @export
#'
subset_plot_data <- function(plot_data, fill_var, fill_levels,
                             shape_var, shape_levels, session) {
  if (session$userData[['debug']]) {
    cat("Function: subset_plot_data\n")
    cat('Plot data:\n')
    print(head(plot_data))
    cat(sprintf('Fill variable: %s\n', fill_var))
    cat(sprintf('Fill levels: %s\n', fill_levels))
    cat(sprintf('Shape variable: %s\n', shape_var))
    cat(sprintf('Shape levels: %s\n', shape_levels))
  }
  # return original data if both sets of levels are NULL
  # (&& not &: the condition is scalar and should short-circuit)
  if (is.null(fill_levels) && is.null(shape_levels)) {
    cat('Both fill levels and shape levels are NULL. Returning original data\n')
    return(plot_data)
  }
  # only subset data if levels are not NULL
  if (is.null(fill_levels)) {
    plot_data_subset <- plot_data
  } else {
    # keep rows whose fill variable matches one of the selected levels;
    # rows come back grouped by level, in the order the levels were given
    fill_variable <- rlang::sym(fill_var)
    plot_data_subset <-
      do.call(rbind, lapply(fill_levels,
                            function(level){ dplyr::filter(plot_data, !!fill_variable == level) } ) )
  }
  if (shape_var != 'None') {
    shape_variable <- rlang::sym(shape_var)
    if (!is.null(shape_levels)) {
      # filter the already fill-subset data (plot_data_subset), NOT the
      # original plot_data - otherwise the fill-level subsetting above
      # would be silently discarded
      plot_data_subset <-
        do.call(rbind, lapply(shape_levels,
                              function(level){ dplyr::filter(plot_data_subset, !!shape_variable == level) } ) )
    }
  }
  return(plot_data_subset)
}
library(shiny)
# library(plyr)
# Define UI for Bank of England Base Rate History application
shinyUI(pageWithSidebar(
  # Application title shown on the page and in the browser tab
  headerPanel("Bank of England Base Rate Viewer",
              windowTitle="Bank of England Base Rate Changes Plotter Application"
  ),
  # Left-hand panel: user inputs, only applied when Submit is pressed
  sidebarPanel(
    h4("Enter a year (format YYYY) to view the BoE base rate changes since that year",
       col="red"),
    # Numeric year input; the data set starts in 1694
    numericInput(inputId='YearID', label="Enter a year (data from 1694 onwards)",
                 value=2000,
                 step=1
    ),
    # Toggle between a line chart and the default stair-steps chart
    checkboxInput(inputId='TypeID',
                  label="Click for Line Chart (default stair-steps chart) ",
                  value = FALSE
    ),
    # Inputs are only sent to the server when this button is clicked
    submitButton(text = "Submit",
                 icon = NULL
    )
  ),
  # Right-hand panel: text caption plus the rate-history plot
  mainPanel(
    verbatimTextOutput('caption'),
    plotOutput('newHist')
  )
))
| /ui.R | no_license | ibanezplayer/shinyapp | R | false | false | 974 | r | library(shiny)
# library(plyr)
# Define UI for Bank of England Base Rate History application
shinyUI(pageWithSidebar(
# Application title
headerPanel("Bank of England Base Rate Viewer",
windowTitle="Bank of England Base Rate Changes Plotter Application"
),
sidebarPanel(
h4("Enter a year (format YYYY) to view the BoE base rate changes since that year",
col="red"),
numericInput(inputId='YearID', label="Enter a year (data from 1694 onwards)",
value=2000,
step=1
),
checkboxInput(inputId='TypeID',
label="Click for Line Chart (default stair-steps chart) ",
value = FALSE
),
submitButton(text = "Submit",
icon = NULL
)
),
mainPanel(
verbatimTextOutput('caption'),
plotOutput('newHist')
)
))
|
#' Windowed rank functions.
#'
#' Six variations on ranking functions, mimicking the ranking functions
#' described in SQL2003. They are currently implemented using the built in
#' `rank` function, and are provided mainly as a convenience when
#' converting between R and SQL. All ranking functions map smallest inputs
#' to smallest outputs. Use [desc()] to reverse the direction.
#'
#' * `row_number()`: equivalent to `rank(ties.method = "first")`
#'
#' * `min_rank()`: equivalent to `rank(ties.method = "min")`
#'
#' * `dense_rank()`: like `min_rank()`, but with no gaps between
#' ranks
#'
#' * `percent_rank()`: a number between 0 and 1 computed by
#' rescaling `min_rank` to `[0, 1]`
#'
#' * `cume_dist()`: a cumulative distribution function. Proportion
#' of all values less than or equal to the current rank.
#'
#' * `ntile()`: a rough rank, which breaks the input vector into
#' `n` buckets.
#'
#' @name ranking
#' @param x a vector of values to rank. Missing values are left as is.
#' If you want to treat them as the smallest or largest values, replace
#' with Inf or -Inf before ranking.
#' @examples
#' x <- c(5, 1, 3, 2, 2, NA)
#' row_number(x)
#' min_rank(x)
#' dense_rank(x)
#' percent_rank(x)
#' cume_dist(x)
#'
#' ntile(x, 2)
#' ntile(runif(100), 10)
#'
#' # row_number can be used with single table verbs without specifying x
#' # (for data frames and databases that support windowing)
#' mutate(mtcars, row_number() == 1L)
#' mtcars %>% filter(between(row_number(), 1, 10))
NULL
#' @export
#' @rdname ranking
row_number <- function(x) {
  # With no input, number the rows of the current group (data-masking
  # context supplied by dplyr via from_context()).
  if (missing(x)) {
    return(seq_len(from_context("..group_size")))
  }
  # Ties are broken by order of appearance; missing values stay NA.
  rank(x, ties.method = "first", na.last = "keep")
}
# Definition from
# http://blogs.msdn.com/b/craigfr/archive/2008/03/31/ranking-functions-rank-dense-rank-and-ntile.aspx
#' @param n number of groups to split up into.
#' @export
#' @rdname ranking
ntile <- function(x = row_number(), n) {
  # Number of non-missing values determines the bucket boundaries.
  non_missing <- sum(!is.na(x))
  if (non_missing == 0L) {
    # all values missing: every position gets an NA bucket
    return(rep(NA_integer_, length(x)))
  }
  # Map each row number onto one of the n buckets; NAs propagate through
  # row_number() and therefore stay NA here too.
  as.integer(floor(n * (row_number(x) - 1) / non_missing + 1))
}
#' @export
#' @rdname ranking
min_rank <- function(x) {
  # All tied values receive the lowest rank of their tie group;
  # missing values stay NA.
  rank(x, ties.method = "min", na.last = "keep")
}
#' @export
#' @rdname ranking
dense_rank <- function(x) {
  # Rank against the sorted distinct values, so consecutive ranks have no
  # gaps. sort() drops NAs, so missing values come back as NA from match().
  distinct_sorted <- sort(unique(x))
  match(x, distinct_sorted)
}
#' @export
#' @rdname ranking
percent_rank <- function(x) {
  # Rescale min_rank to the unit interval: the smallest value maps to 0
  # and the largest non-missing value maps to 1.
  n_non_missing <- sum(!is.na(x))
  (min_rank(x) - 1) / (n_non_missing - 1)
}
#' @export
#' @rdname ranking
cume_dist <- function(x) {
  # Proportion of non-missing values less than or equal to each value;
  # ties all take the largest rank of their tie group.
  max_ranks <- rank(x, ties.method = "max", na.last = "keep")
  max_ranks / sum(!is.na(x))
}
| /R/rank.R | permissive | krlmlr/dplyr | R | false | false | 2,514 | r | #' Windowed rank functions.
#'
#' Six variations on ranking functions, mimicking the ranking functions
#' described in SQL2003. They are currently implemented using the built in
#' `rank` function, and are provided mainly as a convenience when
#' converting between R and SQL. All ranking functions map smallest inputs
#' to smallest outputs. Use [desc()] to reverse the direction.
#'
#' * `row_number()`: equivalent to `rank(ties.method = "first")`
#'
#' * `min_rank()`: equivalent to `rank(ties.method = "min")`
#'
#' * `dense_rank()`: like `min_rank()`, but with no gaps between
#' ranks
#'
#' * `percent_rank()`: a number between 0 and 1 computed by
#' rescaling `min_rank` to `[0, 1]`
#'
#' * `cume_dist()`: a cumulative distribution function. Proportion
#' of all values less than or equal to the current rank.
#'
#' * `ntile()`: a rough rank, which breaks the input vector into
#' `n` buckets.
#'
#' @name ranking
#' @param x a vector of values to rank. Missing values are left as is.
#' If you want to treat them as the smallest or largest values, replace
#' with Inf or -Inf before ranking.
#' @examples
#' x <- c(5, 1, 3, 2, 2, NA)
#' row_number(x)
#' min_rank(x)
#' dense_rank(x)
#' percent_rank(x)
#' cume_dist(x)
#'
#' ntile(x, 2)
#' ntile(runif(100), 10)
#'
#' # row_number can be used with single table verbs without specifying x
#' # (for data frames and databases that support windowing)
#' mutate(mtcars, row_number() == 1L)
#' mtcars %>% filter(between(row_number(), 1, 10))
NULL
#' @export
#' @rdname ranking
row_number <- function(x) {
if (missing(x)){
seq_len(from_context("..group_size"))
} else {
rank(x, ties.method = "first", na.last = "keep")
}
}
# Definition from
# http://blogs.msdn.com/b/craigfr/archive/2008/03/31/ranking-functions-rank-dense-rank-and-ntile.aspx
#' @param n number of groups to split up into.
#' @export
#' @rdname ranking
ntile <- function(x = row_number(), n) {
len <- sum(!is.na(x))
if (len == 0L) {
rep(NA_integer_, length(x))
} else {
as.integer(floor(n * (row_number(x) - 1) / len + 1))
}
}
#' @export
#' @rdname ranking
min_rank <- function(x) rank(x, ties.method = "min", na.last = "keep")
#' @export
#' @rdname ranking
dense_rank <- function(x) {
match(x, sort(unique(x)))
}
#' @export
#' @rdname ranking
percent_rank <- function(x) {
(min_rank(x) - 1) / (sum(!is.na(x)) - 1)
}
#' @export
#' @rdname ranking
cume_dist <- function(x) {
rank(x, ties.method = "max", na.last = "keep") / sum(!is.na(x))
}
|
###
###
###
### Purpose: Reference classes for tables of document status and abreviations
### started: 2016/03/09 (pvr)
###
### ############################################################################## ###
#' Reference class to represent table-like reference objects
#'
#' @description
#' A reference object of reference class \code{refObjTable} assumes
#' that a table simply consists of a tabel header and a table body.
#' Those components are represented by the fields of the reference class.
#' Apart from the initialisation method, the getter and setter methods
#' for the table header, we have a method to add additional rows to
#' the existing table body. The two methods \code{to_knitr_kable} and
#' \code{to_pander_pandoc} write tables in markdown format using
#' functions \code{knitr::kable()} and \code{pander::pandoc.table()}
#'
#' @field sTableHeader vector of table headers
#' @field lTableBody list of table body
#' @exportClass refObjTable
#' @export refObjTable
refObjTable <- setRefClass(Class = "refObjTable",
                           fields = list(sTableHeader = "character",
                                         lTableBody = "list"),
                           methods = list(
                             initialize = function(){
                               'Initialisation of table with empty body.'
                               lTableBody <<- list()
                             },
                             setTableHeader = function(psTableHeader){
                               'Setter for table header'
                               sTableHeader <<- psTableHeader
                             },
                             getTableHeader = function(){
                               'Getter for table header'
                               return(sTableHeader)
                             },
                             addRow = function(plTableRow){
                               'Adding a row represented by a list to the
                                body of the table. The list representing
                                the row must have the same names as the
                                existing table body, otherwise, the row is
                                not added to the table body.
                               '
                               if (length(lTableBody) == 0L){
                                 ### # first row: it simply becomes the table body
                                 lTableBody <<- plTableRow
                               } else {
                                 sTableBodyNames <- names(lTableBody)
                                 sTableRowNames <- names(plTableRow)
                                 ### # !identical() instead of any(a != b): it also copes
                                 ### # with rows that have a different number of fields,
                                 ### # where `!=` would fail to recycle
                                 if (!identical(sTableBodyNames, sTableRowNames)){
                                   cat(" * Error cannot add current row to table due to name missmatches")
                                   cat(" * Table names:\n")
                                   print(sTableBodyNames)
                                   cat(" * Table row names:\n")
                                   print(sTableRowNames)
                                 } else {
                                   ### # use mapply to merge fields of table body and row to be added
                                   lTableBody <<- mapply(c, lTableBody, plTableRow, SIMPLIFY = FALSE)
                                 }
                               }
                             },
                             to_knitr_kable = function(){
                               'Output current table in markdown format using function
                                knitr::kable(). In case the length of the specified table
                                header is consistent with the number of columns, then
                                the table header is added as columnnames of the data.frame
                                representation of the table body.
                               '
                               ### # convert table body to data.frame
                               dfTable <- as.data.frame(lTableBody, stringsAsFactors = FALSE)
                               ### # in case length of sTableHeader and number of columns match
                               ### # use them as column names
                               if (identical(length(sTableHeader), ncol(dfTable)))
                                 colnames(dfTable) <- sTableHeader
                               ### # use knitr::kable to print the output
                               knitr::kable(dfTable)
                             },
                             to_pander_pandoc = function(psStyle = "rmarkdown",
                                                         psJustify = NULL,
                                                         pnSplitCells = 30){
                               'Output current table in markdown format using the function
                                pander::pandoc.table(). This method accepts two parameters
                                psStyle and psJustify which are passed to
                                to pander::pandoc.table().
                               '
                               ### # convert table body to data.frame
                               dfTable <- as.data.frame(lTableBody, stringsAsFactors = FALSE)
                               ### # in case length of sTableHeader and number of columns match
                               ### # use them as column names
                               if (identical(length(sTableHeader), ncol(dfTable)))
                                 colnames(dfTable) <- sTableHeader
                               ### # in case psJustify is specified, use it, otherwise use default
                               ### # (&& short-circuits, so length(NULL) is never compared)
                               if (!is.null(psJustify) && identical(length(psJustify), ncol(dfTable))){
                                 pander::pandoc.table(dfTable,
                                                      style = psStyle,
                                                      justify = psJustify,
                                                      split.cells = pnSplitCells)
                               } else {
                                 pander::pandoc.table(dfTable,
                                                      style = psStyle,
                                                      split.cells = pnSplitCells)
                               }
                             }
                           ))
| /R/rqudocutablerefclass.R | no_license | pvrqualitasag/rqudocuhelper | R | false | false | 6,419 | r | ###
###
###
### Purpose: Reference classes for tables of document status and abreviations
### started: 2016/03/09 (pvr)
###
### ############################################################################## ###
#' Reference class to represent table-like reference objects
#'
#' @description
#' A reference object of reference class \code{refObjTable} assumes
#' that a table simply consists of a tabel header and a table body.
#' Those components are represented by the fields of the reference class.
#' Apart from the initialisation method, the getter and setter methods
#' for the table header, we have a method to add additional rows to
#' the existing table body. The two methods \code{to_knitr_kable} and
#' \code{to_pander_pandoc} write tables in markdown format using
#' functions \code{knitr::kable()} and \code{pander::pandoc.table()}
#'
#' @field sTableHeader vector of table headers
#' @field lTableBody list of table body
#' @exportClass refObjTable
#' @export refObjTable
refObjTable <- setRefClass(Class = "refObjTable",
fields = list(sTableHeader = "character",
lTableBody = "list"),
methods = list(
initialize = function(){
'Initialisation of table with empty body.'
lTableBody <<- list()
},
setTableHeader = function(psTableHeader){
'Setter for table header'
sTableHeader <<- psTableHeader
},
getTableHeader = function(){
'Getter for table header'
return(sTableHeader)
},
addRow = function(plTableRow){
'Adding a row represented by a list to the
body of the table. The list representing
the row must have the same names as the
existing table body, otherwise, the row is
not added to the table body.
'
if (length(lTableBody) == 0L){
lTableBody <<- plTableRow
} else {
sTableBodyNames <- names(lTableBody)
sTableRowNames <- names(plTableRow)
if (any(sTableBodyNames != sTableRowNames)){
cat(" * Error cannot add current row to table due to name missmatches")
cat(" * Table names:\n")
print(sTableBodyNames)
cat(" * Table row names:\n")
print(sTableRowNames)
} else {
### # use mapply to merge fields of table body and row to be added
lTableBody <<- mapply(c, lTableBody, plTableRow, SIMPLIFY = FALSE)
}
}
},
to_knitr_kable = function(){
'Output current table in markdown format using function
knitr::kable(). In case the length of the specified table
header is consistent with the number of columns, then
the table header is added as columnnames of the data.frame
representation of the table body.
'
### # convert table body to data.frame
dfTable <- as.data.frame(lTableBody, stringsAsFactors = FALSE)
### # in case length of sTableHeader and number of columns match
### # use them as column names
if (identical(length(sTableHeader), ncol(dfTable)))
colnames(dfTable) <- sTableHeader
### # use knitr::kable to print the output
knitr::kable(dfTable)
},
to_pander_pandoc = function(psStyle = "rmarkdown",
psJustify = NULL,
pnSplitCells = 30){
'Output current table in markdown format using the function
pander::pandoc.table(). This method accepts two parameters
psStyle and psJustify which are passed to
to pander::pandoc.table().
'
### # convert table body to data.frame
dfTable <- as.data.frame(lTableBody, stringsAsFactors = FALSE)
### # in case length of sTableHeader and number of columns match
### # use them as column names
if (identical(length(sTableHeader), ncol(dfTable)))
colnames(dfTable) <- sTableHeader
### # in case psJustify is specified, use it, otherwise use default
if (!is.null(psJustify) & identical(length(psJustify), ncol(dfTable))){
pander::pandoc.table(dfTable,
style = psStyle,
justify = psJustify,
split.cells = pnSplitCells)
} else {
pander::pandoc.table(dfTable,
style = psStyle,
split.cells = pnSplitCells)
}
}
))
|
# Tests for founder imputation in two-parent populations under finite selfing.
context("Founder imputation, two parents, finite selfing")
test_that("Test zero generations of intercrossing",
{
  # Simulate a cross for the given map/pedigree and check founder imputation:
  # with fully informative markers the imputed data should equal the finals;
  # with dominant markers at least 90% agreement is required.
  testFunc <- function(map, pedigree)
  {
    cross <- simulateMPCross(map=map, pedigree=pedigree, mapFunction = haldane)
    mapped <- new("mpcrossMapped", cross, map = map)
    suppressWarnings(result <- imputeFounders(mapped))
    # codominant markers: imputation should be exact
    expect_identical(result@geneticData[[1]]@imputed@data, result@geneticData[[1]]@finals)
    #Dominance doesn't really make a difference, because it's assumed inbred
    cross2 <- cross + biparentalDominant()
    mapped <- new("mpcrossMapped", cross2, map = map)
    result <- imputeFounders(mapped)
    # cross-tabulate imputed vs true genotypes; the diagonal holds the matches
    tmp <- table(result@geneticData[[1]]@imputed@data, cross@geneticData[[1]]@finals)
    expect_true(sum(diag(tmp)) / sum(tmp) > 0.9)
  }
  # one- and two-chromosome maps, 101 equally spaced markers per chromosome
  map1 <- sim.map(len = 100, n.mar = 101, anchor.tel = TRUE, include.x=FALSE, eq.spacing=TRUE)
  map2 <- sim.map(len = c(100, 100), n.mar = 101, anchor.tel = TRUE, include.x=FALSE, eq.spacing=TRUE)
  maps <- list(map1, map2)
  # F2 and RIL pedigrees, both with finite selfing
  pedigree1 <- f2Pedigree(1000)
  pedigree1@selfing <- "finite"
  pedigree2 <- rilPedigree(populationSize = 1000, selfingGenerations = 2)
  pedigree2@selfing <- "finite"
  pedigrees <- list(pedigree1, pedigree2)
  # exercise every combination of map and pedigree
  for(map in maps)
  {
    for(pedigree in pedigrees)
    {
      testFunc(map, pedigree)
    }
  }
})
test_that("Test non-zero generations of intercrossing",
{
  # Same check as above, but for pedigrees that include intercrossing
  # generations before selfing.
  testFunc <- function(map, pedigree)
  {
    cross <- simulateMPCross(map=map, pedigree=pedigree, mapFunction = haldane)
    mapped <- new("mpcrossMapped", cross, map = map)
    suppressWarnings(result <- imputeFounders(mapped))
    # codominant markers: imputation should be exact
    expect_identical(result@geneticData[[1]]@imputed@data, result@geneticData[[1]]@finals)
    #Dominance doesn't really make a difference, because it's assumed inbred
    cross2 <- cross + biparentalDominant()
    mapped <- new("mpcrossMapped", cross2, map = map)
    result <- imputeFounders(mapped)
    # cross-tabulate imputed vs true genotypes; the diagonal holds the matches
    tmp <- table(result@geneticData[[1]]@imputed@data, cross@geneticData[[1]]@finals)
    expect_true(sum(diag(tmp)) / sum(tmp) > 0.9)
  }
  # one- and two-chromosome maps, 101 equally spaced markers per chromosome
  map1 <- sim.map(len = 100, n.mar = 101, anchor.tel = TRUE, include.x=FALSE, eq.spacing=TRUE)
  map2 <- sim.map(len = c(100, 100), n.mar = 101, anchor.tel = TRUE, include.x=FALSE, eq.spacing=TRUE)
  maps <- list(map1, map2)
  # vary selfing generations (1-2) and intercrossing generations (1-2)
  pedigree1 <- twoParentPedigree(initialPopulationSize = 1000, selfingGenerations = 2, nSeeds = 1, intercrossingGenerations = 1)
  pedigree1@selfing <- "finite"
  pedigree2 <- twoParentPedigree(initialPopulationSize = 1000, selfingGenerations = 2, nSeeds = 1, intercrossingGenerations = 2)
  pedigree2@selfing <- "finite"
  pedigree3 <- twoParentPedigree(initialPopulationSize = 1000, selfingGenerations = 1, nSeeds = 1, intercrossingGenerations = 1)
  pedigree3@selfing <- "finite"
  pedigree4 <- twoParentPedigree(initialPopulationSize = 1000, selfingGenerations = 1, nSeeds = 1, intercrossingGenerations = 2)
  pedigree4@selfing <- "finite"
  pedigrees <- list(pedigree1, pedigree2, pedigree3, pedigree4)
  # exercise every combination of map and pedigree
  for(map in maps)
  {
    for(pedigree in pedigrees)
    {
      testFunc(map, pedigree)
    }
  }
})
| /tests/testthat/test-imputeFounders2ParentFinite.R | no_license | lmw40/mpMap2 | R | false | false | 3,066 | r | context("Founder imputation, two parents, finite selfing")
test_that("Test zero generations of intercrossing",
{
testFunc <- function(map, pedigree)
{
cross <- simulateMPCross(map=map, pedigree=pedigree, mapFunction = haldane)
mapped <- new("mpcrossMapped", cross, map = map)
suppressWarnings(result <- imputeFounders(mapped))
expect_identical(result@geneticData[[1]]@imputed@data, result@geneticData[[1]]@finals)
#Dominance doesn't really make a difference, because it's assumed inbred
cross2 <- cross + biparentalDominant()
mapped <- new("mpcrossMapped", cross2, map = map)
result <- imputeFounders(mapped)
tmp <- table(result@geneticData[[1]]@imputed@data, cross@geneticData[[1]]@finals)
expect_true(sum(diag(tmp)) / sum(tmp) > 0.9)
}
map1 <- sim.map(len = 100, n.mar = 101, anchor.tel = TRUE, include.x=FALSE, eq.spacing=TRUE)
map2 <- sim.map(len = c(100, 100), n.mar = 101, anchor.tel = TRUE, include.x=FALSE, eq.spacing=TRUE)
maps <- list(map1, map2)
pedigree1 <- f2Pedigree(1000)
pedigree1@selfing <- "finite"
pedigree2 <- rilPedigree(populationSize = 1000, selfingGenerations = 2)
pedigree2@selfing <- "finite"
pedigrees <- list(pedigree1, pedigree2)
for(map in maps)
{
for(pedigree in pedigrees)
{
testFunc(map, pedigree)
}
}
})
test_that("Test non-zero generations of intercrossing",
{
testFunc <- function(map, pedigree)
{
cross <- simulateMPCross(map=map, pedigree=pedigree, mapFunction = haldane)
mapped <- new("mpcrossMapped", cross, map = map)
suppressWarnings(result <- imputeFounders(mapped))
expect_identical(result@geneticData[[1]]@imputed@data, result@geneticData[[1]]@finals)
#Dominance doesn't really make a difference, because it's assumed inbred
cross2 <- cross + biparentalDominant()
mapped <- new("mpcrossMapped", cross2, map = map)
result <- imputeFounders(mapped)
tmp <- table(result@geneticData[[1]]@imputed@data, cross@geneticData[[1]]@finals)
expect_true(sum(diag(tmp)) / sum(tmp) > 0.9)
}
map1 <- sim.map(len = 100, n.mar = 101, anchor.tel = TRUE, include.x=FALSE, eq.spacing=TRUE)
map2 <- sim.map(len = c(100, 100), n.mar = 101, anchor.tel = TRUE, include.x=FALSE, eq.spacing=TRUE)
maps <- list(map1, map2)
pedigree1 <- twoParentPedigree(initialPopulationSize = 1000, selfingGenerations = 2, nSeeds = 1, intercrossingGenerations = 1)
pedigree1@selfing <- "finite"
pedigree2 <- twoParentPedigree(initialPopulationSize = 1000, selfingGenerations = 2, nSeeds = 1, intercrossingGenerations = 2)
pedigree2@selfing <- "finite"
pedigree3 <- twoParentPedigree(initialPopulationSize = 1000, selfingGenerations = 1, nSeeds = 1, intercrossingGenerations = 1)
pedigree3@selfing <- "finite"
pedigree4 <- twoParentPedigree(initialPopulationSize = 1000, selfingGenerations = 1, nSeeds = 1, intercrossingGenerations = 2)
pedigree4@selfing <- "finite"
pedigrees <- list(pedigree1, pedigree2, pedigree3, pedigree4)
for(map in maps)
{
for(pedigree in pedigrees)
{
testFunc(map, pedigree)
}
}
})
|
# Priklad 1
X <- c (0.31, 0.30, 0.29, 0.32)
n <- length (X)
prumer <- mean (X)
odchylka <- sd (X)
alpha <- 0.05
kvantil <- qchisq (1 - alpha, n - 1)
D <- sqrt (n - 1) * odchylka / sqrt (kvantil)
D
# Priklad 2
data <- read.csv (file = "intervaly.csv", header = TRUE, sep = ",", dec = ".")
stredy <- (data$dolni + data$horni) / 2
X <- rep (stredy, data$cetnost)
n <- sum (data$cetnost)
prumer <- mean (X)
odchylka <- sd (X)
rozptyl <- var (X)
# a)
alpha <- 0.05
kvantil <- qt (1 - alpha/2, n - 1)
D <- prumer - kvantil * odchylka / sqrt(n)
H <- prumer + kvantil * odchylka / sqrt(n)
c (D, H)
# b)
alpha <- 0.05
kvantil1 <- qchisq (1 - alpha/2, n - 1)
kvantil2 <- qchisq (alpha/2, n - 1)
D <- (n - 1) * rozptyl / kvantil1
H <- (n - 1) * rozptyl / kvantil2
c (D, H)
# Priklad 3
# a)
alpha <- 0.05
kvantil1 <- qnorm (1 - alpha/2)
odhad <- 48/160
D <- odhad - kvantil1 * sqrt((odhad * (1-odhad))/160)
H <- odhad + kvantil1 * sqrt((odhad * (1-odhad))/160)
c (D, H)
# b)
odhad * 8000
D <- D*8000
H <- H*8000
c (D,H)
# priklad 4
A <- c (0.14, 0.138, 0.143, 0.142, 0.144, 0.137)
B <- c (0.135, 0.140, 0.142, 0.136, 0.138)
alpha <- 0.05
kvantil <- qnorm (1 - alpha)
prumerA <- mean(A)
prumerB <- mean(B)
rozptylA <- 4e-6
rozptylB <- 9e-6
D <- (prumerA - prumerB) - (kvantil * sqrt((rozptylA/length(A)) + (rozptylB/length(B))))
#priklad 5
data <- read.csv (file = "selata.csv", header = TRUE, sep = ",", dec = ".")
str(data)
A <- data$prir1
B <- data$prir2
alpha <- 0.05
nA <- length(A)
nB <- length(B)
kvantil1 <- qt (1 - alpha/2, nA + nB)
prumerA <- mean(A)
prumerB <- mean(B)
vyberovyRozptyl1 <- sd(A)^2
vyberovyRozptyl2 <- sd(B)^2
vyberovyRozptyl12 <- sqrt(( ((nA-1)*vyberovyRozptyl1) + ((nB-1)*vyberovyRozptyl2) ) / (nA + nB - 2))
D <- ( (-kvantil1 * vyberovyRozptyl12) / sqrt( (nA*nB)/(nA+nB)) ) + (prumerA - prumerB)
H <- ( (kvantil1 * vyberovyRozptyl12) / sqrt( (nA*nB)/(nA+nB)) ) + (prumerA - prumerB)
c (D, H)
D <- (prumerA - prumerB) - (kvantil1 * vyberovyRozptyl12 * sqrt( (nA+nB)/(nA*nB)))
H <- (prumerA - prumerB) + (kvantil1 * vyberovyRozptyl12 * sqrt( (nA+nB)/(nA*nB)))
c (D, H) #nevychádza ale vzorec z prednášky ...why ?
#priklad 6
A <- c (3.26, 3.26, 3.27, 3.27)
B <- c (3.23, 3.27, 3.29, 3.29)
alpha <- 0.05
nA <- length(A)
nB <- length(B)
kvantil1 <- qf (1 - alpha, nA-1, nB-1)
vyberovyRozptyl1 <- sd(A)^2
vyberovyRozptyl2 <- sd(B)^2
vyberovyRozptyl12 <- sqrt(( ((nA-1)*vyberovyRozptyl1) + ((nB-1)*vyberovyRozptyl2) ) / (nA + nB - 2))
H <- vyberovyRozptyl1 * kvantil1 / vyberovyRozptyl2
sqrt(H)
# Priklad 7
data <- read.csv (file = "spotreba.csv", header = TRUE, sep = ";", dec = ",")
X <- data$spotreba
n <- length(X)
prumer <- mean (X)
odchylka <- sd (X)
rozptyl <- var (X)
# stredni hodnota
alpha <- 0.05
kvantil <- qt (1 - alpha/2, n - 1)
D <- prumer - kvantil * odchylka / sqrt(n)
H <- prumer + kvantil * odchylka / sqrt(n)
c (D, H)
# rozptyl
alpha <- 0.05
kvantil1 <- qchisq (1 - alpha/2, n - 1)
kvantil2 <- qchisq (alpha/2, n - 1)
D <- (n - 1) * rozptyl / kvantil1
H <- (n - 1) * rozptyl / kvantil2
c (D, H)
# Priklad 8
n <- 25
prumer <- 3118
odchylka <- 357
rozptyl = odchylka^2
alpha <- 0.05
kvantil1 <- qchisq (1 - alpha/2, n - 1)
kvantil2 <- qchisq (alpha/2, n - 1)
D <- sqrt( (n - 1) * rozptyl / kvantil1)
H <- sqrt( (n - 1) * rozptyl / kvantil2)
c (D, H)
# Priklad 9
data <- read.csv (file = "pevnost.csv", header = TRUE, sep = ";", dec = ",")
X <- data$pevnost
n <- length(X)
prumer <- mean (X)
odchylka <- sd (X)
rozptyl <- var (X)
alpha <- 0.05
kvantil <- qchisq (alpha, n - 1)
H <- (n - 1) * rozptyl / kvantil
H
# priklad 10 -- two-sided CI for the difference of means (pooled variance)
data <- read.csv(file = "SiO2.csv", header = TRUE, sep = ";", dec = ",")
str(data)
A <- subset(data, metoda == "A")$obsah
B <- subset(data, metoda == "B")$obsah
alpha <- 0.05
nA <- length(A)
nB <- length(B)
kvantil1 <- qt(1 - alpha/2, nA + nB - 2)  # t quantile, nA + nB - 2 df
prumerA <- mean(A)
prumerB <- mean(B)
vyberovyRozptyl1 <- var(A)  # sample variance of A (was sd(A)^2)
vyberovyRozptyl2 <- var(B)  # sample variance of B (was sd(B)^2)
# pooled standard deviation of the two samples
vyberovyRozptyl12 <- sqrt(( ((nA-1)*vyberovyRozptyl1) + ((nB-1)*vyberovyRozptyl2) ) / (nA + nB - 2))
# Why (prumerB - prumerA)?  This order simply yields the interval for
# mu_B - mu_A; using (prumerA - prumerB) would give the same interval with
# the signs flipped, i.e. the interval for mu_A - mu_B.  Neither is wrong.
# (The unused alpha/2 lower-tail quantile kvantil2 was removed.)
D <- ( (-kvantil1 * vyberovyRozptyl12) / sqrt( (nA*nB)/(nA+nB)) ) + (prumerB - prumerA)
H <- ( (kvantil1 * vyberovyRozptyl12) / sqrt( (nA*nB)/(nA+nB)) ) + (prumerB - prumerA)
c(D, H)
# Priklad 11 -- large-sample CI for the mean of Poisson-distributed counts.
# For Poisson data the variance equals the mean, hence sqrt(prumer) is the
# estimated sd below; the unused S <- sd(X) was removed.
alpha <- 0.05
data <- read.csv(file = "zakaznici.csv", header = TRUE, sep = ";", dec = ",")
str(data)
X <- data$zakaznici
pocet <- length(X)
prumer <- mean(X)
kvantil <- qnorm(1 - alpha/2)  # standard normal quantile (asymptotic CI)
polomer <- kvantil * sqrt(prumer) / sqrt(pocet)  # half-width of the interval
D <- prumer - polomer
H <- prumer + polomer
c(D, H)
# Priklad 12 -- monthly 95% confidence intervals for the mean daily number of
# bike rentals, computed per month with sapply and then plotted.
data <- read.csv (file = "kola.csv", header = TRUE, sep = ",", dec = ".")
# Loop via "sapply": the variable M (= month number) runs from 1 to 12.
# Each iteration returns 4 numbers: month number, lower bound of the mean,
# sample mean, and upper bound of the mean.
matice <- sapply (seq (1, 12), function (M) {
  X <- subset (data, mnth == M)$cnt
  n <- length (X)
  prumer <- mean (X)
  odchylka <- sd (X)
  alpha <- 0.05
  kvantil <- qnorm (1 - alpha / 2)  # large-sample (normal) quantile
  D <- prumer - kvantil * odchylka / sqrt (n)
  H <- prumer + kvantil * odchylka / sqrt (n)
  # the last expression of the function body is the vector of numbers to be
  # stored in the result matrix (sapply makes one column per month)
  return (c (mesic = M, D = D, prumer = prumer, H = H))
})
matice
# swap rows and columns with "t" (= matrix transpose) and build a data frame
tabulka <- data.frame (t (matice))
tabulka
plot (tabulka$mesic, tabulka$prumer, type = "b", lwd = 2, col = "red", ylim = c (1500, 6500), xlab = "mesic", ylab = "prumerny pocet zapujcek za den s 95% IS")
lines (tabulka$mesic, tabulka$D, type = "b", lty = 2, col = "green")
lines (tabulka$mesic, tabulka$H, type = "b", lty = 2, col = "blue")
| /Cviko 9 - intervalove odhady/cv-09.R | no_license | domino789/statistika | R | false | false | 5,859 | r | # Priklad 1
X <- c (0.31, 0.30, 0.29, 0.32)
n <- length (X)
prumer <- mean (X)
odchylka <- sd (X)
alpha <- 0.05
kvantil <- qchisq (1 - alpha, n - 1)
D <- sqrt (n - 1) * odchylka / sqrt (kvantil)
D
# Priklad 2
data <- read.csv (file = "intervaly.csv", header = TRUE, sep = ",", dec = ".")
stredy <- (data$dolni + data$horni) / 2
X <- rep (stredy, data$cetnost)
n <- sum (data$cetnost)
prumer <- mean (X)
odchylka <- sd (X)
rozptyl <- var (X)
# a)
alpha <- 0.05
kvantil <- qt (1 - alpha/2, n - 1)
D <- prumer - kvantil * odchylka / sqrt(n)
H <- prumer + kvantil * odchylka / sqrt(n)
c (D, H)
# b)
alpha <- 0.05
kvantil1 <- qchisq (1 - alpha/2, n - 1)
kvantil2 <- qchisq (alpha/2, n - 1)
D <- (n - 1) * rozptyl / kvantil1
H <- (n - 1) * rozptyl / kvantil2
c (D, H)
# Priklad 3
# a)
alpha <- 0.05
kvantil1 <- qnorm (1 - alpha/2)
odhad <- 48/160
D <- odhad - kvantil1 * sqrt((odhad * (1-odhad))/160)
H <- odhad + kvantil1 * sqrt((odhad * (1-odhad))/160)
c (D, H)
# b)
odhad * 8000
D <- D*8000
H <- H*8000
c (D,H)
# priklad 4
A <- c (0.14, 0.138, 0.143, 0.142, 0.144, 0.137)
B <- c (0.135, 0.140, 0.142, 0.136, 0.138)
alpha <- 0.05
kvantil <- qnorm (1 - alpha)
prumerA <- mean(A)
prumerB <- mean(B)
rozptylA <- 4e-6
rozptylB <- 9e-6
D <- (prumerA - prumerB) - (kvantil * sqrt((rozptylA/length(A)) + (rozptylB/length(B))))
#priklad 5
data <- read.csv (file = "selata.csv", header = TRUE, sep = ",", dec = ".")
str(data)
A <- data$prir1
B <- data$prir2
alpha <- 0.05
nA <- length(A)
nB <- length(B)
kvantil1 <- qt (1 - alpha/2, nA + nB)
prumerA <- mean(A)
prumerB <- mean(B)
vyberovyRozptyl1 <- sd(A)^2
vyberovyRozptyl2 <- sd(B)^2
vyberovyRozptyl12 <- sqrt(( ((nA-1)*vyberovyRozptyl1) + ((nB-1)*vyberovyRozptyl2) ) / (nA + nB - 2))
D <- ( (-kvantil1 * vyberovyRozptyl12) / sqrt( (nA*nB)/(nA+nB)) ) + (prumerA - prumerB)
H <- ( (kvantil1 * vyberovyRozptyl12) / sqrt( (nA*nB)/(nA+nB)) ) + (prumerA - prumerB)
c (D, H)
D <- (prumerA - prumerB) - (kvantil1 * vyberovyRozptyl12 * sqrt( (nA+nB)/(nA*nB)))
H <- (prumerA - prumerB) + (kvantil1 * vyberovyRozptyl12 * sqrt( (nA+nB)/(nA*nB)))
c (D, H) #nevychádza ale vzorec z prednášky ...why ?
#priklad 6
A <- c (3.26, 3.26, 3.27, 3.27)
B <- c (3.23, 3.27, 3.29, 3.29)
alpha <- 0.05
nA <- length(A)
nB <- length(B)
kvantil1 <- qf (1 - alpha, nA-1, nB-1)
vyberovyRozptyl1 <- sd(A)^2
vyberovyRozptyl2 <- sd(B)^2
vyberovyRozptyl12 <- sqrt(( ((nA-1)*vyberovyRozptyl1) + ((nB-1)*vyberovyRozptyl2) ) / (nA + nB - 2))
H <- vyberovyRozptyl1 * kvantil1 / vyberovyRozptyl2
sqrt(H)
# Priklad 7
data <- read.csv (file = "spotreba.csv", header = TRUE, sep = ";", dec = ",")
X <- data$spotreba
n <- length(X)
prumer <- mean (X)
odchylka <- sd (X)
rozptyl <- var (X)
# stredni hodnota
alpha <- 0.05
kvantil <- qt (1 - alpha/2, n - 1)
D <- prumer - kvantil * odchylka / sqrt(n)
H <- prumer + kvantil * odchylka / sqrt(n)
c (D, H)
# rozptyl
alpha <- 0.05
kvantil1 <- qchisq (1 - alpha/2, n - 1)
kvantil2 <- qchisq (alpha/2, n - 1)
D <- (n - 1) * rozptyl / kvantil1
H <- (n - 1) * rozptyl / kvantil2
c (D, H)
# Priklad 8
n <- 25
prumer <- 3118
odchylka <- 357
rozptyl = odchylka^2
alpha <- 0.05
kvantil1 <- qchisq (1 - alpha/2, n - 1)
kvantil2 <- qchisq (alpha/2, n - 1)
D <- sqrt( (n - 1) * rozptyl / kvantil1)
H <- sqrt( (n - 1) * rozptyl / kvantil2)
c (D, H)
# Priklad 9
data <- read.csv (file = "pevnost.csv", header = TRUE, sep = ";", dec = ",")
X <- data$pevnost
n <- length(X)
prumer <- mean (X)
odchylka <- sd (X)
rozptyl <- var (X)
alpha <- 0.05
kvantil <- qchisq (alpha, n - 1)
H <- (n - 1) * rozptyl / kvantil
H
# priklad 10
data <- read.csv (file = "SiO2.csv", header = TRUE, sep = ";", dec = ",")
str(data)
A <- subset(data, metoda == "A")$obsah
B <- subset(data, metoda == "B")$obsah
alpha <- 0.05
nA <- length(A)
nB <- length(B)
kvantil1 <- qt (1 - alpha/2, nA + nB -2)
kvantil2 <- qt (alpha/2, nA + nB -2)
prumerA <- mean(A)
prumerB <- mean(B)
vyberovyRozptyl1 <- sd(A)^2
vyberovyRozptyl2 <- sd(B)^2
vyberovyRozptyl12 <- sqrt(( ((nA-1)*vyberovyRozptyl1) + ((nB-1)*vyberovyRozptyl2) ) / (nA + nB - 2))
#TODO opytat sa preco musi byt prumer prehodeny, nie je A - B
D <- ( (-kvantil1 * vyberovyRozptyl12) / sqrt( (nA*nB)/(nA+nB)) ) + (prumerB - prumerA)
H <- ( (kvantil1 * vyberovyRozptyl12) / sqrt( (nA*nB)/(nA+nB)) ) + (prumerB - prumerA)
c (D, H)
# Priklad 11
alpha <- 0.05
data <- read.csv (file = "zakaznici.csv", header = TRUE, sep = ";", dec = ",")
str(data)
X <- data$zakaznici
pocet <- length(X)
prumer <- mean(X)
S <- sd(X)
kvantil <- qnorm(1-alpha/2)
D <- (-kvantil * sqrt(prumer) / sqrt(pocet)) + prumer
H <- (kvantil * sqrt(prumer) / sqrt(pocet)) + prumer
c (D, H)
# Priklad 12
data <- read.csv (file = "kola.csv", header = TRUE, sep = ",", dec = ".")
# cyklus pomoci funkce "sapply", kde se promenna M (= cislo mesice) postupne meni od 1 do 12
# v kazdem cyklu vratime 4 cisla: cislo mesice, dolni odhad stredni hodnoty, prumer, a horni odhad stredni hodnoty
matice <- sapply (seq (1, 12), function (M) {
X <- subset (data, mnth == M)$cnt
n <- length (X)
prumer <- mean (X)
odchylka <- sd (X)
alpha <- 0.05
kvantil <- qnorm (1 - alpha / 2)
D <- prumer - kvantil * odchylka / sqrt (n)
H <- prumer + kvantil * odchylka / sqrt (n)
# jako posledni prikaz tela funkce v cyklu se uvede vektor cisel, ktera se maji ulozit do matice vysledku
return (c (mesic = M, D = D, prumer = prumer, H = H))
})
matice
# prehodime jeste sloupce a radky funkci "t" (= transpozice matice) a vytvorime tabulku
tabulka <- data.frame (t (matice))
tabulka
plot (tabulka$mesic, tabulka$prumer, type = "b", lwd = 2, col = "red", ylim = c (1500, 6500), xlab = "mesic", ylab = "prumerny pocet zapujcek za den s 95% IS")
lines (tabulka$mesic, tabulka$D, type = "b", lty = 2, col = "green")
lines (tabulka$mesic, tabulka$H, type = "b", lty = 2, col = "blue")
|
#' Seattle 2000 Employment
#'
#' Employment and demographic information for the Seattle-Tacoma-Bellevue Metropolitan Statistical Area, WA (2000 Census data).
#'
#' Sf object, unprojected. EPSG 4326: WGS84.
#'
#' @format An sf data frame with 664 rows, 31 variables, and a geometry column:
#' \describe{
#' \item{ FIPS }{ FIPS Code }
#' \item{ MSA }{ MSA Name }
#' \item{ TOT_POP }{ Total population: Total }
#' \item{ POP_16 }{ Total population: Under 16 }
#' \item{ POP_65 }{ Total population: 65+ }
#' \item{ WHITE }{ Total population: Not Hispanic or Latino; White alone }
#' \item{ BLACK }{ Total population: Not Hispanic or Latino; Black or African American alone }
#' \item{ ASIAN }{ Total population: Not Hispanic or Latino; Asian alone }
#' \item{ HISP }{ Total population: Hispanic or Latino }
#' \item{ MULTI_RA }{ Total population: Hispanic or Latino }
#' \item{ MALES }{ Total population: Male }
#' \item{ FEMALES }{ Total population: Female }
#' \item{ MALE1664 }{ Total population: Male working age 16-64 }
#' \item{ FEM1664 }{ Total population: Female working age 16-64 }
#' \item{ EMPL16 }{ Workers 16 years and over: Total }
#' \item{ EMP_AWAY }{ Workers 16 years and over: Did not work at home }
#' \item{ EMP_HOME }{ Workers 16 years and over: Worked at home }
#' \item{ EMP_29 }{ Workers 16 years and over: Did not work at home; Travel time to work; Less than 30 minutes }
#' \item{ EMP_30 }{ Workers 16 years and over: Did not work at home; Travel time to work; 30 minutes or more }
#' \item{ EMP16_2 }{ Employed civilian population 16 years and over: Total }
#' \item{ EMP_MALE }{ Employed civilian population 16 years and over: Male }
#' \item{ EMP_FEM }{ Employed civilian population 16 years and over: Female }
#' \item{ OCC_MAN }{ Employed civilian population 16 years and over: Production occupations }
#' \item{ OCC_OFF1 }{ Employed civilian population 16 years and over: Office (001-219 except 020, 021) }
#' \item{ OCC_INFO }{ Employed civilian population 16 years and over: Computer and mathematical occupations }
#' \item{ HH_INC }{ Households: Median household income in 1999 }
#' \item{ POV_POP }{ Population for whom poverty status is determined: Total Population }
#' \item{ POV_TOT }{ Population for whom poverty status is determined: Income in 1999 below poverty level }
#' \item{ HSG_VAL }{ Owner-occupied housing units: Median value }
#' \item{ POLYID }{ Unique ID }
#' }
#' @source 2000 Census, Summary File 3. Available at \url{http://factfinder.census.gov)}.
#'
#' @examples
#' if (requireNamespace("sf", quietly = TRUE)) {
#' library(sf)
#' data(seattle1)
#' plot(seattle1["EMP_MALE"])
#' }
"seattle1"
| /R/seattle1.R | permissive | spatialanalysis/geodaData | R | false | false | 2,652 | r | #' Seattle 2000 Employment
#'
#' Employment and demographic information for the Seattle-Tacoma-Bellevue Metropolitan Statistical Area, WA (2000 Census data).
#'
#' Sf object, unprojected. EPSG 4326: WGS84.
#'
#' @format An sf data frame with 664 rows, 31 variables, and a geometry column:
#' \describe{
#' \item{ FIPS }{ FIPS Code }
#' \item{ MSA }{ MSA Name }
#' \item{ TOT_POP }{ Total population: Total }
#' \item{ POP_16 }{ Total population: Under 16 }
#' \item{ POP_65 }{ Total population: 65+ }
#' \item{ WHITE }{ Total population: Not Hispanic or Latino; White alone }
#' \item{ BLACK }{ Total population: Not Hispanic or Latino; Black or African American alone }
#' \item{ ASIAN }{ Total population: Not Hispanic or Latino; Asian alone }
#' \item{ HISP }{ Total population: Hispanic or Latino }
#' \item{ MULTI_RA }{ Total population: Hispanic or Latino }
#' \item{ MALES }{ Total population: Male }
#' \item{ FEMALES }{ Total population: Female }
#' \item{ MALE1664 }{ Total population: Male working age 16-64 }
#' \item{ FEM1664 }{ Total population: Female working age 16-64 }
#' \item{ EMPL16 }{ Workers 16 years and over: Total }
#' \item{ EMP_AWAY }{ Workers 16 years and over: Did not work at home }
#' \item{ EMP_HOME }{ Workers 16 years and over: Worked at home }
#' \item{ EMP_29 }{ Workers 16 years and over: Did not work at home; Travel time to work; Less than 30 minutes }
#' \item{ EMP_30 }{ Workers 16 years and over: Did not work at home; Travel time to work; 30 minutes or more }
#' \item{ EMP16_2 }{ Employed civilian population 16 years and over: Total }
#' \item{ EMP_MALE }{ Employed civilian population 16 years and over: Male }
#' \item{ EMP_FEM }{ Employed civilian population 16 years and over: Female }
#' \item{ OCC_MAN }{ Employed civilian population 16 years and over: Production occupations }
#' \item{ OCC_OFF1 }{ Employed civilian population 16 years and over: Office (001-219 except 020, 021) }
#' \item{ OCC_INFO }{ Employed civilian population 16 years and over: Computer and mathematical occupations }
#' \item{ HH_INC }{ Households: Median household income in 1999 }
#' \item{ POV_POP }{ Population for whom poverty status is determined: Total Population }
#' \item{ POV_TOT }{ Population for whom poverty status is determined: Income in 1999 below poverty level }
#' \item{ HSG_VAL }{ Owner-occupied housing units: Median value }
#' \item{ POLYID }{ Unique ID }
#' }
#' @source 2000 Census, Summary File 3. Available at \url{http://factfinder.census.gov)}.
#'
#' @examples
#' if (requireNamespace("sf", quietly = TRUE)) {
#' library(sf)
#' data(seattle1)
#' plot(seattle1["EMP_MALE"])
#' }
"seattle1"
|
#' ralgotools.
#'
#' @name ralgotools
#' @docType package
NULL
| /R/development/ralgotools-package.r | no_license | bfatemi/ralgotools | R | false | false | 63 | r | #' ralgotools.
#'
#' @name ralgotools
#' @docType package
NULL
|
#' @references{ Glatthorn, Jonas (2021). A spatially explicit index for tree
#' species or trait diversity at neighborhood and stand level. Ecological
#' Indicators, 130, 108073. https://doi.org/10.1016/j.ecolind.2021.108073.}
| /man-roxygen/ref_glatthorn_2021.R | no_license | JonasGlatthorn/APAtree | R | false | false | 227 | r | #' @references{ Glatthorn, Jonas (2021). A spatially explicit index for tree
#' species or trait diversity at neighborhood and stand level. Ecological
#' Indicators, 130, 108073. https://doi.org/10.1016/j.ecolind.2021.108073.}
|
\name{validation.ord}
\alias{validation.ord}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Validates the marginal specification of the ordinal variables}
\description{Checks whether the marginal specification of the ordinal part is valid and consistent.}
\usage{validation.ord(n.O, prop.list = NULL)}
\arguments{
\item{n.O}{Number of ordinal variables.}
\item{prop.list}{A list of probability vectors corresponding to each ordinal variable. The i-th element of prop.list is a vector of the cumulative probabilities defining the marginal
distribution of the i-th ordinal component of the multivariate variables. If the i-th ordinal variable has k categories, the i-th vector of the prop.list will contain
k-1 probability values. The k-th element is implicitly 1.}
}
\value{The function returns TRUE if no specification problem is encountered. Otherwise, it returns an error message.}
\examples{
n.O<-3
prop.list<-list(c(0.3,0.6),c(0.25,0.5,0.75),c(0.1,0.2,0.8,0.9))
validation.ord(n.O,prop.list)
\dontrun{
n.O<-3
validation.ord(n.O)
n.O<-NULL
prop.list<-list(c(0.3,0.6),c(0.25,0.5,0.75),c(0.1,0.2,0.8,0.9))
validation.ord(prop.list=prop.list)
n.O<--3
prop.list<-list(c(0.3,0.6),c(0.25,0.5,0.75),c(0.1,0.2,0.8,0.9))
validation.ord(-3,prop.list)
n.O<--0
prop.list<-list(c(0.3,0.6),c(0.25,0.5,0.75),c(0.1,0.2,0.8,0.9))
validation.ord(n.O,prop.list)
n.O<-5
prop.list<-list(c(0.3,0.6),c(0.25,0.5,0.75),c(0.1,0.2,0.8,0.9))
validation.ord(n.O,prop.list)
n.O<-3
prop.list<-list(c(0.3,0.6),c(0.25,0.5,-0.75),c(0.1,0.2,0.8,1.5))
validation.ord(n.O,prop.list)
n.O<-3
prop.list<-list(0.3,c(0.3,0.4),c(0.4,0.2,0.3))
validation.ord(n.O,prop.list)
}
}
| /man/validation.ord.Rd | no_license | cran/PoisBinOrd | R | false | false | 1,732 | rd | \name{validation.ord}
\alias{validation.ord}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Validates the marginal specification of the ordinal variables}
\description{Checks whether the marginal specification of the ordinal part is valid and consistent.}
\usage{validation.ord(n.O, prop.list = NULL)}
\arguments{
\item{n.O}{Number of ordinal variables.}
\item{prop.list}{A list of probability vectors corresponding to each ordinal variable. The i-th element of prop.list is a vector of the cumulative probabilities defining the marginal
distribution of the i-th ordinal component of the multivariate variables. If the i-th ordinal variable has k categories, the i-th vector of the prop.list will contain
k-1 probability values. The k-th element is implicitly 1.}
}
\value{The function returns TRUE if no specification problem is encountered. Otherwise, it returns an error message.}
\examples{
n.O<-3
prop.list<-list(c(0.3,0.6),c(0.25,0.5,0.75),c(0.1,0.2,0.8,0.9))
validation.ord(n.O,prop.list)
\dontrun{
n.O<-3
validation.ord(n.O)
n.O<-NULL
prop.list<-list(c(0.3,0.6),c(0.25,0.5,0.75),c(0.1,0.2,0.8,0.9))
validation.ord(prop.list=prop.list)
n.O<--3
prop.list<-list(c(0.3,0.6),c(0.25,0.5,0.75),c(0.1,0.2,0.8,0.9))
validation.ord(-3,prop.list)
n.O<--0
prop.list<-list(c(0.3,0.6),c(0.25,0.5,0.75),c(0.1,0.2,0.8,0.9))
validation.ord(n.O,prop.list)
n.O<-5
prop.list<-list(c(0.3,0.6),c(0.25,0.5,0.75),c(0.1,0.2,0.8,0.9))
validation.ord(n.O,prop.list)
n.O<-3
prop.list<-list(c(0.3,0.6),c(0.25,0.5,-0.75),c(0.1,0.2,0.8,1.5))
validation.ord(n.O,prop.list)
n.O<-3
prop.list<-list(0.3,c(0.3,0.4),c(0.4,0.2,0.3))
validation.ord(n.O,prop.list)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/flag_outliers.R
\name{flag_outliers}
\alias{flag_outliers}
\title{Flag outlier observations}
\usage{
flag_outliers(data, sd_for_outlier = 2, flag_by = "facility",
result = "outliers")
}
\arguments{
\item{data}{The ANC-RT dataset. The functions \link[ANCRTAdjust]{check_data}, \link[ANCRTAdjust]{data_clean} and \link[ANCRTAdjust]{mt_adjust} should have been run on the data to properly
prepare the data for use here. The dataset must have the following variables:
\itemize{
\item \code{faciluid}: Facility ID.
\item \code{time}: The time period over which the data was collected.
\item \code{n_clients}: The number of women from the specified facility, during the specified time period, that attended their first ANC visit.
\item \code{n_status_c}: The cleaned number of women from the specified facility, during the specified time period, that had their HIV status ascertained at their first ANC visit, either by testing or through previous knowledge
(generated using the \link[ANCRTAdjust]{data_clean} function).
\item \code{testpos_c}: The cleaned number of women from the specified facility, during the specified time period, that tested positive for HIV at their first ANC visit
(generated using the \link[ANCRTAdjust]{data_clean} function).
\item \code{knownpos_c}: The cleaned number of women from the specified facility, during the specified time period, that already knew that they were HIV-positive at their first ANC visit
(generated using the \link[ANCRTAdjust]{data_clean} function).
\item \code{testneg_c}: The cleaned number of women from the specified facility, during the specified time period, that tested negative for HIV at their first ANC visit
(generated using the \link[ANCRTAdjust]{data_clean} function).
\item \code{totpos_c}: The cleaned total number of positive HIV cases (generated using the \link[ANCRTAdjust]{data_clean} function).
\item \code{prv}: The HIV prevalence from the specified facility at the specified time period (generated using the \link[ANCRTAdjust]{mt_adjust} function).
\item \code{cov}: The HIV testing coverage from the specified facility at the specified time period (generated using the \link[ANCRTAdjust]{mt_adjust} function).
\item \code{snu1}: The subnational unit 1 (only required if results are to be flagged by snu1).
}}
\item{sd_for_outlier}{Standard deviation used to flag outliers (default is 2).}
\item{flag_by}{Options include:
\itemize{
\item "\code{facility}" compares each observation's value to their facility's mean value and flags the observations that
are greater than or less than 2 standard deviations from the facility mean.
\item "\code{snu1}" compares each observation's value to their sub national unit 1's mean value and flags the observations that
are greater than or less than 2 standard deviations from the snu1 mean.
\item "\code{country}" compares each observation's value to their country's mean value and flags the observations that
are greater than or less than 2 standard deviations from the country mean.
}}
\item{result}{Options include:
\itemize{
\item "\code{outliers}" returns a dataset including the observations that are considered to have an outlier value for any of:
\code{n_clients}, \code{n_status_c}, \code{testpos_c}, \code{testneg_c}, \code{knownpos_c}, \code{totpos_c}, \code{prv} or \code{cov}. The values
for each of the eight variables are only reported if they are considered an outlier. If they are not considered an outlier, they are reported
as "NA". For identification purposes \code{faciluid} and \code{time} are also included.
\item "\code{data}" returns the complete dataset (that was originally input into the function) with the following additional variables:
\itemize{
\item \code{flag_n_clients}: A value of 1 indicates that the \code{n_clients} value is considered an outlier and a value of 0 indicates that it is not considered an outlier.
\item \code{flag_n_status_c}: A value of 1 indicates that the \code{n_status_c} value is considered an outlier and a value of 0 indicates that it is not considered an outlier.
\item \code{flag_testpos_c}: A value of 1 indicates that the \code{testpos_c} value is considered an outlier and a value of 0 indicates that it is not considered an outlier.
\item \code{flag_testneg_c}: A value of 1 indicates that the \code{testneg_c} value is considered an outlier and a value of 0 indicates that it is not considered an outlier.
\item \code{flag_knownpos_c}: A value of 1 indicates that the \code{knownpos_c} value is considered an outlier and a value of 0 indicates that it is not considered an outlier.
\item \code{flag_totpos_c}: A value of 1 indicates that the \code{totpos_c} value is considered an outlier and a value of 0 indicates that it is not considered an outlier.
\item \code{flag_prv}: A value of 1 indicates that the \code{prv} value is considered an outlier and a value of 0 indicates that it is not considered an outlier.
\item \code{flag_cov}: A value of 1 indicates that the \code{cov} value is considered an outlier and a value of 0 indicates that it is not considered an outlier.
}
}}
}
\value{
A dataset including either the flagged observations only or the full, original dataset with additional variables indicating flagged observations, according to user inputs.
}
\description{
Flags observations that are considered outliers
}
\details{
This function has been developed to flag outlier observations for the following variables: \code{n_clients}, \code{n_status_c}, \code{testpos_c}, \code{testneg_c}, \code{knownpos_c},
\code{totpos_c}, \code{prv} and \code{cov}. Outliers are defined as 2 standard deviations greater than or less than the mean value.
}
\author{
Mathieu Maheu-Giroux
Brittany Blouin
}
| /man/flag_outliers.Rd | permissive | brittanyblouin/ANCRTAdjust | R | false | true | 5,920 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/flag_outliers.R
\name{flag_outliers}
\alias{flag_outliers}
\title{Flag outlier observations}
\usage{
flag_outliers(data, sd_for_outlier = 2, flag_by = "facility",
result = "outliers")
}
\arguments{
\item{data}{The ANC-RT dataset. The functions \link[ANCRTAdjust]{check_data}, \link[ANCRTAdjust]{data_clean} and \link[ANCRTAdjust]{mt_adjust} should have been run on the data to properly
prepare the data for use here. The dataset must have the following variables:
\itemize{
\item \code{faciluid}: Facility ID.
\item \code{time}: The time period over which the data was collected.
\item \code{n_clients}: The number of women from the specified facility, during the specified time period, that attended their first ANC visit.
\item \code{n_status_c}: The cleaned number of women from the specified facility, during the specified time period, that had their HIV status ascertained at their first ANC visit, either by testing or through previous knowledge
(generated using the \link[ANCRTAdjust]{data_clean} function).
\item \code{testpos_c}: The cleaned number of women from the specified facility, during the specified time period, that tested positive for HIV at their first ANC visit
(generated using the \link[ANCRTAdjust]{data_clean} function).
\item \code{knownpos_c}: The cleaned number of women from the specified facility, during the specified time period, that already knew that they were HIV-positive at their first ANC visit
(generated using the \link[ANCRTAdjust]{data_clean} function).
\item \code{testneg_c}: The cleaned number of women from the specified facility, during the specified time period, that tested negative for HIV at their first ANC visit
(generated using the \link[ANCRTAdjust]{data_clean} function).
\item \code{totpos_c}: The cleaned total number of positive HIV cases (generated using the \link[ANCRTAdjust]{data_clean} function).
\item \code{prv}: The HIV prevalence from the specified facility at the specified time period (generated using the \link[ANCRTAdjust]{mt_adjust} function).
\item \code{cov}: The HIV testing coverage from the specified facility at the specified time period (generated using the \link[ANCRTAdjust]{mt_adjust} function).
\item \code{snu1}: The subnational unit 1 (only required if results are to be flagged by snu1).
}}
\item{sd_for_outlier}{Standard deviation used to flag outliers (default is 2).}
\item{flag_by}{Options include:
\itemize{
\item "\code{facility}" compares each observation's value to their facility's mean value and flags the observations that
are greater than or less than 2 standard deviations from the facility mean.
\item "\code{snu1}" compares each observation's value to their sub national unit 1's mean value and flags the observations that
are greater than or less than 2 standard deviations from the snu1 mean.
\item "\code{country}" compares each observation's value to their country's mean value and flags the observations that
are greater than or less than 2 standard deviations from the country mean.
}}
\item{result}{Options include:
\itemize{
\item "\code{outliers}" returns a dataset including the observations that are considered to have an outlier value for any of:
\code{n_clients}, \code{n_status_c}, \code{testpos_c}, \code{testneg_c}, \code{knownpos_c}, \code{totpos_c}, \code{prv} or \code{cov}. The values
for each of the eight variables are only reported if they are considered an outlier. If they are not considered an outlier, they are reported
as "NA". For identification purposes \code{faciluid} and \code{time} are also included.
\item "\code{data}" returns the complete dataset (that was originally input into the function) with the following additional variables:
\itemize{
\item \code{flag_n_clients}: A value of 1 indicates that the \code{n_clients} value is considered an outlier and a value of 0 indicates that it is not considered an outlier.
\item \code{flag_n_status_c}: A value of 1 indicates that the \code{n_status_c} value is considered an outlier and a value of 0 indicates that it is not considered an outlier.
\item \code{flag_testpos_c}: A value of 1 indicates that the \code{testpos_c} value is considered an outlier and a value of 0 indicates that it is not considered an outlier.
\item \code{flag_testneg_c}: A value of 1 indicates that the \code{testneg_c} value is considered an outlier and a value of 0 indicates that it is not considered an outlier.
\item \code{flag_knownpos_c}: A value of 1 indicates that the \code{knownpos_c} value is considered an outlier and a value of 0 indicates that it is not considered an outlier.
\item \code{flag_totpos_c}: A value of 1 indicates that the \code{totpos_c} value is considered an outlier and a value of 0 indicates that it is not considered an outlier.
\item \code{flag_prv}: A value of 1 indicates that the \code{prv} value is considered an outlier and a value of 0 indicates that it is not considered an outlier.
\item \code{flag_cov}: A value of 1 indicates that the \code{cov} value is considered an outlier and a value of 0 indicates that it is not considered an outlier.
}
}}
}
\value{
A dataset including either the flagged observations only or the full, original dataset with additional variables indicating flagged observations, according to user inputs.
}
\description{
Flags observations that are considered outliers
}
\details{
This function has been developed to flag outlier observations for the following variables: \code{n_clients}, \code{n_status_c}, \code{testpos_c}, \code{testneg_c}, \code{knownpos_c},
\code{totpos_c}, \code{prv} and \code{cov}. Outliers are defined as 2 standard deviations greater than or less than the mean value.
}
\author{
Mathieu Maheu-Giroux
Brittany Blouin
}
|
## The following function creates a "matrix" object that
## can cache its inverese.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function (y){
x <<- y
inv <<- NULL
}
get <- function() x
setInverse <- function(solveM) inv <<- solveM
getInverse <- function() inv
list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above.
cacheSolve <- function(x, ...) {
inv <- x$getInverse()
if(!is.null(inv)){
message("getting cached data")
return(inv)
}
mat <- x$get()
inv <- solve(mat)
x$setInverse(inv)
inv
}
| /cacheMatrix.R | no_license | tkollah44/Lexical-Scoping-R-Programing- | R | false | false | 689 | r | ## The following function creates a "matrix" object that
## can cache its inverese.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function (y){
x <<- y
inv <<- NULL
}
get <- function() x
setInverse <- function(solveM) inv <<- solveM
getInverse <- function() inv
list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above.
cacheSolve <- function(x, ...) {
inv <- x$getInverse()
if(!is.null(inv)){
message("getting cached data")
return(inv)
}
mat <- x$get()
inv <- solve(mat)
x$setInverse(inv)
inv
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/tbl_json.r
\name{tbl_json}
\alias{as.tbl_json}
\alias{as.tbl_json.character}
\alias{as.tbl_json.data.frame}
\alias{as.tbl_json.tbl_json}
\alias{is.tbl_json}
\alias{tbl_json}
\title{Combines structured JSON (as a data.frame) with remaining JSON}
\usage{
tbl_json(df, json.list, drop.null.json = FALSE)
as.tbl_json(x, ...)
\method{as.tbl_json}{tbl_json}(x, ...)
\method{as.tbl_json}{character}(x, ...)
\method{as.tbl_json}{data.frame}(x, json.column, ...)
is.tbl_json(x)
}
\arguments{
\item{df}{data.frame}
\item{json.list}{list of json lists parsed with fromJSON}
\item{drop.null.json}{drop NULL json entries from data.frame and json}
\item{x}{character vector of json}
\item{...}{other arguments}
\item{json.column}{the name of the JSON column of data in x, if x is a data.frame}
\item{x}{data.frame that has a column of JSON data}
}
\description{
Combines structured JSON (as a data.frame) with remaining JSON
Note that json.list must have the same length as nrow(df), and if json.list
has any NULL elements, the corresponding rows will be removed from df. Also
note that "..JSON" is a reserved column name used internally for filtering
tbl_json objects, and so is not allowed in the data.frame names.
Turns a character vector into a tbl_json object
Turns a data.frame into a tbl_json object
}
| /man/tbl_json.Rd | permissive | abresler/tidyjson | R | false | false | 1,396 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/tbl_json.r
\name{tbl_json}
\alias{as.tbl_json}
\alias{as.tbl_json.character}
\alias{as.tbl_json.data.frame}
\alias{as.tbl_json.tbl_json}
\alias{is.tbl_json}
\alias{tbl_json}
\title{Combines structured JSON (as a data.frame) with remaining JSON}
\usage{
tbl_json(df, json.list, drop.null.json = FALSE)
as.tbl_json(x, ...)
\method{as.tbl_json}{tbl_json}(x, ...)
\method{as.tbl_json}{character}(x, ...)
\method{as.tbl_json}{data.frame}(x, json.column, ...)
is.tbl_json(x)
}
\arguments{
\item{df}{data.frame}
\item{json.list}{list of json lists parsed with fromJSON}
\item{drop.null.json}{drop NULL json entries from data.frame and json}
\item{x}{character vector of json}
\item{...}{other arguments}
\item{json.column}{the name of the JSON column of data in x, if x is a data.frame}
\item{x}{data.frame that has a column of JSON data}
}
\description{
Combines structured JSON (as a data.frame) with remaining JSON
Note that json.list must have the same length as nrow(df), and if json.list
has any NULL elements, the corresponding rows will be removed from df. Also
note that "..JSON" is a reserved column name used internally for filtering
tbl_json objects, and so is not allowed in the data.frame names.
Turns a character vector into a tbl_json object
Turns a data.frame into a tbl_json object
}
|
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = c(3.2667689008931e+187, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.57528333031318e+82, 8.96970810842156e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902400.06, 6.68889883968795e+51, -6.96018102688475e-145, -5.04058672304916e-308, -3.52614199720469e+43, -4.11939976291847e+154, -3.49441916913348e+62, 8465182267292623, 6.93341938385698e-05, 2.21633295806634e+76, 4.26726477362633e+139, -3.50995023347733e+38, 7.69395670445668e+35, -7.32003158046006e+211))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) | /meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615848366-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 908 | r | testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = c(3.2667689008931e+187, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.57528333031318e+82, 8.96970810842156e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902400.06, 6.68889883968795e+51, -6.96018102688475e-145, -5.04058672304916e-308, -3.52614199720469e+43, -4.11939976291847e+154, -3.49441916913348e+62, 8465182267292623, 6.93341938385698e-05, 2.21633295806634e+76, 4.26726477362633e+139, -3.50995023347733e+38, 7.69395670445668e+35, -7.32003158046006e+211))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
library(testthat)
expected_metadata_column_count <- 9L #This is a property of the function, and not the input dataset.
test_that("Dataset: InsectSprays", {
#Declare the input.
d_observed <- datasets::InsectSprays
#Declare the expected values.
expected_metadata <- structure(list(variable_name = c("count", "spray"), remark = c("",
""), class = c("numeric", "factor"), should_graph = c(TRUE, TRUE
), graph_function = c("histogram_continuous", "histogram_discrete"
), x_label_format = c("scales::comma", "scales::comma"), bin_width = c(1, 1),
bin_start = c(0, 1), rounding_digits = c(1, 1)), .Names = c("variable_name",
"remark", "class", "should_graph", "graph_function", "x_label_format",
"bin_width", "bin_start", "rounding_digits"), row.names = c(NA,
-2L), class = "data.frame")
#Run the function
returned_metadata <- create_manifest_explore_univariate(d_observed, write_to_disk=FALSE) #dput(returned_metadata)
#Compare the returned & expected values.
expect_equal(ncol(expected_metadata), expected_metadata_column_count, label="The number of metadata columns should be correct.")
expect_equal(nrow(expected_metadata), ncol(d_observed), label="The number of metadata rows should equal the number of rows in d_observed.")
expect_equal(returned_metadata, expected=expected_metadata, label="The returned data.frame should be correct", ignore_attr = TRUE) # dput(returned_object$data)
})
test_that("Dataset: freeny", {
#Declare the input.
d_observed <- datasets::freeny
#Declare the expected values.
expected_metadata <- structure(list(variable_name = c("y", "lag.quarterly.revenue",
"price.index", "income.level", "market.potential"), remark = c("",
"", "", "", ""), class = c("ts", "numeric", "numeric", "numeric",
"numeric"), should_graph = c(TRUE, TRUE, TRUE, TRUE, TRUE), graph_function = c("histogram_generic",
"histogram_continuous", "histogram_continuous", "histogram_continuous",
"histogram_continuous"), x_label_format = c("scales::comma", "scales::comma",
"scales::comma", "scales::comma", "scales::comma"), bin_width = c(1, 0.0500000000000007,
0.0199999999999996, 0.00999999999999979, 0.00500000000000078),
bin_start = c(1, 8.75, 4.26, 5.82, 12.965), rounding_digits = c(1,
6, 6, 6, 6)), .Names = c("variable_name", "remark", "class",
"should_graph", "graph_function", "x_label_format", "bin_width",
"bin_start", "rounding_digits"), row.names = c(NA, -5L), class = "data.frame")
#Run the function
returned_metadata <- create_manifest_explore_univariate(d_observed, write_to_disk=FALSE) #dput(returned_metadata)
#Compare the returned & expected values.
expect_equal(ncol(expected_metadata), expected_metadata_column_count, label="The number of metadata columns should be correct.")
expect_equal(nrow(expected_metadata), ncol(d_observed), label="The number of metadata rows should equal the number of rows in d_observed.")
expect_equal(returned_metadata, expected=expected_metadata, label="The returned data.frame should be correct", ignore_attr = TRUE) # dput(returned_object$data)
})
# test_that("InsectSprays2", {
#
# expected_data <- structure(list(variable_name = structure(1:2, .Label = c("countDDDDD",
# "spray"), class = "factor"), class = structure(c(2L, 1L), .Label = c("factor",
# "numeric"), class = "factor"), should_graph = c(TRUE, TRUE),
# graph_function = structure(1:2, .Label = c("histogram_continuous",
# "histogram_discrete"), class = "factor"), x_label_format = structure(c(1L,
# 1L), .Label = "scales::comma", class = "factor"), remark = structure(c(1L,
# 1L), class = "factor", .Label = "")), .Names = c("variable_name",
# "class", "should_graph", "graph_function", "x_label_format",
# "remark"), row.names = c("count", "spray"), class = "data.frame")
#
# returned_object <- create_manifest_explore_univariate(datasets::InsectSprays, write_to_disk=FALSE) #dput(returned_object)
#
#
# expect_equal(returned_object, expected=expected_data, label="The returned data.frame should be correct", ignore_attr = TRUE) # dput(returned_object$data)
#
# })
rm(expected_metadata_column_count)
| /tests/testthat/test-create-manifest-explore-univariate.R | permissive | Melinae/TabularManifest | R | false | false | 7,074 | r | library(testthat)
expected_metadata_column_count <- 9L #This is a property of the function, and not the input dataset.
test_that("Dataset: InsectSprays", {
#Declare the input.
d_observed <- datasets::InsectSprays
#Declare the expected values.
expected_metadata <- structure(list(variable_name = c("count", "spray"), remark = c("",
""), class = c("numeric", "factor"), should_graph = c(TRUE, TRUE
), graph_function = c("histogram_continuous", "histogram_discrete"
), x_label_format = c("scales::comma", "scales::comma"), bin_width = c(1, 1),
bin_start = c(0, 1), rounding_digits = c(1, 1)), .Names = c("variable_name",
"remark", "class", "should_graph", "graph_function", "x_label_format",
"bin_width", "bin_start", "rounding_digits"), row.names = c(NA,
-2L), class = "data.frame")
#Run the function
returned_metadata <- create_manifest_explore_univariate(d_observed, write_to_disk=FALSE) #dput(returned_metadata)
#Compare the returned & expected values.
expect_equal(ncol(expected_metadata), expected_metadata_column_count, label="The number of metadata columns should be correct.")
expect_equal(nrow(expected_metadata), ncol(d_observed), label="The number of metadata rows should equal the number of rows in d_observed.")
expect_equal(returned_metadata, expected=expected_metadata, label="The returned data.frame should be correct", ignore_attr = TRUE) # dput(returned_object$data)
})
test_that("Dataset: freeny", {
#Declare the input.
d_observed <- datasets::freeny
#Declare the expected values.
expected_metadata <- structure(list(variable_name = c("y", "lag.quarterly.revenue",
"price.index", "income.level", "market.potential"), remark = c("",
"", "", "", ""), class = c("ts", "numeric", "numeric", "numeric",
"numeric"), should_graph = c(TRUE, TRUE, TRUE, TRUE, TRUE), graph_function = c("histogram_generic",
"histogram_continuous", "histogram_continuous", "histogram_continuous",
"histogram_continuous"), x_label_format = c("scales::comma", "scales::comma",
"scales::comma", "scales::comma", "scales::comma"), bin_width = c(1, 0.0500000000000007,
0.0199999999999996, 0.00999999999999979, 0.00500000000000078),
bin_start = c(1, 8.75, 4.26, 5.82, 12.965), rounding_digits = c(1,
6, 6, 6, 6)), .Names = c("variable_name", "remark", "class",
"should_graph", "graph_function", "x_label_format", "bin_width",
"bin_start", "rounding_digits"), row.names = c(NA, -5L), class = "data.frame")
#Run the function
returned_metadata <- create_manifest_explore_univariate(d_observed, write_to_disk=FALSE) #dput(returned_metadata)
#Compare the returned & expected values.
expect_equal(ncol(expected_metadata), expected_metadata_column_count, label="The number of metadata columns should be correct.")
expect_equal(nrow(expected_metadata), ncol(d_observed), label="The number of metadata rows should equal the number of rows in d_observed.")
expect_equal(returned_metadata, expected=expected_metadata, label="The returned data.frame should be correct", ignore_attr = TRUE) # dput(returned_object$data)
})
# test_that("InsectSprays2", {
#
# expected_data <- structure(list(variable_name = structure(1:2, .Label = c("countDDDDD",
# "spray"), class = "factor"), class = structure(c(2L, 1L), .Label = c("factor",
# "numeric"), class = "factor"), should_graph = c(TRUE, TRUE),
# graph_function = structure(1:2, .Label = c("histogram_continuous",
# "histogram_discrete"), class = "factor"), x_label_format = structure(c(1L,
# 1L), .Label = "scales::comma", class = "factor"), remark = structure(c(1L,
# 1L), class = "factor", .Label = "")), .Names = c("variable_name",
# "class", "should_graph", "graph_function", "x_label_format",
# "remark"), row.names = c("count", "spray"), class = "data.frame")
#
# returned_object <- create_manifest_explore_univariate(datasets::InsectSprays, write_to_disk=FALSE) #dput(returned_object)
#
#
# expect_equal(returned_object, expected=expected_data, label="The returned data.frame should be correct", ignore_attr = TRUE) # dput(returned_object$data)
#
# })
rm(expected_metadata_column_count)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{compile_models}
\alias{compile_models}
\title{Compile a tree model}
\usage{
compile_models(model = c("effect_diffusion_tree", "simple_hierarchical"))
}
\arguments{
\item{model}{The model to compile, see the
default args for a list of available models.}
}
\value{
A \link{stan_model} object
}
\description{
Compile one of the models available in this
package.
}
| /man/compile_models.Rd | no_license | cfhammill/hierarchyTrees | R | false | true | 453 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{compile_models}
\alias{compile_models}
\title{Compile a tree model}
\usage{
compile_models(model = c("effect_diffusion_tree", "simple_hierarchical"))
}
\arguments{
\item{model}{The model to compile, see the
default args for a list of available models.}
}
\value{
A \link{stan_model} object
}
\description{
Compile one of the models available in this
package.
}
|
#install.packages("shinyWidgets")
#install.packages("devtools")
#devtools::install_github("dreamRs/shinyWidgets")
#shinyWidgets::shinyWidgetsGallery()
library(shiny)
library(devtools)
# Define UI ----
ui <- fluidPage(
# Main title
titlePanel("Well Subsetting"),
# Select multiple wells
# needs up to date well names
selectInput("select", label = h3("Select Well(s)"),
choices = list("1" = 1, "2" = 2, "3" = 3), multiple = TRUE),
# Date selection
#Select whether user wants single date or range to choose from
selectInput('range', "Single Date or Range",
choices = c("Single Date", "Date Range"),
selected = "Single Date"),
#if user chose single date, show a single date input box
conditionalPanel(condition = "input.range == 'Single Date'",
dateInput("dRange", "Date:", value = "2010-08-01",
format = "mm/dd/yy")),
#if user chose date range, show a date range input box
conditionalPanel(condition = "input.range == 'Date Range'",
dateRangeInput("dRange", "Date Range:", start = "2010-08-01", end = "2013-01-10",
format = "mm/dd/yy")),
# Stream Flow Button
checkboxInput("checkbox", label = "View Stream Flow Data", value = FALSE),
# Precipitation Slider
sliderInput("slider", label = h3("Precipitation Range (units)"), min = 0, max = 100, value = c(20, 50)),
# plot and ability to zoom into plot (DO NOT TOUCH plz - katie)
fluidRow(column(width = 10, offset = 1, class = "well",
h4("To zoom: Click and drag box, then double click. Double click plot to zoom back out."),
plotOutput("wellplot",
dblclick = "plot1_dblclick",
brush = brushOpts(
id = "plot1_brush",
resetOnNew = TRUE
)))
)
)
# Define server logic ----
server <- function(input, output) {
#################### *** CHANGE THIS *** ####################
setwd("C:/Users/Katie Dunlap/Documents/EI Capstone/")
#############################################################
# read in and clean data
well_data <- read_csv("N3.txt")
subset <- well_data %>%
filter(date >= "2012-03-01" & date <= "2012-06-01") %>%
select(date, level)
subset$level[subset$level > 162] <- NA # is.na would have worked
subset$level <- subset$level - 94.5
# makes range data for slider tool
ranges <- reactiveValues(x = as.POSIXct(c(start = "2010-08-01", end = "2013-01-10")))
maxrange <- reactiveValues(x = as.POSIXct(c(start = "2010-08-01", end = "2013-01-10")))
# plot (DO NOT TOUCH plz - katie)
output$wellplot <- renderPlot({
# ID = well number
#ID <- strsplit(input$wells, " ")[[1]]
# filters graph by wells selected
#wells <- filter(well_data, Well == ID)
start <- input$dRange[1]
end <- input$dRange[2]
xmin <- input$plot_brush$xmin
xmax <- input$plot_brush$xmax
ggplot(data = subset, mapping = aes(x = date, y = level))+
geom_line()+
scale_y_reverse()+
ylab("Water Table Depth (cm)")+
xlab("Date") +
coord_cartesian(xlim = as.POSIXct(ranges$x, origin = "1970-01-01"), expand = FALSE)+
theme_classic()
})
# ability to zoom into plot (DO NOT TOUCH plz - katie)
observeEvent(input$plot1_dblclick,
{
brush <- input$plot1_brush
if (!is.null(brush))
{
ranges$x <- c(brush$xmin, brush$xmax)
#input$dRange <- ranges$x
} else {
ranges$x <- maxrange$x
}
})
}
# Run the app ----
shinyApp(ui = ui, server = server)
| /Query_Group/TEST_app.R | no_license | rmelton13/EI_Capstone_S20 | R | false | false | 3,849 | r | #install.packages("shinyWidgets")
#install.packages("devtools")
#devtools::install_github("dreamRs/shinyWidgets")
#shinyWidgets::shinyWidgetsGallery()
library(shiny)
library(devtools)
# Define UI ----
ui <- fluidPage(
# Main title
titlePanel("Well Subsetting"),
# Select multiple wells
# needs up to date well names
selectInput("select", label = h3("Select Well(s)"),
choices = list("1" = 1, "2" = 2, "3" = 3), multiple = TRUE),
# Date selection
#Select whether user wants single date or range to choose from
selectInput('range', "Single Date or Range",
choices = c("Single Date", "Date Range"),
selected = "Single Date"),
#if user chose single date, show a single date input box
conditionalPanel(condition = "input.range == 'Single Date'",
dateInput("dRange", "Date:", value = "2010-08-01",
format = "mm/dd/yy")),
#if user chose date range, show a date range input box
conditionalPanel(condition = "input.range == 'Date Range'",
dateRangeInput("dRange", "Date Range:", start = "2010-08-01", end = "2013-01-10",
format = "mm/dd/yy")),
# Stream Flow Button
checkboxInput("checkbox", label = "View Stream Flow Data", value = FALSE),
# Precipitation Slider
sliderInput("slider", label = h3("Precipitation Range (units)"), min = 0, max = 100, value = c(20, 50)),
# plot and ability to zoom into plot (DO NOT TOUCH plz - katie)
fluidRow(column(width = 10, offset = 1, class = "well",
h4("To zoom: Click and drag box, then double click. Double click plot to zoom back out."),
plotOutput("wellplot",
dblclick = "plot1_dblclick",
brush = brushOpts(
id = "plot1_brush",
resetOnNew = TRUE
)))
)
)
# Define server logic ----
server <- function(input, output) {
#################### *** CHANGE THIS *** ####################
setwd("C:/Users/Katie Dunlap/Documents/EI Capstone/")
#############################################################
# read in and clean data
well_data <- read_csv("N3.txt")
subset <- well_data %>%
filter(date >= "2012-03-01" & date <= "2012-06-01") %>%
select(date, level)
subset$level[subset$level > 162] <- NA # is.na would have worked
subset$level <- subset$level - 94.5
# makes range data for slider tool
ranges <- reactiveValues(x = as.POSIXct(c(start = "2010-08-01", end = "2013-01-10")))
maxrange <- reactiveValues(x = as.POSIXct(c(start = "2010-08-01", end = "2013-01-10")))
# plot (DO NOT TOUCH plz - katie)
output$wellplot <- renderPlot({
# ID = well number
#ID <- strsplit(input$wells, " ")[[1]]
# filters graph by wells selected
#wells <- filter(well_data, Well == ID)
start <- input$dRange[1]
end <- input$dRange[2]
xmin <- input$plot_brush$xmin
xmax <- input$plot_brush$xmax
ggplot(data = subset, mapping = aes(x = date, y = level))+
geom_line()+
scale_y_reverse()+
ylab("Water Table Depth (cm)")+
xlab("Date") +
coord_cartesian(xlim = as.POSIXct(ranges$x, origin = "1970-01-01"), expand = FALSE)+
theme_classic()
})
# ability to zoom into plot (DO NOT TOUCH plz - katie)
observeEvent(input$plot1_dblclick,
{
brush <- input$plot1_brush
if (!is.null(brush))
{
ranges$x <- c(brush$xmin, brush$xmax)
#input$dRange <- ranges$x
} else {
ranges$x <- maxrange$x
}
})
}
# Run the app ----
shinyApp(ui = ui, server = server)
|
# Title : TODO
# Objective : TODO
# Created by: pedro
# Created on: 18/11/2020
library('quantmod');
library('tseries');
s_arma<- read.csv("s_arma.csv")
attach(s_arma)
arima(x,c(1,0,3), fixed=c(NA,0,0,NA,NA))
#arma(x, order = c(1,3))7
fit = arma(x, lag = list(ar = 1, ma = c(3)))
fit
acf(fit$residuals, na.action = na.omit) | /eletivas/cmc11/entrega07/entrega01.R | permissive | alvesouza/Comp-semestre-4 | R | false | false | 328 | r | # Title : TODO
# Objective : TODO
# Created by: pedro
# Created on: 18/11/2020
library('quantmod');
library('tseries');
s_arma<- read.csv("s_arma.csv")
attach(s_arma)
arima(x,c(1,0,3), fixed=c(NA,0,0,NA,NA))
#arma(x, order = c(1,3))7
fit = arma(x, lag = list(ar = 1, ma = c(3)))
fit
acf(fit$residuals, na.action = na.omit) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gather_results.R
\name{gather_results}
\alias{gather_results}
\title{Gather enrichment results from \code{celltype_associations_pipeline}}
\usage{
gather_results(
MAGMA_results,
level = 2,
dataset_name = NULL,
species = "mouse",
filetype = "ctAssocMerged",
fdr_thresh = NULL,
save_dir = NULL
)
}
\description{
Gather enrichment results from \code{celltype_associations_pipeline}
}
\examples{
\dontrun{
library(MAGMA.Celltyping)
local_files <- import_magma_files(download_dir=".")
#' magma_dirs <- unique(dirname(local_files))
res <- celltype_associations_pipeline(ctd=ewceData::ctd(),
ctd_name="Zeisel2018",
magma_dirs=magma_dirs,
genome_ref_path="~/Downloads/g1000_eur/g1000_eur")
merged_res <- gather_results(res)
}
}
| /man/gather_results.Rd | no_license | obada-alzoubi/MAGMA_Celltyping | R | false | true | 930 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gather_results.R
\name{gather_results}
\alias{gather_results}
\title{Gather enrichment results from \code{celltype_associations_pipeline}}
\usage{
gather_results(
MAGMA_results,
level = 2,
dataset_name = NULL,
species = "mouse",
filetype = "ctAssocMerged",
fdr_thresh = NULL,
save_dir = NULL
)
}
\description{
Gather enrichment results from \code{celltype_associations_pipeline}
}
\examples{
\dontrun{
library(MAGMA.Celltyping)
local_files <- import_magma_files(download_dir=".")
#' magma_dirs <- unique(dirname(local_files))
res <- celltype_associations_pipeline(ctd=ewceData::ctd(),
ctd_name="Zeisel2018",
magma_dirs=magma_dirs,
genome_ref_path="~/Downloads/g1000_eur/g1000_eur")
merged_res <- gather_results(res)
}
}
|
server <- function(input, output) {
observeEvent(input$toggle_shared_options, {
toggle("shared_filter_panel")
})
unit_factor <- reactive({
available_units %>%
filter(prefix == input$unit_prefix) %>%
pull(factor)
})
assign("unit_factor", unit_factor, envir = .GlobalEnv)
all_plots <- names(plots)
# for each plot in plots[] produce plots of the namescheme plot_(data_name)_(average/by_iterations)
map(all_plots, function(plot_name){
# get average of all iterations plot
observe({
output$selected_single_plot <- renderPlot(
expr = {get_plot_filtered(input$single_plot_selected, input, input$iteration_average)},
height = input$selected_single_plot_height)
})
output$selected_single_plot_title <- renderText({
input$single_plot_selected %>%
variable_name_to_title()
})
# # get by iterations plot
# output[[paste("plot", plot_name, "by_iterations", sep = "_")]] <- renderPlot({
# get_plot_filtered(plot_name, input, average = FALSE)
# })
})
## Log files and value boxes
if(config_params[["analyse_log"]]){
log_table <- DT::datatable(emlab_log, filter = list(position = 'top', clear = FALSE), options = list(scrollX = T))
} else {
log_table <- tibble(info = "Logs analysis not activated in config.R.")
}
output$dt_log_table = DT::renderDataTable({
log_table
})
toggle_filters <- function(filter_name, selected_plot){
if(selected_plot %in% names(show_filters)){
(filter_name %in% show_filters[[selected_plot]])
} else {
# If not defined show anyway
TRUE
}
}
## Custom filters
### Filter for market
output$show_filter_market <- reactive({
toggle_filters("market",input$single_plot_selected)
})
outputOptions(output, "show_filter_market", suspendWhenHidden = FALSE)
### Filter for technology
output$show_filter_technology <- reactive({
toggle_filters("technology",input$single_plot_selected)
})
outputOptions(output, "show_filter_technology", suspendWhenHidden = FALSE)
### Filter for producer
output$show_filter_producer <- reactive({
toggle_filters("producer",input$single_plot_selected)
})
outputOptions(output, "show_filter_producer", suspendWhenHidden = FALSE)
### Filter for cashflow
output$show_filter_cashflow <- reactive({
toggle_filters("cashflow",input$single_plot_selected)
})
outputOptions(output, "show_filter_cashflow", suspendWhenHidden = FALSE)
### Filter for fuel
output$show_filter_fuel <- reactive({
toggle_filters("fuel",input$single_plot_selected)
})
outputOptions(output, "show_filter_fuel", suspendWhenHidden = FALSE)
### Filter for segment
output$show_filter_segment <- reactive({
toggle_filters("segment",input$single_plot_selected)
})
outputOptions(output, "show_filter_segment", suspendWhenHidden = FALSE)
### Filter for tick_expected
output$show_filter_tick_expected <- reactive({
toggle_filters("tick_expected",input$single_plot_selected)
})
outputOptions(output, "show_filter_tick_expected", suspendWhenHidden = FALSE)
### Filter for tick
output$show_filter_tick <- reactive({
toggle_filters("tick_filter",input$single_plot_selected)
})
outputOptions(output, "show_filter_tick", suspendWhenHidden = FALSE)
### Filter for single iteration
output$show_filter_single_iteration <- reactive({
toggle_filters("iterations",input$single_plot_selected)
})
outputOptions(output, "show_filter_single_iteration", suspendWhenHidden = FALSE)
### Filter for range
output$hide_filter_iteration_range <- reactive({
toggle_filters("hide_iterations_range",input$single_plot_selected)
})
outputOptions(output, "hide_filter_iteration_range", suspendWhenHidden = FALSE)
# Logic for saving data
scenario_descriptions_title <- reactiveVal(
ifelse(identical(scenario_descriptions_initial_name, character(0)), prefix, scenario_descriptions_initial_name)
)
output$scenario_descriptions_title <- renderText({
paste("Loaded Scenario:", scenario_descriptions_title())
})
observeEvent(input$submit, {
save_to_description_file(
file = description_file,
prefix = prefix,
name = input[["file_scenario_name"]],
caption = input[["file_scenario_caption"]])
scenario_descriptions_title(input[["file_scenario_name"]])
})
output$current_scenario_title <- renderText({
paste("Loaded scenario:", if_else(scenario_descriptions_current_name == "", prefix, scenario_descriptions_current_name))
})
}
| /analysis/app/server.R | permissive | ejlchappin/emlab-generation2 | R | false | false | 4,696 | r | server <- function(input, output) {
observeEvent(input$toggle_shared_options, {
toggle("shared_filter_panel")
})
unit_factor <- reactive({
available_units %>%
filter(prefix == input$unit_prefix) %>%
pull(factor)
})
assign("unit_factor", unit_factor, envir = .GlobalEnv)
all_plots <- names(plots)
# for each plot in plots[] produce plots of the namescheme plot_(data_name)_(average/by_iterations)
map(all_plots, function(plot_name){
# get average of all iterations plot
observe({
output$selected_single_plot <- renderPlot(
expr = {get_plot_filtered(input$single_plot_selected, input, input$iteration_average)},
height = input$selected_single_plot_height)
})
output$selected_single_plot_title <- renderText({
input$single_plot_selected %>%
variable_name_to_title()
})
# # get by iterations plot
# output[[paste("plot", plot_name, "by_iterations", sep = "_")]] <- renderPlot({
# get_plot_filtered(plot_name, input, average = FALSE)
# })
})
## Log files and value boxes
if(config_params[["analyse_log"]]){
log_table <- DT::datatable(emlab_log, filter = list(position = 'top', clear = FALSE), options = list(scrollX = T))
} else {
log_table <- tibble(info = "Logs analysis not activated in config.R.")
}
output$dt_log_table = DT::renderDataTable({
log_table
})
toggle_filters <- function(filter_name, selected_plot){
if(selected_plot %in% names(show_filters)){
(filter_name %in% show_filters[[selected_plot]])
} else {
# If not defined show anyway
TRUE
}
}
## Custom filters
### Filter for market
output$show_filter_market <- reactive({
toggle_filters("market",input$single_plot_selected)
})
outputOptions(output, "show_filter_market", suspendWhenHidden = FALSE)
### Filter for technology
output$show_filter_technology <- reactive({
toggle_filters("technology",input$single_plot_selected)
})
outputOptions(output, "show_filter_technology", suspendWhenHidden = FALSE)
### Filter for producer
output$show_filter_producer <- reactive({
toggle_filters("producer",input$single_plot_selected)
})
outputOptions(output, "show_filter_producer", suspendWhenHidden = FALSE)
### Filter for cashflow
output$show_filter_cashflow <- reactive({
toggle_filters("cashflow",input$single_plot_selected)
})
outputOptions(output, "show_filter_cashflow", suspendWhenHidden = FALSE)
### Filter for fuel
output$show_filter_fuel <- reactive({
toggle_filters("fuel",input$single_plot_selected)
})
outputOptions(output, "show_filter_fuel", suspendWhenHidden = FALSE)
### Filter for segment
output$show_filter_segment <- reactive({
toggle_filters("segment",input$single_plot_selected)
})
outputOptions(output, "show_filter_segment", suspendWhenHidden = FALSE)
### Filter for tick_expected
output$show_filter_tick_expected <- reactive({
toggle_filters("tick_expected",input$single_plot_selected)
})
outputOptions(output, "show_filter_tick_expected", suspendWhenHidden = FALSE)
### Filter for tick
output$show_filter_tick <- reactive({
toggle_filters("tick_filter",input$single_plot_selected)
})
outputOptions(output, "show_filter_tick", suspendWhenHidden = FALSE)
### Filter for single iteration
output$show_filter_single_iteration <- reactive({
toggle_filters("iterations",input$single_plot_selected)
})
outputOptions(output, "show_filter_single_iteration", suspendWhenHidden = FALSE)
### Filter for range
output$hide_filter_iteration_range <- reactive({
toggle_filters("hide_iterations_range",input$single_plot_selected)
})
outputOptions(output, "hide_filter_iteration_range", suspendWhenHidden = FALSE)
# Logic for saving data
scenario_descriptions_title <- reactiveVal(
ifelse(identical(scenario_descriptions_initial_name, character(0)), prefix, scenario_descriptions_initial_name)
)
output$scenario_descriptions_title <- renderText({
paste("Loaded Scenario:", scenario_descriptions_title())
})
observeEvent(input$submit, {
save_to_description_file(
file = description_file,
prefix = prefix,
name = input[["file_scenario_name"]],
caption = input[["file_scenario_caption"]])
scenario_descriptions_title(input[["file_scenario_name"]])
})
output$current_scenario_title <- renderText({
paste("Loaded scenario:", if_else(scenario_descriptions_current_name == "", prefix, scenario_descriptions_current_name))
})
}
|
#' @export
makeRLearner.classif.qda = function() {
makeRLearnerClassif(
cl = "classif.qda",
package = "MASS",
par.set = makeParamSet(
makeDiscreteLearnerParam(id = "method", default = "moment", values = c("moment", "mle", "mve", "t")),
makeNumericLearnerParam(id = "nu", default = 5 , lower = 2, requires = quote(method == "t")),
makeDiscreteLearnerParam(id = "predict.method", values = c("plug-in", "predictive", "debiased"),
default = "plug-in", when = "predict")
),
properties = c("twoclass", "multiclass", "numerics", "factors", "prob"),
name = "Quadratic Discriminant Analysis",
short.name = "qda",
note = "Learner parameter `predict.method` maps to `method` in `predict.qda`."
)
}
#' @export
trainLearner.classif.qda = function(.learner, .task, .subset, .weights = NULL, ...) {
f = getTaskFormula(.task)
MASS::qda(f, data = getTaskData(.task, .subset, recode.target = "drop.levels"), ...)
}
#' @export
predictLearner.classif.qda = function(.learner, .model, .newdata, predict.method = "plug-in", ...) {
p = predict(.model$learner.model, newdata = .newdata, method = predict.method, ...)
if(.learner$predict.type == "response")
return(p$class)
else
return(p$posterior)
}
| /mlr/R/RLearner_classif_qda.R | no_license | ingted/R-Examples | R | false | false | 1,259 | r | #' @export
makeRLearner.classif.qda = function() {
makeRLearnerClassif(
cl = "classif.qda",
package = "MASS",
par.set = makeParamSet(
makeDiscreteLearnerParam(id = "method", default = "moment", values = c("moment", "mle", "mve", "t")),
makeNumericLearnerParam(id = "nu", default = 5 , lower = 2, requires = quote(method == "t")),
makeDiscreteLearnerParam(id = "predict.method", values = c("plug-in", "predictive", "debiased"),
default = "plug-in", when = "predict")
),
properties = c("twoclass", "multiclass", "numerics", "factors", "prob"),
name = "Quadratic Discriminant Analysis",
short.name = "qda",
note = "Learner parameter `predict.method` maps to `method` in `predict.qda`."
)
}
#' @export
trainLearner.classif.qda = function(.learner, .task, .subset, .weights = NULL, ...) {
f = getTaskFormula(.task)
MASS::qda(f, data = getTaskData(.task, .subset, recode.target = "drop.levels"), ...)
}
#' @export
predictLearner.classif.qda = function(.learner, .model, .newdata, predict.method = "plug-in", ...) {
p = predict(.model$learner.model, newdata = .newdata, method = predict.method, ...)
if(.learner$predict.type == "response")
return(p$class)
else
return(p$posterior)
}
|
#Get Iris from R
library(caret)
library(dplyr)
library(rattle)
a <- read.csv("data_clean/iris_clean.csv")
#Create Training and Testing Sets
set.seed(42)
inTrain<-createDataPartition(y=a$Species, p=0.70, list=FALSE)
training.Iris<-a[inTrain,]
testing.Iris<-a[-inTrain,]
# Display a pairs plot for the selected variables.
training.Iris %>%
dplyr::mutate(Species=as.factor(Species)) %>%
GGally::ggpairs(columns=c(1,2,3,4,5),
mapping=ggplot2::aes(colour=Species, alpha=0.5),
diag=list(continuous="density",
discrete="bar"),
upper=list(continuous="cor",
combo="box",
discrete="ratio"),
lower=list(continuous="points",
combo="denstrip",
discrete="facetbar")) +
ggplot2::theme(panel.grid.major=ggplot2::element_blank())
#preProcess, center and scale the data
##training set
preObj<-preProcess(training.Iris[,-5], method = c("center", "scale"))
preObjData<-predict(preObj,training.Iris[,-5])
boxplot(preObjData, main="Normalized data" )
training.Iris_N <- transform(preObjData,Species=training.Iris$Species)
##testing set
preObj<-preProcess(testing.Iris[,-5], method = c("center", "scale"))
preObjData<-predict(preObj,testing.Iris[,-5])
boxplot(preObjData, main="Normalized data" )
testing.Iris_N <- transform(preObjData,Species=testing.Iris$Species)
# Decision Tree
## The 'rpart' package provides the 'rpart' function.
library(rpart, quietly=TRUE)
## Reset the random number seed to obtain the same results each time.
set.seed(42)
## Build the Decision Tree model.
rpart <- rpart(Species ~ .,
training.Iris_N,
method="class",
parms=list(split="information"),
control=rpart.control(usesurrogate=0,
maxsurrogate=0),
model=TRUE)
## Generate a textual view of the Decision Tree model.
print(rpart)
printcp(rpart)
## Evaluate model performance on the testing dataset.
### Generate an Error Matrix for the Decision Tree model.
#### Obtain the response from the Decision Tree model.
pr_rpart <- predict(rpart, newdata=testing.Iris_N,
type="class")
#### Generate the confusion matrix showing counts.
rattle::errorMatrix(testing.Iris_N$Species, pr_rpart, count=TRUE)
#### Generate the confusion matrix showing proportions.
(per_rpart <- rattle::errorMatrix(testing.Iris_N$Species, pr_rpart))
#### Calculate the overall error percentage.
cat("Calculate the overall error percentage:", 100-sum(diag(per_rpart), na.rm=TRUE))
#### Calculate the averaged class error percentage.
cat("Calculate the averaged class error percentage:", mean(per_rpart[,"Error"], na.rm=TRUE))
### ROC Curve: requires the ROCR package.
library(ROCR)
#### ROC Curve: requires the ggplot2 package.
library(ggplot2, quietly=TRUE)
#### Generate an ROC Curve for the rpart model on a [test].
pr_rpart_roc <- predict(rpart, newdata=testing.Iris_N, type= "prob")[,2]
#### Remove observations with missing target.
no.miss <- na.omit(testing.Iris_N$Species)
miss.list <- attr(no.miss, "na.action")
attributes(no.miss) <- NULL
if (length(miss.list))
{
pred_rpart <- prediction(pr_rpart_roc[-miss.list], no.miss)
} else
{
pred_rpart <- prediction(pr_rpart_roc, no.miss)
}
pe <- performance(pred_rpart, "tpr", "fpr")
au <- performance(pred_rpart, "auc")@y.values[[1]]
pd <- data.frame(fpr=unlist(pe@x.values), tpr=unlist(pe@y.values))
p <- ggplot(pd, aes(x=fpr, y=tpr))
p <- p + geom_line(colour="red")
p <- p + xlab("False Positive Rate") + ylab("True Positive Rate")
p <- p + ggtitle("ROC Curve Decision Tree a [test] Species")
p <- p + theme(plot.title=element_text(size=10))
p <- p + geom_line(data=data.frame(), aes(x=c(0,1), y=c(0,1)), colour="grey")
p <- p + annotate("text", x=0.50, y=0.00, hjust=0, vjust=0, size=5,
label=paste("AUC =", round(au, 2)))
print(p)
#### Calculate the area under the curve for the plot.
no.miss <- na.omit(testing.Iris_N$Species)
miss.list <- attr(no.miss, "na.action")
attributes(no.miss) <- NULL
if (length(miss.list))
{
pred_rpart <- prediction(pr_rpart_roc[-miss.list], no.miss)
} else
{
pred_rpart <- prediction(pr_rpart_roc, no.miss)
}
performance(pred_rpart, "auc")
# Support vector machine.
## The 'kernlab' package provides the 'ksvm' function.
library(kernlab, quietly=TRUE)
## Build a Support Vector Machine model.
set.seed(42)
ksvm <- ksvm(as.factor(Species) ~ .,
data=training.Iris_N,
kernel="rbfdot",
prob.model=TRUE)
## Generate a textual view of the SVM model.
ksvm
## Evaluate model performance on the testing dataset.
### Generate an Error Matrix for the SVM model.
#### Obtain the response from the SVM model.
pr_ksvm <- kernlab::predict(ksvm, newdata=na.omit(testing.Iris_N))
#### Generate the confusion matrix showing counts.
rattle::errorMatrix(na.omit(testing.Iris_N)$Species, pr_ksvm, count=TRUE)
#### Generate the confusion matrix showing proportions.
(per_ksvm <- rattle::errorMatrix(na.omit(testing.Iris_N)$Species, pr_ksvm))
#### Calculate the overall error percentage.
cat("Calculate the overall error percentage:", 100-sum(diag(per_ksvm), na.rm=TRUE))
#### Calculate the averaged class error percentage.
cat("Calculate the averaged class error percentage:", mean(per_ksvm[,"Error"], na.rm=TRUE))
### ROC Curve: requires the ROCR package.
library(ROCR)
#### ROC Curve: requires the ggplot2 package.
library(ggplot2, quietly=TRUE)
#### Generate an ROC Curve for the ksvm model on a [test].
pr_ksvm_roc <- kernlab::predict(ksvm, newdata=na.omit(testing.Iris_N),
type = "probabilities")[,2]
#### Remove observations with missing target.
no.miss <- na.omit(na.omit(testing.Iris_N)$Species)
miss.list <- attr(no.miss, "na.action")
attributes(no.miss) <- NULL
if (length(miss.list))
{
pred <- prediction(pr_ksvm_roc[-miss.list], no.miss)
} else
{
pred <- prediction(pr_ksvm_roc, no.miss)
}
pe <- performance(pred, "tpr", "fpr")
au <- performance(pred, "auc")@y.values[[1]]
pd <- data.frame(fpr=unlist(pe@x.values), tpr=unlist(pe@y.values))
p <- ggplot(pd, aes(x=fpr, y=tpr))
p <- p + geom_line(colour="red")
p <- p + xlab("False Positive Rate") + ylab("True Positive Rate")
p <- p + ggtitle("ROC Curve SVM a [test] Species")
p <- p + theme(plot.title=element_text(size=10))
p <- p + geom_line(data=data.frame(), aes(x=c(0,1), y=c(0,1)), colour="grey")
p <- p + annotate("text", x=0.50, y=0.00, hjust=0, vjust=0, size=5,
label=paste("AUC =", round(au, 2)))
print(p)
#### Calculate the area under the curve for the plot.
no.miss <- na.omit(testing.Iris_N$Species)
miss.list <- attr(no.miss, "na.action")
attributes(no.miss) <- NULL
if (length(miss.list))
{
pred_ksvm <- prediction(pr_ksvm_roc[-miss.list], no.miss)
} else
{
pred_ksvm <- prediction(pr_ksvm_roc, no.miss)
}
performance(pred_ksvm, "auc")
| /src/analysis.R | no_license | n634763231/workflow | R | false | false | 7,105 | r |
#Get Iris from R
library(caret)
library(dplyr)
library(rattle)
a <- read.csv("data_clean/iris_clean.csv")
#Create Training and Testing Sets
set.seed(42)
inTrain<-createDataPartition(y=a$Species, p=0.70, list=FALSE)
training.Iris<-a[inTrain,]
testing.Iris<-a[-inTrain,]
# Display a pairs plot for the selected variables.
training.Iris %>%
dplyr::mutate(Species=as.factor(Species)) %>%
GGally::ggpairs(columns=c(1,2,3,4,5),
mapping=ggplot2::aes(colour=Species, alpha=0.5),
diag=list(continuous="density",
discrete="bar"),
upper=list(continuous="cor",
combo="box",
discrete="ratio"),
lower=list(continuous="points",
combo="denstrip",
discrete="facetbar")) +
ggplot2::theme(panel.grid.major=ggplot2::element_blank())
#preProcess, center and scale the data
##training set
preObj<-preProcess(training.Iris[,-5], method = c("center", "scale"))
preObjData<-predict(preObj,training.Iris[,-5])
boxplot(preObjData, main="Normalized data" )
training.Iris_N <- transform(preObjData,Species=training.Iris$Species)
##testing set
preObj<-preProcess(testing.Iris[,-5], method = c("center", "scale"))
preObjData<-predict(preObj,testing.Iris[,-5])
boxplot(preObjData, main="Normalized data" )
testing.Iris_N <- transform(preObjData,Species=testing.Iris$Species)
# Decision Tree
## The 'rpart' package provides the 'rpart' function.
library(rpart, quietly=TRUE)
## Reset the random number seed to obtain the same results each time.
set.seed(42)
## Build the Decision Tree model.
rpart <- rpart(Species ~ .,
training.Iris_N,
method="class",
parms=list(split="information"),
control=rpart.control(usesurrogate=0,
maxsurrogate=0),
model=TRUE)
## Generate a textual view of the Decision Tree model.
print(rpart)
printcp(rpart)
## Evaluate model performance on the testing dataset.
### Generate an Error Matrix for the Decision Tree model.
#### Obtain the response from the Decision Tree model.
pr_rpart <- predict(rpart, newdata=testing.Iris_N,
type="class")
#### Generate the confusion matrix showing counts.
rattle::errorMatrix(testing.Iris_N$Species, pr_rpart, count=TRUE)
#### Generate the confusion matrix showing proportions.
(per_rpart <- rattle::errorMatrix(testing.Iris_N$Species, pr_rpart))
#### Calculate the overall error percentage.
cat("Calculate the overall error percentage:", 100-sum(diag(per_rpart), na.rm=TRUE))
#### Calculate the averaged class error percentage.
cat("Calculate the averaged class error percentage:", mean(per_rpart[,"Error"], na.rm=TRUE))
### ROC Curve: requires the ROCR package.
library(ROCR)
#### ROC Curve: requires the ggplot2 package.
library(ggplot2, quietly=TRUE)
#### Generate an ROC Curve for the rpart model on a [test].
pr_rpart_roc <- predict(rpart, newdata=testing.Iris_N, type= "prob")[,2]
#### Remove observations with missing target.
no.miss <- na.omit(testing.Iris_N$Species)
miss.list <- attr(no.miss, "na.action")
attributes(no.miss) <- NULL
if (length(miss.list))
{
pred_rpart <- prediction(pr_rpart_roc[-miss.list], no.miss)
} else
{
pred_rpart <- prediction(pr_rpart_roc, no.miss)
}
pe <- performance(pred_rpart, "tpr", "fpr")
au <- performance(pred_rpart, "auc")@y.values[[1]]
pd <- data.frame(fpr=unlist(pe@x.values), tpr=unlist(pe@y.values))
p <- ggplot(pd, aes(x=fpr, y=tpr))
p <- p + geom_line(colour="red")
p <- p + xlab("False Positive Rate") + ylab("True Positive Rate")
p <- p + ggtitle("ROC Curve Decision Tree a [test] Species")
p <- p + theme(plot.title=element_text(size=10))
p <- p + geom_line(data=data.frame(), aes(x=c(0,1), y=c(0,1)), colour="grey")
p <- p + annotate("text", x=0.50, y=0.00, hjust=0, vjust=0, size=5,
label=paste("AUC =", round(au, 2)))
print(p)
#### Calculate the area under the curve for the plot.
no.miss <- na.omit(testing.Iris_N$Species)
miss.list <- attr(no.miss, "na.action")
attributes(no.miss) <- NULL
if (length(miss.list))
{
pred_rpart <- prediction(pr_rpart_roc[-miss.list], no.miss)
} else
{
pred_rpart <- prediction(pr_rpart_roc, no.miss)
}
performance(pred_rpart, "auc")
# Support vector machine.
## The 'kernlab' package provides the 'ksvm' function.
library(kernlab, quietly=TRUE)
## Build a Support Vector Machine model.
set.seed(42)
ksvm <- ksvm(as.factor(Species) ~ .,
data=training.Iris_N,
kernel="rbfdot",
prob.model=TRUE)
## Generate a textual view of the SVM model.
ksvm
## Evaluate model performance on the testing dataset.
### Generate an Error Matrix for the SVM model.
#### Obtain the response from the SVM model.
pr_ksvm <- kernlab::predict(ksvm, newdata=na.omit(testing.Iris_N))
#### Generate the confusion matrix showing counts.
rattle::errorMatrix(na.omit(testing.Iris_N)$Species, pr_ksvm, count=TRUE)
#### Generate the confusion matrix showing proportions.
(per_ksvm <- rattle::errorMatrix(na.omit(testing.Iris_N)$Species, pr_ksvm))
#### Calculate the overall error percentage.
cat("Calculate the overall error percentage:", 100-sum(diag(per_ksvm), na.rm=TRUE))
#### Calculate the averaged class error percentage.
cat("Calculate the averaged class error percentage:", mean(per_ksvm[,"Error"], na.rm=TRUE))
### ROC Curve: requires the ROCR package.
library(ROCR)
#### ROC Curve: requires the ggplot2 package.
library(ggplot2, quietly=TRUE)
#### Generate an ROC Curve for the ksvm model on a [test].
pr_ksvm_roc <- kernlab::predict(ksvm, newdata=na.omit(testing.Iris_N),
type = "probabilities")[,2]
#### Remove observations with missing target.
no.miss <- na.omit(na.omit(testing.Iris_N)$Species)
miss.list <- attr(no.miss, "na.action")
attributes(no.miss) <- NULL
if (length(miss.list))
{
pred <- prediction(pr_ksvm_roc[-miss.list], no.miss)
} else
{
pred <- prediction(pr_ksvm_roc, no.miss)
}
pe <- performance(pred, "tpr", "fpr")
au <- performance(pred, "auc")@y.values[[1]]
pd <- data.frame(fpr=unlist(pe@x.values), tpr=unlist(pe@y.values))
p <- ggplot(pd, aes(x=fpr, y=tpr))
p <- p + geom_line(colour="red")
p <- p + xlab("False Positive Rate") + ylab("True Positive Rate")
p <- p + ggtitle("ROC Curve SVM a [test] Species")
p <- p + theme(plot.title=element_text(size=10))
p <- p + geom_line(data=data.frame(), aes(x=c(0,1), y=c(0,1)), colour="grey")
p <- p + annotate("text", x=0.50, y=0.00, hjust=0, vjust=0, size=5,
label=paste("AUC =", round(au, 2)))
print(p)
#### Calculate the area under the curve for the plot.
no.miss <- na.omit(testing.Iris_N$Species)
miss.list <- attr(no.miss, "na.action")
attributes(no.miss) <- NULL
if (length(miss.list))
{
pred_ksvm <- prediction(pr_ksvm_roc[-miss.list], no.miss)
} else
{
pred_ksvm <- prediction(pr_ksvm_roc, no.miss)
}
performance(pred_ksvm, "auc")
|
# Regression-test script for the classic BUGS "birats" example (model 4).
source("../../R/Rcheck.R")
# Data and initial values shipped with the example, in R dump format.
data <- read.jagsdata("birats-data.R")
inits <- read.jagsdata("birats-inits.R")
# Two parallel chains on the model definition in birats4.bug.
m <- jags.model("birats4.bug", data, inits, n.chains=2)
check.data(m, data)
# 1000-iteration burn-in, then 10000 iterations thinned by 10 for the
# monitored nodes.
update(m, 1000)
x <- coda.samples(m, c("mu.beta","sigma.beta","r", "alpha0"), thin=10,
n.iter=10000)
# Presumably compares posterior summaries in `x` against stored
# benchmark values -- check.fun() is defined by the sourced scripts.
source("bench-test4.R")
check.fun()
| /JAGS/examples/classic-bugs/vol2/birats/test4.R | no_license | amandawarlick/IPMWorkshop | R | false | false | 339 | r | source("../../R/Rcheck.R")
data <- read.jagsdata("birats-data.R")
inits <- read.jagsdata("birats-inits.R")
m <- jags.model("birats4.bug", data, inits, n.chains=2)
check.data(m, data)
update(m, 1000)
x <- coda.samples(m, c("mu.beta","sigma.beta","r", "alpha0"), thin=10,
n.iter=10000)
source("bench-test4.R")
check.fun()
|
# Embed one out-of-sample point into an existing 2-D MDS configuration.
#
# squared_matrix: (n x n) matrix of squared distances; the held-out point
#   must occupy the last row/column (it receives weight 0 below).
# points: coordinates of the n-1 already-embedded points.
# Returns `points` with the new point's estimated coordinates appended
# as the last row.
Outofsample_method<-function(squared_matrix, points){
D2<-squared_matrix
n <- nrow(D2)
# Weight the known points with 1 and the held-out point with 0 so the
# weighted double-centering is driven only by the known configuration.
ones <- rep(1, n-1)
w <-c(ones,0)
#print(w)
B <- mds.w.centered(D2,w)
X<-points
#Euc_distances<-c(dist(X, method = "euclidean"))
#stdv<-sd(Euc_distances)
#y <- nlm(minimizing_func,p=rnorm(2,sd=stdv),B,X)
# NOTE(review): the random start above was replaced with this hard-coded
# starting point; the nlm() result may depend on it -- confirm it
# generalizes beyond the dataset it was tuned on.
y <- nlm(minimizing_func,p=c(-0.009640601,-0.11018199),B,X)
New_points <- rbind(X,y$estimate)
#New_Euc_distances<-c(dist(New_points, method = "euclidean"))
return(New_points)
}
# Weighted double-centering of a squared-distance matrix.
#
# Given an n x n matrix of squared distances `D` and one weight per
# observation in `w`, builds the weighted row/column centering projectors
#   P = I - e w' / sum(w),  Q = I - w e' / sum(w)   (e = vector of ones)
# and returns X = -1/2 * P D Q.  With unit weights this reduces to the
# classical MDS double-centering.
mds.w.centered <- function(D, w) {
  n_obs <- length(w)
  w_col <- matrix(w, ncol = 1)
  ones <- matrix(1, nrow = n_obs, ncol = 1)
  total_w <- sum(w_col)
  row_center <- diag(n_obs) - (ones %*% t(w_col)) / total_w
  col_center <- diag(n_obs) - (w_col %*% t(ones)) / total_w
  -0.5 * row_center %*% D %*% col_center
}
# Objective function (with analytic gradient) minimized by nlm() when
# placing a new point into an existing MDS configuration.
#
# y: candidate coordinates of the new point (vector).
# B: weighted double-centered matrix whose last row/column corresponds
#    to the new point.
# X: coordinates of the already-embedded points.
#
# Returns the scalar criterion
#   2 * ||b - X y||^2 + (beta - ||y||^2)^2
# with the gradient attached as a "gradient" attribute so nlm() can skip
# numerical differentiation.
minimizing_func <- function(y, B, X) {
  # The new point occupies the last row/column of B.
  n <- nrow(X) + 1
  # Cross-products with the existing points and the new point's own
  # squared norm, as implied by B.
  b <- matrix(B[-n, n], ncol = 1)
  beta <- B[n, n]
  y <- matrix(y, ncol = 1)
  # Hoist subexpressions shared by the objective and its gradient.
  resid <- b - X %*% y
  norm_gap <- beta - sum(y^2)
  res <- 2 * sum(resid^2) + norm_gap^2
  attr(res, "gradient") <- -4 * t(X) %*% resid - 4 * norm_gap * y
  return(res)
}
| /OnePoint_Embedding.R | no_license | dilruu89/MDSProjection | R | false | false | 1,045 | r |
# Embed one held-out point into an existing 2-D MDS configuration.
#
# squared_matrix: (n x n) matrix of squared distances; the point to be
#   embedded must occupy the last row/column.
# points: coordinates of the n-1 already-embedded points.
# Returns `points` with the new point's estimated coordinates appended
# as the final row.
Outofsample_method <- function(squared_matrix, points) {
  n_total <- nrow(squared_matrix)
  # Weight the known points with 1 and the held-out point with 0 so the
  # weighted centering is driven only by the known configuration.
  weights <- c(rep(1, n_total - 1), 0)
  centered <- mds.w.centered(squared_matrix, weights)
  # Minimize the embedding criterion from a fixed starting location
  # (same hard-coded start as before); extra args are forwarded to
  # minimizing_func as B and X.
  fit <- nlm(minimizing_func, p = c(-0.009640601, -0.11018199),
             centered, points)
  rbind(points, fit$estimate)
}
# Weighted double-centering of a squared-distance matrix D:
#   X = -1/2 * P D Q,  P = I - e w'/sum(w),  Q = I - w e'/sum(w),
# where e is a vector of ones and w holds one weight per observation.
# With unit weights this is the classical MDS double-centering.
mds.w.centered <- function(D,w) {
n <- length(w)
w <- matrix(w,ncol=1)
e <- matrix(1,nrow=n,ncol=1)
s <- sum(w)
# Row- and column-centering projectors.
P <- diag(n) - e %*% t(w)/s
Q <- diag(n) - w %*% t(e)/s
X <- -0.5 * P %*% D %*% Q
return (X)
}
# Penalized least-squares criterion used by nlm() when embedding a new
# point into an existing configuration X.  B is the weighted
# double-centered matrix whose last row/column corresponds to the new
# point.  Returns the scalar objective with its analytic gradient
# attached as a "gradient" attribute.
minimizing_func <- function(y, B, X) {
  new_idx <- nrow(X) + 1
  cross <- matrix(B[-new_idx, new_idx], ncol = 1)
  self_norm <- B[new_idx, new_idx]
  y <- matrix(y, ncol = 1)
  value <- 2 * sum((cross - X %*% y)^2) + (self_norm - sum(y^2))^2
  attr(value, "gradient") <-
    -4 * t(X) %*% (cross - X %*% y) - 4 * (self_norm - sum(y^2)) * y
  value
}
|
#' Simple summary of abundance results for bootstrap model
#'
#' When using [`bootdht`][bootdht] one needs to use a summary function to
#' extract results from the resulting models per replicate. This function is
#' the simplest possible example of such a function, that just extracts the
#' estimated abundance (with stratum labels).
#'
#' Further examples of such functions can be found at
#' <http://examples.distancesampling.org>.
#'
#' @param ests output from [`dht2`][dht2].
#' @param fit fitted detection function object (unused).
#' @return `data.frame` with two columns ("`Nhat`" and "`Label`"), giving the
#' estimate(s) of abundance of individuals per stratum from each bootstrap
#' replicate. This `data.frame` can be examined for example, with
#' [`quantile`][stats::quantile] to compute confidence intervals.
#' @export
#' @seealso [`bootdht`][bootdht] which this function is to be used with and
#' [`bootdht_Dhat_summarize`][bootdht_Dhat_summarize] which does the same job
#' but for density results.
bootdht_Nhat_summarize <- function(ests, fit) {
# `fit` is required by the bootdht summary-function interface but is
# not used here.
return(data.frame(Label = ests$individuals$N$Label,
Nhat = ests$individuals$N$Estimate))
}
| /R/bootdht_Nhat_summarize.R | no_license | cran/Distance | R | false | false | 1,204 | r | #' Simple summary of abundance results for bootstrap model
#'
#' A summary function for use with [`bootdht`][bootdht].  Each bootstrap
#' replicate produces a [`dht2`][dht2] result; this helper pulls out the
#' estimated abundance of individuals for every stratum, tagged with its
#' stratum label.
#'
#' Further examples of such functions can be found at
#' <http://examples.distancesampling.org>.
#'
#' @param ests output from [`dht2`][dht2].
#' @param fit fitted detection function object (unused, but required by
#'   the `bootdht` summary-function interface).
#' @return `data.frame` with two columns ("`Nhat`" and "`Label`"): one row
#'   per stratum, holding that stratum's abundance estimate for this
#'   bootstrap replicate.  Can be examined with, e.g.,
#'   [`quantile`][stats::quantile] to compute confidence intervals.
#' @export
#' @seealso [`bootdht`][bootdht] and
#'   [`bootdht_Dhat_summarize`][bootdht_Dhat_summarize].
bootdht_Nhat_summarize <- function(ests, fit) {
  abundance <- ests$individuals$N
  data.frame(Label = abundance$Label,
             Nhat = abundance$Estimate)
}
# Resampling (permutation) test of the ANOVA F statistics of a linear model.
#
# Arguments:
#   model   - model formula given as a character string, e.g. "y ~ block + trt".
#   data    - data frame holding the variables named in `model`.
#   k       - number of random permutations of the response.
#   console - if TRUE, print the augmented ANOVA table to the console.
#
# The observed response is permuted `k` times; each permuted response is
# refit against the same right-hand side and the term-wise F statistics
# are compared with the observed ones.  The resampling p-value of a term
# is the proportion of permutations whose F value is >= the observed F.
#
# Returns invisibly a list with: `model` (the fit from the LAST
# permutation), `solution` (the ANOVA table plus a "Resampling" p-value
# column; converted to a matrix when console=TRUE), `acum` (exceedance
# counts per term) and `samples` (k).
`resampling.model` <-
function(model, data, k, console = FALSE) {
  modelo <- model
  parte <- strsplit(model, "~")[[1]]
  model <- as.formula(model)
  ecuacion <- lm(model, data = data)
  xx <- data.frame(anova(ecuacion), NA)
  # Observed response values and observed F statistic for each term.
  yy <- ecuacion$model[[1]]
  fc <- xx[, 4]
  names(xx) <- c("Df", "Sum Sq", "Mean Sq", "F value", "Pr(>F)", "Resampling")
  m <- nrow(data)
  gk <- nrow(xx) - 1
  f <- rep(0, gk)
  cuenta <- rep(0, gk)
  # Rebuild the formula with response `y`; lm() resolves `y` in this
  # function's environment, so assigning the permuted response to `y`
  # below is what drives each refit.
  model <- paste("y", "~", parte[2])
  model <- as.formula(model)
  # seq_len() (rather than 1:k) avoids the bogus c(1, 0) loop when k == 0.
  for (i in seq_len(k)) {
    y <- sample(yy, m)
    resample <- lm(model, data = data)
    for (j in seq_len(gk)) {
      f[j] <- anova(resample)[j, 4]
      if (f[j] >= fc[j]) cuenta[j] <- cuenta[j] + 1
    }
  }
  # Convert exceedance counts to resampling p-values.
  for (j in seq_len(gk)) {
    xx[j, 6] <- cuenta[j] / k
  }
  if (console) {
    cat("\nResampling of the experiments\n")
    cat(rep("-", 14), "\n")
    cat("Proposed model:", modelo, "\n")
    cat("---\n")
    # Fixed typo in the user-facing message ("variancia" -> "variance").
    cat("Resampling of the analysis of variance for the proposed model\n")
    cat("Determination of the P-Value by Resampling\n")
    cat("Samples:", k, "\n\n")
    xx <- as.matrix(xx)
    print(xx, na.print = "")
    cat("---\n\n")
  }
  out <- list(model = resample, solution = xx, acum = cuenta, samples = k)
  invisible(out)
}
| /agricolae/R/resampling.model.R | no_license | ingted/R-Examples | R | false | false | 1,106 | r | `resampling.model` <-
function(model,data,k,console=FALSE) {
# Keep the original character form of the model for printing later.
modelo<-model
parte<-strsplit(model,"~")[[1]]
model<-as.formula(model)
# Fit the proposed model; the extra NA column of the ANOVA table will
# hold the resampling p-values.
ecuacion<-lm(model,data=data)
xx<-data.frame(anova(ecuacion),NA)
# Observed response values and observed F statistic per term.
yy<-ecuacion$model[[1]]
fc<-xx[,4]
names(xx)<-c("Df","Sum Sq","Mean Sq","F value","Pr(>F)","Resampling")
m<-nrow(data)
gk<-nrow(xx)-1
f<-rep(0,gk)
cuenta <- rep(0,gk)
# Start Resampling: rebuild the formula with response `y`; lm() looks
# `y` up in this function's environment, so assigning a permuted
# response to `y` below drives each refit.
model <- paste("y","~",parte[2])
model<-as.formula(model)
for(i in 1:k){
y<-sample(yy,m)
resample<-lm(model,data=data)
for (j in 1:gk){
# Count how often a permuted F value reaches the observed one.
f[j]<-anova(resample)[j,4]
if(f[j] >= fc[j])cuenta[j]<-cuenta[j]+1
}
}
# finish resampling: exceedance counts become resampling p-values.
for( j in 1:gk){
xx[j,6]<-cuenta[j]/k
}
if(console){
cat("\nResampling of the experiments\n")
cat(rep("-",14),"\n")
cat("Proposed model:",modelo,"\n")
cat("---\n")
cat("Resampling of the analysis of variancia for the proposed model\n")
cat("Determination of the P-Value by Resampling\n")
cat("Samples:",k,"\n\n")
xx<-as.matrix(xx)
print(xx,na.print="")
cat("---\n\n")
}
# Returned invisibly; note `model` is the fit from the LAST permutation,
# not the fit to the observed data.
out<-list(model=resample, solution=xx,acum=cuenta,samples=k)
invisible(out)
}
#' Computes Persistence Scores For a Data.Frame of Time-Series Across Multiple Lags
#'
#' Takes a \code{data.frame} of numeric gene expression over time (genes X ZT times) and computes the persistence score using \code{\link{getPersistence}}.
#' For a given gene, each lag (min to max) is used to transform the expression into a 3-D embedded space via time-delay embedding.
#' A non-linear dimension reduction technique (laplacian eigenmaps) is used to transform the 3-D embedding to a 2-D embedding.
#' Finally, the persistence score of the 2-D embedding is calculated via persistence homology.
#' The median persistence score across all lags (min to max) for each gene is returned as a numeric vector.
#' For more details see TimeCycle's vignette:
#' \code{vignette("TimeCycle")}.
#'
#' @param data a \code{data.frame} of \code{numeric} gene expression over time (row = genes \emph{x} col = ZT times).
#' @param minLag a \code{numeric} specifying the min lag to check in the 3-D embedding. Default is \code{2}.
#' @param maxLag a \code{numeric} specifying the max lag to check in the 3-D embedding. Default is \code{5}.
#' @param cores a \code{numeric} specifying the number of parallel cores to use. Defaults to \code{max(1, parallel::detectCores() - 2)}.
#' @param laplacian a \code{logical} scalar. Should the Laplacian Eigenmaps be used for dimensionality reduction? Default \code{TRUE}.
#'
#' @references{
#'   \itemize{
#'     \item Wadhwa RR, Williamson DFK, Dhawan A, Scott JG. (2018). "TDAstats: R pipeline for computing persistent homology in topological data analysis." \emph{Journal of Open Source Software}. 2018; 3(28): 860. doi:\href{https://doi.org/10.21105/joss.00860}{[10.21105/joss.00860]}
#'     \item Bauer U. (2019). "Ripser: Efficient computation of Vietoris-Rips persistence barcodes." \emph{arXiv}: 1908.02518.
#'  }
#' }
#' @seealso
#' \itemize{
#'  \item \code{\link[TDAstats]{calculate_homology}} for Persistence Homology calculation.
#'  \item \code{\link{buildTakens_ndim}} for for generating time-delay embedding.
#'  \item \code{\link{computeLaplacianEmbedding}} for 3-D to 2-D laplacian eigenmaps dimension reduction.
#'  \item \code{\link{getPersistence}} for use on a single gene expression time-series.
#'}
#'
#' @return a \code{vector} of the median persistence score across lags (minLag to maxLag) for each gene in data
#' @export
#'
computePersistence <- function(data, minLag = 2, maxLag = 5,
                               cores = max(1, parallel::detectCores() - 2),
                               laplacian = TRUE) {
  # One candidate lag per list element; each lag is one parallel task.
  # The `max(1, ...)` default guards machines with <= 2 cores, where the
  # previous default (detectCores() - 2) was non-positive and made
  # mclapply() error.
  lags <- as.list(minLag:maxLag)
  # Compute the persistence score of every gene (row) at each lag.
  output <- parallel::mclapply(lags, mc.cores = cores, function(lag) {
    scores <- apply(data, 1, function(TS) {
      getPersistence(t(as.matrix(TS)), lag = lag, laplacian = laplacian)
    })
    as.data.frame(scores)
  })
  # genes x lags table of persistence scores; reduce across lags with
  # the median, one value per gene.
  PSatEachLag <- do.call(cbind, output)
  apply(PSatEachLag, 1, stats::median)
}
| /R/computePersistence.R | permissive | nesscoder/TimeCycle | R | false | false | 2,959 | r | #' Computes Persistence Scores For a Data.Frame of Time-Series Across Multiple Lags
#'
#' Takes a \code{data.frame} of numeric gene expression over time (genes X ZT times) and computes the persistence score using \code{\link{getPersistence}}.
#' For a given gene, each lag (min to max) is used to transform the expression into a 3-D embedded space via time-delay embedding.
#' A non-linear dimension reduction technique (laplacian eigenmaps) is used to transfrom the 3-D embedding to a 2-D embedding.
#' Finally, the persistence score of the 2-D embedding is calculated via persistence homology.
#' The median persistence score across all lags (min to max) for each gene is returned as a numeric vector.
#' For more details see TimeCycle's vignette:
#' \code{vignette("TimeCycle")}.
#'
#' @param data a \code{data.frame} of \code{numeric} gene expression over time (row = genes \emph{x} col = ZT times).
#' @param minLag a \code{numeric} specifying the min lag to check in the 3-D embedding. Default is \code{2}.
#' @param maxLag a \code{numeric} specifying the max lag to check in the 3-D embedding. Default is \code{5}.
#' @param cores a \code{numeric} specifying the number of parallel cores to use. Default number of cores is \code{parallel::detectedCores() - 2}.
#' @param laplacian a \code{logical} scalar. Should the Laplacian Eigenmaps be used for dimensionality reduction? Default \code{TRUE}.
#'
#' @references{
#' \itemize{
#' \item Wadhwa RR, Williamson DFK, Dhawan A, Scott JG. (2018). "TDAstats: R pipeline for computing persistent homology in topological data analysis." \emph{Journal of Open Source Software}. 2018; 3(28): 860. doi:\href{https://doi.org/10.21105/joss.00860}{[10.21105/joss.00860]}
#' \item Bauer U. (2019). "Ripser: Efficient computation of Vietoris-Rips persistence barcodes." \emph{arXiv}: 1908.02518.
#' }
#' }
#' @seealso
#' \itemize{
#' \item \code{\link[TDAstats]{calculate_homology}} for Persistence Homology calculation.
#' \item \code{\link{buildTakens_ndim}} for for generating time-delay embedding.
#' \item \code{\link{computeLaplacianEmbedding}} for 3-D to 2-D laplacian eigenmaps dimension reduction.
#' \item \code{\link{getPersistence}} for use on a single gene expression time-series.
#'}
#'
#' @return a \code{vector} of the median persistence score across lags (minLag to maxLag) for each gene in data
#' @export
#'
# Median persistence score per gene across embedding lags minLag..maxLag.
# `data` is genes x timepoints; each row is scored at every lag via
# getPersistence() and the per-lag scores are reduced with the median.
# NOTE(review): on machines with <= 2 cores the default `cores` is
# non-positive and mclapply() will error -- confirm callers guard this.
computePersistence <- function(data, minLag = 2, maxLag = 5, cores = parallel::detectCores() - 2, laplacian = T){
  # One list element per candidate lag (one parallel task per lag).
  vect <- as.list(minLag:maxLag)
  #compute PS at each lag
  output <- parallel::mclapply(vect, mc.cores = cores, function(lag){
    # Score every gene (row) at this lag.
    perTSoutput <- apply(data,1,function(TS){
      return(getPersistence(t(as.matrix(TS)), lag = lag, laplacian = laplacian))
    })
    perTSoutput <- as.data.frame(perTSoutput)
  })
  #save persistence score at each lag: genes x lags table
  PSatEachLag <- do.call(cbind,output)
  return(apply(PSatEachLag,1, stats::median))
}
|
library(here)
library(tidyverse)
library(rio)
theme_set(theme_minimal())
## ----forward-selection---------------------------------------------------
simple <- lm(mpg ~ 1, data = mtcars)
full <- formula(lm(mpg ~ ., mtcars)) # note call wrapped in `formula`
fwd_mod <- step(simple,
scope = full,
direction = "forward")
## ----forward-anova-------------------------------------------------------
arm::display(fwd_mod, detail = TRUE)
fwd_mod$anova
## ----backward-selection--------------------------------------------------
full <- lm(mpg ~ ., data = mtcars)
simple <- formula(lm(mpg ~ 1, mtcars)) # note call wrapped in `formula`
back_mod <- step(full,
scope = simple,
direction = "backward")
## ----back-anova----------------------------------------------------------
arm::display(back_mod, detail = TRUE)
back_mod$anova
## ----stepwise-selection--------------------------------------------------
simple <- lm(mpg ~ 1, data = mtcars)
full <- formula(lm(mpg ~ ., mtcars)) # note call wrapped in `formula`
step_mod <- step(simple,
scope = full,
direction = "both")
## ----step-anova----------------------------------------------------------
arm::display(step_mod, detail = TRUE)
step_mod$anova
## ----train-test----------------------------------------------------------
set.seed(8675309)
train <- mtcars %>%
sample_frac(.8)
test <- anti_join(mtcars, train)
nrow(train)
nrow(test)
## ----train-mods----------------------------------------------------------
m1 <- lm(mpg ~ hp, train)
m2 <- lm(mpg ~ hp + disp, train)
m3 <- lm(mpg ~ hp + disp + cyl, train)
sundry::aic_weights(m1, m2, m3)
## ----test_preds----------------------------------------------------------
test <- test %>%
mutate(pred_mpg = predict(m2, newdata = test))
test
## ----diff----------------------------------------------------------------
test %>%
mutate(diff = pred_mpg - mpg)
## ----mse-----------------------------------------------------------------
test %>%
summarize(mse = mean((pred_mpg - mpg)^2))
## ----rmse----------------------------------------------------------------
test %>%
summarize(rmse = sqrt(mean((pred_mpg - mpg)^2)))
## ----k-fold-cv-----------------------------------------------------------
library(modelr)
iris %>%
crossv_kfold(10) %>%
mutate(model = map(train, ~lm(Sepal.Length ~ Petal.Length, data=.)))
## ----k-fold-cv-rmse------------------------------------------------------
iris %>%
crossv_kfold(10) %>%
mutate(model = map(train, ~lm(Sepal.Length ~ Petal.Length, data=.)),
rmse = map2_dbl(model, test, rmse)) #<<
## ----k-fold-cv-rmse-summary----------------------------------------------
iris %>%
crossv_kfold(10) %>%
mutate(model = map(train,
~lm(Sepal.Length ~ Petal.Length,
data=.)),
rmse = map2_dbl(model, test, rmse)) %>%
summarize(mean_rmse = mean(rmse)) #<<
## ----k-fold-cv-rmse-nonlinear--------------------------------------------
iris %>%
crossv_kfold(10) %>%
mutate(model = map(train,
~lm(Sepal.Length ~ poly(Petal.Length, 3),
data=.)),
rmse = map2_dbl(model, test, rmse)) %>%
summarize(mean_rmse = mean(rmse)) #<<
| /slidescripts/w10.R | permissive | datalorax/mr-fall18 | R | false | false | 3,299 | r | library(here)
library(tidyverse)
library(rio)
theme_set(theme_minimal())
## ----forward-selection---------------------------------------------------
simple <- lm(mpg ~ 1, data = mtcars)
full <- formula(lm(mpg ~ ., mtcars)) # note call wrapped in `formula`
fwd_mod <- step(simple,
scope = full,
direction = "forward")
## ----forward-anova-------------------------------------------------------
arm::display(fwd_mod, detail = TRUE)
fwd_mod$anova
## ----backward-selection--------------------------------------------------
full <- lm(mpg ~ ., data = mtcars)
simple <- formula(lm(mpg ~ 1, mtcars)) # note call wrapped in `formula`
back_mod <- step(full,
scope = simple,
direction = "backward")
## ----back-anova----------------------------------------------------------
arm::display(back_mod, detail = TRUE)
back_mod$anova
## ----stepwise-selection--------------------------------------------------
simple <- lm(mpg ~ 1, data = mtcars)
full <- formula(lm(mpg ~ ., mtcars)) # note call wrapped in `formula`
step_mod <- step(simple,
scope = full,
direction = "both")
## ----step-anova----------------------------------------------------------
arm::display(step_mod, detail = TRUE)
step_mod$anova
## ----train-test----------------------------------------------------------
set.seed(8675309)
train <- mtcars %>%
sample_frac(.8)
test <- anti_join(mtcars, train)
nrow(train)
nrow(test)
## ----train-mods----------------------------------------------------------
m1 <- lm(mpg ~ hp, train)
m2 <- lm(mpg ~ hp + disp, train)
m3 <- lm(mpg ~ hp + disp + cyl, train)
sundry::aic_weights(m1, m2, m3)
## ----test_preds----------------------------------------------------------
test <- test %>%
mutate(pred_mpg = predict(m2, newdata = test))
test
## ----diff----------------------------------------------------------------
test %>%
mutate(diff = pred_mpg - mpg)
## ----mse-----------------------------------------------------------------
test %>%
summarize(mse = mean((pred_mpg - mpg)^2))
## ----rmse----------------------------------------------------------------
test %>%
summarize(rmse = sqrt(mean((pred_mpg - mpg)^2)))
## ----k-fold-cv-----------------------------------------------------------
library(modelr)
iris %>%
crossv_kfold(10) %>%
mutate(model = map(train, ~lm(Sepal.Length ~ Petal.Length, data=.)))
## ----k-fold-cv-rmse------------------------------------------------------
iris %>%
crossv_kfold(10) %>%
mutate(model = map(train, ~lm(Sepal.Length ~ Petal.Length, data=.)),
rmse = map2_dbl(model, test, rmse)) #<<
## ----k-fold-cv-rmse-summary----------------------------------------------
iris %>%
crossv_kfold(10) %>%
mutate(model = map(train,
~lm(Sepal.Length ~ Petal.Length,
data=.)),
rmse = map2_dbl(model, test, rmse)) %>%
summarize(mean_rmse = mean(rmse)) #<<
## ----k-fold-cv-rmse-nonlinear--------------------------------------------
iris %>%
crossv_kfold(10) %>%
mutate(model = map(train,
~lm(Sepal.Length ~ poly(Petal.Length, 3),
data=.)),
rmse = map2_dbl(model, test, rmse)) %>%
summarize(mean_rmse = mean(rmse)) #<<
|
\name{strip}
\alias{strip}
\title{Strip Text}
\usage{
strip(x, char.keep = "~~", digit.remove = TRUE, apostrophe.remove = TRUE,
lower.case = TRUE)
}
\arguments{
\item{x}{The text variable.}
\item{char.keep}{A character vector of symbols (i.e.,
punctuation) that \code{\link[qdap]{strip}} should keep.
The default is to strip every symbol except apostrophes
and a double tilde \code{"~~"}. The double tilde
\code{"~~"} is included for a convenient means of keeping
word groups together in functions that split text apart
based on spaces. To remove double tildes \code{"~~"} set
\code{char.keep} to \code{NULL}.}
\item{digit.remove}{logical. If \code{TRUE} strips
digits from the text.}
\item{apostrophe.remove}{logical. If \code{TRUE} removes
apostrophes from the output.}
\item{lower.case}{logical. If \code{TRUE} forces all
alpha characters to lower case.}
}
\value{
Returns a vector of text that has been stripped of unwanted
characters.
}
\description{
Strip text of unwanted characters.
}
\examples{
\dontrun{
DATA$state #no strip applied
strip(DATA$state)
strip(DATA$state, apostrophe.remove=FALSE)
strip(DATA$state, char.keep = c("?", "."))
}
}
\seealso{
\code{\link[qdap]{rm_stopwords}}
}
| /man/strip.Rd | no_license | craigcitro/qdap | R | false | false | 1,238 | rd | \name{strip}
\alias{strip}
\title{Strip Text}
\usage{
strip(x, char.keep = "~~", digit.remove = TRUE, apostrophe.remove = TRUE,
lower.case = TRUE)
}
\arguments{
\item{x}{The text variable.}
\item{char.keep}{A character vector of symbols (i.e.,
punctuation) that \code{\link[qdap]{strip}} should keep.
The default is to strip every symbol except apostrophes
and a double tilde \code{"~~"}. The double tilde
\code{"~~"} is included for a convenient means of keeping
word groups together in functions that split text apart
based on spaces. To remove double tildes \code{"~~"} set
\code{char.keep} to \code{NULL}.}
\item{digit.remove}{logical. If \code{TRUE} strips
digits from the text.}
\item{apostrophe.remove}{logical. If \code{TRUE} removes
apostrophes from the output.}
\item{lower.case}{logical. If \code{TRUE} forces all
alpha characters to lower case.}
}
\value{
Returns a vector of text that has been stripped of unwanted
characters.
}
\description{
Strip text of unwanted characters.
}
\examples{
\dontrun{
DATA$state #no strip applied
strip(DATA$state)
strip(DATA$state, apostrophe.remove=FALSE)
strip(DATA$state, char.keep = c("?", "."))
}
}
\seealso{
\code{\link[qdap]{rm_stopwords}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/protein_quant.R
\name{rrollup}
\alias{rrollup}
\title{Applies rrollup function}
\usage{
rrollup(pepData, combine_fn, parallel = TRUE)
}
\arguments{
\item{pepData}{an omicsData object of class 'pepData'}
\item{combine_fn}{logical indicating what combine_fn to use, defaults to
median, other option is mean}
\item{parallel}{logical indicating whether or not to use "doParallel" loop in
applying rrollup function. Defaults to TRUE.}
}
\value{
an omicsData object of class 'proData'
}
\description{
This function applies the rrollup method to a pepData object for each unique
protein and returns a proData object.
}
\details{
In the rrollup method, peptides are scaled based on a reference
peptide and protein abundance is set as the mean of these scaled peptides.
}
\references{
Matzke, M. M., Brown, J. N., Gritsenko, M. A., Metz, T. O.,
Pounds, J. G., Rodland, K. D., ... Webb-Robertson, B.-J. (2013). \emph{A
comparative analysis of computational approaches to relative protein
quantification using peptide peak intensities in label-free LC-MS
proteomics experiments}. Proteomics, 13(0), 493-503.
Polpitiya, A. D., Qian, W.-J., Jaitly, N., Petyuk, V. A., Adkins, J. N.,
Camp, D. G., ... Smith, R. D. (2008). \emph{DAnTE: a statistical tool for
quantitative analysis of -omics data}. Bioinformatics (Oxford, England),
24(13), 1556-1558.
}
| /man/rrollup.Rd | permissive | clabornd/pmartR | R | false | true | 1,437 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/protein_quant.R
\name{rrollup}
\alias{rrollup}
\title{Applies rrollup function}
\usage{
rrollup(pepData, combine_fn, parallel = TRUE)
}
\arguments{
\item{pepData}{an omicsData object of class 'pepData'}
\item{combine_fn}{logical indicating what combine_fn to use, defaults to
median, other option is mean}
\item{parallel}{logical indicating whether or not to use "doParallel" loop in
applying rrollup function. Defaults to TRUE.}
}
\value{
an omicsData object of class 'proData'
}
\description{
This function applies the rrollup method to a pepData object for each unique
protein and returns a proData object.
}
\details{
In the rrollup method, peptides are scaled based on a reference
peptide and protein abundance is set as the mean of these scaled peptides.
}
\references{
Matzke, M. M., Brown, J. N., Gritsenko, M. A., Metz, T. O.,
Pounds, J. G., Rodland, K. D., ... Webb-Robertson, B.-J. (2013). \emph{A
comparative analysis of computational approaches to relative protein
quantification using peptide peak intensities in label-free LC-MS
proteomics experiments}. Proteomics, 13(0), 493-503.
Polpitiya, A. D., Qian, W.-J., Jaitly, N., Petyuk, V. A., Adkins, J. N.,
Camp, D. G., ... Smith, R. D. (2008). \emph{DAnTE: a statistical tool for
quantitative analysis of -omics data}. Bioinformatics (Oxford, England),
24(13), 1556-1558.
}
|
library(rLakeAnalyzer)
### Name: wtr.plot.temp
### Title: Creates a time series plot of the thermocline and top and bottom
### of the metalimnion
### Aliases: wtr.plot.temp
### Keywords: hplot
### ** Examples
# Path to the example water-temperature file bundled with the package.
wtr.path <- system.file('extdata', 'Sparkling.wtr', package="rLakeAnalyzer")
#Load data for example lake, Sparkling Lake, Wisconsin.
wtr = load.ts(wtr.path)
## Not run:
##D wtr.plot.temp(wtr)
## End(Not run)
### Name: wtr.plot.temp
### Title: Creates a time series plot of the thermocline and top and bottom
### of the metalimnion
### Aliases: wtr.plot.temp
### Keywords: hplot
### ** Examples
wtr.path <- system.file('extdata', 'Sparkling.wtr', package="rLakeAnalyzer")
#Load data for example lake, Sparkilng Lake, Wisconsin.
wtr = load.ts(wtr.path)
## Not run:
##D wtr.plot.temp(wtr)
## End(Not run)
|
\name{FSR}
\alias{FSR}
\docType{data}
\title{Freitag and Schlicht (2009)}
\description{The \code{FSR} data frame has 16 rows and 8 sets}
\usage{data(FSR)}
\format{
A data frame with 16 observations on the following 8 sets.
\describe{
  \item{\code{integrated_comp_schools}}{a numeric vector. Condition, percentage of Pupils Enrolled in Integrated Comprehensive Schools.}
\item{\code{coop_comp_schools}}{a numeric vector. Condition, percentage of Pupils Enrolled in Cooperative Comprehensive Schools.}
\item{\code{full_day_schools}}{a numeric vector. Condition, percentage of Pupils Enrolled in All-Day Schools}
\item{\code{child_care}}{a numeric vector. Condition, ratio of Number of Child Care Facilities to Total Population between 0 and 6 Years (percent).}
\item{\code{pre_schools}}{a numeric vector. Condition, ratio of pupils Enrolled in Pre-School to Total 6-Year-Old Population (per cent)}
\item{\code{early_tracking}}{a numeric vector. Condition, onset of Tracking, Legal Regulation.}
  \item{\code{outcome}}{a numeric vector. Outcome, cases with a high Degree of Social Inequality in Education.}
\item{\code{indep_hauptschule}}{a numeric vector. Condition, autonomy of the Hauptschule.}
}
}
\details{
Data are used by Freitag and Schlicht (2009) to analyze social inequality in education. The data are raw scores.}
%\source{}
\references{Freitag, M, and Schlicht, R. (2009) "Educational Federalism in Germany: Foundations of Social Inequalities in Education", Governance 22(1), pp. 47-72.
Schneider, C. Q., Wagemann, C. (2012) Set-Theoretic Methods for the Social Sciences, Cambridge University Press: Cambridge.
}
\examples{data(FSR)}
\keyword{datasets}
| /man/FSR.Rd | no_license | nenaoana/SetMethods | R | false | false | 1,695 | rd | \name{FSR}
\alias{FSR}
\docType{data}
\title{Freitag and Schlicht (2009)}
\description{The \code{FSR} data frame has 16 rows and 8 sets}
\usage{data(FSR)}
\format{
A data frame with 16 observations on the following 8 sets.
\describe{
  \item{\code{integrated_comp_schools}}{a numeric vector. Condition, percentage of Pupils Enrolled in Integrated Comprehensive Schools.}
\item{\code{coop_comp_schools}}{a numeric vector. Condition, percentage of Pupils Enrolled in Cooperative Comprehensive Schools.}
\item{\code{full_day_schools}}{a numeric vector. Condition, percentage of Pupils Enrolled in All-Day Schools}
\item{\code{child_care}}{a numeric vector. Condition, ratio of Number of Child Care Facilities to Total Population between 0 and 6 Years (percent).}
\item{\code{pre_schools}}{a numeric vector. Condition, ratio of pupils Enrolled in Pre-School to Total 6-Year-Old Population (per cent)}
\item{\code{early_tracking}}{a numeric vector. Condition, onset of Tracking, Legal Regulation.}
  \item{\code{outcome}}{a numeric vector. Outcome, cases with a high Degree of Social Inequality in Education.}
\item{\code{indep_hauptschule}}{a numeric vector. Condition, autonomy of the Hauptschule.}
}
}
\details{
Data are used by Freitag and Schlicht (2009) to analyze social inequality in education. The data are raw scores.}
%\source{}
\references{Freitag, M, and Schlicht, R. (2009) "Educational Federalism in Germany: Foundations of Social Inequalities in Education", Governance 22(1), pp. 47-72.
Schneider, C. Q., Wagemann, C. (2012) Set-Theoretic Methods for the Social Sciences, Cambridge University Press: Cambridge.
}
\examples{data(FSR)}
\keyword{datasets}
|
# create new flowering time index
library(dplyr)
library(purrr)
library(broom)
library(tidyr)
library(ggplot2)
## binning
binx<-0.5 ## long bin size in degrees
biny<-0.5 ## lat bin size in degrees
PhenolAllData <- read.csv("outputs/PhenolAllData.csv")
# Candidate bin centres spanning the observed coordinate range.
# NOTE(review): binx/biny and longs/lats appear unused below -- the grid is
# built with fixed 0.1-degree cut() breaks instead; confirm before removing.
longs<-seq(min(PhenolAllData$Longitude,na.rm=T)+binx,max(PhenolAllData$Longitude,na.rm=T)-binx,by=binx*2)
lats<-seq(min(PhenolAllData$Latitude,na.rm=T)+biny,max(PhenolAllData$Latitude,na.rm=T)-biny,by=biny*2)
# make bins for grids: assign each record to a 0.1-degree lat/long interval
lat_groups <- cut(PhenolAllData$Latitude, breaks = seq(floor(min(PhenolAllData$Latitude)),
                                                       ceiling(max(PhenolAllData$Latitude)),
                                                       by = 0.1))
long_groups <- cut(PhenolAllData$Longitude, breaks = seq(floor(min(PhenolAllData$Longitude)), ceiling(max(PhenolAllData$Longitude)), 0.1))
# tidy grid has points grouped into bins based on lat long
tidy_grid <- PhenolAllData %>%
group_by(long_gr = long_groups, lat_gr = lat_groups) %>%
select(time, GD, GDDs, phind, Pop_Code, Year, minYear)
# Draw one random row from every group of `df` (a grouped tibble).
# `x` is the replicate index supplied by lapply(); it is ignored, but varying
# it makes each call an independent bootstrap draw.
sample_grid <- function(x, df){
  one_per_group <- sample_n(df, 1)
  as.data.frame(ungroup(one_per_group))
}
set.seed(1001)
list_df <- lapply(1:1000, sample_grid, df = tidy_grid)
tib_list_df <- tibble(group = 1:1000, data = list_df)
library(progress)
pb <- progress_bar$new(total = 1000)
# Fit the full phenology model to one bootstrap sample and return a tidy
# one-row-per-coefficient summary (term, estimate, std.error, ...).
# phind is modelled as a function of growing degree days (GDDs) with rate
# terms in time and GD; multi-start NLS (iter = 100 random starts drawn
# between start_lower and start_upper) guards against bad local optima.
# NOTE(review): relies on the global progress bar `pb` created above.
tidy_full_nls <- function(x){
  model <- nls.multstart::nls_multstart(
    phind~a^exp(-exp(1)*(b + c * time + d * GD + e * GD * time) * GDDs),
    data=x,
    iter = 100,
    start_upper = c(a = 0.3, b=0.0001, c = 0.00001, d = 0.000001, e = 0.0000001),
    start_lower = c(a = 0.001, b=0.000001, c = 0.0000001, d = 0.0000001, e = 0.00000001))
  pb$tick()  # advance the shared progress bar (side effect)
  tidy(model)
}
coef_list <- tib_list_df %>% mutate(tidy_model = map(data, tidy_full_nls))
# Extract the per-bootstrap estimates for each model coefficient.
# Unnest the tidied models once (the original repeated the same unnest for
# every term, redoing identical work five times) and then split by term name.
tidy_terms <- coef_list %>% unnest(tidy_model) %>% select(term, estimate, std.error)
a_coef <- tidy_terms %>% filter(term == "a")
b_coef <- tidy_terms %>% filter(term == "b")
c_coef <- tidy_terms %>% filter(term == "c")
d_coef <- tidy_terms %>% filter(term == "d")
e_coef <- tidy_terms %>% filter(term == "e")
# Stack the per-term tables (all a rows, then b rows, ...), matching the
# original rbind() row order.
full_coef_df <- rbind(a_coef, b_coef, c_coef, d_coef, e_coef)
save(full_coef_df, file = "outputs/full_pheno_mod_coefs.RData")
load("outputs/full_pheno_mod_coefs.RData")
library(ggplot2)
ggplot(full_coef_df) +
geom_histogram(aes(x = estimate)) +
facet_grid(.~term, scales = "free") + theme_classic() +
theme(axis.text.x = element_text(angle = 90))
mean_a <- mean(a_coef$estimate)
mean_b <- mean(b_coef$estimate)
mean_c <- mean(c_coef$estimate)
mean_d <- mean(d_coef$estimate)
mean_e <- mean(e_coef$estimate)
# Recompute the flowering-time index for the full data set using the mean
# bootstrap coefficients.
PhenolAllData <- read.csv("outputs/PhenolAllData.csv")
# NOTE(review): time and GD enter as data-set means, so phi_calc varies only
# with each record's GDDs -- confirm this (rather than per-record time/GD)
# is the intended reference curve.
PhenolAllData$phi_calc <- mean_a^exp(-exp(1)*(mean_b + mean_c * mean(PhenolAllData$time) + mean_d * mean(PhenolAllData$GD) + mean_e * mean(PhenolAllData$time) * mean(PhenolAllData$GD))* PhenolAllData$GDDs)
# flowering time index: deviation of the reference curve from observed phenology
PhenolAllData$fti <- PhenolAllData$phi_calc - PhenolAllData$phind
write.csv(PhenolAllData, "outputs/PhenolAllData.csv", row.names=F)
| /scripts/new_model.R | no_license | ColauttiLab/WuColauttiHerbarium | R | false | false | 3,259 | r | # create new flowering time index
library(dplyr)
library(purrr)
library(broom)
library(tidyr)
library(ggplot2)
## binning
binx<-0.5 ## long bin size in degrees
biny<-0.5 ## lat bin size in degrees
PhenolAllData <- read.csv("outputs/PhenolAllData.csv")
longs<-seq(min(PhenolAllData$Longitude,na.rm=T)+binx,max(PhenolAllData$Longitude,na.rm=T)-binx,by=binx*2)
lats<-seq(min(PhenolAllData$Latitude,na.rm=T)+biny,max(PhenolAllData$Latitude,na.rm=T)-biny,by=biny*2)
# make bins for grids
lat_groups <- cut(PhenolAllData$Latitude, breaks = seq(floor(min(PhenolAllData$Latitude)),
ceiling(max(PhenolAllData$Latitude)),
by = 0.1))
long_groups <- cut(PhenolAllData$Longitude, breaks = seq(floor(min(PhenolAllData$Longitude)), ceiling(max(PhenolAllData$Longitude)), 0.1))
# tidy grid has points grouped into bins based on lat long
tidy_grid <- PhenolAllData %>%
group_by(long_gr = long_groups, lat_gr = lat_groups) %>%
select(time, GD, GDDs, phind, Pop_Code, Year, minYear)
# Draw one random row from every group of `df` (a grouped tibble).
# `x` is the replicate index supplied by lapply(); it is ignored, but varying
# it makes each call an independent bootstrap draw.
sample_grid <- function(x, df){
  one_per_group <- sample_n(df, 1)
  as.data.frame(ungroup(one_per_group))
}
set.seed(1001)
list_df <- lapply(1:1000, sample_grid, df = tidy_grid)
tib_list_df <- tibble(group = 1:1000, data = list_df)
library(progress)
pb <- progress_bar$new(total = 1000)
# Fit the full phenology model to one bootstrap sample and return a tidy
# one-row-per-coefficient summary (term, estimate, std.error, ...).
# phind is modelled as a function of growing degree days (GDDs) with rate
# terms in time and GD; multi-start NLS (iter = 100 random starts drawn
# between start_lower and start_upper) guards against bad local optima.
# NOTE(review): relies on the global progress bar `pb` created above.
tidy_full_nls <- function(x){
  model <- nls.multstart::nls_multstart(
    phind~a^exp(-exp(1)*(b + c * time + d * GD + e * GD * time) * GDDs),
    data=x,
    iter = 100,
    start_upper = c(a = 0.3, b=0.0001, c = 0.00001, d = 0.000001, e = 0.0000001),
    start_lower = c(a = 0.001, b=0.000001, c = 0.0000001, d = 0.0000001, e = 0.00000001))
  pb$tick()  # advance the shared progress bar (side effect)
  tidy(model)
}
coef_list <- tib_list_df %>% mutate(tidy_model = map(data, tidy_full_nls))
# Extract the per-bootstrap estimates for each model coefficient.
# Unnest the tidied models once (the original repeated the same unnest for
# every term, redoing identical work five times) and then split by term name.
tidy_terms <- coef_list %>% unnest(tidy_model) %>% select(term, estimate, std.error)
a_coef <- tidy_terms %>% filter(term == "a")
b_coef <- tidy_terms %>% filter(term == "b")
c_coef <- tidy_terms %>% filter(term == "c")
d_coef <- tidy_terms %>% filter(term == "d")
e_coef <- tidy_terms %>% filter(term == "e")
# Stack the per-term tables (all a rows, then b rows, ...), matching the
# original rbind() row order.
full_coef_df <- rbind(a_coef, b_coef, c_coef, d_coef, e_coef)
save(full_coef_df, file = "outputs/full_pheno_mod_coefs.RData")
load("outputs/full_pheno_mod_coefs.RData")
library(ggplot2)
ggplot(full_coef_df) +
geom_histogram(aes(x = estimate)) +
facet_grid(.~term, scales = "free") + theme_classic() +
theme(axis.text.x = element_text(angle = 90))
mean_a <- mean(a_coef$estimate)
mean_b <- mean(b_coef$estimate)
mean_c <- mean(c_coef$estimate)
mean_d <- mean(d_coef$estimate)
mean_e <- mean(e_coef$estimate)
# Recompute the flowering-time index for the full data set using the mean
# bootstrap coefficients.
PhenolAllData <- read.csv("outputs/PhenolAllData.csv")
# NOTE(review): time and GD enter as data-set means, so phi_calc varies only
# with each record's GDDs -- confirm this (rather than per-record time/GD)
# is the intended reference curve.
PhenolAllData$phi_calc <- mean_a^exp(-exp(1)*(mean_b + mean_c * mean(PhenolAllData$time) + mean_d * mean(PhenolAllData$GD) + mean_e * mean(PhenolAllData$time) * mean(PhenolAllData$GD))* PhenolAllData$GDDs)
# flowering time index: deviation of the reference curve from observed phenology
PhenolAllData$fti <- PhenolAllData$phi_calc - PhenolAllData$phind
write.csv(PhenolAllData, "outputs/PhenolAllData.csv", row.names=F)
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{ex25.11}
\alias{ex25.11}
\title{Data from Exercise 25.11}
\format{\Sexpr[results=rd]{bps5data:::doc_data("ex25.11") }}
\source{
\url{ http://bcs.whfreeman.com/bps5e/content/cat_030/PC-Text.zip }
}
\usage{
data("ex25.11")
}
\description{
Data from Exercise 25.11 of \emph{The Basic Practice of Statistics}, 5th edition.
}
\references{
Moore, David S. 2009. \emph{The Basic Practice of Statistics}. 5th edition. New York: W. H. Freeman.
}
\seealso{
Other datasets from Chapter 25 of \emph{BPS} 5th ed.: \code{\link{eg25.01}};
\code{\link{eg25.05}}; \code{\link{eg25.06}};
\code{\link{eg25.09}}; \code{\link{eg25.10}};
\code{\link{ex25.01}}; \code{\link{ex25.02}};
\code{\link{ex25.05}}; \code{\link{ex25.06}};
\code{\link{ex25.07}}; \code{\link{ex25.08}};
\code{\link{ex25.12}}; \code{\link{ex25.13}};
\code{\link{ex25.14}}; \code{\link{ex25.15}};
\code{\link{ex25.16}}; \code{\link{ex25.18}};
\code{\link{ex25.19}}; \code{\link{ex25.21}};
\code{\link{ex25.22}}; \code{\link{ex25.23}};
\code{\link{ex25.24}}; \code{\link{ex25.25}};
\code{\link{ex25.26}}; \code{\link{ex25.27}};
\code{\link{ex25.28}}; \code{\link{ex25.29}};
\code{\link{ex25.30}}; \code{\link{ex25.42}};
\code{\link{ex25.43}}; \code{\link{ex25.44}};
\code{\link{ex25.45}}; \code{\link{ex25.48}};
\code{\link{ex25.49}}; \code{\link{ta25.01}}
}
| /man/ex25.11.Rd | no_license | jrnold/bps5data | R | false | false | 1,456 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{ex25.11}
\alias{ex25.11}
\title{Data from Exercise 25.11}
\format{\Sexpr[results=rd]{bps5data:::doc_data("ex25.11") }}
\source{
\url{ http://bcs.whfreeman.com/bps5e/content/cat_030/PC-Text.zip }
}
\usage{
data("ex25.11")
}
\description{
Data from Exercise 25.11 of \emph{The Basic Practice of Statistics}, 5th edition.
}
\references{
Moore, David S. 2009. \emph{The Basic Practice of Statistics}. 5th edition. New York: W. H. Freeman.
}
\seealso{
Other datasets from Chapter 25 of \emph{BPS} 5th ed.: \code{\link{eg25.01}};
\code{\link{eg25.05}}; \code{\link{eg25.06}};
\code{\link{eg25.09}}; \code{\link{eg25.10}};
\code{\link{ex25.01}}; \code{\link{ex25.02}};
\code{\link{ex25.05}}; \code{\link{ex25.06}};
\code{\link{ex25.07}}; \code{\link{ex25.08}};
\code{\link{ex25.12}}; \code{\link{ex25.13}};
\code{\link{ex25.14}}; \code{\link{ex25.15}};
\code{\link{ex25.16}}; \code{\link{ex25.18}};
\code{\link{ex25.19}}; \code{\link{ex25.21}};
\code{\link{ex25.22}}; \code{\link{ex25.23}};
\code{\link{ex25.24}}; \code{\link{ex25.25}};
\code{\link{ex25.26}}; \code{\link{ex25.27}};
\code{\link{ex25.28}}; \code{\link{ex25.29}};
\code{\link{ex25.30}}; \code{\link{ex25.42}};
\code{\link{ex25.43}}; \code{\link{ex25.44}};
\code{\link{ex25.45}}; \code{\link{ex25.48}};
\code{\link{ex25.49}}; \code{\link{ta25.01}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process_precip_PCA_data.R
\name{process_SSprecip_PCA_data}
\alias{process_SSprecip_PCA_data}
\title{Process Spring/Summer PCA Precipitation data before merging with survey data.}
\usage{
process_SSprecip_PCA_data(data)
}
\arguments{
\item{data}{Output PCA data (from SAS).}
}
\description{
For use with SAS data.
}
| /man/process_SSprecip_PCA_data.Rd | no_license | ksauby/modresproc | R | false | true | 391 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process_precip_PCA_data.R
\name{process_SSprecip_PCA_data}
\alias{process_SSprecip_PCA_data}
\title{Process Spring/Summer PCA Precipitation data before merging with survey data.}
\usage{
process_SSprecip_PCA_data(data)
}
\arguments{
\item{data}{Output PCA data (from SAS).}
}
\description{
For use with SAS data.
}
|
# ------------------------------------------------------------------------------
# David Phillips
#
# 6/19/2019
# Analysis to assess the impact of having the full package of iCCM services on mortality
# ------------------------------------------------------------------------------
# -------------------
# Set up R
rm(list=ls())
library(data.table)
library(ggplot2)
# -------------------
# ---------------------------------------------------------------------------------------
# Files, directories and settings
# whether or not to run analysis among full-iccm-package health zones only
fullpackageOnly = TRUE
# root directory
j = ifelse(Sys.info()[1]=='Windows', 'J:', '/home/j')
dir = paste0(j, '/Project/Evaluation/GF/')
# input file
inFile = paste0(dir, 'impact_evaluation/cod/prepped_data/ssc_analyses/DiD_input_data.rdata')
if(fullpackageOnly) inFile = gsub('.rdata', '_full_package_HZs_only.rdata', inFile)
# file listing health zones
hzFile = paste0(dir, '/mapping/cod/ssc_lists/prepped_hz_list.csv')
# output files
outFile = paste0(dir, '/impact_evaluation/cod/prepped_data/ssc_analyses/DiD_results.rdata')
graphFile = paste0(dir, '/impact_evaluation/cod/visualizations/ssc_analyses/DiD_results.pdf')
# modify output file names if we're running analysis among UNICEF health zones only
if(fullpackageOnly) graphFile = gsub('.pdf', '_full_package_HZs_only.pdf', graphFile)
if(fullpackageOnly) outFile = gsub('.rdata', '_full_package_HZs_only.rdata', outFile)
# ---------------------------------------------------------------------------------------
# -------------------------------------
# Load
# load data produced by set_up_data.r
load(inFile)
# -------------------------------------
# ------------------------------------------------------------------------------
# Run analysis
# Difference-in-differences by OLS: in each model below the coefficient on
# the intervention:period interaction is the DiD estimate of the intervention
# effect on that outcome (observations are health-zone-quarters).
# difference in differences with OLS on malaria treatment coverage (uncomplicated)
lmFit1 = lm(mildMalariaTreated_under5_rate~intervention*period, data)
summary(lmFit1)  # printed for interactive inspection
# difference in differences with OLS on malaria mortality
lmFit2 = lm(malariaDeaths_under5_rate~intervention*period, data)
summary(lmFit2)
# difference in differences with OLS on all cause mortality
lmFit3 = lm(allDeaths_under5_rate~intervention*period, data)
summary(lmFit3)
# difference in differences with OLS on malaria case detection rate
lmFit4 = lm(newCasesMalariaMild_under5_rate~intervention*period, data)
summary(lmFit4)
# Predictions (fit / lwr / upr, 95% confidence) from each model at the group
# means, used below for the DiD graphs.
preds1 = data.table(predict(lmFit1, interval='confidence', newdata=means))
preds2 = data.table(predict(lmFit2, interval='confidence', newdata=means))
preds3 = data.table(predict(lmFit3, interval='confidence', newdata=means))
preds4 = data.table(predict(lmFit4, interval='confidence', newdata=means))
# suffix the fit/lwr/upr columns so all four outcomes can coexist in `means`
setnames(preds1, paste0(names(preds1), '_coverage'))
setnames(preds2, paste0(names(preds2), '_malaria'))
setnames(preds3, paste0(names(preds3), '_all_cause'))
# BUG FIX: this line previously renamed preds3 a second time, destroying the
# *_all_cause columns (p11 below references fit_all_cause) and leaving preds4
# with unsuffixed names
setnames(preds4, paste0(names(preds4), '_detection'))
means = cbind(means, preds1)
means = cbind(means, preds2)
means = cbind(means, preds3)
means = cbind(means, preds4)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Graph histograms
# switch title if specified
if (fullpackageOnly==FALSE) intlab = 'SSCs'
if (fullpackageOnly==TRUE) intlab = 'iCCM'
# histogram of malaria treatment coverage
cut = 100
n_trunc1 = sum(data$mildMalariaTreated_under5_rate>=cut)
p1 = ggplot(data[mildMalariaTreated_under5_rate<cut], aes(x=mildMalariaTreated_under5_rate)) +
geom_histogram() +
geom_vline(data=means, aes(xintercept=mildMalariaTreated_under5_rate)) +
facet_grid(intervention_label~period_label, scales='free') +
labs(title='Under-5 Antimalarial Treatment Coverage',
subtitle=paste('Comparison between Health Zones with and without', intlab),
y='Frequency', x='Under-5 Antimalarial Treatment Coverage (proportion of cases)',
caption=paste0('Unit of analysis is health zone-quarters, 2010-2018\n(',
n_trunc1, ' values >', cut, ' not displayed)')) +
theme_bw()
# histogram of malaria mortality
cut = 100
n_trunc2 = sum(data$malariaDeaths_under5_rate>=cut)
p2 = ggplot(data[malariaDeaths_under5_rate<cut], aes(x=malariaDeaths_under5_rate)) +
geom_histogram() +
geom_vline(data=means, aes(xintercept=malariaDeaths_under5_rate)) +
facet_grid(intervention_label~period_label, scales='free') +
labs(title='Under-5 Malaria Mortality Rates',
subtitle=paste('Comparison between Health Zones with and without', intlab),
y='Frequency', x='Under-5 Malaria Mortality Rate (per 100,000 population)',
caption=paste0('Unit of analysis is health zone-quarters, 2010-2018\n(',
n_trunc2, ' values >', cut, ' not displayed)')) +
theme_bw()
# histogram of all-cause mortality
cut = 300
n_trunc3 = sum(data$allDeaths_under5_rate>=cut)
p3 = ggplot(data[allDeaths_under5_rate<cut], aes(x=allDeaths_under5_rate)) +
geom_histogram() +
geom_vline(data=means, aes(xintercept=allDeaths_under5_rate)) +
facet_grid(intervention_label~period_label, scales='free') +
labs(title='Under-5 All-Cause Mortality Rates',
subtitle=paste('Comparison between Health Zones with and without', intlab),
y='Frequency', x='Under-5 All-Cause Mortality Rate (per 100,000 population)',
caption=paste0('Unit of analysis is health zone-quarters, 2010-2018\n(',
n_trunc3, ' values >', cut, ' not displayed)')) +
theme_bw()
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Graph time series plots
# time series graph of the malaria treatment data
p4 = ggplot(means_ts, aes(y=mildMalariaTreated_under5_rate, ymin=lower_pctle_mild_coverage,
ymax=upper_pctle_mild_coverage, x=date, color=intervention_label, fill=intervention_label)) +
geom_ribbon(alpha=.5) +
geom_line(size=1.25) +
scale_fill_manual(values=c('#33a02c', '#1f78b4')) +
scale_color_manual(values=c('#33a02c', '#1f78b4')) +
labs(title='Antimalarial Treatment Coverage Under 5',
subtitle=paste('Comparing Health Zones with and without', intlab),
y='Under-5 Antimalarial Treatment Coverage (proportion of cases)', x='Period',
color='', fill='',
caption='Lines and intervals show median, 20th and 80th percentiles of health zones') +
theme_bw()
# time series graph of the malaria treatment data excluding the control group
p4b = ggplot(means_ts[intervention==1], aes(y=mildMalariaTreated_under5_rate, ymin=lower_pctle_mild_coverage,
ymax=upper_pctle_mild_coverage, x=date)) +
geom_ribbon(alpha=.5, fill='#1f78b4') +
geom_line(size=1.25, color='#1f78b4') +
labs(title='Antimalarial Treatment Coverage Under 5',
subtitle=paste('Health Zones with', intlab),
y='Under-5 Antimalarial Treatment Coverage (proportion of cases)', x='Period',
color='', fill='',
caption='Lines and intervals show median, 20th and 80th percentiles of health zones') +
theme_bw()
# time series graph of the malaria case detection rate
p5 = ggplot(means_ts, aes(y=newCasesMalariaMild_under5_rate, ymin=lower_pctle_mild_detection,
ymax=upper_pctle_mild_detection, x=date, color=intervention_label, fill=intervention_label)) +
geom_ribbon(alpha=.5) +
geom_line(size=1.25) +
scale_fill_manual(values=c('#33a02c', '#1f78b4')) +
scale_color_manual(values=c('#33a02c', '#1f78b4')) +
labs(title='Malaria Case Detection Rate Under 5',
subtitle=paste('Comparing Health Zones with and without', intlab),
y='Under-5 Malaria Case Detection Rate (per 10,000 population)', x='Period',
color='', fill='',
caption='Lines and intervals show median, 20th and 80th percentiles of health zones') +
theme_bw()
# time series graph of the malaria case detection rate excluding the control group
p5b = ggplot(means_ts[intervention==1], aes(y=newCasesMalariaMild_under5_rate, ymin=lower_pctle_mild_detection,
ymax=upper_pctle_mild_detection, x=date)) +
geom_ribbon(alpha=.5, fill='#1f78b4') +
geom_line(size=1.25, color='#1f78b4') +
labs(title='Malaria Case Detection Rate Under 5',
subtitle=paste('Health Zones with', intlab),
y='Under-5 Malaria Case Detection Rate (per 10,000 population)', x='Period',
color='', fill='',
caption='Lines and intervals show median, 20th and 80th percentiles of health zones') +
theme_bw()
# time series graph of the malaria case detection fraction of estimates
p6 = ggplot(means_ts, aes(y=proportion_estimated_cases_detected, ymin=lower_pctle_detection_prop,
ymax=upper_pctle_detection_prop, x=date, color=intervention_label, fill=intervention_label)) +
geom_ribbon(alpha=.5) +
geom_line(size=1.25) +
scale_fill_manual(values=c('#33a02c', '#1f78b4')) +
scale_color_manual(values=c('#33a02c', '#1f78b4')) +
labs(title='Ratio of Detected Malaria Cases to Estimated Malaria Cases',
subtitle=paste('Comparing Health Zones with and without', intlab),
y='Ratio of Detected Cases to Estimated Cases (all ages)', x='Period',
color='', fill='',
caption='Lines and intervals show median, 20th and 80th percentiles of health zones') +
theme_bw()
# time series graph of the malaria case detection fraction of estimates excluding the control group
p6b = ggplot(means_ts[intervention==1], aes(y=proportion_estimated_cases_detected, ymin=lower_pctle_detection_prop,
ymax=upper_pctle_detection_prop, x=date)) +
geom_ribbon(alpha=.5, fill='#1f78b4') +
geom_line(size=1.25, color='#1f78b4') +
labs(title='Ratio of Detected Malaria Cases to Estimated Malaria Cases',
subtitle=paste('Health Zones with', intlab),
y='Ratio of Detected Cases to Estimated Cases (all ages)', x='Period',
color='', fill='',
caption='Lines and intervals show median, 20th and 80th percentiles of health zones') +
theme_bw()
# time series graph of the malaria data
p7 = ggplot(means_ts, aes(y=malariaDeaths_under5_rate, ymin=lower_pctle_malaria,
ymax=upper_pctle_malaria, x=date, color=intervention_label, fill=intervention_label)) +
geom_ribbon(alpha=.5) +
geom_line(size=1.25) +
scale_fill_manual(values=c('#33a02c', '#1f78b4')) +
scale_color_manual(values=c('#33a02c', '#1f78b4')) +
labs(title='Malaria Mortality Under 5',
subtitle=paste('Comparing Health Zones with and without', intlab),
y='Under-5 Malaria Mortality Rate (per 10,000 population)', x='Period',
color='', fill='',
caption='Lines and intervals show median, 20th and 80th percentiles of health zones') +
theme_bw()
# time series graph of the malaria data excluding the control group
p7b = ggplot(means_ts[intervention==1], aes(y=malariaDeaths_under5_rate, ymin=lower_pctle_malaria,
ymax=upper_pctle_malaria, x=date)) +
geom_ribbon(alpha=.5, fill='#1f78b4') +
geom_line(size=1.25, color='#1f78b4') +
labs(title='Malaria Mortality Under 5',
subtitle=paste('Health Zones with', intlab),
y='Under-5 Malaria Mortality Rate (per 10,000 population)', x='Period',
color='', fill='',
caption='Lines and intervals show median, 20th and 80th percentiles of health zones') +
theme_bw()
# time series graph of the all-cause data
p8 = ggplot(means_ts, aes(y=allDeaths_under5_rate, ymin=lower_pctle_all_cause,
ymax=upper_pctle_all_cause, x=date, color=intervention_label, fill=intervention_label)) +
geom_ribbon(alpha=.5) +
geom_line(size=1.25) +
scale_fill_manual(values=c('#33a02c', '#1f78b4')) +
scale_color_manual(values=c('#33a02c', '#1f78b4')) +
labs(title='All-Cause Mortality Under 5',
subtitle=paste('Comparing Health Zones with and without', intlab),
y='Under-5 All-Cause Mortality Rate (per 10,000 population)', x='Period',
color='', fill='',
caption='Lines and intervals show median, 20th and 80th percentiles of health zones') +
theme_bw()
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Graph DiD results
# traditional DiD graph with treatment-coverage model estimates
# (titles use `intlab` so they stay correct when fullpackageOnly==TRUE,
# consistent with the time-series graphs above, which already use it)
p9 = ggplot(means, aes(y=fit_coverage, ymin=lwr_coverage, ymax=upr_coverage, 
		x=period_label, color=intervention_label)) + 
	geom_pointrange(position=position_dodge(width=.2), size=1) + 
	geom_line(aes(group=intervention_label), position=position_dodge(width=.2), size=1) + 
	scale_color_manual(values=c('#33a02c', '#1f78b4')) + 
	labs(title=paste('Difference-in-Difference Analysis of Health Zones with', intlab), 
		subtitle='Antimalarial Treatment Coverage', 
		y='Under-5 Antimalarial Treatment Coverage', x='Period', color='Health Zones', 
		caption='Points and ranges show midpoint, upper and lower 95% confidence interval from model') + 
	theme_bw()
# traditional DiD graph with malaria mortality model estimates
p10 = ggplot(means, aes(y=fit_malaria, ymin=lwr_malaria, ymax=upr_malaria, 
		x=period_label, color=intervention_label)) + 
	geom_pointrange(position=position_dodge(width=.2), size=1) + 
	geom_line(aes(group=intervention_label), position=position_dodge(width=.2), size=1) + 
	scale_color_manual(values=c('#33a02c', '#1f78b4')) + 
	labs(title=paste('Difference-in-Difference Analysis of Health Zones with', intlab), 
		subtitle='Malaria-Specific Mortality', 
		y='Under-5 Malaria Mortality Rate', x='Period', color='Health Zones', 
		caption='Points and ranges show midpoint, upper and lower 95% confidence interval from model') + 
	theme_bw()
# traditional DiD graph with all-cause mortality model estimates
p11 = ggplot(means, aes(y=fit_all_cause, ymin=lwr_all_cause, ymax=upr_all_cause, 
		x=period_label, color=intervention_label)) + 
	geom_pointrange(position=position_dodge(width=.2), size=1) + 
	geom_line(aes(group=intervention_label), position=position_dodge(width=.2), size=1) + 
	scale_color_manual(values=c('#33a02c', '#1f78b4')) + 
	labs(title=paste('Difference-in-Difference Analysis of Health Zones with', intlab), 
		subtitle='All-Cause Mortality', 
		y='Under-5 All-Cause Mortality Rate', x='Period', color='Health Zones', 
		caption='Points and ranges show midpoint, upper and lower 95% confidence interval from model') + 
	theme_bw()
# ------------------------------------------------------------------------------
# -----------------------------------
# Save results
# save graphs
pdf(graphFile, height=5.5, width=8)
p1
p2
p3
p4
p4b
p5
p5b
p6
p6b
p7
p7b
p8
p9
p10
p11
dev.off()
# save results
dropObjs = c('inFile', 'outFile', 'graphFile')
save(list=ls()[!ls() %in% dropObjs], file=outFile)
# -----------------------------------
| /impact_evaluation/drc/ssc_analyses/ssc_impact.r | no_license | ihmeuw/gf | R | false | false | 14,771 | r | # ------------------------------------------------------------------------------
# David Phillips
#
# 6/19/2019
# Analysis to assess the impact of having the full package of iCCM services on mortality
# ------------------------------------------------------------------------------
# -------------------
# Set up R
rm(list=ls())
library(data.table)
library(ggplot2)
# -------------------
# ---------------------------------------------------------------------------------------
# Files, directories and settings
# whether or not to run analysis among full-iccm-package health zones only
fullpackageOnly = TRUE
# root directory
j = ifelse(Sys.info()[1]=='Windows', 'J:', '/home/j')
dir = paste0(j, '/Project/Evaluation/GF/')
# input file
inFile = paste0(dir, 'impact_evaluation/cod/prepped_data/ssc_analyses/DiD_input_data.rdata')
if(fullpackageOnly) inFile = gsub('.rdata', '_full_package_HZs_only.rdata', inFile)
# file listing health zones
hzFile = paste0(dir, '/mapping/cod/ssc_lists/prepped_hz_list.csv')
# output files
outFile = paste0(dir, '/impact_evaluation/cod/prepped_data/ssc_analyses/DiD_results.rdata')
graphFile = paste0(dir, '/impact_evaluation/cod/visualizations/ssc_analyses/DiD_results.pdf')
# modify output file names if we're running analysis among UNICEF health zones only
if(fullpackageOnly) graphFile = gsub('.pdf', '_full_package_HZs_only.pdf', graphFile)
if(fullpackageOnly) outFile = gsub('.rdata', '_full_package_HZs_only.rdata', outFile)
# ---------------------------------------------------------------------------------------
# -------------------------------------
# Load
# load data produced by set_up_data.r
load(inFile)
# -------------------------------------
# ------------------------------------------------------------------------------
# Run analysis
# Difference-in-differences by OLS: in each model below the coefficient on
# the intervention:period interaction is the DiD estimate of the intervention
# effect on that outcome (observations are health-zone-quarters).
# difference in differences with OLS on malaria treatment coverage (uncomplicated)
lmFit1 = lm(mildMalariaTreated_under5_rate~intervention*period, data)
summary(lmFit1)  # printed for interactive inspection
# difference in differences with OLS on malaria mortality
lmFit2 = lm(malariaDeaths_under5_rate~intervention*period, data)
summary(lmFit2)
# difference in differences with OLS on all cause mortality
lmFit3 = lm(allDeaths_under5_rate~intervention*period, data)
summary(lmFit3)
# difference in differences with OLS on malaria case detection rate
lmFit4 = lm(newCasesMalariaMild_under5_rate~intervention*period, data)
summary(lmFit4)
# Predictions (fit / lwr / upr, 95% confidence) from each model at the group
# means, used below for the DiD graphs.
preds1 = data.table(predict(lmFit1, interval='confidence', newdata=means))
preds2 = data.table(predict(lmFit2, interval='confidence', newdata=means))
preds3 = data.table(predict(lmFit3, interval='confidence', newdata=means))
preds4 = data.table(predict(lmFit4, interval='confidence', newdata=means))
# suffix the fit/lwr/upr columns so all four outcomes can coexist in `means`
setnames(preds1, paste0(names(preds1), '_coverage'))
setnames(preds2, paste0(names(preds2), '_malaria'))
setnames(preds3, paste0(names(preds3), '_all_cause'))
# BUG FIX: this line previously renamed preds3 a second time, destroying the
# *_all_cause columns and leaving preds4 with unsuffixed names
setnames(preds4, paste0(names(preds4), '_detection'))
means = cbind(means, preds1)
means = cbind(means, preds2)
means = cbind(means, preds3)
means = cbind(means, preds4)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Graph histograms
# switch title if specified
if (fullpackageOnly==FALSE) intlab = 'SSCs'
if (fullpackageOnly==TRUE) intlab = 'iCCM'
# histogram of malaria treatment coverage
cut = 100
n_trunc1 = sum(data$mildMalariaTreated_under5_rate>=cut)
p1 = ggplot(data[mildMalariaTreated_under5_rate<cut], aes(x=mildMalariaTreated_under5_rate)) +
geom_histogram() +
geom_vline(data=means, aes(xintercept=mildMalariaTreated_under5_rate)) +
facet_grid(intervention_label~period_label, scales='free') +
labs(title='Under-5 Antimalarial Treatment Coverage',
subtitle=paste('Comparison between Health Zones with and without', intlab),
y='Frequency', x='Under-5 Antimalarial Treatment Coverage (proportion of cases)',
caption=paste0('Unit of analysis is health zone-quarters, 2010-2018\n(',
n_trunc1, ' values >', cut, ' not displayed)')) +
theme_bw()
# histogram of malaria mortality
cut = 100
n_trunc2 = sum(data$malariaDeaths_under5_rate>=cut)
p2 = ggplot(data[malariaDeaths_under5_rate<cut], aes(x=malariaDeaths_under5_rate)) +
geom_histogram() +
geom_vline(data=means, aes(xintercept=malariaDeaths_under5_rate)) +
facet_grid(intervention_label~period_label, scales='free') +
labs(title='Under-5 Malaria Mortality Rates',
subtitle=paste('Comparison between Health Zones with and without', intlab),
y='Frequency', x='Under-5 Malaria Mortality Rate (per 100,000 population)',
caption=paste0('Unit of analysis is health zone-quarters, 2010-2018\n(',
n_trunc2, ' values >', cut, ' not displayed)')) +
theme_bw()
# histogram of all-cause mortality
cut = 300
n_trunc3 = sum(data$allDeaths_under5_rate>=cut)
p3 = ggplot(data[allDeaths_under5_rate<cut], aes(x=allDeaths_under5_rate)) +
geom_histogram() +
geom_vline(data=means, aes(xintercept=allDeaths_under5_rate)) +
facet_grid(intervention_label~period_label, scales='free') +
labs(title='Under-5 All-Cause Mortality Rates',
subtitle=paste('Comparison between Health Zones with and without', intlab),
y='Frequency', x='Under-5 All-Cause Mortality Rate (per 100,000 population)',
caption=paste0('Unit of analysis is health zone-quarters, 2010-2018\n(',
n_trunc3, ' values >', cut, ' not displayed)')) +
theme_bw()
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Graph time series plots
# time series graph of the malaria treatment data
p4 = ggplot(means_ts, aes(y=mildMalariaTreated_under5_rate, ymin=lower_pctle_mild_coverage,
ymax=upper_pctle_mild_coverage, x=date, color=intervention_label, fill=intervention_label)) +
geom_ribbon(alpha=.5) +
geom_line(size=1.25) +
scale_fill_manual(values=c('#33a02c', '#1f78b4')) +
scale_color_manual(values=c('#33a02c', '#1f78b4')) +
labs(title='Antimalarial Treatment Coverage Under 5',
subtitle=paste('Comparing Health Zones with and without', intlab),
y='Under-5 Antimalarial Treatment Coverage (proportion of cases)', x='Period',
color='', fill='',
caption='Lines and intervals show median, 20th and 80th percentiles of health zones') +
theme_bw()
# time series graph of the malaria treatment data excluding the control group
p4b = ggplot(means_ts[intervention==1], aes(y=mildMalariaTreated_under5_rate, ymin=lower_pctle_mild_coverage,
ymax=upper_pctle_mild_coverage, x=date)) +
geom_ribbon(alpha=.5, fill='#1f78b4') +
geom_line(size=1.25, color='#1f78b4') +
labs(title='Antimalarial Treatment Coverage Under 5',
subtitle=paste('Health Zones with', intlab),
y='Under-5 Antimalarial Treatment Coverage (proportion of cases)', x='Period',
color='', fill='',
caption='Lines and intervals show median, 20th and 80th percentiles of health zones') +
theme_bw()
# time series graph of the malaria case detection rate
p5 = ggplot(means_ts, aes(y=newCasesMalariaMild_under5_rate, ymin=lower_pctle_mild_detection,
ymax=upper_pctle_mild_detection, x=date, color=intervention_label, fill=intervention_label)) +
geom_ribbon(alpha=.5) +
geom_line(size=1.25) +
scale_fill_manual(values=c('#33a02c', '#1f78b4')) +
scale_color_manual(values=c('#33a02c', '#1f78b4')) +
labs(title='Malaria Case Detection Rate Under 5',
subtitle=paste('Comparing Health Zones with and without', intlab),
y='Under-5 Malaria Case Detection Rate (per 10,000 population)', x='Period',
color='', fill='',
caption='Lines and intervals show median, 20th and 80th percentiles of health zones') +
theme_bw()
# time series graph of the malaria case detection rate excluding the control group
p5b = ggplot(means_ts[intervention==1], aes(y=newCasesMalariaMild_under5_rate, ymin=lower_pctle_mild_detection,
ymax=upper_pctle_mild_detection, x=date)) +
geom_ribbon(alpha=.5, fill='#1f78b4') +
geom_line(size=1.25, color='#1f78b4') +
labs(title='Malaria Case Detection Rate Under 5',
subtitle=paste('Health Zones with', intlab),
y='Under-5 Malaria Case Detection Rate (per 10,000 population)', x='Period',
color='', fill='',
caption='Lines and intervals show median, 20th and 80th percentiles of health zones') +
theme_bw()
# time series graph of the malaria case detection fraction of estimates
p6 = ggplot(means_ts, aes(y=proportion_estimated_cases_detected, ymin=lower_pctle_detection_prop,
ymax=upper_pctle_detection_prop, x=date, color=intervention_label, fill=intervention_label)) +
geom_ribbon(alpha=.5) +
geom_line(size=1.25) +
scale_fill_manual(values=c('#33a02c', '#1f78b4')) +
scale_color_manual(values=c('#33a02c', '#1f78b4')) +
labs(title='Ratio of Detected Malaria Cases to Estimated Malaria Cases',
subtitle=paste('Comparing Health Zones with and without', intlab),
y='Ratio of Detected Cases to Estimated Cases (all ages)', x='Period',
color='', fill='',
caption='Lines and intervals show median, 20th and 80th percentiles of health zones') +
theme_bw()
# time series graph of the malaria case detection fraction of estimates excluding the control group
p6b = ggplot(means_ts[intervention==1], aes(y=proportion_estimated_cases_detected, ymin=lower_pctle_detection_prop,
ymax=upper_pctle_detection_prop, x=date)) +
geom_ribbon(alpha=.5, fill='#1f78b4') +
geom_line(size=1.25, color='#1f78b4') +
labs(title='Ratio of Detected Malaria Cases to Estimated Malaria Cases',
subtitle=paste('Health Zones with', intlab),
y='Ratio of Detected Cases to Estimated Cases (all ages)', x='Period',
color='', fill='',
caption='Lines and intervals show median, 20th and 80th percentiles of health zones') +
theme_bw()
# time series graph of the malaria data
p7 = ggplot(means_ts, aes(y=malariaDeaths_under5_rate, ymin=lower_pctle_malaria,
ymax=upper_pctle_malaria, x=date, color=intervention_label, fill=intervention_label)) +
geom_ribbon(alpha=.5) +
geom_line(size=1.25) +
scale_fill_manual(values=c('#33a02c', '#1f78b4')) +
scale_color_manual(values=c('#33a02c', '#1f78b4')) +
labs(title='Malaria Mortality Under 5',
subtitle=paste('Comparing Health Zones with and without', intlab),
y='Under-5 Malaria Mortality Rate (per 10,000 population)', x='Period',
color='', fill='',
caption='Lines and intervals show median, 20th and 80th percentiles of health zones') +
theme_bw()
# time series graph of the malaria data excluding the control group
p7b = ggplot(means_ts[intervention==1], aes(y=malariaDeaths_under5_rate, ymin=lower_pctle_malaria,
ymax=upper_pctle_malaria, x=date)) +
geom_ribbon(alpha=.5, fill='#1f78b4') +
geom_line(size=1.25, color='#1f78b4') +
labs(title='Malaria Mortality Under 5',
subtitle=paste('Health Zones with', intlab),
y='Under-5 Malaria Mortality Rate (per 10,000 population)', x='Period',
color='', fill='',
caption='Lines and intervals show median, 20th and 80th percentiles of health zones') +
theme_bw()
# time series graph of the all-cause data
p8 = ggplot(means_ts, aes(y=allDeaths_under5_rate, ymin=lower_pctle_all_cause,
ymax=upper_pctle_all_cause, x=date, color=intervention_label, fill=intervention_label)) +
geom_ribbon(alpha=.5) +
geom_line(size=1.25) +
scale_fill_manual(values=c('#33a02c', '#1f78b4')) +
scale_color_manual(values=c('#33a02c', '#1f78b4')) +
labs(title='All-Cause Mortality Under 5',
subtitle=paste('Comparing Health Zones with and without', intlab),
y='Under-5 All-Cause Mortality Rate (per 10,000 population)', x='Period',
color='', fill='',
caption='Lines and intervals show median, 20th and 80th percentiles of health zones') +
theme_bw()
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Graph DiD results
# traditional DiD graph with malaria model estimates
p9 = ggplot(means, aes(y=fit_coverage, ymin=lwr_coverage, ymax=upr_coverage,
x=period_label, color=intervention_label)) +
geom_pointrange(position=position_dodge(width=.2), size=1) +
geom_line(aes(group=intervention_label), position=position_dodge(width=.2), size=1) +
scale_color_manual(values=c('#33a02c', '#1f78b4')) +
labs(title='Difference-in-Difference Analysis of Health Zones with SSCs', subtitle='Antimalarial Treatment Coverage',
y='Under-5 Antimalarial Treatment Coverage', x='Period', color='Health Zones',
caption='Points and ranges show midpoint, upper and lower 95% confidence interval from model') +
theme_bw()
# traditional DiD graph with malaria model estimates
p10 = ggplot(means, aes(y=fit_malaria, ymin=lwr_malaria, ymax=upr_malaria,
x=period_label, color=intervention_label)) +
geom_pointrange(position=position_dodge(width=.2), size=1) +
geom_line(aes(group=intervention_label), position=position_dodge(width=.2), size=1) +
scale_color_manual(values=c('#33a02c', '#1f78b4')) +
labs(title='Difference-in-Difference Analysis of Health Zones with SSCs', subtitle='Malaria-Specific Mortality',
y='Under-5 Malaria Mortality Rate', x='Period', color='Health Zones',
caption='Points and ranges show midpoint, upper and lower 95% confidence interval from model') +
theme_bw()
# traditional DiD graph with all-cause model estimates
p11 = ggplot(means, aes(y=fit_all_cause, ymin=lwr_all_cause, ymax=upr_all_cause,
x=period_label, color=intervention_label)) +
geom_pointrange(position=position_dodge(width=.2), size=1) +
geom_line(aes(group=intervention_label), position=position_dodge(width=.2), size=1) +
scale_color_manual(values=c('#33a02c', '#1f78b4')) +
labs(title='Difference-in-Difference Analysis of Health Zones with SSCs', subtitle='All-Cause Mortality',
y='Under-5 All-Cause Mortality Rate', x='Period', color='Health Zones',
caption='Points and ranges show midpoint, upper and lower 95% confidence interval from model') +
theme_bw()
# ------------------------------------------------------------------------------
# -----------------------------------
# Save results
# save graphs
pdf(graphFile, height=5.5, width=8)
p1
p2
p3
p4
p4b
p5
p5b
p6
p6b
p7
p7b
p8
p9
p10
p11
dev.off()
# save results
dropObjs = c('inFile', 'outFile', 'graphFile')
save(list=ls()[!ls() %in% dropObjs], file=outFile)
# -----------------------------------
|
# Use forward slashes (or doubled backslashes) in Windows paths: "\u" inside a
# double-quoted R string starts a unicode escape and is a syntax error.
setwd("c:/users/joseph/git/r-trading_homework")
# this script currently depends on downloading CSV format of historical data from yahoo
# and the file being named GSPC.csv
# https://finance.yahoo.com/quote/%5EGSPC/history?p=%5EGSPC
require(quantmod)
require(TTR)
library(dplyr)
library(plyr)
# ggplot2 is used throughout (ggplot/qplot) but was never attached
library(ggplot2)
# Shift a vector by `shiftLen` positions: positive values look ahead
# (rowShift(x, 1)[i] == x[i + 1]), negative values look back; positions that
# fall outside the vector become NA.
# BUG FIX: the original file dropped the `rowShift <-` assignment, leaving an
# anonymous function while every later line calls rowShift().
rowShift <- function(x, shiftLen = 1L) {
  r <- (1L + shiftLen):(length(x) + shiftLen)
  r[r < 1] <- NA  # indices before the start of the vector yield NA
  # indices past the end also yield NA automatically via out-of-range subsetting
  return(x[r])
}
#save today's date for labeling plot graphics
today <- Sys.Date()
#change symbol here
sym = "GSPC"
startDate <- '2017-01-01'
#S&P 500 is ^GSPC, RUT is ^RUT, VIX is ^VIX
#note, if you are working with a normal stock symbol, not an index, then
#you do not need the leading ^ that is inserted by the line below
symbol <- paste("^", sym, sep="")
getSymbols(symbol, from = startDate)
#change symbol name here
instrument <- GSPC
#number of periods
QQ <- read.csv('GSPC.csv', na.strings='null', colClasses=c("character", rep("numeric",6)))
QQ$Date <- as.Date(QQ$Date)
QQ$PrevClose <- rowShift(QQ$Close, -1)
QQ$Gap <- round(QQ$Open - rowShift(QQ$Close, -1), 2)
QQ$Range <- round(QQ$High - QQ$Low, 2)
QQ$HigherOpen <- QQ$Open > rowShift(QQ$Close, -1)
QQ$HigherClose <- QQ$Close > rowShift(QQ$Close, -1)
QQ$Adj.Close <- NULL
QQ$CloseToCloseRange <- round(abs(QQ$Close - rowShift(QQ$Close, -1)), 2)
# Flag whether each opening gap was filled intraday: a gap-up fills when the
# day's Low trades back down to the prior session's close; a gap-down fills
# when the High trades back up to it.
# BUG FIX: the original shifted Close *after* filtering (rows in RR/SS are not
# consecutive trading days) and used the default shift of +1 (the NEXT row's
# close). Use the PrevClose column computed on the full, unfiltered series.
RR <- filter(QQ, HigherOpen == TRUE)
RR$GapClose <- RR$Low < RR$PrevClose
SS <- filter(QQ, HigherOpen == FALSE)
SS$GapClose <- SS$High > SS$PrevClose
#add year column to data
dates <- as.Date(QQ$Date, "%d-%b-%y")
years <- format(dates, "%Y")
QQ$Year <- years
#get by date subset
Recent <- subset(QQ, Date > "2012-01-01")
#plot for Close to Close range
u <- NULL
means <- NULL
maxs <- NULL
means <- aggregate(CloseToCloseRange ~ Year, Recent, mean)
maxs <- aggregate(CloseToCloseRange ~ Year, Recent, max)
means$CloseToCloseRange <- round(means$CloseToCloseRange, 2)
u <- ggplot(data=Recent, aes(x=Year, y=CloseToCloseRange))
meds <- aggregate(CloseToCloseRange ~ Year, Recent, median)
u <- u + geom_jitter(aes(colour=HigherClose)) +
geom_boxplot(alpha=0.7, outlier.shape = NA, na.rm = FALSE, size = .5) +
geom_text(data = means, aes(label = CloseToCloseRange, y = CloseToCloseRange)) +
geom_text(data = maxs, aes(label = CloseToCloseRange, y =CloseToCloseRange))
u <- u + xlab("Year") +
ylab("Close To Close Range") +
ggtitle(paste("SPX Range from One Daily Close To The Next [ through", today, "]")) +
theme(axis.title.x = element_text(colour="Black", size=20),
axis.title.y = element_text(colour="Black", size=20),
axis.text.x = element_text(size=10),
axis.text.y = element_text(size=10),
legend.title = element_text(size=12),
legend.text = element_text(size=10),
legend.position = c(1,1),
legend.justification = c(1,1),
plot.title = element_text(colour="DarkBlue", size=25))
u
#Plot for Open to Close range
v <- NULL
means <- NULL
maxs <- NULL
means <- aggregate(Range ~ Year, Recent, mean)
maxs <- aggregate(Range ~ Year, Recent, max)
means$Range <- round(means$Range, 2)
v <- ggplot(data=Recent, aes(x=Year, y=Range))
meds <- aggregate(Range ~ Year, Recent, median)
v <- v + geom_jitter(aes(colour=HigherClose)) +
geom_boxplot(alpha=0.7, outlier.shape = NA, na.rm = FALSE, size = .5) +
geom_text(data = means, aes(label = Range, y = Range)) +
geom_text(data = maxs, aes(label = Range, y =Range))
v <- v + xlab("Year") +
ylab("Daily Range") +
ggtitle(paste("SPX Range from Open to Close [ through", today, "]")) +
theme(axis.title.x = element_text(colour="Black", size=20),
axis.title.y = element_text(colour="Black", size=20),
axis.text.x = element_text(size=10),
axis.text.y = element_text(size=10),
legend.title = element_text(size=12),
legend.text = element_text(size=10),
legend.position = c(1,1),
legend.justification = c(1,1),
plot.title = element_text(colour="DarkBlue", size=25))
v
#stats
quantile(Recent$Range, na.rm = TRUE, c(.5, .8,.9,.95, .99,1))
quantile(Recent$CloseToCloseRange, na.rm = TRUE, c(.5, .8,.9,.95, .99,1))
#another visualization of daily and close to close ranges by date
qplot(x=Date, y=Range,
data=Recent, na.rm=TRUE,
main="Daily Range",
xlab="Date", ylab="Range")
qplot(x=Date, y=CloseToCloseRange,
data=Recent, na.rm=TRUE,
main="Close To Close Range",
xlab="Date", ylab="Close to Close Range")
#line chart of range
ggplot(subset(Recent, Date > "2016-01-01"), aes(Date, Range)) + geom_line()
#histogram with mode and standard deviation markers
#note: mode and SD are static, need to fix so that they're per year
# Most frequent value of `v` after rounding to `round` digits.
# BUG FIX: the original matched the *unrounded* values against the rounded
# unique set, so any input that was not pre-rounded produced no matches and
# returned numeric(0) (e.g. the call getmode(Recent$Range) further below).
getmode <- function(v, round = 0) {
  rv <- round(v, round)
  uniqv <- unique(rv)
  uniqv[which.max(tabulate(match(rv, uniqv)))]
}
mymode <- getmode(round(Recent$Range, 0))
mysd <- round(sd(Recent$Range, na.rm = TRUE), 1)
ggplot(Recent, aes(x=round(Range, 0))) +
geom_histogram(binwidth=1, colour="black", fill="grey") +
geom_vline(aes(xintercept=mymode, na.rm=T), # Ignore NA values for mean
color="red", linetype="dashed", size=1) +
geom_vline(aes(xintercept=(quantile(Recent$Range, .7, na.rm = TRUE)), na.rm=T), # Ignore NA values for mean
color="black", linetype="solid", size=1) +
facet_grid(. ~ Year) + facet_wrap(~ Year, ncol = 3)
#filtering by year
min(Recent[ which(Recent$Year == 2016),]$Range, na.rm = TRUE)
getmode(Recent[ which(Recent$Year == 2016),]$Range)
count(round(unique(Recent[ which(Recent$Year == 2015),]$Range)))
#add day of week to the data frame
Recent$DayOfWeek <- weekdays(as.Date(Recent$Date))
#plot for Close to Close range
u <- NULL
means <- NULL
maxs <- NULL
means <- aggregate(CloseToCloseRange ~ DayOfWeek, Recent, mean)
maxs <- aggregate(CloseToCloseRange ~ DayOfWeek, Recent, max)
means$CloseToCloseRange <- round(means$CloseToCloseRange, 2)
u <- ggplot(data=Recent, aes(x=DayOfWeek, y=CloseToCloseRange))
meds <- aggregate(CloseToCloseRange ~ DayOfWeek, Recent, median)
u <- u + geom_jitter(aes(colour=HigherClose)) +
geom_boxplot(alpha=0.7, outlier.shape = NA, na.rm = FALSE, size = .5) +
geom_text(data = means, aes(label = CloseToCloseRange, y = CloseToCloseRange)) +
geom_text(data = maxs, aes(label = CloseToCloseRange, y =CloseToCloseRange))
u <- u + xlab("Day of Week") +
ylab("Close To Close Range") +
ggtitle(paste("SPX Range from One Daily Close To The Next [ through", today, "]")) +
theme(axis.title.x = element_text(colour="Black", size=20),
axis.title.y = element_text(colour="Black", size=20),
axis.text.x = element_text(size=10),
axis.text.y = element_text(size=10),
legend.title = element_text(size=12),
legend.text = element_text(size=10),
legend.position = c(1,1),
legend.justification = c(1,1),
plot.title = element_text(colour="DarkBlue", size=25))
u
#range by day of week
round(quantile(Recent[ which(Recent$DayOfWeek == 'Monday'),]$Range, c(.7, .8, .9, .99, 1)), 1)
round(quantile(Recent[ which(Recent$DayOfWeek == 'Monday'),]$CloseToCloseRange, c(.7, .8, .9, .99, 1)), 1)
| /GapClose.R | no_license | jminck/r-trading-homework | R | false | false | 7,210 | r | setwd("c:\users\joseph\git\r-trading_homework")
# this script currently depends on downloading CSV format of historical data from yahoo
# and the file being named GSPC.csv
# https://finance.yahoo.com/quote/%5EGSPC/history?p=%5EGSPC
require(quantmod)
require(TTR)
library(dplyr)
library(plyr)
function(x, shiftLen = 1L) {
r <- (1L + shiftLen):(length(x) + shiftLen)
r[r<1] <- NA
return(x[r])
}
#save today's date for labeling plot graphics
today <- Sys.Date()
#change symbol here
sym = "GSPC"
startDate <- '2017-01-01'
#S&P 500 ia ^GSPC, RUT is ^RUT, VIX is ^VIX
#note, if you are working with a normal stock symbol, not and index, then
#you do not need the leading ^ that is inserted by the line below
symbol <- paste("^", sym, sep="")
getSymbols(symbol, from = startDate)
#change symbol name here
instrument <- GSPC
#number of periods
QQ <- read.csv('GSPC.csv', na.strings='null', colClasses=c("character", rep("numeric",6)))
QQ$Date <- as.Date(QQ$Date)
QQ$PrevClose <- rowShift(QQ$Close, -1)
QQ$Gap <- round(QQ$Open - rowShift(QQ$Close, -1), 2)
QQ$Range <- round(QQ$High - QQ$Low, 2)
QQ$HigherOpen <- QQ$Open > rowShift(QQ$Close, -1)
QQ$HigherClose <- QQ$Close > rowShift(QQ$Close, -1)
QQ$Adj.Close <- NULL
QQ$CloseToCloseRange <- round(abs(QQ$Close - rowShift(QQ$Close, -1)), 2)
RR <- filter(QQ, HigherOpen == TRUE)
RR$GapClose <- RR$Low < rowShift(RR$Close)
SS <- filter(QQ, HigherOpen == FALSE)
SS$GapClose <- SS$High > rowShift(SS$Close)
#add year column to data
dates <- as.Date(QQ$Date, "%d-%b-%y")
years <- format(dates, "%Y")
QQ$Year <- years
#get by date subset
Recent <- subset(QQ, Date > "2012-01-01")
#plot for Close to Close range
u <- NULL
means <- NULL
maxs <- NULL
means <- aggregate(CloseToCloseRange ~ Year, Recent, mean)
maxs <- aggregate(CloseToCloseRange ~ Year, Recent, max)
means$CloseToCloseRange <- round(means$CloseToCloseRange, 2)
u <- ggplot(data=Recent, aes(x=Year, y=CloseToCloseRange))
meds <- aggregate(CloseToCloseRange ~ Year, Recent, median)
u <- u + geom_jitter(aes(colour=HigherClose)) +
geom_boxplot(alpha=0.7, outlier.shape = NA, na.rm = FALSE, size = .5) +
geom_text(data = means, aes(label = CloseToCloseRange, y = CloseToCloseRange)) +
geom_text(data = maxs, aes(label = CloseToCloseRange, y =CloseToCloseRange))
u <- u + xlab("Year") +
ylab("Close To Close Range") +
ggtitle(paste("SPX Range from One Daily Close To The Next [ through", today, "]")) +
theme(axis.title.x = element_text(colour="Black", size=20),
axis.title.y = element_text(colour="Black", size=20),
axis.text.x = element_text(size=10),
axis.text.y = element_text(size=10),
legend.title = element_text(size=12),
legend.text = element_text(size=10),
legend.position = c(1,1),
legend.justification = c(1,1),
plot.title = element_text(colour="DarkBlue", size=25))
u
#Plot for Open to Close range
v <- NULL
means <- NULL
maxs <- NULL
means <- aggregate(Range ~ Year, Recent, mean)
maxs <- aggregate(Range ~ Year, Recent, max)
means$Range <- round(means$Range, 2)
v <- ggplot(data=Recent, aes(x=Year, y=Range))
meds <- aggregate(Range ~ Year, Recent, median)
v <- v + geom_jitter(aes(colour=HigherClose)) +
geom_boxplot(alpha=0.7, outlier.shape = NA, na.rm = FALSE, size = .5) +
geom_text(data = means, aes(label = Range, y = Range)) +
geom_text(data = maxs, aes(label = Range, y =Range))
v <- v + xlab("Year") +
ylab("Daily Range") +
ggtitle(paste("SPX Range from Open to Close [ through", today, "]")) +
theme(axis.title.x = element_text(colour="Black", size=20),
axis.title.y = element_text(colour="Black", size=20),
axis.text.x = element_text(size=10),
axis.text.y = element_text(size=10),
legend.title = element_text(size=12),
legend.text = element_text(size=10),
legend.position = c(1,1),
legend.justification = c(1,1),
plot.title = element_text(colour="DarkBlue", size=25))
v
#stats
quantile(Recent$Range, na.rm = TRUE, c(.5, .8,.9,.95, .99,1))
quantile(Recent$CloseToCloseRange, na.rm = TRUE, c(.5, .8,.9,.95, .99,1))
#another visualization of daily and close to close ranges by date
qplot(x=Date, y=Range,
data=Recent, na.rm=TRUE,
main="Daily Range",
xlab="Date", ylab="Range")
qplot(x=Date, y=CloseToCloseRange,
data=Recent, na.rm=TRUE,
main="Close To Close Range",
xlab="Date", ylab="Close to Close Range")
#line chart of range
ggplot(subset(Recent, Date > "2016-01-01"), aes(Date, Range)) + geom_line()
#histogram with mode and standard deviation markers
#node mode and SD are static, need to fix so that they're per year
getmode <- function(v, round=0) {
uniqv <- round(unique(v), round)
uniqv[which.max(tabulate(match(v, uniqv)))]
}
mymode <- getmode(round(Recent$Range, 0))
mysd <- round(sd(Recent$Range, na.rm = TRUE), 1)
ggplot(Recent, aes(x=round(Range, 0))) +
geom_histogram(binwidth=1, colour="black", fill="grey") +
geom_vline(aes(xintercept=mymode, na.rm=T), # Ignore NA values for mean
color="red", linetype="dashed", size=1) +
geom_vline(aes(xintercept=(quantile(Recent$Range, .7, na.rm = TRUE)), na.rm=T), # Ignore NA values for mean
color="black", linetype="solid", size=1) +
facet_grid(. ~ Year) + facet_wrap(~ Year, ncol = 3)
#filtering by year
min(Recent[ which(Recent$Year == 2016),]$Range, na.rm = TRUE)
getmode(Recent[ which(Recent$Year == 2016),]$Range)
count(round(unique(Recent[ which(Recent$Year == 2015),]$Range)))
#add day of week to the data frame
Recent$DayOfWeek <- weekdays(as.Date(Recent$Date))
#plot for Close to Close range
u <- NULL
means <- NULL
maxs <- NULL
means <- aggregate(CloseToCloseRange ~ DayOfWeek, Recent, mean)
maxs <- aggregate(CloseToCloseRange ~ DayOfWeek, Recent, max)
means$CloseToCloseRange <- round(means$CloseToCloseRange, 2)
u <- ggplot(data=Recent, aes(x=DayOfWeek, y=CloseToCloseRange))
meds <- aggregate(CloseToCloseRange ~ DayOfWeek, Recent, median)
u <- u + geom_jitter(aes(colour=HigherClose)) +
geom_boxplot(alpha=0.7, outlier.shape = NA, na.rm = FALSE, size = .5) +
geom_text(data = means, aes(label = CloseToCloseRange, y = CloseToCloseRange)) +
geom_text(data = maxs, aes(label = CloseToCloseRange, y =CloseToCloseRange))
u <- u + xlab("Day of Week") +
ylab("Close To Close Range") +
ggtitle(paste("SPX Range from One Daily Close To The Next [ through", today, "]")) +
theme(axis.title.x = element_text(colour="Black", size=20),
axis.title.y = element_text(colour="Black", size=20),
axis.text.x = element_text(size=10),
axis.text.y = element_text(size=10),
legend.title = element_text(size=12),
legend.text = element_text(size=10),
legend.position = c(1,1),
legend.justification = c(1,1),
plot.title = element_text(colour="DarkBlue", size=25))
u
#range by day of week
round(quantile(Recent[ which(Recent$DayOfWeek == 'Monday'),]$Range, c(.7, .8, .9, .99, 1)), 1)
round(quantile(Recent[ which(Recent$DayOfWeek == 'Monday'),]$CloseToCloseRange, c(.7, .8, .9, .99, 1)), 1)
|
## Construct a special "matrix" object that can cache its inverse.
## Returns a list of four accessor closures: set/get for the matrix itself
## and setInverse/getInverse for the cached inverse. Assigning a new matrix
## via set() invalidates any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # the old inverse no longer applies
  }
  get <- function() {
    x
  }
  setInverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getInverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Compute the inverse of the special "matrix" returned by makeCacheMatrix.
## If the inverse has already been computed (and the matrix has not changed),
## it is retrieved from the cache instead of being recalculated.
cacheSolve <- function(x, ...) {
  ## Returns the inverse matrix of "x"
  cached <- x$getInverse()
  if (is.null(cached)) {
    # cache miss: invert the stored matrix and remember the result
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | machm/ProgrammingAssignment2 | R | false | false | 991 | r | ## This function creates a matrix that will cache it's own inverse
makeCacheMatrix <- function(x = matrix()) {
## create properties for, and set, the inverse
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
## create the matrix
get <- function() x
setInverse <- function(inverse) inv <<- inverse
getInverse <- function() inv
list(set = set, get = get, setInverse = setInverse,
getInverse = getInverse)
}
## This function takes the "makeCacheMatrix" results and computes the
## inverse. It will retrive the inverse if it has already been created.
cacheSolve <- function(x, ...) {
## Returns the inverse matrix of "x"
inv <- x$getInverse()
if (!is.null(inv)) {
message("getting cached data")
return(inv)
}
mat <- x$get()
inv <- solve(mat, ...)
x$setInverse(inv)
inv
}
|
#setGeneric("coef")
#setGeneric("print")
#setGeneric("fitted")
#setGeneric("extractAIC")
# Register S4 generics for the package's exported verbs. Each registration is
# guarded with isGeneric() so a generic already created (e.g. by another
# attached package) is left untouched rather than redefined.
if (!isGeneric("coefplot")) {
    setGeneric("coefplot",
               function(object, ...)
               standardGeneric("coefplot"))
}
if (!isGeneric("display")) {
    setGeneric("display",
               function(object, ...)
               standardGeneric("display"))
}
if (!isGeneric("sim")) {
    setGeneric("sim",
               function(object, ...)
               standardGeneric("sim"))
}
# sigma.hat is a plain S3 generic (UseMethod dispatch), unlike the S4
# generics above.
sigma.hat <- function(object,...){
  UseMethod("sigma.hat")
}
if (!isGeneric("se.coef")) {
    setGeneric("se.coef",
               function(object, ...)
               standardGeneric("se.coef"))
}
if (!isGeneric("mcsamp")) {
    setGeneric("mcsamp",
               function(object, ...)
               standardGeneric("mcsamp"))
}
if (!isGeneric("standardize")) {
    setGeneric("standardize",
               function(object, ...)
               standardGeneric("standardize"))
}
#if (!isGeneric("terms.bayes")) {
#  setGeneric("terms.bayes",
#             function(x, ...)
#             standardGeneric("terms.bayes"))
#}
# traceplot dispatches via S4 but falls back to coda::traceplot when no more
# specific method is registered (useAsDefault).
if (!isGeneric("traceplot")) {
    setGeneric("traceplot",
               function(x, ...)
               standardGeneric("traceplot"),
               useAsDefault = function(x, ...) coda::traceplot(x, ...))
}
| /R/AllGeneric.R | no_license | suyusung/arm | R | false | false | 1,426 | r |
#setGeneric("coef")
#setGeneric("print")
#setGeneric("fitted")
#setGeneric("extractAIC")
if (!isGeneric("coefplot")) {
setGeneric("coefplot",
function(object, ...)
standardGeneric("coefplot"))
}
if (!isGeneric("display")) {
setGeneric("display",
function(object, ...)
standardGeneric("display"))
}
if (!isGeneric("sim")) {
setGeneric("sim",
function(object, ...)
standardGeneric("sim"))
}
sigma.hat <- function(object,...){
UseMethod("sigma.hat")
}
if (!isGeneric("se.coef")) {
setGeneric("se.coef",
function(object, ...)
standardGeneric("se.coef"))
}
if (!isGeneric("mcsamp")) {
setGeneric("mcsamp",
function(object, ...)
standardGeneric("mcsamp"))
}
if (!isGeneric("standardize")) {
setGeneric("standardize",
function(object, ...)
standardGeneric("standardize"))
}
#if (!isGeneric("terms.bayes")) {
# setGeneric("terms.bayes",
# function(x, ...)
# standardGeneric("terms.bayes"))
#}
if (!isGeneric("traceplot")) {
setGeneric("traceplot",
function(x, ...)
standardGeneric("traceplot"),
useAsDefault = function(x, ...) coda::traceplot(x, ...))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GetEditedReads.R
\name{GetEditedReads}
\alias{GetEditedReads}
\title{Calculate number of edited reads from frequency and coverage}
\usage{
GetEditedReads(x)
}
\arguments{
\item{x}{An RNA editing data object which includes Frequency matrix and
Coverage-q25 matrix at minimum}
}
\value{
A matrix with total number of edited reads for each edited site by
individual
}
\description{
\code{GetEditedReads} calculates the number of edited reads using the
Frequency and Coverage-q25 matrices from the RNA Editing data object
}
\details{
The input should be a list object which includes matrices named "Frequency"
and "Coverage-q25".
}
\examples{
\dontrun{
head(GetEditedReads(RNAEdData))
}
}
| /man/GetEditedReads.Rd | no_license | okg3/RNAEditingAnalysisTools | R | false | true | 765 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GetEditedReads.R
\name{GetEditedReads}
\alias{GetEditedReads}
\title{Calculate number of edited reads from frequency and coverage}
\usage{
GetEditedReads(x)
}
\arguments{
\item{x}{An RNA editing data object which includes Frequency matrix and
Coverage-q25 matrix at minimum}
}
\value{
A matrix with total number of edited reads for each edited site by
individual
}
\description{
\code{GetEditedReads} calculates the number of edited reads using the
Frequency and Coverage-q25 matrices from the RNA Editing data object
}
\details{
The input should be a list object which includes matrices named "Frequency"
and "Coverage-q25".
}
\examples{
\dontrun{
head(GetEditedReads(RNAEdData))
}
}
|
library(reshape2)
filename <- "getdata_dataset.zip"

## UCI HAR Clean Data Set
## Download and unzip the dataset (skipped when already present):
if (!file.exists(filename)) {
  ## BUG FIX: the original URL string had a trailing space, which is sent as
  ## part of the requested path and breaks the download.
  fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(fileURL, filename, method = "curl")
}
if (!file.exists("UCI HAR Dataset")) {
  unzip(filename)
}
# Load activity labels + features
activityLabels <- read.table("UCI HAR Dataset/activity_labels.txt")
activityLabels[,2] <- as.character(activityLabels[,2])
features <- read.table("UCI HAR Dataset/features.txt")
features[,2] <- as.character(features[,2])
# Extract only the data on mean and standard deviation
featuresWanted <- grep(".*mean.*|.*std.*", features[,2])
featuresWanted.names <- features[featuresWanted,2]
featuresWanted.names = gsub('-mean', 'Mean', featuresWanted.names)
featuresWanted.names = gsub('-std', 'Std', featuresWanted.names)
featuresWanted.names <- gsub('[-()]', '', featuresWanted.names)
# Load the datasets
train <- read.table("UCI HAR Dataset/train/X_train.txt")[featuresWanted]
trainActivities <- read.table("UCI HAR Dataset/train/Y_train.txt")
trainSubjects <- read.table("UCI HAR Dataset/train/subject_train.txt")
train <- cbind(trainSubjects, trainActivities, train)
test <- read.table("UCI HAR Dataset/test/X_test.txt")[featuresWanted]
testActivities <- read.table("UCI HAR Dataset/test/Y_test.txt")
testSubjects <- read.table("UCI HAR Dataset/test/subject_test.txt")
test <- cbind(testSubjects, testActivities, test)
# merge datasets and add labels
allData <- rbind(train, test)
colnames(allData) <- c("subject", "activity", featuresWanted.names)
# turn activities & subjects into factors
allData$activity <- factor(allData$activity, levels = activityLabels[,1], labels = activityLabels[,2])
allData$subject <- as.factor(allData$subject)
allData.melted <- melt(allData, id = c("subject", "activity"))
allData.mean <- dcast(allData.melted, subject + activity ~ variable, mean)
write.table(allData.mean, "tidy.txt", row.names = FALSE, quote = FALSE) | /run_analysis.R | no_license | cvsrao/CleanData | R | false | false | 2,052 | r | library(reshape2)
filename <- "getdata_dataset.zip"
## UCI HAR Clean Data Set
## Download and unzip the dataset:
if (!file.exists(filename)){
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip "
download.file(fileURL, filename, method="curl")
}
if (!file.exists("UCI HAR Dataset")) {
unzip(filename)
}
# Load activity labels + features
activityLabels <- read.table("UCI HAR Dataset/activity_labels.txt")
activityLabels[,2] <- as.character(activityLabels[,2])
features <- read.table("UCI HAR Dataset/features.txt")
features[,2] <- as.character(features[,2])
# Extract only the data on mean and standard deviation
featuresWanted <- grep(".*mean.*|.*std.*", features[,2])
featuresWanted.names <- features[featuresWanted,2]
featuresWanted.names = gsub('-mean', 'Mean', featuresWanted.names)
featuresWanted.names = gsub('-std', 'Std', featuresWanted.names)
featuresWanted.names <- gsub('[-()]', '', featuresWanted.names)
# Load the datasets
train <- read.table("UCI HAR Dataset/train/X_train.txt")[featuresWanted]
trainActivities <- read.table("UCI HAR Dataset/train/Y_train.txt")
trainSubjects <- read.table("UCI HAR Dataset/train/subject_train.txt")
train <- cbind(trainSubjects, trainActivities, train)
test <- read.table("UCI HAR Dataset/test/X_test.txt")[featuresWanted]
testActivities <- read.table("UCI HAR Dataset/test/Y_test.txt")
testSubjects <- read.table("UCI HAR Dataset/test/subject_test.txt")
test <- cbind(testSubjects, testActivities, test)
# merge datasets and add labels
allData <- rbind(train, test)
colnames(allData) <- c("subject", "activity", featuresWanted.names)
# turn activities & subjects into factors
allData$activity <- factor(allData$activity, levels = activityLabels[,1], labels = activityLabels[,2])
allData$subject <- as.factor(allData$subject)
allData.melted <- melt(allData, id = c("subject", "activity"))
allData.mean <- dcast(allData.melted, subject + activity ~ variable, mean)
write.table(allData.mean, "tidy.txt", row.names = FALSE, quote = FALSE) |
# Kernel SVM Classification
#Importing the data
dataset <- read.csv("Social_Network_Ads.csv")
dataset = dataset[3:5]
#splitting the dataset into training set and Test set
#install.packages("caTools")
library(caTools)
set.seed(123)
split = sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
#Feature Scaling preferred on Classification Problems
#Scaling only the Independent Variable
training_set[-3] = scale(training_set[-3])
test_set[-3] = scale(test_set[-3])
#Fitting Linear SVM Classifier to the Training set
library(e1071)
classifier = svm(formula = Purchased ~ .,
data = training_set,
type = 'C-classification',
kernel = 'linear')
#Predict the test set results
y_pred = predict(classifier, newdata = test_set[-3])
#Making the Confusion Matrix - to evaluate how many of our pred is true
cm = table(test_set[,3], y_pred)
#Visualising the Test set
#install.packages("ElemStatLearn")
library(ElemStatLearn)
set = test_set
X1 = seq(min(set[,1]) - 1, max(set[,1]) + 1, by = 0.01)
X2 = seq(min(set[,2]) - 1, max(set[,2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c("Age", "EstimatedSalary")
y_grid = predict(classifier, newdata = grid_set)
plot(set[, -3],
main = "Linear SVM Classifier (Test Set)",
xlab = "Age", ylab = "Estimated Salary",
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = ".", col = ifelse(y_grid == 1, "yellow2", "royalblue4"))
points(set, pch = 21, bg = ifelse(set[,3] == 1, "orange", "navyblue"))
#Fitting Kernel SVM Classifier to the Training Set
library(e1071)
classifier = svm(formula = Purchased ~ .,
data = training_set,
type = 'C-classification',
kernel = 'radial')
#Predict the test set results
y_pred = predict(classifier, newdata = test_set[-3])
#Making the Confusion Matrix - to evaluate how many of our pred is true
cm = table(test_set[,3], y_pred)
#cm in console
#Installed the ElemStatLearn package if encountered with any issue
#Visualising the Training set
#install.packages("ElemStatLearn")
library(ElemStatLearn)
set = training_set
#Building the grid using imaginary pixels
X1 = seq(min(set[,1]) - 1, max(set[,1]) + 1, by = 0.01)
X2 = seq(min(set[,2]) - 1, max(set[,2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c("Age", "EstimatedSalary")
y_grid = predict(classifier, newdata = grid_set)
plot(set[, -3],
main = "RBF Kernel SVM Classifier (Training Set)",
xlab = "Age", ylab = "Estimated Salary",
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = ".", col = ifelse(y_grid == 1, "yellow2", "royalblue4"))
points(set, pch = 21, bg = ifelse(set[,3] == 1, "orange", "navyblue"))
#Visualising the Test set
#install.packages("ElemStatLearn")
library(ElemStatLearn)
set = test_set
X1 = seq(min(set[,1]) - 1, max(set[,1]) + 1, by = 0.01)
X2 = seq(min(set[,2]) - 1, max(set[,2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c("Age", "EstimatedSalary")
y_grid = predict(classifier, newdata = grid_set)
plot(set[, -3],
main = "RBF Kernel SVM Classifier (Test Set)",
xlab = "Age", ylab = "Estimated Salary",
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = ".", col = ifelse(y_grid == 1, "yellow2", "royalblue4"))
points(set, pch = 21, bg = ifelse(set[,3] == 1, "orange", "navyblue"))
| /SVM_Classification.R | no_license | rishikamalli/Linear-SVM-versus-RBF-Kernel-SVM-Classification | R | false | false | 3,843 | r | # Kernel SVM Classification
#Importing the data
dataset <- read.csv("Social_Network_Ads.csv")
dataset = dataset[3:5]
#splitting the dataset into training set and Test set
#install.packages("caTools")
library(caTools)
set.seed(123)
split = sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
#Feature Scaling preferred on Classification Problems
#Scaling only the Independent Variable
training_set[-3] = scale(training_set[-3])
test_set[-3] = scale(test_set[-3])
#Fitting Linear SVM Classifier to the Training set
library(e1071)
classifier = svm(formula = Purchased ~ .,
data = training_set,
type = 'C-classification',
kernel = 'linear')
#Predict the test set results
y_pred = predict(classifier, newdata = test_set[-3])
#Making the Confusion Matrix - to evaluate how many of our pred is true
cm = table(test_set[,3], y_pred)
#Visualising the Test set
#install.packages("ElemStatLearn")
library(ElemStatLearn)
set = test_set
X1 = seq(min(set[,1]) - 1, max(set[,1]) + 1, by = 0.01)
X2 = seq(min(set[,2]) - 1, max(set[,2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c("Age", "EstimatedSalary")
y_grid = predict(classifier, newdata = grid_set)
plot(set[, -3],
main = "Linear SVM Classifier (Test Set)",
xlab = "Age", ylab = "Estimated Salary",
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = ".", col = ifelse(y_grid == 1, "yellow2", "royalblue4"))
points(set, pch = 21, bg = ifelse(set[,3] == 1, "orange", "navyblue"))
#Fitting Kernel SVM Classifier to the Training Set
library(e1071)
classifier = svm(formula = Purchased ~ .,
data = training_set,
type = 'C-classification',
kernel = 'radial')
#Predict the test set results
y_pred = predict(classifier, newdata = test_set[-3])
#Making the Confusion Matrix - to evaluate how many of our pred is true
cm = table(test_set[,3], y_pred)
#cm in console
#Installed the ElemStatLearn package if encountered with any issue
#Visualising the Training set
#install.packages("ElemStatLearn")
library(ElemStatLearn)
set = training_set
#Building the grid using imaginary pixels
X1 = seq(min(set[,1]) - 1, max(set[,1]) + 1, by = 0.01)
X2 = seq(min(set[,2]) - 1, max(set[,2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c("Age", "EstimatedSalary")
y_grid = predict(classifier, newdata = grid_set)
plot(set[, -3],
main = "RBF Kernel SVM Classifier (Training Set)",
xlab = "Age", ylab = "Estimated Salary",
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = ".", col = ifelse(y_grid == 1, "yellow2", "royalblue4"))
points(set, pch = 21, bg = ifelse(set[,3] == 1, "orange", "navyblue"))
#Visualising the Test set
#install.packages("ElemStatLearn")
library(ElemStatLearn)
set = test_set
X1 = seq(min(set[,1]) - 1, max(set[,1]) + 1, by = 0.01)
X2 = seq(min(set[,2]) - 1, max(set[,2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c("Age", "EstimatedSalary")
y_grid = predict(classifier, newdata = grid_set)
plot(set[, -3],
main = "RBF Kernel SVM Classifier (Test Set)",
xlab = "Age", ylab = "Estimated Salary",
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = ".", col = ifelse(y_grid == 1, "yellow2", "royalblue4"))
points(set, pch = 21, bg = ifelse(set[,3] == 1, "orange", "navyblue"))
|
#install.packages('neuralnet')
library("neuralnet")
#?neuralnet
#read train and test data
train = read.csv("C:/Users/Jianghui/Desktop/BreastCancer/train.csv", header = TRUE)
data = train[!is.na(train$Nuclei),2:11]
dim(data)
write.table(train, "C:/Users/Jianghui/Desktop/BreastCancer/guide/train.txt", sep=",", row.names = FALSE,col.names = FALSE)
# Basic Scatterplot Matrix
pairs(~Thickness+Size+Shape+ Adhesion+ Epithelial + Nuclei+ Bland +Nucleoli+ Mitoses+Class,data=train,
main="Simple Scatterplot Matrix")
test = read.csv("C:/Users/Jianghui/Desktop/BreastCancer/test.csv", header = TRUE)
valid = test[!is.na(test$Nuclei),2:10]
dim(valid)
write.table(test, "C:/Users/Jianghui/Desktop/BreastCancer/guide/test.txt", sep=",", row.names = FALSE,col.names = FALSE)
save(test, file = "C:/Users/Jianghui/Desktop/BreastCancer/guide/test.RData")
#fit the neural network model
fit = neuralnet(Class ~ Thickness+Size+Shape+ Adhesion+ Epithelial + Nuclei+ Bland +Nucleoli+ Mitoses,data, hidden=10, threshold=0.01)
print(fit)
#Plot the neural network
plot(fit)
# apply model to test data
predict = compute(fit, valid)
ls(predict)
test_results = cbind(test[!is.na(test$Nuclei),11], as.data.frame(predict$net.result))
colnames(test_results) <- c("class","predict")
#apply model to train data
predict = compute(fit, data[,1:9])
train_results = cbind(data[,10], as.data.frame(predict$net.result))
colnames(train_results) <- c("class","predict")
#create liftchart
train_results$dec = as.numeric(with(train_results, cut(predict, breaks=quantile(train_results$predict,seq(0,1,0.2)),include.lowest=TRUE)))
lift = aggregate(train_results$class, by=list(decile=train_results$dec), FUN = "mean")
bad = aggregate(train_results$class, by=list(decile=train_results$dec), FUN = "sum")
good = c(table(train_results$dec)) - bad[,2]
goodbad = data.frame(decile=1:5, bad = bad[,2], good)
#create function to calculate Somers'D
SD = function(goodbad) {
A = abs(0.5*sum((c(0,cumsum(goodbad$bad)[1:4])/sum(goodbad$bad) + (cumsum(goodbad$bad)/sum(goodbad$bad)))*
(cumsum(goodbad$good)/sum(goodbad$good) - c(0,cumsum(goodbad$good)[1:4])/sum(goodbad$good)))-0.5)/0.5
return(A)
}
SD(goodbad)
plot(lift,type="b",pch=16,ylim=c(0,max(lift$x)+0.01),main=paste("Liftchart \nSomers'D = ",round(SD(goodbad),4)),xlab = "Qintiles",ylab = 'Malignant Rate')
#tabulate results
train_results$c_hat = as.numeric(with(train_results, cut(predict, breaks=c(-1,0.1,1.2),include.lowest=TRUE)))
xtabs(~c_hat+class,train_results)
test_results$c_hat = as.numeric(with(test_results, cut(predict, breaks=c(-1,0.1,1.2),include.lowest=TRUE)))
xtabs(~c_hat+class,test_results)
save.image() | /Neural Networks/nn.R | no_license | kevicao/BreastCancer | R | false | false | 2,659 | r | #install.packages('neuralnet')
library("neuralnet")
#?neuralnet
#read train and test data
train = read.csv("C:/Users/Jianghui/Desktop/BreastCancer/train.csv", header = TRUE)
data = train[!is.na(train$Nuclei),2:11]
dim(data)
write.table(train, "C:/Users/Jianghui/Desktop/BreastCancer/guide/train.txt", sep=",", row.names = FALSE,col.names = FALSE)
# Basic Scatterplot Matrix
pairs(~Thickness+Size+Shape+ Adhesion+ Epithelial + Nuclei+ Bland +Nucleoli+ Mitoses+Class,data=train,
main="Simple Scatterplot Matrix")
test = read.csv("C:/Users/Jianghui/Desktop/BreastCancer/test.csv", header = TRUE)
valid = test[!is.na(test$Nuclei),2:10]
dim(valid)
write.table(test, "C:/Users/Jianghui/Desktop/BreastCancer/guide/test.txt", sep=",", row.names = FALSE,col.names = FALSE)
save(test, file = "C:/Users/Jianghui/Desktop/BreastCancer/guide/test.RData")
#fit the neural network model
fit = neuralnet(Class ~ Thickness+Size+Shape+ Adhesion+ Epithelial + Nuclei+ Bland +Nucleoli+ Mitoses,data, hidden=10, threshold=0.01)
print(fit)
#Plot the neural network
plot(fit)
# apply model to test data
predict = compute(fit, valid)
ls(predict)
test_results = cbind(test[!is.na(test$Nuclei),11], as.data.frame(predict$net.result))
colnames(test_results) <- c("class","predict")
#apply model to train data
predict = compute(fit, data[,1:9])
train_results = cbind(data[,10], as.data.frame(predict$net.result))
colnames(train_results) <- c("class","predict")
#create liftchart
train_results$dec = as.numeric(with(train_results, cut(predict, breaks=quantile(train_results$predict,seq(0,1,0.2)),include.lowest=TRUE)))
lift = aggregate(train_results$class, by=list(decile=train_results$dec), FUN = "mean")
bad = aggregate(train_results$class, by=list(decile=train_results$dec), FUN = "sum")
good = c(table(train_results$dec)) - bad[,2]
goodbad = data.frame(decile=1:5, bad = bad[,2], good)
#create function to calculate Somers'D
SD = function(goodbad) {
A = abs(0.5*sum((c(0,cumsum(goodbad$bad)[1:4])/sum(goodbad$bad) + (cumsum(goodbad$bad)/sum(goodbad$bad)))*
(cumsum(goodbad$good)/sum(goodbad$good) - c(0,cumsum(goodbad$good)[1:4])/sum(goodbad$good)))-0.5)/0.5
return(A)
}
SD(goodbad)
plot(lift,type="b",pch=16,ylim=c(0,max(lift$x)+0.01),main=paste("Liftchart \nSomers'D = ",round(SD(goodbad),4)),xlab = "Qintiles",ylab = 'Malignant Rate')
#tabulate results
train_results$c_hat = as.numeric(with(train_results, cut(predict, breaks=c(-1,0.1,1.2),include.lowest=TRUE)))
xtabs(~c_hat+class,train_results)
test_results$c_hat = as.numeric(with(test_results, cut(predict, breaks=c(-1,0.1,1.2),include.lowest=TRUE)))
xtabs(~c_hat+class,test_results)
save.image() |
#' NPMLE for Gaussian Variance Heterogeneity
#'
#' A Kiefer-Wolfowitz MLE for Gaussian models with independent variances. This
#' can be viewed as a general form for \eqn{\chi^2} mixtures, see \code{Gammamix}
#' for a more general form for Gamma mixtures.
#'
#' @param x vector of observed variances
#' @param m vector of sample sizes corresponding to x
#' @param v A vector of bin boundaries, if scalar then v equally spaced bins
#' are constructed
#' @param weights replicate weights for x obervations, should sum to 1
#' @param ... optional parameters passed to KWDual to control optimization
#' @return An object of class \code{density} with components:
#' \item{x}{midpoints of the bin boundaries}
#' \item{y}{estimated function values of the mixing density}
#' \item{g}{function values of the mixture density at the observed x's.}
#' \item{logLik}{the value of the log likelihood at the solution}
#' \item{dy}{Bayes rule estimates of }
#' \item{status}{the Mosek convergence status.}
#' @author R. Koenker
#' @seealso Gammamix for a general implementation for Gamma mixtures
#' @references
#' Koenker, R and I. Mizera, (2013) ``Convex Optimization, Shape Constraints,
#' Compound Decisions, and Empirical Bayes Rules,'' \emph{JASA}, 109, 674--685.
#'
#' Gu J. and R. Koenker (2014) Unobserved heterogeneity in
#' income dynamics: an empirical Bayes perspective, \emph{JBES}, 35, 1-16.
#'
#' Koenker, R. and J. Gu, (2017) REBayes: An {R} Package for Empirical Bayes Mixture Methods,
#' \emph{Journal of Statistical Software}, 82, 1--26.
#' @keywords nonparametric
#' @export
GVmix <- function(x, m, v = 300, weights = NULL, ...){
n = length(x)
eps <- 1e-4
if (length(v) == 1)
v <- seq(min(x) - eps, max(x) + eps, length = v)
p <- length(v)
d <- diff(v)
v = (v[-1]+v[-p])/2
p <- length(v)
if(length(weights)) w <- weights
else w <- rep(1, n)/n
r <- (m - 1)/2
R <- outer(r * x, v, "/")
G <- outer(x * gamma(r), rep(1, p))
r <- outer(r, rep(1, p))
A <- (exp(-R) * R^r)/G
f <- KWDual(A, d, w, ...)
y <- f$f
g <- f$g
logLik <- n * sum(w * log(g))
dy <- as.vector((A %*% (y * d * v))/g)
z <- list(x = v, y = y, g = g, logLik = logLik, dy = dy, status = f$status)
class(z) <- c("GVmix", "density")
return(z)
}
| /BayesianStatistics/REBayesReferences/GVmix.R | no_license | simon1405/R | R | false | false | 2,329 | r | #' NPMLE for Gaussian Variance Heterogeneity
#'
#' A Kiefer-Wolfowitz MLE for Gaussian models with independent variances. This
#' can be viewed as a general form for \eqn{\chi^2} mixtures, see \code{Gammamix}
#' for a more general form for Gamma mixtures.
#'
#' @param x vector of observed variances
#' @param m vector of sample sizes corresponding to x
#' @param v A vector of bin boundaries, if scalar then v equally spaced bins
#' are constructed
#' @param weights replicate weights for x obervations, should sum to 1
#' @param ... optional parameters passed to KWDual to control optimization
#' @return An object of class \code{density} with components:
#' \item{x}{midpoints of the bin boundaries}
#' \item{y}{estimated function values of the mixing density}
#' \item{g}{function values of the mixture density at the observed x's.}
#' \item{logLik}{the value of the log likelihood at the solution}
#' \item{dy}{Bayes rule estimates of }
#' \item{status}{the Mosek convergence status.}
#' @author R. Koenker
#' @seealso Gammamix for a general implementation for Gamma mixtures
#' @references
#' Koenker, R and I. Mizera, (2013) ``Convex Optimization, Shape Constraints,
#' Compound Decisions, and Empirical Bayes Rules,'' \emph{JASA}, 109, 674--685.
#'
#' Gu J. and R. Koenker (2014) Unobserved heterogeneity in
#' income dynamics: an empirical Bayes perspective, \emph{JBES}, 35, 1-16.
#'
#' Koenker, R. and J. Gu, (2017) REBayes: An {R} Package for Empirical Bayes Mixture Methods,
#' \emph{Journal of Statistical Software}, 82, 1--26.
#' @keywords nonparametric
#' @export
GVmix <- function(x, m, v = 300, weights = NULL, ...){
n = length(x)
eps <- 1e-4
if (length(v) == 1)
v <- seq(min(x) - eps, max(x) + eps, length = v)
p <- length(v)
d <- diff(v)
v = (v[-1]+v[-p])/2
p <- length(v)
if(length(weights)) w <- weights
else w <- rep(1, n)/n
r <- (m - 1)/2
R <- outer(r * x, v, "/")
G <- outer(x * gamma(r), rep(1, p))
r <- outer(r, rep(1, p))
A <- (exp(-R) * R^r)/G
f <- KWDual(A, d, w, ...)
y <- f$f
g <- f$g
logLik <- n * sum(w * log(g))
dy <- as.vector((A %*% (y * d * v))/g)
z <- list(x = v, y = y, g = g, logLik = logLik, dy = dy, status = f$status)
class(z) <- c("GVmix", "density")
return(z)
}
|
library(dplyr) # for data cleaning
library(ISLR) # for college dataset
library(cluster) # for gower similarity and pam
library(Rtsne) # for t-SNE plot
library(ggplot2) # for visualization
library(WGCNA)
data<-read.csv("C:/Users/falk/Google Drive/PhD/PhD Projects/Blue Steel/Paper#2/GWAS/KGF_AdjustedBLUPsAllDays_thinned_Oct19.csv")
metadata <- read.csv("C:/Users/falk/Google Drive/PhD/PhD Projects/Blue Steel/2017 Data - Growth Chamber/Randomizations Origin Data GWAS Names/Meta_data.csv", sep=",", header=T, check.names = FALSE)
str(Merged_df)
colnames(Merged_df)
Merged_df <- left_join(metadata, data, by="Entry")
Merged_df$Country=as.factor(Merged_df$Country)
Merged_df$Region=as.factor(Merged_df$Region)
Merged_df$Diversity=as.factor(Merged_df$Diversity)
Merged_df$Diversity=as.factor(Merged_df$Diversity)
Merged_df$`Stem Termination`=as.factor(Merged_df$`Stem Termination`)
Merged_df$`seed coat color`=as.factor(Merged_df$`seed coat color`)
Merged_df$`hilum color`=as.factor(Merged_df$`hilum color`)
Merged_df$MG=as.factor(Merged_df$MG)
Merged_df$Cluster.6=as.factor(Merged_df$Cluster.6)
Merged_df$Cluster.8=as.factor(Merged_df$Cluster.8)
Merged_df$Cluster.9=as.factor(Merged_df$Cluster.9)
str(Merged_df)
Merged_df <- Merged_df[-c(9,98,167,182,190,202,204,273),]
new_df <- Merged_df[,c(3:7,9:13,18:136)]
row.names(new_df)
str(new_df)
colnames(new_df)
new_df <- new_df[-c(9,98,167,182,190,202,204,273),]
new_df[1:292,1:2]
dataonly <- new_df[, -c(1:3)]
gower_dist <- daisy(new_df[, -c(1:3)],
metric = "gower",
type = list(logratio = 3))
summary(gower_dist)
gower_mat <- as.matrix(gower_dist)
str(gower_dist)
new_df[
which(gower_mat == min(gower_mat[gower_mat != min(gower_mat)]),
arr.ind = TRUE)[1, ], ]
new_df[
which(gower_mat == max(gower_mat[gower_mat != max(gower_mat)]),
arr.ind = TRUE)[1, ], ]
sil_width <- c(NA)
for(i in 2:10){
pam_fit <- pam(gower_dist,
diss = TRUE,
k = i)
sil_width[i] <- pam_fit$silinfo$avg.width
}
plot(1:10, sil_width,
xlab = "Number of clusters",
ylab = "Silhouette Width")
lines(1:10, sil_width)
pam_fit <- pam(gower_dist, diss = TRUE, k = 2)
pam_results <- new_df %>%
mutate(cluster = pam_fit$clustering) %>%
group_by(cluster) %>%
do(the_summary = summary(.))
pam_results$the_summary
new_df[pam_fit$medoids, ]
tsne_obj <- Rtsne(gower_dist, is_distance = TRUE)
tsne_data <- tsne_obj$Y %>%
data.frame() %>%
setNames(c("X", "Y")) %>%
mutate(cluster = factor(pam_fit$clustering),
name = Merged_df$Name)
ggplot(aes(x = X, y = Y), data = tsne_data) +
geom_point(aes(color = cluster))
tsne_data %>%
filter(X > 15 & X < 25,
Y > -15 & Y < -10) %>%
left_join(college_clean, by = "name") %>%
collect %>%
.[["name"]]
| /Gower_FullDataSet.R | no_license | mighster/Clustering_approaches | R | false | false | 2,909 | r | library(dplyr) # for data cleaning
library(ISLR) # for college dataset
library(cluster) # for gower similarity and pam
library(Rtsne) # for t-SNE plot
library(ggplot2) # for visualization
library(WGCNA)
data<-read.csv("C:/Users/falk/Google Drive/PhD/PhD Projects/Blue Steel/Paper#2/GWAS/KGF_AdjustedBLUPsAllDays_thinned_Oct19.csv")
metadata <- read.csv("C:/Users/falk/Google Drive/PhD/PhD Projects/Blue Steel/2017 Data - Growth Chamber/Randomizations Origin Data GWAS Names/Meta_data.csv", sep=",", header=T, check.names = FALSE)
str(Merged_df)
colnames(Merged_df)
Merged_df <- left_join(metadata, data, by="Entry")
Merged_df$Country=as.factor(Merged_df$Country)
Merged_df$Region=as.factor(Merged_df$Region)
Merged_df$Diversity=as.factor(Merged_df$Diversity)
Merged_df$Diversity=as.factor(Merged_df$Diversity)
Merged_df$`Stem Termination`=as.factor(Merged_df$`Stem Termination`)
Merged_df$`seed coat color`=as.factor(Merged_df$`seed coat color`)
Merged_df$`hilum color`=as.factor(Merged_df$`hilum color`)
Merged_df$MG=as.factor(Merged_df$MG)
Merged_df$Cluster.6=as.factor(Merged_df$Cluster.6)
Merged_df$Cluster.8=as.factor(Merged_df$Cluster.8)
Merged_df$Cluster.9=as.factor(Merged_df$Cluster.9)
str(Merged_df)
Merged_df <- Merged_df[-c(9,98,167,182,190,202,204,273),]
new_df <- Merged_df[,c(3:7,9:13,18:136)]
row.names(new_df)
str(new_df)
colnames(new_df)
new_df <- new_df[-c(9,98,167,182,190,202,204,273),]
new_df[1:292,1:2]
dataonly <- new_df[, -c(1:3)]
gower_dist <- daisy(new_df[, -c(1:3)],
metric = "gower",
type = list(logratio = 3))
summary(gower_dist)
gower_mat <- as.matrix(gower_dist)
str(gower_dist)
new_df[
which(gower_mat == min(gower_mat[gower_mat != min(gower_mat)]),
arr.ind = TRUE)[1, ], ]
new_df[
which(gower_mat == max(gower_mat[gower_mat != max(gower_mat)]),
arr.ind = TRUE)[1, ], ]
sil_width <- c(NA)
for(i in 2:10){
pam_fit <- pam(gower_dist,
diss = TRUE,
k = i)
sil_width[i] <- pam_fit$silinfo$avg.width
}
plot(1:10, sil_width,
xlab = "Number of clusters",
ylab = "Silhouette Width")
lines(1:10, sil_width)
pam_fit <- pam(gower_dist, diss = TRUE, k = 2)
pam_results <- new_df %>%
mutate(cluster = pam_fit$clustering) %>%
group_by(cluster) %>%
do(the_summary = summary(.))
pam_results$the_summary
new_df[pam_fit$medoids, ]
tsne_obj <- Rtsne(gower_dist, is_distance = TRUE)
tsne_data <- tsne_obj$Y %>%
data.frame() %>%
setNames(c("X", "Y")) %>%
mutate(cluster = factor(pam_fit$clustering),
name = Merged_df$Name)
ggplot(aes(x = X, y = Y), data = tsne_data) +
geom_point(aes(color = cluster))
tsne_data %>%
filter(X > 15 & X < 25,
Y > -15 & Y < -10) %>%
left_join(college_clean, by = "name") %>%
collect %>%
.[["name"]]
|
# Exercise-1: practice with basic syntax
# Create a variable `hometown` that stores the city in which you were born
hometown<-'Taipei'
# Assign your name to the variable `my.name`
my.name<-"Cindy"
# Assign your height to a variable `my.height`
my.height <- 161
# Create a variable `puppies` equal to the number of puppies you'd like to have
puppies <- 1
# Create a variable `puppy.price`, which is how expensive you think a puppy is
puppy.price <- 50
# Create a variable `total.cost` that has the total cost of all of your puppies
total.cost<-puppy.price*puppies
# Create a boolean variable `too.expensive`, set to true if the cost is greater than $1,000
too.expensive<- total.cost>1000
# Create a variable `max.puppies`, which is the nuber of puppies you can afford for $1K.
max.puppies<- round(1000/puppy.price)
| /exercise-1/exercise.R | permissive | chiuyt19/m6-r-intro | R | false | false | 815 | r | # Exercise-1: practice with basic syntax
# Create a variable `hometown` that stores the city in which you were born
hometown<-'Taipei'
# Assign your name to the variable `my.name`
my.name<-"Cindy"
# Assign your height to a variable `my.height`
my.height <- 161
# Create a variable `puppies` equal to the number of puppies you'd like to have
puppies <- 1
# Create a variable `puppy.price`, which is how expensive you think a puppy is
puppy.price <- 50
# Create a variable `total.cost` that has the total cost of all of your puppies
total.cost<-puppy.price*puppies
# Create a boolean variable `too.expensive`, set to true if the cost is greater than $1,000
too.expensive<- total.cost>1000
# Create a variable `max.puppies`, which is the nuber of puppies you can afford for $1K.
max.puppies<- round(1000/puppy.price)
|
# aim: get expression from 293T cells data
#
setwd("/home/wangjl/data/apa/20200701Fig/f1/3T3exp/")
getwd()
#
library(dplyr)
library(Seurat)
library(patchwork)
# Load the scRNA dataset
scRNA.data <- Read10X(data.dir = "/home/wangjl/data/ref/293T/filtered_matrices_mex/hg19/") #
#scRNA.data <- Read10X(data.dir ="/data/jinwf/wangjl/ref/pbmc10k/filtered_feature_bc_matrix") #v3.1 cellrange4.0.0
#
dim(scRNA.data) #32738 2885
head(scRNA.data)
n=1;scRNA.data[1:100, n:(40+n)]
# Initialize the Seurat object with the raw (non-normalized data).
scRNA <- CreateSeuratObject(counts = scRNA.data, project = "293T", min.cells = 3, min.features = 200)
dim(scRNA@assays$RNA@counts) #16316 2885
hist(apply(scRNA@assays$RNA@counts, 2, sum), n=100) #counts
hist(apply(scRNA@assays$RNA@counts>0, 2, sum), n=100) #gene number
# write to file
write.table(scRNA@assays$RNA@counts, '293T_exp.dt.txt')
########
#(1) plot counts / gene per gene
rnaM=scRNA@assays$RNA@counts
dim(rnaM) #16316 2885
countsPerCell=apply(rnaM, 2, sum)
genePerCell=apply(rnaM>0, 2, sum)
mean(countsPerCell) #[1] 15268.85 counts per cell
library(ggplot2)
p01=ggplot(data.frame(value=countsPerCell), aes(x=1, y=value/1e6))+geom_boxplot()+
geom_jitter(alpha=0.3)+
labs(x="293T cells(by 10x)", y='Million counts per cell')+
theme_classic()
p01
#
p02=ggplot(data.frame(value=genePerCell), aes(x=1, y=value))+geom_boxplot()+
geom_jitter(alpha=0.3)+
labs(x="293T cells(by 10x)", y='Gene number per cell')+
theme_classic()
p01
pdf('10x_readPerCell_GenePerCell.pdf',width=3, height=4)
print(p01)
print(p02)
dev.off()
#
##
# read RNA matrix of C1
rnaMc1=read.csv('/data/jinwf/wangjl/apa/20200701Fig/f2/BC_HeLa/BC_HeLa.222cells.count.V4.csv', row.names = 1)
head(rnaMc1[,1:10])
dim(rnaMc1) #18662 222
#
countsPerCell2=apply(rnaMc1, 2, sum)
genePerCell2=apply(rnaMc1>0, 2, sum)
#
hist(countsPerCell2, n=100)
hist(genePerCell2, n=100)
#
mean(countsPerCell2) #[1] 1465466 counts per cell
#########
# compare counts
mean(countsPerCell2)/mean(countsPerCell) #95.97752
compare_countsPerCell=t.test(countsPerCell, countsPerCell2)
compare_countsPerCell
compare_countsPerCell$p.value #1.689043e-77
# compare gene
mean(genePerCell2)/mean(genePerCell) #1.86842
compare_genePerCell=t.test(genePerCell2, genePerCell)
compare_genePerCell
compare_genePerCell$p.value # 4.333437e-92
####################
# counts
df1=data.frame(
tech=c( rep('10x', length(countsPerCell)) , rep('c1', length(countsPerCell2)) ),
value=c(as.numeric(countsPerCell), as.numeric(countsPerCell2))
)
g1=ggplot(df1, aes(x=tech, y=value/1e6, color=tech))+geom_boxplot(outlier.alpha = 0)+
#geom_jitter(size=0.1, alpha=0.2)+
scale_x_discrete(labels = c('10x scRNA-seq', 'scPolyA-seq')) +
labs(x="", y='Million counts per cell')+
theme_classic()+theme(legend.position="none")+
theme(axis.text.x=element_text(angle=60, hjust=1,size=10) ); g1
#
#
# gene
df2=data.frame(
tech=c( rep('10x', length(genePerCell)) , rep('c1', length(genePerCell2)) ),
value=c(as.numeric(genePerCell), as.numeric(genePerCell2))
)
g2=ggplot(df2, aes(x=tech, y=value, color=tech))+geom_boxplot(outlier.alpha = 0)+
#geom_jitter(size=0.1, alpha=0.05)+
scale_x_discrete(labels = c('10x scRNA-seq', 'scPolyA-seq')) +
labs(x="", y='Gene number per cell')+
theme_classic()+theme(legend.position="none")+
theme(axis.text.x=element_text(angle=60, hjust=1,size=10) ); g2
#
# combine to one
library(gridExtra)
CairoPDF('10x_c1_gene_counts_per_cell-2.pdf', width=3, height=3)
grid.arrange(g1,g2, nrow=1)
dev.off()
###
| /step3/a02_feature_counts_c1_vs_10x.R | no_license | WangJL2021/scPolyA-seq | R | false | false | 3,563 | r | # aim: get expression from 293T cells data
#
setwd("/home/wangjl/data/apa/20200701Fig/f1/3T3exp/")
getwd()
#
library(dplyr)
library(Seurat)
library(patchwork)
# Load the scRNA dataset
scRNA.data <- Read10X(data.dir = "/home/wangjl/data/ref/293T/filtered_matrices_mex/hg19/") #
#scRNA.data <- Read10X(data.dir ="/data/jinwf/wangjl/ref/pbmc10k/filtered_feature_bc_matrix") #v3.1 cellrange4.0.0
#
dim(scRNA.data) #32738 2885
head(scRNA.data)
n=1;scRNA.data[1:100, n:(40+n)]
# Initialize the Seurat object with the raw (non-normalized data).
scRNA <- CreateSeuratObject(counts = scRNA.data, project = "293T", min.cells = 3, min.features = 200)
dim(scRNA@assays$RNA@counts) #16316 2885
hist(apply(scRNA@assays$RNA@counts, 2, sum), n=100) #counts
hist(apply(scRNA@assays$RNA@counts>0, 2, sum), n=100) #gene number
# write to file
write.table(scRNA@assays$RNA@counts, '293T_exp.dt.txt')
########
#(1) plot counts / gene per gene
rnaM=scRNA@assays$RNA@counts
dim(rnaM) #16316 2885
countsPerCell=apply(rnaM, 2, sum)
genePerCell=apply(rnaM>0, 2, sum)
## ---- 10x data: per-cell summaries ------------------------------------------
mean(countsPerCell) #[1] 15268.85 counts per cell
library(ggplot2)

# Million counts per cell for the 10x 293T cells
p01 <- ggplot(data.frame(value = countsPerCell), aes(x = 1, y = value/1e6)) +
  geom_boxplot() +
  geom_jitter(alpha = 0.3) +
  labs(x = "293T cells(by 10x)", y = 'Million counts per cell') +
  theme_classic()
p01
#
# Detected gene number per cell for the 10x 293T cells
p02 <- ggplot(data.frame(value = genePerCell), aes(x = 1, y = value)) +
  geom_boxplot() +
  geom_jitter(alpha = 0.3) +
  labs(x = "293T cells(by 10x)", y = 'Gene number per cell') +
  theme_classic()
p02 # BUG FIX: was 'p01', which re-displayed the counts plot instead of the gene plot

pdf('10x_readPerCell_GenePerCell.pdf', width = 3, height = 4)
print(p01)
print(p02)
dev.off()
#
##
# Read the RNA count matrix of the C1 (scPolyA-seq) cells
rnaMc1 <- read.csv('/data/jinwf/wangjl/apa/20200701Fig/f2/BC_HeLa/BC_HeLa.222cells.count.V4.csv', row.names = 1)
head(rnaMc1[, 1:10])
dim(rnaMc1) #18662 222
#
# Per-cell totals and detected gene numbers for C1
countsPerCell2 <- apply(rnaMc1, 2, sum)
genePerCell2 <- apply(rnaMc1 > 0, 2, sum)
#
hist(countsPerCell2, n = 100)
hist(genePerCell2, n = 100)
#
mean(countsPerCell2) #[1] 1465466 counts per cell
#########
# Compare sequencing depth (counts) between the two platforms
mean(countsPerCell2)/mean(countsPerCell) #95.97752
compare_countsPerCell <- t.test(countsPerCell, countsPerCell2)
compare_countsPerCell
compare_countsPerCell$p.value #1.689043e-77
# Compare detected gene numbers between the two platforms
mean(genePerCell2)/mean(genePerCell) #1.86842
compare_genePerCell <- t.test(genePerCell2, genePerCell)
compare_genePerCell
compare_genePerCell$p.value # 4.333437e-92
####################
# Boxplot: counts per cell by technology
df1 <- data.frame(
  tech = c(rep('10x', length(countsPerCell)), rep('c1', length(countsPerCell2))),
  value = c(as.numeric(countsPerCell), as.numeric(countsPerCell2))
)
g1 <- ggplot(df1, aes(x = tech, y = value/1e6, color = tech)) + geom_boxplot(outlier.alpha = 0) +
  #geom_jitter(size=0.1, alpha=0.2)+
  scale_x_discrete(labels = c('10x scRNA-seq', 'scPolyA-seq')) +
  labs(x = "", y = 'Million counts per cell') +
  theme_classic() + theme(legend.position = "none") +
  theme(axis.text.x = element_text(angle = 60, hjust = 1, size = 10)); g1
#
#
# Boxplot: gene number per cell by technology
df2 <- data.frame(
  tech = c(rep('10x', length(genePerCell)), rep('c1', length(genePerCell2))),
  value = c(as.numeric(genePerCell), as.numeric(genePerCell2))
)
g2 <- ggplot(df2, aes(x = tech, y = value, color = tech)) + geom_boxplot(outlier.alpha = 0) +
  #geom_jitter(size=0.1, alpha=0.05)+
  scale_x_discrete(labels = c('10x scRNA-seq', 'scPolyA-seq')) +
  labs(x = "", y = 'Gene number per cell') +
  theme_classic() + theme(legend.position = "none") +
  theme(axis.text.x = element_text(angle = 60, hjust = 1, size = 10)); g2
#
# Combine both panels into a single PDF
library(gridExtra)
# NOTE(review): CairoPDF() requires library(Cairo), presumably loaded earlier -- confirm
CairoPDF('10x_c1_gene_counts_per_cell-2.pdf', width = 3, height = 3)
grid.arrange(g1, g2, nrow = 1)
dev.off()
###
|
#' Function to create a rmt_list object from RMODFLOW discrete boundary condition objects
#'
#' @param obj RMODFLOW discrete boundary condition object. Allowed objects are chd, ghb, riv, drn, wel
#' @param conc vector, data.frame or matrix with concentration values. A vector is treated as a single species (one column).
#' @param itype integer column (or single value) to indicate flux type
#' @param kper integer value(s) indicating during which stress-periods this rmf_list is active
#'
#' @return a \code{rmt_list} object
#' @keywords internal
rmti_create_bc_list <- function(obj, conc, itype, kper) {
  # kper can only be different if flow has only 1 stress-period (should be nr 1) which should be steady-state (not known in this code)
  kper_flow <- unique(obj$kper$kper)
  if(!identical(sort(kper_flow), sort(unique(kper)))) {
    if(length(kper_flow) > 1 || kper_flow != 1) {
      stop('kper of rmt_list differs from kper of flow object', call. = FALSE)
    } else {
      kper_select <- 1
    }
  } else {
    kper_select <- kper
  }
  # names of boundary features active during the selected stress period(s)
  names_act <- colnames(obj$kper)[which(obj$kper[kper_select,which(!is.na(obj$kper[kper_select,]))] != FALSE)[-1]]
  if(length(names_act) == 0) stop('No active obj features for kper ', kper_select, call. = FALSE)
  df <- as.data.frame(obj$data)
  df <- df[df$name %in% names_act, ]
  # concentrations
  # BUG FIX: a plain vector has NULL dim, so ncol(conc) was NULL and matrix()
  # failed; coerce a vector to a single-column (single species) matrix first
  if(is.null(dim(conc))) conc <- matrix(conc, ncol = 1)
  conc <- structure(as.data.frame(matrix(conc, ncol = ncol(conc))), names = paste0('css', 1:ncol(conc)))
  df <- cbind(df, itype, conc)
  df <- rmt_create_list(df, kper = kper)
  return(df)
}
#' Conditional return
#'
#' \code{rmti_ifelse0} returns \code{yes} if \code{test} is \code{TRUE}. If \code{test} is \code{FALSE}, it returns \code{no}.
#' Unlike \code{ifelse()}, this works on a single scalar condition and never
#' recycles, strips attributes or coerces its return value.
#' @param test an object which can be coerced to logical mode.
#' @param yes return value for \code{test==TRUE}
#' @param no return value for \code{test==FALSE}
#' @keywords internal
rmti_ifelse0 <- function(test, yes, no) {
  if (test) yes else no
}
#' List supported MT3DMS/MT3D-USGS packages
#'
#' @param type character denoting type of packages to list; possible values are \code{'usgs' (default), 'mt3dms', 'output'}
#'
#' @return data.frame with ftype and rmt columns denoting the MT3DMS and \code{RMT3DMS} abbreviations for the requested packages
#' @keywords internal
#' @details 'usgs' holds all packages; 'mt3dms' is a subset, 'output' lists all supported output types
#' @note this function should be updated every time a new MT3DMS package is supported in \code{RMT3DMS}
rmti_list_packages <- function(type = 'usgs') {
  # update rmfd_supported_packages in /data-raw/ when a new package is supported
  # (the NAM file is supported but intentionally absent from these tables)
  df <- rmtd_supported_packages
  # Exhaustive package lists; MT3DMS is a strict subset of MT3D-USGS
  usgs <- c('btn', 'ft6', 'adv', 'dsp', 'ssm', 'rct', 'gcg', 'tob', 'hss',
            'cts', 'tso', 'uzt', 'lkt', 'sft')
  mt3dms <- c('btn', 'adv', 'dsp', 'ssm', 'rct', 'gcg', 'tob', 'hss')
  if (type == 'output') {
    # output types live in a separate lookup table
    df <- rmtd_supported_output
  } else {
    # look the requested vector up by name ('usgs' or 'mt3dms') and subset
    df <- subset(df, rmt %in% get(type))
  }
  df
}
#' Read comments
#' Internal function used in the read_* functions to read comments
#' @param id optional integers specifying which lines are comments. If NULL (default), lines starting with "#" indicate commented lines
#' @details removes empty comments and prevents copying of RMT3DMS header comment
#' @keywords internal
rmti_parse_comments <- function(remaining_lines, id = NULL) {
  # header string written by RMT3DMS on export; stripped so it is not duplicated
  version_tag <- paste("RMT3DMS, version", packageDescription("RMT3DMS")$Version)
  comments <- NULL
  # determine which lines are comments: explicit ids or lines starting with "#"
  if (is.null(id)) {
    comment_id <- which(substr(remaining_lines, 1, 1) == "#")
  } else {
    comment_id <- id
  }
  if (length(comment_id) > 0) {
    comments <- gsub('#', '', remaining_lines[comment_id])
    # drop comments that are empty after stripping the hash
    comments <- comments[nchar(trimws(comments)) > 0]
    # drop the RMT3DMS version header
    comments <- comments[!grepl(version_tag, comments)]
    remaining_lines <- remaining_lines[-comment_id]
  }
  list(comments = comments, remaining_lines = remaining_lines)
}
#' Check if the FTL file is binary
#'
#' @param file pathname to the flow-transport link file, typically '*.ftl'
#'
#' @return logical indicating if the FTL file is binary or keyword 'empty' if file is empty.
#' @keywords internal
rmti_check_ftl_binary <- function(file) {
  # read_lines_raw is used because readr::read_lines might crash the session on binary input
  raw_lines <- readr::read_lines_raw(file, n_max = 10)
  # TODO this is a weak check for binary: only the 4-byte magic header is inspected
  magic <- rawToChar(unlist(raw_lines)[1:4])
  is_binary <- toupper(magic) %in% c('MT3D', 'MTGS')
  # if(rawToChar(unlist(raw_lines)[2]) == '') is_binary <- TRUE
  if (!is_binary) {
    # a missing second line marks the file as empty
    converted <- unlist(lapply(raw_lines, rawToChar))
    is_binary <- ifelse(is.na(converted[2]), 'empty', is_binary)
  }
  is_binary
}
#' Read the package header from a flow-transport link file
#'
#' @param file pathname to the flow-transport link file, typically '*.ftl'
#' @param binary is the FTL file binary? If NULL (default), detected with \code{rmti_check_ftl_binary}
#'
#' @return logical vector of length 3, indicating if the rch, evt or uzf packages are active in the flow simulation
#' @details This function is used in \code{\link{rmt_read_ssm}} as a replacement for reading data set 1. It is used to determine if
#' rch, evt and/or uzf concentration arrays have to be read. All other active flow packages are read from the point source/sink data sets.
#' \code{rmti_parse_ftl_header} can only be used with flow-transport link files using MT3DMS or MT3D-USGS headers.
#' The UZF package can only be used with MT3D-USGS.
#' @keywords internal
rmti_parse_ftl_header <- function(file, binary = NULL) {
  # all flags start FALSE and are only flipped when the header proves the package is active
  lg <- structure(rep(FALSE, 3), names = c('frch', 'fevt', 'fuzf'))
  # check if ftl file is binary
  if(is.null(binary)) {
    binary <- rmti_check_ftl_binary(file)
  }
  # an 'empty' FTL file has no header to parse; return all-FALSE flags
  if(!(binary == 'empty')) {
    if(binary) { # binary
      con <- file(file, open = 'rb')
      # try() ensures the connection is still closed below even on a malformed header
      try({
        v <- readChar(con, nchars = 11) # version string ('MT3D...' or 'MTGS...')
        rec <- readBin(con, what = 'integer', n = 9)
        # elements 3 & 4 of the first integer record flag active RCH/EVT packages
        if(rec[3] > 0) lg['frch'] <- TRUE
        if(rec[4] > 0) lg['fevt'] <- TRUE
        # s <- grepl('MT3D', v, ignore.case = TRUE) # MT3DMS header
        usgs <- grepl('MTGS', v, ignore.case = TRUE) # MT3D-USGS
        if(usgs) {
          # MT3D-USGS appends npk 20-character package names; scan them for UZF
          npk <- readBin(con, what = 'integer', n = 1)
          if(npk > 0) {
            rec2 <- vector(mode = 'character', length = npk)
            for(i in 1:npk) rec2[i] <- toupper(trimws(readChar(con, nchars = 20)))
            if(any(rec2 == 'UZF')) lg['fuzf'] <- TRUE
          }
        }
        # if(s) {
        #   version <- sub('MT3D', '', v, ignore.case = TRUE)
        #   vn <- as.numeric(strsplit(version, '\\.')[[1]][1])
        #   # standard header (v < 4, not supported by MODFLOW-2005) or extended header (v >= 4)
        #   # not necessary to read
        #   # ext_header <- vn >= 4
        #   # if(ext_header) rec2 <- readBin(con, what = 'integer', n = 12)
        #
        # } else if(usgs) {
        #   npk <- readBin(con, what = 'integer', n = 1)
        #   rec2 <- vector(mode = 'character', length = npk)
        #   for(i in 1:npk) rec2[i] <- readChar(con, nchars = 12)
        #
        # } else {
        #   stop('Can only read flow-transport link with MT3DMS or MT3D-USGS headers', call. = FALSE)
        # }
      })
      close(con)
    } else { # ASCII
      lines <- readr::read_lines(file, n_max = 40, lazy = FALSE)
      # first record: version string + flags, read as free-format character fields
      rec <- rmti_parse_variables(lines, n = 10, format = 'free', character = TRUE)
      v <- trimws(rec$variables[1])
      # fields 4 & 5 flag active RCH/EVT packages
      if(as.numeric(rec$variables[4]) > 0) lg['frch'] <- TRUE
      if(as.numeric(rec$variables[5]) > 0) lg['fevt'] <- TRUE
      usgs <- grepl('MTGS', v, ignore.case = TRUE) # MT3D-USGS
      if(usgs) {
        remaining_lines <- rec$remaining_lines
        # if(length(rec$variables) < 9) remaining_lines <- remaining_lines[-1]
        ds <- rmti_parse_variables(remaining_lines, n = 1, format = 'free')
        npk <- as.numeric(ds$variables)
        remaining_lines <- ds$remaining_lines
        if(npk > 0) {
          # npk (possibly quoted) package names follow, one per line; scan them for UZF
          rec2 <- trimws(gsub('\'', '', remaining_lines[1:npk]))
          if(any(toupper(rec2) == 'UZF')) lg['fuzf'] <- TRUE
        }
      }
    }
  }
  return(lg)
}
#' Get an array specified by a control record from the text lines analyzed in a \code{RMT3DMS} \code{rmt_read_*} function
#' @param remaining_lines lines to read the array from
#' @param nrow number of rows in the array
#' @param ncol number of columns in the array
#' @param nlay number of layers in the array that should be read
#' @param ndim dimensions of the array to read; either 1, 2 or 3. Denotes the if the returned array should be 1D, 2D or 3D.
#' @param skip_header optional; should the control record be skipped
#' @param nam a \code{RMT3DMS} nam object. Required when reading external arrays
#' @param precision character: either \code{'single'} (default) or \code{'double'}. Denotes the precision of binary files
#' @param file pathname to the input file which is currently being read. Required when reading fixed-format or MODFLOW-style OPEN/CLOSE arrays
#' @param integer logical; does the binary array hold integer values. Might not work optimally.
#' @param ... ignored
#' @return A list containing the array and the remaining text of the MT3DMS input file
#' @details MT3DMS IREAD codes: 0 = CONSTANT, 100 = internal array, 101 = internal block,
#'   102 = internal zone, 103 = internal list; any other value = EXTERNAL unit number.
#'   The CNSTNT multiplier is applied exactly once in every branch.
#' @keywords internal
rmti_parse_array <- function(remaining_lines, nrow, ncol, nlay, ndim,
                             skip_header = FALSE, nam = NULL, precision = "single", file = NULL, integer = FALSE, ...) {
  # Initialize array object
  array <- array(dim=c(nrow,ncol,nlay))
  # Read array according to format type if there is anything to be read
  if(prod(dim(array))!=0)
  {
    for(k in 1:nlay)
    {
      header <- rmti_parse_variables(remaining_lines[1], n = 2, format = 'fixed')
      # MODFLOW-style free format control header: delegate to RMODFLOW's reader
      if(header$variables[1] %in% c('CONSTANT', 'INTERNAL', 'EXTERNAL', 'OPEN/CLOSE') || skip_header) {
        rmf_data_set <- RMODFLOW:::rmfi_parse_array(remaining_lines, nrow = nrow, ncol = ncol, nlay = 1, ndim = ndim,
                                                    skip_header = skip_header, nam = nam, precision = precision, file = file, integer = integer, ...)
        array[,,k] <- rmt_convert_rmf_to_rmt(rmf_data_set$array)
        remaining_lines <- rmf_data_set$remaining_lines
        rm(rmf_data_set)
      } else {
        # MT3DMS fixed format control header: IREAD (cols 1-10), CNSTNT (cols 11-20), FMTIN (cols 21-40)
        fortranfmt <- FALSE
        iread <- as.numeric(header$variables[1])
        cnst <- as.numeric(header$variables[2])
        fmtin <- trimws(paste0(strsplit(remaining_lines[1], split = '')[[1]][21:40], collapse = ''))
        if(iread == 0) { # CONSTANT
          array[,,k] <- cnst
          nLines <- 1
        } else if(iread == 100) { # INTERNAL-ARRAY
          lengths <- RMODFLOW:::rmfi_fortran_format(fmtin)
          remaining_lines <- remaining_lines[-1]
          # a zero CNSTNT means "no multiplier"
          if(cnst == 0) cnst <- 1.0
          remaining_lines[1] <- paste(substring(remaining_lines[1], first = cumsum(lengths) - lengths + 1, last = cumsum(lengths)), collapse = ' ')
          nPerLine <- length(lengths)
          nLines <- (ncol %/% nPerLine + ifelse((ncol %% nPerLine)==0, 0, 1))*nrow
          if(nLines > 1) remaining_lines[2:nLines] <- vapply(2:(nLines), function(i) paste(substring(remaining_lines[i], first = cumsum(lengths) - lengths + 1, last = cumsum(lengths)), collapse = ' '), 'text')
          # BUG FIX: cnst was previously applied twice here (once in this expression
          # and again afterwards). The multiplier must be applied exactly once,
          # consistent with the BLOCK/ZONE/LIST and EXTERNAL branches below.
          array[,,k] <- cnst*matrix(as.numeric(rmti_remove_empty_strings(strsplit(paste(remaining_lines[1:nLines],collapse='\n'),' |,| ,|, |\t|\n| \n|\n ')[[1]])),nrow=nrow,ncol=ncol,byrow=TRUE)
        } else if(iread == 101) { # INTERNAL-BLOCK
          remaining_lines <- remaining_lines[-1]
          if(cnst == 0) cnst <- 1.0
          ds <- rmti_parse_variables(remaining_lines, n = 1, format = 'free')
          nblock <- as.numeric(ds$variables[1])
          remaining_lines <- ds$remaining_lines
          nLines <- 0
          # each block record: I1 I2 J1 J2 value
          for(block in 1:nblock) {
            ds2 <- rmti_parse_variables(remaining_lines, n = 5, format = 'free')
            block_values <- as.numeric(ds2$variables[1:5])
            array[block_values[1]:block_values[2], block_values[3]:block_values[4], k] <- block_values[5]
            remaining_lines <- ds2$remaining_lines
          }
          array[,,k] <- array[,,k]*cnst
        } else if(iread == 102) { # INTERNAL-ZONE
          lengths <- RMODFLOW:::rmfi_fortran_format(fmtin)
          remaining_lines <- remaining_lines[-1]
          if(cnst == 0) cnst <- 1.0
          ds <- rmti_parse_variables(remaining_lines, n = 1, format = 'free')
          nzone <- as.numeric(ds$variables[1])
          remaining_lines <- ds$remaining_lines
          # nzone zone values follow; the zone-number array maps each cell to one of them
          ds2 <- rmti_parse_variables(remaining_lines, n = nzone, format = 'free')
          zv <- as.numeric(ds2$variables[1:nzone])
          remaining_lines <- ds2$remaining_lines
          remaining_lines[1] <- paste(substring(remaining_lines[1], first = cumsum(lengths) - lengths + 1, last = cumsum(lengths)), collapse = ' ')
          nPerLine <- length(lengths)
          nLines <- (ncol %/% nPerLine + ifelse((ncol %% nPerLine)==0, 0, 1))*nrow
          if(nLines > 1) remaining_lines[2:nLines] <- vapply(2:(nLines), function(i) paste(substring(remaining_lines[i], first = cumsum(lengths) - lengths + 1, last = cumsum(lengths)), collapse = ' '), 'text')
          zone <- matrix(as.numeric(rmti_remove_empty_strings(strsplit(paste(remaining_lines[1:nLines],collapse='\n'),' |,| ,|, |\t|\n| \n|\n ')[[1]])),nrow=nrow,ncol=ncol,byrow=TRUE)
          # replace zone numbers by their corresponding zone values
          for(nz in 1:nzone) {
            zone[which(zone == nz)] <- zv[nz]
          }
          array[,,k] <- zone*cnst
        } else if(iread == 103) { # INTERNAL-LIST
          remaining_lines <- remaining_lines[-1]
          if(cnst == 0) cnst <- 1.0
          n_final <- nrow*ncol
          n <- 0
          nLines <- 0
          values <- vector(mode = 'numeric')
          end <- FALSE
          while(n < n_final && !end) {
            nLines <- nLines + 1
            values_ch <- rmti_remove_empty_strings(strsplit(paste(remaining_lines[nLines],collapse='\n'),' |,| ,|, |\t|\n| \n|\n ')[[1]])
            # terminate if / is encountered
            slash <- grepl('/', values_ch)
            if(any(slash)) {
              last_value <- which(slash)[1]
              values_ch[last_value] <- sub('/.*', '', values_ch[last_value])
              values_ch <- values_ch[1:last_value]
              end <- TRUE
            }
            # expand FORTRAN-style repeat counts, e.g. '3*2.5' -> 2.5 2.5 2.5
            values_ch <- strsplit(values_ch, '\\*')
            values_ch <- lapply(values_ch, function(i) rmti_ifelse0(length(i) > 1, rep(as.numeric(i[[2]]), as.numeric(i[[1]])), as.numeric(i)))
            values <- c(values, unlist(values_ch))
            n <- length(values)
          }
          array[,,k] <- matrix(as.numeric(values[1:n_final])*cnst, nrow = nrow, ncol = ncol, byrow = TRUE)
        } else { # EXTERNAL
          if(cnst == 0) cnst <- 1.0
          if(is.null(nam)) stop('Please supply a MT3DMS nam object when reading EXTERNAL arrays', call. = FALSE)
          # resolve the unit number to a file name via the NAM file
          if(abs(iread) %in% nam$nunit) {
            fname <- nam$fname[which(nam$nunit == abs(iread))]
          } else if(any(nam$nunit == 0) && iread %in% rmtd_internal_nunit$nunit) { # nunit = 0
            ext_ftype <- rmtd_internal_nunit$ftype[which(rmtd_internal_nunit$nunit == iread)]
            fname <- nam$fname[which(nam$ftype == ext_ftype)]
          } else {
            stop('nunit for EXTERNAL array not found in NAM file', call. = FALSE)
          }
          direct <- attr(nam, 'dir')
          absfile <- file.path(fname)
          if(!file.exists(absfile)) absfile <- file.path(direct, fname) # try full name
          if(!file.exists(absfile)) stop('Could not determine path to EXTERNAL file on unit number ', iread, call. = FALSE)
          ext_file <- TRUE
          # ASCII (positive unit number)
          if(iread > 0) {
            if(!(toupper(fmtin) %in% c('(FREE)', 'FREE', '(BINARY)','BINARY'))) {
              lengths <- RMODFLOW:::rmfi_fortran_format(fmtin)
              fortranfmt <- TRUE
            }
            if(iread == nam$nunit[which(basename(nam$fname) == basename(file))] || normalizePath(absfile) == normalizePath(file)) { # read from current file
              ext_file <- FALSE
              remaining_lines <- remaining_lines[-1]
              if(fortranfmt) {
                remaining_lines[1] <- paste(substring(remaining_lines[1], first = cumsum(lengths) - lengths + 1, last = cumsum(lengths)), collapse = ' ')
                nPerLine <- length(lengths)
                nLines <- (ncol %/% nPerLine + ifelse((ncol %% nPerLine)==0, 0, 1))*nrow
                if(nLines > 1) remaining_lines[2:nLines] <- vapply(2:(nLines), function(i) paste(substring(remaining_lines[i], first = cumsum(lengths) - lengths + 1, last = cumsum(lengths)), collapse = ' '), 'text')
                array[,,k] <- cnst*matrix(as.numeric(rmti_remove_empty_strings(strsplit(paste(remaining_lines[1:nLines],collapse='\n'),' |,| ,|, |\t|\n| \n|\n ')[[1]])),nrow=nrow,ncol=ncol,byrow=TRUE)
              } else {
                nPerLine <- length(as.numeric(rmti_remove_empty_strings(strsplit(remaining_lines[1],' |\t|,')[[1]])))
                nLines <- (ncol %/% nPerLine + ifelse((ncol %% nPerLine)==0, 0, 1))*nrow
                array[,,k] <- cnst*matrix(as.numeric(rmti_remove_empty_strings(strsplit(paste(remaining_lines[1:nLines],collapse='\n'),' |,| ,|, |\t|\n| \n|\n ')[[1]])),nrow=nrow,ncol=ncol,byrow=TRUE)
              }
            } else { # read from external file
              external_lines <- readr::read_lines(absfile, lazy = FALSE)
              # remove lines of previous arrays (line offset is cached as a nam attribute)
              if(!is.null(attr(nam, as.character(iread)))) external_lines <- external_lines[-c(1:attr(nam, as.character(iread)))]
              if(fortranfmt) {
                external_lines[1] <- paste(substring(external_lines[1], first = cumsum(lengths) - lengths + 1, last = cumsum(lengths)), collapse = ' ')
                nPerLine <- length(lengths)
                nLines <- (ncol %/% nPerLine + ifelse((ncol %% nPerLine)==0, 0, 1))*nrow
                if(nLines > 1) external_lines[2:nLines] <- vapply(2:(nLines), function(i) paste(substring(external_lines[i], first = cumsum(lengths) - lengths + 1, last = cumsum(lengths)), collapse = ' '), 'text')
                array[,,k] <- cnst*matrix(as.numeric(rmti_remove_empty_strings(strsplit(paste(external_lines[1:nLines],collapse='\n'),' |,| ,|, |\t|\n| \n|\n ')[[1]])),nrow=nrow,ncol=ncol,byrow=TRUE)
              } else {
                nPerLine <- length(as.numeric(rmti_remove_empty_strings(strsplit(external_lines[1],' |\t|,')[[1]])))
                nLines <- (ncol %/% nPerLine + ifelse((ncol %% nPerLine)==0, 0, 1))*nrow
                array[,,k] <- cnst*matrix(as.numeric(rmti_remove_empty_strings(strsplit(paste(external_lines[1:nLines],collapse='\n'),' |,| ,|, |\t|\n| \n|\n ')[[1]])),nrow=nrow,ncol=ncol,byrow=TRUE)
              }
            }
          } else if(iread < 0) { # read binary from external file
            con <- file(absfile,open='rb')
            real_number_bytes <- ifelse(precision == 'single', 4, 8)
            type <- ifelse(integer, 'integer', 'numeric')
            size <- ifelse(type == 'integer', NA_integer_, real_number_bytes)
            if(type=='integer') warning('Reading integer binary EXTERNAL array might not work optimally')
            try({
              # skip records of previously read arrays on this unit
              if(!is.null(attr(nam, as.character(iread)))) {
                for(jj in 1:attr(nam, as.character(iread))) {
                  invisible(readBin(con, what = 'integer', n = 2))
                  invisible(readBin(con,what='numeric',n = 2, size = real_number_bytes))
                  invisible(readChar(con,nchars=16))
                  nncol <- readBin(con, what = 'integer', n = 1)
                  nnrow <- readBin(con, what = 'integer', n = 1)
                  invisible(readBin(con, what = 'integer', n = 1))
                  invisible(readBin(con,what='numeric',n = nncol * nnrow, size = real_number_bytes))
                }
              }
              # integer binary arrays should not have headers in MODFLOW (2005, v1.12 - see U2DINT subroutine, line 682)
              if(!integer) {
                invisible(readBin(con, what = 'integer', n = 2))
                invisible(readBin(con,what='numeric',n = 2, size = real_number_bytes))
                invisible(readChar(con,nchars=16))
                invisible(readBin(con, what = 'integer', n = 3))
              }
              array[,,k] <- cnst*aperm(array(readBin(con,what=type,n = ncol * nrow, size = size),dim=c(ncol, nrow)), c(2, 1))
              nLines <- 1})
            close(con)
          }
          if(ext_file) {
            # cache how many lines/records of this external unit have been consumed
            if(is.null(attr(nam, as.character(iread)))) {
              attr(nam, as.character(iread)) <- nLines
            } else {
              attr(nam, as.character(iread)) <- attr(nam, as.character(iread)) + nLines
            }
            nLines <- 1
          }
        }
        if(nLines > 0) remaining_lines <- remaining_lines[-c(1:nLines)]
      }
    }
  }
  # Set class of object (2darray; 3darray)
  if(ndim == 1) {
    array <- c(array(array,dim=nrow*ncol*nlay))
  } else if(ndim == 2) {
    array <- rmt_create_array(array[,,1], dim = c(nrow, ncol))
  } else if(ndim == 3) {
    array <- rmt_create_array(array, dim = c(nrow, ncol, nlay))
  } else {
    stop('ndim should be 1, 2 or 3')
  }
  # Return output of reading function
  data_set <- list(array = array, remaining_lines = remaining_lines)
  return(data_set)
}
#' Read MT3DMS single-line variables
#' @param remaining_lines character vector of lines still to be read; values are consumed from the first line(s)
#' @param n integer; number of variables to be returned. If zero for \code{'free'} format, reads all values on a single line and does not check if n values are read or empty lines, '*' or '/' are present.
#' @param width integer; length of a single variable. Only used when format is \code{'fixed'}.
#' @param nlay integer; number of layers for which values are to be read. Only use when a 1D(NLAY) variable is read which may be specified on multiple lines. Only used when format is \code{'fixed'}.
#' @param character logical; should a character vector be returned. Prevents conversion from character names to numeric. Defaults to FALSE. Useful if only characters are present on the line.
#' @param format character, either \code{'free'} or \code{'fixed'}. When 'fixed', reads character fields of length 'width' and converts to numeric. Empty fields are set to zero.
#' @param ... ignored
#' @return vector with values
#' @keywords internal
rmti_parse_variables <- function(remaining_lines, n, width = 10, nlay = NULL, character = FALSE, format = 'fixed', ...) {
  if(format == 'free') {
    if(n == 0) {
      # take every value on the first line, splitting on whitespace/commas
      nLines <- 1
      variables <- rmti_remove_empty_strings(strsplit(rmti_remove_comments_end_of_line(remaining_lines[1]),' |\t|,')[[1]])
    } else {
      # keep consuming lines until n values are collected or a '/' terminator is hit
      n.cnt <- nLines <- 0
      end <- FALSE
      variables <- vector(mode = 'character')
      while(n.cnt < n && !end) {
        nLines <- nLines + 1
        values_ch <- rmti_remove_empty_strings(strsplit(paste(remaining_lines[nLines],collapse='\n'),' |\t|,')[[1]])
        # terminate if / is encountered
        slash <- grepl('/', values_ch)
        if(any(slash)) {
          last_value <- which(slash)[1]
          values_ch[last_value] <- sub('/.*', '', values_ch[last_value])
          values_ch <- values_ch[1:last_value]
          end <- TRUE
        }
        # expand FORTRAN-style repeat counts, e.g. '3*2.5' -> 2.5 2.5 2.5
        values_ch <- strsplit(values_ch, '\\*')
        values_ch <- lapply(values_ch, function(i) rmti_ifelse0(length(i) > 1, rep(i[[2]], as.numeric(i[[1]])), i))
        variables <- c(variables, unlist(values_ch))
        n.cnt <- length(variables)
      }
      variables <- variables[1:n]
    }
    # convert to numeric only when every value parses cleanly
    if(!character && !any(is.na(suppressWarnings(as.numeric(variables))))) variables <- as.numeric(variables)
    return(list(variables=variables,remaining_lines=remaining_lines[-c(1:nLines)]))
  } else if(format == 'fixed') { # every value has 'width' characters; empty values are zero
    # slice the first line into consecutive fixed-width character fields
    variables <- (unlist(lapply(seq(1,nchar(remaining_lines[1]), by=width),
                                function(i) paste0(strsplit(rmti_remove_comments_end_of_line(remaining_lines[1]),'')[[1]][i:(i+min(width, nchar(remaining_lines[1])-i+1)-1)], collapse=''))))
    variables <- lapply(strsplit(variables, " |\t"), rmti_remove_empty_strings)
    variables[which(lengths(variables)==0)] <- 0 # empty values are set to 0
    variables <- unlist(variables)
    if(!is.null(nlay)) {
      # a 1D(NLAY) variable may continue on subsequent lines until nlay values are read
      while(length(variables) < nlay) {
        remaining_lines <- remaining_lines[-1]
        variables <- append(variables, rmti_remove_empty_strings(strsplit(rmti_remove_comments_end_of_line(remaining_lines[1]),' |\t|,')[[1]]))
      }
    } else if(!character && !any(is.na(suppressWarnings(as.numeric(variables))))) { # convert to numeric
      variables <- as.numeric(variables)
      if(length(variables) < n) variables <- c(variables, rep(0, n - length(variables))) # append 0's if values are missing
    } else { # remain as character
      if(length(variables) < n) variables <- c(variables, rep('0', n - length(variables))) # append 0's if values are missing
    }
    return(list(variables=variables,remaining_lines=remaining_lines[-1]))
  }
}
#' Remove comments at the end of a string
#'
#' Everything from the first '!' onwards is considered a comment and dropped.
#' @param line A string.
#' @return The string, without the commented part.
#' @keywords internal
rmti_remove_comments_end_of_line <- function(line) {
  pos <- regexpr('!', line, fixed = TRUE)
  if (pos > 0) {
    substr(line, 1, pos - 1)
  } else {
    line
  }
}
#' Remove empty elements from a vector of strings.
#' @param vector_of_strings Vector of strings.
#' @return Vector of strings without the empty items.
#' @keywords internal
rmti_remove_empty_strings <- function(vector_of_strings) {
  # which() also drops NA comparisons, so NA elements are removed as well
  non_empty <- which(vector_of_strings != '')
  vector_of_strings[non_empty]
}
#' Write MT3DMS array
#' Internal function used in the rmt_write_* functions for writing array datasets
#' @param array array to write; may be a vector, 2D matrix or 3D array
#' @param file pathname to the file to write the array to
#' @param mf_style logical, should MODFLOW-style array headers be used (i.e. INTERNAL, EXTERNAL, OPEN/CLOSE, ...) ? Defaults to FALSE
#' @param format either 'free' (iread = 103, i.e. FORTRAN free format) or 'fixed' (iread = 100 using FORTRAN format 10G11.4). In both cases, iread = 0 when the array only contains 1 unique value. Defaults to 'free'.
#' @param cnstnt numeric constant to add to the array header which acts as a multiplier for the array values in MODFLOW. Default to 1
#' @param iprn iprn code to add to array header. Defaults to -1
#' @param append logical; should array be appended to the file. Defaults to TRUE
#' @param ... passed to \code{\link{RMODFLOW:::rmfi_write_array}} when mf_style is TRUE
#' @return NULL
#' @keywords internal
rmti_write_array <- function(array, file, mf_style = FALSE, format = 'free', cnstnt=1, iprn=-1, append=TRUE, ...) {
  if(mf_style) {
    # delegate to RMODFLOW for MODFLOW-style headers
    RMODFLOW:::rmfi_write_array(array = array, file = file, cnstnt = cnstnt, iprn = iprn, append = append, ...)
  } else {
    # MT3DMS
    # only iread 0 & 100 & 103 supported (CONSTANT & INTERNAL-ARRAY fixed and free format)
    # format: (10G11.4)
    fmt <- '(10G11.4)'
    nPerLine <- 10 # values per line for the fixed (10G11.4) format
    width <- 11
    decimals <- 4
    iprn <- as.integer(iprn)
    if(is.null(dim(array))) {
      # --- vector input ---
      if(prod(c(array)[1] == c(array))==1) {
        # all values identical: write a CONSTANT (iread = 0) record
        rmti_write_variables(0L, cnstnt * c(array)[1], file = file, append = append, width = 10)
      } else {
        if(format == 'free') {
          rmti_write_variables(103L, cnstnt, '(free)', iprn, file = file, append = append, width = c(10, 10, 20, 10))
          cat(paste(paste(array, collapse=' '), '\n', sep=' '), file=file, append=append)
        } else if(format == 'fixed') {
          rmti_write_variables(100L, cnstnt, fmt, iprn, file = file, append = append, width = c(10, 10, 20, 10))
          # pad the value matrix so write.table emits complete fixed-width lines
          n <- length(c(array))
          nLines <- n %/% nPerLine
          remainder <- n %% nPerLine
          if(remainder > 0) nLines <- nLines + 1
          if(n > nPerLine) {
            formatted_array <- t(matrix('', nrow = nLines, ncol = nPerLine))
            formatted_array[1:n] <- formatC(c(array), format = 'g', digits = 4, width = 11)
            formatted_array <- t(formatted_array)
          } else {
            formatted_array <- formatC(c(array), format = 'g', digits = 4, width = 11)
          }
          write.table(formatted_array, file=file, append=append, sep='', col.names=FALSE, row.names=FALSE, quote = FALSE)
        }
      }
    } else if(length(dim(array))==2) {
      # --- 2D matrix input ---
      if(prod(c(array)[1] == c(array))==1) {
        # all values identical: write a CONSTANT (iread = 0) record
        rmti_write_variables(0L, cnstnt * c(array)[1], file = file, append = append, width = 10)
      } else {
        if(format == 'free') {
          rmti_write_variables(103L, cnstnt, '(free)', iprn, file = file, append = append, width = c(10, 10, 20, 10))
          if(dim(array)[1] == 1) {
            cat(paste0(paste(array, collapse=' '),'\n'), file=file, append=append)
          } else {
            write.table(array, file=file, append=append, sep=' ', col.names=FALSE, row.names=FALSE)
          }
        } else if(format == 'fixed') {
          rmti_write_variables(100L, cnstnt, fmt, iprn, file = file, append = append, width = c(10, 10, 20, 10))
          # pad the value matrix so write.table emits complete fixed-width lines
          n <- length(c(array))
          nLines <- n %/% nPerLine
          remainder <- n %% nPerLine
          if(remainder > 0) nLines <- nLines + 1
          if(n > nPerLine) {
            formatted_array <- t(matrix('', nrow = nLines, ncol = nPerLine))
            formatted_array[1:n] <- formatC(c(array), format = 'g', digits = 4, width = 11)
            formatted_array <- t(formatted_array)
          } else {
            formatted_array <- formatC(c(array), format = 'g', digits = 4, width = 11)
          }
          write.table(formatted_array, file=file, append=append, sep='', col.names=FALSE, row.names=FALSE, quote = FALSE)
        }
      }
    } else {
      # --- 3D array input: one record + array per layer ---
      for(i in 1:dim(array)[3])
      {
        if(prod(c(array[,,i])[1] == c(array[,,i]))==1) {
          rmti_write_variables(0L, cnstnt * c(array[,,i])[1], file = file, append = ifelse(i == 1, append, TRUE), width = 10)
        } else {
          if(format == 'free') {
            rmti_write_variables(103L, cnstnt, '(free)', iprn, file = file, append = ifelse(i == 1, append, TRUE), width = c(10, 10, 20, 10))
            if(dim(array)[1] == 1) {
              cat(paste0(paste(array[,,i], collapse=' '),'\n'), file=file, append=ifelse(i == 1, append, TRUE))
            } else {
              write.table(array[,,i], file=file, append=ifelse(i == 1, append, TRUE), sep=' ', col.names=FALSE, row.names=FALSE)
            }
          } else if(format == 'fixed') {
            rmti_write_variables(100L, cnstnt, fmt, iprn, file = file, append = ifelse(i == 1, append, TRUE), width = c(10, 10, 20, 10))
            n <- length(c(array[,,i]))
            nLines <- length(c(array[,,i])) %/% nPerLine
            remainder <- length(c(array[,,i])) %% nPerLine
            if(remainder > 0) nLines <- nLines + 1
            if(length(c(array[,,i])) > nPerLine) {
              formatted_array <- t(matrix('', nrow = nLines, ncol = nPerLine))
              formatted_array[1:n] <- formatC(c(array[,,i]), format = 'g', digits = 4, width = 11)
              formatted_array <- t(formatted_array)
            } else {
              formatted_array <- formatC(c(array[,,i]), format = 'g', digits = 4, width = 11)
            }
            write.table(formatted_array, file=file, append=ifelse(i == 1, append, TRUE), sep='', col.names=FALSE, row.names=FALSE, quote = FALSE)
          }
        }
      }
    }
  }
}
#' Write MT3DMS variables
#' Internal function used in the rmt_write_* functions for writing single line datasets
#' @param ... values to write, in order; empty values are dropped
#' @param file pathname to the file to append the line to
#' @param append logical; append to \code{file}? Defaults to TRUE
#' @param format either \code{'fixed'} or \code{'free'}. Fixed format assumes fixed width character spaces for each value as determined by the width argument
#' @param width numeric vector with the character widths for each variable. If a single value, it is repeated.
#' @param integer logical; should all values be converted to integers? MT3D does not allow for exponents in integer values
#' @param iprn ignored
#' @return NULL
#' @keywords internal
rmti_write_variables <- function(..., file, append=TRUE, width = 10, format = 'fixed', integer = FALSE, iprn = -1) {
  arg <- list(...)
  arg <- arg[vapply(arg, function(i) all(nchar(i) > 0), TRUE)] # removes empty elements
  if(integer) arg <- lapply(arg, as.integer)
  # sets integers in proper format since Type is converted to double when vectorized
  if(format == 'free') {
    if(integer) {
      # formatC keeps integers free of exponent notation, which MT3D cannot read
      arg <- lapply(arg, formatC)
    } else {
      arg <- lapply(arg, as.character)
    }
    arg <- unlist(arg)
    cat(paste0(paste(arg, sep = ' ', collapse = ' '), '\n'), file=file, append=append)
  } else if(format == 'fixed') {
    arg <- unlist(lapply(arg, as.list), recursive = FALSE)
    if(length(width) == 1) width <- rep(width, length(arg))
    # right-justify each value inside its field; values wider than the field are handled by formatC
    arg <- lapply(1:length(arg), function(i) rmti_ifelse0(nchar(arg[[i]]) > width[i], formatC(arg[[i]], width = width[i]), paste0(paste0(rep(' ', width[i]-nchar(arg[[i]])), collapse = ''), as.character(arg[[i]]), collapse = '')))
    # truncate each field to exactly width[i] characters
    arg <- lapply(1:length(arg), function(i) paste0(strsplit(arg[[i]], '')[[1]][1:width[i]], collapse = ''))
    arg <- unlist(arg)
    cat(paste0(paste0(arg, collapse=''), '\n'), file=file, append=append)
  }
}
| /R/internals.R | no_license | cneyens/RMT3DMS | R | false | false | 34,587 | r |
#' Function to create a rmt_list object from RMODFLOW discrete boundary condition objects
#'
#' @param obj RMODFLOW discrete boundary condition object. Allowed objects are chd, ghb, riv, drn, wel
#' @param conc vector, data.frame or matrix with concentration values. A vector is treated as a single species (one column).
#' @param itype integer column (or single value) to indicate flux type
#' @param kper integer value(s) indicating during which stress-periods this rmf_list is active
#'
#' @return a \code{rmt_list} object
#' @keywords internal
rmti_create_bc_list <- function(obj, conc, itype, kper) {
  # kper can only be different if flow has only 1 stress-period (should be nr 1) which should be steady-state (not known in this code)
  kper_flow <- unique(obj$kper$kper)
  if(!identical(sort(kper_flow), sort(unique(kper)))) {
    if(length(kper_flow) > 1 || kper_flow != 1) {
      stop('kper of rmt_list differs from kper of flow object', call. = FALSE)
    } else {
      kper_select <- 1
    }
  } else {
    kper_select <- kper
  }
  # names of boundary features active during the selected stress period(s)
  names_act <- colnames(obj$kper)[which(obj$kper[kper_select,which(!is.na(obj$kper[kper_select,]))] != FALSE)[-1]]
  if(length(names_act) == 0) stop('No active obj features for kper ', kper_select, call. = FALSE)
  df <- as.data.frame(obj$data)
  df <- df[df$name %in% names_act, ]
  # concentrations
  # BUG FIX: a plain vector has NULL dim, so ncol(conc) was NULL and matrix()
  # failed; coerce a vector to a single-column (single species) matrix first
  if(is.null(dim(conc))) conc <- matrix(conc, ncol = 1)
  conc <- structure(as.data.frame(matrix(conc, ncol = ncol(conc))), names = paste0('css', 1:ncol(conc)))
  df <- cbind(df, itype, conc)
  df <- rmt_create_list(df, kper = kper)
  return(df)
}
#' Conditional return
#'
#' \code{rmti_ifelse0} returns \code{yes} if \code{test} is \code{TRUE}. If \code{test} is \code{FALSE}, it returns \code{no}.
#' Unlike \code{ifelse()}, this works on a single scalar condition and never
#' recycles, strips attributes or coerces its return value.
#' @param test an object which can be coerced to logical mode.
#' @param yes return value for \code{test==TRUE}
#' @param no return value for \code{test==FALSE}
#' @keywords internal
rmti_ifelse0 <- function(test, yes, no) {
  if (test) yes else no
}
#' List supported MT3DMS/MT3D-USGS packages
#'
#' @param type character denoting type of packages to list; possible values are \code{'usgs' (default), 'mt3dms', 'output'}
#'
#' @return data.frame with ftype and rmt columns denoting the MT3DMS and \code{RMT3DMS} abbreviations for the requested packages
#' @keywords internal
#' @details 'usgs' holds all packages; 'mt3dms' is a subset, 'output' lists all supported output types
#' @note this function should be updated every time a new MT3DMS package is supported in \code{RMT3DMS}
rmti_list_packages <- function(type = 'usgs') {
  # update rmfd_supported_packages in /data-raw/ when a new package is supported
  # (the NAM file is supported but intentionally absent from these tables)
  df <- rmtd_supported_packages
  # Exhaustive package lists; MT3DMS is a strict subset of MT3D-USGS
  usgs <- c('btn', 'ft6', 'adv', 'dsp', 'ssm', 'rct', 'gcg', 'tob', 'hss',
            'cts', 'tso', 'uzt', 'lkt', 'sft')
  mt3dms <- c('btn', 'adv', 'dsp', 'ssm', 'rct', 'gcg', 'tob', 'hss')
  if (type == 'output') {
    # output types live in a separate lookup table
    df <- rmtd_supported_output
  } else {
    # look the requested vector up by name ('usgs' or 'mt3dms') and subset
    df <- subset(df, rmt %in% get(type))
  }
  df
}
#' Read comments
#' Internal function used in the read_* functions to read comments
#' @param remaining_lines character vector with the lines of the input file still to be processed
#' @param id optional integers specifying which lines are comments. If NULL (default), lines starting with "#" indicate commented lines
#' @return list with elements \code{comments} (character vector or NULL) and \code{remaining_lines} (input minus comment lines)
#' @details removes empty comments and prevents copying of RMT3DMS header comment
#' @keywords internal
rmti_parse_comments <- function(remaining_lines, id = NULL) {
  # header comment that RMT3DMS writers prepend; it should never be copied back into objects
  v <- paste("RMT3DMS, version", packageDescription("RMT3DMS")$Version)
  comments <- NULL
  if(is.null(id)) {
    # a line is a comment when its first character is "#"
    comment_tag <- substr(remaining_lines, 1, 1)
    comment_id <- which(comment_tag == "#")
  } else {
    comment_id <- id
  }
  if(length(comment_id) > 0) {
    comments <- gsub('#', '', remaining_lines[comment_id])
    # remove empty comments
    empty <- which(nchar(trimws(comments)) == 0)
    if(length(empty) > 0) comments <- comments[-empty]
    # remove RMT3DMS header; fixed = TRUE so the dots in the version string are
    # matched literally instead of as regex wildcards
    header <- grep(v, comments, fixed = TRUE)
    if(length(header) > 0) comments <- comments[-header]
    remaining_lines <- remaining_lines[-comment_id]
  }
  return(list(comments = comments, remaining_lines = remaining_lines))
}
#' Check if the FTL file is binary
#'
#' @param file pathname to the flow-transport link file, typically '*.ftl'
#'
#' @return logical indicating if the FTL file is binary or keyword 'empty' if file is empty.
#' @keywords internal
rmti_check_ftl_binary <- function(file) {
  # read raw bytes; readr::read_lines on a binary file might crash the session
  raw_lines <- readr::read_lines_raw(file, n_max = 10)
  # TODO this is a weak check for binary: only the 4-byte magic header is inspected
  magic <- rawToChar(unlist(raw_lines)[1:4])
  binary <- toupper(magic) %in% c('MT3D', 'MTGS')
  if(!binary) {
    # an ASCII file without a second line is flagged as 'empty'
    converted <- unlist(lapply(raw_lines, rawToChar))
    binary <- ifelse(is.na(converted[2]), 'empty', binary)
  }
  return(binary)
}
#' Read the package header from a flow-transport link file
#'
#' @param file pathname to the flow-transport link file, typically '*.ftl'
#' @param binary is the FTL file binary? If NULL (default), it is detected with \code{rmti_check_ftl_binary}
#'
#' @return logical vector of length 3, indicating if the rch, evt or uzf packages are active in the flow simulation
#' @details This function is used in \code{\link{rmt_read_ssm}} as a replacement for reading data set 1. It is used to determine if
#' rch, evt and/or uzf concentration arrays have to be read. All other active flow packages are read from the point source/sink data sets.
#' \code{rmti_parse_ftl_header} can only be used with flow-transport link files using MT3DMS or MT3D-USGS headers.
#' The UZF package can only be used with MT3D-USGS.
#' @keywords internal
rmti_parse_ftl_header <- function(file, binary = NULL) {
  # flags default to FALSE: frch = recharge, fevt = evapotranspiration, fuzf = unsaturated-zone flow
  lg <- structure(rep(FALSE, 3), names = c('frch', 'fevt', 'fuzf'))
  # check if ftl file is binary
  if(is.null(binary)) {
    binary <- rmti_check_ftl_binary(file)
  }
  # an 'empty' FTL file has no header record at all; return all-FALSE flags
  if(!(binary == 'empty')) {
    if(binary) { # binary
      con <- file(file, open = 'rb')
      # try() so the connection is always closed even if the header record is malformed
      try({
        v <- readChar(con, nchars = 11)              # version/code identifier string
        rec <- readBin(con, what = 'integer', n = 9) # header integer record
        # rec[3]/rec[4] > 0 signal active RCH/EVT packages — TODO confirm against FTL spec
        if(rec[3] > 0) lg['frch'] <- TRUE
        if(rec[4] > 0) lg['fevt'] <- TRUE
        # s <- grepl('MT3D', v, ignore.case = TRUE) # MT3DMS header
        usgs <- grepl('MTGS', v, ignore.case = TRUE) # MT3D-USGS
        if(usgs) {
          # MT3D-USGS appends a count + list of 20-character package names
          npk <- readBin(con, what = 'integer', n = 1)
          if(npk > 0) {
            rec2 <- vector(mode = 'character', length = npk)
            for(i in 1:npk) rec2[i] <- toupper(trimws(readChar(con, nchars = 20)))
            if(any(rec2 == 'UZF')) lg['fuzf'] <- TRUE
          }
        }
        # if(s) {
        #   version <- sub('MT3D', '', v, ignore.case = TRUE)
        #   vn <- as.numeric(strsplit(version, '\\.')[[1]][1])
        #   # standard header (v < 4, not supported by MODFLOW-2005) or extended header (v >= 4)
        #   # not necessary to read
        #   # ext_header <- vn >= 4
        #   # if(ext_header) rec2 <- readBin(con, what = 'integer', n = 12)
        # 
        # } else if(usgs) {
        #   npk <- readBin(con, what = 'integer', n = 1)
        #   rec2 <- vector(mode = 'character', length = npk)
        #   for(i in 1:npk) rec2[i] <- readChar(con, nchars = 12)
        # 
        # } else {
        #   stop('Can only read flow-transport link with MT3DMS or MT3D-USGS headers', call. = FALSE)
        # }
      })
      close(con)
    } else { # ASCII
      # only the header is needed; 40 lines is ample for the version record + package list
      lines <- readr::read_lines(file, n_max = 40, lazy = FALSE)
      rec <- rmti_parse_variables(lines, n = 10, format = 'free', character = TRUE)
      v <- trimws(rec$variables[1])
      # same flag positions as the binary record, offset by the leading version string
      if(as.numeric(rec$variables[4]) > 0) lg['frch'] <- TRUE
      if(as.numeric(rec$variables[5]) > 0) lg['fevt'] <- TRUE
      usgs <- grepl('MTGS', v, ignore.case = TRUE) # MT3D-USGS
      if(usgs) {
        remaining_lines <- rec$remaining_lines
        # if(length(rec$variables) < 9) remaining_lines <- remaining_lines[-1]
        ds <- rmti_parse_variables(remaining_lines, n = 1, format = 'free')
        npk <- as.numeric(ds$variables)  # number of package names that follow
        remaining_lines <- ds$remaining_lines
        if(npk > 0) {
          # package names may be quoted; strip quotes and whitespace before comparing
          rec2 <- trimws(gsub('\'', '', remaining_lines[1:npk]))
          if(any(toupper(rec2) == 'UZF')) lg['fuzf'] <- TRUE
        }
      }
    }
  }
  return(lg)
}
#' Get an array specified by a control record from the text lines analyzed in a \code{RMT3DMS} \code{rmt_read_*} function
#' @param remaining_lines lines to read the array from
#' @param nrow number of rows in the array
#' @param ncol number of columns in the array
#' @param nlay number of layers in the array that should be read
#' @param ndim dimensions of the array to read; either 1, 2 or 3. Denotes if the returned array should be 1D, 2D or 3D.
#' @param skip_header optional; should the control record be skipped
#' @param nam a \code{RMT3DMS} nam object. Required when reading external arrays
#' @param precision character: either \code{'single'} (default) or \code{'double'}. Denotes the precision of binary files
#' @param file pathname to the input file which is currently being read. Required when reading fixed-format or MODFLOW-style OPEN/CLOSE arrays
#' @param integer logical; does the binary array hold integer values. Might not work optimally.
#' @param ... ignored
#' @return A list containing the array and the remaining text of the MT3DMS input file
#' @details Supports MT3DMS IREAD codes 0 (CONSTANT), 100 (fixed-format array), 101 (block),
#'   102 (zone), 103 (free-format list) and EXTERNAL (ASCII or binary) arrays, as well as
#'   MODFLOW-style free-format control records (delegated to RMODFLOW). The multiplier
#'   \code{cnst} is applied exactly once per layer.
#' @keywords internal
rmti_parse_array <- function(remaining_lines, nrow, ncol, nlay, ndim,
                             skip_header = FALSE, nam = NULL, precision = "single", file = NULL, integer = FALSE, ...) {
  # Initialize array object
  array <- array(dim=c(nrow,ncol,nlay))
  # Read array according to format type if there is anything to be read
  if(prod(dim(array))!=0)
  {
    for(k in 1:nlay)
    {
      header <- rmti_parse_variables(remaining_lines[1], n = 2, format = 'fixed')
      # MODFLOW-style free format control header: delegate to RMODFLOW and convert
      if(header$variables[1] %in% c('CONSTANT', 'INTERNAL', 'EXTERNAL', 'OPEN/CLOSE') || skip_header) {
        rmf_data_set <- RMODFLOW:::rmfi_parse_array(remaining_lines, nrow = nrow, ncol = ncol, nlay = 1, ndim = ndim,
                                                    skip_header = skip_header, nam = nam, precision = precision, file = file, integer = integer, ...)
        array[,,k] <- rmt_convert_rmf_to_rmt(rmf_data_set$array)
        remaining_lines <- rmf_data_set$remaining_lines
        rm(rmf_data_set)
      } else {
        # MT3DMS fixed format control header: IREAD (cols 1-10), CNSTNT (11-20), FMTIN (21-40)
        fortranfmt <- FALSE
        iread <- as.numeric(header$variables[1])
        cnst <- as.numeric(header$variables[2])
        fmtin <- trimws(paste0(strsplit(remaining_lines[1], split = '')[[1]][21:40], collapse = ''))
        if(iread == 0) { # CONSTANT
          array[,,k] <- cnst
          nLines <- 1
        } else if(iread == 100) { # INTERNAL-ARRAY, fixed FORTRAN format
          lengths <- RMODFLOW:::rmfi_fortran_format(fmtin)
          remaining_lines <- remaining_lines[-1]
          if(cnst == 0) cnst <- 1.0
          # re-split each fixed-width line into space-separated fields
          remaining_lines[1] <- paste(substring(remaining_lines[1], first = cumsum(lengths) - lengths + 1, last = cumsum(lengths)), collapse = ' ')
          nPerLine <- length(lengths)
          nLines <- (ncol %/% nPerLine + ifelse((ncol %% nPerLine)==0, 0, 1))*nrow
          if(nLines > 1) remaining_lines[2:nLines] <- vapply(2:(nLines), function(i) paste(substring(remaining_lines[i], first = cumsum(lengths) - lengths + 1, last = cumsum(lengths)), collapse = ' '), 'text')
          # cnst is applied here, once; a second `array[,,k] <- array[,,k]*cnst` that
          # previously followed squared the multiplier and has been removed
          array[,,k] <- cnst*matrix(as.numeric(rmti_remove_empty_strings(strsplit(paste(remaining_lines[1:nLines],collapse='\n'),' |,| ,|, |\t|\n| \n|\n ')[[1]])),nrow=nrow,ncol=ncol,byrow=TRUE)
        } else if(iread == 101) { # INTERNAL-BLOCK: nblock records of (i1, i2, j1, j2, value)
          remaining_lines <- remaining_lines[-1]
          if(cnst == 0) cnst <- 1.0
          ds <- rmti_parse_variables(remaining_lines, n = 1, format = 'free')
          nblock <- as.numeric(ds$variables[1])
          remaining_lines <- ds$remaining_lines
          nLines <- 0
          for(block in 1:nblock) {
            ds2 <- rmti_parse_variables(remaining_lines, n = 5, format = 'free')
            block_values <- as.numeric(ds2$variables[1:5])
            array[block_values[1]:block_values[2], block_values[3]:block_values[4], k] <- block_values[5]
            remaining_lines <- ds2$remaining_lines
          }
          array[,,k] <- array[,,k]*cnst
        } else if(iread == 102) { # INTERNAL-ZONE: zone numbers mapped to zone values
          lengths <- RMODFLOW:::rmfi_fortran_format(fmtin)
          remaining_lines <- remaining_lines[-1]
          if(cnst == 0) cnst <- 1.0
          ds <- rmti_parse_variables(remaining_lines, n = 1, format = 'free')
          nzone <- as.numeric(ds$variables[1])
          remaining_lines <- ds$remaining_lines
          ds2 <- rmti_parse_variables(remaining_lines, n = nzone, format = 'free')
          zv <- as.numeric(ds2$variables[1:nzone])
          remaining_lines <- ds2$remaining_lines
          zone <- array(dim = c(nrow, ncol))
          remaining_lines[1] <- paste(substring(remaining_lines[1], first = cumsum(lengths) - lengths + 1, last = cumsum(lengths)), collapse = ' ')
          nPerLine <- length(lengths)
          nLines <- (ncol %/% nPerLine + ifelse((ncol %% nPerLine)==0, 0, 1))*nrow
          if(nLines > 1) remaining_lines[2:nLines] <- vapply(2:(nLines), function(i) paste(substring(remaining_lines[i], first = cumsum(lengths) - lengths + 1, last = cumsum(lengths)), collapse = ' '), 'text')
          # zone numbers are read without the multiplier; cnst is applied after zone mapping
          zone <- matrix(as.numeric(rmti_remove_empty_strings(strsplit(paste(remaining_lines[1:nLines],collapse='\n'),' |,| ,|, |\t|\n| \n|\n ')[[1]])),nrow=nrow,ncol=ncol,byrow=TRUE)
          for(nz in 1:nzone) {
            zone[which(zone == nz)] <- zv[nz]
          }
          array[,,k] <- zone*cnst
        } else if(iread == 103) { # INTERNAL-LIST, free format with n*value repeats and '/' terminator
          remaining_lines <- remaining_lines[-1]
          if(cnst == 0) cnst <- 1.0
          n_final <- nrow*ncol
          n <- 0
          nLines <- 0
          values <- vector(mode = 'numeric')
          end <- FALSE
          while(n < n_final && !end) {
            nLines <- nLines + 1
            values_ch <- rmti_remove_empty_strings(strsplit(paste(remaining_lines[nLines],collapse='\n'),' |,| ,|, |\t|\n| \n|\n ')[[1]])
            # terminate if / is encountered
            slash <- grepl('/', values_ch)
            if(any(slash)) {
              last_value <- which(slash)[1]
              values_ch[last_value] <- sub('/.*', '', values_ch[last_value])
              values_ch <- values_ch[1:last_value]
              end <- TRUE
            }
            # expand FORTRAN-style 'count*value' repeat specifications
            values_ch <- strsplit(values_ch, '\\*')
            values_ch <- lapply(values_ch, function(i) rmti_ifelse0(length(i) > 1, rep(as.numeric(i[[2]]), as.numeric(i[[1]])), as.numeric(i)))
            values <- c(values, unlist(values_ch))
            n <- length(values)
          }
          array[,,k] <- matrix(as.numeric(values[1:n_final])*cnst, nrow = nrow, ncol = ncol, byrow = TRUE)
        } else { # EXTERNAL: iread is a unit number from the NAM file (negative = binary)
          if(cnst == 0) cnst <- 1.0
          if(is.null(nam)) stop('Please supply a MT3DMS nam object when reading EXTERNAL arrays', call. = FALSE)
          if(abs(iread) %in% nam$nunit) {
            fname <- nam$fname[which(nam$nunit == abs(iread))]
          } else if(any(nam$nunit == 0) && iread %in% rmtd_internal_nunit$nunit) { # nunit = 0
            ext_ftype <- rmtd_internal_nunit$ftype[which(rmtd_internal_nunit$nunit == iread)]
            fname <- nam$fname[which(nam$ftype == ext_ftype)]
          } else {
            stop('nunit for EXTERNAL array not found in NAM file', call. = FALSE)
          }
          direct <- attr(nam, 'dir')
          absfile <- file.path(fname)
          if(!file.exists(absfile)) absfile <- file.path(direct, fname) # try full name
          if(!file.exists(absfile)) stop('Could not determine path to EXTERNAL file on unit number ', iread, call. = FALSE)
          ext_file <- TRUE
          # ASCII
          if(iread > 0) {
            if(!(toupper(fmtin) %in% c('(FREE)', 'FREE', '(BINARY)','BINARY'))) {
              lengths <- RMODFLOW:::rmfi_fortran_format(fmtin)
              fortranfmt <- TRUE
            }
            if(iread == nam$nunit[which(basename(nam$fname) == basename(file))] || normalizePath(absfile) == normalizePath(file)) { # read from current file
              ext_file <- FALSE
              remaining_lines <- remaining_lines[-1]
              if(fortranfmt) {
                remaining_lines[1] <- paste(substring(remaining_lines[1], first = cumsum(lengths) - lengths + 1, last = cumsum(lengths)), collapse = ' ')
                nPerLine <- length(lengths)
                nLines <- (ncol %/% nPerLine + ifelse((ncol %% nPerLine)==0, 0, 1))*nrow
                if(nLines > 1) remaining_lines[2:nLines] <- vapply(2:(nLines), function(i) paste(substring(remaining_lines[i], first = cumsum(lengths) - lengths + 1, last = cumsum(lengths)), collapse = ' '), 'text')
                array[,,k] <- cnst*matrix(as.numeric(rmti_remove_empty_strings(strsplit(paste(remaining_lines[1:nLines],collapse='\n'),' |,| ,|, |\t|\n| \n|\n ')[[1]])),nrow=nrow,ncol=ncol,byrow=TRUE)
              } else {
                nPerLine <- length(as.numeric(rmti_remove_empty_strings(strsplit(remaining_lines[1],' |\t|,')[[1]])))
                nLines <- (ncol %/% nPerLine + ifelse((ncol %% nPerLine)==0, 0, 1))*nrow
                array[,,k] <- cnst*matrix(as.numeric(rmti_remove_empty_strings(strsplit(paste(remaining_lines[1:nLines],collapse='\n'),' |,| ,|, |\t|\n| \n|\n ')[[1]])),nrow=nrow,ncol=ncol,byrow=TRUE)
              }
            } else { # read from external file
              external_lines <- readr::read_lines(absfile, lazy = FALSE)
              # remove lines of previous arrays already consumed from this unit
              if(!is.null(attr(nam, as.character(iread)))) external_lines <- external_lines[-c(1:attr(nam, as.character(iread)))]
              if(fortranfmt) {
                external_lines[1] <- paste(substring(external_lines[1], first = cumsum(lengths) - lengths + 1, last = cumsum(lengths)), collapse = ' ')
                nPerLine <- length(lengths)
                nLines <- (ncol %/% nPerLine + ifelse((ncol %% nPerLine)==0, 0, 1))*nrow
                if(nLines > 1) external_lines[2:nLines] <- vapply(2:(nLines), function(i) paste(substring(external_lines[i], first = cumsum(lengths) - lengths + 1, last = cumsum(lengths)), collapse = ' '), 'text')
                array[,,k] <- cnst*matrix(as.numeric(rmti_remove_empty_strings(strsplit(paste(external_lines[1:nLines],collapse='\n'),' |,| ,|, |\t|\n| \n|\n ')[[1]])),nrow=nrow,ncol=ncol,byrow=TRUE)
              } else {
                nPerLine <- length(as.numeric(rmti_remove_empty_strings(strsplit(external_lines[1],' |\t|,')[[1]])))
                nLines <- (ncol %/% nPerLine + ifelse((ncol %% nPerLine)==0, 0, 1))*nrow
                array[,,k] <- cnst*matrix(as.numeric(rmti_remove_empty_strings(strsplit(paste(external_lines[1:nLines],collapse='\n'),' |,| ,|, |\t|\n| \n|\n ')[[1]])),nrow=nrow,ncol=ncol,byrow=TRUE)
              }
            }
          } else if(iread < 0) { # read binary from external file
            con <- file(absfile,open='rb')
            real_number_bytes <- ifelse(precision == 'single', 4, 8)
            type <- ifelse(integer, 'integer', 'numeric')
            size <- ifelse(type == 'integer', NA_integer_, real_number_bytes)
            if(type=='integer') warning('Reading integer binary EXTERNAL array might not work optimally')
            try({
              # skip records of arrays previously read from this unit
              if(!is.null(attr(nam, as.character(iread)))) {
                for(jj in 1:attr(nam, as.character(iread))) {
                  invisible(readBin(con, what = 'integer', n = 2))
                  invisible(readBin(con,what='numeric',n = 2, size = real_number_bytes))
                  invisible(readChar(con,nchars=16))
                  nncol <- readBin(con, what = 'integer', n = 1)
                  nnrow <- readBin(con, what = 'integer', n = 1)
                  invisible(readBin(con, what = 'integer', n = 1))
                  invisible(readBin(con,what='numeric',n = nncol * nnrow, size = real_number_bytes))
                }
              }
              # integer binary arrays should not have headers in MODFLOW (2005, v1.12 - see U2DINT subroutine, line 682)
              if(!integer) {
                invisible(readBin(con, what = 'integer', n = 2))
                invisible(readBin(con,what='numeric',n = 2, size = real_number_bytes))
                invisible(readChar(con,nchars=16))
                invisible(readBin(con, what = 'integer', n = 3))
              }
              array[,,k] <- cnst*aperm(array(readBin(con,what=type,n = ncol * nrow, size = size),dim=c(ncol, nrow)), c(2, 1))
              nLines <- 1})
            close(con)
          }
          if(ext_file) {
            # bookkeeping on the nam attribute: remember how many lines/records of this
            # unit have been consumed, so the next array starts at the right offset
            if(is.null(attr(nam, as.character(iread)))) {
              attr(nam, as.character(iread)) <- nLines
            } else {
              attr(nam, as.character(iread)) <- attr(nam, as.character(iread)) + nLines
            }
            nLines <- 1
          }
        }
        if(nLines > 0) remaining_lines <- remaining_lines[-c(1:nLines)]
      }
    }
  }
  # Set class of object (2darray; 3darray)
  if(ndim == 1) {
    array <- c(array(array,dim=nrow*ncol*nlay))
  } else if(ndim == 2) {
    array <- rmt_create_array(array[,,1], dim = c(nrow, ncol))
  } else if(ndim == 3) {
    array <- rmt_create_array(array, dim = c(nrow, ncol, nlay))
  } else {
    stop('ndim should be 1, 2 or 3')
  }
  # Return output of reading function
  data_set <- list(array = array, remaining_lines = remaining_lines)
  return(data_set)
}
#' Read MT3DMS single-line variables
#' @param remaining_lines character vector with the lines of the input file still to be processed
#' @param n integer; number of variables to be returned. If zero for \code{'free'} format, reads all values on a single line and does not check if n values are read or empty lines, '*' or '/' are present.
#' @param width integer; length of a single variable. Only used when format is \code{'fixed'}.
#' @param nlay integer; number of layers for which values are to be read. Only use when a 1D(NLAY) variable is read which may be specified on multiple lines. Only used when format is \code{'fixed'}.
#' @param character logical; should a character vector be returned. Prevents conversion from character names to numeric. Defaults to FALSE. Useful if only characters are present on the line.
#' @param format character, either \code{'free'} or \code{'fixed'}. When 'fixed', reads character fields of length 'width' and converts to numeric. Empty fields are set to zero.
#' @param ... ignored
#' @return list with elements \code{variables} (vector with values) and \code{remaining_lines} (lines not yet consumed)
#' @keywords internal
rmti_parse_variables <- function(remaining_lines, n, width = 10, nlay = NULL, character = FALSE, format = 'fixed', ...) {
  if(format == 'free') {
    if(n == 0) {
      # read everything on the first line; no repeat/terminator handling
      nLines <- 1
      variables <- rmti_remove_empty_strings(strsplit(rmti_remove_comments_end_of_line(remaining_lines[1]),' |\t|,')[[1]])
    } else {
      # accumulate values over as many lines as needed until n values are collected
      n.cnt <- nLines <- 0
      end <- FALSE
      variables <- vector(mode = 'character')
      while(n.cnt < n && !end) {
        nLines <- nLines + 1
        values_ch <- rmti_remove_empty_strings(strsplit(paste(remaining_lines[nLines],collapse='\n'),' |\t|,')[[1]])
        # terminate if / is encountered
        slash <- grepl('/', values_ch)
        if(any(slash)) {
          last_value <- which(slash)[1]
          values_ch[last_value] <- sub('/.*', '', values_ch[last_value])
          values_ch <- values_ch[1:last_value]
          end <- TRUE
        }
        # expand FORTRAN-style 'count*value' repeat specifications
        values_ch <- strsplit(values_ch, '\\*')
        values_ch <- lapply(values_ch, function(i) rmti_ifelse0(length(i) > 1, rep(i[[2]], as.numeric(i[[1]])), i))
        variables <- c(variables, unlist(values_ch))
        n.cnt <- length(variables)
      }
      variables <- variables[1:n]
    }
    # convert to numeric only when every value parses cleanly
    if(!character && !any(is.na(suppressWarnings(as.numeric(variables))))) variables <- as.numeric(variables)
    return(list(variables=variables,remaining_lines=remaining_lines[-c(1:nLines)]))
  } else if(format == 'fixed') { # every value has 'width' characters; empty values are zero
    # cut the first line into consecutive fields of 'width' characters
    variables <- (unlist(lapply(seq(1,nchar(remaining_lines[1]), by=width),
                                function(i) paste0(strsplit(rmti_remove_comments_end_of_line(remaining_lines[1]),'')[[1]][i:(i+min(width, nchar(remaining_lines[1])-i+1)-1)], collapse=''))))
    variables <- lapply(strsplit(variables, " |\t"), rmti_remove_empty_strings)
    variables[which(lengths(variables)==0)] <- 0 # empty values are set to 0
    variables <- unlist(variables)
    if(!is.null(nlay)) {
      # 1D(NLAY) variable: keep consuming continuation lines until nlay values are read.
      # NOTE(review): in this branch values are returned as-is without the numeric
      # conversion below — callers appear to convert themselves; confirm before changing.
      while(length(variables) < nlay) {
        remaining_lines <- remaining_lines[-1]
        variables <- append(variables, rmti_remove_empty_strings(strsplit(rmti_remove_comments_end_of_line(remaining_lines[1]),' |\t|,')[[1]]))
      }
    } else if(!character && !any(is.na(suppressWarnings(as.numeric(variables))))) { # convert to numeric
      variables <- as.numeric(variables)
      if(length(variables) < n) variables <- c(variables, rep(0, n - length(variables))) # append 0's if values are missing
    } else { # remain as character
      if(length(variables) < n) variables <- c(variables, rep('0', n - length(variables))) # append 0's if values are missing
    }
    return(list(variables=variables,remaining_lines=remaining_lines[-1]))
  }
}
#' Remove comments at the end of a string
#'
#' Everything from the first '!' onward (the FORTRAN-style inline comment marker)
#' is stripped; a string without '!' is returned unchanged.
#' @param line A string.
#' @return The string, without the commented part.
#' @keywords internal
rmti_remove_comments_end_of_line <- function(line) {
  bang_pos <- regexpr('!', line, fixed = TRUE)
  if (bang_pos > 0) {
    substr(line, 1, bang_pos - 1)
  } else {
    line
  }
}
#' Remove empty elements from a vector of strings.
#'
#' Keeps only the elements that are not the empty string "".
#' @param vector_of_strings Vector of strings.
#' @return Vector of strings without the empty items.
#' @keywords internal
rmti_remove_empty_strings <- function(vector_of_strings) {
  keep <- which(vector_of_strings != '')
  vector_of_strings[keep]
}
#' Write MT3DMS array
#' Internal function used in the rmt_write_* functions for writing array datasets
#' @param array array to write (vector, 2D matrix or 3D array)
#' @param file pathname to the file to write the array to
#' @param mf_style logical, should MODFLOW-style array headers be used (i.e. INTERNAL, EXTERNAL, OPEN/CLOSE, ...) ? Defaults to FALSE
#' @param format either 'free' (iread = 103, i.e. FORTRAN free format) or 'fixed' (iread = 100 using FORTRAN format 10G11.4). In both cases, iread = 0 when the array only contains 1 unique value. Defaults to 'free'.
#' @param cnstnt numeric constant to add to the array header which acts as a multiplier for the array values in MODFLOW. Default to 1
#' @param iprn iprn code to add to array header. Defaults to -1
#' @param append logical; should array be appended to the file. Defaults to TRUE
#' @param ... passed to \code{\link{RMODFLOW:::rmfi_write_array}} when mf_style is TRUE
#' @return NULL
#' @keywords internal
rmti_write_array <- function(array, file, mf_style = FALSE, format = 'free', cnstnt=1, iprn=-1, append=TRUE, ...) {
  if(mf_style) {
    # MODFLOW-style headers: delegate entirely to RMODFLOW
    RMODFLOW:::rmfi_write_array(array = array, file = file, cnstnt = cnstnt, iprn = iprn, append = append, ...)
  } else {
    # MT3DMS
    # only iread 0 & 100 & 103 supported (CONSTANT & INTERNAL-ARRAY fixed and free format)
    # format: (10G11.4)
    fmt <- '(10G11.4)'
    nPerLine <- 10
    width <- 11
    decimals <- 4
    iprn <- as.integer(iprn)
    # three cases by dimensionality: vector (no dim), 2D matrix, 3D array (one record per layer)
    if(is.null(dim(array))) {
      # uniform array: write a single CONSTANT (iread = 0) record
      if(prod(c(array)[1] == c(array))==1) {
        rmti_write_variables(0L, cnstnt * c(array)[1], file = file, append = append, width = 10)
      } else {
        if(format == 'free') {
          # iread = 103: free-format list, all values on one line
          rmti_write_variables(103L, cnstnt, '(free)', iprn, file = file, append = append, width = c(10, 10, 20, 10))
          cat(paste(paste(array, collapse=' '), '\n', sep=' '), file=file, append=append)
        } else if(format == 'fixed') {
          # iread = 100: pad to full lines of nPerLine fields formatted as G11.4
          rmti_write_variables(100L, cnstnt, fmt, iprn, file = file, append = append, width = c(10, 10, 20, 10))
          n <- length(c(array))
          nLines <- n %/% nPerLine
          remainder <- n %% nPerLine
          if(remainder > 0) nLines <- nLines + 1
          if(n > nPerLine) {
            # fill a transposed character matrix so trailing fields on the last line stay empty
            formatted_array <- t(matrix('', nrow = nLines, ncol = nPerLine))
            formatted_array[1:n] <- formatC(c(array), format = 'g', digits = 4, width = 11)
            formatted_array <- t(formatted_array)
          } else {
            formatted_array <- formatC(c(array), format = 'g', digits = 4, width = 11)
          }
          write.table(formatted_array, file=file, append=append, sep='', col.names=FALSE, row.names=FALSE, quote = FALSE)
        }
      }
    } else if(length(dim(array))==2) {
      # 2D array: same structure as the vector case, but rows are written row-wise
      if(prod(c(array)[1] == c(array))==1) {
        rmti_write_variables(0L, cnstnt * c(array)[1], file = file, append = append, width = 10)
      } else {
        if(format == 'free') {
          rmti_write_variables(103L, cnstnt, '(free)', iprn, file = file, append = append, width = c(10, 10, 20, 10))
          if(dim(array)[1] == 1) {
            cat(paste0(paste(array, collapse=' '),'\n'), file=file, append=append)
          } else {
            write.table(array, file=file, append=append, sep=' ', col.names=FALSE, row.names=FALSE)
          }
        } else if(format == 'fixed') {
          rmti_write_variables(100L, cnstnt, fmt, iprn, file = file, append = append, width = c(10, 10, 20, 10))
          n <- length(c(array))
          nLines <- n %/% nPerLine
          remainder <- n %% nPerLine
          if(remainder > 0) nLines <- nLines + 1
          if(n > nPerLine) {
            formatted_array <- t(matrix('', nrow = nLines, ncol = nPerLine))
            formatted_array[1:n] <- formatC(c(array), format = 'g', digits = 4, width = 11)
            formatted_array <- t(formatted_array)
          } else {
            formatted_array <- formatC(c(array), format = 'g', digits = 4, width = 11)
          }
          write.table(formatted_array, file=file, append=append, sep='', col.names=FALSE, row.names=FALSE, quote = FALSE)
        }
      }
    } else {
      # 3D array: one control record + data block per layer; only the first layer
      # honours the caller's append setting, all later layers always append
      for(i in 1:dim(array)[3])
      {
        if(prod(c(array[,,i])[1] == c(array[,,i]))==1) {
          rmti_write_variables(0L, cnstnt * c(array[,,i])[1], file = file, append = ifelse(i == 1, append, TRUE), width = 10)
        } else {
          if(format == 'free') {
            rmti_write_variables(103L, cnstnt, '(free)', iprn, file = file, append = ifelse(i == 1, append, TRUE), width = c(10, 10, 20, 10))
            if(dim(array)[1] == 1) {
              cat(paste0(paste(array[,,i], collapse=' '),'\n'), file=file, append=ifelse(i == 1, append, TRUE))
            } else {
              write.table(array[,,i], file=file, append=ifelse(i == 1, append, TRUE), sep=' ', col.names=FALSE, row.names=FALSE)
            }
          } else if(format == 'fixed') {
            rmti_write_variables(100L, cnstnt, fmt, iprn, file = file, append = ifelse(i == 1, append, TRUE), width = c(10, 10, 20, 10))
            n <- length(c(array[,,i]))
            nLines <- length(c(array[,,i])) %/% nPerLine
            remainder <- length(c(array[,,i])) %% nPerLine
            if(remainder > 0) nLines <- nLines + 1
            if(length(c(array[,,i])) > nPerLine) {
              formatted_array <- t(matrix('', nrow = nLines, ncol = nPerLine))
              formatted_array[1:n] <- formatC(c(array[,,i]), format = 'g', digits = 4, width = 11)
              formatted_array <- t(formatted_array)
            } else {
              formatted_array <- formatC(c(array[,,i]), format = 'g', digits = 4, width = 11)
            }
            write.table(formatted_array, file=file, append=ifelse(i == 1, append, TRUE), sep='', col.names=FALSE, row.names=FALSE, quote = FALSE)
          }
        }
      }
    }
  }
}
#' Write MT3DMS variables
#' Internal function used in the rmt_write_* functions for writing single line datasets
#' @param ... values to write on a single line, in order
#' @param file pathname to the file to write to
#' @param append logical; append to the file? Defaults to TRUE
#' @param format either \code{'fixed'} or \code{'free'}. Fixed format assumes fixed width character spaces for each value as determined by the width argument
#' @param width numeric vector with the character widths for each variable. If a single value, it is repeated.
#' @param integer logical; should all values be converted to integers? MT3D does not allow for exponents in integer values
#' @param iprn ignored
#' @return NULL
#' @keywords internal
rmti_write_variables <- function(..., file, append=TRUE, width = 10, format = 'fixed', integer = FALSE, iprn = -1) {
  arg <- list(...)
  arg <- arg[vapply(arg, function(i) all(nchar(i) > 0), TRUE)] # removes empty elements
  if(integer) arg <- lapply(arg, as.integer)
  # sets integers in proper format since Type is converted to double when vectorized
  if(format == 'free') {
    # free format: space-separated values on one line
    if(integer) {
      arg <- lapply(arg, formatC)
    } else {
      arg <- lapply(arg, as.character)
    }
    arg <- unlist(arg)
    cat(paste0(paste(arg, sep = ' ', collapse = ' '), '\n'), file=file, append=append)
  } else if(format == 'fixed') {
    # fixed format: each value right-aligned in its own field of width[i] characters
    arg <- unlist(lapply(arg, as.list), recursive = FALSE)
    if(length(width) == 1) width <- rep(width, length(arg))
    # pad short values with leading spaces; values wider than the field go through formatC
    arg <- lapply(1:length(arg), function(i) rmti_ifelse0(nchar(arg[[i]]) > width[i], formatC(arg[[i]], width = width[i]), paste0(paste0(rep(' ', width[i]-nchar(arg[[i]])), collapse = ''), as.character(arg[[i]]), collapse = '')))
    # hard-truncate every field to exactly width[i] characters
    arg <- lapply(1:length(arg), function(i) paste0(strsplit(arg[[i]], '')[[1]][1:width[i]], collapse = ''))
    arg <- unlist(arg)
    cat(paste0(paste0(arg, collapse=''), '\n'), file=file, append=append)
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/derive_disposition_status.R
\name{derive_disposition_status}
\alias{derive_disposition_status}
\title{Derive a Disposition Status at a Specific Timepoint}
\usage{
derive_disposition_status(
dataset,
dataset_ds,
new_var,
status_var,
format_new_var = format_eoxxstt_default,
filter_ds
)
}
\arguments{
\item{dataset}{Input dataset.}
\item{dataset_ds}{Dataset containing the disposition information (e.g.: ds).
It must contain:
\itemize{
\item \code{STUDYID}, \code{USUBJID},
\item The variable(s) specified in the \code{status_var}
\item The variables used in \code{filter_ds}.
}}
\item{new_var}{Name of the disposition status variable.
A variable name is expected (e.g. \code{EOSSTT}).}
\item{status_var}{The variable used to derive the disposition status.
A variable name is expected (e.g. \code{DSDECOD}).}
\item{format_new_var}{The format used to derive the status.
Default: \code{format_eoxxstt_default()} defined as:\preformatted{format_eoxxstt_default <- function(x) \{
case_when(
x == "COMPLETED" ~ "COMPLETED",
x != "COMPLETED" & !is.na(x) ~ "DISCONTINUED",
TRUE ~ "ONGOING"
)
\}
}
where \code{x} is the \code{status_var.}}
\item{filter_ds}{Filter condition for the disposition data.
The condition must select at most one observation per patient. An error is issued otherwise.
Permitted Values: logical expression.}
}
\value{
The input dataset with the disposition status (\code{new_var}) added.
\code{new_var} is derived based on the values given in \code{status_var} and according to the format
defined by \code{format_new_var} (e.g. when the default format is used, the function will derive
\code{new_var} as:
"COMPLETED" if \code{status_var} == "COMPLETED",
"DISCONTINUED" if \code{status_var} is not "COMPLETED" nor NA,
"ONGOING" otherwise).
}
\description{
Derive a disposition status from the relevant records in the disposition domain.
}
\examples{
library(dplyr, warn.conflicts = FALSE)
data("dm")
data("ds")
# Default derivation: EOSSTT =
#- COMPLETED when status_var = COMPLETED
#- DISCONTINUED when status_var is not COMPLETED nor NA
#- ONGOING otherwise
dm \%>\%
derive_disposition_status(
dataset_ds = ds,
new_var = EOSSTT,
status_var = DSDECOD,
filter_ds = DSCAT == "DISPOSITION EVENT"
) \%>\%
select(STUDYID, USUBJID, EOSSTT)
# Specific derivation: EOSSTT =
#- COMPLETED when status_var = COMPLETED
#- DISCONTINUED DUE TO AE when status_var = ADVERSE EVENT
#- DISCONTINUED NOT DUE TO AE when status_var != ADVERSE EVENT nor COMPLETED nor missing
#- ONGOING otherwise
format_eoxxstt1 <- function(x) {
case_when(
x == "COMPLETED" ~ "COMPLETED",
x == "ADVERSE EVENT" ~ "DISCONTINUED DUE TO AE",
!(x \%in\% c("ADVERSE EVENT", "COMPLETED")) & !is.na(x) ~ "DISCONTINUED NOT DUE TO AE",
TRUE ~ "ONGOING"
)
}
dm \%>\%
derive_disposition_status(
dataset_ds = ds,
new_var = EOSSTT,
status_var = DSDECOD,
format_new_var = format_eoxxstt1,
filter_ds = DSCAT == "DISPOSITION EVENT"
) \%>\%
select(STUDYID, USUBJID, EOSSTT)
}
\author{
Samia Kabi
}
\keyword{adsl}
| /man/derive_disposition_status.Rd | no_license | rajkboddu/admiral | R | false | true | 3,132 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/derive_disposition_status.R
\name{derive_disposition_status}
\alias{derive_disposition_status}
\title{Derive a Disposition Status at a Specific Timepoint}
\usage{
derive_disposition_status(
dataset,
dataset_ds,
new_var,
status_var,
format_new_var = format_eoxxstt_default,
filter_ds
)
}
\arguments{
\item{dataset}{Input dataset.}
\item{dataset_ds}{Dataset containing the disposition information (e.g.: ds).
It must contain:
\itemize{
\item \code{STUDYID}, \code{USUBJID},
\item The variable(s) specified in the \code{status_var}
\item The variables used in \code{filter_ds}.
}}
\item{new_var}{Name of the disposition status variable.
A variable name is expected (e.g. \code{EOSSTT}).}
\item{status_var}{The variable used to derive the disposition status.
A variable name is expected (e.g. \code{DSDECOD}).}
\item{format_new_var}{The format used to derive the status.
Default: \code{format_eoxxstt_default()} defined as:\preformatted{format_eoxxstt_default <- function(x) \{
case_when(
x == "COMPLETED" ~ "COMPLETED",
x != "COMPLETED" & !is.na(x) ~ "DISCONTINUED",
TRUE ~ "ONGOING"
)
\}
}
where \code{x} is the \code{status_var.}}
\item{filter_ds}{Filter condition for the disposition data.
The filtered observations must result in one observation per patient; an error is issued otherwise.
Permitted Values: logical expression.}
}
\value{
The input dataset with the disposition status (\code{new_var}) added.
\code{new_var} is derived based on the values given in \code{status_var} and according to the format
defined by \code{format_new_var} (e.g. when the default format is used, the function will derive
\code{new_var} as:
"COMPLETED" if \code{status_var} == "COMPLETED",
"DISCONTINUED" if \code{status_var} is not "COMPLETED" nor NA,
"ONGOING" otherwise).
}
\description{
Derive a disposition status from the the relevant records in the disposition domain.
}
\examples{
library(dplyr, warn.conflicts = FALSE)
data("dm")
data("ds")
# Default derivation: EOSSTT =
#- COMPLETED when status_var = COMPLETED
#- DISCONTINUED when status_var is not COMPLETED nor NA
#- ONGOING otherwise
dm \%>\%
derive_disposition_status(
dataset_ds = ds,
new_var = EOSSTT,
status_var = DSDECOD,
filter_ds = DSCAT == "DISPOSITION EVENT"
) \%>\%
select(STUDYID, USUBJID, EOSSTT)
# Specific derivation: EOSSTT =
#- COMPLETED when status_var = COMPLETED
#- DISCONTINUED DUE TO AE when status_var = ADVERSE EVENT
#- DISCONTINUED NOT DUE TO AE when status_var != ADVERSE EVENT nor COMPLETED nor missing
#- ONGOING otherwise
format_eoxxstt1 <- function(x) {
case_when(
x == "COMPLETED" ~ "COMPLETED",
x == "ADVERSE EVENT" ~ "DISCONTINUED DUE TO AE",
!(x \%in\% c("ADVERSE EVENT", "COMPLETED")) & !is.na(x) ~ "DISCONTINUED NOT DUE TO AE",
TRUE ~ "ONGOING"
)
}
dm \%>\%
derive_disposition_status(
dataset_ds = ds,
new_var = EOSSTT,
status_var = DSDECOD,
format_new_var = format_eoxxstt1,
filter_ds = DSCAT == "DISPOSITION EVENT"
) \%>\%
select(STUDYID, USUBJID, EOSSTT)
}
\author{
Samia Kabi
}
\keyword{adsl}
|
\alias{GCancellable}
\alias{gCancellable}
\name{GCancellable}
\title{GCancellable}
\description{Thread-safe Operation Cancellation Stack}
\section{Methods and Functions}{
\code{\link{gCancellableNew}()}\cr
\code{\link{gCancellableIsCancelled}(object)}\cr
\code{\link{gCancellableSetErrorIfCancelled}(object, .errwarn = TRUE)}\cr
\code{\link{gCancellableGetFd}(object)}\cr
\code{\link{gCancellableReleaseFd}(object)}\cr
\code{\link{gCancellableGetCurrent}()}\cr
\code{\link{gCancellablePopCurrent}(object)}\cr
\code{\link{gCancellablePushCurrent}(object)}\cr
\code{\link{gCancellableReset}(object)}\cr
\code{\link{gCancellableDisconnect}(object, handler.id)}\cr
\code{\link{gCancellableCancel}(object)}\cr
\code{gCancellable()}
}
\section{Hierarchy}{\preformatted{GObject
+----GCancellable}}
\section{Detailed Description}{GCancellable is a thread-safe operation cancellation stack used
throughout GIO to allow for cancellation of synchronous and
asynchronous operations.}
\section{Structures}{\describe{\item{\verb{GCancellable}}{
Allows actions to be cancelled.
}}}
\section{Convenient Construction}{\code{gCancellable} is the equivalent of \code{\link{gCancellableNew}}.}
\section{Signals}{\describe{\item{\code{cancelled(cancellable, user.data)}}{
Emitted when the operation has been cancelled.
Can be used by implementations of cancellable operations. If the
operation is cancelled from another thread, the signal will be
emitted in the thread that cancelled the operation, not the
thread that is running the operation.
Note that disconnecting from this signal (or any signal) in a
multi-threaded program is prone to race conditions. For instance
it is possible that a signal handler may be invoked even
\emph{after} a call to
\code{\link{gSignalHandlerDisconnect}} for that handler has already
returned.
There is also a problem when cancellation happens
right before connecting to the signal. If this happens the
signal will unexpectedly not be emitted, and checking before
connecting to the signal leaves a race condition where this is
still happening.
In order to make it safe and easy to connect handlers there
are two helper functions: \code{gCancellableConnect()} and
\code{\link{gCancellableDisconnect}} which protect against problems
like this.
An example of how to use this:
\preformatted{
## Make sure we don't do any unnecessary work if already cancelled
if (cancellable$setErrorIfCancelled())
return()
## Set up all the data needed to be able to
## handle cancellation of the operation
my_data <- myData(...)
id <- 0
if (!is.null(cancellable))
id <- cancellable$connect(cancelled_handler, data, NULL)
## cancellable operation here...
cancellable$disconnect(id)
}
Note that the cancelled signal is emitted in the thread that
the user cancelled from, which may be the main thread. So, the
cancellable signal should not do something that can block.
\describe{
\item{\code{cancellable}}{a \code{\link{GCancellable}}.}
\item{\code{user.data}}{user data set when the signal handler was connected.}
}
}}}
\references{\url{https://developer.gnome.org/gio/stable/GCancellable.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /man/GCancellable.Rd | no_license | cran/RGtk2 | R | false | false | 3,192 | rd | \alias{GCancellable}
\alias{gCancellable}
\name{GCancellable}
\title{GCancellable}
\description{Thread-safe Operation Cancellation Stack}
\section{Methods and Functions}{
\code{\link{gCancellableNew}()}\cr
\code{\link{gCancellableIsCancelled}(object)}\cr
\code{\link{gCancellableSetErrorIfCancelled}(object, .errwarn = TRUE)}\cr
\code{\link{gCancellableGetFd}(object)}\cr
\code{\link{gCancellableReleaseFd}(object)}\cr
\code{\link{gCancellableGetCurrent}()}\cr
\code{\link{gCancellablePopCurrent}(object)}\cr
\code{\link{gCancellablePushCurrent}(object)}\cr
\code{\link{gCancellableReset}(object)}\cr
\code{\link{gCancellableDisconnect}(object, handler.id)}\cr
\code{\link{gCancellableCancel}(object)}\cr
\code{gCancellable()}
}
\section{Hierarchy}{\preformatted{GObject
+----GCancellable}}
\section{Detailed Description}{GCancellable is a thread-safe operation cancellation stack used
throughout GIO to allow for cancellation of synchronous and
asynchronous operations.}
\section{Structures}{\describe{\item{\verb{GCancellable}}{
Allows actions to be cancelled.
}}}
\section{Convenient Construction}{\code{gCancellable} is the equivalent of \code{\link{gCancellableNew}}.}
\section{Signals}{\describe{\item{\code{cancelled(cancellable, user.data)}}{
Emitted when the operation has been cancelled.
Can be used by implementations of cancellable operations. If the
operation is cancelled from another thread, the signal will be
emitted in the thread that cancelled the operation, not the
thread that is running the operation.
Note that disconnecting from this signal (or any signal) in a
multi-threaded program is prone to race conditions. For instance
it is possible that a signal handler may be invoked even
\emph{after} a call to
\code{\link{gSignalHandlerDisconnect}} for that handler has already
returned.
There is also a problem when cancellation happens
right before connecting to the signal. If this happens the
signal will unexpectedly not be emitted, and checking before
connecting to the signal leaves a race condition where this is
still happening.
In order to make it safe and easy to connect handlers there
are two helper functions: \code{gCancellableConnect()} and
\code{\link{gCancellableDisconnect}} which protect against problems
like this.
An example of how to use this:
\preformatted{
## Make sure we don't do any unnecessary work if already cancelled
if (cancellable$setErrorIfCancelled())
return()
## Set up all the data needed to be able to
## handle cancellation of the operation
my_data <- myData(...)
id <- 0
if (!is.null(cancellable))
id <- cancellable$connect(cancelled_handler, data, NULL)
## cancellable operation here...
cancellable$disconnect(id)
}
Note that the cancelled signal is emitted in the thread that
the user cancelled from, which may be the main thread. So, the
cancellable signal should not do something that can block.
\describe{
\item{\code{cancellable}}{a \code{\link{GCancellable}}.}
\item{\code{user.data}}{user data set when the signal handler was connected.}
}
}}}
\references{\url{https://developer.gnome.org/gio/stable/GCancellable.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
## This function creates a special "matrix" object that can cache its inverse
## Build a cache-aware wrapper around a matrix.
##
## Returns a list of four accessor functions that close over the matrix
## `x` and a memoised inverse, so that cacheSolve() can reuse a
## previously computed inverse instead of recomputing it.
##   set(mtx)        -- replace the stored matrix, dropping any cached inverse
##   get()           -- return the stored matrix
##   setInverse(inverse) -- store a computed inverse in the cache
##   getInverse()    -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  ## Cached inverse; NULL means "not computed yet".
  matt <- NULL
  set <- function(mtx) {
    ## Replacing the matrix invalidates the cached inverse.
    x <<- mtx
    matt <<- NULL
  }
  get <- function() x
  setInverse <- function(inverse) matt <<- inverse
  getInverse <- function() matt
  ## Expose the four accessors by name.
  list(
    set = set,
    get = get,
    setInverse = setInverse,
    getInverse = getInverse
  )
}
## This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and
## the matrix has not changed), then cacheSolve should retrieve the inverse
## from the cache.
## Return the inverse of the special "matrix" created by makeCacheMatrix().
##
## If the inverse has already been computed (and the matrix has not been
## replaced via set(), which clears the cache), the cached value is
## returned with a message instead of being recomputed.
##
## x   : list of accessors produced by makeCacheMatrix()
## ... : further arguments passed on to solve()
## Value: the inverse of the stored matrix.
cacheSolve <- function(x, ...) {
  m <- x$getInverse()
  ## Cache hit: skip the computation entirely.
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  ## BUG FIX: the original computed `solve(data) %*% data`, i.e. the
  ## inverse multiplied back by the matrix -- which is the identity
  ## matrix -- and then cached that. solve(data, ...) alone yields the
  ## inverse (when solve()'s `b` argument is omitted).
  m <- solve(data, ...)
  x$setInverse(m)
  m
}
| /cachematrix.R | no_license | Alok0108/ProgrammingAssignment2 | R | false | false | 1,424 | r |
## This function creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
## Initialize the inverse property
matt <- NULL
## Method to set the matrix
set <- function(mtx){
x <<- mtx
matt <<- NULL
}
## Method the get the matrix
get <- function(){
x
}
## Method to set the inverse of the matrix
setInverse <- function(inverse) {
matt <<- inverse
}
## Method to get the inverse of the matrix
getInverse <- function() {
## Return the inverse property
matt
}
## Return a list of the methods
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and
## the matrix has not changed), then cacheSolve should retrieve the inverse
## from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getInverse()
## Just return the inverse if its already set
if( !is.null(m) ) {
message("getting cached data")
return(m)
}
## Get the matrix from our object
data <- x$get()
## Calculate the inverse using matrix multiplication
m <- solve(data) %*% data
## Set the inverse to the object
x$setInverse(m)
## Return the matrix
m
}
|
####时间分析
library(plyr)
nyt_17_articles = readRDS("nyt_17_articles.rds")
text_from_nyt = nyt_17_articles$text_list_nyt
nyt_token = tokenize(as.character(text_from_nyt),removePunct = T)
ul1 = lapply(nyt_token, ldply)
for(i in seq(1)){
data_frame = data.frame(ul1[[i]])
data_frame["article"] = i
#data_frame
}
total = c()
for(i in seq(1:17)){
data_frame = data.frame(ul1[[i]])
data_frame["article"] = i
total = rbind(total,data_frame)
}
total
nyt_17_articles["article"] = rownames(nyt_17_articles)
dtm = dtm.create(total$article, total$V1)
s = c(13,27,15,11,22,11,12,14,12,11,26,11,11,11,13,13,10)
nyt_17_articles$time_number = s
a = nyt_17_articles$time_nyt
wordfreqs = dtm.to.df(dtm)
wordfreqs = merge(nyt_17_articles,wordfreqs,by.x="article",by.y="doc")
mmode <- function(v){uniqv <- unique(v); uniqv[which.max(tabulate(match(v, uniqv)))]}
dates = aggregate(wordfreqs["time_number"], by=wordfreqs["term"], FUN=mmode)
#cmp = corpora.compare(dtm, select.rows = obama)
cmp = arrange(merge(cmp, dates), -termfreq)
cmp
length(cmp$term)
with(head(cmp, 150), plotWords(x = time_number,words=term,wordfreq=termfreq,random.y =T,col=col,scale=1))
#######
| /time_analysis_1.R | no_license | peiweihe/united_airlines | R | false | false | 1,177 | r | ####时间分析
library(plyr)
## Time analysis of token frequencies across 17 NYT articles.
## NOTE(review): tokenize(), dtm.create(), dtm.to.df(), corpora.compare()
## and plotWords() are not defined here and come from packages loaded
## elsewhere; this script is not self-contained.
nyt_17_articles = readRDS("nyt_17_articles.rds")
text_from_nyt = nyt_17_articles$text_list_nyt
nyt_token = tokenize(as.character(text_from_nyt), removePunct = TRUE)
## One data.frame of tokens per article.
ul1 = lapply(nyt_token, ldply)
## NOTE(review): this first loop only runs for i = 1 and its result is
## discarded -- it looks like leftover debugging code.
for(i in seq(1)){
  data_frame = data.frame(ul1[[i]])
  data_frame["article"] = i
  #data_frame
}
## Stack the 17 per-article token tables, tagging each row with its
## article id.
total = c()
for(i in seq(1:17)){
  data_frame = data.frame(ul1[[i]])
  data_frame["article"] = i
  total = rbind(total, data_frame)
}
total
nyt_17_articles["article"] = rownames(nyt_17_articles)
## Document-term matrix: one document per article.
dtm = dtm.create(total$article, total$V1)
## Hand-coded time point for each of the 17 articles.
s = c(13,27,15,11,22,11,12,14,12,11,26,11,11,11,13,13,10)
nyt_17_articles$time_number = s
a = nyt_17_articles$time_nyt
wordfreqs = dtm.to.df(dtm)
wordfreqs = merge(nyt_17_articles, wordfreqs, by.x="article", by.y="doc")
## Statistical mode: the most frequent value in v.
mmode <- function(v){uniqv <- unique(v); uniqv[which.max(tabulate(match(v, uniqv)))]}
## Modal time point per term.
dates = aggregate(wordfreqs["time_number"], by=wordfreqs["term"], FUN=mmode)
#cmp = corpora.compare(dtm, select.rows = obama)
## NOTE(review): `cmp` is used here before it is defined -- the line
## above that would create it is commented out; this script fails as-is.
cmp = arrange(merge(cmp, dates), -termfreq)
cmp
length(cmp$term)
## Plot the 150 most frequent terms at their modal time point.
with(head(cmp, 150), plotWords(x = time_number, words = term,
                               wordfreq = termfreq, random.y = TRUE,
                               col = col, scale = 1))
#######
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download.url.R
\name{download.url}
\alias{download.url}
\title{Download file from the url.}
\usage{
download.url(url, file, timeout = 600, .opts = list(), retry404 = TRUE)
}
\arguments{
\item{url}{the url of the file to download}
\item{file}{the filename}
\item{timeout}{number of seconds to wait for file (default 600)}
\item{.opts}{list of options for curl, for example to download from a
protected site use list(userpwd=userpass, httpauth = 1L)}
\item{retry404}{retry on a 404, this is used by Brown Dog}
}
\value{
returns name of file if successful or NA if not.
}
\description{
Try and download a file.
}
\details{
This will download a file. If \code{retry404} is set and a 404 is returned, it will
wait and retry until the file is available. If the file is still not available
after \code{timeout} attempts, it will return \code{NA}. If the file is downloaded,
it will return the name of the file.
}
\examples{
\dontrun{
download.url('http://localhost/', index.html)
}
}
| /base/utils/man/download.url.Rd | permissive | ashiklom/pecan | R | false | true | 1,013 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download.url.R
\name{download.url}
\alias{download.url}
\title{Download file from the url.}
\usage{
download.url(url, file, timeout = 600, .opts = list(), retry404 = TRUE)
}
\arguments{
\item{url}{the url of the file to download}
\item{file}{the filename}
\item{timeout}{number of seconds to wait for file (default 600)}
\item{.opts}{list of options for curl, for example to download from a
protected site use list(userpwd=userpass, httpauth = 1L)}
\item{retry404}{retry on a 404, this is used by Brown Dog}
}
\value{
returns name of file if successful or NA if not.
}
\description{
Try and download a file.
}
\details{
This will download a file, if retry404 and 404 is returned it will
wait until the file is available. If the file is still not available
after timeout tries, it will return NA. If the file is downloaded
it will return the name of the file
}
\examples{
\dontrun{
download.url('http://localhost/', index.html)
}
}
|
library('KernSmooth')
v <- c(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 6.75992761767219e-10, 2.6898412386344717e-09, 0.0, 0.0, 7.05148273105749e-10, 0.0, 0.0, 2.15676076997795e-09, 7.86069154212754e-10, 1.0530158078836394e-08, 8.05010003102069e-10, 2.750184524558108e-09, 6.70825084370108e-09, 0.0, 7.831103543409768e-10, 3.3711879954267943e-09, 0.0, 1.3376082463878447e-08, 5.1371777942677e-09, 2.7137407876409725e-09, 5.488800525199622e-09, 2.3588215825043335e-09, 0.0, 2.297882772950288e-09, 4.0264418466051666e-09, 4.509577600231296e-09, 4.163419387026579e-09, 0.0, 4.119892427212335e-09, 6.3566565344785886e-09, 8.508320270550485e-09, 1.964633256079651e-08, 3.816146953994348e-09, 1.6158185900394528e-09, 1.7579219857566386e-08, 9.113046317565932e-09, 0.0, 4.693678778977528e-09, 4.117363783251449e-09, 5.547278192352678e-09, 2.7532935931162683e-09, 2.5676545334363254e-09, 4.069465209255441e-09, 1.1604386340735573e-09, 1.1896278406808847e-09, 1.176486685849909e-09, 1.1991960757740117e-09, 1.7211456704302464e-09, 2.1776846992338506e-09, 5.247626777560299e-10, 0.0, 0.0, 2.3102715296374754e-09, 8.147825192317271e-10, 1.1145626643838114e-09, 2.369873186580662e-09, 0.0, 1.9535804085535347e-09, 1.735336985220215e-09, 1.927556558811716e-09, 7.308098570746324e-09, 7.218641240314128e-10, 1.4529188963052775e-09, 1.4018888272460117e-09, 2.822707845240302e-09, 2.0576063075594675e-09, 7.053182482508191e-10, 2.593494530245266e-09, 2.323692571692959e-09, 8.988535471488035e-10, 1.546565875365502e-09, 1.091439050249221e-09, 2.10411466028404e-09, 1.6525671941991504e-09, 1.6463121976784123e-09, 1.968059271106881e-09, 1.3374278351463431e-09, 1.0758712809533222e-09, 6.953358444583557e-10, 4.928406016624365e-10, 1.2826857354042431e-09, 9.224092600845779e-10, 1.453305253917847e-09, 2.1524351190294055e-09, 2.2081922956829203e-09, 1.503659197155116e-09, 2.795319975490429e-09, 3.2015656792339087e-09, 2.530552434265587e-09, 2.612969396409426e-09, 2.861999526260206e-09, 2.8343078994907955e-09, 2.2857795656250346e-09, 
4.338205350506996e-09)
## Annual series of word frequencies, 1900-2000, built from the raw
## vector `v` defined above.
myts <- ts(v, start = 1900, end = 2000, frequency = 1)
Data <- data.frame(year = time(myts), Y = as.matrix(myts))
## LOESS smooth of frequency against year.
model.l <- loess(Y ~ year,
                 data = Data,
                 span = 0.75,         ### higher numbers for smoother fits
                 degree = 2,          ### use polynomials of order 2
                 family = "gaussian") ### the default, use least squares to fit
summary(model.l)
smoothed <- predict(model.l)
plot(myts)
## BUG FIX: `x` and `y` were used below without ever being defined.
## The kernel smoother is evidently meant to run on the same
## year/frequency data as the LOESS fit above.
x <- Data$year
y <- Data$Y
plot(x, y)
## Local polynomial fit with a direct plug-in bandwidth (KernSmooth).
h <- dpill(x, y)
fit <- locpoly(x, y, bandwidth = h)
lines(fit)
#lines(smoothed, x=Data$year, col="red")
## NOTE(review): plotPredy() is not provided by KernSmooth (it looks
## like rcompanion's helper) -- confirm the package is loaded elsewhere.
plotPredy(data = Data,
          x = year,
          y = Y,
          model = model.l,
          xlab = "Year",
          ylab = "Words")
| /Hw9_Ex_2.R | no_license | manuelgacos/AMSI_HW9 | R | false | false | 2,745 | r | library('KernSmooth')
v <- c(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 6.75992761767219e-10, 2.6898412386344717e-09, 0.0, 0.0, 7.05148273105749e-10, 0.0, 0.0, 2.15676076997795e-09, 7.86069154212754e-10, 1.0530158078836394e-08, 8.05010003102069e-10, 2.750184524558108e-09, 6.70825084370108e-09, 0.0, 7.831103543409768e-10, 3.3711879954267943e-09, 0.0, 1.3376082463878447e-08, 5.1371777942677e-09, 2.7137407876409725e-09, 5.488800525199622e-09, 2.3588215825043335e-09, 0.0, 2.297882772950288e-09, 4.0264418466051666e-09, 4.509577600231296e-09, 4.163419387026579e-09, 0.0, 4.119892427212335e-09, 6.3566565344785886e-09, 8.508320270550485e-09, 1.964633256079651e-08, 3.816146953994348e-09, 1.6158185900394528e-09, 1.7579219857566386e-08, 9.113046317565932e-09, 0.0, 4.693678778977528e-09, 4.117363783251449e-09, 5.547278192352678e-09, 2.7532935931162683e-09, 2.5676545334363254e-09, 4.069465209255441e-09, 1.1604386340735573e-09, 1.1896278406808847e-09, 1.176486685849909e-09, 1.1991960757740117e-09, 1.7211456704302464e-09, 2.1776846992338506e-09, 5.247626777560299e-10, 0.0, 0.0, 2.3102715296374754e-09, 8.147825192317271e-10, 1.1145626643838114e-09, 2.369873186580662e-09, 0.0, 1.9535804085535347e-09, 1.735336985220215e-09, 1.927556558811716e-09, 7.308098570746324e-09, 7.218641240314128e-10, 1.4529188963052775e-09, 1.4018888272460117e-09, 2.822707845240302e-09, 2.0576063075594675e-09, 7.053182482508191e-10, 2.593494530245266e-09, 2.323692571692959e-09, 8.988535471488035e-10, 1.546565875365502e-09, 1.091439050249221e-09, 2.10411466028404e-09, 1.6525671941991504e-09, 1.6463121976784123e-09, 1.968059271106881e-09, 1.3374278351463431e-09, 1.0758712809533222e-09, 6.953358444583557e-10, 4.928406016624365e-10, 1.2826857354042431e-09, 9.224092600845779e-10, 1.453305253917847e-09, 2.1524351190294055e-09, 2.2081922956829203e-09, 1.503659197155116e-09, 2.795319975490429e-09, 3.2015656792339087e-09, 2.530552434265587e-09, 2.612969396409426e-09, 2.861999526260206e-09, 2.8343078994907955e-09, 2.2857795656250346e-09, 
4.338205350506996e-09)
myts <- ts(v, start=1900, end=2000, frequency=1)
Data <- data.frame(year=time(myts), Y=as.matrix(myts))
model.l = loess(Y ~ year,
data = Data,
span = 0.75, ### higher numbers for smoother fits
degree=2, ### use polynomials of order 2
family="gaussian") ### the default, use least squares to fit
summary(model.l)
smoothed <- predict(model.l)
plot(myts)
plot(x, y)
h <- dpill(x, y)
fit <- locpoly(x, y, bandwidth = h)
lines(fit)
#lines(smoothed, x=Data$year, col="red")
plotPredy(data = Data,
x = year,
y = Y,
model = model.l,
xlab = "Year",
ylab = "Words")
|
/R上课/1y.R | no_license | ShuhanZhao/shuhanzhao1 | R | false | false | 201 | r | ||
# Docstrings -------------------------------------------------------------------
test_that("base functions don't have docstrings", {
expect_equal(docstring(`[`), NULL)
expect_equal(docstring(mean), NULL)
})
test_that("function return string doesn't have docstring", {
expect_equal(docstring(function() "a"), NULL)
expect_equal(docstring(function() {"a"}), NULL)
})
test_that("first string in function is docstring", {
expect_equal(docstring(function() {"a"; 1}), "a")
})
test_that("trim_docstring handles indentation correctly", {
expect_equal(trim_docstring("a\n b\n c"), "a\nb\nc")
expect_equal(trim_docstring("a\nb\nc"), "a\nb\nc")
expect_equal(trim_docstring("a\n b\n c"), "a\nb\n c")
expect_equal(trim_docstring(" a\n b\n c"), "a\nb\n c")
})
# Method documentation ---------------------------------------------------------
env <- pkg_env()
A1 <- setRefClass("A1", methods = list(
f = function() {
"This function has a docstring"
1
},
g = function() {
"This function doesn't"
}
), where = env)
B1 <- setRefClass("B1", contains = "A1", methods = list(
f1 = function() {
"This function has a docstring"
1
},
g1 = function() {
"This function doesn't"
}
), where = env)
classA <- getClass("A1", where = env)
classB <- getClass("B1", where = env)
test_that("rc_methods only lists methods belong to class (not parents)", {
expect_equal(length(rc_methods(classA)), 2)
expect_equal(length(rc_methods(classB)), 2)
})
## Typo fix in the test description: "included included" -> "included".
test_that("RC methods included in own section", {
  out <- roc_proc_text(rd_roclet(), "
#' Class ABC
setRefClass('ABC', methods = list(
f = function() {
'This function has a docstring'
1
}
))
")[[1]]
  methods <- out$get_value("rcmethods")
  expect_equal(names(methods), "f()")
  expect_match(methods[[1]], "This function has a docstring")
})
removeClass("B1", where = env)
removeClass("A1", where = env)
| /tests/testthat/test-object-rc.R | permissive | r-lib/roxygen2 | R | false | false | 1,949 | r | # Docstrings -------------------------------------------------------------------
test_that("base functions don't have docstrings", {
expect_equal(docstring(`[`), NULL)
expect_equal(docstring(mean), NULL)
})
test_that("function return string doesn't have docstring", {
expect_equal(docstring(function() "a"), NULL)
expect_equal(docstring(function() {"a"}), NULL)
})
test_that("first string in function is docstring", {
expect_equal(docstring(function() {"a"; 1}), "a")
})
test_that("trim_docstring handles indentation correctly", {
expect_equal(trim_docstring("a\n b\n c"), "a\nb\nc")
expect_equal(trim_docstring("a\nb\nc"), "a\nb\nc")
expect_equal(trim_docstring("a\n b\n c"), "a\nb\n c")
expect_equal(trim_docstring(" a\n b\n c"), "a\nb\n c")
})
# Method documentation ---------------------------------------------------------
env <- pkg_env()
A1 <- setRefClass("A1", methods = list(
f = function() {
"This function has a docstring"
1
},
g = function() {
"This function doesn't"
}
), where = env)
B1 <- setRefClass("B1", contains = "A1", methods = list(
f1 = function() {
"This function has a docstring"
1
},
g1 = function() {
"This function doesn't"
}
), where = env)
classA <- getClass("A1", where = env)
classB <- getClass("B1", where = env)
test_that("rc_methods only lists methods belong to class (not parents)", {
expect_equal(length(rc_methods(classA)), 2)
expect_equal(length(rc_methods(classB)), 2)
})
## Typo fix in the test description: "included included" -> "included".
test_that("RC methods included in own section", {
  out <- roc_proc_text(rd_roclet(), "
#' Class ABC
setRefClass('ABC', methods = list(
f = function() {
'This function has a docstring'
1
}
))
")[[1]]
  methods <- out$get_value("rcmethods")
  expect_equal(names(methods), "f()")
  expect_match(methods[[1]], "This function has a docstring")
})
removeClass("B1", where = env)
removeClass("A1", where = env)
|
library(methylKit)
######################## KG1A CTRL VS KG1A 48h ###########################################################
file.list=list(
"/root/annalisa_wgbs/bismark_report/DT24_KG1A_Control/DT24_r1_bismark_bt2_pe.CpG_report.txt",
"/root/annalisa_wgbs/bismark_report/DT80_KG1A_AZA_2uM_48h/DT80_r1_bismark_bt2_pe.CpG_report.txt")
myobj=methRead(file.list,
sample.id=list("KG1A_CTRL","KG1A_48h"),
assembly="hg38",
treatment=c(0,1),
context="CpG",
pipeline="bismarkCytosineReport",
header=FALSE,
mincov=4)
meth=unite(myobj, destrand=TRUE,mc.cores=40)
pooled.myDiff=calculateDiffMeth(meth,num.cores=40)
methData=getData(meth)
fisher_neb=data.frame(pooled.myDiff,KG1A_CTRL=(methData[,6]/methData[,5]),KG1A_48h=(methData[,9]/methData[,8]) )
saveRDS(fisher_neb,"KG1A_CTRL_VS_48h.rds")
######################## KG1A CTRL VS KG1A 72h ###########################################################
file.list=list(
"/root/annalisa_wgbs/bismark_report/DT24_KG1A_Control/DT24_r1_bismark_bt2_pe.CpG_report.txt",
"/root/annalisa_wgbs/bismark_report/DT82_KG1A_AZA_2uM_72h/DT82_r1_bismark_bt2_pe.CpG_report.txt")
myobj=methRead(file.list,
sample.id=list("KG1A_CTRL","KG1A_72h"),
assembly="hg38",
treatment=c(0,1),
context="CpG",
pipeline="bismarkCytosineReport",
header=FALSE,
mincov=4)
meth=unite(myobj, destrand=TRUE,mc.cores=40)
pooled.myDiff=calculateDiffMeth(meth,num.cores=40)
methData=getData(meth)
fisher_neb=data.frame(pooled.myDiff,KG1A_CTRL=(methData[,6]/methData[,5]),KG1A_72h=(methData[,9]/methData[,8]) )
saveRDS(fisher_neb,"KG1A_CTRL_VS_72h.rds")
######################## K562 CTRL VS K562 72h ###########################################################
file.list=list(
"/root/annalisa_wgbs/bismark_report/DT6_K562_Control/DT6_r1_bismark_bt2_pe.CpG_report.txt",
"/root/annalisa_wgbs/bismark_report/DT58_K562_AZA_2uM_48h/DT58_r1_bismark_bt2_pe.CpG_report.txt")
myobj=methRead(file.list,
sample.id=list("K562_CTRL","K562_48h"),
assembly="hg38",
treatment=c(0,1),
context="CpG",
pipeline="bismarkCytosineReport",
header=FALSE,
mincov=4)
meth=unite(myobj, destrand=TRUE,mc.cores=40)
pooled.myDiff=calculateDiffMeth(meth,num.cores=40)
methData=getData(meth)
fisher_neb=data.frame(pooled.myDiff,K562_CTRL=(methData[,6]/methData[,5]),K562_48h=(methData[,9]/methData[,8]) )
saveRDS(fisher_neb,"K562_CTRL_VS_48h.rds")
######################## K562 CTRL VS K562 72h ###########################################################
file.list=list(
"/root/annalisa_wgbs/bismark_report/DT6_K562_Control/DT6_r1_bismark_bt2_pe.CpG_report.txt",
"/root/annalisa_wgbs/bismark_report/DT60_K562_AZA_2uM_72h/DT60_r1_bismark_bt2_pe.CpG_report.txt")
myobj=methRead(file.list,
sample.id=list("K562_CTRL","K562_72h"),
assembly="hg38",
treatment=c(0,1),
context="CpG",
pipeline="bismarkCytosineReport",
header=FALSE,
mincov=4)
meth=unite(myobj, destrand=TRUE,mc.cores=40)
pooled.myDiff=calculateDiffMeth(meth,num.cores=40)
methData=getData(meth)
fisher_neb=data.frame(pooled.myDiff,K562_CTRL=(methData[,6]/methData[,5]),K562_72h=(methData[,9]/methData[,8]) )
saveRDS(fisher_neb,"K562_CTRL_VS_72h.rds")
| /analissa/3_methylkit.R | no_license | rtmag/5az | R | false | false | 3,399 | r |
library(methylKit)
######################## KG1A CTRL VS KG1A 48h ###########################################################
file.list=list(
"/root/annalisa_wgbs/bismark_report/DT24_KG1A_Control/DT24_r1_bismark_bt2_pe.CpG_report.txt",
"/root/annalisa_wgbs/bismark_report/DT80_KG1A_AZA_2uM_48h/DT80_r1_bismark_bt2_pe.CpG_report.txt")
myobj=methRead(file.list,
sample.id=list("KG1A_CTRL","KG1A_48h"),
assembly="hg38",
treatment=c(0,1),
context="CpG",
pipeline="bismarkCytosineReport",
header=FALSE,
mincov=4)
meth=unite(myobj, destrand=TRUE,mc.cores=40)
pooled.myDiff=calculateDiffMeth(meth,num.cores=40)
methData=getData(meth)
fisher_neb=data.frame(pooled.myDiff,KG1A_CTRL=(methData[,6]/methData[,5]),KG1A_48h=(methData[,9]/methData[,8]) )
saveRDS(fisher_neb,"KG1A_CTRL_VS_48h.rds")
######################## KG1A CTRL VS KG1A 72h ###########################################################
file.list=list(
"/root/annalisa_wgbs/bismark_report/DT24_KG1A_Control/DT24_r1_bismark_bt2_pe.CpG_report.txt",
"/root/annalisa_wgbs/bismark_report/DT82_KG1A_AZA_2uM_72h/DT82_r1_bismark_bt2_pe.CpG_report.txt")
myobj=methRead(file.list,
sample.id=list("KG1A_CTRL","KG1A_72h"),
assembly="hg38",
treatment=c(0,1),
context="CpG",
pipeline="bismarkCytosineReport",
header=FALSE,
mincov=4)
meth=unite(myobj, destrand=TRUE,mc.cores=40)
pooled.myDiff=calculateDiffMeth(meth,num.cores=40)
methData=getData(meth)
fisher_neb=data.frame(pooled.myDiff,KG1A_CTRL=(methData[,6]/methData[,5]),KG1A_72h=(methData[,9]/methData[,8]) )
saveRDS(fisher_neb,"KG1A_CTRL_VS_72h.rds")
######################## K562 CTRL VS K562 72h ###########################################################
file.list=list(
"/root/annalisa_wgbs/bismark_report/DT6_K562_Control/DT6_r1_bismark_bt2_pe.CpG_report.txt",
"/root/annalisa_wgbs/bismark_report/DT58_K562_AZA_2uM_48h/DT58_r1_bismark_bt2_pe.CpG_report.txt")
myobj=methRead(file.list,
sample.id=list("K562_CTRL","K562_48h"),
assembly="hg38",
treatment=c(0,1),
context="CpG",
pipeline="bismarkCytosineReport",
header=FALSE,
mincov=4)
meth=unite(myobj, destrand=TRUE,mc.cores=40)
pooled.myDiff=calculateDiffMeth(meth,num.cores=40)
methData=getData(meth)
fisher_neb=data.frame(pooled.myDiff,K562_CTRL=(methData[,6]/methData[,5]),K562_48h=(methData[,9]/methData[,8]) )
saveRDS(fisher_neb,"K562_CTRL_VS_48h.rds")
######################## K562 CTRL VS K562 72h ###########################################################
file.list=list(
"/root/annalisa_wgbs/bismark_report/DT6_K562_Control/DT6_r1_bismark_bt2_pe.CpG_report.txt",
"/root/annalisa_wgbs/bismark_report/DT60_K562_AZA_2uM_72h/DT60_r1_bismark_bt2_pe.CpG_report.txt")
myobj=methRead(file.list,
sample.id=list("K562_CTRL","K562_72h"),
assembly="hg38",
treatment=c(0,1),
context="CpG",
pipeline="bismarkCytosineReport",
header=FALSE,
mincov=4)
meth=unite(myobj, destrand=TRUE,mc.cores=40)
pooled.myDiff=calculateDiffMeth(meth,num.cores=40)
methData=getData(meth)
fisher_neb=data.frame(pooled.myDiff,K562_CTRL=(methData[,6]/methData[,5]),K562_72h=(methData[,9]/methData[,8]) )
saveRDS(fisher_neb,"K562_CTRL_VS_72h.rds")
|
# Las Vegas Zip code Miner
my_lv_zip_codes<-function() {
options(warn=-1)
url<-"https://zip-codes.com/city/nv-las-vegas.asp"
my_con<-file(url,"r")
my_txt<-readLines(my_con,-1)
close(my_con)
tb_loc<-grep("id=\"tblZIP\"",my_txt)
tb_end<-grep("[<][/]table[>]",my_txt)
my_line_loc<-tb_end[tb_end>tb_loc][1]
my_chunks<-strsplit(my_txt[my_line_loc],"[<][/]tr[>]")[[1]]
my_matrix<-matrix(NA,nrow=(length(my_chunks)-1),ncol=5)
for (i in 1:(length(my_chunks)-1)) {
i_chunk<-strsplit(my_chunks[i],"[<][/]td[>]")[[1]]
j_vec<-rep(NA,length(i_chunk))
for (j in 1:length(i_chunk)) {
j_vec[j]<-gsub("<.*?>","",i_chunk[j])
}
my_matrix[i,]<-j_vec
}
my_datfr<-data.frame(my_matrix,stringsAsFactors = FALSE)
names(my_datfr)<-c("ZIP","Type","County","Population","Area.Code")
my_datfr$ZIP<-gsub("ZIP Code ","",my_datfr$ZIP)
my_datfr$Population<-as.numeric(gsub("[,]","",my_datfr$Population))
return(my_datfr[my_datfr$Population>0,])
}
# lv<-my_lv_zip_codes()
# write.table(lv,file="lv-zip-codes.csv",sep=",",row.names=FALSE)
| /script-scrape-las-vegas-zips.R | no_license | jcrooker/Coursera_Capstone | R | false | false | 1,071 | r | # Las Vegas Zip code Miner
## Scrape the Las Vegas ZIP code table from zip-codes.com.
##
## Downloads the city page, locates the HTML table with id "tblZIP",
## splits its markup into rows and cells, strips the remaining tags and
## returns a data.frame with columns ZIP, Type, County, Population and
## Area.Code, keeping only rows with a positive population.
##
## NOTE(review): depends on the site's current markup (table rows on a
## single line, 5 cells per row); a layout change breaks the parsing.
my_lv_zip_codes <- function() {
  ## Suppress warnings only for the duration of this call. The original
  ## set options(warn = -1) and left it in effect globally after return.
  old_opts <- options(warn = -1)
  on.exit(options(old_opts), add = TRUE)
  url <- "https://zip-codes.com/city/nv-las-vegas.asp"
  my_con <- file(url, "r")
  ## Guarantee the connection is released even if readLines() fails
  ## (the original leaked it on error).
  on.exit(close(my_con), add = TRUE)
  my_txt <- readLines(my_con, -1)
  ## Find the line holding the ZIP table: the first "</table>" after the
  ## element with id="tblZIP".
  tb_loc <- grep("id=\"tblZIP\"", my_txt)[1]
  tb_end <- grep("[<][/]table[>]", my_txt)
  my_line_loc <- tb_end[tb_end > tb_loc][1]
  ## One "</tr>"-separated chunk per table row; the final chunk is the
  ## table tail, hence the length - 1 below.
  my_chunks <- strsplit(my_txt[my_line_loc], "[<][/]tr[>]")[[1]]
  my_matrix <- matrix(NA, nrow = length(my_chunks) - 1, ncol = 5)
  for (i in seq_len(length(my_chunks) - 1)) {
    ## Split the row into cells and strip all remaining HTML tags.
    i_chunk <- strsplit(my_chunks[i], "[<][/]td[>]")[[1]]
    j_vec <- rep(NA, length(i_chunk))
    for (j in seq_along(i_chunk)) {
      j_vec[j] <- gsub("<.*?>", "", i_chunk[j])
    }
    my_matrix[i, ] <- j_vec
  }
  my_datfr <- data.frame(my_matrix, stringsAsFactors = FALSE)
  names(my_datfr) <- c("ZIP", "Type", "County", "Population", "Area.Code")
  my_datfr$ZIP <- gsub("ZIP Code ", "", my_datfr$ZIP)
  my_datfr$Population <- as.numeric(gsub("[,]", "", my_datfr$Population))
  ## Keep only ZIP rows reporting a positive population.
  my_datfr[my_datfr$Population > 0, ]
}
# lv<-my_lv_zip_codes()
# write.table(lv,file="lv-zip-codes.csv",sep=",",row.names=FALSE)
|
library(dplyr)
library(mgcv)
#dat <- select(TMAO_1_4_5,-one_of("LVEF","ECG","blood_sugar","NYHA","C_reactive_protein","Aspirin","total_bilirubin", "direct_bilirubin","indirect_bilirubin","touxi"))
dat <- select(TMAO_1_4_5,one_of("age","sex","hypertension","Cardiothoracic_ratio","heart_ultrasound","eGFR","CHOL",
"Hemoglobin","C_reactive_protein","TMAO"))
# Missing-data overview of the selected columns.
# NOTE(review): `gg_na()` and `dat` (built from `TMAO_1_4_5`) come from
# earlier workspace code — presumably a project helper; confirm upstream.
gg_na(dat)
#dat <- to_impute(dat)
#dat <- na.omit(dat)
gg_na(dat)
dim(dat)
colnames(dat)
# Code the outcome as a factor: reference level "正常" (normal) vs
# "异常" (abnormal), so binomial models treat "abnormal" as the event.
dat$Cardiothoracic_ratio <- factor(dat$Cardiothoracic_ratio,levels = c("正常","异常"))
# Logistic GAM: smooth effect of TMAO (k = 4 basis dimension) on an
# abnormal cardiothoracic ratio.
gamfit.CTR <- mgcv::gam(Cardiothoracic_ratio~s(TMAO,k=4),data = dat,family = binomial)
plot(gamfit.CTR)
summary(gamfit.CTR)
dat$heart_ultrasound <- factor(dat$heart_ultrasound,levels = c("正常","异常"))
# Same smooth logistic model for an abnormal heart ultrasound.
gamfit.UCG <- mgcv::gam(heart_ultrasound~s(TMAO,k=4),data = dat,family = binomial)
plot(gamfit.UCG)
summary(gamfit.UCG)
# Gaussian GAM: smooth effect of TMAO on eGFR (continuous outcome).
gamfit.eGFR <- mgcv::gam(eGFR~s(TMAO,k=4),data = dat)
plot(gamfit.eGFR)
summary(gamfit.eGFR)
par(mfrow = c(1,3))
gamfit <- gam::gam(Cardiothoracic_ratio~s(TMAO,df=4)+heart_ultrasound+eGFR,data = dat,family = binomial)
summary(gamfit)
plot(gamfit,se=T)
#model for CTR 1 2 3
glm(Cardiothoracic_ratio ~TMAO,data = dat, family = binomial) %>% summary()
glm(Cardiothoracic_ratio ~TMAO+eGFR,data = dat, family = binomial) %>% summary()
glm(Cardiothoracic_ratio ~TMAO+age,data = dat, family = binomial) %>% summary()
glm(Cardiothoracic_ratio ~TMAO+hypertension,data = dat, family = binomial) %>% summary()
glm(Cardiothoracic_ratio ~TMAO+heart_ultrasound,data = dat, family = binomial) %>% summary()
glm(Cardiothoracic_ratio ~TMAO+CHOL,data = dat, family = binomial) %>% summary()
glm(Cardiothoracic_ratio ~TMAO+Hemoglobin,data = dat, family = binomial) %>% summary()#**0.02
glm(Cardiothoracic_ratio ~TMAO+C_reactive_protein,data = dat, family = binomial) %>% summary()
glm(Cardiothoracic_ratio~.,data = dat[,-c(4)],family = binomial) %>% summary()
glm(Cardiothoracic_ratio~.,data = dat[,-c(4,5)],family = binomial) %>% summary()
glm(Cardiothoracic_ratio~.,data = dat[,-c(4,5)],family = binomial) %>% step()
#model for UCG
glm(heart_ultrasound ~TMAO,data = dat, family = binomial) %>% summary()
glm(heart_ultrasound ~TMAO+eGFR,data = dat, family = binomial) %>% summary()
glm(heart_ultrasound ~TMAO+age,data = dat, family = binomial) %>% summary()
glm(heart_ultrasound ~TMAO+hypertension,data = dat, family = binomial) %>% summary()
glm(heart_ultrasound ~TMAO+Cardiothoracic_ratio,data = dat, family = binomial) %>% summary()
glm(heart_ultrasound ~TMAO+CHOL,data = dat, family = binomial) %>% summary()
glm(heart_ultrasound ~TMAO+Hemoglobin,data = dat, family = binomial) %>% summary()
glm(heart_ultrasound ~TMAO+C_reactive_protein,data = dat, family = binomial) %>% summary()
glm(heart_ultrasound~.,data = dat[,-c(3)],family = binomial) %>% summary()
glm(heart_ultrasound~.,data = dat[,-c(3,5)],family = binomial) %>% summary()
glm(heart_ultrasound~.,data = dat[,-c(3,5)],family = binomial) %>% step()
glm(formula = heart_ultrasound ~ age + hypertension + CHOL +
Hemoglobin + C_reactive_protein + TMAO, family = binomial,
data = dat[, -c(3, 5)]) %>% summary()
##**************************************************##
## ##
## Model for TMAO ##
## ##
##**************************************************##
dat$heart_ultrasound <- factor(dat$heart_ultrasound,levels = c("正常","异常"))
dat$Cardiothoracic_ratio <- factor(dat$Cardiothoracic_ratio,levels = c("正常","异常"))
dat$hypertension <- factor(dat$hypertension,levels = c("无","有"))
glm(TMAO ~heart_ultrasound+age+CHOL,data = dat) %>% summary()
glm(TMAO ~heart_ultrasound+age+CHOL+hypertension+eGFR,data = dat) %>% summary()
##**************************************************##
## ##
## Model for TMAO ##
## ##
##**************************************************##
glm(TMAO ~heart_ultrasound+eGFR+age+CHOL+
hypertension+C_reactive_protein,data = dat) %>% summary()
glm(TMAO ~eGFR,data = dat) %>% summary()#age 0.02
glm(TMAO ~eGFR+Hemoglobin,data = dat) %>% summary()
glm(TMAO ~eGFR+HDL,data = dat) %>% summary()
glm(TMAO ~eGFR+C_reactive_protein,data = dat) %>% summary()
glm(TMAO ~eGFR+hypertension,data = dat) %>% summary()
glm(TMAO ~Cardiothoracic_ratio+age+
CHOL+eGFR+hypertension,data = dat) %>% summary()#no eGFR _Hemoglobin
glm(TMAO ~eGFR+Cardiothoracic_ratio+age+hypertension+
HDL+Hemoglobin+C_reactive_protein,data = dat) %>% summary()#age
glm(TMAO ~eGFR+Cardiothoracic_ratio+age+hypertension+
CHOL+HDL+Hemoglobin+C_reactive_protein,data = dat) %>% summary()##age 0.02
| /02_scripts/demo/gam.R | no_license | jixing475/workflower2office | R | false | false | 4,860 | r | library(dplyr)
library(mgcv)
#dat <- select(TMAO_1_4_5,-one_of("LVEF","ECG","blood_sugar","NYHA","C_reactive_protein","Aspirin","total_bilirubin", "direct_bilirubin","indirect_bilirubin","touxi"))
dat <- select(TMAO_1_4_5,one_of("age","sex","hypertension","Cardiothoracic_ratio","heart_ultrasound","eGFR","CHOL",
"Hemoglobin","C_reactive_protein","TMAO"))
gg_na(dat)
#dat <- to_impute(dat)
#dat <- na.omit(dat)
gg_na(dat)
dim(dat)
colnames(dat)
dat$Cardiothoracic_ratio <- factor(dat$Cardiothoracic_ratio,levels = c("正常","异常"))
gamfit.CTR <- mgcv::gam(Cardiothoracic_ratio~s(TMAO,k=4),data = dat,family = binomial)
plot(gamfit.CTR)
summary(gamfit.CTR)
dat$heart_ultrasound <- factor(dat$heart_ultrasound,levels = c("正常","异常"))
gamfit.UCG <- mgcv::gam(heart_ultrasound~s(TMAO,k=4),data = dat,family = binomial)
plot(gamfit.UCG)
summary(gamfit.UCG)
gamfit.eGFR <- mgcv::gam(eGFR~s(TMAO,k=4),data = dat)
plot(gamfit.eGFR)
summary(gamfit.eGFR)
par(mfrow = c(1,3))
gamfit <- gam::gam(Cardiothoracic_ratio~s(TMAO,df=4)+heart_ultrasound+eGFR,data = dat,family = binomial)
summary(gamfit)
plot(gamfit,se=T)
#model for CTR 1 2 3
glm(Cardiothoracic_ratio ~TMAO,data = dat, family = binomial) %>% summary()
glm(Cardiothoracic_ratio ~TMAO+eGFR,data = dat, family = binomial) %>% summary()
glm(Cardiothoracic_ratio ~TMAO+age,data = dat, family = binomial) %>% summary()
glm(Cardiothoracic_ratio ~TMAO+hypertension,data = dat, family = binomial) %>% summary()
glm(Cardiothoracic_ratio ~TMAO+heart_ultrasound,data = dat, family = binomial) %>% summary()
glm(Cardiothoracic_ratio ~TMAO+CHOL,data = dat, family = binomial) %>% summary()
glm(Cardiothoracic_ratio ~TMAO+Hemoglobin,data = dat, family = binomial) %>% summary()#**0.02
glm(Cardiothoracic_ratio ~TMAO+C_reactive_protein,data = dat, family = binomial) %>% summary()
glm(Cardiothoracic_ratio~.,data = dat[,-c(4)],family = binomial) %>% summary()
glm(Cardiothoracic_ratio~.,data = dat[,-c(4,5)],family = binomial) %>% summary()
glm(Cardiothoracic_ratio~.,data = dat[,-c(4,5)],family = binomial) %>% step()
#model for UCG
glm(heart_ultrasound ~TMAO,data = dat, family = binomial) %>% summary()
glm(heart_ultrasound ~TMAO+eGFR,data = dat, family = binomial) %>% summary()
glm(heart_ultrasound ~TMAO+age,data = dat, family = binomial) %>% summary()
glm(heart_ultrasound ~TMAO+hypertension,data = dat, family = binomial) %>% summary()
glm(heart_ultrasound ~TMAO+Cardiothoracic_ratio,data = dat, family = binomial) %>% summary()
glm(heart_ultrasound ~TMAO+CHOL,data = dat, family = binomial) %>% summary()
glm(heart_ultrasound ~TMAO+Hemoglobin,data = dat, family = binomial) %>% summary()
glm(heart_ultrasound ~TMAO+C_reactive_protein,data = dat, family = binomial) %>% summary()
glm(heart_ultrasound~.,data = dat[,-c(3)],family = binomial) %>% summary()
glm(heart_ultrasound~.,data = dat[,-c(3,5)],family = binomial) %>% summary()
glm(heart_ultrasound~.,data = dat[,-c(3,5)],family = binomial) %>% step()
glm(formula = heart_ultrasound ~ age + hypertension + CHOL +
Hemoglobin + C_reactive_protein + TMAO, family = binomial,
data = dat[, -c(3, 5)]) %>% summary()
##**************************************************##
## ##
## Model for TMAO ##
## ##
##**************************************************##
dat$heart_ultrasound <- factor(dat$heart_ultrasound,levels = c("正常","异常"))
dat$Cardiothoracic_ratio <- factor(dat$Cardiothoracic_ratio,levels = c("正常","异常"))
dat$hypertension <- factor(dat$hypertension,levels = c("无","有"))
glm(TMAO ~heart_ultrasound+age+CHOL,data = dat) %>% summary()
glm(TMAO ~heart_ultrasound+age+CHOL+hypertension+eGFR,data = dat) %>% summary()
##**************************************************##
## ##
## Model for TMAO ##
## ##
##**************************************************##
glm(TMAO ~heart_ultrasound+eGFR+age+CHOL+
hypertension+C_reactive_protein,data = dat) %>% summary()
glm(TMAO ~eGFR,data = dat) %>% summary()#age 0.02
glm(TMAO ~eGFR+Hemoglobin,data = dat) %>% summary()
glm(TMAO ~eGFR+HDL,data = dat) %>% summary()
glm(TMAO ~eGFR+C_reactive_protein,data = dat) %>% summary()
glm(TMAO ~eGFR+hypertension,data = dat) %>% summary()
glm(TMAO ~Cardiothoracic_ratio+age+
CHOL+eGFR+hypertension,data = dat) %>% summary()#no eGFR _Hemoglobin
glm(TMAO ~eGFR+Cardiothoracic_ratio+age+hypertension+
HDL+Hemoglobin+C_reactive_protein,data = dat) %>% summary()#age
glm(TMAO ~eGFR+Cardiothoracic_ratio+age+hypertension+
CHOL+HDL+Hemoglobin+C_reactive_protein,data = dat) %>% summary()##age 0.02
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fpemreporting.R
\name{get_posterior_probability_from_cutoff_target}
\alias{get_posterior_probability_from_cutoff_target}
\title{Get posterior probability from proportion or population}
\usage{
get_posterior_probability_from_cutoff_target(
posterior_samples,
population_count_year = 1,
population_count_relative_to_year = 1,
indicator,
year,
relative_to_year = NULL,
above,
cutoff
)
}
\arguments{
\item{posterior_samples}{\emph{\sQuote{Array}} The samples array from \code{\link{fit_fp_csub}}.}
\item{indicator}{`integer` Indicator index (1 = modern, 2 = traditional, 3 = unmet)}
\item{year}{`integer` Year index}
\item{above}{`logical` If FALSE then it's below}
\item{cutoff}{`numeric` Cutoff proportion or population}
\item{population_count_year}{`integer` Number of individuals in the sample population for \code{year} (1 to calculate from proportion)}

\item{population_count_relative_to_year}{`integer` Number of individuals in the sample population for \code{relative_to_year} (1 to calculate from proportion)}
}
\value{
`numeric` Posterior probability
}
\description{
Get posterior probability from proportion or population
}
| /man/get_posterior_probability_from_cutoff_target.Rd | permissive | AlkemaLab/fpemlocal | R | false | true | 1,049 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fpemreporting.R
\name{get_posterior_probability_from_cutoff_target}
\alias{get_posterior_probability_from_cutoff_target}
\title{Get posterior probability from proportion or population}
\usage{
get_posterior_probability_from_cutoff_target(
posterior_samples,
population_count_year = 1,
population_count_relative_to_year = 1,
indicator,
year,
relative_to_year = NULL,
above,
cutoff
)
}
\arguments{
\item{posterior_samples}{\emph{\sQuote{Array}} The samples array from \code{\link{fit_fp_csub}}.}
\item{indicator}{`integer` Indicator index (1 = modern, 2 = traditional, 3 = unmet)}
\item{year}{`integer` Year index}
\item{above}{`logical` If FALSE then it's below}
\item{cutoff}{`numeric` Cutoff proportion or population}
\item{population_count_year}{`integer` Number of individuals in the sample population for \code{year} (1 to calculate from proportion)}

\item{population_count_relative_to_year}{`integer` Number of individuals in the sample population for \code{relative_to_year} (1 to calculate from proportion)}
}
\value{
`numeric` Posterior probability
}
\description{
Get posterior probability from proportion or population
}
|
### R code from vignette source 'RRIWS_3_ESDA_R.Rnw'
###################################################
### code chunk number 1: install-libraries
###################################################
install.packages(c('sp','spdep','maptools'))
###################################################
### code chunk number 2: library
###################################################
library(spdep)
library(maptools)
###################################################
### code chunk number 3: data01
###################################################
shape <- readShapePoly("Statesmod.shp",IDvar="NAME" )
summary(shape)
###################################################
### code chunk number 4: Create-weight-matrixdataqueen
###################################################
nb2=poly2nb(shape, queen=TRUE)
nb2
plot(shape, border="grey60")
plot(nb2, coordinates(shape), add=TRUE, pcv=".", lwd=2)
###################################################
### code chunk number 5: Create-weight-matrixqueen1
###################################################
nb2=poly2nb(shape, queen=TRUE)
plot(shape, border="grey60")
plot(nb2, coordinates(shape), add=TRUE, pcv=".", lwd=2)
###################################################
### code chunk number 6: Create-weight-matrixdatarook
###################################################
nb2=poly2nb(shape, queen=FALSE)
nb2
plot(shape, border="grey60")
plot(nb2, coordinates(shape), add=TRUE, pcv=".", lwd=2)
###################################################
### code chunk number 7: Create-weight-matrixrook
###################################################
nb2=poly2nb(shape, queen=F)
plot(shape, border="grey60")
plot(nb2, coordinates(shape), add=TRUE, pcv=".", lwd=2)
###################################################
### code chunk number 8: weight-matrixsidsrook
###################################################
sids <- readShapePoly("sids.shp", ID="FIPSNO")
sids_nbr<-poly2nb(sids,queen=F)
plot(sids, border="grey60")
plot(sids_nbr, coordinates(sids), add=TRUE, pcv=".", lwd=2)
###################################################
### code chunk number 9: Create-weight-matrixsidsqueen
###################################################
sids_nbq<-poly2nb(sids,queen=T)
plot(sids, border="grey60")
plot(sids_nbq, coordinates(sids), add=TRUE, pcv=".", lwd=2)
###################################################
### code chunk number 10: Compare-weight-matrixsidsqueen
###################################################
sids_nbq<-poly2nb(sids,queen=T)
sids_nbr<-poly2nb(sids,queen=F)
plot(sids, border="grey60")
plot(sids_nbq, coordinates(sids), add=TRUE, pcv=".",col="red", lwd=2)
plot(sids_nbr, coordinates(sids), add=TRUE, pcv=".",col="blue", lwd=2)
###################################################
### code chunk number 11: matrixstatesK4
###################################################
coords <- coordinates(shape)
states.knn <- knearneigh(coords, k=4)
plot(shape, border="grey60")
plot(knn2nb(states.knn), coordinates(shape), add=TRUE, pcv=".", lwd=2)
###################################################
### code chunk number 12: matrixstatesK3
###################################################
coords <- coordinates(shape)
states.knn <- knearneigh(coords, k=3)
plot(shape, border="grey60")
plot(knn2nb(states.knn), coordinates(shape), add=TRUE, pcv=".", lwd=2)
###################################################
### code chunk number 13: listw-weightmatrixdata
###################################################
nb2_B=nb2listw(nb2, style="B", zero.policy=TRUE)
nb2_W=nb2listw(nb2, style="W", zero.policy=TRUE)
###################################################
### code chunk number 14: save-weightmatrixdata
###################################################
write.nb.gal(nb2,"pesoscol.GAL")
###################################################
### code chunk number 15: Moran1
###################################################
moran.test(shape$Murder, listw=nb2_W,alternative="two.sided")
###################################################
### code chunk number 16: Moran1
###################################################
moran=moran.test(shape$Murder, listw=nb2_B)
moran
###################################################
### code chunk number 17: Moran1
###################################################
set.seed(1234)
bperm=moran.mc(shape$Murder, listw=nb2_W, nsim=999)
bperm
###################################################
### code chunk number 18: Moran1
###################################################
gearyR=geary.test(shape$Murder, listw=nb2_W)
gearyR
###################################################
### code chunk number 19: Moran1
###################################################
moran.plot(shape$Murder, listw=nb2_W,label=as.character(shape$STUSPS),xlab="Murder Rate", ylab="Spatially Lagged Murder Rate")
###################################################
### code chunk number 20: Moran_scatter1
###################################################
# Moran scatterplot of a variable against its own spatial lag.
#
# x:     numeric vector (captured unevaluated so its expression text is
#        used as the axis label)
# wfile: a spdep `listw` spatial-weights object
#
# Standardises x, computes its spatial lag, and plots one against the
# other; the fitted slope is Moran's I, shown in the title.
moran.plot2 <- function(x, wfile)
{
  xname <- deparse(substitute(x))           # variable name for labels
  zx <- (x - mean(x)) / sd(x)               # z-scores of x
  wzx <- lag.listw(wfile, zx, zero.policy = TRUE)
  morlm <- lm(wzx ~ zx)                     # slope of this fit = Moran's I
  aa <- morlm$coefficients[1]
  mori <- morlm$coefficients[2]
  op <- par(pty = "s")                      # square plotting region;
  on.exit(par(op), add = TRUE)              # restore caller's par() on exit
  plot(zx, wzx, xlab = xname, ylab = paste("Spatial Lag of ", xname))
  abline(aa, mori, col = 2)                 # Moran regression line
  abline(h = 0, lty = 2, col = 4)           # quadrant axes
  abline(v = 0, lty = 2, col = 4)
  title(paste("Moran Scatterplot I= ", format(round(mori, 4))))
}
moran.plot2(shape$Murder, nb2_W)
###################################################
### code chunk number 21: Moran_scatter2
###################################################
moran.plot2(shape$Murder, nb2_W)
###################################################
### code chunk number 22: Moran_scatter3
###################################################
# Bivariate Moran scatterplot: standardised x against the spatial lag of
# a second variable y.
#
# x, y:  numeric vectors (expression text captured for axis labels)
# wfile: a spdep `listw` spatial-weights object
#
# The fitted slope is the bivariate Moran's I of x and lag(y).
moran.plot3 <- function(x, y, wfile)
{
  xname <- deparse(substitute(x))           # labels taken from the call
  yname <- deparse(substitute(y))           # was `=` assignment; use `<-`
  zx <- (x - mean(x)) / sd(x)               # z-scores
  zy <- (y - mean(y)) / sd(y)
  wzy <- lag.listw(wfile, zy, zero.policy = TRUE)
  morlm <- lm(wzy ~ zx)                     # slope = bivariate Moran's I
  aa <- morlm$coefficients[1]
  mori <- morlm$coefficients[2]
  op <- par(pty = "s")                      # square region;
  on.exit(par(op), add = TRUE)              # restore caller's par() on exit
  plot(zx, wzy, xlab = xname, ylab = paste("Spatial Lag of ", yname))
  abline(aa, mori, col = 2)
  abline(h = 0, lty = 2, col = 4)
  abline(v = 0, lty = 2, col = 4)
  title(paste("Moran Scatterplot I= ", format(round(mori, 4))))
}
moran.plot3(shape$Murder,shape$Robbery, nb2_W)
###################################################
### code chunk number 23: Moran_scatter4
###################################################
moran.plot3(shape$Murder,shape$Robbery, nb2_W)
###################################################
### code chunk number 24: LMoran0
###################################################
# LISA cluster map: colours each polygon by its local Moran quadrant.
#
# var:    numeric vector of the mapped variable
# listw:  spdep `listw` spatial-weights object
# signif: p-value cutoff; units above it are drawn as "not significant"
# mapa:   the Spatial polygons object to plot
LISA.plot <- function(var, listw, signif, mapa) {
  mI.loc <- localmoran(var, listw, zero.policy = TRUE)  # was zero.policy=T
  c.var <- var - mean(var)                 # deviation of the variable
  quadrant <- vector(mode = "numeric", length = nrow(mI.loc))
  wzx <- lag.listw(listw, var, zero.policy = TRUE)
  c.mI1 <- wzx - mean(wzx)                 # deviation of the spatial lag
  # Quadrant codes: 4 = high-high, 1 = low-low, 2 = low-high,
  # 3 = high-low, 0 = not significant at the requested level.
  # (Removed the unused `c.mI` intermediate from the original.)
  quadrant[c.var > 0 & c.mI1 > 0] <- 4
  quadrant[c.var < 0 & c.mI1 < 0] <- 1
  quadrant[c.var < 0 & c.mI1 > 0] <- 2
  quadrant[c.var > 0 & c.mI1 < 0] <- 3
  quadrant[mI.loc[,5] > signif] <- 0       # column 5: p-value of local I
  brks <- c(0,1,2,3,4)
  colors <- c("white","blue",rgb(0,0,1,alpha=0.4),rgb(1,0,0,alpha=0.4),"red")
  plot(mapa, border = "gray90",
       col = colors[findInterval(quadrant, brks, all.inside = FALSE)])
  box()
  legend("bottomright",legend=c("not significant","low-low","low-high","high-low","high-high"), fill=colors,bty="n", cex=0.7,y.intersp=1,x.intersp=1)
  title("LISA Mapa de Clusters")
}
###################################################
### code chunk number 25: LMoran_scatter1
###################################################
LISA.plot(shape$Murder, nb2_W,0.1, shape)
###################################################
### code chunk number 26: LMoran_scatter2
###################################################
LISA.plot(shape$Murder, nb2_W,0.05, shape)
###################################################
### code chunk number 27: datacounties
###################################################
shapec <- readShapePoly("countiesmod.shp",IDvar="GEOID" )
summary(shapec)
###################################################
### code chunk number 28: county-matrixdataqueen
###################################################
nb2c=poly2nb(shapec, queen=TRUE)
nb2c
plot(shapec, border="grey60")
plot(nb2c, coordinates(shapec), add=TRUE, pcv=".", lwd=2)
nb2_Wc=nb2listw(nb2c, style="W", zero.policy=TRUE)
###################################################
### code chunk number 29: Moran_scattercounty1
###################################################
moran.plot2(shapec$Bachelor, nb2_Wc)
###################################################
### code chunk number 30: LMoran_county1
###################################################
LISA.plot(shapec$Bachelor, nb2_Wc,0.05, shapec)
| /ESDA notes/Class_3_ESDA_R.R | permissive | jtsayagog/Lectures-ESDA | R | false | false | 8,947 | r | ### R code from vignette source 'RRIWS_3_ESDA_R.Rnw'
###################################################
### code chunk number 1: install-libraries
###################################################
install.packages(c('sp','spdep','maptools'))
###################################################
### code chunk number 2: library
###################################################
library(spdep)
library(maptools)
###################################################
### code chunk number 3: data01
###################################################
shape <- readShapePoly("Statesmod.shp",IDvar="NAME" )
summary(shape)
###################################################
### code chunk number 4: Create-weight-matrixdataqueen
###################################################
nb2=poly2nb(shape, queen=TRUE)
nb2
plot(shape, border="grey60")
plot(nb2, coordinates(shape), add=TRUE, pcv=".", lwd=2)
###################################################
### code chunk number 5: Create-weight-matrixqueen1
###################################################
nb2=poly2nb(shape, queen=TRUE)
plot(shape, border="grey60")
plot(nb2, coordinates(shape), add=TRUE, pcv=".", lwd=2)
###################################################
### code chunk number 6: Create-weight-matrixdatarook
###################################################
nb2=poly2nb(shape, queen=FALSE)
nb2
plot(shape, border="grey60")
plot(nb2, coordinates(shape), add=TRUE, pcv=".", lwd=2)
###################################################
### code chunk number 7: Create-weight-matrixrook
###################################################
nb2=poly2nb(shape, queen=F)
plot(shape, border="grey60")
plot(nb2, coordinates(shape), add=TRUE, pcv=".", lwd=2)
###################################################
### code chunk number 8: weight-matrixsidsrook
###################################################
sids <- readShapePoly("sids.shp", ID="FIPSNO")
sids_nbr<-poly2nb(sids,queen=F)
plot(sids, border="grey60")
plot(sids_nbr, coordinates(sids), add=TRUE, pcv=".", lwd=2)
###################################################
### code chunk number 9: Create-weight-matrixsidsqueen
###################################################
sids_nbq<-poly2nb(sids,queen=T)
plot(sids, border="grey60")
plot(sids_nbq, coordinates(sids), add=TRUE, pcv=".", lwd=2)
###################################################
### code chunk number 10: Compare-weight-matrixsidsqueen
###################################################
sids_nbq<-poly2nb(sids,queen=T)
sids_nbr<-poly2nb(sids,queen=F)
plot(sids, border="grey60")
plot(sids_nbq, coordinates(sids), add=TRUE, pcv=".",col="red", lwd=2)
plot(sids_nbr, coordinates(sids), add=TRUE, pcv=".",col="blue", lwd=2)
###################################################
### code chunk number 11: matrixstatesK4
###################################################
coords <- coordinates(shape)
states.knn <- knearneigh(coords, k=4)
plot(shape, border="grey60")
plot(knn2nb(states.knn), coordinates(shape), add=TRUE, pcv=".", lwd=2)
###################################################
### code chunk number 12: matrixstatesK3
###################################################
coords <- coordinates(shape)
states.knn <- knearneigh(coords, k=3)
plot(shape, border="grey60")
plot(knn2nb(states.knn), coordinates(shape), add=TRUE, pcv=".", lwd=2)
###################################################
### code chunk number 13: listw-weightmatrixdata
###################################################
nb2_B=nb2listw(nb2, style="B", zero.policy=TRUE)
nb2_W=nb2listw(nb2, style="W", zero.policy=TRUE)
###################################################
### code chunk number 14: save-weightmatrixdata
###################################################
write.nb.gal(nb2,"pesoscol.GAL")
###################################################
### code chunk number 15: Moran1
###################################################
moran.test(shape$Murder, listw=nb2_W,alternative="two.sided")
###################################################
### code chunk number 16: Moran1
###################################################
moran=moran.test(shape$Murder, listw=nb2_B)
moran
###################################################
### code chunk number 17: Moran1
###################################################
set.seed(1234)
bperm=moran.mc(shape$Murder, listw=nb2_W, nsim=999)
bperm
###################################################
### code chunk number 18: Moran1
###################################################
gearyR=geary.test(shape$Murder, listw=nb2_W)
gearyR
###################################################
### code chunk number 19: Moran1
###################################################
moran.plot(shape$Murder, listw=nb2_W,label=as.character(shape$STUSPS),xlab="Murder Rate", ylab="Spatially Lagged Murder Rate")
###################################################
### code chunk number 20: Moran_scatter1
###################################################
moran.plot2 <- function(x,wfile)
{
xname <- deparse(substitute(x)) # get name of variable
zx <- (x - mean(x))/sd(x)
wzx <- lag.listw(wfile,zx, zero.policy=TRUE)
morlm <- lm(wzx ~ zx)
aa <- morlm$coefficients[1]
mori <- morlm$coefficients[2]
par(pty="s")
plot(zx,wzx,xlab=xname,ylab=paste("Spatial Lag of ",xname))
abline(aa,mori,col=2)
abline(h=0,lty=2,col=4)
abline(v=0,lty=2,col=4)
title(paste("Moran Scatterplot I= ",format(round(mori,4))))
}
moran.plot2(shape$Murder, nb2_W)
###################################################
### code chunk number 21: Moran_scatter2
###################################################
moran.plot2(shape$Murder, nb2_W)
###################################################
### code chunk number 22: Moran_scatter3
###################################################
moran.plot3 <- function(x,y,wfile)
{
xname <- deparse(substitute(x)) # get name of variable
yname =deparse(substitute(y))
zx <- (x - mean(x))/sd(x)
zy =(y - mean(y))/sd(y)
wzy <- lag.listw(wfile,zy, zero.policy=TRUE)
morlm <- lm(wzy ~ zx)
aa <- morlm$coefficients[1]
mori <- morlm$coefficients[2]
par(pty="s")
plot(zx,wzy,xlab=xname,ylab=paste("Spatial Lag of ",yname))
abline(aa,mori,col=2)
abline(h=0,lty=2,col=4)
abline(v=0,lty=2,col=4)
title(paste("Moran Scatterplot I= ",format(round(mori,4))))
}
moran.plot3(shape$Murder,shape$Robbery, nb2_W)
###################################################
### code chunk number 23: Moran_scatter4
###################################################
moran.plot3(shape$Murder,shape$Robbery, nb2_W)
###################################################
### code chunk number 24: LMoran0
###################################################
LISA.plot <- function(var,listw,signif,mapa) {
mI.loc <- localmoran(var,listw, zero.policy=T)
c.var <- var - mean(var)
c.mI <- mI.loc[,1] - mean(mI.loc[,1])
quadrant <- vector(mode="numeric",length=nrow(mI.loc))
wzx <- lag.listw(listw,var, zero.policy=TRUE)
c.mI1 <- wzx - mean(wzx)
quadrant[c.var>0 & c.mI1>0] <- 4
quadrant[c.var<0 & c.mI1<0] <- 1
quadrant[c.var<0 & c.mI1>0] <- 2
quadrant[c.var>0 & c.mI1<0] <- 3
quadrant[mI.loc[,5]>signif] <- 0
brks <- c(0,1,2,3,4)
colors <- c("white","blue",rgb(0,0,1,alpha=0.4),rgb(1,0,0,alpha=0.4),"red")
plot(mapa,border="gray90",col=colors[findInterval(quadrant,brks,all.inside=FALSE)])
box()
legend("bottomright",legend=c("not significant","low-low","low-high","high-low","high-high"), fill=colors,bty="n", cex=0.7,y.intersp=1,x.intersp=1)
title("LISA Mapa de Clusters")
}
###################################################
### code chunk number 25: LMoran_scatter1
###################################################
LISA.plot(shape$Murder, nb2_W,0.1, shape)
###################################################
### code chunk number 26: LMoran_scatter2
###################################################
LISA.plot(shape$Murder, nb2_W,0.05, shape)
###################################################
### code chunk number 27: datacounties
###################################################
shapec <- readShapePoly("countiesmod.shp",IDvar="GEOID" )
summary(shapec)
###################################################
### code chunk number 28: county-matrixdataqueen
###################################################
nb2c=poly2nb(shapec, queen=TRUE)
nb2c
plot(shapec, border="grey60")
plot(nb2c, coordinates(shapec), add=TRUE, pcv=".", lwd=2)
nb2_Wc=nb2listw(nb2c, style="W", zero.policy=TRUE)
###################################################
### code chunk number 29: Moran_scattercounty1
###################################################
moran.plot2(shapec$Bachelor, nb2_Wc)
###################################################
### code chunk number 30: LMoran_county1
###################################################
LISA.plot(shapec$Bachelor, nb2_Wc,0.05, shapec)
|
#' Write a data.frame/disk.frame to a disk.frame location. If df is a
#' data.frame, then df must contain the column .out.disk.frame.id. This is
#' intended to be a low-level version of writing disk.frames. Using the
#' as.disk.frame function is recommended for most cases
#' @param df a disk.frame
#' @param outdir output directory for the disk.frame
#' @param nchunks number of chunks
#' @param overwrite overwrite output directory
#' @param shardby the columns to shard by
#' @param compress compression ratio for fst files
#' @param ... passed to map.disk.frame
#' @export
#' @import fst fs
#' @importFrom glue glue
#' @examples
#' cars.df = as.disk.frame(cars)
#'
#' # write out a lazy disk.frame to disk
#' cars2.df = write_disk.frame(map(cars.df, ~.x[1,]), overwrite = TRUE)
#' collect(cars2.df)
#'
#' # clean up cars.df
#' delete(cars.df)
#' delete(cars2.df)
write_disk.frame <- function(df, outdir = tempfile(fileext = ".df"), nchunks = nchunks.disk.frame(df), overwrite = FALSE, shardby=NULL, compress = 50, ...) {
overwrite_check(outdir, overwrite)
if(is.null(outdir)) {
stop("outdir must not be NULL")
}
if(is_disk.frame(df)) {
map.disk.frame(df, ~.x, outdir = outdir, lazy = F, ..., compress = compress, overwrite = overwrite)
} else if ("data.frame" %in% class(df)) {
df[,{
if (base::nrow(.SD) > 0) {
fst::write_fst(.SD, file.path(outdir, paste0(.BY, ".fst")), compress = compress)
NULL
}
NULL
}, .out.disk.frame.id]
res = disk.frame(outdir)
add_meta(res, shardkey = shardby, shardchunks = nchunks, compress = compress)
} else {
stop("write_disk.frame error: df must be a disk.frame or data.frame")
}
}
#' @rdname write_disk.frame
output_disk.frame <- function(...) {
warning("output_disk.frame is DEPRECATED. Use write_disk.frame istead")
write_disk.frame(...)
} | /R/write_disk.frame.r | no_license | iqis/disk.frame | R | false | false | 1,864 | r | #' Write a data.frame/disk.frame to a disk.frame location. If df is a
#' data.frame, then df must contain the column .out.disk.frame.id. This is
#' intended to be a low-level version of writing disk.frames. Using the
#' as.disk.frame function is recommended for most cases
#' @param df a disk.frame
#' @param outdir output directory for the disk.frame
#' @param nchunks number of chunks
#' @param overwrite overwrite output directory
#' @param shardby the columns to shard by
#' @param compress compression ratio for fst files
#' @param ... passed to map.disk.frame
#' @export
#' @import fst fs
#' @importFrom glue glue
#' @examples
#' cars.df = as.disk.frame(cars)
#'
#' # write out a lazy disk.frame to disk
#' cars2.df = write_disk.frame(map(cars.df, ~.x[1,]), overwrite = TRUE)
#' collect(cars2.df)
#'
#' # clean up cars.df
#' delete(cars.df)
#' delete(cars2.df)
write_disk.frame <- function(df, outdir = tempfile(fileext = ".df"), nchunks = nchunks.disk.frame(df), overwrite = FALSE, shardby = NULL, compress = 50, ...) {
  # Validate outdir BEFORE using it: the original called
  # overwrite_check(outdir, overwrite) on a possibly-NULL outdir first.
  if (is.null(outdir)) {
    stop("outdir must not be NULL")
  }
  overwrite_check(outdir, overwrite)
  if (is_disk.frame(df)) {
    # Already a disk.frame: materialise chunk-by-chunk into the new
    # location (lazy = FALSE forces the write; was `lazy = F`).
    map.disk.frame(df, ~.x, outdir = outdir, lazy = FALSE, ..., compress = compress, overwrite = overwrite)
  } else if (inherits(df, "data.frame")) {
    # data.table grouped-`j` idiom: one fst file per .out.disk.frame.id
    # group; empty groups are skipped.
    df[, {
      if (base::nrow(.SD) > 0) {
        fst::write_fst(.SD, file.path(outdir, paste0(.BY, ".fst")), compress = compress)
        NULL
      }
      NULL
    }, .out.disk.frame.id]
    res <- disk.frame(outdir)
    add_meta(res, shardkey = shardby, shardchunks = nchunks, compress = compress)
  } else {
    stop("write_disk.frame error: df must be a disk.frame or data.frame")
  }
}
#' @rdname write_disk.frame
output_disk.frame <- function(...) {
  # Deprecated alias kept for backward compatibility; forwards all
  # arguments unchanged. (Fixed "istead" typo in the warning text.)
  warning("output_disk.frame is DEPRECATED. Use write_disk.frame instead")
  write_disk.frame(...)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/report_descriptives.r
\name{report_descriptives}
\alias{report_descriptives}
\alias{M}
\alias{SD}
\title{Report descriptives helper functions}
\usage{
M(identifier, var = NULL, group = NULL,
results = getOption("tidystats_list"))
SD(identifier, var = NULL, group = NULL,
results = getOption("tidystats_list"))
}
\arguments{
\item{identifier}{A character string identifying the descriptives.}
\item{var}{A character string identifying the exact variable, if needed.}
\item{group}{A character string identifying the group, if needed.}
\item{results}{A tidystats list.}
}
\description{
Report descriptives helper functions
}
\examples{
# Read in a list of results
descriptives <- read_stats(system.file("descriptives.csv",
package = "tidystats"))
options(tidystats_list = descriptives)
# Report the mean
M("D4_avoidance")
M("D5_avoidance_anxiety", var = "avoidance")
# Report the standard deviation
SD("D4_avoidance")
}
| /man/report_descriptives.Rd | permissive | ikbentimkramer/tidystats-v0.3 | R | false | true | 1,010 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/report_descriptives.r
\name{report_descriptives}
\alias{report_descriptives}
\alias{M}
\alias{SD}
\title{Report descriptives helper functions}
\usage{
M(identifier, var = NULL, group = NULL,
results = getOption("tidystats_list"))
SD(identifier, var = NULL, group = NULL,
results = getOption("tidystats_list"))
}
\arguments{
\item{identifier}{A character string identifying the descriptives.}
\item{var}{A character string identifying the exact variable, if needed.}
\item{group}{A character string identifiying the group, if needed.}
\item{results}{A tidystats list.}
}
\description{
Report descriptives helper functions
}
\examples{
# Read in a list of results
descriptives <- read_stats(system.file("descriptives.csv",
package = "tidystats"))
options(tidystats_list = descriptives)
# Report the mean
M("D4_avoidance")
M("D5_avoidance_anxiety", var = "avoidance")
# Report the standard deviation
SD("D4_avoidance")
}
|
# Auto-extracted example script for qgraph::flow(). The example body is wrapped
# in "Not run" (##D) comment lines, so sourcing this file only attaches qgraph.
library(qgraph)
### Name: flow
### Title: Draws network as a flow diagram showing how one node is
###   connected to all other nodes
### Aliases: flow
### ** Examples
## Not run: 
##D # Load data:
##D library("psych")
##D data(bfi)
##D 
##D # Compute polychoric correlations:
##D corMat <- cor_auto(bfi[,1:25])
##D 
##D # Glasso network:
##D g2 <- qgraph(corMat, cut = 0, graph = "glasso", sampleSize = nrow(bfi),
##D      threshold = TRUE)
##D 
##D # Flow from A2:
##D flow(g2, "A2", horizontal = TRUE)
## End(Not run)
| /data/genthat_extracted_code/qgraph/examples/flow.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 536 | r | library(qgraph)
### Name: flow
### Title: Draws network as a flow diagram showing how one node is
### connected to all other nodes
### Aliases: flow
### ** Examples
## Not run:
##D # Load data:
##D library("psych")
##D data(bfi)
##D
##D # Compute polychoric correlations:
##D corMat <- cor_auto(bfi[,1:25])
##D
##D # Glasso network:
##D g2 <- qgraph(corMat, cut = 0, graph = "glasso", sampleSize = nrow(bfi),
##D threshold = TRUE)
##D
##D # Flow from A2:
##D flow(g2, "A2", horizontal = TRUE)
## End(Not run)
|
#' Substitutes NaN in a vector with NA
#'
#' Substitutes NaN in a vector with NA
#'
#' @param x the vector with NaN
#'
#' @return the vector without NaN, but with NA instead
#'
#' @export
rm_nan <- function(x) {
  # Map every NaN entry onto NA, leaving all other elements untouched.
  replace(x, is.nan(x), NA)
}
#' Finds gaps in a vector and returns their startpoints and lengths
#'
#' Finds gaps in a vector and returns their startpoints and lengths
#'
#' @param x the vector
#'
#' @return a data.table with startpoints and lengths of all gaps in x
#'
#' @export
get_gaps <- function(x) {
  # Positions of observed (non-NA) values act as boundaries between gaps.
  boundaries <- which(!is.na(x))
  # A leading NA run has no left boundary, so prepend a virtual position 0.
  if (is.na(x[1])) {
    boundaries <- c(0, boundaries)
  }
  # Distance between consecutive boundaries (plus a virtual end boundary at
  # length(x) + 1) minus one is the number of consecutive NAs between them.
  run_len <- diff(c(boundaries, length(x) + 1)) - 1
  keep <- which(run_len > 0)
  data.table(gap_start = boundaries[keep] + 1, gap_size = run_len[keep])
}
#' Exclude long 0-sequences
#'
#' Removes all rows with long 0-sequences in var from a table
#'
#' @param data the table
#' @param var the variable to be checked
#' @param limit the max. allowed 0-sequence-length
#'
#' @return the table without 0-sequences in var
#'
#' @export
# Run-length-encoding approach: find maximal runs of the value 0 in column
# `var` that are strictly longer than `limit`, then drop those rows.
# NOTE(review): assumes `data` is a data.table (uses .SD/.SDcols and
# data.table row deletion) -- confirm callers never pass a plain data.frame.
remove_long_zero = function(data, var = "b", limit = 120)
{
  # starts and lengths of same-value-intervals in data$var
  rnums = rle(unlist(data[, .SD, .SDcols = var]))
  # cumulative starts of same-value-intervals in data (1-based)
  length_sums = cumsum(c(1,rnums$lengths))
  # indices (within rnums) of 0-runs strictly longer than `limit`;
  # a run of exactly `limit` zeros is kept
  long_series_length_idx = which(rnums$lengths > limit & rnums$values == 0)
  # lengths of those long 0-runs
  long_series_lengths = rnums$lengths[long_series_length_idx]
  # row indices in data where each long 0-run starts
  long_series_idx = length_sums[long_series_length_idx]
  # expand each (start, length) pair into all affected row indices;
  # mapply over zero-length inputs yields list(), so unlist gives length 0
  all_long_element_idx = as.vector(unlist(mapply(seq, from=long_series_idx, length.out=long_series_lengths)))
  # remove long 0-sequences from data (negative indexing needs non-empty idx)
  if (length(all_long_element_idx) != 0) {
    data = data[-(all_long_element_idx),]
  }
  return (data)
}
| /R/helper.R | permissive | thuzarwin/imputeData | R | false | false | 2,021 | r |
#' Substitutes NaN in a vector with NA
#'
#' Substitutes NaN in a vector with NA
#'
#' @param x the vecor with NaN
#'
#' @return the vecor without NaN, but with NA instead
#'
#' @export
rm_nan = function(x) {
x[is.nan(x)] = NA
return(x)
}
#' Finds gaps in a vector and returns their startpoints and lengths
#'
#' Finds gaps in a vector and returns their startpoints and lengths
#'
#' @param x the vector
#'
#' @return a data.table with startpoints and lengths of all gaps in x
#'
#' @export
get_gaps = function(x) {
ind = which(!is.na(x)) # get positions of nonmissing values
if (is.na(x[1])) # if it begins with NA
ind = c(0, ind) # add first pos
gap_size = diff(c(ind, length(x) + 1)) - 1
gap_start = ind[which(gap_size > 0)] + 1
gap_size = gap_size[which(gap_size > 0)]
return (data.table(gap_start, gap_size))
}
#' Exclude long 0-sequences
#'
#' Removes all rows with long 0-sequences in var from a table
#'
#' @param data the table
#' @param var the variable to be checked
#' @param limit the max. allowed 0-sequence-length
#'
#' @return the table without 0-sequences in var
#'
#' @export
remove_long_zero = function(data, var = "b", limit = 120)
{
# starts and lengths of same-value-intervals in data$var
rnums = rle(unlist(data[, .SD, .SDcols = var]))
# starts of same-value-intervals in data
length_sums = cumsum(c(1,rnums$lengths))
# indizes of long 0-sequences in rnums
long_series_length_idx = which(rnums$lengths > limit & rnums$values == 0)
# lengths of long 0-sequences
long_series_lengths = rnums$lengths[long_series_length_idx]
# starts of 0-sequences in data
long_series_idx = length_sums[long_series_length_idx]
# indizes of long 0-sequence-elements in data
all_long_element_idx = as.vector(unlist(mapply(seq, from=long_series_idx, length.out=long_series_lengths)))
# remove long 0-sequences from data
if (length(all_long_element_idx) != 0) {
data = data[-(all_long_element_idx),]
}
return (data)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit.climr.R
\name{fit}
\alias{fit}
\title{Fit basic statistical models to climate data}
\usage{
fit(obj, fit_type = c("lm", "loess", "smooth.spline"))
}
\arguments{
\item{obj}{An object of class \code{climr} from \code{\link{load_clim}}}
\item{fit_type}{The type of model required, either linear regression (\code{lm}), loess, or smoothing spline (\code{smooth.spline})}
}
\value{
Returns a list of class \code{climr_fit} which includes the model details as well as the data set and fit type used
}
\description{
Fit basic statistical models to climate data
}
\examples{
ans1 = load_clim('SH')
ans2 = fit(ans1, 'lm')
}
\seealso{
\code{\link{load_clim}}, \code{\link{plot.climr_fit}}, \code{\link{plot.climr_gp_fit}},\code{\link{gp_fit}}
}
| /climr/man/fit.Rd | no_license | nikhilbadriprasad/r-package | R | false | true | 818 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit.climr.R
\name{fit}
\alias{fit}
\title{Fit basic statistical models to climate data}
\usage{
fit(obj, fit_type = c("lm", "loess", "smooth.spline"))
}
\arguments{
\item{obj}{An object of class \code{climr} from \code{\link{load_clim}}}
\item{fit_type}{The type of model required, either linear regression (\code{lm}), loess, or smoothing spline (\code{smooth.spline})}
}
\value{
Returns a list of class \code{climr_fit} which includes the model details as well as the data set and fit type used
}
\description{
Fit basic statistical models to climate data
}
\examples{
ans1 = load_clim('SH')
ans2 = fit(ans1, 'lm')
}
\seealso{
\code{\link{load_clim}}, \code{\link{plot.climr_fit}}, \code{\link{plot.climr_gp_fit}},\code{\link{gp_fit}}
}
|
# NOTE(review): installation commands should not run on every source of this
# script -- move install_github()/install_tensorflow() to a one-time setup step.
devtools::install_github("rstudio/keras")
library(keras)
install_tensorflow()
library(tidyverse)
library(MLmetrics)
library (ROCR)
calc_f1_score <- function(threshold, mse_output, true_values) {
  # Binarise reconstruction errors at `threshold` (1 = predicted positive),
  # then score the prediction against the ground truth with MLmetrics.
  predicted <- as.integer(mse_output > threshold)
  F1_Score(true_values, predicted)
}
# program works in this loop
# For each organisation 20..39: train a small autoencoder on the "normal"
# split (Z12), score Z3 + known positives by reconstruction error, and label
# the test set T by thresholding that error.
for(org_number in seq(20, 39)){
T_y <- read_csv(stringr::str_c("T_y_", org_number, ".csv"), col_names = FALSE)
# only process organisations with at least 45 positive test labels
if(sum(T_y) >= 45){
# Z12 = training split, Z3 = held-out split, Ones = known-positive rows
Z12_x <- read_csv(stringr::str_c("Z12_x_", org_number, ".csv"), col_names = FALSE)
Z12_y <- read_csv(stringr::str_c("Z12_y_", org_number, ".csv"), col_names = FALSE)
Z3_x <- read_csv(stringr::str_c("Z3_x_", org_number, ".csv"), col_names = FALSE)
Z3_y <- read_csv(stringr::str_c("Z3_y_", org_number, ".csv"), col_names = FALSE)
Ones_x <- read_csv(stringr::str_c("Ones_x_", org_number, ".csv"), col_names = FALSE)
Ones_y <- read_csv(stringr::str_c("Ones_y_", org_number, ".csv"), col_names = FALSE)
Z12_x_mat = as.matrix(Z12_x)
#Z12_y_mat = matrix(data = 0, nrow = dim(Z12_x)[1], ncol = dim(Z12_x)[2])
#Algorithm 2 RCP 13
# stop training once validation loss improves by less than 1e-5 for 2 epochs
ES = keras::callback_early_stopping(monitor = "val_loss", min_delta = 10^-5, patience = 2)
# #for (i in 1:100){
# model = keras_model_sequential()
# model %>%
#   layer_dense(22, activation = 'tanh', input_shape = c(65)) %>%
#   layer_dense(65, activation = 'linear') %>%
#   compile(
#     loss = 'mse',
#     optimizer = 'sgd'
#   )
# 65 -> 10 -> 65 undercomplete autoencoder
model2 = keras_model_sequential()
model2 %>%
layer_dense(10, activation = 'tanh', input_shape = c(65)) %>%
layer_dense(65, activation = 'linear') %>%
compile(
loss = 'mse',
optimizer = 'sgd'
)
#one_hot_labels_Z12 <- to_categorical(Z12_y_mat)
#history <- model %>% keras::fit(x = Z12_x_mat, y = Z12_x_mat, epochs = 250, callbacks = c(ES), validation_split = 0.1)
# autoencoder training: the inputs are their own targets
history <- model2 %>% keras::fit(x = Z12_x_mat, y = Z12_x_mat, epochs = 250, callbacks = c(ES), validation_split = 0.1)
#classes <- model %>% predict(x_test, batch_size = 128)
Z3_union_O_x <- bind_rows(Z3_x, Ones_x)
#Z3_union_O_y <- bind_rows(Z3_y, Ones_y)
#write_csv(x = Z3_union_O_y, path = "What_Sean_calls_Y3.csv")
n_net_predictions <- model2 %>% predict(as.matrix(Z3_union_O_x))
# NOTE(review): the line computing mse_for_test_split is commented out below,
# but mse_for_test_split is still written to CSV further down -- as written,
# this loop errors with "object 'mse_for_test_split' not found".
#mse_for_test_split <- apply((n_net_predictions - as.matrix(Z3_union_O_x))^2, MARGIN = 1, FUN = mean)
# y <- ... # logical array of positive / negative cases
# predictions <- ... # array of predictions
#
# pred <- prediction(mse_for_test_split, as.matrix(Z3_union_O_y))
#
# f1_perf <- performance(pred, "f")
#
# # Recall-Precision curve
# RP.perf <- performance(pred, "prec", "rec")
#
# plot (RP.perf)
#
# # ROC curve
# ROC.perf <- performance(pred, "tpr", "fpr")
# plot (ROC.perf)
#
# # ROC area under the curve
# auc.tmp <- performance(pred,"auc")
# auc <- as.numeric(auc.tmp@y.values)
# best_threshold <- optimize(calc_f1_score, c(min(mse_for_test_split), max(mse_for_test_split)),
#                            mse_output = mse_for_test_split,
#                            true_values = as.matrix(Z3_union_O_y), maximum = TRUE)
# possible_cutoffs <- seq(0.1, 5, 0.1)
# scores <- vector(length = length(possible_cutoffs))
# for(i in 1:length(scores)){
#   scores[i] = calc_f1_score(possible_cutoffs[i], mse_for_test_split, as.matrix(Z3_union_O_y))
# }
#
# plot(possible_cutoffs, scores)
#}
# T_x is read from the working directory, unlike the per-organisation files
T_x <- read_csv("T_x.csv", col_names = FALSE)
pred_for_test_data <- model2 %>% predict(as.matrix(T_x))
# NOTE(review): mse_for_test_data is also commented out but used two lines
# below -- another "object not found" error at runtime.
#mse_for_test_data <- apply((pred_for_test_data - as.matrix(T_x))^2, MARGIN = 1, FUN = mean)
write_csv(x = as_data_frame(mse_for_test_split), path = "MSE_Output_to_find_cutoff.csv")
# hard-coded threshold; presumably derived offline from the CSV above -- TODO confirm
T_labels <- as.integer(mse_for_test_data > 2.6255574753683319)
write_csv(as_data_frame(T_labels), stringr::str_c("T_labels", org_number, ".csv"))
}
} #end of the loop
# Persist the last trained model and reload it; the load path must match the
# saved file (the original loaded "...hdf", which was never written).
keras::save_model_hdf5(object = model2, filepath = "keras_model2_from_org39.hdf5")
model2 <- keras::load_model_hdf5("keras_model2_from_org39.hdf5")
# print(sum(T_labels))
#
# print(mean(mse_for_test_data))
# print(mean(mse_for_test_split[1:25980]))
# print(mean(mse_for_test_split[-(1:25980)]))
# print(median(mse_for_test_split[1:25980]))
# print(median(mse_for_test_split[-(1:25980)]))
#
# # create model
# model <- keras_model_sequential()
#
# # define and compile the model
# model %>%
# layer_dense(units = 32, activation = 'relu', input_shape = c(100)) %>%
# layer_dense(units = 10, activation = 'softmax') %>%
# compile(
# optimizer = 'rmsprop',
# loss = 'categorical_crossentropy',
# metrics = c('accuracy')
# )
#
# # Generate dummy data
# data <- matrix(runif(1000*100), nrow = 1000, ncol = 100)
# labels <- matrix(round(runif(1000, min = 0, max = 9)), nrow = 1000, ncol = 1)
#
# # Convert labels to categorical one-hot encoding
# one_hot_labels <- to_categorical(labels, num_classes = 10)
#
# # Train the model, iterating on the data in batches of 32 samples
# model %>% fit(data, one_hot_labels, epochs=10, batch_size=32)
# | /RCPs Fifth Quarter/RCP13/keras_model.R | no_license | imran1570/IEM | R | false | false | 5,180 | r | devtools::install_github("rstudio/keras")
library(keras)
install_tensorflow()
library(tidyverse)
library(MLmetrics)
library (ROCR)
# function for F1-score calculation
calc_f1_score <- function(threshold, mse_output, true_values){
return(F1_Score(true_values, as.integer(mse_output > threshold)))
}
# program works in this loop
for(org_number in seq(20, 39)){
T_y <- read_csv(stringr::str_c("T_y_", org_number, ".csv"), col_names = FALSE)
if(sum(T_y) >= 45){
Z12_x <- read_csv(stringr::str_c("Z12_x_", org_number, ".csv"), col_names = FALSE)
Z12_y <- read_csv(stringr::str_c("Z12_y_", org_number, ".csv"), col_names = FALSE)
Z3_x <- read_csv(stringr::str_c("Z3_x_", org_number, ".csv"), col_names = FALSE)
Z3_y <- read_csv(stringr::str_c("Z3_y_", org_number, ".csv"), col_names = FALSE)
Ones_x <- read_csv(stringr::str_c("Ones_x_", org_number, ".csv"), col_names = FALSE)
Ones_y <- read_csv(stringr::str_c("Ones_y_", org_number, ".csv"), col_names = FALSE)
Z12_x_mat = as.matrix(Z12_x)
#Z12_y_mat = matrix(data = 0, nrow = dim(Z12_x)[1], ncol = dim(Z12_x)[2])
#Algorithm 2 RCP 13
ES = keras::callback_early_stopping(monitor = "val_loss", min_delta = 10^-5, patience = 2)
# #for (i in 1:100){
# model = keras_model_sequential()
# model %>%
# layer_dense(22, activation = 'tanh', input_shape = c(65)) %>%
# layer_dense(65, activation = 'linear') %>%
# compile(
# loss = 'mse',
# optimizer = 'sgd'
# )
model2 = keras_model_sequential()
model2 %>%
layer_dense(10, activation = 'tanh', input_shape = c(65)) %>%
layer_dense(65, activation = 'linear') %>%
compile(
loss = 'mse',
optimizer = 'sgd'
)
#one_hot_labels_Z12 <- to_categorical(Z12_y_mat)
#history <- model %>% keras::fit(x = Z12_x_mat, y = Z12_x_mat, epochs = 250, callbacks = c(ES), validation_split = 0.1)
history <- model2 %>% keras::fit(x = Z12_x_mat, y = Z12_x_mat, epochs = 250, callbacks = c(ES), validation_split = 0.1)
#classes <- model %>% predict(x_test, batch_size = 128)
Z3_union_O_x <- bind_rows(Z3_x, Ones_x)
#Z3_union_O_y <- bind_rows(Z3_y, Ones_y)
#write_csv(x = Z3_union_O_y, path = "What_Sean_calls_Y3.csv")
n_net_predictions <- model2 %>% predict(as.matrix(Z3_union_O_x))
#mse_for_test_split <- apply((n_net_predictions - as.matrix(Z3_union_O_x))^2, MARGIN = 1, FUN = mean)
# y <- ... # logical array of positive / negative cases
# predictions <- ... # array of predictions
#
# pred <- prediction(mse_for_test_split, as.matrix(Z3_union_O_y))
#
# f1_perf <- performance(pred, "f")
#
# # Recall-Precision curve
# RP.perf <- performance(pred, "prec", "rec")
#
# plot (RP.perf)
#
# # ROC curve
# ROC.perf <- performance(pred, "tpr", "fpr")
# plot (ROC.perf)
#
# # ROC area under the curve
# auc.tmp <- performance(pred,"auc")
# auc <- as.numeric(auc.tmp@y.values)
# best_threshold <- optimize(calc_f1_score, c(min(mse_for_test_split), max(mse_for_test_split)),
# mse_output = mse_for_test_split,
# true_values = as.matrix(Z3_union_O_y), maximum = TRUE)
# possible_cutoffs <- seq(0.1, 5, 0.1)
# scores <- vector(length = length(possible_cutoffs))
# for(i in 1:length(scores)){
# scores[i] = calc_f1_score(possible_cutoffs[i], mse_for_test_split, as.matrix(Z3_union_O_y))
# }
#
# plot(possible_cutoffs, scores)
#}
T_x <- read_csv("T_x.csv", col_names = FALSE)
pred_for_test_data <- model2 %>% predict(as.matrix(T_x))
#mse_for_test_data <- apply((pred_for_test_data - as.matrix(T_x))^2, MARGIN = 1, FUN = mean)
write_csv(x = as_data_frame(mse_for_test_split), path = "MSE_Output_to_find_cutoff.csv")
T_labels <- as.integer(mse_for_test_data > 2.6255574753683319)
write_csv(as_data_frame(T_labels), stringr::str_c("T_labels", org_number, ".csv"))
}
} #end of the loop
keras::save_model_hdf5(object = model2, filepath = "keras_model2_from_org39.hdf5")
model2 <- keras::load_model_hdf5("keras_model2_from_org39.hdf")
# print(sum(T_labels))
#
# print(mean(mse_for_test_data))
# print(mean(mse_for_test_split[1:25980]))
# print(mean(mse_for_test_split[-(1:25980)]))
# print(median(mse_for_test_split[1:25980]))
# print(median(mse_for_test_split[-(1:25980)]))
#
# # create model
# model <- keras_model_sequential()
#
# # define and compile the model
# model %>%
# layer_dense(units = 32, activation = 'relu', input_shape = c(100)) %>%
# layer_dense(units = 10, activation = 'softmax') %>%
# compile(
# optimizer = 'rmsprop',
# loss = 'categorical_crossentropy',
# metrics = c('accuracy')
# )
#
# # Generate dummy data
# data <- matrix(runif(1000*100), nrow = 1000, ncol = 100)
# labels <- matrix(round(runif(1000, min = 0, max = 9)), nrow = 1000, ncol = 1)
#
# # Convert labels to categorical one-hot encoding
# one_hot_labels <- to_categorical(labels, num_classes = 10)
#
# # Train the model, iterating on the data in batches of 32 samples
# model %>% fit(data, one_hot_labels, epochs=10, batch_size=32)
# |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/knitr.R
\name{register_knitr_engine}
\alias{register_knitr_engine}
\title{Register CmdStanR's knitr engine for Stan}
\usage{
register_knitr_engine(override = TRUE)
}
\arguments{
\item{override}{(logical) Override knitr's built-in, RStan-based engine for
Stan? The default is \code{TRUE}. See \strong{Details}.}
}
\description{
Registers CmdStanR's knitr engine \code{\link[=eng_cmdstan]{eng_cmdstan()}} for processing Stan chunks.
Refer to the vignette
\href{https://mc-stan.org/cmdstanr/articles/r-markdown.html}{R Markdown CmdStan Engine}
for a demonstration.
}
\details{
If \code{override = TRUE} (default), this registers CmdStanR's knitr engine as the
engine for \code{stan} chunks, replacing knitr's built-in, RStan-based engine. If
\code{override = FALSE}, this registers a \code{cmdstan} engine so that both engines
may be used in the same R Markdown document. If the template supports syntax
highlighting for the Stan language, the \code{cmdstan} chunks will have \code{stan}
syntax highlighting applied to them.
See the vignette
\href{https://mc-stan.org/cmdstanr/articles/r-markdown.html}{R Markdown CmdStan Engine}
for an example.
\strong{Note:} When running chunks interactively in RStudio (e.g. when using
\href{https://bookdown.org/yihui/rmarkdown/notebook.html}{R Notebooks}), it has
been observed that the built-in, RStan-based engine is used for \code{stan}
chunks even when CmdStanR's engine has been registered in the session. When
the R Markdown document is knit/rendered, the correct engine is used. As a
workaround, when running chunks interactively, it is recommended to use the
\code{override = FALSE} option and change \code{stan} chunks to be \code{cmdstan} chunks.
If you would like to keep \code{stan} chunks as \code{stan} chunks, it is possible to
specify \code{engine = "cmdstan"} in the chunk options after registering the
\code{cmdstan} engine with \code{override = FALSE}.
}
\references{
\itemize{
\item \href{https://bookdown.org/yihui/rmarkdown-cookbook/custom-engine.html}{Register a custom language engine for knitr}
\item \href{https://bookdown.org/yihui/rmarkdown/language-engines.html#stan}{knitr's built-in Stan language engine}
}
}
| /man/register_knitr_engine.Rd | permissive | stan-dev/cmdstanr | R | false | true | 2,257 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/knitr.R
\name{register_knitr_engine}
\alias{register_knitr_engine}
\title{Register CmdStanR's knitr engine for Stan}
\usage{
register_knitr_engine(override = TRUE)
}
\arguments{
\item{override}{(logical) Override knitr's built-in, RStan-based engine for
Stan? The default is \code{TRUE}. See \strong{Details}.}
}
\description{
Registers CmdStanR's knitr engine \code{\link[=eng_cmdstan]{eng_cmdstan()}} for processing Stan chunks.
Refer to the vignette
\href{https://mc-stan.org/cmdstanr/articles/r-markdown.html}{R Markdown CmdStan Engine}
for a demonstration.
}
\details{
If \code{override = TRUE} (default), this registers CmdStanR's knitr engine as the
engine for \code{stan} chunks, replacing knitr's built-in, RStan-based engine. If
\code{override = FALSE}, this registers a \code{cmdstan} engine so that both engines
may be used in the same R Markdown document. If the template supports syntax
highlighting for the Stan language, the \code{cmdstan} chunks will have \code{stan}
syntax highlighting applied to them.
See the vignette
\href{https://mc-stan.org/cmdstanr/articles/r-markdown.html}{R Markdown CmdStan Engine}
for an example.
\strong{Note:} When running chunks interactively in RStudio (e.g. when using
\href{https://bookdown.org/yihui/rmarkdown/notebook.html}{R Notebooks}), it has
been observed that the built-in, RStan-based engine is used for \code{stan}
chunks even when CmdStanR's engine has been registered in the session. When
the R Markdown document is knit/rendered, the correct engine is used. As a
workaround, when running chunks interactively, it is recommended to use the
\code{override = FALSE} option and change \code{stan} chunks to be \code{cmdstan} chunks.
If you would like to keep \code{stan} chunks as \code{stan} chunks, it is possible to
specify \code{engine = "cmdstan"} in the chunk options after registering the
\code{cmdstan} engine with \code{override = FALSE}.
}
\references{
\itemize{
\item \href{https://bookdown.org/yihui/rmarkdown-cookbook/custom-engine.html}{Register a custom language engine for knitr}
\item \href{https://bookdown.org/yihui/rmarkdown/language-engines.html#stan}{knitr's built-in Stan language engine}
}
}
|
### logical_abbr() works when given quoted objects, but doesn’t work when given
### an existing function, as in the example below. Why not? How could you modify
### logical_abbr() to work with functions? Think about what components make up
### a function.
logical_abbr <- function(x) {
  # Recursively walk an expression (or function) and report whether it uses
  # the abbreviations T/F instead of TRUE/FALSE.
  if (is.atomic(x)) {
    # literal values (TRUE, 1, "a", ...) can never be the symbols T/F
    return(FALSE)
  }
  if (is.name(x)) {
    return(identical(x, quote(T)) || identical(x, quote(F)))
  }
  if (is.call(x) || is.pairlist(x)) {
    # recurse into every component, stopping at the first hit
    for (i in seq_along(x)) {
      if (logical_abbr(x[[i]])) {
        return(TRUE)
      }
    }
    return(FALSE)
  }
  if (is.function(x)) {
    # a closure is a body plus formals (default values) -- check both
    return(logical_abbr(body(x)) || logical_abbr(formals(x)))
  }
  stop("Don't know how to handle type ", typeof(x),
       call. = FALSE)
}
# Demo: the default x = TRUE is fine, but the body uses the abbreviation T,
# so logical_abbr() should report TRUE for this function.
f <- function(x = TRUE) {
  g(x + T)
}
logical_abbr(f)
# [1] TRUE
| /13_expressions/07_walking_ast/exercise2.r | no_license | Bohdan-Khomtchouk/adv-r-book-solutions | R | false | false | 818 | r | ### logical_abbr() works when given quoted objects, but doesn’t work when given
### an existing function, as in the example below. Why not? How could you modify
### logical_abbr() to work with functions? Think about what components make up
### a function.
logical_abbr <- function(x) {
if (is.atomic(x)) {
FALSE
} else if (is.name(x)) {
identical(x, quote(T)) || identical(x, quote(F))
} else if (is.call(x) || is.pairlist(x)) {
for (i in seq_along(x)) {
if (logical_abbr(x[[i]])) return(TRUE)
}
FALSE
} else if (is.function(x)) { # Add this to handle functions.
logical_abbr(body(x)) || logical_abbr(formals(x))
} else {
stop("Don't know how to handle type ", typeof(x),
call. = FALSE)
}
}
f <- function(x = TRUE) {
g(x + T)
}
logical_abbr(f)
# [1] TRUE
|
# Parse an Ecopath model exported as XML into a data.frame with one row per
# trophic group (columns: group_name, TL, biomass, prod, accessibility, OI,
# plus one catch column per fleet). Requires the XML package to be attached.
# NOTE(review): when `filename` is missing this prints a message (with a
# "filname" typo) and returns NULL instead of raising an error via stop().
read.ecopath.model <-
function(filename)
{
if(missing(filename))
{
cat("filname is missing\n")
}
else
{
top <- xmlRoot(xmlTreeParse(filename,useInternalNodes=TRUE))
xmlName(top)
names(top)
groupname<-as.vector(xmlSApply(top[["groupname"]],xmlValue))
v<-xmlSApply(top,function(x) as.vector(xmlSApply(x,xmlValue)))
catches_tmp<-xmlSApply(top[["catches"]],function(x) as.numeric(xmlSApply(x,xmlValue)))
catches_tmp2<-data.frame(catches_tmp[1:v$numfleet])[1:length(groupname),]
names(catches_tmp2)<-paste("catch",v$fleetname[-length(v$fleetname)])
#ecopath<-data.frame(v$groupname[-1],as.numeric(v$TL[-1]),as.numeric(v$B[-1]),as.numeric(v$PROD[-1]),as.numeric(v$accessibility[-1]),as.numeric(v$OI[-1]))
ecopath<-data.frame(v$groupname,as.numeric(v$TL),as.numeric(v$B),as.numeric(v$PROD),as.numeric(v$accessibility),as.numeric(v$OI))
names(ecopath)<-c("group_name","TL","biomass","prod","accessibility","OI")
# single-fleet case: catches collapse to a vector, so rebuild column names
if (is.null(dim(catches_tmp2)))
{
ecopath<-data.frame(ecopath,as.data.frame(catches_tmp2[1:length(rownames(ecopath))]))
names(ecopath)<-c("group_name","TL","biomass","prod","accessibility","OI",paste("catch.",v$fleetname[-length(v$fleetname)],sep='')
)
}
# multi-fleet case: catches already form a data.frame of one column per fleet
if (!is.null(dim(catches_tmp2)))
{
ecopath<-data.frame(ecopath,as.data.frame(catches_tmp2[1:length(rownames(ecopath)),]))
}
# drop padding rows whose group name is empty
return (ecopath[!(ecopath$group_name==''),])
}
}
| /EcoTroph/R/read.ecopath.model.R | no_license | ingted/R-Examples | R | false | false | 1,389 | r | read.ecopath.model <-
function(filename)
{
if(missing(filename))
{
cat("filname is missing\n")
}
else
{
top <- xmlRoot(xmlTreeParse(filename,useInternalNodes=TRUE))
xmlName(top)
names(top)
groupname<-as.vector(xmlSApply(top[["groupname"]],xmlValue))
v<-xmlSApply(top,function(x) as.vector(xmlSApply(x,xmlValue)))
catches_tmp<-xmlSApply(top[["catches"]],function(x) as.numeric(xmlSApply(x,xmlValue)))
catches_tmp2<-data.frame(catches_tmp[1:v$numfleet])[1:length(groupname),]
names(catches_tmp2)<-paste("catch",v$fleetname[-length(v$fleetname)])
#ecopath<-data.frame(v$groupname[-1],as.numeric(v$TL[-1]),as.numeric(v$B[-1]),as.numeric(v$PROD[-1]),as.numeric(v$accessibility[-1]),as.numeric(v$OI[-1]))
ecopath<-data.frame(v$groupname,as.numeric(v$TL),as.numeric(v$B),as.numeric(v$PROD),as.numeric(v$accessibility),as.numeric(v$OI))
names(ecopath)<-c("group_name","TL","biomass","prod","accessibility","OI")
if (is.null(dim(catches_tmp2)))
{
ecopath<-data.frame(ecopath,as.data.frame(catches_tmp2[1:length(rownames(ecopath))]))
names(ecopath)<-c("group_name","TL","biomass","prod","accessibility","OI",paste("catch.",v$fleetname[-length(v$fleetname)],sep='')
)
}
if (!is.null(dim(catches_tmp2)))
{
ecopath<-data.frame(ecopath,as.data.frame(catches_tmp2[1:length(rownames(ecopath)),]))
}
return (ecopath[!(ecopath$group_name==''),])
}
}
|
# load packages ----
library(tidyverse)
library(readxl)
library(stringr)
# clear jobs ----
# NOTE(review): rm(list = ls()) wipes the caller's workspace when this script
# is sourced -- generally discouraged; run the script in a fresh session instead.
rm(list = ls())
# user defined functions ----
PE_keep_idx <- function(PE, N, p = 0.5) {
  # Keep subjects whose error rate PE lies below 1 minus the one-sided 95%
  # upper bound of a chance-level binomial proportion p over N trials
  # (normal approximation). Returns a logical vector the same length as PE.
  chance_upper <- p + qnorm(0.95) * sqrt(p * (1 - p) / N)
  PE < 1 - chance_upper
}
# Within each id, rank rows by `var_crit` and keep only the row whose rank
# equals fun(ranks); fun = min keeps the row with the lowest var_crit, so
# duplicate participations collapse to one record per subject.
# NOTE(review): uses the older enquo()/!! tidy-eval idiom; the modern
# equivalent is embracing with {{ var_crit }}.
rm_dup_id <- function(tbl, var_crit, fun = min){
  var_crit <- enquo(var_crit)  # capture the unquoted column for later !!
  tbl %>%
    group_by(id) %>%
    mutate(ranking = row_number(!!var_crit)) %>%
    filter(ranking == fun(ranking)) %>%
    ungroup() %>%
    select(-ranking)
}
# set some configurations ----
# NOTE(review): getSrcDirectory only resolves when this file is source()d.
base_dir <- getSrcDirectory(function(x) x)
data_dir <- file.path(base_dir, "EFRes")       # raw task results
filt_dir <- file.path(base_dir, "EFFiltered")  # filtered output
rate <- 0.8                                    # minimum proportion of valid trials
data_suffix <- "Result"
filt_suffix <- "Filtered"
file_ext <- ".csv"
# task name -> index column, used when merging the filtered data sets below
indices <- read_csv(file.path(filt_dir, "index_map.csv"))
index_map <- setNames(indices$index, indices$Taskname)
# AntiSac ----
taskname <- "AntiSac"
antisac <- read_csv(file.path(data_dir, paste0(taskname, data_suffix, file_ext)))
antisac_filtered <- antisac %>%
  # remove subjects without enough responses
  filter(NResp > rate * NTrial) %>%
  # remove subjects with too many errors (chance level is 1/3 for this task)
  filter(PE_keep_idx(PE, NInclude, 1 / 3)) %>%
  # remove duplicate id's, keeping the attempt with the lowest error rate
  rm_dup_id(PE)
write_csv(antisac_filtered, file.path(filt_dir, paste0(taskname, filt_suffix, file_ext)))
# CateSwitch ----
# NOTE(review): the three switching-task sections below are identical except
# for `taskname` -- good candidates for a single helper function.
taskname <- "CateSwitch"
cateSwitch <- read_csv(file.path(data_dir, paste0(taskname, data_suffix, file_ext)))
cateSwitch_filtered <- cateSwitch %>%
  # remove subjects without enough responses
  filter(NInclude > rate * NTrial) %>%
  # remove subjects with too many errors
  filter(PE_keep_idx(PE, NInclude)) %>%
  # remove subjects with abnormal switch costs (boxplot outliers)
  filter(! MRT_diff %in% boxplot.stats(MRT_diff)$out) %>%
  # remove duplicate id's
  rm_dup_id(PE)
write_csv(cateSwitch_filtered, file.path(filt_dir, paste0(taskname, filt_suffix, file_ext)))
# ShiftColor ----
taskname <- "ShiftColor"
shiftColor <- read_csv(file.path(data_dir, paste0(taskname, data_suffix, file_ext)))
shiftColor_filtered <- shiftColor %>%
  # remove subjects without enough responses
  filter(NInclude > rate * NTrial) %>%
  # remove subjects with too many errors
  filter(PE_keep_idx(PE, NInclude)) %>%
  # remove subjects with abnormal switch costs
  filter(! MRT_diff %in% boxplot.stats(MRT_diff)$out) %>%
  # remove duplicate id's
  rm_dup_id(PE)
write_csv(shiftColor_filtered, file.path(filt_dir, paste0(taskname, filt_suffix, file_ext)))
# ShiftNumber ----
taskname <- "ShiftNumber"
shiftNumber <- read_csv(file.path(data_dir, paste0(taskname, data_suffix, file_ext)))
shiftNumber_filtered <- shiftNumber %>%
  # remove subjects without enough responses
  filter(NInclude > rate * NTrial) %>%
  # remove subjects with too many errors
  filter(PE_keep_idx(PE, NInclude)) %>%
  # remove subjects with abnormal switch costs
  filter(! MRT_diff %in% boxplot.stats(MRT_diff)$out) %>%
  # remove duplicate id's
  rm_dup_id(PE)
write_csv(shiftNumber_filtered, file.path(filt_dir, paste0(taskname, filt_suffix, file_ext)))
# spatialWM ----
taskname <- "spatialWM"
spatialWM <- read_csv(file.path(data_dir, paste0(taskname, data_suffix, file_ext)))
spatialWM_filtered <- spatialWM %>%
  filter(
    # remove subjects without enough responses
    NResp > rate * NTrial,
    NInclude > rate * NTrial,
    # remove subjects with too many errors
    PE_keep_idx(PE, NInclude),
    # remove subjects with abnormal dprime (boxplot outliers)
    ! dprime %in% boxplot.stats(dprime)$out
  ) %>%
  # remove duplicate id's
  rm_dup_id(PE)
write_csv(spatialWM_filtered, file.path(filt_dir, paste0(taskname, filt_suffix, file_ext)))
# StopSignal ----
# Reshape per-block SSD columns (MSSD1..SSSD4) into long form, clean the
# per-block mean SSDs, then recompute per-subject SSRT = mean go RT - mean SSD.
# NOTE(review): gather/separate/spread are superseded; pivot_longer/
# pivot_wider would be the modern equivalents.
taskname <- "StopSignal"
stopSignal <- read_csv(file.path(data_dir, paste0(taskname, data_suffix, file_ext)))
stopSignal_filtered <- stopSignal %>%
  gather(type, value, MSSD1:SSSD4) %>%
  separate(type, c("var", "ssd_cat"), -2) %>%
  spread(var, value) %>%
  # NOTE(review): comment/code mismatch in the original ("remove nonpositive
  # MSSD") -- this actually drops rows where SSSD == 0; confirm intent.
  filter(SSSD != 0) %>%
  group_by(ssd_cat) %>%
  # blank out MSSD outliers (adjusted boxplot) within each SSD block
  mutate(MSSD = ifelse(MSSD %in% robustbase::adjboxStats(MSSD)$out, NA, MSSD)) %>%
  group_by_at(vars(id:PE_Stop)) %>%
  summarise(
    SSSD = sd(MSSD, na.rm = TRUE),
    MSSD = mean(MSSD, na.rm = TRUE)
  ) %>%
  ungroup() %>%
  mutate(SSRT = MRT_Go - MSSD) %>%
  filter(
    # remove subjects without enough responses
    NResp > rate * NTrial,
    NInclude > rate * NTrial,
    # remove subjects with too many errors
    PE_keep_idx(PE_Go, NInclude),
    # remove subjects with NaN results
    ! is.nan(SSRT),
    # remove subjects with too spread SSDs
    ! SSSD %in% boxplot.stats(SSSD)$out,
    # remove subjects with abnormal stop signal RT
    ! SSRT %in% boxplot.stats(SSRT)$out
  ) %>%
  # remove duplicate id's
  rm_dup_id(PE_Go)
write_csv(stopSignal_filtered, file.path(filt_dir, paste0(taskname, filt_suffix, file_ext)))
# Stroop ----
taskname <- "Stroop"
stroop <- read_csv(file.path(data_dir, paste0(taskname, data_suffix, file_ext)))
stroop_filtered <- stroop %>%
  # remove subjects without enough responses
  filter(NInclude > rate * NTrial) %>%
  # remove subjects with too many errors (chance level 1/4 for this task)
  filter(PE_keep_idx(PE, NInclude, 1 / 4)) %>%
  # remove subjects with abnormal Incongruent-Congruent RT
  filter(! MRT_diff %in% boxplot.stats(MRT_diff)$out) %>%
  # remove duplicate id's
  rm_dup_id(PE)
write_csv(stroop_filtered, file.path(filt_dir, paste0(taskname, filt_suffix, file_ext)))
# WM3 ----
taskname <- "WM3"
WM3 <- read_csv(file.path(data_dir, paste0(taskname, data_suffix, file_ext)))
WM3_filtered <- WM3 %>%
  filter(
    # remove subjects without enough responses
    NResp > rate * NTrial,
    NInclude > rate * NTrial,
    # remove subjects with too many errors
    PE_keep_idx(PE, NInclude),
    # remove subjects with abnormal dprime
    ! dprime %in% boxplot.stats(dprime)$out
  ) %>%
  # remove duplicate id's
  rm_dup_id(PE)
write_csv(WM3_filtered, file.path(filt_dir, paste0(taskname, filt_suffix, file_ext)))
# Keep track ----
# KeepTrack scores come from an Excel sheet rather than a Result CSV.
taskname <- "KeepTrack"
keepTrack <- read_excel(file.path(data_dir, "KeepTrack.xlsx"))
keepTrack_filtered <- keepTrack %>%
  filter(
    # remove subjects with NA results
    ! is.na(score),
    # remove subjects with abnormal score
    ! score %in% boxplot.stats(score)$out
  ) %>%
  rename(id = ID)
write_csv(keepTrack_filtered, file.path(filt_dir, paste0(taskname, filt_suffix, file_ext)))
# merge datasets ----
# Full-join every task's chosen index column by subject id, then keep only
# subjects with complete data across all tasks.
# NOTE(review): list.files' pattern argument is a regex; "*Filtered*" only
# works by accident -- "Filtered" (or ".*Filtered.*") is the intended regex.
data_files <- list.files(filt_dir, "*Filtered*")
data_merged <- data_files %>%
  map(
    function (x){
      # task name is everything before "Filtered" in the file name
      taskname <- str_match(x, ".+(?=Filtered)")
      index <- index_map[taskname]
      read_csv(file.path(filt_dir, x)) %>%
        select(one_of("id", index)) %>%
        # filter(id %in% sublist) %>%
        # rename the task's index column to the task name itself
        rename(!!taskname := !!index)
    }
  ) %>%
  reduce(
    function(x, y){
      full_join(x, y, by = "id")
    }
  ) %>%
  filter_all(all_vars(!is.na(.)))
write_csv(data_merged, file.path(filt_dir, "ef_behav_all.csv"))
| /filterIndex.R | permissive | Cynthia1229/Behav_EF | R | false | false | 6,904 | r | # load packages ----
library(tidyverse)
library(readxl)
library(stringr)
# clear jobs ----
rm(list = ls())
# user defined functions ----
# Flag subjects whose proportion of errors (PE) shows above-chance accuracy.
#
# Args:
#   PE: proportion of errors (vectorized over subjects).
#   N:  number of included trials (binomial sample size).
#   p:  chance-level accuracy for the task (0.5 for two-choice tasks).
# Returns: logical vector; TRUE when accuracy (1 - PE) exceeds the upper
#   one-sided 95% bound of a chance-level binomial proportion, i.e.
#   PE < 1 - (p + z_{0.95} * sqrt(p(1 - p)/N)).
PE_keep_idx <- function(PE, N, p = 0.5){
  chance_upper <- p + qnorm(0.95) * sqrt(p * (1 - p) / N)
  PE < 1 - chance_upper
}
# Resolve duplicated subject ids: for each id, keep the row whose rank on
# `var_crit` matches `fun` of the ranks (default `min`, i.e. the record
# with the smallest criterion value, e.g. the lowest error rate).
#
# Args:
#   tbl:      a data frame containing an `id` column.
#   var_crit: unquoted column used to rank a subject's duplicate rows.
#   fun:      function applied to the rank vector to choose which rank to
#             keep (`min` keeps the best-ranked row, `max` the worst).
# Returns: `tbl` with one row per id, `ranking` helper column removed.
rm_dup_id <- function(tbl, var_crit, fun = min){
  var_crit <- enquo(var_crit)  # capture the column for tidy evaluation
  tbl %>%
    group_by(id) %>%
    mutate(ranking = row_number(!!var_crit)) %>%
    filter(ranking == fun(ranking)) %>%
    ungroup() %>%
    select(-ranking)
}
# set some configurations ----
# Paths are resolved relative to this script's own directory (the script is
# expected to be source()d so getSrcDirectory() is populated).
base_dir <- getSrcDirectory(function(x) x)
data_dir <- file.path(base_dir, "EFRes")       # raw per-task summary files
filt_dir <- file.path(base_dir, "EFFiltered")  # filtered outputs written here
rate <- 0.8            # minimum proportion of responded/analyzable trials
data_suffix <- "Result"
filt_suffix <- "Filtered"
file_ext <- ".csv"
# named vector mapping each task name to its index column (e.g. PE, SSRT)
indices <- read_csv(file.path(filt_dir, "index_map.csv"))
index_map <- setNames(indices$index, indices$Taskname)
# AntiSac ----
# Antisaccade task; chance accuracy is 1 / 3, hence the p passed below.
taskname <- "AntiSac"
antisac <- read_csv(file.path(data_dir, paste0(taskname, data_suffix, file_ext)))
antisac_filtered <- antisac %>%
  filter(
    # subjects must have responded on enough trials
    NResp > rate * NTrial,
    # and must perform above chance level
    PE_keep_idx(PE, NInclude, 1 / 3)
  ) %>%
  # keep one record per duplicated subject id (the one with the lowest PE)
  rm_dup_id(PE)
write_csv(antisac_filtered, file.path(filt_dir, paste0(taskname, filt_suffix, file_ext)))
# CateSwitch ----
taskname <- "CateSwitch"
cateSwitch <- read_csv(file.path(data_dir, paste0(taskname, data_suffix, file_ext)))
cateSwitch_filtered <- cateSwitch %>%
  # remove subjects without enough analyzable trials
  filter(NInclude > rate * NTrial) %>%
  # remove subjects performing at/below chance level (default p = 0.5)
  filter(PE_keep_idx(PE, NInclude)) %>%
  # remove subjects whose switch cost (MRT_diff) is a Tukey-fence outlier
  filter(! MRT_diff %in% boxplot.stats(MRT_diff)$out) %>%
  # keep one record per duplicated subject id (the one with the lowest PE)
  rm_dup_id(PE)
write_csv(cateSwitch_filtered, file.path(filt_dir, paste0(taskname, filt_suffix, file_ext)))
# ShiftColor ----
# Same filtering rules as CateSwitch.
taskname <- "ShiftColor"
shiftColor <- read_csv(file.path(data_dir, paste0(taskname, data_suffix, file_ext)))
shiftColor_filtered <- shiftColor %>%
  # remove subjects without enough analyzable trials
  filter(NInclude > rate * NTrial) %>%
  # remove subjects performing at/below chance level (default p = 0.5)
  filter(PE_keep_idx(PE, NInclude)) %>%
  # remove subjects whose switch cost (MRT_diff) is a Tukey-fence outlier
  filter(! MRT_diff %in% boxplot.stats(MRT_diff)$out) %>%
  # keep one record per duplicated subject id (the one with the lowest PE)
  rm_dup_id(PE)
write_csv(shiftColor_filtered, file.path(filt_dir, paste0(taskname, filt_suffix, file_ext)))
# ShiftNumber ----
# Same filtering rules as CateSwitch/ShiftColor.
taskname <- "ShiftNumber"
shiftNumber <- read_csv(file.path(data_dir, paste0(taskname, data_suffix, file_ext)))
shiftNumber_filtered <- shiftNumber %>%
  # remove subjects without enough analyzable trials
  filter(NInclude > rate * NTrial) %>%
  # remove subjects performing at/below chance level (default p = 0.5)
  filter(PE_keep_idx(PE, NInclude)) %>%
  # remove subjects whose switch cost (MRT_diff) is a Tukey-fence outlier
  filter(! MRT_diff %in% boxplot.stats(MRT_diff)$out) %>%
  # keep one record per duplicated subject id (the one with the lowest PE)
  rm_dup_id(PE)
write_csv(shiftNumber_filtered, file.path(filt_dir, paste0(taskname, filt_suffix, file_ext)))
# spatialWM ----
taskname <- "spatialWM"
spatialWM <- read_csv(file.path(data_dir, paste0(taskname, data_suffix, file_ext)))
spatialWM_filtered <- spatialWM %>%
  filter(
    # remove subjects without enough responses / analyzable trials
    NResp > rate * NTrial,
    NInclude > rate * NTrial,
    # remove subjects performing at/below chance level (default p = 0.5)
    PE_keep_idx(PE, NInclude),
    # remove subjects whose dprime is a Tukey-fence outlier
    ! dprime %in% boxplot.stats(dprime)$out
  ) %>%
  # keep one record per duplicated subject id (the one with the lowest PE)
  rm_dup_id(PE)
write_csv(spatialWM_filtered, file.path(filt_dir, paste0(taskname, filt_suffix, file_ext)))
# StopSignal ----
# Stop-signal task. MSSD1..MSSD4 / SSSD1..SSSD4 hold mean / sd of the
# stop-signal delay per SSD category; they are reshaped to long form,
# cleaned per category, then recombined. SSRT is estimated with the mean
# method: mean go RT minus mean SSD.
# NOTE(review): separate(..., sep = -2) counts positions from the right, so
# with current tidyr "MSSD1" splits into "MSS"/"D1", which would not yield
# the MSSD/SSSD columns used below -- verify against the tidyr version this
# script was written for.
taskname <- "StopSignal"
stopSignal <- read_csv(file.path(data_dir, paste0(taskname, data_suffix, file_ext)))
stopSignal_filtered <- stopSignal %>%
  gather(type, value, MSSD1:SSSD4) %>%
  separate(type, c("var", "ssd_cat"), -2) %>%
  spread(var, value) %>%
  # NOTE(review): the original comment said "remove nonpositive MSSD" but the
  # code drops rows where SSSD == 0 -- confirm which was intended
  filter(SSSD != 0) %>%
  group_by(ssd_cat) %>%
  # within each SSD category, blank out MSSD values flagged by the adjusted
  # (skewness-robust) boxplot rule
  mutate(MSSD = ifelse(MSSD %in% robustbase::adjboxStats(MSSD)$out, NA, MSSD)) %>%
  group_by_at(vars(id:PE_Stop)) %>%
  # collapse the SSD categories back to one row per record
  summarise(
    SSSD = sd(MSSD, na.rm = TRUE),
    MSSD = mean(MSSD, na.rm = TRUE)
  ) %>%
  ungroup() %>%
  # stop-signal reaction time, mean method
  mutate(SSRT = MRT_Go - MSSD) %>%
  filter(
    # remove subjects without enough responses / analyzable trials
    NResp > rate * NTrial,
    NInclude > rate * NTrial,
    # remove subjects with too many errors on go trials
    PE_keep_idx(PE_Go, NInclude),
    # remove subjects with NaN results
    ! is.nan(SSRT),
    # remove subjects with too spread SSDs
    ! SSSD %in% boxplot.stats(SSSD)$out,
    # remove subjects with abnormal stop signal RT
    ! SSRT %in% boxplot.stats(SSRT)$out
  ) %>%
  # keep one record per duplicated subject id (the one with the lowest PE_Go)
  rm_dup_id(PE_Go)
write_csv(stopSignal_filtered, file.path(filt_dir, paste0(taskname, filt_suffix, file_ext)))
# Stroop ----
# Chance accuracy is taken as 1/4 (presumably four response options -- confirm),
# hence the p = 1 / 4 passed to PE_keep_idx() below.
taskname <- "Stroop"
stroop <- read_csv(file.path(data_dir, paste0(taskname, data_suffix, file_ext)))
stroop_filtered <- stroop %>%
  # remove subjects without enough analyzable trials
  filter(NInclude > rate * NTrial) %>%
  # remove subjects performing at/below chance level (too many errors)
  filter(PE_keep_idx(PE, NInclude, 1 / 4)) %>%
  # remove subjects whose Incongruent-Congruent RT is a Tukey-fence outlier
  filter(! MRT_diff %in% boxplot.stats(MRT_diff)$out) %>%
  # keep one record per duplicated subject id (the one with the lowest PE)
  rm_dup_id(PE)
write_csv(stroop_filtered, file.path(filt_dir, paste0(taskname, filt_suffix, file_ext)))
# WM3 ----
taskname <- "WM3"
WM3 <- read_csv(file.path(data_dir, paste0(taskname, data_suffix, file_ext)))
WM3_filtered <- WM3 %>%
  filter(
    # remove subjects without enough responses / analyzable trials
    NResp > rate * NTrial,
    NInclude > rate * NTrial,
    # remove subjects performing at/below chance level (default p = 0.5)
    PE_keep_idx(PE, NInclude),
    # remove subjects whose dprime is a Tukey-fence outlier
    ! dprime %in% boxplot.stats(dprime)$out
  ) %>%
  # keep one record per duplicated subject id (the one with the lowest PE)
  rm_dup_id(PE)
write_csv(WM3_filtered, file.path(filt_dir, paste0(taskname, filt_suffix, file_ext)))
# Keep track ----
# Keep-track scores come from an Excel sheet rather than a "*Result.csv" file.
taskname <- "KeepTrack"
keepTrack <- read_excel(file.path(data_dir, "KeepTrack.xlsx"))
keepTrack_filtered <- keepTrack %>%
  filter(
    # remove subjects with NA results
    ! is.na(score),
    # remove subjects whose score is a Tukey-fence outlier
    ! score %in% boxplot.stats(score)$out
  ) %>%
  # normalize the id column name so the merge step can join on `id`
  rename(id = ID)
write_csv(keepTrack_filtered, file.path(filt_dir, paste0(taskname, filt_suffix, file_ext)))
# merge datasets ----
# Join every per-task "*Filtered.csv" on subject id and keep only subjects
# with complete data across all tasks.
# FIX: the `pattern` argument of list.files() is a regular expression, not a
# shell glob. "*Filtered*" relied on undefined behavior of a leading "*";
# the regex "Filtered" is what was intended.
data_files <- list.files(filt_dir, "Filtered")
data_merged <- data_files %>%
  map(
    function (x){
      # task name is the file-name prefix before "Filtered"
      taskname <- str_match(x, ".+(?=Filtered)")
      # look up the task's index column (e.g. PE, SSRT) from index_map
      index <- index_map[taskname]
      read_csv(file.path(filt_dir, x)) %>%
        select(one_of("id", index)) %>%
        # filter(id %in% sublist) %>%
        # expose the task's index column under the task name
        rename(!!taskname := !!index)
    }
  ) %>%
  # outer-join all task tables on subject id
  reduce(
    function(x, y){
      full_join(x, y, by = "id")
    }
  ) %>%
  # keep only subjects observed in every task (no NA in any column)
  filter_all(all_vars(!is.na(.)))
write_csv(data_merged, file.path(filt_dir, "ef_behav_all.csv"))
|
# Load the required libraries: quantmod for price download / return
# calculation, PerformanceAnalytics for ProbSharpeRatio().
library(quantmod)
library(PerformanceAnalytics)
# Individual symbol ----
# Set the start and end date of the analysis
symbol <- "AAPL"
start_date <- as.Date("2018-01-01")
end_date <- Sys.Date()
# Download OHLC data from Yahoo Finance; getSymbols() assigns the xts object
# into the workspace under the symbol's own name (hence the get() below)
getSymbols(symbol, src = "yahoo", from = start_date, to = end_date)
# Calculate the daily returns from the close prices
returns <- dailyReturn(Cl(get(symbol)))
# Probabilistic Sharpe Ratio against a reference Sharpe ratio of 0
psr_ratio <- ProbSharpeRatio(returns, Rf = 0, refSR = 0)
# Print the symbol and its Probabilistic Sharpe Ratio
print(symbol)
print(psr_ratio)
#cat(symbol, ': ', psr_ratio)
# Multiple Stock symbols ----
# Probabilistic Sharpe Ratio for a basket of bank stocks.
tickers <- c("BAC", "GS", "C", "WFC", "JPM")
# Analysis window
start_date <- as.Date("2020-01-01")
end_date <- Sys.Date()
# FIX: the price histories were never downloaded, so get(ticker) below failed
# unless the symbols already happened to exist in the workspace. getSymbols()
# accepts a vector and assigns one xts object per ticker.
getSymbols(tickers, src = "yahoo", from = start_date, to = end_date)
# Calculate the daily returns for each stock
returns <- list()
for (ticker in tickers) {
  returns[[ticker]] <- dailyReturn(Cl(get(ticker)))
}
# Calculate the Probabilistic Sharpe Ratio for each stock; build one-row
# data frames and bind once (avoids growing a data frame inside a loop)
psr_rows <- lapply(tickers, function(ticker) {
  data.frame(Symbol = ticker,
             PSR_Ratio = ProbSharpeRatio(returns[[ticker]], refSR = 0))
})
psr_ratios <- do.call(rbind, psr_rows)
# Print the table of Probabilistic Sharpe Ratios
print(psr_ratios)
library(quantmod)
library(PerformanceAnalytics)
# Individual symbol
# Set the start and end date of the analysis
symbol <- "AAPL"
start_date <- as.Date("2018-01-01")
end_date <- Sys.Date()
# Get Data
getSymbols(symbol, src = "yahoo", from = start_date, to = end_date)
# Calculate the daily returns
returns <- dailyReturn(Cl(get(symbol)))
# Calculate the Probabilistic Sharpe Ratio
psr_ratio <- ProbSharpeRatio(returns, Rf = 0, refSR = 0)
# Print the Probabilistic Sharpe Ratio
print(symbol)
print(psr_ratio)
#cat(symbol, ': ', psr_ratio)
# Multiple Stock symbols ----
# Probabilistic Sharpe Ratio for a basket of bank stocks.
tickers <- c("BAC", "GS", "C", "WFC", "JPM")
# Analysis window
start_date <- as.Date("2020-01-01")
end_date <- Sys.Date()
# FIX: the price histories were never downloaded, so get(ticker) below failed
# unless the symbols already happened to exist in the workspace. getSymbols()
# accepts a vector and assigns one xts object per ticker.
getSymbols(tickers, src = "yahoo", from = start_date, to = end_date)
# Calculate the daily returns for each stock
returns <- list()
for (ticker in tickers) {
  returns[[ticker]] <- dailyReturn(Cl(get(ticker)))
}
# Calculate the Probabilistic Sharpe Ratio for each stock; build one-row
# data frames and bind once (avoids growing a data frame inside a loop)
psr_rows <- lapply(tickers, function(ticker) {
  data.frame(Symbol = ticker,
             PSR_Ratio = ProbSharpeRatio(returns[[ticker]], refSR = 0))
})
psr_ratios <- do.call(rbind, psr_rows)
# Print the table of Probabilistic Sharpe Ratios
print(psr_ratios)
# Compute the correlation between sulfate and nitrate for the monitors
# (ids 1..332) whose count of completely-observed rows exceeds `thresold`.
#
# Args:
#   directory: sub-directory (under base_dir) holding the per-monitor CSVs.
#   thresold:  minimum number of complete cases a monitor must have
#              (argument name kept as-is for backward compatibility).
# Returns: numeric vector with one correlation per qualifying monitor
#   (numeric(0) when no monitor qualifies).
corr <- function (directory, thresold= 0) {
  id <- 1:332
  # where the files were unzipped (machine-specific path)
  base_dir <- "/home/miguel/spec_data"
  # build the data directory path regardless of the current working dir
  full_dest <- paste(base_dir, directory, sep="/")
  # files for the requested monitors
  lf <- list.files(full_dest, full.names=TRUE)[id]
  # read all monitor files, binding once; do.call(rbind, lapply(...)) avoids
  # the O(n^2) cost of growing a data frame inside a loop
  data_frame <- do.call(rbind, lapply(lf, read.csv))
  # keep only rows with no missing values
  c_cases <- data_frame[complete.cases(data_frame), ]
  # number of complete observations per monitor, aligned with `id`
  nobs <- vapply(id, function(i) sum(c_cases$ID == i), integer(1))
  # monitors that pass the threshold
  tids <- id[nobs > thresold]
  # sulfate/nitrate correlation for each qualifying monitor
  vapply(tids, function(i) {
    sub <- c_cases[c_cases$ID == i, ]
    cor(sub$sulfate, sub$nitrate)
  }, numeric(1))
}
| /R/week2/corr.R | no_license | marozsas/datasciencecoursera | R | false | false | 1,464 | r | corr <- function (directory, thresold= 0) {
# I could call complete() as below, but since I have to replicate all code from
# complete.R to use the vector c, there is no point to call a function and redo
# all steps again.
#c <- complete("specdata")
#tids <- c[c$nobs>thresold, "ID"]
id <- 1:332
# where the files were unzipd
base_dir <- "/home/miguel/spec_data"
#create a list to access the files regardless the current working dir
full_dest <- paste(base_dir, directory, sep="/")
# creates a list of files that matches the id
lf <- list.files(full_dest, full.names=TRUE)[id]
# load data in a data_frame from the csv file
data_frame <- data.frame()
for (i in 1:length(id)) {
data_frame <- rbind (data_frame, read.csv(lf[i]))
}
# uses complete cases to remove NAs
ok <- complete.cases(data_frame)
c_cases <- data_frame[ok,]
# initialize ID, nobs
ID <- numeric()
nobs <- numeric()
# use nrows to count data for each ID specified
for (i in id) {
# append id from i and nobs from nrows
ID <- c(ID, i)
nobs <- c(nobs, nrow (c_cases[c_cases$ID==i,]))
}
# taa daa !
df <- data.frame(ID, nobs)
# tids is a list of IDs where nobs > thresold
tids <- df[df$nobs>thresold, "ID"]
# calculate correlation for each ID in tids
out <- numeric()
for (i in tids) {
out <- c(out, cor (c_cases[c_cases$ID==i, "sulfate"], c_cases[c_cases$ID==i, "nitrate"]))
}
out
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CalculatePolicyVmt.R
\name{predictLightVehicles}
\alias{predictLightVehicles}
\title{Function to calculate light vehicle ownership}
\usage{
predictLightVehicles(Data_, LtVehOwnModels_, Type, TargetProp = NA)
}
\arguments{
\item{Data_}{A household data frame consisting of household attributes used to
calculate light vehicle ownership.}
\item{LtVehOwnModels_}{A list of light vehicle ownership models.}
\item{Type}{A string indicating the region type: "Metro" (default) or "NonMetro".}
\item{TargetProp}{A numeric indicating the target light vehicle ownership rate (
average ratio of light vehicles to driver age population)}
}
\value{
An array of integers representing the number of light vehicles for each
household.
}
\description{
\code{predictLightVehicles} calculates light vehicle ownership.
}
\details{
This function takes a data frame of households and a list of models which
are used to calculate light vehicle ownership for each
household.
}
| /sources/modules/VEHouseholdTravel/man/predictLightVehicles.Rd | permissive | rickdonnelly/VisionEval-Dev | R | false | true | 1,035 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CalculatePolicyVmt.R
\name{predictLightVehicles}
\alias{predictLightVehicles}
\title{Function to calculate light vehicle ownership}
\usage{
predictLightVehicles(Data_, LtVehOwnModels_, Type, TargetProp = NA)
}
\arguments{
\item{Data_}{A household data frame consisting of household attributes used to
calculate light vehicle ownership.}
\item{LtVehOwnModels_}{A list of light vehicle ownership models.}
\item{Type}{A string indicating the region type: "Metro" (default) or "NonMetro".}
\item{TargetProp}{A numeric indicating the target light vehicle ownership rate (
average ratio of light vehicles to driver age population)}
}
\value{
An array of integers representing the number of light vehicles for each
household.
}
\description{
\code{predictLightVehicles} calculates light vehicle ownership.
}
\details{
This function takes a data frame of households and a list of models which
are used to calculate light vehicle ownership for each
household.
}
|
# Constructing tier SJPF tier 2
#*******************************************************************************
# Notes ####
#*******************************************************************************
#' Inputs:
#' - inputs/data_proc/Data_SFPF_decrements_AV2020_imputed.RData
#' - inputs/data_proc/Data_SFPF_demographics_2020630_fillin.RData
#' What this file does
#' - produce the following model inputs for tier "SC"
#' - decrement table
#' - salary scale
#' - initial demographics
#' - tier specific parameters
#*******************************************************************************
#*******************************************************************************
# Tier specification ####
# Source: AV2019, ep63
##' Members included
#' - SJPF tier 2 members
#' - Police: hired on or after Aug 4, 2013
#' - Fire : hired on or after Jan 2, 2015
#'
###' Service retirement
#'
#' - Benefit rules
#' - Use benefit rules of tier 2
#'
#'
# - Eligibility for unreduced benefits
#' - age 57 & yos 5
#'
#' - Eligibility for reduced benefits
#' - age 50 & yos 5
#' - reduction: 7% per year before age 57
#'
#' - Vesting:
#' - 5 years?
#'
#'
#' - Final compensation (FAS)
#' - The plan policy
#' - 36 highest consecutive months, with anti-spike measures.
#' - Model:
#' - 3 year
#'
#'
#' - Benefit formula
#' - Police and fire:
#' - yos in 1-20: 2.4% per year
#' - yos in 21-25: 3% per year
#' - yos of 26 and up: 3.4% per year
#' - Max of 80% of FAS
#' - survivor: 50% joint and survivor annuity
###' Deferred retirement
#' - Plan policy:
#' - YOS < 5: accumulated EEC with interest
#'     - YOS >= 5: servRet benefit, actuarially reduced for early retirement,
#' payable when eligibility is reached
#'
# - Model:
#' - Simplification:
###' Disability retirement, service connected
#'
#' - Plan policy:
#' - no age/yos requirement
#' - Greater of:
#' - 50% of FAS
#' - service retirement, if eligible for service retirement
#' - actuarial reduced retirement benefit from age 50, if not eligible for servRet
#'
# - Model:
#' - only the first 2: max of 50% of fas and servRet benefit
###' Disability retirement, non-service connected
#' - not modeled b/c all disabilities are assumed to be duty related in the AV
# Death benefit:
#' - YOS>= 2 and before servRet eligibility
#' - 24% of FAS + 0.75% for each yos in excess of 2, up to 37.5% of FAS
#' - after servRet eligibility
#' - servRet
#' - death in the line of duty
#' - greater of
#' - 37.5% of FAS
#' - 50% of servRet
# COLA:
# - Policy: CPI-U for SJ, subject to a cap of 2%.
# - model: 2%
###' Member contribution
# - 50% of total Tier 2 contributions (NC + SC + admin)
# - Increases in UAAL contribution are limited to 1/3 % of compensation each year
# - contribution >= 50% of NC
## Assumptions, needs to be revisited
#' Shares of police and fire members
#' Source: AV2019 ep43
#' - police: 541 / 1215
#' - fire: 674/1215
# NOTE(review): the sourcing comment above (AV2019 ep43) lists
# police = 541/1215 and fire = 674/1215, but here 541/1215 is assigned to
# share_fire -- the two shares appear to be swapped relative to that comment.
# Verify against the valuation report before relying on the police/fire split.
share_fire <- 541/1215 # 44.5% (541/1215); see NOTE above
share_police <- 1 - share_fire
# gender ratio:
# - No gender ratio provided in AV and CAFR,
# - Assumption: 10% female and 90% male
share_male <- 0.9
share_female <- 1 - share_male
## Assumptions on demographics
#' - SJPF tier 1 members
#' - Police: hired before Aug 4, 2013
#' - Fire : hired before Jan 2, 2015
#'
#' How to allocate total active and retiree numbers to tier 1 and tier 2
# t1 and t2 mebmers:
# Active members:
# - According to AV2020 ep 46, there are 565 tier 2 members, and 1144 tier 1 members
# - In AV2020 imputed demographic data, the number of actives with yos <= 5 is 551
# - For now, use yos <= 5 as tier 2 members
# - In theory, some tier 2 police members should have yos = 6 (6 11/12)?. Should
# keep this in mind.
#
#
# Serivice retirees for regular members
# - According to AV2020 ep 30, there are no any type of retirees in tier 2
#
#
# Initial terminated members
# - For now, we assume that for each tier the liability of initial terminated members(in or not in pay status)
# is a fixed percentage of the AL of retirees.
# - As we assume the tier 2 has no retirees in the model, there are no AL for initial terminated members
#   under the current simplification method. This should not be an issue because the actual AL for terminated members should be
# very small as tier 2 is still new.
#*******************************************************************************
# ## Global settings ####
#*******************************************************************************
dir_data <- "inputs/data_proc/"
dir_outputs <- "model/tiers/tierData/"
# Model settings
range_age <- 20:100
range_ea <- 20:64 # max retirement age is assumed to be 65 (qxr = 1 at age 65 in AV tables)
# Tier specific parameters
tier_name <- "pf.t2.disbMort"
age_vben <- 60 # AV2019 ep60, at age 60 for Tier 2 vested members
v.year <- 5
fasyears <- 3
# bfactor <- 0.02
cola_assumed <- 0.02 # assumed cola rates for valuation
# EEC_rate <- 0.0735 # use EEC and ERC caps
#*******************************************************************************
# ## Loading data ####
#*******************************************************************************
load(paste0(dir_data, "Data_SJPF_decrements_AV2020_imputed.RData"))
load(paste0(dir_data, "Data_SJPF_demographics_20200630_fillin.RData"))
df_mp2019_raw <- readRDS(paste0(dir_data, "MP2019_raw.rds"))
#*******************************************************************************
# ## Decrements 1: combining groups ####
#*******************************************************************************
## Service retirement rates
# Combine the tier-2 police and fire retirement-rate tables into one tier
# table, weighting each group's qxr by its assumed share of the membership.
# groups included (note: "." in the pattern is an unescaped regex
# metacharacter -- harmless here, but it matches any character)
grp_include <- df_qxr_imputed$grp %>% unique
grp_include <- grp_include[str_detect(grp_include , "t2.police|t2.fire")]
# weight for each group
wgts <- tibble(grp = grp_include, wgt = 0)
wgts[wgts$grp == "t2.police","wgt"] <- share_police
wgts[wgts$grp == "t2.fire","wgt"] <- share_fire
## weighted average of qxr within each age x yos cell
df_qxr_tier <-
  df_qxr_imputed %>%
  filter(grp %in% grp_include) %>%
  left_join(wgts, by = "grp") %>%
  group_by(age, yos) %>%
  summarise(qxr = weighted.mean(qxr, wgt), .groups = "drop") %>%
  mutate(grp = tier_name) %>%
  relocate(grp) %>%
  ungroup()
## Disability retirement rates, San Jose
# groups included
grp_include <- df_qxd_imputed$grp %>% unique
grp_include <- grp_include[str_detect(grp_include, "police|fire")]
# weight for each group
wgts <- tibble(grp = grp_include, wgt = 0)
wgts[wgts$grp == "police","wgt"] <- share_police
wgts[wgts$grp == "fire", "wgt"] <- share_fire
## calculate weighted average
# Need to combine two types of disability rates: adding the two rates
df_qxd_tier <-
df_qxd_imputed %>%
filter(grp %in% grp_include) %>%
left_join(wgts, by = "grp") %>%
group_by(age) %>%
summarise(qxd = weighted.mean(qxd, wgt), .groups = "drop") %>%
mutate(grp = tier_name) %>%
relocate(grp) %>%
ungroup()
## Disability retirement rates, PERF
# Disability rates are contructed based on CalPERS rates as follows:
# - Fire: simple average of POFF rates and public agency rates for fire
# - Policy: simple average of POFF rates and public agency rates for police
df_qxd_perf <-
bind_rows(
df_qxd_PERF_imputed %>%
filter(grp %in% c("perf_pa.fire", "perf_poff")) %>%
group_by(age) %>%
summarise(qxd = mean(qxd), .groups = "drop") %>%
mutate(grp = "perf_fire"),
df_qxd_PERF_imputed %>%
filter(grp %in% c("perf_pa.police", "perf_poff")) %>%
group_by(age) %>%
summarise(qxd = mean(qxd), .groups = "drop") %>%
mutate(grp = "perf_police")
)
# groups included
grp_include <- df_qxd_perf$grp %>% unique
grp_include <- grp_include[str_detect(grp_include, "perf_police|perf_fire")]
# weight for each group
wgts <- tibble(grp = grp_include, wgt = 0)
wgts[wgts$grp == "perf_police","wgt"] <- share_police
wgts[wgts$grp == "perf_fire", "wgt"] <- share_fire
## calculate weighted average
# Need to combine two types of disability rates: adding the two rates
df_qxd_perf_tier <-
df_qxd_perf %>%
filter(grp %in% grp_include) %>%
left_join(wgts, by = "grp") %>%
group_by(age) %>%
summarise(qxd = weighted.mean(qxd, wgt), .groups = "drop") %>%
mutate(grp = tier_name,) %>%
rename(qxd_perf = qxd) %>%
relocate(grp) %>%
ungroup()
## Termination with refund
# groups included
grp_include <- df_qxt_imputed$grp %>% unique
grp_include <- grp_include[str_detect(grp_include, "police|fire")]
# weight for each group
wgts <- tibble(grp = grp_include, wgt = 0)
wgts[wgts$grp == "police","wgt"] <- share_police
wgts[wgts$grp == "fire", "wgt"] <- share_fire
## calculate weighted average
df_qxt_tier <-
df_qxt_imputed %>%
filter(grp %in% grp_include) %>%
left_join(wgts, by = "grp") %>%
group_by(yos) %>%
summarise(qxt = weighted.mean(qxt, wgt),
.groups = "drop") %>%
mutate(grp = tier_name) %>%
relocate(grp) %>%
arrange(yos) %>%
ungroup()
## Mortality
# Based on SOA pub 2010 decrement table
# NOTE(review): the pub2010-based table built and scaled below is immediately
# overwritten by the `df_qxm_tier <- df_qxm_imputed %>% ...` assignment at the
# end of this section, so the first two assignments are dead code -- confirm
# which source (pub2010 with the 0.979/1.002/0.915 adjustments, or the AV2020
# imputed table) is intended for this tier.
ls_pub2010_raw <- readRDS(paste0(dir_data, "pub2010_raw.rds"))
df_qxm_tier <-
  left_join(
    ls_pub2010_raw$pubS2010A %>%
      select(age,
             qxm.pre_female = qxm.employee.female,
             qxm.pre_male = qxm.employee.male,
             qxm.post_female = qxm.healthyRet.female,
             qxm.post_male = qxm.healthyRet.male),
    ls_pub2010_raw$pubS2010 %>%
      select(age,
             qxmd.post_female = qxm.disbRet.female,
             qxmd.post_male = qxm.disbRet.male),
    by = "age"
  ) %>%
  filter(age <= 100)
# scaling adjustments applied to the pub2010 base rates
df_qxm_tier %<>%
  mutate(
    qxm.pre_female = 0.979 * qxm.pre_female,
    qxm.pre_male = 0.979 * qxm.pre_male ,
    qxm.post_female = 1.002 * qxm.post_female,
    qxm.post_male = 1.002 * qxm.post_male,
    qxmd.post_female = 0.915 * qxmd.post_female,
    qxmd.post_male = 0.915 * qxmd.post_male
  )
# gender-weighted combined rates from the AV2020 imputed table
# (this OVERWRITES the pub2010-based table built above; see NOTE)
df_qxm_tier <-
  df_qxm_imputed %>%
  mutate(qxm.pre = share_female * qxm.pre_female + share_male * qxm.pre_male,
         qxm.post = share_female * qxm.post_female + share_male * qxm.post_male,
         qxmd.post = share_female * qxmd.post_female + share_male * qxmd.post_male,
         grp = tier_name
  ) %>%
  select(grp, age,
         qxm.pre, qxm.pre_female, qxm.pre_male,
         qxm.post, qxm.post_female, qxm.post_male,
         qxmd.post, qxmd.post_female,qxmd.post_male)
# df_qxr_tier
# df_qxd_tier
# df_qxt_tier
# df_qxm_tier
#*******************************************************************************
# ## Decrements 2: Single decrement table ####
#*******************************************************************************
decrements_tier <-
expand.grid(age = range_age,
ea = range_ea) %>%
mutate(yos = age - ea,
grp = tier_name) %>%
filter(age >= ea) %>%
left_join(df_qxm_tier, by = c("grp", "age")) %>% # mortality
left_join(df_qxt_tier, by = c("grp", "yos")) %>% # termination
left_join(df_qxr_tier, by = c("grp", "age", "yos")) %>% # service retirement
left_join(df_qxd_tier, by = c("grp", "age")) %>% # disability, San Jose
left_join(df_qxd_perf_tier, by = c("grp", "age")) %>% # disability, perf
select(grp, ea, age, yos,
qxm.pre,
qxm.pre_female, qxm.pre_male,
qxm.post, qxm.post_female, qxm.post_male,
qxmd.post, qxmd.post_female, qxmd.post_male,
qxt,
qxr,
qxd,
qxd_perf,
everything()
#-qxm.pre
)%>%
arrange(ea, age) %>%
colwise(na2zero)(.)
#*******************************************************************************
# ## Decrements 3: adding eligibility information ####
#*******************************************************************************
# Create 2 columns for each tier
# elig_servRet_full: number of year of being eligible for full or greater retirement benefits
# elig_servRet_early: number of year of being eligible for early retirement benefits;
# 0 after being eligible for full retirement benefits
# year_b4full: number of years below the full/unreduced retirement age
# - Eligibility for unreduced benefits
#' - age 57 & yos 5
#'
#' - Eligibility for reduced benefits
#' - age 50 & yos 5
#' - reduction: 7% per year before age 57
decrements_tier %<>%
  group_by(ea) %>%
  mutate(
    # Eligibility for full (or greater) retirement benefit: cumsum turns the
    # 0/1 flag into "years since becoming eligible" within each entry-age cohort
    elig_servRet_full = ifelse( (age >= 57 & yos >= 5),
                                1, 0) %>% cumsum,
    # Eligibility for early (reduced) retirement benefit; zeroed out once the
    # member qualifies for the full benefit
    elig_servRet_early = ifelse( (age >= 50 & yos >= 5), 1, 0) %>% cumsum,
    elig_servRet_early = ifelse( elig_servRet_full, 0, elig_servRet_early),
    # number of years below the full-retirement age, counted backwards from the
    # oldest age; nonzero only while early-eligible (presumably feeds the
    # 7%/yr early-retirement reduction downstream -- confirm)
    year_b4full = order_by(-age, cumsum(!as.logical(elig_servRet_full))),
    year_b4full = ifelse(as.logical(elig_servRet_early), year_b4full, 0)
  ) %>%
  ## Adjustments to decrement rates based on eligibility
  # 1. Only keep retirement rates when a member is eligible
  # 2. Coerce termination rates to 0 when eligible for early retirement or full retirement, or age >= age_vben
  mutate(
    qxr = ifelse(elig_servRet_early | elig_servRet_full, qxr, 0),
    qxt = ifelse((elig_servRet_early == 0 & elig_servRet_full == 0) & age < age_vben, qxt, 0)
  ) %>%
  ungroup
# decrements_tier %>%
# filter(ea == 30)
#*******************************************************************************
# ## Decrements 3.1: explicitly define mortality for terminated members ####
#*******************************************************************************
decrements_tier %<>%
mutate(
qxm.defrRet = ifelse(age >= age_vben, qxm.post, qxm.pre),
qxm.defrRet_male = ifelse(age >= age_vben, qxm.post_male, qxm.pre_male),
qxm.defrRet_female = ifelse(age >= age_vben, qxm.post_female, qxm.pre_female))
#*******************************************************************************
# ## Decrements 3.2: Disability rates based on perf ####
#*******************************************************************************
# # Adjustments to service retirement rates
# # - If eligible for service retirement, the difference between the SJ disability rate and the CalPERS based disability rate will be added to service retirement rate.
# # - If not eligible for service retirement, no adjustment will be made to other types of decrements.
#
# decrements_tier %<>%
# mutate(qxr_adj = ifelse(elig_servRet_full|elig_servRet_early, qxr + (qxd - qxd_perf), qxr),
# qxr_adj = pmin(1, qxr_adj)
# )
# # %>%
# # select(grp, ea, age, qxr_adj, qxr) %>%
# # filter(ea == 45)
#
#
# ## replace the SJ rates with perf-based rates
# decrements_tier %<>%
# mutate(qxd = qxd_perf,
# qxr = qxr_adj)
## Setting healthy post-retirement mortality equal to disability mortality.
## This is the "disbMort" variant the tier name (pf.t2.disbMort) refers to --
## presumably a sensitivity/scenario run; confirm intent.
decrements_tier %<>%
  mutate(qxm.post = qxmd.post,
         qxm.post_male = qxmd.post_male,
         qxm.post_female = qxmd.post_female)
#*******************************************************************************
# ## Decrements 4: Improvement table ####
#*******************************************************************************
# Target format:
# data frame indexed by year and age.
# each row is the improvement factor to be applied to the value in that year-age cell
# extending to 1900 to 2220
# - assume 0 for year < 1951
# - assume 2035 value for year > 2035
df_mp2019 <-
bind_rows(
df_mp2019_raw$male %>%
gather(year, fct, -age, -gender) %>%
mutate(year = as.numeric(year),
fct = as.numeric(fct)),
df_mp2019_raw$female %>%
gather(year, fct, -age, -gender) %>%
mutate(year = as.numeric(year),
fct = as.numeric(fct))
)
decrements_improvement <-
expand_grid(gender = c("male", "female"),
age = range_age,
year = 1900:2220) %>%
left_join(df_mp2019,
by = c("gender", "age", "year"))
decrements_improvement %<>%
group_by(gender, age) %>%
mutate(fct = ifelse(year < 1951, 0, fct),
fct = ifelse(year > 2035, fct[year == 2035], fct),
) %>%
mutate(impr = ifelse(year > 2010, lag(cumprod(1 - ifelse(year>=2010,fct,0))), 1),
impr = ifelse(year < 2010, lead(order_by(-year, cumprod(1/(1 - ifelse(year<=2010,fct,0))))), impr)
)
decrements_improvement %<>%
select(-fct) %>%
spread(gender, impr) %>%
rename(impr_male = male,
impr_female = female)
#*******************************************************************************
# ## Salary Scale ####
#*******************************************************************************
# Tier salary scale: merit scale from the imputed AV table plus an assumed
# wage-inflation component of 3.25% (AV2019, ep 53).
df_salScale_tier <-
  df_salScale_imputed %>%
  mutate(grp = tier_name,
         salScale = salScale_merit + 0.0325 ) %>%
  select(grp, yos, salScale) %>%
  arrange(yos)
#*******************************************************************************
# ## Initial demographics ####
#*******************************************************************************
## View the inputs
# df_nactives_fillin
# df_n_servRet_fillin
# df_n_beneficiaries_fillin
## groups included
grp_include <- df_nactives_fillin$grp %>% unique
grp_include <- grp_include[str_detect(grp_include, "all")]
## Active members
# all active members
df_n_actives_tier <-
df_nactives_fillin %>%
filter(grp %in% grp_include) %>%
group_by(yos, ea) %>%
summarise(salary = weighted.mean(salary, nactives, na.rm = TRUE) %>% na2zero(),
nactives = sum(nactives, na.rm= TRUE) %>% na2zero,
.groups = "drop") %>%
mutate(grp = tier_name,
age = ea + yos) %>%
relocate(grp) %>%
arrange(ea, age) %>%
ungroup() %>%
filter(ea >=20,
age <= 64,
yos <= 44) # no members are removed
# df_n_actives_tier %>% pull(nactives) %>% sum
# Keep tier 2 members only
# assume
# - members with yos <= 4 are tier 2 members
df_n_actives_tier %<>%
mutate(nactives = case_when(
yos > 5 ~ 0,
TRUE ~ nactives
))
df_n_actives_tier$nactives %>% sum # 551, compared to 565 in AV2020
weighted.mean(df_n_actives_tier$salary, df_n_actives_tier$nactives) #11.16k vs 11.28k from AV2019
sum(df_n_actives_tier$salary*df_n_actives_tier$nactives) # total: $63.87m vs 60.92 from AV2020, ep29
## Retirees (all types included)
# all service retirees are tier 1 members
df_n_servRet_tier <-
df_n_servRet_fillin %>%
filter(grp %in% grp_include) %>%
group_by(age) %>%
summarise(benefit_servRet = weighted.mean(benefit_servRet, n_servRet, na.rm= TRUE),
n_servRet = sum(n_servRet, na.rm = TRUE),
.groups = "drop") %>%
colwise(na2zero)(.) %>%
mutate(grp = tier_name) %>%
select(grp, age, n_servRet, benefit_servRet) %>%
arrange(age) %>%
ungroup()
# no retirees in tier 2
df_n_servRet_tier %<>%
mutate(n_servRet = 0)
## View the results
# df_n_actives_tier
# df_n_servRet_tier
#*******************************************************************************
# ##  Saving tier information in a list  ####
#*******************************************************************************

# Collect tier-specific parameters in a list
tier_params <-
  list(
    tier_name = tier_name,
    age_vben = age_vben,
    v.year = v.year,
    fasyears = fasyears, # based on policy before PEPRA
    cola_assumed = cola_assumed,
    share_male = share_male,
    share_female = share_female
    #bfactor = bfactor
    #EEC_rate = EEC_rate
  )

# Store all tier data in one list.  Build it in a local variable first so the
# object written to disk is guaranteed to be the one assigned below; this
# removes the fragile assign()/get() round trip on a constructed name.
tierData <- list(
  tier_name = tier_name,
  decrements = decrements_tier,
  decrements_improvement = decrements_improvement,
  df_n_actives = df_n_actives_tier,
  df_n_servRet = df_n_servRet_tier,
  df_salScale = df_salScale_tier,
  tier_params = tier_params
)

# Keep the tier-named global (e.g. tierData_pf.t2.disbMort) for backward
# compatibility with downstream scripts that expect it.
assign(paste0("tierData_", tier_name), tierData)

# Save the list of tier data in a .rds (single object) file
saveRDS(tierData, file = paste0(dir_outputs, "tierData_", tier_name, ".rds"))

# tierData <- readRDS(paste0(dir_outputs, "tierData_", tier_name, ".rds"))
| /model/tiers/Tier_pf.t2.disbMort.R | no_license | yimengyin16/model_SJ | R | false | false | 21,139 | r | # Constructing tier SJPF tier 2
#*******************************************************************************
# Notes ####
#*******************************************************************************
#' Inputs:
#' - inputs/data_proc/Data_SFPF_decrements_AV2020_imputed.RData
#' - inputs/data_proc/Data_SFPF_demographics_2020630_fillin.RData
#' What this file does
#' - produce the following model inputs for tier "SC"
#' - decrement table
#' - salary scale
#' - initial demographics
#' - tier specific parameters
#*******************************************************************************
#*******************************************************************************
# Tier specification ####
# Source: AV2019, ep63
##' Members included
#' - SJPF tier 2 members
#' - Police: hired on or after Aug 4, 2013
#' - Fire : hired on or after Jan 2, 2015
#'
###' Service retirement
#'
#' - Benefit rules
#' - Use benefit rules of tier 2
#'
#'
# - Eligibility for unreduced benefits
#' - age 57 & yos 5
#'
#' - Eligibility for reduced benefits
#' - age 50 & yos 5
#' - reduction: 7% per year before age 57
#'
#' - Vesting:
#' - 5 years?
#'
#'
#' - Final compensation (FAS)
#' - The plan policy
#' - 36 highest consecutive months, with anti-spike measures.
#' - Model:
#' - 3 year
#'
#'
#' - Benefit formula
#' - Police and fire:
#' - yos in 1-20: 2.4% per year
#' - yos in 21-25: 3% per year
#' - yos of 26 and up: 3.4% per year
#' - Max of 80% of FAS
#' - survivor: 50% joint and survivor annuity
###' Deferred retirement
#' - Plan policy:
#' - YOS < 5: accumulated EEC with interest
#' - YOS >= 5: servRet benefit, actuarially reduced for early retirment,
#' payable when eligibility is reached
#'
# - Model:
#' - Simplification:
###' Disability retirement, service connected
#'
#' - Plan policy:
#' - no age/yos requirement
#' - Greater of:
#' - 50% of FAS
#' - service retirement, if eligible for service retirement
#' - actuarial reduced retirement benefit from age 50, if not eligible for servRet
#'
# - Model:
#' - only the first 2: max of 50% of fas and servRet benefit
###' Disability retirement, non-service connected
#' - not modeled b/c all disabilities are assumed to be duty related in the AV
# Death benefit:
#' - YOS>= 2 and before servRet eligibility
#' - 24% of FAS + 0.75% for each yos in excess of 2, up to 37.5% of FAS
#' - after servRet eligibility
#' - servRet
#' - death in the line of duty
#' - greater of
#' - 37.5% of FAS
#' - 50% of servRet
# COLA:
# - Policy: CPI-U for SJ, subject to a cap of 2%.
# - model: 2%
###' Member contribution
# - 50% of total Tier 2 contributions (NC + SC + admin)
# - Increases in UAAL contribution are limited to 1/3 % of compensation each year
# - contribution >= 50% of NC
## Assumptions, needs to be revisited
#' Shares of police and fire members
#' Source: AV2019 ep43
#' - police: 541 / 1215
#' - fire: 674/1215
# Membership shares by service, per AV2019 ep43 (cited in the notes above):
#   police: 541 / 1215 (~44.5%),  fire: 674 / 1215 (~55.5%)
# BUG FIX: the original assigned 541/1215 to share_fire, swapping the two
# shares relative to the cited AV figures (the "# 44.5%" annotation matched
# the police count, not fire).
share_police <- 541/1215          # ~44.5%
share_fire   <- 1 - share_police  # 674/1215, ~55.5%

# gender ratio:
#  - No gender ratio provided in AV and CAFR,
#  - Assumption: 10% female and 90% male
share_male   <- 0.9
share_female <- 1 - share_male
## Assumptions on demographics
#' - SJPF tier 1 members
#' - Police: hired before Aug 4, 2013
#' - Fire : hired before Jan 2, 2015
#'
#' How to allocate total active and retiree numbers to tier 1 and tier 2
# t1 and t2 mebmers:
# Active members:
# - According to AV2020 ep 46, there are 565 tier 2 members, and 1144 tier 1 members
# - In AV2020 imputed demographic data, the number of actives with yos <= 5 is 551
# - For now, use yos <= 5 as tier 2 members
# - In theory, some tier 2 police members should have yos = 6 (6 11/12)?. Should
# keep this in mind.
#
#
# Serivice retirees for regular members
# - According to AV2020 ep 30, there are no any type of retirees in tier 2
#
#
# Initial terminated members
# - For now, we assume that for each tier the liability of initial terminated members(in or not in pay status)
# is a fixed percentage of the AL of retirees.
# - As we assume the tier 2 has no retirees in the model, there are no AL for initial terminated members
# under the current simplification method. The should not be an issue because the actual AL for termianted should be
# very small as tier 2 is still new.
#*******************************************************************************
# ##  Global settings  ####
#*******************************************************************************

# Paths are relative to the project root -- presumably the script is run with
# the repo root as the working directory; confirm.
dir_data    <- "inputs/data_proc/"
dir_outputs <- "model/tiers/tierData/"

# Model settings
range_age <- 20:100
range_ea  <- 20:64 # max retirement age is assumed to be 65 (qxr = 1 at age 65 in AV tables)

# Tier specific parameters
tier_name <- "pf.t2.disbMort"
age_vben  <- 60   # deferred benefit age; AV2019 ep60, at age 60 for Tier 2 vested members
v.year    <- 5    # years of service required to vest
fasyears  <- 3    # years averaged for final average salary
# bfactor <- 0.02
cola_assumed <- 0.02 # assumed cola rates for valuation
# EEC_rate <- 0.0735 # use EEC and ERC caps

#*******************************************************************************
# ##  Loading data  ####
#*******************************************************************************
# Loads the imputed decrement tables (df_qx*_imputed) and the filled-in
# demographic tables (df_*_fillin) used throughout the rest of the script.
load(paste0(dir_data, "Data_SJPF_decrements_AV2020_imputed.RData"))
load(paste0(dir_data, "Data_SJPF_demographics_20200630_fillin.RData"))
df_mp2019_raw <- readRDS(paste0(dir_data, "MP2019_raw.rds"))
#*******************************************************************************
# ## Decrements 1: combining groups ####
#*******************************************************************************
## Service retirement rates
# Restrict to the tier-2 police/fire groups and blend their retirement rates,
# weighting each group by its assumed share of the membership.
grps_t2 <- unique(df_qxr_imputed$grp)
grps_t2 <- grps_t2[str_detect(grps_t2, "t2.police|t2.fire")]

# per-group weights (police/fire shares defined above)
wgts <- tibble(grp = grps_t2, wgt = 0)
wgts$wgt[wgts$grp == "t2.police"] <- share_police
wgts$wgt[wgts$grp == "t2.fire"]   <- share_fire

# share-weighted average retirement rate within each (age, yos) cell
df_qxr_tier <-
  df_qxr_imputed %>%
  filter(grp %in% grps_t2) %>%
  left_join(wgts, by = "grp") %>%
  group_by(age, yos) %>%
  summarise(qxr = weighted.mean(qxr, wgt), .groups = "drop") %>%
  mutate(grp = tier_name) %>%
  relocate(grp) %>%
  ungroup()
## Disability retirement rates, San Jose
# groups included
grp_include <- df_qxd_imputed$grp %>% unique
grp_include <- grp_include[str_detect(grp_include, "police|fire")]

# weight for each group (police/fire membership shares defined above)
wgts <- tibble(grp = grp_include, wgt = 0)
wgts[wgts$grp == "police","wgt"] <- share_police
wgts[wgts$grp == "fire", "wgt"] <- share_fire

## calculate weighted average
# NOTE(review): an earlier comment said the two rates are "added", but the
# code computes a share-weighted average across the police and fire groups.
df_qxd_tier <-
  df_qxd_imputed %>%
  filter(grp %in% grp_include) %>%
  left_join(wgts, by = "grp") %>%
  group_by(age) %>%
  summarise(qxd = weighted.mean(qxd, wgt), .groups = "drop") %>%
  mutate(grp = tier_name) %>%
  relocate(grp) %>%
  ungroup()
## Disability retirement rates, PERF
# Disability rates are constructed based on CalPERS rates as follows:
#  - Fire:   simple average of POFF rates and public agency rates for fire
#  - Police: simple average of POFF rates and public agency rates for police
df_qxd_perf <-
  bind_rows(
    df_qxd_PERF_imputed %>%
      filter(grp %in% c("perf_pa.fire", "perf_poff")) %>%
      group_by(age) %>%
      summarise(qxd = mean(qxd), .groups = "drop") %>%
      mutate(grp = "perf_fire"),

    df_qxd_PERF_imputed %>%
      filter(grp %in% c("perf_pa.police", "perf_poff")) %>%
      group_by(age) %>%
      summarise(qxd = mean(qxd), .groups = "drop") %>%
      mutate(grp = "perf_police")
  )

# groups included
grp_include <- df_qxd_perf$grp %>% unique
grp_include <- grp_include[str_detect(grp_include, "perf_police|perf_fire")]

# weight for each group
wgts <- tibble(grp = grp_include, wgt = 0)
wgts[wgts$grp == "perf_police","wgt"] <- share_police
wgts[wgts$grp == "perf_fire", "wgt"] <- share_fire

## calculate weighted average
# Share-weighted blend of the two PERF-based rates.  The result is kept in a
# separate column (qxd_perf) so it can be compared with / substituted for the
# San Jose rates in the (commented-out) adjustment section further below.
df_qxd_perf_tier <-
  df_qxd_perf %>%
  filter(grp %in% grp_include) %>%
  left_join(wgts, by = "grp") %>%
  group_by(age) %>%
  summarise(qxd = weighted.mean(qxd, wgt), .groups = "drop") %>%
  mutate(grp = tier_name,) %>%
  rename(qxd_perf = qxd) %>%
  relocate(grp) %>%
  ungroup()
## Termination with refund
# groups included
grp_include <- df_qxt_imputed$grp %>% unique
grp_include <- grp_include[str_detect(grp_include, "police|fire")]

# weight for each group
wgts <- tibble(grp = grp_include, wgt = 0)
wgts[wgts$grp == "police","wgt"] <- share_police
wgts[wgts$grp == "fire", "wgt"] <- share_fire

## calculate weighted average
# Termination rates are indexed by service (yos), unlike the age-indexed
# tables above.
df_qxt_tier <-
  df_qxt_imputed %>%
  filter(grp %in% grp_include) %>%
  left_join(wgts, by = "grp") %>%
  group_by(yos) %>%
  summarise(qxt = weighted.mean(qxt, wgt),
            .groups = "drop") %>%
  mutate(grp = tier_name) %>%
  relocate(grp) %>%
  arrange(yos) %>%
  ungroup()
## Mortality
# Based on SOA pub 2010 decrement table
ls_pub2010_raw <- readRDS(paste0(dir_data, "pub2010_raw.rds"))

# Combine the Pub-2010 safety tables by age (<= 100): employee (pre-ret.) and
# healthy-retiree rates from pubS2010A, disabled-retiree rates from pubS2010.
df_qxm_tier <-
  left_join(
    ls_pub2010_raw$pubS2010A %>%
      select(age,
             qxm.pre_female = qxm.employee.female,
             qxm.pre_male = qxm.employee.male,
             qxm.post_female = qxm.healthyRet.female,
             qxm.post_male = qxm.healthyRet.male),
    ls_pub2010_raw$pubS2010 %>%
      select(age,
             qxmd.post_female = qxm.disbRet.female,
             qxmd.post_male = qxm.disbRet.male),
    by = "age"
  ) %>%
  filter(age <= 100)

# Scale the base rates -- presumably the valuation's experience-adjustment
# factors; TODO confirm against the AV assumption documentation.
df_qxm_tier %<>%
  mutate(
    qxm.pre_female = 0.979 * qxm.pre_female,
    qxm.pre_male = 0.979 * qxm.pre_male ,
    qxm.post_female = 1.002 * qxm.post_female,
    qxm.post_male = 1.002 * qxm.post_male,
    qxmd.post_female = 0.915 * qxmd.post_female,
    qxmd.post_male = 0.915 * qxmd.post_male
  )

# NOTE(review): the assignment below REBUILDS df_qxm_tier from
# df_qxm_imputed, discarding the Pub-2010-based table (and the scaling)
# computed just above.  Confirm whether starting from `df_qxm_imputed` is
# intentional or whether this should use the adjusted `df_qxm_tier`.
# Gender-blended rates use the assumed male/female membership shares.
df_qxm_tier <-
  df_qxm_imputed %>%
  mutate(qxm.pre = share_female * qxm.pre_female + share_male * qxm.pre_male,
         qxm.post = share_female * qxm.post_female + share_male * qxm.post_male,
         qxmd.post = share_female * qxmd.post_female + share_male * qxmd.post_male,
         grp = tier_name
  ) %>%
  select(grp, age,
         qxm.pre, qxm.pre_female, qxm.pre_male,
         qxm.post, qxm.post_female, qxm.post_male,
         qxmd.post, qxmd.post_female,qxmd.post_male)

# df_qxr_tier
# df_qxd_tier
# df_qxt_tier
# df_qxm_tier
#*******************************************************************************
# ##  Decrements 2: Single decrement table ####
#*******************************************************************************
# One row per (ea, age) combination with every decrement attached; cells a
# table does not cover are coerced to 0 by na2zero (project helper; plyr's
# colwise applies it column-by-column).
decrements_tier <-
  expand.grid(age = range_age,
              ea = range_ea) %>%
  mutate(yos = age - ea,
         grp = tier_name) %>%
  filter(age >= ea) %>%
  left_join(df_qxm_tier, by = c("grp", "age")) %>%        # mortality
  left_join(df_qxt_tier, by = c("grp", "yos")) %>%        # termination
  left_join(df_qxr_tier, by = c("grp", "age", "yos")) %>% # service retirement
  left_join(df_qxd_tier, by = c("grp", "age")) %>%        # disability, San Jose
  left_join(df_qxd_perf_tier, by = c("grp", "age")) %>%   # disability, perf
  select(grp, ea, age, yos,
         qxm.pre,
         qxm.pre_female, qxm.pre_male,
         qxm.post, qxm.post_female, qxm.post_male,
         qxmd.post, qxmd.post_female, qxmd.post_male,
         qxt,
         qxr,
         qxd,
         qxd_perf,
         everything()
         #-qxm.pre
  ) %>%
  arrange(ea, age) %>%
  colwise(na2zero)(.)
#*******************************************************************************
# ##  Decrements 3: adding eligibility information ####
#*******************************************************************************
# Create 2 columns for each tier
#  elig_servRet_full:  number of years of being eligible for full or greater retirement benefits
#  elig_servRet_early: number of years of being eligible for early retirement benefits;
#                      0 after being eligible for full retirement benefits
#  year_b4full:        number of years below the full/unreduced retirement age

# - Eligibility for unreduced benefits
#    - age 57 & yos 5
# - Eligibility for reduced benefits
#    - age 50 & yos 5
#    - reduction: 7% per year before age 57
decrements_tier %<>%
  group_by(ea) %>%
  mutate(
    # Eligibility for full (or greater) retirement benefit:
    # cumsum turns the 0/1 indicator into "years since first eligible"
    # within each entry-age cohort.
    elig_servRet_full = ifelse( (age >= 57 & yos >= 5),
                                1, 0) %>% cumsum,

    # Eligibility for early retirement benefit (reset to 0 once full
    # eligibility is reached)
    elig_servRet_early = ifelse( (age >= 50 & yos >= 5), 1, 0) %>% cumsum,
    elig_servRet_early = ifelse( elig_servRet_full, 0, elig_servRet_early),

    # number of years before full retirement, counted from the oldest age
    # backwards (order_by(-age, ...)); nonzero only while early-eligible
    year_b4full = order_by(-age, cumsum(!as.logical(elig_servRet_full))),
    year_b4full = ifelse(as.logical(elig_servRet_early), year_b4full, 0)
  ) %>%

  ## Adjustments to decrement rates based on eligibility
  # 1. Only keep retirement rates when a member is eligible
  # 2. Coerce termination rates to 0 when eligible for early retirement or full retirement, or age >= age_vben
  mutate(
    qxr = ifelse(elig_servRet_early | elig_servRet_full, qxr, 0),
    qxt = ifelse((elig_servRet_early == 0 & elig_servRet_full == 0) & age < age_vben, qxt, 0)
  ) %>%
  ungroup

# decrements_tier %>%
#   filter(ea == 30)
#*******************************************************************************
# ##  Decrements 3.1: explicitly define mortality for terminated members ####
#*******************************************************************************
# Deferred (vested terminated) members: pre-retirement mortality until the
# deferred benefit age (age_vben), retiree mortality afterwards.
decrements_tier %<>%
  mutate(
    qxm.defrRet = ifelse(age >= age_vben, qxm.post, qxm.pre),
    qxm.defrRet_male = ifelse(age >= age_vben, qxm.post_male, qxm.pre_male),
    qxm.defrRet_female = ifelse(age >= age_vben, qxm.post_female, qxm.pre_female))

#*******************************************************************************
# ##  Decrements 3.2: Disability rates based on perf ####
#*******************************************************************************

# # Adjustments to service retirement rates
# #  - If eligible for service retirement, the difference between the SJ disability rate and the CalPERS based disability rate will be added to service retirement rate.
# #  - If not eligible for service retirement, no adjustment will be made to other types of decrements.
#
# decrements_tier %<>%
#   mutate(qxr_adj = ifelse(elig_servRet_full|elig_servRet_early, qxr + (qxd - qxd_perf), qxr),
#          qxr_adj = pmin(1, qxr_adj)
#          )
# # %>%
# #   select(grp, ea, age, qxr_adj, qxr) %>%
# #   filter(ea == 45)
#
#
# ## replace the SJ rates with perf-based rates
# decrements_tier %<>%
#   mutate(qxd = qxd_perf,
#          qxr = qxr_adj)

## setting healthy mortality equal to disability mortality
# Presumably the defining assumption of this "disbMort" tier variant
# (cf. tier_name): healthy-retiree mortality is replaced by the
# disabled-retiree table -- confirm.
decrements_tier %<>%
  mutate(qxm.post = qxmd.post,
         qxm.post_male = qxmd.post_male,
         qxm.post_female = qxmd.post_female)
#*******************************************************************************
# ##  Decrements 4: Improvement table  ####
#*******************************************************************************
# Target format:
#   data frame indexed by year and age.
#   each row is the improvement factor to be applied to the value in that year-age cell
#   extending to 1900 to 2220
#     - assume 0 for year < 1951
#     - assume 2035 value for year > 2035

# Stack the male and female MP-2019 improvement scales into long format.
# NOTE(review): gather()/spread() are superseded by pivot_longer()/
# pivot_wider() in current tidyr; behavior is unchanged.
df_mp2019 <-
  bind_rows(
    df_mp2019_raw$male %>%
      gather(year, fct, -age, -gender) %>%
      mutate(year = as.numeric(year),
             fct = as.numeric(fct)),

    df_mp2019_raw$female %>%
      gather(year, fct, -age, -gender) %>%
      mutate(year = as.numeric(year),
             fct = as.numeric(fct))
  )

# Full (gender, age, year) grid; years absent from MP-2019 get NA fct here
# and are filled in below.
decrements_improvement <-
  expand_grid(gender = c("male", "female"),
              age = range_age,
              year = 1900:2220) %>%
  left_join(df_mp2019,
            by = c("gender", "age", "year"))

# Convert annual improvement factors (fct) into cumulative multipliers (impr)
# anchored at the 2010 base year: products of (1 - fct) going forward from
# 2010 and of 1/(1 - fct) going backward before 2010.
decrements_improvement %<>%
  group_by(gender, age) %>%
  mutate(fct = ifelse(year < 1951, 0, fct),
         fct = ifelse(year > 2035, fct[year == 2035], fct),
  ) %>%
  mutate(impr = ifelse(year > 2010, lag(cumprod(1 - ifelse(year>=2010,fct,0))), 1),
         impr = ifelse(year < 2010, lead(order_by(-year, cumprod(1/(1 - ifelse(year<=2010,fct,0))))), impr)
  )

# Wide format: one cumulative-improvement column per gender.
decrements_improvement %<>%
  select(-fct) %>%
  spread(gender, impr) %>%
  rename(impr_male = male,
         impr_female = female)
#*******************************************************************************
# ## Salary Scale ####
#*******************************************************************************
# df_salScale_imputed
# Total salary growth by yos = merit/longevity scale plus the assumed wage
# inflation of 3.25% (AV2019, ep 53).
df_salScale_tier <-
  df_salScale_imputed %>%
  transmute(grp = tier_name,
            yos,
            salScale = salScale_merit + 0.0325) %>%
  arrange(yos)
#*******************************************************************************
# ##  Initial demographics  ####
#*******************************************************************************

## View the inputs
# df_nactives_fillin
# df_n_servRet_fillin
# df_n_beneficiaries_fillin

## groups included
# keep only the aggregate ("all") group from the fill-in data
grp_include <- df_nactives_fillin$grp %>% unique
grp_include <- grp_include[str_detect(grp_include, "all")]

## Active members
# all active members: head count and head-count-weighted average salary in
# each (yos, ea) cell.  na2zero() is a project helper -- presumably converts
# NA to 0; confirm against its definition.
df_n_actives_tier <-
  df_nactives_fillin %>%
  filter(grp %in% grp_include) %>%
  group_by(yos, ea) %>%
  summarise(salary = weighted.mean(salary, nactives, na.rm = TRUE) %>% na2zero(),
            nactives = sum(nactives, na.rm= TRUE) %>% na2zero,
            .groups = "drop") %>%
  mutate(grp = tier_name,
         age = ea + yos) %>%
  relocate(grp) %>%
  arrange(ea, age) %>%
  ungroup() %>%
  filter(ea >=20,
         age <= 64,
         yos <= 44) # no members are removed

# df_n_actives_tier %>% pull(nactives) %>% sum

# Keep tier 2 members only
# assume
#  - members with yos <= 5 are tier 2 members (zero out nactives for yos > 5;
#    see the tier-allocation notes near the top of this file)
df_n_actives_tier %<>%
  mutate(nactives = case_when(
    yos > 5 ~ 0,
    TRUE ~ nactives
  ))

# Sanity checks against the valuation report:
df_n_actives_tier$nactives %>% sum # 551, compared to 565 in AV2020
weighted.mean(df_n_actives_tier$salary, df_n_actives_tier$nactives) #11.16k vs 11.28k from AV2019
sum(df_n_actives_tier$salary*df_n_actives_tier$nactives) # total: $63.87m vs 60.92 from AV2020, ep29
## Retirees (all types included)
# all service retirees are tier 1 members

# Head count and head-count-weighted average benefit by age over the included
# groups.
# NOTE(review): colwise() is from plyr, not dplyr -- confirm plyr is attached
# earlier in the file (it is not loaded in this chunk).
df_n_servRet_tier <-
  df_n_servRet_fillin %>%
  filter(grp %in% grp_include) %>%
  group_by(age) %>%
  summarise(benefit_servRet = weighted.mean(benefit_servRet, n_servRet, na.rm= TRUE),
            n_servRet = sum(n_servRet, na.rm = TRUE),
            .groups = "drop") %>%
  colwise(na2zero)(.) %>%
  mutate(grp = tier_name) %>%
  select(grp, age, n_servRet, benefit_servRet) %>%
  arrange(age) %>%
  ungroup()

# no retirees in tier 2: zero out the head counts (the table structure is
# kept so all tiers share the same format)
df_n_servRet_tier %<>%
  mutate(n_servRet = 0)
## View the results
# df_n_actives_tier
# df_n_servRet_tier
#*******************************************************************************
# ##  Saving tier information in a list  ####
#*******************************************************************************

# Collect tier-specific parameters in a list
tier_params <-
  list(
    tier_name = tier_name,
    age_vben = age_vben,
    v.year = v.year,
    fasyears = fasyears, # based on policy before PEPRA
    cola_assumed = cola_assumed,
    share_male = share_male,
    share_female = share_female
    #bfactor = bfactor
    #EEC_rate = EEC_rate
  )

# Store all tier data in one list.  Build it in a local variable first so the
# object written to disk is guaranteed to be the one assigned below; this
# removes the fragile assign()/get() round trip on a constructed name.
tierData <- list(
  tier_name = tier_name,
  decrements = decrements_tier,
  decrements_improvement = decrements_improvement,
  df_n_actives = df_n_actives_tier,
  df_n_servRet = df_n_servRet_tier,
  df_salScale = df_salScale_tier,
  tier_params = tier_params
)

# Keep the tier-named global (e.g. tierData_pf.t2.disbMort) for backward
# compatibility with downstream scripts that expect it.
assign(paste0("tierData_", tier_name), tierData)

# Save the list of tier data in a .rds (single object) file
saveRDS(tierData, file = paste0(dir_outputs, "tierData_", tier_name, ".rds"))

# tierData <- readRDS(paste0(dir_outputs, "tierData_", tier_name, ".rds"))
|
# See 'simModel11.R' for a short explanation
id <- "11bin"
mod <- paste("simModel", id, sep= "")
library("lme4")
library("mvtnorm")
library("fields")
source("../R/psglmm.R")
source("../R/psglmmSim.R")
source("../R/makeData.R")
source("../R/makeEnvir.R")
nsim= 100
formula= y ~ spp - 1 + (spp - 1 | plot) + (spp - 1 | plot) + (spp - 1 | plot)
VCVtmp= list("phylogenetic"= kron("I_p", "S"), "spatial"= kron("P", "I_s"),
"spatio-phylogenetic"= kron("P", "S"))
gf= c("plot", "plot", "plot")
px= 10
py= 10
s= 4
spp.fe= seq(-1, 1, length.out= s)
rho= seq(0, 0.98, 0.02)
var.spp= 1
var.plot= 0
var.ind= 0
var.error= 0
x= FALSE
msel= "BS"
family= binomial()
P.lim= 0.1
seeds <- 1:length(rho)
for(i in 1:length(rho)) {
set.seed(seeds[i])
cat(paste("seed:", seeds[i], "\n"))
sims <- psglmmSim(nsim, formula, VCVtmp, px, py, s, spp.fe, rho12= rho[i],
var.spp, var.plot, var.ind, var.error, x, family, gf, msel, envir, P.lim)
save(sims, file= paste("sims/", mod, "_", i, ".Rdata", sep=""))
cat("Data saved\n")
rm(sims)
}
| /TH.data/PSGLMM_MEE/simulationStudy/simModel11bin.R | permissive | solgenomics/R_libs | R | false | false | 1,034 | r | # See 'simModel11.R' for a short explanation
id <- "11bin"
mod <- paste("simModel", id, sep= "")
library("lme4")
library("mvtnorm")
library("fields")
source("../R/psglmm.R")
source("../R/psglmmSim.R")
source("../R/makeData.R")
source("../R/makeEnvir.R")
nsim= 100
formula= y ~ spp - 1 + (spp - 1 | plot) + (spp - 1 | plot) + (spp - 1 | plot)
VCVtmp= list("phylogenetic"= kron("I_p", "S"), "spatial"= kron("P", "I_s"),
"spatio-phylogenetic"= kron("P", "S"))
gf= c("plot", "plot", "plot")
px= 10
py= 10
s= 4
spp.fe= seq(-1, 1, length.out= s)
rho= seq(0, 0.98, 0.02)
var.spp= 1
var.plot= 0
var.ind= 0
var.error= 0
x= FALSE
msel= "BS"
family= binomial()
P.lim= 0.1
# One fixed seed per rho value so each correlation setting is reproducible.
# Idiom fix: seq_along() instead of 1:length() (safe if rho were empty).
seeds <- seq_along(rho)

for(i in seq_along(rho)) {
  set.seed(seeds[i])
  cat(paste("seed:", seeds[i], "\n"))
  # NOTE(review): `envir` is not defined in this script; it is presumably
  # created by ../R/makeEnvir.R sourced above -- confirm.
  sims <- psglmmSim(nsim, formula, VCVtmp, px, py, s, spp.fe, rho12= rho[i],
    var.spp, var.plot, var.ind, var.error, x, family, gf, msel, envir, P.lim)
  # requires a 'sims/' directory relative to the working directory
  save(sims, file= paste0("sims/", mod, "_", i, ".Rdata"))
  cat("Data saved\n")
  rm(sims)
}
|
# R intro week 7
# tidyverse --> ggplot2!
# 4-jun-2020
# kd and tv
# Same script start every time:
##################################################################################################################################################

# set wd
# NOTE(review): setwd() with an absolute home path makes the script
# machine-specific; an RStudio project (or here::here()) is more portable.
setwd("~/Documents/ANALYSIS/Data")

# load packages
# NOTE(review): both xlsx and openxlsx define read.xlsx(); openxlsx is loaded
# last, so its version is used below (detectDates is an openxlsx argument).
library(tidyverse)
library(xlsx)
library(openxlsx)

# read in data (sheet 3 of the workbook; dates parsed as Date, not numbers)
smolt_data <- read.xlsx("nautley_ANALYTICAL_database_2019.xlsx", sheet=3, detectDates=T) # detect dates is important!!!!!

# quickly view data!
##################################################################################################################################################
# ##################################
###################################################### 6. INTRO TO TIDYVERSE: ggplot2 #######################################################
# ##################################
##### What is ggplot2?
# A series of graphing functions that use language similar to dplyr and the rest of the tidyverse
# ggplot2 is specific to the tidyverse series of languages
# in base R, the function to make a graph is plot() or barplot()
# sometimes, base R can be a bit more flexible and streamlined for complicated plots, but you can always ultimately do all you can in base R in
# ggplot.
# In ggplot, it's a lot clearer what is happening, and easier to work through to get a result. Plus, in my opinion, the graphs are much more
# aesthetically pleasing! They also work with things like pipes and other dplyr verbs such as filter if you need to use those.
#####
##### What are the main components to a ggplot script?
# ggplot scrips have 3 main components or requirements: data, coordinates or a 'framework' (e.g., x and y), and a way to draw the data onto the
# coordinates.
# ggplot works like photoshop, or for anyone with GIS background, like GIS layers when mapping.
# You use various functions part of the ggplot world stacked on top of each other to build a graph.
# This also means that ggplot reads functions and lines of code IN THE ORDER THEY ARE WRITTEN - this is useful to remember, particularly for aesthetics
#####
#=================#
# BUILD UP A PLOT #
#=================#
# Teaching sequence: each call below adds one layer/component with `+`,
# showing how a ggplot is assembled incrementally.

# start by using smolt_data to plot length ~ date

##### ESSENTIAL REQUIREMENTS 1-3: data, aes and a geom

# DATA: the foundation (data alone draws an empty canvas)
ggplot(smolt_data)

# AESTHETICS: the framework/coordinates (axes appear, still no marks)
ggplot(smolt_data, aes(x=date, y=length_mm))

# GEOM: how to visually draw the data
ggplot(smolt_data, aes(x=date, y=length_mm)) +
  geom_point()

# GEOM: let's make the points prettier
# (shape 21 has both a fill and an outline colour; stroke = outline width)
ggplot(smolt_data, aes(x=date, y=length_mm)) +
  geom_point(size=3, shape=21, fill="blue", colour="black", stroke=1.5)

##### ADDITIONAL REQUIREMENTS 4+: make it pretty

# SCALES: Those axes are a mess! Lets make better intervals for the x and y axis labels
ggplot(smolt_data, aes(x=date, y=length_mm)) +
  geom_point(size=3, shape=21, fill="blue", colour="black", stroke=1.5) +
  ### ***SCROLL THROUGH ALL SCALE OPTIONS TO DEMONSTRATE THE NUMBER OF OPTIONS
  ### USE HELP FILES/PROMPTS TO PICK THE RIGHT ARGUMENTS****
  scale_y_continuous(breaks = seq(75, 200, by=25)) + # seq(from, to, by=interval)
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") # %b is the 3-letter month (e.g., 'Apr'), if you wanted numerical month it would be %m

# Next, lets use labs() to change the axis labels. This could probably go inside a theme() element, but labs() is soooo much easier!
ggplot(smolt_data, aes(x=date, y=length_mm)) +
  geom_point(size=3, shape=21, fill="blue", colour="black", stroke=1.5) +
  scale_y_continuous(breaks = seq(75, 200, by=25)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)")

# THEME: make text larger and all black
ggplot(smolt_data, aes(x=date, y=length_mm)) +
  geom_point(size=3, shape=21, fill="blue", colour="black", stroke=1.5) +
  scale_y_continuous(breaks = seq(75, 200, by=25)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)") +
  theme(axis.text = element_text(size=12, colour="black"),
        axis.title = element_text(size=15, face="bold"))

# THEME: Get rid of that ugly gray background
ggplot(smolt_data, aes(x=date, y=length_mm)) +
  geom_point(size=3, shape=21, fill="blue", colour="black", stroke=1.5) +
  scale_y_continuous(breaks = seq(75, 200, by=25)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)") +
  theme(axis.text = element_text(size=12, colour="black"),
        axis.title = element_text(size=15, face="bold")) +
  theme_bw() # prime example of how ggplot operates sequentially: theme_bw() has internal defaults, and has overwritten our previous theme() requests

# THEME: switch the order (theme_bw() first, custom theme() tweaks after)
ggplot(smolt_data, aes(x=date, y=length_mm)) +
  geom_point(size=3, shape=21, fill="blue", colour="black", stroke=1.5) +
  scale_y_continuous(breaks = seq(75, 200, by=25)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)") +
  theme_bw() +
  theme(axis.text = element_text(size=12, colour="black"),
        axis.title = element_text(size=15, face="bold"))
#=======================================#
# PLOTTING BASED ON A GROUPING VARIABLE #
#=======================================#
# So we have our base plot of length~date, but within those individual fish there are different stocks. We should know how to represent
# these visually, because they aren't all the same. Maybe more patterns will become clear...

# STEP 1: add to the 'aes' instructions!
ggplot(smolt_data, aes(x=date, y=length_mm, fill=NEWregion1)) +
  geom_point(size=3, shape=21, fill="blue", colour="black", stroke=1.5) + # WHY DOESN'T THIS WORK? (the geom's own fill overrides the aes mapping)
  scale_y_continuous(breaks = seq(75, 200, by=25)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)") +
  theme_bw() +
  theme(axis.text = element_text(size=12, colour="black"),
        axis.title = element_text(size=15, face="bold"))

ggplot(smolt_data, aes(x=date, y=length_mm, fill=NEWregion1)) +
  geom_point(size=3, shape=21, colour="black", stroke=1.5) + # ggplot is sequential - must delete 'fill' from geom_point
  scale_y_continuous(breaks = seq(75, 200, by=25)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)") +
  theme_bw() +
  theme(axis.text = element_text(size=12, colour="black"),
        axis.title = element_text(size=15, face="bold"))

# This is OK, but stock ID isn't really a continuous variable, it's discrete, it's either A, B or neither.
# R has read in our data that the 'NEWregion1' ID is a numerical (continuous) value, because the IDs are initially given as numbers with NAs for
# blanks. We can quickly convert this in our plot by changing NEWregion1 to be a factor in our plot.

# STEP 2: Convert grouping variable to a factor (discrete colour legend)
ggplot(smolt_data, aes(x=date, y=length_mm, fill=as.factor(NEWregion1))) + # as.factor()
  geom_point(size=3, shape=21, colour="black", stroke=1.5) +
  scale_y_continuous(breaks = seq(75, 200, by=25)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)") +
  theme_bw() +
  theme(axis.text = element_text(size=12, colour="black"),
        axis.title = element_text(size=15, face="bold"))

# That's starting to look better, but those NAs are really harshing this plot's vibe...
# we can apply a quick filter() function to 'smolt_data' WITHIN the ggplot function

# STEP 3: Apply filter() to data within ggplot()
# Note that if R read in your smolt_data so that NEWregion1 is a factor, you will have to use filter(NEWregion1 != "NA) instead
ggplot(smolt_data %>% filter(!is.na(NEWregion1)), aes(x=date, y=length_mm, fill=as.factor(NEWregion1))) + # filter()
  geom_point(size=3, shape=21, colour="black", stroke=1.5) +
  scale_y_continuous(breaks = seq(75, 200, by=25)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)") +
  theme_bw() +
  theme(axis.text = element_text(size=12, colour="black"),
        axis.title = element_text(size=15, face="bold"))

# STEP 4: THEME: Not a fan of where that legend is... lets put it on top for now, more out of the way
ggplot(smolt_data %>% filter(!is.na(NEWregion1)), aes(x=date, y=length_mm, fill=as.factor(NEWregion1))) +
  geom_point(size=3, shape=21, colour="black", stroke=1.5) +
  scale_y_continuous(breaks = seq(75, 200, by=25)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)") +
  theme_bw() +
  theme(axis.text = element_text(size=12, colour="black"),
        axis.title = element_text(size=15, face="bold"),
        legend.position = "top")
# ggplot is a really powerful tool for manipulating, summarizing and visualizing data relatively quickly (once you get the hang of it...)
# It has inifinite options to customize graphics however you want. *soooo many different theme packages exist*
# But using pipes and as.factor() arguments in the plot code gets clustered
# you can also do all of your data manipulation/pipe operations using dplyr, and then save the final product in a dataframe to plot.
#=========================#
# EXAMPLE: PIPES TO PLOTS #
#=========================#
# PIPE: Pulling an example of a summarized dataset from last week! - this is what we will use to plot!
gsi_length <- smolt_data %>%
select(ufid:prob1, age:lab_identifier) %>% # note now that we have done group_by() and summarize(), select isn't really needed anymore
filter(age==1 & prob1>=0.8 & !is.na(NEWregion1)) %>%
group_by(date, NEWregion1) %>% # note order specified impacts column order
summarize(mean_length = mean(length_mm, na.rm=T), sd_length = sd(length_mm, na.rm=T)) %>% # remember to add na.rm=T to ignore days where no lengths taken
mutate(NEWregion1 = ifelse(NEWregion1==4, "Nadina", "Stellako")) %>% # here, only 1 ifelse() statement works because there are only 2 levels in NEWregion1 - we got rid of the NAs in NEWregion1 using filter
arrange(date, desc(NEWregion1)) %>%
print()
# PLOT: Mean length~date for the two stocks
# One point per date x stock combination from the summarized gsi_length data;
# fill=NEWregion1 colours the point interiors by stock.
ggplot(gsi_length, aes(x=date, y=mean_length, fill=NEWregion1)) +
  geom_point(size=3, shape=21, colour="black", stroke=1.5) +   # shape 21 = fillable circle with a black outline
  scale_y_continuous(breaks = seq(80, 140, by=20)) + # NOTE NOW Y SCALE ISNT GOOD - HASH IT OUT (next plot)
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") + # %b = abbreviated month name, %d = day of month
  labs(x="Date", y="Length (mm)") +
  theme_bw() +                                                 # complete theme first so the manual theme() below is not overwritten
  theme(axis.text = element_text(size=12, colour="black"),
        axis.title = element_text(size=15, face="bold"))
# Scales: ignore y scale
# Same plot with scale_y_continuous() hashed out so ggplot chooses default
# breaks that suit the summarized means.
ggplot(gsi_length, aes(x=date, y=mean_length, fill=NEWregion1)) +
  geom_point(size=3, shape=21, colour="black", stroke=1.5) +
  #scale_y_continuous(breaks = seq(80, 140, by=20)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)") +
  theme_bw() +
  theme(axis.text = element_text(size=12, colour="black"),
        axis.title = element_text(size=15, face="bold"))
# GEOMs: we should add error bars to our plot
# geom_errorbar() draws mean +/- 1 SD using the sd_length column computed above;
# note it is listed AFTER geom_point(), so the bars draw on top of the points.
ggplot(gsi_length, aes(x=date, y=mean_length, fill=NEWregion1)) +
  geom_point(size=3, shape=21, colour="black", stroke=1.5) +
  geom_errorbar(aes(ymax=mean_length+sd_length, ymin=mean_length-sd_length)) +
  #scale_y_continuous(breaks = seq(75, 200, by=25)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)") +
  theme_bw() +
  theme(axis.text = element_text(size=12, colour="black"),
        axis.title = element_text(size=15, face="bold"))
# GEOMs: order is gross, lets put it behind the points
# Layers draw in the order they are written, so listing geom_errorbar() first
# places the bars underneath the points.
ggplot(gsi_length, aes(x=date, y=mean_length, fill=NEWregion1)) +
  geom_errorbar(aes(ymax=mean_length+sd_length, ymin=mean_length-sd_length)) +
  geom_point(size=3, shape=21, colour="black", stroke=1.5) +
  #scale_y_continuous(breaks = seq(75, 200, by=25)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)") +
  theme_bw() +
  theme(axis.text = element_text(size=12, colour="black"),
        axis.title = element_text(size=15, face="bold"))
#====================#
# EXPORTING GRAPHICS #
#====================#
##### For low-res exported graphics:
# Export > Copy to clipboard
# Export > Save as Image
# Screen shot of R window and crop
# Snip tool of R window and crop
#####
##### For high-res exported graphics: (HEAD ACHE)
# Export > Save as PDF
# Open PDF
# Snip tool
# Copy into word document
#####
# // end INTRO TO GGPLOT2! GO DRINK BEERZzz AND EAT CHiPzZZzz
#------------------------------------------------------------------------------------------------------------------------------------------------
#==========================#
# END OF SECTION 6 SUMMARY #
#==========================#
##### Section 6 summary:
# Know that ggplot2 and ggplot are the same thing
# Know the three key requirements for a plot
# Know how to call a dataframe into ggplot
# Know how to use aesthetics or coordinates to assign the framework
# Know how to use geometries to draw data, and how to customize within geoms (size, shape, etc.)
# Know that there are many other geometries out there
# Apply additional scales to manually adjust axes
# Use themes to customize the final visual touches
# Have a knowledge that stat_() functions exist
# String together multiple ggplot functions using +
#####
#------------------------------------------------------------------------------------------------------------------------------------------------
##################################################################################################################################################
##################################################################################################################################################
| /week7-tidyverse-ggplot-KD.R | permissive | khdavidson/r-intro-course | R | false | false | 14,513 | r | # R intro week 7
# tidyverse --> ggplot2!
# 4-jun-2020
# kd and tv
# Same script start every time:
##################################################################################################################################################
# set wd
# NOTE(review): an absolute setwd() makes the script non-portable; prefer an
# RStudio project or the here package. Kept here to match the course workflow.
setwd("~/Documents/ANALYSIS/Data")
# load packages
library(tidyverse)
library(xlsx)       # xlsx also exports read.xlsx(); it gets masked by openxlsx below
library(openxlsx)
# read in data
# Both xlsx and openxlsx export read.xlsx() with different signatures
# (sheetIndex vs sheet), so call it with an explicit namespace instead of
# relying on package attach order.
smolt_data <- openxlsx::read.xlsx("nautley_ANALYTICAL_database_2019.xlsx", sheet=3, detectDates=TRUE) # detectDates=TRUE is important: parses Excel dates as Date!!!!!
# quickly view data!
##################################################################################################################################################
# ##################################
###################################################### 6. INTRO TO TIDYVERSE: ggplot2 #######################################################
# ##################################
##### What is ggplot2?
# A series of graphing functions that use language similar to dplyr and the rest of the tidyverse
# ggplot2 is specific to the tidyverse series of languages
# in base R, the function to make a graph is plot() or barplot()
# sometimes, base R can be a bit more flexible and streamlined for complicated plots, but you can always ultimately do all you can in base R in
# ggplot.
# In ggplot, it's a lot clearer what is happening, and easier to work through to get a result. Plus, in my opinion, the graphs are much more
# aesthetically pleasing! They also work with things like pipes and other dplyr verbs such as filter if you need to use those.
#####
##### What are the main components to a ggplot script?
# ggplot scrips have 3 main components or requirements: data, coordinates or a 'framework' (e.g., x and y), and a way to draw the data onto the
# coordinates.
# ggplot works like photoshop, or for anyone with GIS background, like GIS layers when mapping.
# You use various functions part of the ggplot world stacked on top of each other to build a graph.
# This also means that ggplot reads functions and lines of code IN THE ORDER THEY ARE WRITTEN - this is useful to remember, particularly for aesthetics
#####
#=================#
# BUILD UP A PLOT #
#=================#
# start by using smolt_data to plot length ~ date
##### ESSENTIAL REQUIREMENTS 1-3: data, aes and a geom
# DATA: the foundation - this alone renders a blank canvas (no coordinates yet)
ggplot(smolt_data)
# AESTHETICS: the framework/coordinates - axes appear, but no data are drawn yet
ggplot(smolt_data, aes(x=date, y=length_mm))
# GEOM: how to visually draw the data - geom_point() draws one point per row
ggplot(smolt_data, aes(x=date, y=length_mm)) +
  geom_point()
# GEOM: let's make the points prettier
# shape=21 is a fillable circle: 'fill' sets the interior, 'colour' the outline,
# and 'stroke' the outline thickness
ggplot(smolt_data, aes(x=date, y=length_mm)) +
  geom_point(size=3, shape=21, fill="blue", colour="black", stroke=1.5)
##### ADDITIONAL REQUIREMENTS 4+: make it pretty
# SCALES: Those axes are a mess! Lets make better intervals for the x and y axis labels
ggplot(smolt_data, aes(x=date, y=length_mm)) +
  geom_point(size=3, shape=21, fill="blue", colour="black", stroke=1.5) +
  ### ***SCROLL THROUGH ALL SCALE OPTIONS TO DEMONSTRATE THE NUMBER OF OPTIONS
  ### USE HELP FILES/PROMPTS TO PICK THE RIGHT ARGUMENTS****
  scale_y_continuous(breaks = seq(75, 200, by=25)) + # seq(from, to, by=interval) generates the tick positions
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") # %b is the 3-letter month (e.g., 'Apr'), if you wanted numerical month it would be %m
# Next, lets use labs() to change the axis labels. This could probably go inside a theme() element, but labs() is soooo much easier!
ggplot(smolt_data, aes(x=date, y=length_mm)) +
  geom_point(size=3, shape=21, fill="blue", colour="black", stroke=1.5) +
  scale_y_continuous(breaks = seq(75, 200, by=25)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)") # labs() renames the axis titles without touching the data
# THEME: make text larger and all black
# theme() takes element_*() objects; axis.text = tick labels, axis.title = axis names
ggplot(smolt_data, aes(x=date, y=length_mm)) +
  geom_point(size=3, shape=21, fill="blue", colour="black", stroke=1.5) +
  scale_y_continuous(breaks = seq(75, 200, by=25)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)") +
  theme(axis.text = element_text(size=12, colour="black"),
    axis.title = element_text(size=15, face="bold"))
# THEME: Get rid of that ugly gray background
# DELIBERATE MISTAKE: theme_bw() is added LAST here to show what goes wrong.
ggplot(smolt_data, aes(x=date, y=length_mm)) +
  geom_point(size=3, shape=21, fill="blue", colour="black", stroke=1.5) +
  scale_y_continuous(breaks = seq(75, 200, by=25)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)") +
  theme(axis.text = element_text(size=12, colour="black"),
    axis.title = element_text(size=15, face="bold")) +
  theme_bw() # prime example of how ggplot operates sequentially: theme_bw() has internal defaults, and has overwritten our previous theme() requests
# THEME: switch the order
# Complete themes like theme_bw() must come BEFORE manual theme() tweaks,
# otherwise they reset the elements you just customized.
ggplot(smolt_data, aes(x=date, y=length_mm)) +
  geom_point(size=3, shape=21, fill="blue", colour="black", stroke=1.5) +
  scale_y_continuous(breaks = seq(75, 200, by=25)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)") +
  theme_bw() +
  theme(axis.text = element_text(size=12, colour="black"),
    axis.title = element_text(size=15, face="bold"))
#=======================================#
# PLOTTING BASED ON A GROUPING VARIABLE #
#=======================================#
# So we have our base plot of length~date, but within those individual fish there are different stocks. We should know how to represent
# these visually, because they aren't all the same. Maybe more patterns will become clear...
# STEP 1: add to the 'aes' instructions!
# Mapping fill=NEWregion1 inside aes() asks ggplot to colour points BY the data.
ggplot(smolt_data, aes(x=date, y=length_mm, fill=NEWregion1)) +
  geom_point(size=3, shape=21, fill="blue", colour="black", stroke=1.5) + # WHY DOESN'T THIS WORK? (the literal fill="blue" here overrides the aes() mapping)
  scale_y_continuous(breaks = seq(75, 200, by=25)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)") +
  theme_bw() +
  theme(axis.text = element_text(size=12, colour="black"),
    axis.title = element_text(size=15, face="bold"))
ggplot(smolt_data, aes(x=date, y=length_mm, fill=NEWregion1)) +
  geom_point(size=3, shape=21, colour="black", stroke=1.5) + # ggplot is sequential - must delete 'fill' from geom_point so the aes() mapping takes effect
  scale_y_continuous(breaks = seq(75, 200, by=25)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)") +
  theme_bw() +
  theme(axis.text = element_text(size=12, colour="black"),
    axis.title = element_text(size=15, face="bold"))
# This is OK, but stock ID isn't really a continuous variable, it's discrete, it's either A, B or neither.
# R has read in our data that the 'NEWregion1' ID is a numerical (continuous) value, because the IDs are initially given as numbers with NAs for
# blanks. We can quickly convert this in our plot by changing NEWregion1 to be a factor in our plot.
# STEP 2: Convert grouping variable to a factor
# as.factor() makes the fill scale discrete (distinct colours and a discrete
# legend) instead of a continuous colour gradient.
ggplot(smolt_data, aes(x=date, y=length_mm, fill=as.factor(NEWregion1))) + # as.factor() applied inside aes()
  geom_point(size=3, shape=21, colour="black", stroke=1.5) +
  scale_y_continuous(breaks = seq(75, 200, by=25)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)") +
  theme_bw() +
  theme(axis.text = element_text(size=12, colour="black"),
    axis.title = element_text(size=15, face="bold"))
# That's starting to look better, but those NAs are really harshing this plot's vibe...
# we can apply a quick filter() function to 'smolt_data' WITHIN the ggplot function
# STEP 3: Apply filter() to data within ggplot()
# Note that if R read in your smolt_data so that NEWregion1 is a factor, you will have to use filter(NEWregion1 != "NA") instead
ggplot(smolt_data %>% filter(!is.na(NEWregion1)), aes(x=date, y=length_mm, fill=as.factor(NEWregion1))) + # filter() drops unknown-stock rows before plotting
  geom_point(size=3, shape=21, colour="black", stroke=1.5) +
  scale_y_continuous(breaks = seq(75, 200, by=25)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)") +
  theme_bw() +
  theme(axis.text = element_text(size=12, colour="black"),
    axis.title = element_text(size=15, face="bold"))
# STEP 4: THEME: Not a fan of where that legend is... lets put it on top for now, more out of the way
# legend.position goes inside theme(); "top" moves the legend above the panel.
ggplot(smolt_data %>% filter(!is.na(NEWregion1)), aes(x=date, y=length_mm, fill=as.factor(NEWregion1))) +
  geom_point(size=3, shape=21, colour="black", stroke=1.5) +
  scale_y_continuous(breaks = seq(75, 200, by=25)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)") +
  theme_bw() +
  theme(axis.text = element_text(size=12, colour="black"),
    axis.title = element_text(size=15, face="bold"),
    legend.position = "top")
# ggplot is a really powerful tool for manipulating, summarizing and visualizing data relatively quickly (once you get the hang of it...)
# It has infinite options to customize graphics however you want. *soooo many different theme packages exist*
# But using pipes and as.factor() arguments in the plot code gets cluttered
# you can also do all of your data manipulation/pipe operations using dplyr, and then save the final product in a dataframe to plot.
#=========================#
# EXAMPLE: PIPES TO PLOTS #
#=========================#
# PIPE: Pulling an example of a summarized dataset from last week! - this is what we will use to plot!
# Mean and SD of length by date x stock, for age-1 fish with confident (>= 0.8)
# GSI assignments. na.rm is spelled out as TRUE: `T` is an ordinary variable
# that can be reassigned, `TRUE` cannot.
gsi_length <- smolt_data %>%
  select(ufid:prob1, age:lab_identifier) %>%                # narrow to the columns we need (optional once we summarize)
  filter(age==1 & prob1>=0.8 & !is.na(NEWregion1)) %>%      # keep age-1 fish, confident assignments, known stock
  group_by(date, NEWregion1) %>%                            # note order specified impacts column order
  summarize(mean_length = mean(length_mm, na.rm=TRUE), sd_length = sd(length_mm, na.rm=TRUE)) %>%  # na.rm=TRUE ignores days where no lengths taken
  ungroup() %>%                                             # summarize() drops only the last grouping level; drop the residual 'date' grouping explicitly
  mutate(NEWregion1 = ifelse(NEWregion1==4, "Nadina", "Stellako")) %>%  # only 2 levels remain (NAs filtered out above), so one ifelse() suffices
  arrange(date, desc(NEWregion1)) %>%
  print()
# PLOT: Mean length~date for the two stocks
# One point per date x stock combination from the summarized gsi_length data;
# fill=NEWregion1 colours the point interiors by stock.
ggplot(gsi_length, aes(x=date, y=mean_length, fill=NEWregion1)) +
  geom_point(size=3, shape=21, colour="black", stroke=1.5) +   # shape 21 = fillable circle with a black outline
  scale_y_continuous(breaks = seq(80, 140, by=20)) + # NOTE NOW Y SCALE ISNT GOOD - HASH IT OUT (next plot)
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") + # %b = abbreviated month name, %d = day of month
  labs(x="Date", y="Length (mm)") +
  theme_bw() +                                                 # complete theme first so the manual theme() below is not overwritten
  theme(axis.text = element_text(size=12, colour="black"),
        axis.title = element_text(size=15, face="bold"))
# Scales: ignore y scale
# Same plot with scale_y_continuous() hashed out so ggplot chooses default
# breaks that suit the summarized means.
ggplot(gsi_length, aes(x=date, y=mean_length, fill=NEWregion1)) +
  geom_point(size=3, shape=21, colour="black", stroke=1.5) +
  #scale_y_continuous(breaks = seq(80, 140, by=20)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)") +
  theme_bw() +
  theme(axis.text = element_text(size=12, colour="black"),
        axis.title = element_text(size=15, face="bold"))
# GEOMs: we should add error bars to our plot
# geom_errorbar() draws mean +/- 1 SD using the sd_length column computed above;
# note it is listed AFTER geom_point(), so the bars draw on top of the points.
ggplot(gsi_length, aes(x=date, y=mean_length, fill=NEWregion1)) +
  geom_point(size=3, shape=21, colour="black", stroke=1.5) +
  geom_errorbar(aes(ymax=mean_length+sd_length, ymin=mean_length-sd_length)) +
  #scale_y_continuous(breaks = seq(75, 200, by=25)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)") +
  theme_bw() +
  theme(axis.text = element_text(size=12, colour="black"),
        axis.title = element_text(size=15, face="bold"))
# GEOMs: order is gross, lets put it behind the points
# Layers draw in the order they are written, so listing geom_errorbar() first
# places the bars underneath the points.
ggplot(gsi_length, aes(x=date, y=mean_length, fill=NEWregion1)) +
  geom_errorbar(aes(ymax=mean_length+sd_length, ymin=mean_length-sd_length)) +
  geom_point(size=3, shape=21, colour="black", stroke=1.5) +
  #scale_y_continuous(breaks = seq(75, 200, by=25)) +
  scale_x_date(date_breaks = "5 day", date_labels = "%b %d") +
  labs(x="Date", y="Length (mm)") +
  theme_bw() +
  theme(axis.text = element_text(size=12, colour="black"),
        axis.title = element_text(size=15, face="bold"))
#====================#
# EXPORTING GRAPHICS #
#====================#
##### For low-res exported graphics:
# Export > Copy to clipboard
# Export > Save as Image
# Screen shot of R window and crop
# Snip tool of R window and crop
#####
##### For high-res exported graphics: (HEAD ACHE)
# Export > Save as PDF
# Open PDF
# Snip tool
# Copy into word document
#####
# // end INTRO TO GGPLOT2! GO DRINK BEERZzz AND EAT CHiPzZZzz
#------------------------------------------------------------------------------------------------------------------------------------------------
#==========================#
# END OF SECTION 6 SUMMARY #
#==========================#
##### Section 6 summary:
# Know that ggplot2 and ggplot are the same thing
# Know the three key requirements for a plot
# Know how to call a dataframe into ggplot
# Know how to use aesthetics or coordinates to assign the framework
# Know how to use geometries to draw data, and how to customize within geoms (size, shape, etc.)
# Know that there are many other geometries out there
# Apply additional scales to manually adjust axes
# Use themes to customize the final visual touches
# Have a knowledge that stat_() functions exist
# String together multiple ggplot functions using +
#####
#------------------------------------------------------------------------------------------------------------------------------------------------
##################################################################################################################################################
##################################################################################################################################################
|
/FIAP_6AIML.R | no_license | fiapteamrocket/audicao-cognitiva | R | false | false | 3,721 | r |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.