content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BSDA-package.R
\docType{data}
\name{Anesthet}
\alias{Anesthet}
\title{Recovery times for anesthetized patients}
\format{A data frame with 10 observations on the following variable.
\describe{
\item{recover}{recovery time in hours}
}}
\source{
Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
Duxbury
}
\usage{
Anesthet
}
\description{
Data used in Exercise 5.58
}
\examples{
qqnorm(Anesthet$recover)
qqline(Anesthet$recover)
with(data = Anesthet,
t.test(recover, conf.level = 0.90)$conf
)
}
\keyword{datasets}
| /man/Anesthet.Rd | no_license | lelou6666/BSDA | R | false | true | 610 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BSDA-package.R
\docType{data}
\name{Anesthet}
\alias{Anesthet}
\title{Recovery times for anesthetized patients}
\format{A data frame with 10 observations on the following variable.
\describe{
\item{recover}{recovery time in hours}
}}
\source{
Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
Duxbury
}
\usage{
Anesthet
}
\description{
Data used in Exercise 5.58
}
\examples{
qqnorm(Anesthet$recover)
qqline(Anesthet$recover)
with(data = Anesthet,
t.test(recover, conf.level = 0.90)$conf
)
}
\keyword{datasets}
|
#' Detect whether a factor is truly a factor or a character vector
#'
#' If every value of the input is unique (the number of unique values equals
#' the length of the input), the variable behaves like a character/identifier
#' column; if values repeat, it behaves like a genuine factor.
#'
#' @param x A factor.
#'
#' @return A logical scalar; \code{TRUE} if the input should be considered a
#'   factor (it contains repeated values), \code{FALSE} if it should be
#'   considered a character vector (all values are unique).
#' @export
#'
#' @examples
#' a <- factor(c("high", "high", "low"))
#' b <- factor(c("high", "low", "medium"))
#'
#' detect_fct(a)
#'
#' detect_fct(b)
#'
detect_fct <- function(x) {
  # Fail fast on anything that is not a factor.
  if (!is.factor(x)) {
    stop("Input value should belong to class factor, you provided a class of ",
         class(x)[1], call. = FALSE)
  }
  # Repeated values indicate a genuine factor; all-unique values indicate an
  # identifier-like character column.
  length(x) != length(unique(x))
}
| /R/detect_fct.R | no_license | qiaoyuet/foofactors | R | false | false | 707 | r | #' Detect whether a factor is truly factor or character
#' Detect whether a factor is truly factor or character
#'
#' A factor containing repeated values is treated as a genuine factor; a
#' factor whose values are all distinct from one another is treated as a
#' character (identifier-like) vector.
#'
#' @param x factor
#'
#' @return boolean; \code{TRUE} if input should be considered as factor,
#'   \code{FALSE} if input should be considered as character
#' @export
#'
#' @examples
#' a <- factor(c("high", "high", "low"))
#' b <- factor(c("high", "low", "medium"))
#'
#' detect_fct(a)
#'
#' detect_fct(b)
#'
detect_fct <- function(x) {
  # Guard clause: anything other than a factor is rejected up front.
  if (!is.factor(x)) {
    stop("Input value should belong to class factor, you privided a class of ", class(x)[1])
  }
  # Fewer distinct values than elements means at least one value repeats.
  n_distinct_vals <- length(unique(x))
  n_distinct_vals != length(x)
}
|
> COMING SOON: This is where you will connect external APIs
## Read in test data from UCI HAR dataset and tidy the data for
## analysis of just the mean and standard deviation
## set the proper working directory
## NOTE(review): hard-coded absolute path -- the script only runs as-is on
## this one machine; consider a relative path instead
setwd("C:/Users/Denise/Documents/UCI HAR Dataset")
## call the appropriate libraries (plyr deliberately loaded before dplyr so
## that dplyr's verbs mask plyr's, not the other way around)
library(plyr)
library(dplyr)
library(tidyr)
## read in the features dataset and activity labels for use with both test and train datasets
## features: feature names for the X files; activity_labels: id -> name lookup
features <- read.table("features.txt")
activity_labels <- read.table("activity_labels.txt")
## read test data and create a labeled dataset
## create test subject dataset
subject_no <- read.table("test/subject_test.txt")
names(subject_no) <- "subject_no"
subject_type <- "test"
## create the test activities table (to merge with x_test)
activity <- read.table("test/y_test.txt")
## NOTE(review): merge() sorts its result by the "by" column, so the original
## row order of y_test is not guaranteed to survive this step -- verify that
## 'activities' still lines up row-for-row with x_test before the cbind below
activities <- merge(activity, activity_labels, by="V1", all.x=TRUE) ##create the activity vector
names(activities) <- c("activity_no","activity")
activities <- as.data.frame(activities$activity)
names(activities) <- "activity"
##name the x_test columns (variables) with feature names
x_test <- read.table("test/X_test.txt")
names(x_test) <- features[,2]
## create the final test dataset: type, subject, activity, feature columns
test_data <- cbind(subject_type, subject_no, activities, x_test)
## read training data and create a labeled dataset
## create subject table
subject_no <- read.table("train/subject_train.txt")
names(subject_no) <- "subject_no"
subject_type <- "train"
##activities table (to merge with x_test)
activity <- read.table("train/y_train.txt")
##create the activity vector
## NOTE(review): same merge() row-order caveat as in the test-set section above
activities <- merge(activity, activity_labels, by="V1", all.x=TRUE)
## give them meaningful names
names(activities) <- c("activity_no","activity")
activities <- as.data.frame(activities$activity)
names(activities) <- "activity"
##name the x_train columns (variables) with feature names
x_train <- read.table("train/X_train.txt")
names(x_train) <- features[,2]
## create the final training dataset
train_data <- cbind(subject_type, subject_no, activities, x_train)
## combine the test and train data
combo_data <- rbind(test_data, train_data)
## strip out the unneeded columns with duplicate names causing errors (bandsEnergy columns)
## NOTE(review): these index ranges are tied to the exact feature layout of
## features.txt and must be re-checked if that file ever changes
combo_data <- combo_data[,-(464:505)]
combo_data <- combo_data[,-(385:426)]
combo_data <- combo_data[,-(306:347)]
## create an id file to remove duplicate table data from the headers
## can be merged back with the mean or standard deviation files to identify the subject at the end
id_file <- combo_data[,(1:3)]
id_file <- unique(id_file)
id <- 1:nrow(id_file)
id_file <- cbind(id,id_file)
## remove unneeded duplicate data (subject_no, activity, type(test or train))
combo_data <- merge(id_file, combo_data)
combo_data <- combo_data[,-(1:3)]
## fix labels to be more meaningful; sub() replaces only the first match in
## each name, which is sufficient for these patterns
names(combo_data) <- sub("BodyBody","Body",names(combo_data))
names(combo_data) <- sub("tBody","body ", names(combo_data))
names(combo_data) <- sub("tGravity","gravity ", names(combo_data))
names(combo_data) <- sub("Acc","acceleration ", names(combo_data))
names(combo_data) <- sub("Gyro","gyroscope ", names(combo_data))
names(combo_data) <- sub("Jerk","with jerk signal ", names(combo_data))
names(combo_data) <- sub("fBody","fft body ", names(combo_data))
names(combo_data) <- sub("Mag","magnitude ", names(combo_data))
names(combo_data) <- sub("\\-"," ", names(combo_data))
## NOTE(review): presumably intended to collapse doubled spaces left by the
## substitutions above; confirm the first pattern really contains two spaces,
## otherwise this call is a no-op
names(combo_data) <- sub(" "," ", names(combo_data))
## file of means
combo_means <- select(combo_data, id, contains("mean()"))
names(combo_means) <- sub("\\()","", names(combo_means))
View(combo_means)
## file of standard deviations
combo_std <- select(combo_data, id, contains("std()"))
names(combo_std) <- sub("\\()","", names(combo_std))
View(combo_std)
## remove unneeded files
rm(combo_data, activities, activity, activity_labels, features, subject_no, test_data,
train_data, x_test, x_train, id, subject_type)
## create dataset of means of the means by subject and activity
## merge with id file
mean_means <- merge(id_file, combo_means)
## select the columns of variables to take the means, group by subject_no and activity
## NOTE(review): columns 5:37 are position-dependent; verify after any change
## to the preceding column selections
mean_means <- aggregate(mean_means[,5:37], list(mean_means$subject_no, mean_means$activity), mean)
## rename the resulting grouped variables to meaningful names
names(mean_means) <- sub("Group.1","subject_no", names(mean_means))
names(mean_means) <- sub("Group.2","activity", names(mean_means))
View(mean_means)
| /run_analysis.R | no_license | neesy/Getting-and-Cleaning-Data-Project | R | false | false | 4,491 | r | ## Read in test data from UCI HAR dataset and tidy the data for
## analysis of just the mean and standard deviation
## set the proper working directory
## NOTE(review): hard-coded absolute path -- the script only runs as-is on
## this one machine; consider a relative path instead
setwd("C:/Users/Denise/Documents/UCI HAR Dataset")
## call the appropriate libraries (plyr deliberately loaded before dplyr so
## that dplyr's verbs mask plyr's, not the other way around)
library(plyr)
library(dplyr)
library(tidyr)
## read in the features dataset and activity labels for use with both test and train datasets
## features: feature names for the X files; activity_labels: id -> name lookup
features <- read.table("features.txt")
activity_labels <- read.table("activity_labels.txt")
## read test data and create a labeled dataset
## create test subject dataset
subject_no <- read.table("test/subject_test.txt")
names(subject_no) <- "subject_no"
subject_type <- "test"
## create the test activities table (to merge with x_test)
activity <- read.table("test/y_test.txt")
## NOTE(review): merge() sorts its result by the "by" column, so the original
## row order of y_test is not guaranteed to survive this step -- verify that
## 'activities' still lines up row-for-row with x_test before the cbind below
activities <- merge(activity, activity_labels, by="V1", all.x=TRUE) ##create the activity vector
names(activities) <- c("activity_no","activity")
activities <- as.data.frame(activities$activity)
names(activities) <- "activity"
##name the x_test columns (variables) with feature names
x_test <- read.table("test/X_test.txt")
names(x_test) <- features[,2]
## create the final test dataset: type, subject, activity, feature columns
test_data <- cbind(subject_type, subject_no, activities, x_test)
## read training data and create a labeled dataset
## create subject table
subject_no <- read.table("train/subject_train.txt")
names(subject_no) <- "subject_no"
subject_type <- "train"
##activities table (to merge with x_test)
activity <- read.table("train/y_train.txt")
##create the activity vector
## NOTE(review): same merge() row-order caveat as in the test-set section above
activities <- merge(activity, activity_labels, by="V1", all.x=TRUE)
## give them meaningful names
names(activities) <- c("activity_no","activity")
activities <- as.data.frame(activities$activity)
names(activities) <- "activity"
##name the x_train columns (variables) with feature names
x_train <- read.table("train/X_train.txt")
names(x_train) <- features[,2]
## create the final training dataset
train_data <- cbind(subject_type, subject_no, activities, x_train)
## combine the test and train data
combo_data <- rbind(test_data, train_data)
## strip out the unneeded columns with duplicate names causing errors (bandsEnergy columns)
## NOTE(review): these index ranges are tied to the exact feature layout of
## features.txt and must be re-checked if that file ever changes
combo_data <- combo_data[,-(464:505)]
combo_data <- combo_data[,-(385:426)]
combo_data <- combo_data[,-(306:347)]
## create an id file to remove duplicate table data from the headers
## can be merged back with the mean or standard deviation files to identify the subject at the end
id_file <- combo_data[,(1:3)]
id_file <- unique(id_file)
id <- 1:nrow(id_file)
id_file <- cbind(id,id_file)
## remove unneeded duplicate data (subject_no, activity, type(test or train))
combo_data <- merge(id_file, combo_data)
combo_data <- combo_data[,-(1:3)]
## fix labels to be more meaningful; sub() replaces only the first match in
## each name, which is sufficient for these patterns
names(combo_data) <- sub("BodyBody","Body",names(combo_data))
names(combo_data) <- sub("tBody","body ", names(combo_data))
names(combo_data) <- sub("tGravity","gravity ", names(combo_data))
names(combo_data) <- sub("Acc","acceleration ", names(combo_data))
names(combo_data) <- sub("Gyro","gyroscope ", names(combo_data))
names(combo_data) <- sub("Jerk","with jerk signal ", names(combo_data))
names(combo_data) <- sub("fBody","fft body ", names(combo_data))
names(combo_data) <- sub("Mag","magnitude ", names(combo_data))
names(combo_data) <- sub("\\-"," ", names(combo_data))
## NOTE(review): presumably intended to collapse doubled spaces left by the
## substitutions above; confirm the first pattern really contains two spaces,
## otherwise this call is a no-op
names(combo_data) <- sub(" "," ", names(combo_data))
## file of means
combo_means <- select(combo_data, id, contains("mean()"))
names(combo_means) <- sub("\\()","", names(combo_means))
View(combo_means)
## file of standard deviations
combo_std <- select(combo_data, id, contains("std()"))
names(combo_std) <- sub("\\()","", names(combo_std))
View(combo_std)
## remove unneeded files
rm(combo_data, activities, activity, activity_labels, features, subject_no, test_data,
train_data, x_test, x_train, id, subject_type)
## create dataset of means of the means by subject and activity
## merge with id file
mean_means <- merge(id_file, combo_means)
## select the columns of variables to take the means, group by subject_no and activity
## NOTE(review): columns 5:37 are position-dependent; verify after any change
## to the preceding column selections
mean_means <- aggregate(mean_means[,5:37], list(mean_means$subject_no, mean_means$activity), mean)
## rename the resulting grouped variables to meaningful names
names(mean_means) <- sub("Group.1","subject_no", names(mean_means))
names(mean_means) <- sub("Group.2","activity", names(mean_means))
View(mean_means)
|
#' Predict animal locations and velocities using a fitted CTCRW model and
#' calculate measurement error fit statistics
#'
#'
#' The \code{crwPredict} function uses a fitted model object from
#' \code{crwMLE} to predict animal locations (with estimated uncertainty) at
#' times in the original data set and supplemented by times in \code{predTime}.
#' Animal speed is also
#' estimated. In addition, the measurement error shock detection filter of de
#' Jong and Penzer (1998) is also calculated to provide a measure for outlier
#' detection.
#'
#'
#' The requirements for \code{data} are the same as those for fitting the model
#' in \code{\link{crwMLE}}.
#'
#' @param object.crwFit A model object from \code{\link{crwMLE}}.
#' @param predTime vector of additional prediction times (numeric or POSIXct). Alternatively, a character vector specifying a time interval (see Details).
#' @param flat logical. Should the result be returned as a flat data.frame.
#' @param ... Additional arguments for testing new features
#'
#' @details
#' \itemize{
#' \item{predTime}{
#' \code{predTime} can be either passed as a separate vector of POSIXct or numeric values for additional prediction times beyond the observed times. If the original data were provided as a POSIXct type, then \code{crwPredict} can derive a sequence of regularly spaced prediction times from the original data. This is specified by providing a character string that corresponds to the \code{by} argument of the \code{seq.POSIXt} function (e.g. '1 hour', '30 mins'). \code{crwPredict} will round the first observed time up to the nearest unit (e.g. '1 hour' will round up to the nearest hour, '30 mins' will round up to the nearest minute) and start the sequence from there. The last observation time is truncated down to the nearest unit to specify the end time.
#' }
#' }
#'
#' @return
#'
#' List with the following elements:
#'
#' \item{originalData}{A data.frame which is \code{data} merged with
#' \code{predTime}.}
#'
#' \item{alpha.hat}{Predicted state}
#'
#' \item{V.hat}{array where \code{V.hat[,,i]} is the prediction
#' covariance matrix for \code{alpha.hat[,i]}.}
#'
#' \item{fit.test}{A data.frame of chi-square fit (df=2) statistics and naive
#' (pointwise) p-values. Only present when \code{flat = FALSE}.}
#'
#' If \code{flat} is set to \code{TRUE} then a data set is returned with the
#' columns of the original data plus the state estimates, standard errors (se),
#' speed estimates, and the fit statistics and naive p-values.
#'
#'
#' @author Devin S. Johnson
#' @references de Jong, P. and Penzer, J. (1998) Diagnosing shocks in time
#' series. Journal of the American Statistical Association 93:796-806.
#' @export
crwPredict=function(object.crwFit, predTime=NULL, flat=TRUE, ...)
{
## 'getUseAvail' is an experimental switch that is not a formal argument.
## NOTE(review): exists() searches up through the global environment, so a
## stray global variable named 'getUseAvail' silently changes behavior here.
if(!exists("getUseAvail")) getUseAvail=FALSE
if(flat & getUseAvail){
warning("The 'flat=TRUE' argument cannot be used in conjunction with 'getUseAvail=TRUE' argument.")
flat <- FALSE
}
## A character predTime (e.g. "1 hour") is expanded into a regular sequence of
## prediction times spanning the observed time range (see Details).
if(inherits(predTime,"character")) {
t_int <- unlist(strsplit(predTime, " "))
if(t_int[2] %in% c("min","mins","hour","hours","day","days")) {
min_dt <- crawl::intToPOSIX(min(object.crwFit$data$TimeNum,na.rm=TRUE))
max_dt <- crawl::intToPOSIX(max(object.crwFit$data$TimeNum,na.rm=TRUE))
## round the start up and truncate the end down to whole time units
min_dt <- round(min_dt,t_int[2])
max_dt <- trunc(max_dt,t_int[2])
predTime <- seq(min_dt, max_dt, by = predTime)
} else {
stop("predTime not specified correctly. see documentation for seq.POSIXt")
}
}
## Model definition/parameters ##
## Unpack everything needed from the fitted model object.
data <- object.crwFit$data
driftMod <- object.crwFit$random.drift
mov.mf <- object.crwFit$mov.mf
activity <- object.crwFit$activity
err.mfX <- object.crwFit$err.mfX
err.mfY <- object.crwFit$err.mfY
rho = object.crwFit$rho
par <- object.crwFit$par
n.errX <- object.crwFit$n.errX
n.errY <- object.crwFit$n.errY
n.mov <- object.crwFit$n.mov
tn <- object.crwFit$Time.name
if(inherits(predTime, "POSIXct")) predTime <- as.numeric(predTime)#/3600
## Data setup ##
## Merge the requested prediction times into the observed data, then expand
## all time-indexed model matrices to the merged time grid.
if (!is.null(predTime)) {
if(min(predTime) < data[1, tn]) {
warning("Predictions times given before first observation!\nOnly those after first observation will be used.")
predTime <- predTime[predTime>=data[1,tn]]
}
origTime <- data[, tn]
if (is.null(data$locType)) data$locType <- "o"
predData <- data.frame(predTime, "p")
names(predData) <- c(tn, "locType")
data <- merge(data, predData,
by=c(tn, "locType"), all=TRUE)
dups <- duplicated(data[, tn]) #& data[,"locType"]==1
data <- data[!dups, ]
mov.mf <- as.matrix(expandPred(x=mov.mf, Time=origTime, predTime=predTime))
if (!is.null(activity)) activity <- as.matrix(expandPred(x=activity, Time=origTime, predTime=predTime))
if (!is.null(err.mfX)) err.mfX <- as.matrix(expandPred(x=err.mfX, Time=origTime, predTime=predTime))
if (!is.null(err.mfY)) err.mfY <- as.matrix(expandPred(x=err.mfY, Time=origTime, predTime=predTime))
if (!is.null(rho)) rho <- as.matrix(expandPred(x=rho, Time=origTime, predTime=predTime))
}
## rows that correspond to requested prediction times are tagged 'p'
data$locType[data[,tn]%in%predTime] <- 'p'
delta <- c(diff(data[, tn]), 1)
a = object.crwFit$initial.state$a
P = object.crwFit$initial.state$P
y = as.matrix(data[,object.crwFit$coord])
## noObs flags rows with a missing coordinate; their y values are zeroed and
## their measurement-error rows in Hmat are zeroed below.
noObs <- as.numeric(is.na(y[,1]) | is.na(y[,2]))
y[noObs==1,] = 0
N = nrow(y)
###
### Process parameters for C++
###
## Hmat columns: x-error variance, y-error variance, x/y error covariance.
if (!is.null(err.mfX)) {
theta.errX <- par[1:n.errX]
Hmat <- exp(2 * err.mfX %*% theta.errX)
} else Hmat <- rep(0.0, N)
if (!is.null(err.mfY)) {
theta.errY <- par[(n.errX + 1):(n.errX + n.errY)]
Hmat <- cbind(Hmat,exp(2 * err.mfY %*% theta.errY))
} else Hmat <- cbind(Hmat, Hmat)
if(!is.null(rho)){
Hmat = cbind(Hmat, sqrt(Hmat[,1])*sqrt(Hmat[,2])*rho)
} else {Hmat = cbind(Hmat, rep(0,N))}
Hmat[noObs==1,] = 0
## movement parameters: sig2 (velocity variance scale) and b (autocorrelation)
theta.mov <- par[(n.errX + n.errY + 1):(n.errX + n.errY + 2 * n.mov)]
sig2 <- exp(2 * (mov.mf %*% theta.mov[1:n.mov]))
b <- exp(mov.mf %*% theta.mov[(n.mov + 1):(2 * n.mov)])
## activity model: b -> Inf marks inactive periods; encode as active = 0
if (!is.null(activity)) {
theta.activ <- par[(n.errX + n.errY + 2 * n.mov + 1)]
b <- b / ((activity) ^ exp(theta.activ))
active <- ifelse(b==Inf, 0, 1)
b <- ifelse(b==Inf, 0, b)
} else active = rep(1,N)
## Dispatch to the compiled Kalman prediction routine, with or without the
## random-drift extension.
if (driftMod) {
theta.drift <- par[(n.errX + n.errY + 2 * n.mov + 1):
(n.errX + n.errY + 2 * n.mov + 2)]
b.drift <- exp(log(b) - log(1+exp(theta.drift[2])))
sig2.drift <- exp(log(sig2) + 2 * theta.drift[1])
out = CTCRWPREDICT_DRIFT(y, Hmat, b, b.drift, sig2, sig2.drift, delta, noObs, active, a, P)
} else {
out=CTCRWPREDICT(y, Hmat, b, sig2, delta, noObs, active, a, P)
}
## Label predicted state columns (6 states with drift, 4 without).
pred <- data.frame(t(out$pred))
if (driftMod) {
names(pred) <- c("mu.x", "theta.x", "gamma.x","mu.y", "theta.y", "gamma.y")
} else names(pred) <- c("mu.x", "nu.x", "mu.y","nu.y")
var <- zapsmall(out$predVar)
## de Jong & Penzer (1998) shock-detection statistics (df = 2 chi-square).
obsFit <- data.frame(predObs.x=out$predObs[1,],
predObs.y=out$predObs[2,])
obsFit$outlier.chisq <- as.vector(out$chisq)
obsFit$naive.p.val <- 1 - pchisq(obsFit$outlier.chisq, 2)
if(getUseAvail){
warning("'getUseAvail' not implemented yet in this version of 'crawl' contact maintainer to fix this! ")
# idx <- data$locType=="p"
# movMatsPred <- getQT(sig2[idx], b[idx], sig2.drift[idx], b.drift[idx], delta=c(diff(data[idx,tn]),1), driftMod)
# TmatP <- movMatsPred$Tmat
# QmatP <- movMatsPred$Qmat
# avail <- t(sapply(1:(nrow(TmatP)-1), makeAvail, Tmat=TmatP, Qmat=QmatP, predx=predx[idx,], predy=predy[idx,],
# vary=vary[,,idx], varx=varx[,,idx], driftMod=driftMod, lonadj=lonAdjVals[idx]))
# avail <- cbind(data[idx,tn][-1], avail)
# colnames(avail) <- c(tn, "meanAvail.x", "meanAvail.y", "varAvail.x", "varAvail.y")
# use <- cbind(data[idx,tn], predx[idx,1], predy[idx,1], varx[1,1,idx], vary[1,1,idx])[-1,]
# colnames(use) <- c(tn, "meanUse.x", "meanUse.y", "varUse.x", "varUse.y")
# UseAvail.lst <- list(use=use, avail=avail)
UseAvail.lst=NULL
}
else UseAvail.lst=NULL
## Speed from the velocity (and, if present, drift) state columns.
speed = sqrt(apply(as.matrix(pred[,2:(2+driftMod)]), 1, sum)^2 +
apply(as.matrix(pred[,(4+driftMod):(4+2*driftMod)]), 1, sum)^2)
out <- list(originalData=fillCols(data), alpha.hat=pred,
V.hat=var, speed=speed, loglik=out$ll, useAvail=UseAvail.lst)
## NOTE(review): the attribute-setting lines are duplicated across the two
## branches below; only "flat" (and the cbind vs append step) actually differs.
if (flat) {
out <- cbind(fillCols(crawl::flatten(out)), obsFit)
attr(out, "flat") <- TRUE
attr(out, "coord") <- c(x=object.crwFit$coord[1], y=object.crwFit$coord[2])
attr(out, "random.drift") <- driftMod
attr(out, "activity.model") <- !is.null(object.crwFit$activity)
attr(out, "Time.name") <- tn
} else {
out <- append(out, list(fit.test=obsFit))
attr(out, "flat") <- FALSE
attr(out, "coord") <- c(x=object.crwFit$coord[1], y=object.crwFit$coord[2])
attr(out, "random.drift") <- driftMod
attr(out, "activity.model") <- !is.null(object.crwFit$activity)
attr(out, "Time.name") <- tn
}
class(out) <- c(class(out),"crwPredict")
return(out)
}
| /R/crwPredict.R | no_license | farcego/crawl | R | false | false | 9,172 | r | #' Predict animal locations and velocities using a fitted CTCRW model and
#' calculate measurement error fit statistics
#'
#'
#' The \code{crwPredict} function uses a fitted model object from
#' \code{crwMLE} to predict animal locations (with estimated uncertainty) at
#' times in the original data set and supplemented by times in \code{predTime}.
#' Animal speed is also
#' estimated. In addition, the measurement error shock detection filter of de
#' Jong and Penzer (1998) is also calculated to provide a measure for outlier
#' detection.
#'
#'
#' The requirements for \code{data} are the same as those for fitting the model
#' in \code{\link{crwMLE}}.
#'
#' @param object.crwFit A model object from \code{\link{crwMLE}}.
#' @param predTime vector of additional prediction times (numeric or POSIXct). Alternatively, a character vector specifying a time interval (see Details).
#' @param flat logical. Should the result be returned as a flat data.frame.
#' @param ... Additional arguments for testing new features
#'
#' @details
#' \itemize{
#' \item{predTime}{
#' \code{predTime} can be either passed as a separate vector of POSIXct or numeric values for additional prediction times beyond the observed times. If the original data were provided as a POSIXct type, then \code{crwPredict} can derive a sequence of regularly spaced prediction times from the original data. This is specified by providing a character string that corresponds to the \code{by} argument of the \code{seq.POSIXt} function (e.g. '1 hour', '30 mins'). \code{crwPredict} will round the first observed time up to the nearest unit (e.g. '1 hour' will round up to the nearest hour, '30 mins' will round up to the nearest minute) and start the sequence from there. The last observation time is truncated down to the nearest unit to specify the end time.
#' }
#' }
#'
#' @return
#'
#' List with the following elements:
#'
#' \item{originalData}{A data.frame which is \code{data} merged with
#' \code{predTime}.}
#'
#' \item{alpha.hat}{Predicted state}
#'
#' \item{V.hat}{array where \code{V.hat[,,i]} is the prediction
#' covariance matrix for \code{alpha.hat[,i]}.}
#'
#' \item{fit.test}{A data.frame of chi-square fit (df=2) statistics and naive
#' (pointwise) p-values. Only present when \code{flat = FALSE}.}
#'
#' If \code{flat} is set to \code{TRUE} then a data set is returned with the
#' columns of the original data plus the state estimates, standard errors (se),
#' speed estimates, and the fit statistics and naive p-values.
#'
#'
#' @author Devin S. Johnson
#' @references de Jong, P. and Penzer, J. (1998) Diagnosing shocks in time
#' series. Journal of the American Statistical Association 93:796-806.
#' @export
crwPredict=function(object.crwFit, predTime=NULL, flat=TRUE, ...)
{
## 'getUseAvail' is an experimental switch that is not a formal argument.
## NOTE(review): exists() searches up through the global environment, so a
## stray global variable named 'getUseAvail' silently changes behavior here.
if(!exists("getUseAvail")) getUseAvail=FALSE
if(flat & getUseAvail){
warning("The 'flat=TRUE' argument cannot be used in conjunction with 'getUseAvail=TRUE' argument.")
flat <- FALSE
}
## A character predTime (e.g. "1 hour") is expanded into a regular sequence of
## prediction times spanning the observed time range (see Details).
if(inherits(predTime,"character")) {
t_int <- unlist(strsplit(predTime, " "))
if(t_int[2] %in% c("min","mins","hour","hours","day","days")) {
min_dt <- crawl::intToPOSIX(min(object.crwFit$data$TimeNum,na.rm=TRUE))
max_dt <- crawl::intToPOSIX(max(object.crwFit$data$TimeNum,na.rm=TRUE))
## round the start up and truncate the end down to whole time units
min_dt <- round(min_dt,t_int[2])
max_dt <- trunc(max_dt,t_int[2])
predTime <- seq(min_dt, max_dt, by = predTime)
} else {
stop("predTime not specified correctly. see documentation for seq.POSIXt")
}
}
## Model definition/parameters ##
## Unpack everything needed from the fitted model object.
data <- object.crwFit$data
driftMod <- object.crwFit$random.drift
mov.mf <- object.crwFit$mov.mf
activity <- object.crwFit$activity
err.mfX <- object.crwFit$err.mfX
err.mfY <- object.crwFit$err.mfY
rho = object.crwFit$rho
par <- object.crwFit$par
n.errX <- object.crwFit$n.errX
n.errY <- object.crwFit$n.errY
n.mov <- object.crwFit$n.mov
tn <- object.crwFit$Time.name
if(inherits(predTime, "POSIXct")) predTime <- as.numeric(predTime)#/3600
## Data setup ##
## Merge the requested prediction times into the observed data, then expand
## all time-indexed model matrices to the merged time grid.
if (!is.null(predTime)) {
if(min(predTime) < data[1, tn]) {
warning("Predictions times given before first observation!\nOnly those after first observation will be used.")
predTime <- predTime[predTime>=data[1,tn]]
}
origTime <- data[, tn]
if (is.null(data$locType)) data$locType <- "o"
predData <- data.frame(predTime, "p")
names(predData) <- c(tn, "locType")
data <- merge(data, predData,
by=c(tn, "locType"), all=TRUE)
dups <- duplicated(data[, tn]) #& data[,"locType"]==1
data <- data[!dups, ]
mov.mf <- as.matrix(expandPred(x=mov.mf, Time=origTime, predTime=predTime))
if (!is.null(activity)) activity <- as.matrix(expandPred(x=activity, Time=origTime, predTime=predTime))
if (!is.null(err.mfX)) err.mfX <- as.matrix(expandPred(x=err.mfX, Time=origTime, predTime=predTime))
if (!is.null(err.mfY)) err.mfY <- as.matrix(expandPred(x=err.mfY, Time=origTime, predTime=predTime))
if (!is.null(rho)) rho <- as.matrix(expandPred(x=rho, Time=origTime, predTime=predTime))
}
## rows that correspond to requested prediction times are tagged 'p'
data$locType[data[,tn]%in%predTime] <- 'p'
delta <- c(diff(data[, tn]), 1)
a = object.crwFit$initial.state$a
P = object.crwFit$initial.state$P
y = as.matrix(data[,object.crwFit$coord])
## noObs flags rows with a missing coordinate; their y values are zeroed and
## their measurement-error rows in Hmat are zeroed below.
noObs <- as.numeric(is.na(y[,1]) | is.na(y[,2]))
y[noObs==1,] = 0
N = nrow(y)
###
### Process parameters for C++
###
## Hmat columns: x-error variance, y-error variance, x/y error covariance.
if (!is.null(err.mfX)) {
theta.errX <- par[1:n.errX]
Hmat <- exp(2 * err.mfX %*% theta.errX)
} else Hmat <- rep(0.0, N)
if (!is.null(err.mfY)) {
theta.errY <- par[(n.errX + 1):(n.errX + n.errY)]
Hmat <- cbind(Hmat,exp(2 * err.mfY %*% theta.errY))
} else Hmat <- cbind(Hmat, Hmat)
if(!is.null(rho)){
Hmat = cbind(Hmat, sqrt(Hmat[,1])*sqrt(Hmat[,2])*rho)
} else {Hmat = cbind(Hmat, rep(0,N))}
Hmat[noObs==1,] = 0
## movement parameters: sig2 (velocity variance scale) and b (autocorrelation)
theta.mov <- par[(n.errX + n.errY + 1):(n.errX + n.errY + 2 * n.mov)]
sig2 <- exp(2 * (mov.mf %*% theta.mov[1:n.mov]))
b <- exp(mov.mf %*% theta.mov[(n.mov + 1):(2 * n.mov)])
## activity model: b -> Inf marks inactive periods; encode as active = 0
if (!is.null(activity)) {
theta.activ <- par[(n.errX + n.errY + 2 * n.mov + 1)]
b <- b / ((activity) ^ exp(theta.activ))
active <- ifelse(b==Inf, 0, 1)
b <- ifelse(b==Inf, 0, b)
} else active = rep(1,N)
## Dispatch to the compiled Kalman prediction routine, with or without the
## random-drift extension.
if (driftMod) {
theta.drift <- par[(n.errX + n.errY + 2 * n.mov + 1):
(n.errX + n.errY + 2 * n.mov + 2)]
b.drift <- exp(log(b) - log(1+exp(theta.drift[2])))
sig2.drift <- exp(log(sig2) + 2 * theta.drift[1])
out = CTCRWPREDICT_DRIFT(y, Hmat, b, b.drift, sig2, sig2.drift, delta, noObs, active, a, P)
} else {
out=CTCRWPREDICT(y, Hmat, b, sig2, delta, noObs, active, a, P)
}
## Label predicted state columns (6 states with drift, 4 without).
pred <- data.frame(t(out$pred))
if (driftMod) {
names(pred) <- c("mu.x", "theta.x", "gamma.x","mu.y", "theta.y", "gamma.y")
} else names(pred) <- c("mu.x", "nu.x", "mu.y","nu.y")
var <- zapsmall(out$predVar)
## de Jong & Penzer (1998) shock-detection statistics (df = 2 chi-square).
obsFit <- data.frame(predObs.x=out$predObs[1,],
predObs.y=out$predObs[2,])
obsFit$outlier.chisq <- as.vector(out$chisq)
obsFit$naive.p.val <- 1 - pchisq(obsFit$outlier.chisq, 2)
if(getUseAvail){
warning("'getUseAvail' not implemented yet in this version of 'crawl' contact maintainer to fix this! ")
# idx <- data$locType=="p"
# movMatsPred <- getQT(sig2[idx], b[idx], sig2.drift[idx], b.drift[idx], delta=c(diff(data[idx,tn]),1), driftMod)
# TmatP <- movMatsPred$Tmat
# QmatP <- movMatsPred$Qmat
# avail <- t(sapply(1:(nrow(TmatP)-1), makeAvail, Tmat=TmatP, Qmat=QmatP, predx=predx[idx,], predy=predy[idx,],
# vary=vary[,,idx], varx=varx[,,idx], driftMod=driftMod, lonadj=lonAdjVals[idx]))
# avail <- cbind(data[idx,tn][-1], avail)
# colnames(avail) <- c(tn, "meanAvail.x", "meanAvail.y", "varAvail.x", "varAvail.y")
# use <- cbind(data[idx,tn], predx[idx,1], predy[idx,1], varx[1,1,idx], vary[1,1,idx])[-1,]
# colnames(use) <- c(tn, "meanUse.x", "meanUse.y", "varUse.x", "varUse.y")
# UseAvail.lst <- list(use=use, avail=avail)
UseAvail.lst=NULL
}
else UseAvail.lst=NULL
## Speed from the velocity (and, if present, drift) state columns.
speed = sqrt(apply(as.matrix(pred[,2:(2+driftMod)]), 1, sum)^2 +
apply(as.matrix(pred[,(4+driftMod):(4+2*driftMod)]), 1, sum)^2)
out <- list(originalData=fillCols(data), alpha.hat=pred,
V.hat=var, speed=speed, loglik=out$ll, useAvail=UseAvail.lst)
## NOTE(review): the attribute-setting lines are duplicated across the two
## branches below; only "flat" (and the cbind vs append step) actually differs.
if (flat) {
out <- cbind(fillCols(crawl::flatten(out)), obsFit)
attr(out, "flat") <- TRUE
attr(out, "coord") <- c(x=object.crwFit$coord[1], y=object.crwFit$coord[2])
attr(out, "random.drift") <- driftMod
attr(out, "activity.model") <- !is.null(object.crwFit$activity)
attr(out, "Time.name") <- tn
} else {
out <- append(out, list(fit.test=obsFit))
attr(out, "flat") <- FALSE
attr(out, "coord") <- c(x=object.crwFit$coord[1], y=object.crwFit$coord[2])
attr(out, "random.drift") <- driftMod
attr(out, "activity.model") <- !is.null(object.crwFit$activity)
attr(out, "Time.name") <- tn
}
class(out) <- c(class(out),"crwPredict")
return(out)
}
|
\name{getDefaultEdgeSelectionColor}
\alias{getDefaultEdgeSelectionColor}
\alias{getDefaultEdgeSelectionColor,CytoscapeConnectionClass-method}
\title{getDefaultEdgeSelectionColor}
\description{
Retrieve the default color used to display selected edges.
}
\usage{
getDefaultEdgeSelectionColor(obj, vizmap.style.name)
}
\arguments{
\item{obj}{a \code{CytoscapeConnectionClass} object. }
\item{vizmap.style.name}{a \code{character} object, 'default' by default }
}
\value{
A character string, eg "java.awt.Color[r=204,g=204,b=255]"
}
\author{Paul Shannon}
\examples{
cy <- CytoscapeConnection ()
print (getDefaultEdgeSelectionColor (cy)) # "java.awt.Color[r=255,g=0,b=0]"
}
\keyword{graph}
| /man/getDefaultEdgeSelectionColor.Rd | no_license | pshannon-bioc/RCy3 | R | false | false | 700 | rd | \name{getDefaultEdgeSelectionColor}
\alias{getDefaultEdgeSelectionColor}
\alias{getDefaultEdgeSelectionColor,CytoscapeConnectionClass-method}
\title{getDefaultEdgeSelectionColor}
\description{
Retrieve the default color used to display selected edges.
}
\usage{
getDefaultEdgeSelectionColor(obj, vizmap.style.name)
}
\arguments{
\item{obj}{a \code{CytoscapeConnectionClass} object. }
\item{vizmap.style.name}{a \code{character} object, 'default' by default }
}
\value{
A character string, eg "java.awt.Color[r=204,g=204,b=255]"
}
\author{Paul Shannon}
\examples{
cy <- CytoscapeConnection ()
print (getDefaultEdgeSelectionColor (cy)) # "java.awt.Color[r=255,g=0,b=0]"
}
\keyword{graph}
|
# Run markdown files for both samples in tempdiscsocialdist data set
# 7.2.20 KLS
library(here)
# Render analyses 03 and 04 once per sample (reports 00-02 are currently
# disabled). The .Rmd files presumably read the loop variable 'sample' from
# the calling environment (rmarkdown::render evaluates in the parent frame by
# default) -- TODO confirm; otherwise both passes produce identical content
# under different file names.
# NOTE(review): 'sample' shadows base::sample() within this loop.
for (sample in 1:2) {
# rmarkdown::render(here::here('doc', '00_Demo_data.Rmd'),
# output_file = paste0('00_Demo_data_S', sample, '.html'),
# output_dir = here::here('doc'))
# rmarkdown::render(here::here('doc', '01_temp_disc.Rmd'),
# output_file = paste0('01_temp_disc_S', sample, '.html'),
# output_dir = here::here('doc'))
# rmarkdown::render(here::here('doc', '02_social_dist.Rmd'),
# output_file = paste0('02_social_dist_S', sample, '.html'),
# output_dir = here::here('doc'))
rmarkdown::render(here::here('doc', '03_temp_disc_social_dist.Rmd'),
output_file = paste0('03_temp_disc_social_dist_S', sample, '.html'),
output_dir = here::here('doc'))
rmarkdown::render(here::here('doc', '04_values_social_dist.Rmd'),
output_file = paste0('04_values_social_dist_S', sample, '.html'),
output_dir = here::here('doc'))
}
| /scr/run_markdowns.R | no_license | klsea/tempdiscsocialdist | R | false | false | 1,129 | r | # Run markdown files for both samples in tempdiscsocialdist data set
# 7.2.20 KLS
library(here)
# Render analyses 03 and 04 once per sample (reports 00-02 are currently
# disabled). The .Rmd files presumably read the loop variable 'sample' from
# the calling environment (rmarkdown::render evaluates in the parent frame by
# default) -- TODO confirm; otherwise both passes produce identical content
# under different file names.
# NOTE(review): 'sample' shadows base::sample() within this loop.
for (sample in 1:2) {
# rmarkdown::render(here::here('doc', '00_Demo_data.Rmd'),
# output_file = paste0('00_Demo_data_S', sample, '.html'),
# output_dir = here::here('doc'))
# rmarkdown::render(here::here('doc', '01_temp_disc.Rmd'),
# output_file = paste0('01_temp_disc_S', sample, '.html'),
# output_dir = here::here('doc'))
# rmarkdown::render(here::here('doc', '02_social_dist.Rmd'),
# output_file = paste0('02_social_dist_S', sample, '.html'),
# output_dir = here::here('doc'))
rmarkdown::render(here::here('doc', '03_temp_disc_social_dist.Rmd'),
output_file = paste0('03_temp_disc_social_dist_S', sample, '.html'),
output_dir = here::here('doc'))
rmarkdown::render(here::here('doc', '04_values_social_dist.Rmd'),
output_file = paste0('04_values_social_dist_S', sample, '.html'),
output_dir = here::here('doc'))
}
|
## Build a correspondence network (Erasmus / Pirckheimer, time slice 1515).
## NOTE(review): require() is used for loading; library() would error loudly
## if a package is missing instead of returning FALSE.
require(readr)
require(plyr)
require(igraph)
require(rgexf)
# set working directory
# NOTE(review): relative setwd() calls make this script fragile -- it only
# works when started from one specific directory.
getwd()
setwd("../query_results/merge_scripts/intersection_merge/")
# read node and edges into dataframe with the name expected by igraph
nodes <- read.csv("time_slice_1515_intersection_merge_pirck_and_era_correspondents.csv", fileEncoding="UTF-8")
links <- read.csv("time_slice_1515_intersection_merge_pirck_and_era_letters_corr_as_nodes.csv", fileEncoding="UTF-8")[ ,c('Source', 'Target')]
setwd("../../")
getwd()
# list of correspondents appearing in both letter collections
mutcorr <- read.csv("./intersection_overview/id_and_names_of_mut_corr_era_pirck.csv", fileEncoding="UTF-8")
## Colour coding for the correspondence network:
##   grey -- all correspondents (default)
##   red  -- correspondents shared by both letter collections
##   blue -- Erasmus; gold -- Pirckheimer
nodes$colour <- "#525252"
# mutual correspondents of Erasmus and Pirckheimer
nodes$colour <- ifelse(nodes$Id %in% mutcorr$correspondents_id, "#C3161F", nodes$colour)
# assign specific colour for Erasmus
nodes$colour <- ifelse(nodes$Id == "17c580aa-3ba7-4851-8f26-9b3a0ebeadbf", "#3C93AF", nodes$colour)
# assign specific colour for Pirckheimer.
# BUGFIX: the original comparison string carried a trailing space
# ("...d080 "), so it could never equal an Id and the node kept its previous
# colour; the stray space is removed here.
nodes$colour <- ifelse(nodes$Id == "d9233b24-a98c-4279-8065-e2ab70c0d080", "#D5AB5B", nodes$colour)
# every letter (edge) starts with weight 1; parallel edges are summed later
links$weight <- 1
# Build a directed graph: one node per correspondent, one edge per letter.
net <- graph_from_data_frame(d=links, vertices=nodes, directed=T)
# Edge bundling: collapse parallel edges between the same pair of nodes into
# a single edge whose weight is the sum of the per-letter weights.
net2 <- igraph::simplify(net, remove.multiple = TRUE, edge.attr.comb=list(weight="sum","ignore"))
# Unweighted degree (in + out) for every node.
degAll <- degree(net2, v = V(net2), mode = "all")
# Weighted degree (sum of incident edge weights), loops included.
weightDegAll <- strength(net2, vids = V(net2), mode = "all",
                         loops = TRUE)
# Attach the computed metrics and colours as graph attributes for export.
net2 <- set.vertex.attribute(net2, "weightDegAll", index = V(net2), value = weightDegAll)
net2 <- set.vertex.attribute(net2, "degree", index = V(net2), value = degAll)
# NOTE(review): assumes V(net2) preserves the row order of `nodes`
# (graph_from_data_frame keeps vertex order) -- confirm if inputs change.
net2 <- set.vertex.attribute(net2, "colour", index = V(net2), value = nodes$colour)
net2 <- set.edge.attribute(net2, "weight", index = E(net2), value = E(net2)$weight)
# Colour each edge after its source node's colour.
edge.start <- ends(net2, es=E(net2), names=F)[,1]
edge.col <- V(net2)$colour[edge.start]
# Fruchterman-Reingold layout, weighted by letter counts and scaled up.
l <- layout_with_fr(net2, weights=E(net2)$weight)*3.5
# Plot: small coloured nodes, edge width proportional to letter count,
# edges coloured by their source node.
plot(net2, layout=l*5, vertex.color=nodes$colour, vertex.size=2, vertex.label=V(net2)$Label, vertex.label.font=2, vertex.label.color="gray40",
     vertex.label.cex=.3, edge.arrow.size=.2, edge.width=E(net2)$weight*0.5, edge.color=edge.col, vertex.label.family="sans")
#################
# GEXF EXPORT: recompute layout coordinates and pack node/edge visual
# attributes in the format rgexf expects (RGBA data frames + 3D positions).
# calculate node coordinates (fresh FR layout, scaled for Gephi)
nodes_coord <- as.data.frame(layout.fruchterman.reingold(net2, weights=E(net2)$weight)*50)
# gexf positions are 3D: append a z coordinate of 0
nodes_coord <- cbind(nodes_coord, rep(0, times = nrow(nodes_coord)))
# assign a colour for each node
nodes_col <- V(net2)$colour
# transform hex colours into an RGB data frame (col2rgb returns a 3 x n
# matrix, hence the transpose)
nodes_col_df <- as.data.frame(t(col2rgb(nodes_col, alpha = FALSE)))
nodes_col_df <- cbind(nodes_col_df, alpha = rep(1, times = nrow(nodes_col_df)))
# assign visual attributes to nodes (RGBA + position)
nodes_att_viz <- list(color = nodes_col_df, position = nodes_coord)
# assign a colour for each edge (source-node colour computed earlier)
edges_col <- edge.col
# Transform it into a data frame (we have to transpose it first)
edges_col_df <- as.data.frame(t(col2rgb(edges_col, alpha = FALSE)))
edges_col_df <- cbind(edges_col_df, alpha = rep(1, times = nrow(edges_col_df)))
# assign visual attributes to edges (RGBA)
edges_att_viz <- list(color = edges_col_df)
# create the node/edge data frames for gexf export (sequential integer ids)
nodes_df <- data.frame(ID = c(1:vcount(net2)), NAME = V(net2)$Label)
edges_df <- as.data.frame(get.edges(net2, c(1:ecount(net2))))
# create a dataframe with node attributes (degree, colour, weighted degree)
nodes_att <- data.frame(Degree = V(net2)$degree, colour = as.character(nodes$colour), "Weighted Degree" = V(net2)$weightDegAll)
setwd("../")
getwd()
# NOTE(review): output folder is named "complete_merge_..." although this
# script produces the *intersection* merge -- confirm the intended target dir.
setwd("./network_data/complete_merge_time_slices_gexf_created_by_r")
# write gexf
era_pirck_imerge_1515 <- write.gexf(nodes = nodes_df, edges = edges_df, edgesWeight = E(net2)$weight, nodesAtt = nodes_att, nodesVizAtt = nodes_att_viz, edgesVizAtt = edges_att_viz, defaultedgetype = "directed", meta = list( creator="Christoph Kudella", description="A graph representing the intersection between Erasmus's and Pirckheimer's networks of correspondence in the year 1515"), output="era_pirck_imerge_1515.gexf")
| /intersections_pirckheimer_erasmus/r_scripts/intersection_merge_time_slices_gexf_created_by_r/create_gexf_intersection_mergepirck_era_1515.R | no_license | CKudella/corr_data | R | false | false | 4,324 | r | require(readr)
require(plyr)
require(igraph)
require(rgexf)
# set working directory to the folder holding the 1515 time-slice tables
getwd()
setwd("../query_results/merge_scripts/intersection_merge/")
# read nodes (correspondents) and edges (letters) into data frames with the
# layout igraph expects; the edge list needs only the Source/Target columns
nodes <- read.csv("time_slice_1515_intersection_merge_pirck_and_era_correspondents.csv", fileEncoding="UTF-8")
links <- read.csv("time_slice_1515_intersection_merge_pirck_and_era_letters_corr_as_nodes.csv", fileEncoding="UTF-8")[ ,c('Source', 'Target')]
setwd("../../")
getwd()
# ids/names of the correspondents shared by both networks
mutcorr <- read.csv("./intersection_overview/id_and_names_of_mut_corr_era_pirck.csv", fileEncoding="UTF-8")
# default colour (grey) for all correspondents
nodes$colour <- "#525252"
# highlight mutual correspondents of both networks in red
nodes$colour <- ifelse(nodes$Id %in% mutcorr$correspondents_id, as.character("#C3161F"), nodes$colour)
# assign specific colour for Erasmus (blue)
nodes$colour <- ifelse(nodes$Id == "17c580aa-3ba7-4851-8f26-9b3a0ebeadbf", as.character("#3C93AF"), nodes$colour)
# assign specific colour for Pirckheimer (gold).
# BUGFIX: the original id literal ended in a stray space
# ("...e2ab70c0d080 "), so the comparison never matched and Pirckheimer
# was never given his dedicated colour.
nodes$colour <- ifelse(nodes$Id == "d9233b24-a98c-4279-8065-e2ab70c0d080", as.character("#D5AB5B"), nodes$colour)
# every letter counts once; parallel edges are summed by simplify() later
links$weight <- 1
# Build a directed graph: one node per correspondent, one edge per letter.
net <- graph_from_data_frame(d=links, vertices=nodes, directed=T)
# Edge bundling: collapse parallel edges between the same pair of nodes into
# a single edge whose weight is the sum of the per-letter weights.
net2 <- igraph::simplify(net, remove.multiple = TRUE, edge.attr.comb=list(weight="sum","ignore"))
# Unweighted degree (in + out) for every node.
degAll <- degree(net2, v = V(net2), mode = "all")
# Weighted degree (sum of incident edge weights), loops included.
weightDegAll <- strength(net2, vids = V(net2), mode = "all",
                         loops = TRUE)
# Attach the computed metrics and colours as graph attributes for export.
net2 <- set.vertex.attribute(net2, "weightDegAll", index = V(net2), value = weightDegAll)
net2 <- set.vertex.attribute(net2, "degree", index = V(net2), value = degAll)
# NOTE(review): assumes V(net2) preserves the row order of `nodes`
# (graph_from_data_frame keeps vertex order) -- confirm if inputs change.
net2 <- set.vertex.attribute(net2, "colour", index = V(net2), value = nodes$colour)
net2 <- set.edge.attribute(net2, "weight", index = E(net2), value = E(net2)$weight)
# Colour each edge after its source node's colour.
edge.start <- ends(net2, es=E(net2), names=F)[,1]
edge.col <- V(net2)$colour[edge.start]
# Fruchterman-Reingold layout, weighted by letter counts and scaled up.
l <- layout_with_fr(net2, weights=E(net2)$weight)*3.5
# Plot: small coloured nodes, edge width proportional to letter count,
# edges coloured by their source node.
plot(net2, layout=l*5, vertex.color=nodes$colour, vertex.size=2, vertex.label=V(net2)$Label, vertex.label.font=2, vertex.label.color="gray40",
     vertex.label.cex=.3, edge.arrow.size=.2, edge.width=E(net2)$weight*0.5, edge.color=edge.col, vertex.label.family="sans")
#################
# GEXF EXPORT: recompute layout coordinates and pack node/edge visual
# attributes in the format rgexf expects (RGBA data frames + 3D positions).
# calculate node coordinates (fresh FR layout, scaled for Gephi)
nodes_coord <- as.data.frame(layout.fruchterman.reingold(net2, weights=E(net2)$weight)*50)
# gexf positions are 3D: append a z coordinate of 0
nodes_coord <- cbind(nodes_coord, rep(0, times = nrow(nodes_coord)))
# assign a colour for each node
nodes_col <- V(net2)$colour
# transform hex colours into an RGB data frame (col2rgb returns a 3 x n
# matrix, hence the transpose)
nodes_col_df <- as.data.frame(t(col2rgb(nodes_col, alpha = FALSE)))
nodes_col_df <- cbind(nodes_col_df, alpha = rep(1, times = nrow(nodes_col_df)))
# assign visual attributes to nodes (RGBA + position)
nodes_att_viz <- list(color = nodes_col_df, position = nodes_coord)
# assign a colour for each edge (source-node colour computed earlier)
edges_col <- edge.col
# Transform it into a data frame (we have to transpose it first)
edges_col_df <- as.data.frame(t(col2rgb(edges_col, alpha = FALSE)))
edges_col_df <- cbind(edges_col_df, alpha = rep(1, times = nrow(edges_col_df)))
# assign visual attributes to edges (RGBA)
edges_att_viz <- list(color = edges_col_df)
# create the node/edge data frames for gexf export (sequential integer ids)
nodes_df <- data.frame(ID = c(1:vcount(net2)), NAME = V(net2)$Label)
edges_df <- as.data.frame(get.edges(net2, c(1:ecount(net2))))
# create a dataframe with node attributes (degree, colour, weighted degree)
nodes_att <- data.frame(Degree = V(net2)$degree, colour = as.character(nodes$colour), "Weighted Degree" = V(net2)$weightDegAll)
setwd("../")
getwd()
# NOTE(review): output folder is named "complete_merge_..." although this
# script produces the *intersection* merge -- confirm the intended target dir.
setwd("./network_data/complete_merge_time_slices_gexf_created_by_r")
# write gexf
era_pirck_imerge_1515 <- write.gexf(nodes = nodes_df, edges = edges_df, edgesWeight = E(net2)$weight, nodesAtt = nodes_att, nodesVizAtt = nodes_att_viz, edgesVizAtt = edges_att_viz, defaultedgetype = "directed", meta = list( creator="Christoph Kudella", description="A graph representing the intersection between Erasmus's and Pirckheimer's networks of correspondence in the year 1515"), output="era_pirck_imerge_1515.gexf")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coder-package.R
\docType{package}
\name{coder-package}
\alias{coder}
\alias{coder-package}
\title{coder: Deterministic Categorization of Items Based on External Code Data}
\description{
Fast categorization of items based on external code data identified by
regular expressions. A typical use case considers patient with medically coded
data, such as codes from the International Classification of Diseases ('ICD') or
the Anatomic Therapeutic Chemical ('ATC') classification system.
Functions of the package relies on a triad of objects: (1) case data with unit
id:s and possible dates of interest; (2) external code data for corresponding
units in (1) and with optional dates of interest and; (3) a classification
scheme ('classcodes' object) with regular expressions to identify and
categorize relevant codes from (2).
It is easy to introduce new classification schemes ('classcodes' objects) or
to use default schemes included in the package. Use cases includes patient
categorization based on 'comorbidity indices' such as 'Charlson', 'Elixhauser',
'RxRisk V', or the 'comorbidity-polypharmacy' score (CPS), as well as adverse
events after hip and knee replacement surgery.
}
\seealso{
Useful links:
\itemize{
\item \url{https://docs.ropensci.org/coder/}
\item Report bugs at \url{https://github.com/ropensci/coder/issues}
}
}
\author{
\strong{Maintainer}: Erik Bulow \email{eriklgb@gmail.com} (\href{https://orcid.org/0000-0002-9973-456X}{ORCID})
Other contributors:
\itemize{
\item Emily C Zabore (Emily reviewed the package (v. 0.12.1) for rOpenSci, see <https://github.com/ropensci/software-review/issues/381>) [reviewer]
\item David Robinson (David reviewed the package (v. 0.12.1) for rOpenSci, see <https://github.com/ropensci/software-review/issues/381>) [reviewer]
}
}
\keyword{internal}
| /man/coder-package.Rd | no_license | Kuroshiwo/coder | R | false | true | 1,929 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coder-package.R
\docType{package}
\name{coder-package}
\alias{coder}
\alias{coder-package}
\title{coder: Deterministic Categorization of Items Based on External Code Data}
\description{
Fast categorization of items based on external code data identified by
regular expressions. A typical use case considers patient with medically coded
data, such as codes from the International Classification of Diseases ('ICD') or
the Anatomic Therapeutic Chemical ('ATC') classification system.
Functions of the package relies on a triad of objects: (1) case data with unit
id:s and possible dates of interest; (2) external code data for corresponding
units in (1) and with optional dates of interest and; (3) a classification
scheme ('classcodes' object) with regular expressions to identify and
categorize relevant codes from (2).
It is easy to introduce new classification schemes ('classcodes' objects) or
to use default schemes included in the package. Use cases includes patient
categorization based on 'comorbidity indices' such as 'Charlson', 'Elixhauser',
'RxRisk V', or the 'comorbidity-polypharmacy' score (CPS), as well as adverse
events after hip and knee replacement surgery.
}
\seealso{
Useful links:
\itemize{
\item \url{https://docs.ropensci.org/coder/}
\item Report bugs at \url{https://github.com/ropensci/coder/issues}
}
}
\author{
\strong{Maintainer}: Erik Bulow \email{eriklgb@gmail.com} (\href{https://orcid.org/0000-0002-9973-456X}{ORCID})
Other contributors:
\itemize{
\item Emily C Zabore (Emily reviewed the package (v. 0.12.1) for rOpenSci, see <https://github.com/ropensci/software-review/issues/381>) [reviewer]
\item David Robinson (David reviewed the package (v. 0.12.1) for rOpenSci, see <https://github.com/ropensci/software-review/issues/381>) [reviewer]
}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iot_operations.R
\name{iot_list_audit_tasks}
\alias{iot_list_audit_tasks}
\title{Lists the Device Defender audits that have been performed during a given
time period}
\usage{
iot_list_audit_tasks(startTime, endTime, taskType, taskStatus,
nextToken, maxResults)
}
\arguments{
\item{startTime}{[required] The beginning of the time period. Audit information is retained for a
limited time (90 days). Requesting a start time prior to what is
retained results in an "InvalidRequestException".}
\item{endTime}{[required] The end of the time period.}
\item{taskType}{A filter to limit the output to the specified type of audit: can be one
of "ON\\_DEMAND\\_AUDIT\\_TASK" or "SCHEDULED\\_AUDIT\\_TASK".}
\item{taskStatus}{A filter to limit the output to audits with the specified completion
status: can be one of "IN\\_PROGRESS", "COMPLETED", "FAILED", or
"CANCELED".}
\item{nextToken}{The token for the next set of results.}
\item{maxResults}{The maximum number of results to return at one time. The default is 25.}
}
\description{
Lists the Device Defender audits that have been performed during a given
time period.
}
\section{Request syntax}{
\preformatted{svc$list_audit_tasks(
startTime = as.POSIXct(
"2015-01-01"
),
endTime = as.POSIXct(
"2015-01-01"
),
taskType = "ON_DEMAND_AUDIT_TASK"|"SCHEDULED_AUDIT_TASK",
taskStatus = "IN_PROGRESS"|"COMPLETED"|"FAILED"|"CANCELED",
nextToken = "string",
maxResults = 123
)
}
}
\keyword{internal}
| /cran/paws.internet.of.things/man/iot_list_audit_tasks.Rd | permissive | sanchezvivi/paws | R | false | true | 1,553 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iot_operations.R
\name{iot_list_audit_tasks}
\alias{iot_list_audit_tasks}
\title{Lists the Device Defender audits that have been performed during a given
time period}
\usage{
iot_list_audit_tasks(startTime, endTime, taskType, taskStatus,
nextToken, maxResults)
}
\arguments{
\item{startTime}{[required] The beginning of the time period. Audit information is retained for a
limited time (90 days). Requesting a start time prior to what is
retained results in an "InvalidRequestException".}
\item{endTime}{[required] The end of the time period.}
\item{taskType}{A filter to limit the output to the specified type of audit: can be one
of "ON\\_DEMAND\\_AUDIT\\_TASK" or "SCHEDULED\\_AUDIT\\_TASK".}
\item{taskStatus}{A filter to limit the output to audits with the specified completion
status: can be one of "IN\\_PROGRESS", "COMPLETED", "FAILED", or
"CANCELED".}
\item{nextToken}{The token for the next set of results.}
\item{maxResults}{The maximum number of results to return at one time. The default is 25.}
}
\description{
Lists the Device Defender audits that have been performed during a given
time period.
}
\section{Request syntax}{
\preformatted{svc$list_audit_tasks(
startTime = as.POSIXct(
"2015-01-01"
),
endTime = as.POSIXct(
"2015-01-01"
),
taskType = "ON_DEMAND_AUDIT_TASK"|"SCHEDULED_AUDIT_TASK",
taskStatus = "IN_PROGRESS"|"COMPLETED"|"FAILED"|"CANCELED",
nextToken = "string",
maxResults = 123
)
}
}
\keyword{internal}
|
# Auto-generated fuzz/regression input (appears to be an AFL-derived test
# case): A is a 7x1 matrix containing extreme double values, B a 1x1 matrix.
testlist <- list(A = structure(c(9.37602117908355e+235, 9.12488123524439e+192, 0, 0, 0, 0, 0), .Dim = c(7L, 1L)), B = structure(0, .Dim = c(1L, 1L)))
# Exercise the internal function with the fuzzed arguments.
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613103973-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 218 | r | testlist <- list(A = structure(c(9.37602117908355e+235, 9.12488123524439e+192, 0, 0, 0, 0, 0), .Dim = c(7L, 1L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
#proof_of_strategy_plot.R
# Compare gene-level methylation differences from WGBS, MBD-seq and
# MethylRAD (scatter panels + volcano).
rm(list=ls())
source('benchmarking_functions.R')

# LOAD DATA ---------------------------------------------------------------
# Load the pool count-reduced data for WGBS and MBD-seq, and the normal
# counts for MethylRAD. Each .Rdata supplies `csums` (per-sample read
# totals) plus the assay's results data frame (pm.dat / mbd.dat / mr.dat).
ll=load('picomethyl/poolTest/countReducedDifferences.Rdata')
pm.csums=csums
ll
ll=load('mbdSeq/poolTest/countReducedDifferencesWub.Rdata')
mbd.csums=csums
ll
ll=load('methylRAD/datasets/countReducedDifferences.Rdata')
mr.csums=csums
ll

# SELECT DATA -------------------------------------------------------------
# Select particular reductions based on the correlation plateaus (figure 4).
#WGBS: 50% read reduction
pm = pm.dat %>%
  dplyr::select(name, pct.50_meth.diff) %>%
  set_names(c('name', 'pm')) %>%
  mutate(name=as.character(name)) %>%
  as_tibble
pm.nreads = 0.5*sum(pm.csums) / 1e6
#MBD-seq: 25% read reduction
mbd = mbd.dat %>%
  dplyr::select(name, pct.25_log2FoldChange) %>%
  set_names(c('name', 'mbd')) %>%
  as_tibble
mbd.nreads = 0.25*sum(mbd.csums)/1e6
#MethylRAD: full counts
mr = mr.dat %>%
  dplyr::select(name, pct.100_log2FoldChange) %>%
  set_names(c('name', 'mr')) %>%
  as_tibble
# BUGFIX: divide by 1e6 so mr.nreads is in millions of reads, matching
# pm.nreads and mbd.nreads above (the original omitted the scaling).
mr.nreads = 0.125*sum(mr.csums)/1e6

# Merge the three assays into one wide table keyed by gene name.
dList = list(pm, mbd, mr)
dat = dList %>%
  purrr::reduce(full_join, by='name')
# GET PCA -----------------------------------------------------------
# Z-score each assay's difference measure so the three are on a common
# scale, then run PCA on genes measured by all three assays.
log2folds = dat %>%
  select(pm, mbd, mr)
scaled = map_dfc(log2folds, ~ (.x - mean(.x, na.rm = TRUE))/sd(.x, na.rm = TRUE)) %>%
  data.frame()
rownames(scaled)=dat$name
# prcomp() cannot handle NAs: keep only genes with data from every assay.
scaled_nona = na.omit(scaled)
#BUILD PCA
mpca = prcomp(scaled_nona)
percentVar <- mpca$sdev^2/sum(mpca$sdev^2)
# Flip PC1's sign (PCA sign is arbitrary; presumably for plot orientation).
mcoords = mpca$x %>%
  data.frame() %>%
  mutate(PC1=PC1*-1)
pdat = cbind(scaled_nona, mcoords)
#GET RESIDUALS
# Residual of one assay's scaled score regressed on PC1 (the shared signal);
# large residuals mark genes where that assay disagrees with the consensus.
get_resid = function(var) {
  dat = select(pdat, PC1, y = {{ var }})
  as.numeric(lm(y ~ PC1, data = dat)$resid)
}
rdat = pdat %>%
  mutate(pm.resid = get_resid(pm),
         mr.resid = get_resid(mr),
         mbd.resid = get_resid(mbd),
         name=rownames(scaled_nona)) %>%
  as_tibble()
#CALL FALSE POSITIVES
# Flag genes whose residual exceeds 1.96 (two-sided ~5% normal cutoff).
RCUT=1.96
fpdat = rdat %>%
  mutate(pm.fp = abs(pm.resid) > RCUT,
         mr.fp = abs(mr.resid) > RCUT,
         mbd.fp = abs(mbd.resid) > RCUT)
# Quick visual check: MethylRAD score vs PC1, flagged genes in red.
fpdat %>%
  ggplot(aes(x=PC1, y=mr, color=mr.fp)) +
  geom_point() +
  scale_color_manual(values=c('black', 'red'))
# PLOT --------------------------------------------------------------------
# Regress an assay's score (pm or mbd) against the MethylRAD score (mr) and
# return name/x/y plus the per-gene residual.
assay_resid = function(var){
  d2 = dplyr::select(dat, name, y=mr, x={{var}}) %>%
    na.omit()
  d2$resid = as.numeric(lm(y ~ x, data = d2)$resid)
  return(d2)
}
# Join the residual column of r onto df under the name colName.
add_resids = function(r, df, colName){
  r=r %>%
    dplyr::select(name, resid)
  colnames(r)[2]=colName
  df %>%
    left_join(r, by='name')
}
pmr = assay_resid(pm)
mbdr = assay_resid(mbd)
dat2 = add_resids(pmr, dat, 'pmr')
# BUGFIX: the second join must use the MBD-seq residuals (mbdr); the
# original passed pmr again, so 'mbdr' silently duplicated the WGBS column.
dat2 = add_resids(mbdr, dat2, 'mbdr')
plot_scatter_pearsonCor_annotated(dat2, 'pm', 'mr', 'WGBS', 'MethylRAD', ALPHA=0.3)
plot_scatter_pearsonCor_annotated(dat2, 'mbd', 'mr', 'MBD-seq', 'MethylRAD', ALPHA=0.3)
nred = sum(fpdat$mr.fp)
#FINAL PLOT: MethylRAD vs PC1, flagged outliers highlighted in red.
plot_scatter_pearsonCor_annotated(fpdat, 'PC1', 'mr', 'PC1', 'MethylRAD', ALPHA=0.3) +
  geom_point(aes(color=mr.fp)) +
  scale_color_manual(values=c('black', 'red')) +
  labs(color='|residual| > 1.96',
       subtitle=paste('N =', nred),
       x=paste('PC1 (', round(percentVar[1], digits=2)*100, '% variance explained)', sep='')) +
  theme(plot.subtitle = element_text(color='red'))
#VOLCANO: MethylRAD fold difference vs significance.
nsig = sum(mr.dat$pct.12.5_padj < 0.1, na.rm=TRUE)
mr.dat %>%
  select(lf=pct.12.5_log2FoldChange, p=pct.12.5_padj) %>%
  filter(!is.na(p)) %>%
  mutate(sig=factor(p<0.1, levels=c(TRUE, FALSE))) %>%
  ggplot(aes(x=lf, y=-log(p, 10), color=sig)) +
  geom_point(alpha=0.3) +
  scale_color_manual(values=c('red', 'black')) +
  labs(x=bquote(log[2]*'fold difference'),
       y=bquote("-"*log[10]*'pvalue'),
       color='FDR>0.1',
       subtitle=paste('N =', nsig)) +
  # BUGFIX: the bare theme(plot.subtitle) was an incomplete argument;
  # colour the subtitle red as in the plot above.
  theme(plot.subtitle = element_text(color='red'))
| /figure_plotting/proof_of_strategy_plot.R | no_license | Groves-Dixon-Matz-laboratory/benchmarking_coral_methylation | R | false | false | 3,895 | r | #proof_of_strategy_plot.R
# Compare gene-level methylation differences from WGBS, MBD-seq and
# MethylRAD (scatter panels + volcano).
rm(list=ls())
source('benchmarking_functions.R')

# LOAD DATA ---------------------------------------------------------------
# Load the pool count-reduced data for WGBS and MBD-seq, and the normal
# counts for MethylRAD. Each .Rdata supplies `csums` (per-sample read
# totals) plus the assay's results data frame (pm.dat / mbd.dat / mr.dat).
ll=load('picomethyl/poolTest/countReducedDifferences.Rdata')
pm.csums=csums
ll
ll=load('mbdSeq/poolTest/countReducedDifferencesWub.Rdata')
mbd.csums=csums
ll
ll=load('methylRAD/datasets/countReducedDifferences.Rdata')
mr.csums=csums
ll

# SELECT DATA -------------------------------------------------------------
# Select particular reductions based on the correlation plateaus (figure 4).
#WGBS: 50% read reduction
pm = pm.dat %>%
  dplyr::select(name, pct.50_meth.diff) %>%
  set_names(c('name', 'pm')) %>%
  mutate(name=as.character(name)) %>%
  as_tibble
pm.nreads = 0.5*sum(pm.csums) / 1e6
#MBD-seq: 25% read reduction
mbd = mbd.dat %>%
  dplyr::select(name, pct.25_log2FoldChange) %>%
  set_names(c('name', 'mbd')) %>%
  as_tibble
mbd.nreads = 0.25*sum(mbd.csums)/1e6
#MethylRAD: full counts
mr = mr.dat %>%
  dplyr::select(name, pct.100_log2FoldChange) %>%
  set_names(c('name', 'mr')) %>%
  as_tibble
# BUGFIX: divide by 1e6 so mr.nreads is in millions of reads, matching
# pm.nreads and mbd.nreads above (the original omitted the scaling).
mr.nreads = 0.125*sum(mr.csums)/1e6

# Merge the three assays into one wide table keyed by gene name.
dList = list(pm, mbd, mr)
dat = dList %>%
  purrr::reduce(full_join, by='name')
# GET PCA -----------------------------------------------------------
# Z-score each assay's difference measure so the three are on a common
# scale, then run PCA on genes measured by all three assays.
log2folds = dat %>%
  select(pm, mbd, mr)
scaled = map_dfc(log2folds, ~ (.x - mean(.x, na.rm = TRUE))/sd(.x, na.rm = TRUE)) %>%
  data.frame()
rownames(scaled)=dat$name
# prcomp() cannot handle NAs: keep only genes with data from every assay.
scaled_nona = na.omit(scaled)
#BUILD PCA
mpca = prcomp(scaled_nona)
percentVar <- mpca$sdev^2/sum(mpca$sdev^2)
# Flip PC1's sign (PCA sign is arbitrary; presumably for plot orientation).
mcoords = mpca$x %>%
  data.frame() %>%
  mutate(PC1=PC1*-1)
pdat = cbind(scaled_nona, mcoords)
#GET RESIDUALS
# Residual of one assay's scaled score regressed on PC1 (the shared signal);
# large residuals mark genes where that assay disagrees with the consensus.
get_resid = function(var) {
  dat = select(pdat, PC1, y = {{ var }})
  as.numeric(lm(y ~ PC1, data = dat)$resid)
}
rdat = pdat %>%
  mutate(pm.resid = get_resid(pm),
         mr.resid = get_resid(mr),
         mbd.resid = get_resid(mbd),
         name=rownames(scaled_nona)) %>%
  as_tibble()
#CALL FALSE POSITIVES
# Flag genes whose residual exceeds 1.96 (two-sided ~5% normal cutoff).
RCUT=1.96
fpdat = rdat %>%
  mutate(pm.fp = abs(pm.resid) > RCUT,
         mr.fp = abs(mr.resid) > RCUT,
         mbd.fp = abs(mbd.resid) > RCUT)
# Quick visual check: MethylRAD score vs PC1, flagged genes in red.
fpdat %>%
  ggplot(aes(x=PC1, y=mr, color=mr.fp)) +
  geom_point() +
  scale_color_manual(values=c('black', 'red'))
# PLOT --------------------------------------------------------------------
# Regress an assay's score (pm or mbd) against the MethylRAD score (mr) and
# return name/x/y plus the per-gene residual.
assay_resid = function(var){
  d2 = dplyr::select(dat, name, y=mr, x={{var}}) %>%
    na.omit()
  d2$resid = as.numeric(lm(y ~ x, data = d2)$resid)
  return(d2)
}
# Join the residual column of r onto df under the name colName.
add_resids = function(r, df, colName){
  r=r %>%
    dplyr::select(name, resid)
  colnames(r)[2]=colName
  df %>%
    left_join(r, by='name')
}
pmr = assay_resid(pm)
mbdr = assay_resid(mbd)
dat2 = add_resids(pmr, dat, 'pmr')
# BUGFIX: the second join must use the MBD-seq residuals (mbdr); the
# original passed pmr again, so 'mbdr' silently duplicated the WGBS column.
dat2 = add_resids(mbdr, dat2, 'mbdr')
plot_scatter_pearsonCor_annotated(dat2, 'pm', 'mr', 'WGBS', 'MethylRAD', ALPHA=0.3)
plot_scatter_pearsonCor_annotated(dat2, 'mbd', 'mr', 'MBD-seq', 'MethylRAD', ALPHA=0.3)
nred = sum(fpdat$mr.fp)
#FINAL PLOT: MethylRAD vs PC1, flagged outliers highlighted in red.
plot_scatter_pearsonCor_annotated(fpdat, 'PC1', 'mr', 'PC1', 'MethylRAD', ALPHA=0.3) +
  geom_point(aes(color=mr.fp)) +
  scale_color_manual(values=c('black', 'red')) +
  labs(color='|residual| > 1.96',
       subtitle=paste('N =', nred),
       x=paste('PC1 (', round(percentVar[1], digits=2)*100, '% variance explained)', sep='')) +
  theme(plot.subtitle = element_text(color='red'))
#VOLCANO: MethylRAD fold difference vs significance.
nsig = sum(mr.dat$pct.12.5_padj < 0.1, na.rm=TRUE)
mr.dat %>%
  select(lf=pct.12.5_log2FoldChange, p=pct.12.5_padj) %>%
  filter(!is.na(p)) %>%
  mutate(sig=factor(p<0.1, levels=c(TRUE, FALSE))) %>%
  ggplot(aes(x=lf, y=-log(p, 10), color=sig)) +
  geom_point(alpha=0.3) +
  scale_color_manual(values=c('red', 'black')) +
  labs(x=bquote(log[2]*'fold difference'),
       y=bquote("-"*log[10]*'pvalue'),
       color='FDR>0.1',
       subtitle=paste('N =', nsig)) +
  # BUGFIX: the bare theme(plot.subtitle) was an incomplete argument;
  # colour the subtitle red as in the plot above.
  theme(plot.subtitle = element_text(color='red'))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tpmCat.R
\docType{methods}
\name{tpmCat}
\alias{tpmCat}
\alias{tpm}
\alias{tpmCat,data.frame-method}
\alias{tpmCat,tpm-method}
\title{Computerized Adaptive Testing Birnbaum's Three Parameter Model}
\usage{
\S4method{tpmCat}{data.frame}(data, quadraturePoints = 21, ...)
\S4method{tpmCat}{tpm}(data, quadraturePoints = NULL, ...)
}
\arguments{
\item{data}{A data frame of manifest variables or an object of class \code{tpm}.}
\item{quadraturePoints}{A numeric to be passed into the \code{tpm} function indicating the number of Gauss-Hermite quadrature points. Only applicable when \code{data} is a data frame. Default value is \code{21}.}
\item{...}{arguments to be passed to methods. For more details about the arguments, see \code{tpm} in the \code{ltm} package.}
}
\value{
The function \code{tpmCat} returns an object of class \code{Cat} with changes to the following slots:
\itemize{
\item \code{difficulty} A vector consisting of difficulty parameters for each item.
\item \code{discrimination} A vector consisting of discrimination parameters for each item.
\item \code{model} The string \code{"tpm"}, indicating this \code{Cat} object corresponds to Birnbaum's three parameter model.
}
See \code{\link{Cat-class}} for default values of \code{Cat} object slots. See \strong{Examples} and \code{\link{setters}} for example code to change slot values.
}
\description{
This function fits Birnbaum's three parameter model for binary data and populates the fitted values for discrimination, difficulty, and guessing parameters to an object of class \code{Cat}.
}
\details{
The \code{data} argument of the function \code{tpmCat} is either a data frame or an object of class \code{tpm} from the \code{ltm} package. If it is a data frame each row represents a respondent and each column represents a question item. If it is an object of the class \code{tpm}, it is output from the \code{tpm} function in the \code{ltm} package.
The \code{quadraturePoints} argument of the function \code{tpmCat} is used only when the \code{data} argument is a data frame. \code{quadraturePoints} is then passed to the \code{tpm} function from the \code{ltm} package when fitting Birnbaum's three parameter model to the data and is used when approximating the value of integrals.
}
\note{
In case the Hessian matrix at convergence is not positive definite try to use \code{start.val = "random"}.
}
\examples{
\dontrun{
## Creating Cat object with first 20 questions of with raw data
data(polknowMT)
tpm_cat1 <- tpmCat(polknowMT[,1:20], quadraturePoints = 100, start.val = "random")
## Creating Cat object with fitted object of class tpm
tpm_fit <- tpm(polknowMT[,1:20], control = list(GHk = 100), start.val = "random")
class(tpm_fit)
tpm_cat2 <- tpmCat(tpm_fit)
## Note the two Cat objects are identical
identical(tpm_cat1, tpm_cat2)
}
## Creating Cat objects from large datasets is computationally expensive
## Load the Cat object created from the above code
data(tpm_cat)
## Slots that have changed from default values
getModel(tpm_cat)
getDifficulty(tpm_cat)
getDiscrimination(tpm_cat)
## Changing slots from default values
setEstimation(tpm_cat) <- "MLE"
setSelection(tpm_cat) <- "MFI"
}
\references{
Baker, Frank B. and Seock-Ho Kim. 2004. Item Response Theory: Parameter Estimation Techniques. New York: Marcel Dekker.
Birnbaum, Allan. 1968. Some Latent Trait Models and their Use in Inferring an Examinee's Ability. In F. M. Lord and M. R. Novick (Eds.), Statistical Theories of Mental Test Scores, 397-479. Reading, MA: Addison-Wesley.
Rizopoulos, Dimitris. 2006. ``ltm: An R Package for Latent Variable Modeling and Item Response Theory Analyses." Journal of Statistical Software 17(5):1-25.
}
\seealso{
\code{\link{Cat-class}}, \code{\link{ltmCat}}, \code{\link{polknowMT}}, \code{\link{probability}}
}
\author{
Haley Acevedo, Ryden Butler, Josh W. Cutler, Matt Malis, Jacob M. Montgomery, Tom Wilkinson, Erin Rossiter, Min Hee Seo, Alex Weil
}
| /man/tpmCat.Rd | no_license | domlockett/catSurv | R | false | true | 4,033 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tpmCat.R
\docType{methods}
\name{tpmCat}
\alias{tpmCat}
\alias{tpm}
\alias{tpmCat,data.frame-method}
\alias{tpmCat,tpm-method}
\title{Computerized Adaptive Testing Birnbaum's Three Parameter Model}
\usage{
\S4method{tpmCat}{data.frame}(data, quadraturePoints = 21, ...)
\S4method{tpmCat}{tpm}(data, quadraturePoints = NULL, ...)
}
\arguments{
\item{data}{A data frame of manifest variables or an object of class \code{tpm}.}
\item{quadraturePoints}{A numeric to be passed into the \code{tpm} function indicating the number of Gauss-Hermite quadrature points. Only applicable when \code{data} is a data frame. Default value is \code{21}.}
\item{...}{arguments to be passed to methods. For more details about the arguments, see \code{tpm} in the \code{ltm} package.}
}
\value{
The function \code{tpmCat} returns an object of class \code{Cat} with changes to the following slots:
\itemize{
\item \code{difficulty} A vector consisting of difficulty parameters for each item.
\item \code{discrimination} A vector consisting of discrimination parameters for each item.
\item \code{model} The string \code{"tpm"}, indicating this \code{Cat} object corresponds to Birnbaum's three parameter model.
}
See \code{\link{Cat-class}} for default values of \code{Cat} object slots. See \strong{Examples} and \code{\link{setters}} for example code to change slot values.
}
\description{
This function fits Birnbaum's three parameter model for binary data and populates the fitted values for discrimination, difficulty, and guessing parameters to an object of class \code{Cat}.
}
\details{
The \code{data} argument of the function \code{tpmCat} is either a data frame or an object of class \code{tpm} from the \code{ltm} package. If it is a data frame each row represents a respondent and each column represents a question item. If it is an object of the class \code{tpm}, it is output from the \code{tpm} function in the \code{ltm} package.
The \code{quadraturePoints} argument of the function \code{tpmCat} is used only when the \code{data} argument is a data frame. \code{quadraturePoints} is then passed to the \code{tpm} function from the \code{ltm} package when fitting Birnbaum's three parameter model to the data and is used when approximating the value of integrals.
}
\note{
In case the Hessian matrix at convergence is not positive definite try to use \code{start.val = "random"}.
}
\examples{
\dontrun{
## Creating Cat object with first 20 questions of with raw data
data(polknowMT)
tpm_cat1 <- tpmCat(polknowMT[,1:20], quadraturePoints = 100, start.val = "random")
## Creating Cat object with fitted object of class tpm
tpm_fit <- tpm(polknowMT[,1:20], control = list(GHk = 100), start.val = "random")
class(tpm_fit)
tpm_cat2 <- tpmCat(tpm_fit)
## Note the two Cat objects are identical
identical(tpm_cat1, tpm_cat2)
}
## Creating Cat objects from large datasets is computationally expensive
## Load the Cat object created from the above code
data(tpm_cat)
## Slots that have changed from default values
getModel(tpm_cat)
getDifficulty(tpm_cat)
getDiscrimination(tpm_cat)
## Changing slots from default values
setEstimation(tpm_cat) <- "MLE"
setSelection(tpm_cat) <- "MFI"
}
\references{
Baker, Frank B. and Seock-Ho Kim. 2004. Item Response Theory: Parameter Estimation Techniques. New York: Marcel Dekker.
Birnbaum, Allan. 1968. Some Latent Trait Models and their Use in Inferring an Examinee's Ability. In F. M. Lord and M. R. Novick (Eds.), Statistical Theories of Mental Test Scores, 397-479. Reading, MA: Addison-Wesley.
Rizopoulos, Dimitris. 2006. ``ltm: An R Package for Latent Variable Modeling and Item Response Theory Analyses." Journal of Statistical Software 17(5):1-25.
}
\seealso{
\code{\link{Cat-class}}, \code{\link{ltmCat}}, \code{\link{polknowMT}}, \code{\link{probability}}
}
\author{
Haley Acevedo, Ryden Butler, Josh W. Cutler, Matt Malis, Jacob M. Montgomery, Tom Wilkinson, Erin Rossiter, Min Hee Seo, Alex Weil
}
|
# Define some fixed parameters for the vital-rate models:
# s_* = survival (logit scale; s_dd is the density-dependence slope),
# g_* = growth (normal around a linear mean with sd sd_g),
# f_r_* = probability of reproducing, f_s_* = seed/offspring counts
# (log scale with density dependence), mu_fd/sd_fd = offspring size
# distribution.
data_list = list(
  s_int = 1.03,
  s_slope = 2.2,
  s_dd = -0.7,
  g_int = 8,
  g_slope = 0.92,
  sd_g = 0.9,
  f_r_int = 0.09,
  f_r_slope = 0.05,
  f_s_int = 0.1,
  f_s_slope = 0.005,
  f_s_dd = -0.03,
  mu_fd = 9,
  sd_fd = 2
)
# Now, simulate some random intercepts for growth, survival, and offspring
# production (one per year, 5 years).
# NOTE(review): no set.seed() here, so these draws -- and everything
# downstream -- differ between runs; add a seed if reproducibility matters.
g_r_int <- rnorm(5, 0, 0.3)
s_r_int <- rnorm(5, 0, 0.7)
f_s_r_int <- rnorm(5, 0, 0.2)
# Name the draws g_r_1..g_r_5, s_r_1..s_r_5, f_s_r_1..f_s_r_5 so the
# kernels can look them up by year suffix.
nms <- paste("r_", 1:5, sep = "")
names(g_r_int) <- paste("g_", nms, sep = "")
names(s_r_int) <- paste("s_", nms, sep = "")
names(f_s_r_int) <- paste("f_s_", nms, sep = "")
params <- c(data_list, g_r_int, s_r_int, f_s_r_int)
# Build a simple, density-dependent, stochastic (kernel-resampled) IPM.
x <- init_ipm(sim_gen = "simple",
              di_dd = "dd",
              det_stoch = "stoch",
              "kern") %>%
  # P kernel: survival * growth. Survival is density dependent via
  # sum(n_size_t) (total population at time t); growth is a normal around a
  # linear mean. The _yr suffix is expanded over years 1:5 via the
  # hierarchical-effects settings below.
  define_kernel(
    name = "P_yr",
    formula = s_yr * g_yr,
    family = "CC",
    s_yr = plogis(s_int + s_r_yr + s_slope * size_1 + s_dd * sum(n_size_t)),
    g_yr = dnorm(size_2, g_mu_yr, sd_g),
    g_mu_yr = g_int + g_r_yr + g_slope * size_1,
    data_list = params,
    states = list(c("size")),
    has_hier_effs = TRUE,
    levels_hier_effs = list(yr = 1:5),
    # renormalise g_yr on the size domain so no probability mass is evicted
    evict_cor = TRUE,
    evict_fun = truncated_distributions("norm", "g_yr")
  ) %>%
  # F kernel: P(reproducing) * offspring count (density dependent, with a
  # yearly random intercept) * offspring size distribution.
  define_kernel(
    name = "F_yr",
    formula = f_r * f_s_yr * f_d,
    family = "CC",
    f_r = plogis(f_r_int + f_r_slope * size_1),
    f_s_yr = exp(f_s_int + f_s_r_yr + f_s_slope * size_1 + f_s_dd * sum(n_size_t)),
    f_d = dnorm(size_2, mu_fd, sd_fd),
    data_list = params,
    states = list(c("size")),
    has_hier_effs = TRUE,
    levels_hier_effs = list(yr = 1:5),
    evict_cor = TRUE,
    evict_fun = truncated_distributions("norm", "f_d")
  ) %>%
  # Both kernels discretised with the midpoint rule on the size domain.
  define_impl(
    make_impl_args_list(
      kernel_names = c("P_yr", "F_yr"),
      int_rule = rep("midpoint", 2),
      state_start = rep("size", 2),
      state_end = rep("size", 2)
    )
  ) %>%
  # size domain: [0, 50] split into 200 bins
  define_domains(
    size = c(0, 50, 200)
  ) %>%
  # arbitrary (random) starting size distribution
  define_pop_state(
    n_size = runif(200)
  ) %>%
  # iterate 50 time steps, drawing one of the 5 yearly kernels each step
  make_ipm(
    iterate = TRUE,
    iterations = 50,
    kernel_seq = sample(1:5, 50, replace = TRUE)
  )
# realised sequence of year-kernels used in the simulation
use_seq <- x$env_seq
g_z1z <- function(z1, z, par_list, L, U, yr) {
  # Growth kernel, by hand: density of moving from size z to size z1 in
  # year `yr`, using a normal growth model truncated to the domain [L, U].
  #
  # Anchor the random-intercept lookup so "g_r_1" cannot also match a
  # longer name that merely contains it (consistent with s_z / f_z1z).
  g_int_r <- unlist(par_list[grepl(paste("^g_r_", yr, "$", sep = ""),
                                   names(par_list))])
  mu <- par_list$g_int + g_int_r + par_list$g_slope * z
  # Mass of the fitted normal inside [L, U]; dividing by it corrects for
  # eviction (probability mass falling outside the size domain).
  ev <- pnorm(U, mu, par_list$sd_g) - pnorm(L, mu, par_list$sd_g)
  out <- dnorm(z1, mu, par_list$sd_g) / ev
  return(out)
}
s_z <- function(z, par_list, yr, pop_size) {
  # Survival probability at size z in year `yr`, density dependent through
  # the total population size.
  #
  # BUG FIX: the lookup pattern must be anchored.  The unanchored
  # "s_r_<yr>" also matches "f_s_r_<yr>" in `params`, so the subset
  # returned TWO intercepts and the length-2 vector silently recycled
  # across z, corrupting the hand-computed survival.
  s_int_r <- unlist(par_list[grepl(paste("^s_r_", yr, "$", sep = ""),
                                   names(par_list))])
  out <- plogis(par_list$s_int + s_int_r + par_list$s_slope * z + par_list$s_dd * pop_size)
  return(out)
}
f_z1z <- function(z1, z, par_list, L, U, yr, pop_size) {
  # Fecundity kernel, by hand: reproduction probability (f_r) x seed count
  # (f_s, density dependent) x offspring size density (f_d, a normal
  # truncated to [L, U]).
  #
  # Anchored lookup of the year-specific seed-count intercept (see s_z for
  # why the anchor matters).
  f_s_r <- unlist(par_list[grepl(paste("^f_s_r_", yr, "$", sep = ""),
                                 names(par_list))])
  f_s <- exp(par_list$f_s_int + f_s_r + par_list$f_s_slope * z + par_list$f_s_dd * pop_size)
  f_r <- plogis(par_list$f_r_int + par_list$f_r_slope * z)
  # Eviction correction for the offspring size distribution.
  ev <- pnorm(U, par_list$mu_fd, par_list$sd_fd) - pnorm(L, par_list$mu_fd, par_list$sd_fd)
  f_d <- dnorm(z1, par_list$mu_fd, par_list$sd_fd) / ev
  out <- f_r * f_s * f_d
  return(out)
}
k_dd <- function(z1, z, par_list, L, U, yr, pop_size) {
  # Full projection kernel K = P + F on the midpoint mesh, scaled by the
  # bin width (midpoint rule).
  grow <- outer(z, z, FUN = g_z1z,
                par_list = par_list,
                L = L,
                U = U,
                yr = yr)
  surv <- s_z(z, par_list, yr, pop_size)
  fec <- outer(z, z, FUN = f_z1z,
               par_list = par_list,
               L = L,
               U = U,
               yr = yr,
               pop_size = pop_size)
  # Survival acts on the starting size, i.e. scales the columns of the
  # growth kernel: sweep(grow, 2, surv, "*") is t(surv * t(grow)).
  kern <- sweep(grow, 2, surv, "*") + fec
  bin_width <- z[2] - z[1]
  kern * bin_width
}
# Iterate the same model by hand: one column of pop_holder per time step
# (initial state + 50 iterations).
pop_holder <- matrix(NA_real_,
                     nrow = 200,
                     ncol = 51)
# Midpoint mesh on [0, 50] with 200 bins -- must match define_domains().
L <- 0
U <- 50
n <- 200
bounds <- seq(L, U, length.out = n + 1)
z <- z1 <- (bounds[2:201] + bounds[1:200]) * 0.5
# Start from ipmr's initial population state so both runs are comparable.
pop_holder[ , 1] <- x$pop_state$n_size[ , 1]
pop_size <- sum(pop_holder[ , 1])
for(i in 2:51) {
  # Rebuild the kernel each step: it depends on the current population
  # size (density dependence) and the year drawn by ipmr (use_seq).
  k <- k_dd(z1, z,
            par_list = params,
            L, U,
            yr = use_seq[(i - 1)],
            pop_size)
  pop_holder[ , i] <- k %*% pop_holder[ , (i - 1)]
  pop_size <- sum(pop_holder[ , i])
}
# Per-step growth rates: lambda_t = N_{t+1} / N_t for both runs.
ipmr_lam <- lambda(x, type_lambda = "all")
ipmr_pop_sizes <- colSums(x$pop_state$n_size)
hand_lam <- colSums(pop_holder[ , 2:51]) / colSums(pop_holder[ , 1:50])
hand_pop_sizes <- colSums(pop_holder)
test_that("asymptotic behavior is preserved at every time step", {
  expect_equal(as.vector(ipmr_lam), hand_lam, tolerance = 2e-2)
  expect_equal(ipmr_pop_sizes, hand_pop_sizes, tolerance = 1)
})
test_that("sub-kernel names and values are generated correctly", {
  # P kernels are survival*growth probabilities, so entries must be in [0, 1].
  p_rngs <- vapply(x$sub_kernels[grepl("P", names(x$sub_kernels))],
                   range,
                   numeric(2L))
  expect_true(all(p_rngs >=0 & p_rngs <= 1))
  # Expected names: P_<yr>_it_<t> and F_<yr>_it_<t> for yr 1:5, t 1:50.
  nms <- vapply(1:5,
                function(x) paste(c("P", "F"), x, sep = "_"),
                character(2L)) %>%
    as.vector() %>%
    vapply(., function(x) paste(x,"it", 1:50, sep = "_"), character(50L)) %>%
    as.vector()
  expect_true(all(nms %in% names(x$sub_kernels)))
})
| /tests/testthat/test-simple_dd_stoch_kern.R | permissive | davan690/ipmr | R | false | false | 5,474 | r |
# Define some fixed parameters
# ---------------------------------------------------------------------------
# Regression test setup: a simple, density-dependent, stochastic
# (kernel-resampled) IPM is built with ipmr and, further below, iterated by
# hand for comparison.  Vital rates:
#   s   = survival (logistic; density dependent via total population size)
#   g   = growth (Gaussian around a linear mean)
#   f_r = reproduction probability, f_s = seed count (density dependent),
#   f_d = offspring size distribution
data_list = list(
  s_int = 1.03,
  s_slope = 2.2,
  s_dd = -0.7,
  g_int = 8,
  g_slope = 0.92,
  sd_g = 0.9,
  f_r_int = 0.09,
  f_r_slope = 0.05,
  f_s_int = 0.1,
  f_s_slope = 0.005,
  f_s_dd = -0.03,
  mu_fd = 9,
  sd_fd = 2
)
# Now, simulate some random intercepts for growth, survival, and offspring production
# One intercept per simulated year (5 "hierarchical effect" levels).
g_r_int <- rnorm(5, 0, 0.3)
s_r_int <- rnorm(5, 0, 0.7)
f_s_r_int <- rnorm(5, 0, 0.2)
# Name them g_r_1..g_r_5, s_r_1..s_r_5, f_s_r_1..f_s_r_5 so both ipmr and
# the hand-rolled functions below can look them up by year suffix.
nms <- paste("r_", 1:5, sep = "")
names(g_r_int) <- paste("g_", nms, sep = "")
names(s_r_int) <- paste("s_", nms, sep = "")
names(f_s_r_int) <- paste("f_s_", nms, sep = "")
params <- c(data_list, g_r_int, s_r_int, f_s_r_int)
# Build the ipmr model: the _yr suffix is expanded over levels_hier_effs,
# and sum(n_size_t) injects the density dependence at each iteration.
x <- init_ipm(sim_gen = "simple",
              di_dd = "dd",
              det_stoch = "stoch",
              "kern") %>%
  define_kernel(
    name = "P_yr",
    formula = s_yr * g_yr,
    family = "CC",
    s_yr = plogis(s_int + s_r_yr + s_slope * size_1 + s_dd * sum(n_size_t)),
    g_yr = dnorm(size_2, g_mu_yr, sd_g),
    g_mu_yr = g_int + g_r_yr + g_slope * size_1,
    data_list = params,
    states = list(c("size")),
    has_hier_effs = TRUE,
    levels_hier_effs = list(yr = 1:5),
    evict_cor = TRUE,
    evict_fun = truncated_distributions("norm", "g_yr")
  ) %>%
  define_kernel(
    name = "F_yr",
    formula = f_r * f_s_yr * f_d,
    family = "CC",
    f_r = plogis(f_r_int + f_r_slope * size_1),
    f_s_yr = exp(f_s_int + f_s_r_yr + f_s_slope * size_1 + f_s_dd * sum(n_size_t)),
    f_d = dnorm(size_2, mu_fd, sd_fd),
    data_list = params,
    states = list(c("size")),
    has_hier_effs = TRUE,
    levels_hier_effs = list(yr = 1:5),
    evict_cor = TRUE,
    evict_fun = truncated_distributions("norm", "f_d")
  ) %>%
  define_impl(
    make_impl_args_list(
      kernel_names = c("P_yr", "F_yr"),
      int_rule = rep("midpoint", 2),
      state_start = rep("size", 2),
      state_end = rep("size", 2)
    )
  ) %>%
  define_domains(
    size = c(0, 50, 200)
  ) %>%
  define_pop_state(
    n_size = runif(200)
  ) %>%
  make_ipm(
    iterate = TRUE,
    iterations = 50,
    kernel_seq = sample(1:5, 50, replace = TRUE)
  )
# The realized year sequence ipmr actually used; re-used by the hand
# iteration below so both simulations see identical environments.
use_seq <- x$env_seq
g_z1z <- function(z1, z, par_list, L, U, yr) {
  # Growth kernel, by hand: density of moving from size z to size z1 in
  # year `yr`, using a normal growth model truncated to the domain [L, U].
  #
  # Anchor the random-intercept lookup so "g_r_1" cannot also match a
  # longer name that merely contains it (consistent with s_z / f_z1z).
  g_int_r <- unlist(par_list[grepl(paste("^g_r_", yr, "$", sep = ""),
                                   names(par_list))])
  mu <- par_list$g_int + g_int_r + par_list$g_slope * z
  # Mass of the fitted normal inside [L, U]; dividing by it corrects for
  # eviction (probability mass falling outside the size domain).
  ev <- pnorm(U, mu, par_list$sd_g) - pnorm(L, mu, par_list$sd_g)
  out <- dnorm(z1, mu, par_list$sd_g) / ev
  return(out)
}
s_z <- function(z, par_list, yr, pop_size) {
  # Survival probability at size z in year `yr`, density dependent through
  # the total population size.
  #
  # BUG FIX: the lookup pattern must be anchored.  The unanchored
  # "s_r_<yr>" also matches "f_s_r_<yr>" in `params`, so the subset
  # returned TWO intercepts and the length-2 vector silently recycled
  # across z, corrupting the hand-computed survival.
  s_int_r <- unlist(par_list[grepl(paste("^s_r_", yr, "$", sep = ""),
                                   names(par_list))])
  out <- plogis(par_list$s_int + s_int_r + par_list$s_slope * z + par_list$s_dd * pop_size)
  return(out)
}
f_z1z <- function(z1, z, par_list, L, U, yr, pop_size) {
  # Fecundity kernel, by hand: reproduction probability (f_r) x seed count
  # (f_s, density dependent) x offspring size density (f_d, a normal
  # truncated to [L, U]).
  #
  # Anchored lookup of the year-specific seed-count intercept (see s_z for
  # why the anchor matters).
  f_s_r <- unlist(par_list[grepl(paste("^f_s_r_", yr, "$", sep = ""),
                                 names(par_list))])
  f_s <- exp(par_list$f_s_int + f_s_r + par_list$f_s_slope * z + par_list$f_s_dd * pop_size)
  f_r <- plogis(par_list$f_r_int + par_list$f_r_slope * z)
  # Eviction correction for the offspring size distribution.
  ev <- pnorm(U, par_list$mu_fd, par_list$sd_fd) - pnorm(L, par_list$mu_fd, par_list$sd_fd)
  f_d <- dnorm(z1, par_list$mu_fd, par_list$sd_fd) / ev
  out <- f_r * f_s * f_d
  return(out)
}
k_dd <- function(z1, z, par_list, L, U, yr, pop_size) {
  # Full projection kernel K = P + F on the midpoint mesh, scaled by the
  # bin width (midpoint rule).
  grow <- outer(z, z, FUN = g_z1z,
                par_list = par_list,
                L = L,
                U = U,
                yr = yr)
  surv <- s_z(z, par_list, yr, pop_size)
  fec <- outer(z, z, FUN = f_z1z,
               par_list = par_list,
               L = L,
               U = U,
               yr = yr,
               pop_size = pop_size)
  # Survival acts on the starting size, i.e. scales the columns of the
  # growth kernel: sweep(grow, 2, surv, "*") is t(surv * t(grow)).
  kern <- sweep(grow, 2, surv, "*") + fec
  bin_width <- z[2] - z[1]
  kern * bin_width
}
# Iterate the same model by hand: one column of pop_holder per time step
# (initial state + 50 iterations).
pop_holder <- matrix(NA_real_,
                     nrow = 200,
                     ncol = 51)
# Midpoint mesh on [0, 50] with 200 bins -- must match define_domains().
L <- 0
U <- 50
n <- 200
bounds <- seq(L, U, length.out = n + 1)
z <- z1 <- (bounds[2:201] + bounds[1:200]) * 0.5
# Start from ipmr's initial population state so both runs are comparable.
pop_holder[ , 1] <- x$pop_state$n_size[ , 1]
pop_size <- sum(pop_holder[ , 1])
for(i in 2:51) {
  # Rebuild the kernel each step: it depends on the current population
  # size (density dependence) and the year drawn by ipmr (use_seq).
  k <- k_dd(z1, z,
            par_list = params,
            L, U,
            yr = use_seq[(i - 1)],
            pop_size)
  pop_holder[ , i] <- k %*% pop_holder[ , (i - 1)]
  pop_size <- sum(pop_holder[ , i])
}
# Per-step growth rates: lambda_t = N_{t+1} / N_t for both runs.
ipmr_lam <- lambda(x, type_lambda = "all")
ipmr_pop_sizes <- colSums(x$pop_state$n_size)
hand_lam <- colSums(pop_holder[ , 2:51]) / colSums(pop_holder[ , 1:50])
hand_pop_sizes <- colSums(pop_holder)
test_that("asymptotic behavior is preserved at every time step", {
  expect_equal(as.vector(ipmr_lam), hand_lam, tolerance = 2e-2)
  expect_equal(ipmr_pop_sizes, hand_pop_sizes, tolerance = 1)
})
test_that("sub-kernel names and values are generated correctly", {
  # P kernels are survival*growth probabilities, so entries must be in [0, 1].
  p_rngs <- vapply(x$sub_kernels[grepl("P", names(x$sub_kernels))],
                   range,
                   numeric(2L))
  expect_true(all(p_rngs >=0 & p_rngs <= 1))
  # Expected names: P_<yr>_it_<t> and F_<yr>_it_<t> for yr 1:5, t 1:50.
  nms <- vapply(1:5,
                function(x) paste(c("P", "F"), x, sep = "_"),
                character(2L)) %>%
    as.vector() %>%
    vapply(., function(x) paste(x,"it", 1:50, sep = "_"), character(50L)) %>%
    as.vector()
  expect_true(all(nms %in% names(x$sub_kernels)))
})
|
# Diagnostic plots for a selenium-concentration (C_Se) model fit.
# NOTE(review): this script relies on objects created elsewhere -- se.lm
# (the fitted model), se.er (fitted residual distribution), se.res
# (residuals), z/zz (data), locate, pair.lab/pair.LAB, panel.pearson --
# confirm against the calling script.
# Sample from the fitted logistic residual distribution for comparison with
# the empirical residuals below.
seer <- rlogis(10000, se.er$est[[1]], se.er$est[[2]])
pdf(file=paste("Conc Model Full Pairs",locate,".pdf", sep=""), width=6, height=6, family="Times")
par(mar=c(5.1,4.1,4.1,1.1))
pairs(zz, pch=20, upper.panel=panel.pearson, labels=pair.LAB, cex=1)
dev.off()
pdf(file=paste("Conc Model Pairs",locate,".pdf", sep=""), width=6, height=6, family="Times")
par(mar=c(5.1,4.1,4.1,1.1))
pairs(z, pch=20, upper.panel=panel.pearson, labels=pair.lab, cex=1)
dev.off()
pdf(file=paste("Conc Model lm-fit ",locate,".pdf",sep=""), width=6, height=6, family="Times")
# UWTP site uses a nonlinear (nls) fit, so residual diagnostics come from
# nlstools::nlsResiduals; other sites use the standard lm plot method.
if(locate == "UWTP"){
par(mfrow=c(2,1))
par(mar=c(5.1,4.1,1.5,1.1))
resplot <- nlsResiduals(se.lm)
plot(resplot, which=1)
plot(resplot, which=4)
} else {
par(mfrow=c(2,2))
par(mar=c(5.1,4.1,1.5,1.1))
plot(se.lm, pch=20)
}
dev.off()
pdf(file=paste("Conc Model pred v meas ",locate,".pdf",sep=""), width=6, height=6, family="Times")
ylabel <- expression(Predicted~C[Se]~(mu*g %.% L^-1))
xlabel <- expression(Measured~C[Se]~(mu*g %.% L^-1))
par(mar=c(5.1,4.1,1.1,1.1))
# 1:1 line: points on it mean perfect prediction.
plot(z$se, predict(se.lm), pch=20, xlab=xlabel, ylab=ylabel)
abline(a=0, b=1)
dev.off()
pdf(file=paste("Conc Model res-fit ",locate,".pdf",sep=""), width=6, height=6, family="Times")
plot(se.er, pch=20)
dev.off()
pdf(file=paste("Conc Model ResDist ",locate,".pdf",sep=""), width=6, height=6, family="Times")
par(mar=c(5.1,4.1,1.1,1.1))
# Compute both histograms/densities first so shared axis limits can span
# the empirical and fitted residual distributions.
h1 <- hist(se.res, plot=F)
h2 <- hist(seer, plot=F)
d1 <- density(se.res)
d2 <- density(seer)
ylimit <- c(0, max(h1$density, h2$density, d1$y, d2$y))
xlimit <- c(min(h1$breaks, h2$breaks, d1$x, d2$x), max(h1$breaks, h2$breaks, d1$x, d2$x))
xlabel <- expression(paste(C[Se] ," (", mu*g %.% L^-1, ")"))
hist(se.res, freq=F, ylim=ylimit, xlim=xlimit, main="", xlab=xlabel)
lines(density(se.res), lwd=2)
lines(density(seer), lwd=2, col="red")
legend("topleft", legend=c("Kernel Density", "Fitted"), lwd=c(2,2), col=c("black","red"))
dev.off()
# Extract fitted values, residuals and the NA action from the model object;
# the nls (UWTP) and lm cases store them differently.
if(locate == "UWTP"){
tst <- nlsResiduals(se.lm)$resi1[,1]
resid <- nlsResiduals(se.lm)$resi1[,2]
nas <- summary(se.lm)$na.action
} else {tst <- se.lm$fitted
resid <- se.lm$resid
nas <- se.lm$na.action[]
}
# Drop the observations the model dropped, so meas lines up with tst/resid.
if(is.null(nas)){meas <- z$se} else {meas <- z$se[-nas]}
# Get statistics
# One row of summary statistics (min, 2.5th pct, mean, 97.5th pct, max, sd,
# skewness, kurtosis) for a numeric vector.  Replaces three hand-copied
# blocks of the same eight statistics.
summ.row <- function(v) {
  data.frame(Min    = min(v, na.rm = TRUE),
             P_2.5  = quantile(v, 0.025, na.rm = TRUE),
             Mean   = mean(v, na.rm = TRUE),
             P_97.5 = quantile(v, 0.975, na.rm = TRUE),
             Max    = max(v, na.rm = TRUE),
             SD     = sd(v, na.rm = TRUE),
             Skew   = skewness(v, na.rm = TRUE),
             Kurt   = kurtosis(v, na.rm = TRUE))
}
# Measured values, fitted values, and residuals, rounded to 4 significant
# digits; rbind() argument names become the row labels.
stats <- signif(rbind(Meas   = summ.row(meas),
                      Fitted = summ.row(tst),
                      Resid  = summ.row(resid)), 4)
Kurt=kurtosis(resid, na.rm=T))),4) |
# Author: Jan van de Kassteele - RIVM
# Fit a random forest in parallel: each of ncores workers grows
# ntree/ncores trees and the partial forests are merged into one.
rfparallel <- function(formula, data, ntree = 500, ncores = 4, importance = FALSE) {
  # formula    = random forest formula
  # data       = data to fit on
  # ntree      = total number of trees to grow (split across workers)
  # ncores     = number of CPUs / worker processes
  # importance = track variable importance?
  # Load packages with library(): unlike require(), it errors immediately
  # when a package is missing instead of warning and failing later.
  library(parallel)
  library(doParallel)
  library(foreach)
  # Create a cluster with ncores nodes
  cl <- makeCluster(ncores)
  # Guarantee cluster shutdown even if the fit errors, so worker
  # processes are never leaked.
  on.exit(stopCluster(cl), add = TRUE)
  clusterEvalQ(cl, expr = .libPaths())
  registerDoParallel(cl)
  # Fit the random forest model; randomForest::combine is namespaced so it
  # cannot be masked (e.g. by dplyr::combine) on the master session.
  rf.model <- foreach(
    i = seq_len(ncores),
    .combine = randomForest::combine,
    .packages = "randomForest") %dopar%
    randomForest(
      formula = formula,
      data = data,
      ntree = round(ntree / ncores),
      importance = importance)
  # Return the merged model fit
  rf.model
}
}
|
# Analysis of C. difficile infections in NIS/NRD hospital discharge data,
# queried from an embedded MonetDBLite database.
library('MonetDB.R')
#install.packages('MonetDBLite')
library('MonetDBLite')
library('dplyr')
library('tidyverse')
library('DBI')
library('beepr')
library('sqlsurvey')
# NOTE(review): machine-specific absolute path; breaks for anyone else --
# consider a project-relative path or here::here().
setwd('/home/bdetweiler/src/Data_Science/stat-8960-capstone-project/')
# guess_max raised to the full row count so column types are not
# mis-guessed from the first rows only.
cdiff <- read_csv('data/cdiff.csv', guess_max = 858204)
cdiff
# Flag discharges carrying a pregnancy diagnosis (ICD-9 V22.x), which can
# appear in any of the 30 diagnosis columns.  Scan dx1..dx30
# programmatically instead of hand-writing thirty grepl() calls.
dx.cols <- paste0("dx", 1:30)
# grepl() returns FALSE for NA, so empty diagnosis slots never match --
# identical behavior to the original 30-way OR of grepl() calls.
has.v22 <- Reduce(`|`, lapply(cdiff[dx.cols], function(col) grepl("V22", col)))
cdiff.preg <- cdiff %>%
  mutate(pregnant = as.integer(has.v22))
write_csv(cdiff.preg, "data/cdiff-pregnant.csv")
cdiff.preg
#filter(grepl("V22", dx1) | grepl("V22", dx2)) %>%
#|
#dx3 == '00845' |
#dx4 == '00845' |
#dx5 == '00845' |
#dx6 == '00845' |
#dx7 == '00845' |
#dx8 == '00845' |
#dx9 == '00845' |
#dx10 == '00845' |
#dx11 == '00845' |
#dx12 == '00845' |
#dx13 == '00845' |
#dx14 == '00845' |
#dx15 == '00845' |
#dx16 == '00845' |
#dx17 == '00845' |
#dx18 == '00845' |
#dx19 == '00845' |
#dx20 == '00845' |
#dx21 == '00845' |
#dx22 == '00845' |
#dx23 == '00845' |
#dx24 == '00845' |
#dx25 == '00845' |
#dx26 == '00845' |
#dx27 == '00845' |
#dx28 == '00845' |
#dx29 == '00845' |
#dx30 == '00845'))) %>%
#mutate(cdi=replace(cdi, is.na(cdi), 0))
#nis.DX3 = '00845' OR
#nis.DX4 = '00845' OR
#nis.DX5 = '00845' OR
#nis.DX6 = '00845' OR
#nis.DX7 = '00845' OR
#nis.DX8 = '00845' OR
#nis.DX9 = '00845' OR
#nis.DX10 = '00845' OR
#nis.DX11 = '00845' OR
#nis.DX12 = '00845' OR
#nis.DX13 = '00845' OR
#nis.DX14 = '00845' OR
#nis.DX15 = '00845' OR
#nis.DX16 = '00845' OR
#nis.DX17 = '00845' OR
#nis.DX18 = '00845' OR
#nis.DX19 = '00845' OR
#nis.DX20 = '00845' OR
#nis.DX21 = '00845' OR
#nis.DX22 = '00845' OR
#nis.DX23 = '00845' OR
#nis.DX24 = '00845' OR
#nis.DX25 = '00845')
#MonetDBLite::monetdblite_shutdown()
#con <- DBI::dbConnect(MonetDBLite::MonetDBLite(), "data/nrd_db")
# Open the embedded MonetDBLite database containing the NIS records.
con <- DBI::dbConnect(MonetDBLite::MonetDBLite(), "data/nis_db")
# NOTE(review): this sanity check queries table "nrd" although the
# connection above points at the NIS database -- confirm the table name.
row.count <- DBI::dbGetQuery(con, "SELECT COUNT(*) as count FROM nrd")
row.count
# Yearly discharge counts per condition are accumulated in this list,
# starting with the overall total per NIS year.
patient.counts <- list()
patient.counts[["total"]] <- DBI::dbGetQuery(con, "SELECT nis_year, COUNT(nis_key) AS n FROM NIS GROUP BY nis_year")
# ICD-9-CM codes queried below:
#584 Acute kidney failure
#584.5 Acute kidney failure with lesion of tubular necrosis convert
#584.6 Acute kidney failure with lesion of renal cortical necrosis convert
#584.7 Acute kidney failure with lesion of renal medullary [papillary] necrosis
#584.8 Acute kidney failure with lesion of with other specified pathological lesion in kidney
#584.9 Acute kidney failure, unspecified
#585 Chronic kidney disease (ckd)
#585.1 Chronic kidney disease, Stage I
#585.2 Chronic kidney disease, Stage II (mild)
#585.3 Chronic kidney disease, Stage III (moderate)
#585.4 Chronic kidney disease, Stage IV (severe)
#585.5 Chronic kidney disease, Stage V
#585.6 End stage renal disease
#585.9 Chronic kidney disease, unspecified
#586 Renal failure, unspecified
# Acute kidney failure (ICD-9 584.x; the original header said "Infection")
# The code can appear in any of the 30 diagnosis columns, so build the
# 30-way OR predicate programmatically instead of hand-writing each clause.
aki_pred <- paste(sprintf("nis.DX%d like '584%%'", 1:30),
                  collapse = " OR\n       ")
aki.count.q <- sprintf(
  "SELECT nis_year,
          count(nis_key) as n
   FROM nis
   WHERE %s
   GROUP BY nis_year",
  aki_pred)
# Track time for query
sw.start <- Sys.time()
patient.counts[["aki"]] <- DBI::dbGetQuery(con, aki.count.q)
sw.end <- Sys.time()
print(sw.end - sw.start)
beep(3)
# WOW! AKIs have been linearly increasing
patient.counts[["aki"]] %>%
  ggplot(aes(nis_year, n)) +
  geom_histogram(stat="identity") +
  geom_smooth()
# Chronic Kidney Disease
# Note: I'm grouping 585 with 585.9, which is Chronic kidney disease, Unspecified
# Each of the 30 diagnosis columns is checked against both codes; generate
# the predicate instead of hand-writing 60 clauses.
ckd_pred <- paste(sprintf("nis.DX%d = '585' OR nis.DX%d = '5859'", 1:30, 1:30),
                  collapse = " OR\n       ")
ckd.count.q <- sprintf(
  "SELECT nis_year,
          count(nis_key) as n
   FROM nis
   WHERE %s
   GROUP BY nis_year",
  ckd_pred)
# Track time for query
sw.start <- Sys.time()
patient.counts[["ckd"]] <- DBI::dbGetQuery(con, ckd.count.q)
sw.end <- Sys.time()
print(sw.end - sw.start)
beep(3)
patient.counts[["ckd"]]
patient.counts[["ckd"]] %>%
  ggplot(aes(nis_year, n)) +
  geom_histogram(stat="identity") +
  geom_smooth()
# Renal failure, Stages 1-6 (ICD-9 5851-5856; 5856 = end stage / dialysis)
# The six per-stage queries are identical except for the diagnosis code, so
# generate them in a loop instead of six hand-copied 30-clause blocks.
for (stage in 1:6) {
  dx.code <- paste0("585", stage)
  key <- paste0("ckd", stage)
  pred <- paste(sprintf("nis.DX%d = '%s'", 1:30, dx.code),
                collapse = " OR\n       ")
  q <- sprintf(
    "SELECT nis_year,
            COUNT(*) as n
     FROM nis
     WHERE %s
     GROUP BY nis_year",
    pred)
  # Keep the ckdN.count.q variables the hand-written version created, in
  # case later code refers to them.
  assign(paste0(key, ".count.q"), q)
  # Track time for query
  sw.start <- Sys.time()
  patient.counts[[key]] <- DBI::dbGetQuery(con, q)
  sw.end <- Sys.time()
  print(sw.end - sw.start)
  beep(3)
  # Explicit print(): auto-printing does not happen inside a for loop.
  print(patient.counts[[key]])
  print(
    patient.counts[[key]] %>%
      ggplot(aes(nis_year, n)) +
      geom_histogram(stat="identity") +
      geom_smooth()
  )
}
# Renal failure, unspecified (ICD-9 586)
# Build the 30-way OR predicate programmatically.
renal_pred <- paste(sprintf("nis.DX%d = '586'", 1:30),
                    collapse = " OR\n       ")
renal_unspecified.count.q <- sprintf(
  "SELECT nis_year,
          COUNT(*) as n
   FROM nis
   WHERE %s
   GROUP BY nis_year",
  renal_pred)
# Track time for query
sw.start <- Sys.time()
patient.counts[["renal_unspecified"]] <- DBI::dbGetQuery(con, renal_unspecified.count.q)
sw.end <- Sys.time()
print(sw.end - sw.start)
beep(3)
# C. Diff by itself (ICD-9 008.45, intestinal infection due to C. difficile)
# Build the 30-way OR predicate programmatically.
cdi_pred <- paste(sprintf("nis.DX%d = '00845'", 1:30),
                  collapse = " OR\n       ")
cdi.count.q <- sprintf(
  "SELECT nis_year,
          count(nis_key) as n
   FROM nis
   WHERE (%s)
   GROUP BY nis_year",
  cdi_pred)
# Track time for query
sw.start <- Sys.time()
patient.counts[["cdi"]] <- DBI::dbGetQuery(con, cdi.count.q)
sw.end <- Sys.time()
print(sw.end - sw.start)
beep(3)
patient.counts[["cdi"]]
patient.counts[["cdi"]] %>%
  ggplot(aes(nis_year, n)) +
  geom_histogram(stat="identity") +
  geom_smooth()
# C. diff WITH renal failure (any kind): per-year count of discharges that
# have ICD-9 008.45 (C. difficile) AND at least one renal-failure code --
# acute (584.x), chronic (585.x), or unspecified (586) -- in any of the 30
# diagnosis slots. The 30 near-identical DX clauses are generated instead
# of hand-written; the resulting SQL is logically identical to the
# original 120-clause query.
dx.eq.any <- function(code) {
# "nis.DX1 = 'code' OR ... OR nis.DX30 = 'code'"
paste0("nis.DX", 1:30, " = '", code, "'", collapse = " OR ")
}
dx.like.any <- function(prefix) {
# "nis.DX1 like 'prefix%' OR ... OR nis.DX30 like 'prefix%'"
paste0("nis.DX", 1:30, " like '", prefix, "%'", collapse = " OR ")
}
cdi_with_renal.count.q <- paste0(
"SELECT nis_year, count(nis_key) as n FROM nis WHERE (",
dx.eq.any("00845"),
") AND ((", dx.like.any("584"),
") OR (", dx.like.any("585"),
") OR (", dx.eq.any("586"),
")) GROUP BY nis_year"
)
# Time the C. diff + renal failure count query and store the result.
sw.start <- Sys.time()
patient.counts[["cdi_with_renal"]] <- DBI::dbGetQuery(con, cdi_with_renal.count.q)
sw.end <- Sys.time()
print(sw.end - sw.start)
beep(3)  # audible notification that the long-running query finished
patient.counts[["cdi_with_renal"]]
# Yearly counts with a trend line. geom_col() replaces the non-idiomatic
# geom_histogram(stat = "identity") for precomputed counts.
patient.counts[["cdi_with_renal"]] %>%
ggplot(aes(nis_year, n)) +
geom_col() +
geom_smooth()
# C. diff by itself: per-year count of discharges with ICD-9 008.45 in any
# of the 30 diagnosis slots. The 30 repeated DX clauses are generated
# programmatically; the SQL is logically identical to the hand-written form.
dx.eq.any <- function(code) {
# "nis.DX1 = 'code' OR ... OR nis.DX30 = 'code'"
paste0("nis.DX", 1:30, " = '", code, "'", collapse = " OR ")
}
cdi.count.q <- paste0(
"SELECT nis_year, count(nis_key) as n FROM nis WHERE (",
dx.eq.any("00845"),
") GROUP BY nis_year"
)
# Time the C. diff count query, run it, and stash the per-year counts.
# (Second pass -- mirrors the earlier run of the same query.)
sw.start <- Sys.time()
patient.counts[["cdi"]] <- DBI::dbGetQuery(con, cdi.count.q)
sw.end <- Sys.time()
print(sw.end - sw.start)
beep(3)  # audible notification that the long-running query finished
patient.counts[["cdi"]]
# geom_col() is the idiomatic form of geom_histogram(stat = "identity").
patient.counts[["cdi"]] %>%
ggplot(aes(nis_year, n)) +
geom_col() +
geom_smooth()
# C. diff WITH renal failure (any kind), second pass -- same query as the
# earlier cdi_with_renal.count.q. Counts discharges per year having both
# ICD-9 008.45 and a renal-failure code (584.x acute, 585.x chronic, or
# 586 unspecified) in any of the 30 diagnosis slots. The 120 repeated DX
# clauses are generated; the SQL is logically identical.
dx.eq.any <- function(code) {
# "nis.DX1 = 'code' OR ... OR nis.DX30 = 'code'"
paste0("nis.DX", 1:30, " = '", code, "'", collapse = " OR ")
}
dx.like.any <- function(prefix) {
# "nis.DX1 like 'prefix%' OR ... OR nis.DX30 like 'prefix%'"
paste0("nis.DX", 1:30, " like '", prefix, "%'", collapse = " OR ")
}
cdi_with_renal.count.q <- paste0(
"SELECT nis_year, count(nis_key) as n FROM nis WHERE (",
dx.eq.any("00845"),
") AND ((", dx.like.any("584"),
") OR (", dx.like.any("585"),
") OR (", dx.eq.any("586"),
")) GROUP BY nis_year"
)
# Time the C. diff + renal failure count query and store the result.
# (Second pass -- mirrors the earlier run of the same query.)
sw.start <- Sys.time()
patient.counts[["cdi_with_renal"]] <- DBI::dbGetQuery(con, cdi_with_renal.count.q)
sw.end <- Sys.time()
print(sw.end - sw.start)
beep(3)  # audible notification that the long-running query finished
patient.counts[["cdi_with_renal"]]
# geom_col() is the idiomatic form of geom_histogram(stat = "identity").
patient.counts[["cdi_with_renal"]] %>%
ggplot(aes(nis_year, n)) +
geom_col() +
geom_smooth()
# Assemble one wide table of per-year counts -- one column per condition --
# and write it out. Instead of an 11-step left_join/rename pipe, rename
# each count table's `n` column first and join in a loop; the result has
# the same columns in the same order as the original chain.
count.names <- c("aki", "ckd", "ckd1", "ckd2", "ckd3", "ckd4", "ckd5",
"ckd6", "renal_unspecified", "cdi", "cdi_with_renal")
df <- rename(patient.counts[["total"]], total = n)
for (nm in count.names) {
counts <- patient.counts[[nm]]
names(counts)[names(counts) == "n"] <- nm
df <- left_join(df, counts, by = "nis_year")
}
write_csv(df, 'data/cdi_renal_counts.csv')
# Pull ALL columns for discharges with C. diff (ICD-9 008.45) or a
# renal-failure code (584.x acute, 585.x chronic, 586 unspecified) in any
# of the 30 DX slots. Needed in full to do survey calculations.
# NOTE(review): the variable name says "and" but the WHERE combines the
# two condition groups with OR, exactly as the original query did --
# confirm which was intended.
dx.eq.any <- function(code) {
# "nis.DX1 = 'code' OR ... OR nis.DX30 = 'code'"
paste0("nis.DX", 1:30, " = '", code, "'", collapse = " OR ")
}
dx.like.any <- function(prefix) {
# "nis.DX1 like 'prefix%' OR ... OR nis.DX30 like 'prefix%'"
paste0("nis.DX", 1:30, " like '", prefix, "%'", collapse = " OR ")
}
cdi.and.renal.all.q <- paste0(
"SELECT * FROM nis WHERE (",
dx.eq.any("00845"),
") OR ((", dx.like.any("584"),
") OR (", dx.like.any("585"),
") OR (", dx.eq.any("586"),
"))"
)
# Time and run the full-row extract.
# BUG FIX: the original passed `cdi_or_renal.all.q`, which is never
# defined anywhere in this script; the query built above is named
# `cdi.and.renal.all.q`, so the original line errored at runtime.
sw.start <- Sys.time()
cdi.and.renal <- DBI::dbGetQuery(con, cdi.and.renal.all.q)
head(cdi.and.renal)
dim(cdi.and.renal)
sw.end <- Sys.time()
print(sw.end - sw.start)
beep(3)  # audible notification that the long-running query finished
# Encode dummy variables so we can quickly see what the patient had
# 00845  C. difficile infection
# 584.5  Acute kidney failure with lesion of tubular necrosis
# 584.6  Acute kidney failure with lesion of renal cortical necrosis
# 584.7  Acute kidney failure with lesion of renal medullary [papillary] necrosis
# 584.8  Acute kidney failure with other specified pathological lesion in kidney
# 584.9  Acute kidney failure, unspecified
# 585    Chronic kidney disease (CKD)
# 585.1  Chronic kidney disease, Stage I
# 585.2  Chronic kidney disease, Stage II (mild)
# 585.3  Chronic kidney disease, Stage III (moderate)
# 585.4  Chronic kidney disease, Stage IV (severe)
# 585.5  Chronic kidney disease, Stage V
# 585.6  End stage renal disease
# 585.9  Chronic kidney disease, unspecified
# 586    Renal failure, unspecified
# Dummy-code C. difficile (ICD-9 008.45) across all 30 diagnosis slots.
# %in% maps NA to FALSE, so the separate is.na() -> 0 cleanup pass that
# the hand-written 30-way OR needed is no longer required: the result is
# 1 if any slot matches, else 0, exactly as before.
cdi.and.renal <-
cdi.and.renal %>%
mutate(cdi = as.integer(
rowSums(across(num_range("dx", 1:30), ~ .x %in% "00845")) > 0
))
# Dummy-code acute kidney injury: ICD-9 584 plus subcodes 584.5-584.9.
# BUG FIX: the hand-written version checked '5845' only on dx1 and
# repeated '5849' twice for dx2..dx30, so code 584.5 appearing in slots
# 2-30 was silently missed. %in% maps NA to FALSE, so no separate
# is.na() cleanup pass is needed.
aki.codes <- c("584", "5845", "5846", "5847", "5848", "5849")
cdi.and.renal <-
cdi.and.renal %>%
mutate(aki = as.integer(
rowSums(across(num_range("dx", 1:30), ~ .x %in% aki.codes)) > 0
))
# Dummy-code chronic kidney disease, unstaged: ICD-9 585 or 585.9
# (unspecified). The staged codes 585.1-585.6 get their own dummies below.
# %in% maps NA to FALSE, so no separate is.na() cleanup pass is needed.
cdi.and.renal <-
cdi.and.renal %>%
mutate(ckd = as.integer(
rowSums(across(num_range("dx", 1:30), ~ .x %in% c("585", "5859"))) > 0
))
# Dummy-code CKD Stage I (ICD-9 585.1) across all 30 diagnosis slots.
# %in% maps NA to FALSE, so no separate is.na() cleanup pass is needed.
cdi.and.renal <-
cdi.and.renal %>%
mutate(ckd1 = as.integer(
rowSums(across(num_range("dx", 1:30), ~ .x %in% "5851")) > 0
))
# Dummy-code CKD Stage II (ICD-9 585.2) across all 30 diagnosis slots.
# %in% maps NA to FALSE, so no separate is.na() cleanup pass is needed.
cdi.and.renal <-
cdi.and.renal %>%
mutate(ckd2 = as.integer(
rowSums(across(num_range("dx", 1:30), ~ .x %in% "5852")) > 0
))
# Dummy-code CKD Stage III (ICD-9 585.3) across all 30 diagnosis slots.
# %in% maps NA to FALSE, so no separate is.na() cleanup pass is needed.
cdi.and.renal <-
cdi.and.renal %>%
mutate(ckd3 = as.integer(
rowSums(across(num_range("dx", 1:30), ~ .x %in% "5853")) > 0
))
# Dummy-code CKD Stage IV (ICD-9 585.4) across all 30 diagnosis slots.
# %in% maps NA to FALSE, so no separate is.na() cleanup pass is needed.
cdi.and.renal <-
cdi.and.renal %>%
mutate(ckd4 = as.integer(
rowSums(across(num_range("dx", 1:30), ~ .x %in% "5854")) > 0
))
# Quick structural sanity check on the accumulating flags.
glimpse(cdi.and.renal)
# Dummy-code CKD Stage V (ICD-9 585.5) across all 30 diagnosis slots.
# %in% maps NA to FALSE, so no separate is.na() cleanup pass is needed.
cdi.and.renal <-
cdi.and.renal %>%
mutate(ckd5 = as.integer(
rowSums(across(num_range("dx", 1:30), ~ .x %in% "5855")) > 0
))
# Dummy-code end stage renal disease (ICD-9 585.6) across all 30 slots.
# %in% maps NA to FALSE, so no separate is.na() cleanup pass is needed.
cdi.and.renal <-
cdi.and.renal %>%
mutate(ckd6 = as.integer(
rowSums(across(num_range("dx", 1:30), ~ .x %in% "5856")) > 0
))
# Dummy-code renal failure, unspecified (ICD-9 586) across all 30 slots.
# %in% maps NA to FALSE, so no separate is.na() cleanup pass is needed.
cdi.and.renal <-
cdi.and.renal %>%
mutate(renal_failure_unspecified = as.integer(
rowSums(across(num_range("dx", 1:30), ~ .x %in% "586")) > 0
))
# Persist the fully flagged table, then a reduced column subset for the
# survey-design work (keys, weights, outcomes, and the disease dummies).
out.dir <- '/home/bdetweiler/src/Data_Science/stat-8960-capstone-project/data'
write_csv(cdi.and.renal, file.path(out.dir, 'cdi_and_renal.csv'))
keep.pattern <- "nis_key|nis_year|nis_stratum|age|^discwt$|hospid|aki|cdi|ckd.*|renl.*|^los$|died|renal"
cdi.and.renal.reduced <-
cdi.and.renal %>%
select(matches(keep.pattern))
write_csv(cdi.and.renal.reduced, file.path(out.dir, 'cdi_and_renal_reduced.csv'))
# Fetch survey-design fields, outcomes, and all 30 diagnosis columns for
# EVERY discharge (no WHERE clause): the disease dummies are recomputed in
# R below. The dx1..dx30 column list is generated instead of hand-written.
dx.cols <- paste0("dx", 1:30)
all.cols <- c("nis_key", "nis_year", "nis_stratum", "age", "discwt",
"hospid", "renlfail", "los", "died", dx.cols)
all.q <- paste0("SELECT ", paste(all.cols, collapse = ", "), " FROM nis")
# Pull the full result set into memory. This is every discharge row, so it
# can take a long time and a lot of RAM.
cdi.and.renal <- DBI::dbGetQuery(con, all.q)
# NOTE(review): the comment originally here ("Join all of the stats into a
# table and write it out") was copy-paste residue from an earlier section;
# no join happens at this point.
beep(3)
# Dummy-code C. difficile (ICD-9 008.45), second pass over the full-table
# extract. %in% maps NA to FALSE, so the separate is.na() -> 0 cleanup
# pass of the hand-written 30-way OR is no longer required.
cdi.and.renal <-
cdi.and.renal %>%
mutate(cdi = as.integer(
rowSums(across(num_range("dx", 1:30), ~ .x %in% "00845")) > 0
))
# Dummy-code acute kidney injury: ICD-9 584 plus subcodes 584.5-584.9.
# BUG FIX: as in the first pass, the hand-written version checked '5845'
# only on dx1 and repeated '5849' twice for dx2..dx30, so code 584.5 in
# slots 2-30 was missed. %in% maps NA to FALSE; no is.na() pass needed.
aki.codes <- c("584", "5845", "5846", "5847", "5848", "5849")
cdi.and.renal <-
cdi.and.renal %>%
mutate(aki = as.integer(
rowSums(across(num_range("dx", 1:30), ~ .x %in% aki.codes)) > 0
))
# Dummy-code chronic kidney disease, unstaged: ICD-9 585 or 585.9.
# %in% maps NA to FALSE, so no separate is.na() cleanup pass is needed.
cdi.and.renal <-
cdi.and.renal %>%
mutate(ckd = as.integer(
rowSums(across(num_range("dx", 1:30), ~ .x %in% c("585", "5859"))) > 0
))
# Dummy-code CKD Stage I (ICD-9 585.1) across all 30 diagnosis slots.
# %in% maps NA to FALSE, so no separate is.na() cleanup pass is needed.
cdi.and.renal <-
cdi.and.renal %>%
mutate(ckd1 = as.integer(
rowSums(across(num_range("dx", 1:30), ~ .x %in% "5851")) > 0
))
# Dummy-code CKD Stage II (ICD-9 585.2) across all 30 diagnosis slots.
# %in% maps NA to FALSE, so no separate is.na() cleanup pass is needed.
cdi.and.renal <-
cdi.and.renal %>%
mutate(ckd2 = as.integer(
rowSums(across(num_range("dx", 1:30), ~ .x %in% "5852")) > 0
))
# Dummy-code CKD Stage III (ICD-9 585.3) across all 30 diagnosis slots.
# %in% maps NA to FALSE, so no separate is.na() cleanup pass is needed.
cdi.and.renal <-
cdi.and.renal %>%
mutate(ckd3 = as.integer(
rowSums(across(num_range("dx", 1:30), ~ .x %in% "5853")) > 0
))
# Dummy-code CKD Stage IV (ICD-9 585.4) across all 30 diagnosis slots.
# %in% maps NA to FALSE, so no separate is.na() cleanup pass is needed.
cdi.and.renal <-
cdi.and.renal %>%
mutate(ckd4 = as.integer(
rowSums(across(num_range("dx", 1:30), ~ .x %in% "5854")) > 0
))
# Quick structural sanity check on the accumulating flags.
glimpse(cdi.and.renal)
# Dummy-code CKD Stage V (ICD-9 585.5) across all 30 diagnosis slots.
# %in% maps NA to FALSE, so no separate is.na() cleanup pass is needed.
cdi.and.renal <-
cdi.and.renal %>%
mutate(ckd5 = as.integer(
rowSums(across(num_range("dx", 1:30), ~ .x %in% "5855")) > 0
))
# Dummy-code end stage renal disease (ICD-9 585.6) across all 30 slots.
# %in% maps NA to FALSE, so no separate is.na() cleanup pass is needed.
cdi.and.renal <-
cdi.and.renal %>%
mutate(ckd6 = as.integer(
rowSums(across(num_range("dx", 1:30), ~ .x %in% "5856")) > 0
))
# Dummy-code renal failure, unspecified (ICD-9 586) across all 30 slots.
# %in% maps NA to FALSE, so no separate is.na() cleanup pass is needed.
cdi.and.renal <-
cdi.and.renal %>%
mutate(renal_failure_unspecified = as.integer(
rowSums(across(num_range("dx", 1:30), ~ .x %in% "586")) > 0
))
# Export the survey-design columns, outcomes, and every disease dummy to
# CSV for the per-year survey analysis that follows.
export.cols <- c("nis_key", "nis_year", "nis_stratum", "age", "discwt",
"hospid", "renlfail", "los", "died", "cdi", "aki",
"ckd", "ckd1", "ckd2", "ckd3", "ckd4", "ckd5", "ckd6",
"renal_failure_unspecified")
cdi.and.renal %>%
select(all_of(export.cols)) %>%
write_csv("data/cdiff_and_renal_all.csv")
beep(3)  # audible notification that the export finished
# Survey-weighted prevalence estimates per year.
# `proportions` accumulates one svyciprop result per "year_disease" key.
proportions <- list()
cdi.and.renal.reduced <- list()
# NOTE(review): this assignment looks like leftover interactive-debugging
# state; the for loop immediately reassigns y.
y <- 2014
for (y in seq(2001, 2014, by=1)) {
print(y)
#setwd('/home/bdetweiler/src/Data_Science/stat-8960-capstone-project/thesis/')
# Load one year's extract -- assumes per-year CSVs split from
# cdiff_and_renal_all exist on disk; TODO confirm the splitting step.
cdi.and.renal.reduced <- read_csv(paste0('../data/cdiff_and_renal_all_', y, '.csv'))
# NIS complex survey design: hospitals as PSUs, discharge weights,
# stratified sampling; nest=TRUE because PSU ids repeat across strata.
cdiff.design <- svydesign(ids = ~hospid,
data = cdi.and.renal.reduced,
weights = ~discwt,
strata = ~nis_stratum,
nest=TRUE)
# Logit-scale 95% confidence intervals for each disease-dummy prevalence.
proportions[[paste0(y, "_cdi")]] <- svyciprop(~I(cdi==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_aki")]] <- svyciprop(~I(aki==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd")]] <- svyciprop(~I(ckd==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd1")]] <- svyciprop(~I(ckd1==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd2")]] <- svyciprop(~I(ckd2==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd3")]] <- svyciprop(~I(ckd3==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd4")]] <- svyciprop(~I(ckd4==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd5")]] <- svyciprop(~I(ckd5==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd6")]] <- svyciprop(~I(ckd6==1), cdiff.design, method = "logit", level = 0.95)
# "Any renal failure": true when any of the renal dummies is set.
proportions[[paste0(y, "_renal_failure")]] <- svyciprop(~I(aki+ckd+ckd1+ckd2+ckd3+ckd4+ckd5+ckd6 > 0), cdiff.design, method = "logit", level = 0.95)
#svyciprop(~(I(cdi==1) & I(aki+ckd+ckd1+ckd2+ckd3+ckd4+ckd5+ckd6 > 0)), cdiff.design, method = "logit", level = 0.95)
# Free the year's data before loading the next one; the files are large.
rm(cdi.and.renal.reduced)
rm(cdiff.design)
gc()
}
beep(3)
proportions
# Flatten the svyciprop results into a tidy (disease, year, theta, CI)
# table. Rows are collected in a list and bound once at the end instead of
# growing a data frame inside the loop.
# BUG FIX: the original seeded final.df with a placeholder row
# (disease = "", year = 2000, all zeros) that leaked into proportions.csv;
# that row is no longer emitted. data_frame() (deprecated) -> tibble().
diseases <- c("cdi", "aki", "ckd", "ckd1", "ckd2", "ckd3", "ckd4", "ckd5", "ckd6", "renal_failure")
rows <- list()
for (y in seq(2001, 2014, by = 1)) {
for (d in diseases) {
p <- proportions[[paste0(y, "_", d)]]
rows[[paste0(y, "_", d)]] <- tibble(disease = d,
year = y,
theta = as.vector(p),
ci2.5 = attr(p, "ci")[[1]],
ci97.5 = attr(p, "ci")[[2]])
}
}
final.df <- bind_rows(rows)
write_csv(final.df, "../data/proportions.csv")
# echo "`l NIS* | grep -i CSV | awk '{print $5}' | awk '{s+=$1} END {print s}'` + `l NRD201* | grep CSV | awk '{print $5}' | awk '{s+=$1} END {print s}'`" | bc
# Estimate weighted yearly proportions of CDI and each renal diagnosis.
# For each year, load that year's extract, build the NIS complex-survey
# design (clusters = hospitals, strata = nis_stratum, weights = discharge
# weights), then estimate each proportion with a logit-transformed CI.
proportions <- list()
cdi.and.renal.reduced <- list()
y <- 2014
for (y in seq(2001, 2014, by=1)) {
print(y)
#setwd('/home/bdetweiler/src/Data_Science/stat-8960-capstone-project/thesis/')
cdi.and.renal.reduced <- read_csv(paste0('../data/cdiff_and_renal_all_', y, '.csv'))
# Survey design for this year's discharges.
cdiff.design <- svydesign(ids = ~hospid,
                          data = cdi.and.renal.reduced,
                          weights = ~discwt,
                          strata = ~nis_stratum,
                          nest=TRUE)
# One proportion per diagnosis indicator column.
proportions[[paste0(y, "_cdi")]] <- svyciprop(~I(cdi==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_aki")]] <- svyciprop(~I(aki==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd")]] <- svyciprop(~I(ckd==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd1")]] <- svyciprop(~I(ckd1==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd2")]] <- svyciprop(~I(ckd2==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd3")]] <- svyciprop(~I(ckd3==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd4")]] <- svyciprop(~I(ckd4==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd5")]] <- svyciprop(~I(ckd5==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd6")]] <- svyciprop(~I(ckd6==1), cdiff.design, method = "logit", level = 0.95)
# Any renal diagnosis at all (AKI or any CKD flag).
proportions[[paste0(y, "_renal_failure")]] <- svyciprop(~I(aki+ckd+ckd1+ckd2+ckd3+ckd4+ckd5+ckd6 > 0), cdiff.design, method = "logit", level = 0.95)
#svyciprop(~(I(cdi==1) & I(aki+ckd+ckd1+ckd2+ckd3+ckd4+ckd5+ckd6 > 0)), cdiff.design, method = "logit", level = 0.95)
# Free this year's (large) data before loading the next file.
rm(cdi.and.renal.reduced)
rm(cdiff.design)
gc()
}
beep(3)
proportions
# Disease keys used to index into the `proportions` list.
# NOTE(review): this section rebuilds the same summary frame as above but
# never writes it out.
diseases <- c("cdi", "aki", "ckd", "ckd1", "ckd2", "ckd3", "ckd4", "ckd5", "ckd6", "renal_failure")
y <- 2001
d <- diseases[1]
# Seed row (blank disease, year 2000) gives bind_rows a frame to grow from.
final.df <- data_frame(disease = "",
                       year = 2000,
                       theta = 0,
                       ci2.5 = 0,
                       ci97.5 = 0)
for (y in 2001:2014) {
  for (d in diseases) {
    est <- proportions[[paste0(y, "_", d)]]
    ci <- attr(est, "ci")
    final.df <- bind_rows(final.df,
                          data_frame(disease = d,
                                     year = y,
                                     theta = as.vector(est),
                                     ci2.5 = ci[[1]],
                                     ci97.5 = ci[[2]]))
  }
}
# Weighted age distribution of C. diff discharges: mode, quartiles, mean,
# and a weighted histogram with quartile reference lines.
cdiff.ages <- filter(cdiff, !is.na(age))
cdiff.design <- svydesign(ids = ~hospid,
                          data = cdiff.ages,
                          weights = ~discwt,
                          strata = ~nis_stratum,
                          nest=TRUE)
# Unweighted mode of age via modeest::mlv (most frequent value).
mode <- mlv(cdiff.ages$age, method = "mfv")
mode <- mode$M
# Weighted quartiles; indexing qntl[[k]] below assumes the older survey
# svyquantile API that returns a plain vector -- TODO confirm with the
# installed survey version.
qntl <- svyquantile(~age, cdiff.design, c(0.25, 0.5, 0.75))
xbar.weighted <- svymean(x = ~age, design=cdiff.design, deff=TRUE)
# Histogram weighted by discharge weight; red = median, blue = Q1/Q3.
p <- cdiff.ages %>%
  select(age, discwt) %>%
  ggplot(aes(age, group=1, weight=discwt)) +
  geom_histogram(stat="bin", bins=30) +
  geom_vline(xintercept = qntl[[2]], col="red") +
  geom_vline(xintercept = qntl[[1]], col="blue") +
  geom_vline(xintercept = qntl[[3]], col="blue") +
  labs(title="C. diff infections by age", y="Count", x="Age")
print(p)
# Build one yearly count time series per 5-year age bucket (0-5 ... 95-100).
# NOTE(review): ts() assumes every year 2001-2014 appears in age.window; a
# bucket with a missing year would silently misalign -- confirm the data.
ts.by.year <- list()
from <- 1
to <- 0
for (i in 1:20) {
from <- to
to <- from + 5
age.window <- cdiff %>%
  filter(!is.na(age) & age >= from & age < to) %>%
  select(nis_year) %>%
  group_by(nis_year) %>%
  summarise(count=n())
my.ts <- ts(age.window$count, start = 2001, end = 2014, frequency = 1)
#if (i == 2001) {
ts.by.year[[paste0(from, "_", to)]] <- my.ts
#} else {
#ts(age.window$count, start = 2001, end = 2014, frequency = 1)
#}
}
# Assemble a wide data frame: one `year` column plus one column per 5-year
# age bucket. The original spelled out 20 nearly identical cbind() calls;
# data.frame() name-mangling there turned '0_5' into "X0_5", so we create
# those names explicitly to keep the label cleanup below working unchanged.
plot.ts <- data.frame(year = 2001:2014)
for (bucket in names(ts.by.year)) {
  plot.ts[[paste0("X", bucket)]] <- as.numeric(ts.by.year[[bucket]])
}
# Long format for ggplot, then strip the "X" prefix and turn underscores
# into dashes for human-readable bucket labels (levels keep bucket order).
plot.ts.m <- melt(plot.ts, id.vars=c('year'))
labels <- gsub('_', '-', gsub('X', replacement = '', as.character(plot.ts.m$variable)))
plot.ts.m$variable <- factor(labels, levels = unique(labels))
# Manual colour scale: blue shades for the younger buckets, red for the
# middle-age buckets (labelled increasing), green/black for the oldest.
# NOTE(review): '85-90' (#008000) is lighter than '80-85' (#004d00), breaking
# the darkening sequence -- possibly two hex values were swapped; confirm.
cols <- c('0-5' = "#e6e6ff",
          '5-10' = "#ccccff",
          '10-15' = "#b3b3ff",
          '15-20' = "#9999ff",
          '20-25' = "#8080ff",
          '25-30' = "#6666ff",
          '30-35' = "#4d4dff",
          '35-40' = "#3333ff",
          '40-45' = "#1a1aff",
          '45-50' = "#0000ff",
          # RED - increasing
          '50-55' = "#cc0000",
          '55-60' = "#b30000",
          '60-65' = "#990000",
          '65-70' = "#800000",
          '70-75' = "#660000",
          # GREEN - Somewhat decreasing
          '75-80' = "#006600",
          '80-85' = "#004d00",
          '85-90' = "#008000",
          '90-95' = "#003300",
          '95-100' = "#000000")
# One line per age bucket across 2001-2014.
plot.ts.m %>%
  ggplot(aes(x=year, y=value, colour=variable)) +
  geom_line() +
  scale_colour_manual(values = cols) +
  labs(title="Time series of C. diff cases by 5-year age groups", x="Year", y="Count", colour="Ages")
######################
# Extract ESRD (ckd6 == 1) discharges per year: age, year, and discharge
# weight only, kept for the ridgeline density plot below.
esrd <- list()
y <- 2014
for (y in seq(2001, 2014, by=1)) {
print(y)
#setwd('/home/bdetweiler/src/Data_Science/stat-8960-capstone-project/thesis/')
cdi.and.renal.reduced <- read_csv(paste0('../data/cdiff_and_renal_all_', y, '.csv'))
#cdiff.design <- svydesign(ids = ~hospid,
#data = cdi.and.renal.reduced,
#weights = ~discwt,
#strata = ~nis_stratum,
#nest=TRUE)
#fit <- svyglm(I(ckd6 == 1)~age, cdiff.design, family=quasibinomial())
# NOTE(review): indexing by the numeric year creates a list with ~2000
# empty leading slots; it works, but esrd[[as.character(y)]] would be leaner.
esrd[[y]] <- cdi.and.renal.reduced %>%
  filter(ckd6 == 1) %>%
  select(age, nis_year, discwt)
# NOTE(review): stray expression -- inside a loop this evaluates without
# printing; leftover from interactive debugging.
esrd[[2014]]
rm(cdi.and.renal.reduced)
gc()
}
# Stack the per-year ESRD extracts into one data frame, persist it, and
# draw one weighted age density per discharge year.
df <- esrd[[2001]]
for (yr in 2002:2014) {
  print(yr)
  df <- bind_rows(df, esrd[[yr]])
}
write_csv(df, '/home/bdetweiler/src/Data_Science/stat-8960-capstone-project/data/esrd.csv')
ggplot(df, aes(x = age, y = nis_year, group = nis_year)) +
  geom_density_ridges(aes(height = ..density.., weight = discwt), stat = "density") +
  labs(title = "ESRD distribution by age over time", x = "Age", y = "Year")
beep(3)
### Get ESRD
# NOTE(review): despite the header comment, this loop computes the weighted
# MEAN AGE per year for each subgroup: CDI, CKD (stages 1-5), AKI, and ESRD.
ages <- list()
y <- 2014
for (y in seq(2001, 2014, by=1)) {
  print(y)
  #setwd('/home/bdetweiler/src/Data_Science/stat-8960-capstone-project/thesis/')
  cdi.and.renal.reduced <- read_csv(paste0('../data/cdiff_and_renal_all_', y, '.csv'))
  cdi.and.renal.reduced <- filter(cdi.and.renal.reduced, !is.na(age))
  # --- C. diff infection ---
  subgroup <- filter(cdi.and.renal.reduced, cdi == 1)
  ds <- svydesign(ids = ~hospid,
                  data = subgroup,
                  weights = ~discwt,
                  strata = ~nis_stratum,
                  nest=TRUE)
  ages[[paste0(y, "_cdi")]] <- svymean(~age, ds, level = 0.95)
  # --- CKD, stages 1-5 (pre-ESRD) ---
  subgroup <- filter(cdi.and.renal.reduced, (ckd == 1 | ckd1 == 1 | ckd2 == 1 | ckd3 == 1 | ckd4 == 1 | ckd5 == 1))
  ds <- svydesign(ids = ~hospid,
                  data = subgroup,
                  weights = ~discwt,
                  strata = ~nis_stratum,
                  nest=TRUE)
  ages[[paste0(y, "_ckd")]] <- svymean(~age, ds, level = 0.95)
  # --- Acute kidney injury ---
  subgroup <- filter(cdi.and.renal.reduced, (aki == 1))
  ds <- svydesign(ids = ~hospid,
                  data = subgroup,
                  weights = ~discwt,
                  strata = ~nis_stratum,
                  nest=TRUE)
  ages[[paste0(y, "_aki")]] <- svymean(~age, ds, level = 0.95)
  # --- ESRD (ckd6); absent in early years, so guard against empty subgroups ---
  subgroup <- filter(cdi.and.renal.reduced, (ckd6 == 1))
  if (nrow(subgroup) > 0) {
    ds <- svydesign(ids = ~hospid,
                    data = subgroup,
                    weights = ~discwt,
                    strata = ~nis_stratum,
                    nest=TRUE)
    ages[[paste0(y, "_esrd")]] <- svymean(~age, ds, level = 0.95)
  }
  rm(cdi.and.renal.reduced)
  # BUG FIX: the original called rm(cdiff.design), an object this loop never
  # creates (it warned after the first pass); the design object here is `ds`.
  rm(ds)
  gc()
}
ages
beep(3)
y <- 2001
d <- diseases[1]
# Seed frame defining the output schema; the placeholder row is dropped
# before writing.
final.df <- data_frame(disease="",
                       year=2000,
                       theta=0,
                       ci2.5=0,
                       ci97.5=0)
for (y in seq(2001, 2014, by=1)) {
  print(y)
  # ESRD entries only exist from 2005 on (see the nrow guard in the loop
  # above) -- presumably when ICD-9 585.6 coding began; TODO confirm.
  if (y < 2005 ) {
    diseases <- c("cdi", "aki", "ckd")
  } else {
    diseases <- c("cdi", "aki", "ckd", "esrd")
  }
  for (d in diseases) {
    print(d)
    est <- ages[[paste0(y, "_", d)]]          # svymean result for this cell
    se <- sqrt(as.vector(attr(est, "var")))   # standard error of the mean
    # BUG FIX: the original referenced an undefined object `a` (runtime
    # error) and had the bounds swapped (lower bound used +1.96*SE).
    df <- data_frame(disease=d,
                     year=y,
                     theta=as.vector(est),
                     ci2.5=as.vector(est) - 1.96 * se,
                     ci97.5=as.vector(est) + 1.96 * se)
    final.df <- bind_rows(final.df, df)
  }
}
# Drop the placeholder seed row before writing.
write_csv(filter(final.df, year > 2000), '../data/ages.csv')
# Weighted age quartiles (25/50/75%) per year for each disease subgroup.
# Same structure as the mean-age loop above, using svyquantile with CIs.
ages <- list()
y <- 2014
for (y in seq(2001, 2014, by=1)) {
  print(y)
  #setwd('/home/bdetweiler/src/Data_Science/stat-8960-capstone-project/thesis/')
  cdi.and.renal.reduced <- read_csv(paste0('../data/cdiff_and_renal_all_', y, '.csv'))
  cdi.and.renal.reduced <- filter(cdi.and.renal.reduced, !is.na(age))
  # --- C. diff infection ---
  subgroup <- filter(cdi.and.renal.reduced, cdi == 1)
  ds <- svydesign(ids = ~hospid,
                  data = subgroup,
                  weights = ~discwt,
                  strata = ~nis_stratum,
                  nest=TRUE)
  ages[[paste0(y, "_cdi")]] <- svyquantile(~age, ds, c(0.25, 0.5, 0.75), ci=TRUE)
  # --- CKD, stages 1-5 (pre-ESRD) ---
  subgroup <- filter(cdi.and.renal.reduced, (ckd == 1 | ckd1 == 1 | ckd2 == 1 | ckd3 == 1 | ckd4 == 1 | ckd5 == 1))
  ds <- svydesign(ids = ~hospid,
                  data = subgroup,
                  weights = ~discwt,
                  strata = ~nis_stratum,
                  nest=TRUE)
  ages[[paste0(y, "_ckd")]] <- svyquantile(~age, ds, c(0.25, 0.5, 0.75), ci=TRUE)
  # --- Acute kidney injury ---
  subgroup <- filter(cdi.and.renal.reduced, (aki == 1))
  ds <- svydesign(ids = ~hospid,
                  data = subgroup,
                  weights = ~discwt,
                  strata = ~nis_stratum,
                  nest=TRUE)
  ages[[paste0(y, "_aki")]] <- svyquantile(~age, ds, c(0.25, 0.5, 0.75), ci=TRUE)
  # --- ESRD (ckd6); absent in early years, so guard against empty subgroups ---
  subgroup <- filter(cdi.and.renal.reduced, (ckd6 == 1))
  if (nrow(subgroup) > 0) {
    ds <- svydesign(ids = ~hospid,
                    data = subgroup,
                    weights = ~discwt,
                    strata = ~nis_stratum,
                    nest=TRUE)
    ages[[paste0(y, "_esrd")]] <- svyquantile(~age, ds, c(0.25, 0.5, 0.75), ci=TRUE)
  }
  rm(cdi.and.renal.reduced)
  # BUG FIX: the original called rm(cdiff.design), an object this loop never
  # creates; the design object here is `ds`.
  rm(ds)
  gc()
}
ages
beep(3)
# Flatten the per-year/per-disease quartile objects into a tidy frame with
# the three quartiles and their 95% CI bounds, then write to CSV.
y <- 2001
d <- diseases[1]
# Seed row defines the schema. NOTE(review): this placeholder (year 2000)
# IS written to the CSV below, unlike other sections that filter year > 2000
# -- confirm downstream consumers drop it.
final.df <- data_frame(disease="",
                       year=2000,
                       theta25=0,
                       theta25_2.5=0,
                       theta25_97.5=0,
                       theta50=0,
                       theta50_2.5=0,
                       theta50_97.5=0,
                       theta75=0,
                       theta75_2.5=0,
                       theta75_97.5=0)
final.df
for (y in seq(2001, 2014, by=1)) {
print(y)
# ESRD results only exist from 2005 on.
if (y < 2005 ) {
diseases <- c("cdi", "aki", "ckd")
} else {
diseases <- c("cdi", "aki", "ckd", "esrd")
}
d <- diseases[1]
for (d in diseases) {
print(d)
# Indexing assumes $CIs flattens column-wise as (2.5%, 97.5%) per quantile:
# [1]/[2] for Q25, [3]/[4] for Q50, [5]/[6] for Q75 -- this layout is
# survey-version dependent; TODO confirm against the installed version.
df <- data_frame(disease=d,
                 year=y,
                 theta25=as.vector(ages[[paste0(y, "_", d)]]$quantiles)[1],
                 theta25_2.5=as.vector(ages[[paste0(y, "_", d)]]$CIs)[1],
                 theta25_97.5=as.vector(ages[[paste0(y, "_", d)]]$CIs)[2],
                 theta50=as.vector(ages[[paste0(y, "_", d)]]$quantiles)[2],
                 theta50_2.5=as.vector(ages[[paste0(y, "_", d)]]$CIs)[3],
                 theta50_97.5=as.vector(ages[[paste0(y, "_", d)]]$CIs)[4],
                 theta75=as.vector(ages[[paste0(y, "_", d)]]$quantiles)[3],
                 theta75_2.5=as.vector(ages[[paste0(y, "_", d)]]$CIs)[5],
                 theta75_97.5=as.vector(ages[[paste0(y, "_", d)]]$CIs)[6])
final.df <- bind_rows(final.df, df)
}
}
final.df
write_csv(final.df, '../data/ages_quantiles.csv')
##### Get yearly age trends by age buckets
# For each 5-year age bucket (0-5 ... 95-100) and each year, estimate the
# weighted total number of C. diff discharges via svytotal on a constant.
# NOTE(review): this REUSES the name `ages`, clobbering the quartile
# results computed above -- confirm the earlier objects are no longer needed.
ts.by.year <- list()
ages <- list()
from <- 1
to <- 0
i <- 1
for (i in 1:20) {
from <- to
to <- from + 5
print(paste0('age group ', from, '_', to))
y <- 2001
for (y in 2001:2014) {
print(y)
age.window <- cdiff %>%
  filter(!is.na(age) & age >= from & age < to) %>%
  filter(nis_year == y) %>%
  select(nis_year, discwt, nis_stratum, hospid) %>%
  mutate(dummy=1)
ds <- svydesign(ids = ~hospid,
                data = age.window,
                weights = ~discwt,
                strata = ~nis_stratum,
                nest=TRUE)
# dummy == 1 for every row, so the weighted total of ~dummy is the
# estimated national discharge count for this bucket/year.
ages[[paste0(from, "_", to, "_", y)]] <- svytotal(~dummy, ds, ci=TRUE)
}
#age.window
#my.ts <- ts(age.window$count, start = 2001, end = 2014, frequency = 1)
#if (i == 2001) {
#ts.by.year[[paste0(from, "_", to)]] <- my.ts
#} else {
#ts(age.window$count, start = 2001, end = 2014, frequency = 1)
#}
}
from <- 1
to <- 0
i <- 1
# Seed row defines the schema (year 2000 / bucket '-1' is filtered out below).
df <- data_frame(year=2000, age.bucket='-1', total=0, SE=0)
for (i in 1:20) {
  from <- to
  to <- from + 5
  print(paste0('age group ', from, '_', to))
  y <- 2001
  for (y in 2001:2014) {
    # Tidy the svytotal object once per cell; the original tidied it twice
    # (once for the total, once for the SE) and printed it each time.
    est <- tidy(ages[[paste0(from, "_", to, "_", y)]])
    df <- bind_rows(df, data_frame(year = y,
                                   age.bucket = paste0(from, '_', to),
                                   total = pull(est, total),
                                   SE = pull(est, SE)))
  }
}
# Drop the placeholder seed row, then turn each bucket's yearly totals into
# a ts object and persist the whole list.
df <- df %>% filter(year > 2000)
for (age in unique(df$age.bucket)) {
# NOTE(review): break here means the '95_100' bucket is never converted to
# a time series -- confirm whether excluding the oldest bucket is intended.
if (age == '95_100') {
  break
}
print(age)
age.df <- df %>% filter(age.bucket == age) %>% select(total)
print(age.df)
my.ts <- ts(age.df$total, start = 2001, end = 2014, frequency = 1)
ts.by.year[[paste0(age)]] <- my.ts
}
saveRDS(ts.by.year, '../data/cdi_ages_ts.rds')
# Yearly totals and weighted proportion of female C. diff discharges.
# BUG FIX: the seed row originally used columns tot.preg / tot.not.preg
# while the loop appends tot.female / tot.male, leaving two all-NA columns
# in the output CSV; the seed now matches the columns the loop produces.
df <- data_frame(year=2000, tot.female=-1, tot.male=-1, prop=0, prop2.5=0, prop97.5=0)
female.preg <- list()
### Get female pregnancy
y <- 2001
for (y in 2001:2014) {
  # Female discharges for this year (survey-design variables carried along).
  mf <- cdiff %>%
    select(female, age, hospid, nis_stratum, discwt, nis_year) %>%
    filter(!is.na(female)) %>%
    filter(female == 1) %>%
    filter(nis_year == y)
  mf
  ds <- svydesign(ids = ~hospid,
                  data = mf,
                  weights = ~discwt,
                  strata = ~nis_stratum,
                  nest=TRUE)
  # NOTE(review): the design is already restricted to female == 1, so this
  # proportion is trivially ~1 -- confirm whether the filter above or the
  # formula here reflects the actual intent.
  prop <- svyciprop(~I(female==1), ds, level = .95, rm.na=TRUE)
  prop.val <- as.vector(prop)
  prop.val.2.5 <- attr(prop, "ci")[[1]]
  prop.val.97.5 <- attr(prop, "ci")[[2]]
  # svytotal on a logical returns totals for FALSE (index 1) and TRUE (2).
  tot <- svytotal(~I(female==1), ds, level = .95, rm.na=TRUE)
  males <- round(as.vector(tot)[1])
  females <- round(as.vector(tot)[2])
  #svp (~age, ds, level = 0.95)
  df <- bind_rows(df, data_frame(year=y, tot.female=females, tot.male=males, prop=prop.val, prop2.5=prop.val.2.5, prop97.5=prop.val.97.5))
}
# Drop the placeholder seed row before writing.
df <- df %>% filter(year > 2000)
df
write_csv(df, "../data/cdi-male-female.csv")
| /nis-get-general-statistics.R | permissive | alnajar/stat-8960-capstone-project | R | false | false | 85,550 | r | library('MonetDB.R')
#install.packages('MonetDBLite')
library('MonetDBLite')
library('dplyr')
library('tidyverse')
library('DBI')
library('beepr')
library('sqlsurvey')
# NOTE(review): setwd() hard-codes a developer machine path; prefer
# project-relative paths (or here::here()) for portability.
setwd('/home/bdetweiler/src/Data_Science/stat-8960-capstone-project/')
# guess_max set to the full row count so readr infers column types from
# every row rather than the first 1000.
cdiff <- read_csv('data/cdiff.csv', guess_max = 858204)
cdiff
# Flag discharges with a pregnancy diagnosis (ICD-9 V22.x) in any of the 30
# diagnosis columns. Scanning dx1..dx30 programmatically replaces the 30
# hand-written grepl() calls in the original.
dx.cols <- paste0("dx", 1:30)
# grepl() returns FALSE for NA input, so missing dx slots never match --
# same behavior as the original per-column grepl calls.
pregnant.flag <- Reduce(`|`, lapply(dx.cols, function(col) grepl("V22", cdiff[[col]])))
cdiff.preg <- cdiff %>%
  mutate(pregnant = as.integer(pregnant.flag))
write_csv(cdiff.preg, "data/cdiff-pregnant.csv")
cdiff.preg
#filter(grepl("V22", dx1) | grepl("V22", dx2)) %>%
#|
#dx3 == '00845' |
#dx4 == '00845' |
#dx5 == '00845' |
#dx6 == '00845' |
#dx7 == '00845' |
#dx8 == '00845' |
#dx9 == '00845' |
#dx10 == '00845' |
#dx11 == '00845' |
#dx12 == '00845' |
#dx13 == '00845' |
#dx14 == '00845' |
#dx15 == '00845' |
#dx16 == '00845' |
#dx17 == '00845' |
#dx18 == '00845' |
#dx19 == '00845' |
#dx20 == '00845' |
#dx21 == '00845' |
#dx22 == '00845' |
#dx23 == '00845' |
#dx24 == '00845' |
#dx25 == '00845' |
#dx26 == '00845' |
#dx27 == '00845' |
#dx28 == '00845' |
#dx29 == '00845' |
#dx30 == '00845'))) %>%
#mutate(cdi=replace(cdi, is.na(cdi), 0))
#nis.DX3 = '00845' OR
#nis.DX4 = '00845' OR
#nis.DX5 = '00845' OR
#nis.DX6 = '00845' OR
#nis.DX7 = '00845' OR
#nis.DX8 = '00845' OR
#nis.DX9 = '00845' OR
#nis.DX10 = '00845' OR
#nis.DX11 = '00845' OR
#nis.DX12 = '00845' OR
#nis.DX13 = '00845' OR
#nis.DX14 = '00845' OR
#nis.DX15 = '00845' OR
#nis.DX16 = '00845' OR
#nis.DX17 = '00845' OR
#nis.DX18 = '00845' OR
#nis.DX19 = '00845' OR
#nis.DX20 = '00845' OR
#nis.DX21 = '00845' OR
#nis.DX22 = '00845' OR
#nis.DX23 = '00845' OR
#nis.DX24 = '00845' OR
#nis.DX25 = '00845')
#MonetDBLite::monetdblite_shutdown()
#con <- DBI::dbConnect(MonetDBLite::MonetDBLite(), "data/nrd_db")
# Embedded MonetDB holding the NIS discharge records.
con <- DBI::dbConnect(MonetDBLite::MonetDBLite(), "data/nis_db")
# NOTE(review): this sanity-check counts table `nrd` while the rest of the
# section queries `nis`, and the connection points at nis_db -- confirm
# which table is intended.
row.count <- DBI::dbGetQuery(con, "SELECT COUNT(*) as count FROM nrd")
row.count
patient.counts <- list()
patient.counts[["total"]] <- DBI::dbGetQuery(con, "SELECT nis_year, COUNT(nis_key) AS n FROM NIS GROUP BY nis_year")
#584 Acute kidney failure
#584.5 Acute kidney failure with lesion of tubular necrosis convert
#584.6 Acute kidney failure with lesion of renal cortical necrosis convert
#584.7 Acute kidney failure with lesion of renal medullary [papillary] necrosis
#584.8 Acute kidney failure with lesion of with other specified pathological lesion in kidney
#584.9 Acute kidney failure, unspecified
#585 Chronic kidney disease (ckd)
#585.1 Chronic kidney disease, Stage I
#585.2 Chronic kidney disease, Stage II (mild)
#585.3 Chronic kidney disease, Stage III (moderate)
#585.4 Chronic kidney disease, Stage IV (severe)
#585.5 Chronic kidney disease, Stage V (mild)
#585.6 End stage renal disease
#585.9 Chronic kidney disease, unspecified
#586 Renal failure, unspecified
# Acute Kidney Infection
# ---------------------------------------------------------------------------
# Yearly discharge counts per diagnosis, pulled straight from the NIS table.
#
# Every query here has the same shape: count discharges per nis_year where an
# ICD-9 code appears in ANY of the 30 diagnosis columns DX1..DX30. The
# original spelled out all 30 conditions by hand for each diagnosis (several
# hundred lines of near-identical SQL, including verbatim duplicate runs of
# the cdi and cdi_with_renal queries); the helpers below generate the WHERE
# clauses instead. COUNT(nis_key) is used throughout (the original mixed
# COUNT(*) and count(nis_key); they are equivalent since nis_key is the key).
# ---------------------------------------------------------------------------

# Build "nis.DX1 <op> '<code>' OR ... OR nis.DX30 <op> '<code>'".
# op is "=" for exact codes and "like" for prefix patterns such as '584%'.
dx.any <- function(code, op = "=") {
  paste(sprintf("nis.DX%d %s '%s'", 1:30, op, code), collapse = " OR ")
}

# Run a count-per-year query for the given WHERE clause, printing the elapsed
# time and beeping when the (long-running) query finishes.
count.by.year <- function(where.clause) {
  q <- paste0("SELECT nis_year, count(nis_key) as n FROM nis WHERE ",
              where.clause,
              " GROUP BY nis_year")
  sw.start <- Sys.time()
  res <- DBI::dbGetQuery(con, q)
  sw.end <- Sys.time()
  print(sw.end - sw.start)
  beep(3)
  res
}

# Quick diagnostic plot: yearly counts with a trend line.
plot.counts <- function(counts) {
  counts %>%
    ggplot(aes(nis_year, n)) +
    geom_histogram(stat = "identity") +
    geom_smooth()
}

# Acute kidney injury: any 584.x code.
patient.counts[["aki"]] <- count.by.year(dx.any("584%", op = "like"))
# WOW! AKIs have been linearly increasing
plot.counts(patient.counts[["aki"]])

# Chronic kidney disease: 585 grouped with 585.9 (CKD, unspecified).
patient.counts[["ckd"]] <- count.by.year(paste(dx.any("585"), dx.any("5859"), sep = " OR "))
patient.counts[["ckd"]]
plot.counts(patient.counts[["ckd"]])

# CKD stages I-V (585.1-585.5) and end-stage renal disease (585.6).
for (stage in 1:6) {
  key <- paste0("ckd", stage)
  patient.counts[[key]] <- count.by.year(dx.any(paste0("585", stage)))
  print(patient.counts[[key]])
  print(plot.counts(patient.counts[[key]]))
}

# Renal failure, unspecified (586). (No plot in the original either.)
patient.counts[["renal_unspecified"]] <- count.by.year(dx.any("586"))

# C. difficile infection on its own (ICD-9 008.45).
patient.counts[["cdi"]] <- count.by.year(paste0("(", dx.any("00845"), ")"))
patient.counts[["cdi"]]
plot.counts(patient.counts[["cdi"]])

# C. diff together with renal failure of any kind (584.x, 585.x, or 586).
renal.any <- paste0("(", dx.any("584%", op = "like"), ")",
                    " OR (", dx.any("585%", op = "like"), ")",
                    " OR (", dx.any("586"), ")")
patient.counts[["cdi_with_renal"]] <- count.by.year(
  paste0("(", dx.any("00845"), ") AND (", renal.any, ")"))
patient.counts[["cdi_with_renal"]]
plot.counts(patient.counts[["cdi_with_renal"]])
# NOTE(review): the original re-ran the cdi query a second time with
# identical SQL; that duplicate run was dropped here.
# C. diff with Renal Failure (any kind)
# Yearly discharge counts where CDI (00845) appears in any DX column AND any
# renal-failure code is also present:
#   584x acute kidney failure, 585x chronic kidney disease, 586 unspecified.
cdi_with_renal.count.q <-
"SELECT nis_year,
count(nis_key) as n
FROM nis
WHERE (nis.DX1 = '00845' OR
nis.DX2 = '00845' OR
nis.DX3 = '00845' OR
nis.DX4 = '00845' OR
nis.DX5 = '00845' OR
nis.DX6 = '00845' OR
nis.DX7 = '00845' OR
nis.DX8 = '00845' OR
nis.DX9 = '00845' OR
nis.DX10 = '00845' OR
nis.DX11 = '00845' OR
nis.DX12 = '00845' OR
nis.DX13 = '00845' OR
nis.DX14 = '00845' OR
nis.DX15 = '00845' OR
nis.DX16 = '00845' OR
nis.DX17 = '00845' OR
nis.DX18 = '00845' OR
nis.DX19 = '00845' OR
nis.DX20 = '00845' OR
nis.DX21 = '00845' OR
nis.DX22 = '00845' OR
nis.DX23 = '00845' OR
nis.DX24 = '00845' OR
nis.DX25 = '00845' OR
nis.DX26 = '00845' OR
nis.DX27 = '00845' OR
nis.DX28 = '00845' OR
nis.DX29 = '00845' OR
nis.DX30 = '00845'
)
AND (
(nis.DX1 like '584%' OR
nis.DX2 like '584%' OR
nis.DX3 like '584%' OR
nis.DX4 like '584%' OR
nis.DX5 like '584%' OR
nis.DX6 like '584%' OR
nis.DX7 like '584%' OR
nis.DX8 like '584%' OR
nis.DX9 like '584%' OR
nis.DX10 like '584%' OR
nis.DX11 like '584%' OR
nis.DX12 like '584%' OR
nis.DX13 like '584%' OR
nis.DX14 like '584%' OR
nis.DX15 like '584%' OR
nis.DX16 like '584%' OR
nis.DX17 like '584%' OR
nis.DX18 like '584%' OR
nis.DX19 like '584%' OR
nis.DX20 like '584%' OR
nis.DX21 like '584%' OR
nis.DX22 like '584%' OR
nis.DX23 like '584%' OR
nis.DX24 like '584%' OR
nis.DX25 like '584%' OR
nis.DX26 like '584%' OR
nis.DX27 like '584%' OR
nis.DX28 like '584%' OR
nis.DX29 like '584%' OR
nis.DX30 like '584%'
)
OR (nis.DX1 like '585%' OR
nis.DX2 like '585%' OR
nis.DX3 like '585%' OR
nis.DX4 like '585%' OR
nis.DX5 like '585%' OR
nis.DX6 like '585%' OR
nis.DX7 like '585%' OR
nis.DX8 like '585%' OR
nis.DX9 like '585%' OR
nis.DX10 like '585%' OR
nis.DX11 like '585%' OR
nis.DX12 like '585%' OR
nis.DX13 like '585%' OR
nis.DX14 like '585%' OR
nis.DX15 like '585%' OR
nis.DX16 like '585%' OR
nis.DX17 like '585%' OR
nis.DX18 like '585%' OR
nis.DX19 like '585%' OR
nis.DX20 like '585%' OR
nis.DX21 like '585%' OR
nis.DX22 like '585%' OR
nis.DX23 like '585%' OR
nis.DX24 like '585%' OR
nis.DX25 like '585%' OR
nis.DX26 like '585%' OR
nis.DX27 like '585%' OR
nis.DX28 like '585%' OR
nis.DX29 like '585%' OR
nis.DX30 like '585%'
)
OR (nis.DX1 = '586' OR
nis.DX2 = '586' OR
nis.DX3 = '586' OR
nis.DX4 = '586' OR
nis.DX5 = '586' OR
nis.DX6 = '586' OR
nis.DX7 = '586' OR
nis.DX8 = '586' OR
nis.DX9 = '586' OR
nis.DX10 = '586' OR
nis.DX11 = '586' OR
nis.DX12 = '586' OR
nis.DX13 = '586' OR
nis.DX14 = '586' OR
nis.DX15 = '586' OR
nis.DX16 = '586' OR
nis.DX17 = '586' OR
nis.DX18 = '586' OR
nis.DX19 = '586' OR
nis.DX20 = '586' OR
nis.DX21 = '586' OR
nis.DX22 = '586' OR
nis.DX23 = '586' OR
nis.DX24 = '586' OR
nis.DX25 = '586' OR
nis.DX26 = '586' OR
nis.DX27 = '586' OR
nis.DX28 = '586' OR
nis.DX29 = '586' OR
nis.DX30 = '586'
)
)
GROUP BY nis_year"
# Track time for query
sw.start <- Sys.time()
patient.counts[["cdi_with_renal"]] <- DBI::dbGetQuery(con, cdi_with_renal.count.q)
sw.end <- Sys.time()
print(sw.end - sw.start)
# Audible signal that the query finished
beep(3)
# Inspect and plot counts of CDI + renal failure per year
patient.counts[["cdi_with_renal"]]
patient.counts[["cdi_with_renal"]] %>%
ggplot(aes(nis_year, n)) +
geom_histogram(stat="identity") +
geom_smooth()
# Join all of the stats into a table and write it out
# Builds a wide table: one row per NIS year, one column per condition count.
# The first join yields suffixed columns n.x/n.y (both inputs have an `n`
# column), which must be renamed immediately; every later join adds a single
# `n` column that is renamed right away.  The rename/join ordering matters.
df <- patient.counts[["total"]] %>%
left_join(patient.counts[["aki"]], by="nis_year" ) %>%
rename(total = n.x, aki = n.y) %>%
left_join(patient.counts[["ckd"]], by="nis_year") %>%
rename(ckd = n) %>%
left_join(patient.counts[["ckd1"]], by="nis_year") %>%
rename(ckd1 = n) %>%
left_join(patient.counts[["ckd2"]], by="nis_year") %>%
rename(ckd2 = n) %>%
left_join(patient.counts[["ckd3"]], by="nis_year") %>%
rename(ckd3 = n) %>%
left_join(patient.counts[["ckd4"]], by="nis_year") %>%
rename(ckd4 = n) %>%
left_join(patient.counts[["ckd5"]], by="nis_year") %>%
rename(ckd5 = n) %>%
left_join(patient.counts[["ckd6"]], by="nis_year") %>%
rename(ckd6 = n) %>%
left_join(patient.counts[["renal_unspecified"]], by="nis_year") %>%
rename(renal_unspecified = n) %>%
left_join(patient.counts[["cdi"]], by="nis_year") %>%
rename(cdi = n) %>%
left_join(patient.counts[["cdi_with_renal"]], by="nis_year") %>%
rename(cdi_with_renal = n)
# Persist the yearly summary counts (relative path)
write_csv(df, 'data/cdi_renal_counts.csv')
# Get everything where patients had cdi.and.renal Failure. Need this to do survey calculations.
# NOTE(review): despite the name, this query uses OR between the CDI block
# and the renal block, so it pulls discharges with CDI *or* any renal-failure
# code.  That superset may be intentional (flags are recomputed in R below),
# but confirm against the intended cohort definition.
cdi.and.renal.all.q <-
"SELECT *
FROM nis
WHERE (nis.DX1 = '00845' OR
nis.DX2 = '00845' OR
nis.DX3 = '00845' OR
nis.DX4 = '00845' OR
nis.DX5 = '00845' OR
nis.DX6 = '00845' OR
nis.DX7 = '00845' OR
nis.DX8 = '00845' OR
nis.DX9 = '00845' OR
nis.DX10 = '00845' OR
nis.DX11 = '00845' OR
nis.DX12 = '00845' OR
nis.DX13 = '00845' OR
nis.DX14 = '00845' OR
nis.DX15 = '00845' OR
nis.DX16 = '00845' OR
nis.DX17 = '00845' OR
nis.DX18 = '00845' OR
nis.DX19 = '00845' OR
nis.DX20 = '00845' OR
nis.DX21 = '00845' OR
nis.DX22 = '00845' OR
nis.DX23 = '00845' OR
nis.DX24 = '00845' OR
nis.DX25 = '00845' OR
nis.DX26 = '00845' OR
nis.DX27 = '00845' OR
nis.DX28 = '00845' OR
nis.DX29 = '00845' OR
nis.DX30 = '00845'
)
OR (
(nis.DX1 like '584%' OR
nis.DX2 like '584%' OR
nis.DX3 like '584%' OR
nis.DX4 like '584%' OR
nis.DX5 like '584%' OR
nis.DX6 like '584%' OR
nis.DX7 like '584%' OR
nis.DX8 like '584%' OR
nis.DX9 like '584%' OR
nis.DX10 like '584%' OR
nis.DX11 like '584%' OR
nis.DX12 like '584%' OR
nis.DX13 like '584%' OR
nis.DX14 like '584%' OR
nis.DX15 like '584%' OR
nis.DX16 like '584%' OR
nis.DX17 like '584%' OR
nis.DX18 like '584%' OR
nis.DX19 like '584%' OR
nis.DX20 like '584%' OR
nis.DX21 like '584%' OR
nis.DX22 like '584%' OR
nis.DX23 like '584%' OR
nis.DX24 like '584%' OR
nis.DX25 like '584%' OR
nis.DX26 like '584%' OR
nis.DX27 like '584%' OR
nis.DX28 like '584%' OR
nis.DX29 like '584%' OR
nis.DX30 like '584%'
)
OR (nis.DX1 like '585%' OR
nis.DX2 like '585%' OR
nis.DX3 like '585%' OR
nis.DX4 like '585%' OR
nis.DX5 like '585%' OR
nis.DX6 like '585%' OR
nis.DX7 like '585%' OR
nis.DX8 like '585%' OR
nis.DX9 like '585%' OR
nis.DX10 like '585%' OR
nis.DX11 like '585%' OR
nis.DX12 like '585%' OR
nis.DX13 like '585%' OR
nis.DX14 like '585%' OR
nis.DX15 like '585%' OR
nis.DX16 like '585%' OR
nis.DX17 like '585%' OR
nis.DX18 like '585%' OR
nis.DX19 like '585%' OR
nis.DX20 like '585%' OR
nis.DX21 like '585%' OR
nis.DX22 like '585%' OR
nis.DX23 like '585%' OR
nis.DX24 like '585%' OR
nis.DX25 like '585%' OR
nis.DX26 like '585%' OR
nis.DX27 like '585%' OR
nis.DX28 like '585%' OR
nis.DX29 like '585%' OR
nis.DX30 like '585%'
)
OR (nis.DX1 = '586' OR
nis.DX2 = '586' OR
nis.DX3 = '586' OR
nis.DX4 = '586' OR
nis.DX5 = '586' OR
nis.DX6 = '586' OR
nis.DX7 = '586' OR
nis.DX8 = '586' OR
nis.DX9 = '586' OR
nis.DX10 = '586' OR
nis.DX11 = '586' OR
nis.DX12 = '586' OR
nis.DX13 = '586' OR
nis.DX14 = '586' OR
nis.DX15 = '586' OR
nis.DX16 = '586' OR
nis.DX17 = '586' OR
nis.DX18 = '586' OR
nis.DX19 = '586' OR
nis.DX20 = '586' OR
nis.DX21 = '586' OR
nis.DX22 = '586' OR
nis.DX23 = '586' OR
nis.DX24 = '586' OR
nis.DX25 = '586' OR
nis.DX26 = '586' OR
nis.DX27 = '586' OR
nis.DX28 = '586' OR
nis.DX29 = '586' OR
nis.DX30 = '586'
)
)"
# Track time for query
# Pull the full CDI-or-renal extract defined by cdi.and.renal.all.q.
# BUG FIX: the original referenced `cdi_or_renal.all.q`, which is never
# defined anywhere in this script; the query string assigned above is
# `cdi.and.renal.all.q`, so dbGetQuery() would error with
# "object 'cdi_or_renal.all.q' not found".
sw.start <- Sys.time()
cdi.and.renal <- DBI::dbGetQuery(con, cdi.and.renal.all.q)
# Quick sanity checks on the extract (only echo in interactive sessions)
head(cdi.and.renal)
dim(cdi.and.renal)
sw.end <- Sys.time()
print(sw.end - sw.start)
# Audible signal that the query finished
beep(3)
# Encode dummy variables so we can quickly see what the patient had
# 00845 C. diff
# 584.5 Acute kidney failure with lesion of tubular necrosis
# 584.6 Acute kidney failure with lesion of renal cortical necrosis
# 584.7 Acute kidney failure with lesion of renal medullary [papillary] necrosis
# 584.8 Acute kidney failure with lesion of with other specified pathological lesion in kidney
# 585 Chronic kidney disease (ckd)
# 585.1 Chronic kidney disease, Stage I
# 585.2 Chronic kidney disease, Stage II (mild)
# 585.3 Chronic kidney disease, Stage III (moderate)
# 585.4 Chronic kidney disease, Stage IV (severe)
# 585.5 Chronic kidney disease, Stage V
# 585.6 End stage renal disease
# 585.9 Chronic kidney disease, unspecified
# 586 Renal failure, unspecified
# Encode 0/1 dummy variables flagging which diagnoses each discharge carried
# (see the ICD-9-CM code legend above).  The column names come back from the
# database in lower case (dx1..dx30), matching the original code's usage.
#
# flag_dx(): 1 when any of the 30 diagnosis columns equals one of `codes`,
# else 0.  `%in%` maps NA diagnosis cells to FALSE, which reproduces the
# original `mutate(x = as.integer(dx1 == code | ...))` followed by
# `replace(x, is.na(x), 0)` exactly.
flag_dx <- function(df, codes) {
  hits <- lapply(paste0("dx", seq_len(30)),
                 function(col) df[[col]] %in% codes)
  as.integer(Reduce(`|`, hits))
}

# C. difficile enterocolitis
cdi.and.renal$cdi <- flag_dx(cdi.and.renal, "00845")

# Acute kidney injury (584 and all 584x subcodes).
# BUG FIX: the original hand-expanded condition checked '5845' only for dx1
# and accidentally tested '5849' twice (never '5845') for dx2-dx30, so AKI
# cases coded 584.5 outside the primary diagnosis slot were missed.
cdi.and.renal$aki <- flag_dx(cdi.and.renal,
                             c("584", "5845", "5846", "5847", "5848", "5849"))

# Chronic kidney disease, unspecified stage
cdi.and.renal$ckd <- flag_dx(cdi.and.renal, c("585", "5859"))
# CKD stages I-IV
cdi.and.renal$ckd1 <- flag_dx(cdi.and.renal, "5851")
cdi.and.renal$ckd2 <- flag_dx(cdi.and.renal, "5852")
cdi.and.renal$ckd3 <- flag_dx(cdi.and.renal, "5853")
cdi.and.renal$ckd4 <- flag_dx(cdi.and.renal, "5854")
# Interactive structure check (kept from the original flow)
glimpse(cdi.and.renal)
# CKD stage V and end-stage renal disease
cdi.and.renal$ckd5 <- flag_dx(cdi.and.renal, "5855")
cdi.and.renal$ckd6 <- flag_dx(cdi.and.renal, "5856")
# Renal failure, unspecified
cdi.and.renal$renal_failure_unspecified <- flag_dx(cdi.and.renal, "586")
# Persist the full flagged extract, then a reduced column subset for the
# survey analysis.  NOTE(review): absolute home-directory paths will break on
# other machines — consider relative paths as used elsewhere in this script.
write_csv(cdi.and.renal, '/home/bdetweiler/src/Data_Science/stat-8960-capstone-project/data/cdi_and_renal.csv')
# NOTE(review): the regex keeps both "renl.*" (the NIS RENLFAIL flag) and
# "renal" (the derived renal_failure_unspecified column) — confirm both are
# intended.
cdi.and.renal.reduced <- cdi.and.renal %>% select(matches("nis_key|nis_year|nis_stratum|age|^discwt$|hospid|aki|cdi|ckd.*|renl.*|^los$|died|renal"))
write_csv(cdi.and.renal.reduced, '/home/bdetweiler/src/Data_Science/stat-8960-capstone-project/data/cdi_and_renal_reduced.csv')
# Pull the survey-design columns plus all 30 diagnosis codes for EVERY
# discharge (no WHERE clause) so the condition flags can be recomputed on the
# whole population.
all.q <- "SELECT nis_key,
nis_year,
nis_stratum,
age,
discwt,
hospid,
renlfail,
los,
died,
dx1,
dx2,
dx3,
dx4,
dx5,
dx6,
dx7,
dx8,
dx9,
dx10,
dx11,
dx12,
dx13,
dx14,
dx15,
dx16,
dx17,
dx18,
dx19,
dx20,
dx21,
dx22,
dx23,
dx24,
dx25,
dx26,
dx27,
dx28,
dx29,
dx30
FROM nis"
cdi.and.renal <- DBI::dbGetQuery(con, all.q)
# Audible signal that the (very large) query finished
beep(3)
# Re-encode the 0/1 condition flags on the full-population extract pulled by
# all.q (same ICD-9-CM legend as above; database columns are lower-case
# dx1..dx30, matching the original code's usage).
#
# flag_dx(): 1 when any of the 30 diagnosis columns equals one of `codes`,
# else 0.  `%in%` maps NA diagnosis cells to FALSE, reproducing the original
# `mutate(x = as.integer(dx1 == code | ...))` + `replace(x, is.na(x), 0)`.
flag_dx <- function(df, codes) {
  hits <- lapply(paste0("dx", seq_len(30)),
                 function(col) df[[col]] %in% codes)
  as.integer(Reduce(`|`, hits))
}

# C. difficile enterocolitis
cdi.and.renal$cdi <- flag_dx(cdi.and.renal, "00845")

# Acute kidney injury (584 and all 584x subcodes).
# BUG FIX: the original hand-expanded condition checked '5845' only for dx1
# and accidentally tested '5849' twice (never '5845') for dx2-dx30, so AKI
# cases coded 584.5 outside the primary diagnosis slot were missed.
cdi.and.renal$aki <- flag_dx(cdi.and.renal,
                             c("584", "5845", "5846", "5847", "5848", "5849"))

# Chronic kidney disease, unspecified stage
cdi.and.renal$ckd <- flag_dx(cdi.and.renal, c("585", "5859"))
# CKD stages I-IV
cdi.and.renal$ckd1 <- flag_dx(cdi.and.renal, "5851")
cdi.and.renal$ckd2 <- flag_dx(cdi.and.renal, "5852")
cdi.and.renal$ckd3 <- flag_dx(cdi.and.renal, "5853")
cdi.and.renal$ckd4 <- flag_dx(cdi.and.renal, "5854")
# Interactive structure check (kept from the original flow)
glimpse(cdi.and.renal)
# CKD stage V and end-stage renal disease
cdi.and.renal$ckd5 <- flag_dx(cdi.and.renal, "5855")
cdi.and.renal$ckd6 <- flag_dx(cdi.and.renal, "5856")
# Renal failure, unspecified
cdi.and.renal$renal_failure_unspecified <- flag_dx(cdi.and.renal, "586")
# Persist the design columns plus the derived condition flags for the full
# population.  NOTE(review): this writes a single file, while the proportion
# loops below read per-year files named 'cdiff_and_renal_all_<year>.csv' —
# presumably the per-year splits are produced elsewhere; confirm.
write_csv( select(cdi.and.renal, nis_key,
nis_year,
nis_stratum,
age,
discwt,
hospid,
renlfail,
los,
died,
cdi,
aki,
ckd,
ckd1,
ckd2,
ckd3,
ckd4,
ckd5,
ckd6,
renal_failure_unspecified),
"data/cdiff_and_renal_all.csv")
# Audible signal that the write finished
beep(3)
# Estimate survey-weighted yearly proportions for each condition flag using
# the NIS complex sampling design (clusters = hospitals, strata = NIS strata,
# discharge weights = discwt).  Each year is read from disk and released
# again to keep peak memory bounded.
proportions <- list()
cdi.and.renal.reduced <- list()
# NOTE(review): dead assignment — the loop below immediately overwrites y
# (leftover from interactive development).
y <- 2014
for (y in seq(2001, 2014, by=1)) {
print(y)
#setwd('/home/bdetweiler/src/Data_Science/stat-8960-capstone-project/thesis/')
cdi.and.renal.reduced <- read_csv(paste0('../data/cdiff_and_renal_all_', y, '.csv'))
# nest=TRUE: hospital IDs repeat across strata, so treat them as nested
cdiff.design <- svydesign(ids = ~hospid,
data = cdi.and.renal.reduced,
weights = ~discwt,
strata = ~nis_stratum,
nest=TRUE)
# Logit-scale 95% CIs for the proportion of discharges carrying each flag
proportions[[paste0(y, "_cdi")]] <- svyciprop(~I(cdi==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_aki")]] <- svyciprop(~I(aki==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd")]] <- svyciprop(~I(ckd==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd1")]] <- svyciprop(~I(ckd1==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd2")]] <- svyciprop(~I(ckd2==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd3")]] <- svyciprop(~I(ckd3==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd4")]] <- svyciprop(~I(ckd4==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd5")]] <- svyciprop(~I(ckd5==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd6")]] <- svyciprop(~I(ckd6==1), cdiff.design, method = "logit", level = 0.95)
# Any renal failure: at least one of the renal flags set
proportions[[paste0(y, "_renal_failure")]] <- svyciprop(~I(aki+ckd+ckd1+ckd2+ckd3+ckd4+ckd5+ckd6 > 0), cdiff.design, method = "logit", level = 0.95)
#svyciprop(~(I(cdi==1) & I(aki+ckd+ckd1+ckd2+ckd3+ckd4+ckd5+ckd6 > 0)), cdiff.design, method = "logit", level = 0.95)
# Release the year's data before reading the next one
rm(cdi.and.renal.reduced)
rm(cdiff.design)
gc()
}
# Audible signal + interactive inspection of the estimates
beep(3)
proportions
# Collect the per-year, per-disease survey estimates into one tidy data
# frame (disease, year, point estimate, 95% CI bounds) and write it out.
diseases <- c("cdi", "aki", "ckd", "ckd1", "ckd2", "ckd3", "ckd4", "ckd5", "ckd6", "renal_failure")
# Accumulate rows in a list and bind once at the end: avoids the original's
# O(n^2) bind_rows-in-a-loop, and drops the placeholder seed row
# (disease = "", year = 2000, all zeros) that the original wrote into the CSV.
rows <- list()
for (y in seq(2001, 2014, by=1)) {
  for (d in diseases) {
    # svyciprop() objects carry the point estimate plus a "ci" attribute
    est <- proportions[[paste0(y, "_", d)]]
    rows[[paste0(y, "_", d)]] <- data_frame(disease=d,
                                            year=y,
                                            theta=as.vector(est),
                                            ci2.5=attr(est, "ci")[[1]],
                                            ci97.5=attr(est, "ci")[[2]])
  }
}
final.df <- bind_rows(rows)
write_csv(final.df, "../data/proportions.csv")
# echo "`l NIS* | grep -i CSV | awk '{print $5}' | awk '{s+=$1} END {print s}'` + `l NRD201* | grep CSV | awk '{print $5}' | awk '{s+=$1} END {print s}'`" | bc
# Second pass of the survey-weighted proportion estimation (duplicate of the
# loop above — NOTE(review): consider deduplicating into a function).
proportions <- list()
cdi.and.renal.reduced <- list()
# NOTE(review): dead assignment — the loop below immediately overwrites y.
y <- 2014
for (y in seq(2001, 2014, by=1)) {
print(y)
#setwd('/home/bdetweiler/src/Data_Science/stat-8960-capstone-project/thesis/')
cdi.and.renal.reduced <- read_csv(paste0('../data/cdiff_and_renal_all_', y, '.csv'))
# nest=TRUE: hospital IDs repeat across strata, so treat them as nested
cdiff.design <- svydesign(ids = ~hospid,
data = cdi.and.renal.reduced,
weights = ~discwt,
strata = ~nis_stratum,
nest=TRUE)
# Logit-scale 95% CIs for the proportion of discharges carrying each flag
proportions[[paste0(y, "_cdi")]] <- svyciprop(~I(cdi==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_aki")]] <- svyciprop(~I(aki==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd")]] <- svyciprop(~I(ckd==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd1")]] <- svyciprop(~I(ckd1==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd2")]] <- svyciprop(~I(ckd2==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd3")]] <- svyciprop(~I(ckd3==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd4")]] <- svyciprop(~I(ckd4==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd5")]] <- svyciprop(~I(ckd5==1), cdiff.design, method = "logit", level = 0.95)
proportions[[paste0(y, "_ckd6")]] <- svyciprop(~I(ckd6==1), cdiff.design, method = "logit", level = 0.95)
# Any renal failure: at least one of the renal flags set
proportions[[paste0(y, "_renal_failure")]] <- svyciprop(~I(aki+ckd+ckd1+ckd2+ckd3+ckd4+ckd5+ckd6 > 0), cdiff.design, method = "logit", level = 0.95)
#svyciprop(~(I(cdi==1) & I(aki+ckd+ckd1+ckd2+ckd3+ckd4+ckd5+ckd6 > 0)), cdiff.design, method = "logit", level = 0.95)
# Release the year's data before reading the next one
rm(cdi.and.renal.reduced)
rm(cdiff.design)
gc()
}
# Audible signal + interactive inspection of the estimates
beep(3)
proportions
# Collect the per-year, per-disease survey estimates into one tidy data
# frame (disease, year, point estimate, 95% CI bounds).
diseases <- c("cdi", "aki", "ckd", "ckd1", "ckd2", "ckd3", "ckd4", "ckd5", "ckd6", "renal_failure")
# Accumulate rows in a list and bind once at the end: avoids the original's
# O(n^2) bind_rows-in-a-loop, and drops the placeholder seed row
# (disease = "", year = 2000, all zeros) the original left in final.df.
rows <- list()
for (y in seq(2001, 2014, by=1)) {
  for (d in diseases) {
    # svyciprop() objects carry the point estimate plus a "ci" attribute
    est <- proportions[[paste0(y, "_", d)]]
    rows[[paste0(y, "_", d)]] <- data_frame(disease=d,
                                            year=y,
                                            theta=as.vector(est),
                                            ci2.5=attr(est, "ci")[[1]],
                                            ci97.5=attr(est, "ci")[[2]])
  }
}
final.df <- bind_rows(rows)
# Survey-weighted age distribution of C. diff discharges.
# NOTE(review): `cdiff` is created earlier in the script (outside this view).
cdiff.ages <- filter(cdiff, !is.na(age))
cdiff.design <- svydesign(ids = ~hospid,
data = cdiff.ages,
weights = ~discwt,
strata = ~nis_stratum,
nest=TRUE)
# Unweighted modal age via modeest::mlv (most frequent value).
# NOTE(review): `mode` is not referenced again in the visible code.
mode <- mlv(cdiff.ages$age, method = "mfv")
mode <- mode$M
# Weighted quartiles and mean age (with design effect)
qntl <- svyquantile(~age, cdiff.design, c(0.25, 0.5, 0.75))
xbar.weighted <- svymean(x = ~age, design=cdiff.design, deff=TRUE)
# Discharge-weighted age histogram with the weighted median (red) and
# quartiles (blue) overlaid
p <- cdiff.ages %>%
select(age, discwt) %>%
ggplot(aes(age, group=1, weight=discwt)) +
geom_histogram(stat="bin", bins=30) +
geom_vline(xintercept = qntl[[2]], col="red") +
geom_vline(xintercept = qntl[[1]], col="blue") +
geom_vline(xintercept = qntl[[3]], col="blue") +
labs(title="C. diff infections by age", y="Count", x="Age")
print(p)
# Build one yearly time series (2001-2014) of C. diff admission counts for
# each 5-year age band: [0,5), [5,10), ..., [95,100).
#
# BUG FIX: ts(x, start = 2001, end = 2014) silently RECYCLES x when a band
# has no admissions in some year (the grouped count then has fewer than 14
# rows), misaligning counts with years.  We left-join the counts against the
# full year range and zero-fill missing years before constructing the ts.
# (Also dropped the original's dead `from <- 1` — it was overwritten before
# first use — and the commented-out leftovers.)
ts.by.year <- list()
to <- 0
for (i in seq_len(20)) {
  from <- to
  to <- from + 5
  # Unweighted admission counts per year within this age band
  age.window <- cdiff %>%
    filter(!is.na(age) & age >= from & age < to) %>%
    select(nis_year) %>%
    group_by(nis_year) %>%
    summarise(count=n())
  # Guarantee one row per year, in order, with 0 for years without rows
  yearly <- merge(data.frame(nis_year = 2001:2014), age.window, all.x = TRUE)
  yearly$count[is.na(yearly$count)] <- 0
  ts.by.year[[paste0(from, "_", to)]] <- ts(yearly$count, start = 2001, end = 2014, frequency = 1)
}
# Assemble a wide data frame of yearly counts (2001-2014), one column per
# 5-year age bucket, from the ts objects built above, then melt it into long
# format and relabel the buckets for plotting.
plot.ts <- data.frame(year=2001:2014)
age.buckets <- paste0(seq(0, 95, by = 5), "_", seq(5, 100, by = 5))
for (bucket in age.buckets) {
  # Replaces 20 copy-pasted cbind() calls.  make.names() reproduces the
  # column names the original data.frame('0_5' = ...) produced via
  # check.names ("0_5" -> "X0_5"), so the gsub('X', ...) below still works.
  bucket.col <- data.frame(ts.by.year[[bucket]])
  names(bucket.col) <- make.names(bucket)
  plot.ts <- cbind(plot.ts, bucket.col)
}
plot.ts.m <- melt(plot.ts, id.vars=c('year'))
# Strip the "X" prefix and turn underscores into dashes: "X0_5" -> "0-5".
# Factor levels keep the melt (column) order, i.e. ascending age.
labels <- gsub('_', '-', gsub('X', replacement = '', as.character(plot.ts.m$variable)))
plot.ts.m$variable <- factor(labels, levels = unique(labels))
# Manual colour scale for the 20 age buckets, grouped by observed trend.
# BLUE -- younger groups, light to dark with age.
cols <- c('0-5' = "#e6e6ff",
'5-10' = "#ccccff",
'10-15' = "#b3b3ff",
'15-20' = "#9999ff",
'20-25' = "#8080ff",
'25-30' = "#6666ff",
'30-35' = "#4d4dff",
'35-40' = "#3333ff",
'40-45' = "#1a1aff",
'45-50' = "#0000ff",
# RED - increasing
'50-55' = "#cc0000",
'55-60' = "#b30000",
'60-65' = "#990000",
'65-70' = "#800000",
'70-75' = "#660000",
# GREEN - Somewhat decreasing
'75-80' = "#006600",
'80-85' = "#004d00",
# NOTE(review): "#008000" breaks the progressively darker green sequence of
# this group -- possibly a transposed value; confirm the intended shade.
'85-90' = "#008000",
'90-95' = "#003300",
'95-100' = "#000000")
# One line per age bucket across years, coloured by the scale above.
plot.ts.m %>%
ggplot(aes(x=year, y=value, colour=variable)) +
geom_line() +
scale_colour_manual(values = cols) +
labs(title="Time series of C. diff cases by 5-year age groups", x="Year", y="Count", colour="Ages")
######################
## Collect the ESRD (ckd6 == 1) discharges for every NIS year, stack them
## into one data frame, save it, and draw a ridgeline plot of the weighted
## age distribution per year.
esrd <- list()
for (y in seq(2001, 2014, by=1)) {
  print(y)
  cdi.and.renal.reduced <- read_csv(paste0('../data/cdiff_and_renal_all_', y, '.csv'))
  # Character keys: numeric esrd[[2001]] indexing would pad the list with
  # ~2000 NULL slots.  A stray no-op expression `esrd[[2014]]` was removed.
  esrd[[as.character(y)]] <- cdi.and.renal.reduced %>%
    filter(ckd6 == 1) %>%
    select(age, nis_year, discwt)
  rm(cdi.and.renal.reduced)
  gc()
}
# Stack all years at once instead of growing df inside a second loop.
df <- bind_rows(esrd)
write_csv(df, '/home/bdetweiler/src/Data_Science/stat-8960-capstone-project/data/esrd.csv')
ggplot(df, aes(x = age, y = nis_year, group = nis_year)) +
  geom_density_ridges(aes(height=..density.., weight=discwt), stat="density") +
  labs(title="ESRD distribution by age over time", x="Age", y="Year")
beep(3)
### Survey-weighted mean age per year for the CDI, CKD, AKI and ESRD (ckd6)
### subgroups.  Results are stored in `ages` under "<year>_<disease>" keys.
ages <- list()
# Helper: NIS survey design for a subgroup of one year's data (replaces four
# copy-pasted svydesign() calls per iteration).
make.design <- function(subgroup) {
  svydesign(ids = ~hospid,
            data = subgroup,
            weights = ~discwt,
            strata = ~nis_stratum,
            nest = TRUE)
}
for (y in seq(2001, 2014, by=1)) {
  print(y)
  cdi.and.renal.reduced <- read_csv(paste0('../data/cdiff_and_renal_all_', y, '.csv'))
  cdi.and.renal.reduced <- filter(cdi.and.renal.reduced, !is.na(age))
  subgroup <- filter(cdi.and.renal.reduced, cdi == 1)
  ages[[paste0(y, "_cdi")]] <- svymean(~age, make.design(subgroup), level = 0.95)
  subgroup <- filter(cdi.and.renal.reduced,
                     (ckd == 1 | ckd1 == 1 | ckd2 == 1 | ckd3 == 1 | ckd4 == 1 | ckd5 == 1))
  ages[[paste0(y, "_ckd")]] <- svymean(~age, make.design(subgroup), level = 0.95)
  subgroup <- filter(cdi.and.renal.reduced, (aki == 1))
  ages[[paste0(y, "_aki")]] <- svymean(~age, make.design(subgroup), level = 0.95)
  # ESRD can be absent in early years, so guard against an empty subgroup.
  subgroup <- filter(cdi.and.renal.reduced, (ckd6 == 1))
  if (nrow(subgroup) > 0) {
    ages[[paste0(y, "_esrd")]] <- svymean(~age, make.design(subgroup), level = 0.95)
  }
  # BUG FIX: was rm(cdiff.design) -- an object never created in this loop
  # (copy-paste remnant from an earlier loop), which only emitted warnings.
  rm(cdi.and.renal.reduced, subgroup)
  gc()
}
ages
beep(3)
## Flatten the svymean results into a long data frame with 95% Wald CIs and
## write it to CSV.
y <- 2001
d <- diseases[1]
# Sentinel seed row (year 2000), matching the pattern used elsewhere in this
# script; downstream consumers are expected to filter it out.
final.df <- data_frame(disease="",
                       year=2000,
                       theta=0,
                       ci2.5=0,
                       ci97.5=0)
for (y in seq(2001, 2014, by=1)) {
  print(y)
  # ESRD (ckd6) estimates only exist from 2005 onward.
  if (y < 2005 ) {
    diseases <- c("cdi", "aki", "ckd")
  } else {
    diseases <- c("cdi", "aki", "ckd", "esrd")
  }
  for (d in diseases) {
    print(d)
    # BUG FIX: the original referenced an undefined object `a`; bind the
    # current estimate first.  The CI bounds were also swapped (the lower
    # bound used +1.96*SE and the upper used -1.96*SE).
    a <- ages[[paste0(y, "_", d)]]
    se <- sqrt(as.vector(attr(a, "var")))
    df <- data_frame(disease=d,
                     year=y,
                     theta=as.vector(a),
                     ci2.5=as.vector(a) - 1.96 * se,
                     ci97.5=as.vector(a) + 1.96 * se)
    final.df <- bind_rows(final.df, df)
  }
}
write_csv(final.df, '../data/ages.csv')
## Survey-weighted age quartiles (25/50/75%) with confidence intervals, per
## year and subgroup, stored in `ages` under "<year>_<disease>" keys.
ages <- list()
# Helper: NIS survey design for a subgroup of one year's data.
make.design <- function(subgroup) {
  svydesign(ids = ~hospid,
            data = subgroup,
            weights = ~discwt,
            strata = ~nis_stratum,
            nest = TRUE)
}
# Quartiles of age (with CIs) under the subgroup's design.
age.quartiles <- function(subgroup) {
  svyquantile(~age, make.design(subgroup), c(0.25, 0.5, 0.75), ci = TRUE)
}
for (y in seq(2001, 2014, by=1)) {
  print(y)
  cdi.and.renal.reduced <- read_csv(paste0('../data/cdiff_and_renal_all_', y, '.csv'))
  cdi.and.renal.reduced <- filter(cdi.and.renal.reduced, !is.na(age))
  ages[[paste0(y, "_cdi")]] <- age.quartiles(filter(cdi.and.renal.reduced, cdi == 1))
  ages[[paste0(y, "_ckd")]] <- age.quartiles(
    filter(cdi.and.renal.reduced,
           (ckd == 1 | ckd1 == 1 | ckd2 == 1 | ckd3 == 1 | ckd4 == 1 | ckd5 == 1)))
  ages[[paste0(y, "_aki")]] <- age.quartiles(filter(cdi.and.renal.reduced, (aki == 1)))
  # ESRD may be absent in early years; skip empty subgroups.
  subgroup <- filter(cdi.and.renal.reduced, (ckd6 == 1))
  if (nrow(subgroup) > 0) {
    ages[[paste0(y, "_esrd")]] <- age.quartiles(subgroup)
  }
  # BUG FIX: was rm(cdiff.design), an object that does not exist in this
  # loop (copy-paste remnant); clean the real intermediates instead.
  rm(cdi.and.renal.reduced, subgroup)
  gc()
}
ages
beep(3)
## Flatten the svyquantile results into a long data frame: point estimates
## and CI bounds for the 25th/50th/75th age percentiles, then write to CSV.
y <- 2001
d <- diseases[1]
# Sentinel seed row (year 2000); not filtered out in this block.
final.df <- data_frame(disease="",
year=2000,
theta25=0,
theta25_2.5=0,
theta25_97.5=0,
theta50=0,
theta50_2.5=0,
theta50_97.5=0,
theta75=0,
theta75_2.5=0,
theta75_97.5=0)
final.df
for (y in seq(2001, 2014, by=1)) {
print(y)
# ESRD (ckd6) estimates only exist from 2005 onward.
if (y < 2005 ) {
diseases <- c("cdi", "aki", "ckd")
} else {
diseases <- c("cdi", "aki", "ckd", "esrd")
}
d <- diseases[1]
for (d in diseases) {
print(d)
# NOTE(review): the extraction assumes as.vector($CIs) lays the bounds out
# as (q25 lo, q25 hi, q50 lo, q50 hi, q75 lo, q75 hi) -- verify against the
# installed survey version's svyquantile return structure.
df <- data_frame(disease=d,
year=y,
theta25=as.vector(ages[[paste0(y, "_", d)]]$quantiles)[1],
theta25_2.5=as.vector(ages[[paste0(y, "_", d)]]$CIs)[1],
theta25_97.5=as.vector(ages[[paste0(y, "_", d)]]$CIs)[2],
theta50=as.vector(ages[[paste0(y, "_", d)]]$quantiles)[2],
theta50_2.5=as.vector(ages[[paste0(y, "_", d)]]$CIs)[3],
theta50_97.5=as.vector(ages[[paste0(y, "_", d)]]$CIs)[4],
theta75=as.vector(ages[[paste0(y, "_", d)]]$quantiles)[3],
theta75_2.5=as.vector(ages[[paste0(y, "_", d)]]$CIs)[5],
theta75_97.5=as.vector(ages[[paste0(y, "_", d)]]$CIs)[6])
final.df <- bind_rows(final.df, df)
}
}
final.df
write_csv(final.df, '../data/ages_quantiles.csv')
##### Get yearly age trends by age buckets
# For each 5-year age bucket (0-5 ... 95-100) and each NIS year, compute the
# survey-weighted total number of C. diff discharges, keyed "from_to_year".
ts.by.year <- list()
ages <- list()
from <- 1  # dead initial value; the loop immediately overwrites it from `to`
to <- 0
i <- 1
for (i in 1:20) {
from <- to
to <- from + 5
print(paste0('age group ', from, '_', to))
y <- 2001
for (y in 2001:2014) {
print(y)
# Events in this bucket/year; dummy = 1 so svytotal sums the weights.
age.window <- cdiff %>%
filter(!is.na(age) & age >= from & age < to) %>%
filter(nis_year == y) %>%
select(nis_year, discwt, nis_stratum, hospid) %>%
mutate(dummy=1)
# NOTE(review): svydesign()/svytotal() will fail on an empty bucket-year --
# confirm every bucket has events in every year.
ds <- svydesign(ids = ~hospid,
data = age.window,
weights = ~discwt,
strata = ~nis_stratum,
nest=TRUE)
ages[[paste0(from, "_", to, "_", y)]] <- svytotal(~dummy, ds, ci=TRUE)
}
#age.window
#my.ts <- ts(age.window$count, start = 2001, end = 2014, frequency = 1)
#if (i == 2001) {
#ts.by.year[[paste0(from, "_", to)]] <- my.ts
#} else {
#ts(age.window$count, start = 2001, end = 2014, frequency = 1)
#}
}
# Convert the per-bucket/per-year svytotal objects into a tidy data frame,
# then into one annual ts per bucket, and save the list of series.
from <- 1
to <- 0
i <- 1
df <- data_frame(year=2000, age.bucket='-1', total=0, SE=0)
for (i in 1:20) {
from <- to
to <- from + 5
print(paste0('age group ', from, '_', to))
y <- 2001
for (y in 2001:2014) {
# print() inside tidy() also echoes each svytotal while extracting it.
total <- tidy(print(ages[[paste0(from, "_", to, "_", y)]])) %>% select(total, SE) %>% pull(total)
SE <- tidy(print(ages[[paste0(from, "_", to, "_", y)]])) %>% select(total, SE) %>% pull(SE)
df <- bind_rows(df, data_frame(year=y, age.bucket=paste0(from, '_', to), total, SE))
}
}
df <- df %>% filter(year > 2000)  # drop the sentinel seed row
for (age in unique(df$age.bucket)) {
# NOTE(review): `break` fires *before* processing '95_100', so the last
# bucket is excluded from ts.by.year -- confirm this is intentional (the
# earlier plot did include 95-100).
if (age == '95_100') {
break
}
print(age)
age.df <- df %>% filter(age.bucket == age) %>% select(total)
print(age.df)
my.ts <- ts(age.df$total, start = 2001, end = 2014, frequency = 1)
ts.by.year[[paste0(age)]] <- my.ts
}
saveRDS(ts.by.year, '../data/cdi_ages_ts.rds')
# Yearly totals/proportions for female C. diff records.
# NOTE(review): the seed columns (tot.preg / tot.not.preg) do not match the
# columns appended below (tot.female / tot.male); bind_rows() will NA-pad
# both pairs -- confirm which naming is intended.
df <- data_frame(year=2000, tot.preg=-1, tot.not.preg=-1, prop=0, prop2.5=0, prop97.5=0)
female.preg <- list()  # NOTE(review): never populated or read below
### Get female pregnancy
y <- 2001
for (y in 2001:2014) {
# One year's records, restricted to female == 1.
mf <- cdiff %>%
select(female, age, hospid, nis_stratum, discwt, nis_year) %>%
filter(!is.na(female)) %>%
filter(female == 1) %>%
filter(nis_year == y)
mf
ds <- svydesign(ids = ~hospid,
data = mf,
weights = ~discwt,
strata = ~nis_stratum,
nest=TRUE)
# NOTE(review): mf is already restricted to female == 1, so this proportion
# is degenerate (always 1) -- presumably either the filter or the formula
# should differ; verify against the intended male/female comparison.
prop <- svyciprop(~I(female==1), ds, level = .95, rm.na=TRUE)
prop.val <- as.vector(prop)
prop.val.2.5 <- attr(prop, "ci")[[1]]
prop.val.97.5 <- attr(prop, "ci")[[2]]
# NOTE(review): indexing [1]/[2] assumes svytotal returns two components
# (FALSE/TRUE totals); with a female-only subset the first would be 0 --
# verify the return structure and the male/female labels.
tot <- svytotal(~I(female==1), ds, level = .95, rm.na=TRUE)
males <- round(as.vector(tot)[1])
females <- round(as.vector(tot)[2])
#svp (~age, ds, level = 0.95)
df <- bind_rows(df, data_frame(year=y, tot.female=females, tot.male=males, prop=prop.val, prop2.5=prop.val.2.5, prop97.5=prop.val.97.5))
}
df <- df %>% filter(year > 2000)  # drop the sentinel seed row
df
write_csv(df, "../data/cdi-male-female.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Data.R
\docType{data}
\name{BigCity}
\alias{BigCity}
\title{Full Person-level Population Database}
\format{
A data.frame with 150266 rows and 12 variables:
\describe{
\item{HHID}{The identifier of the household. It corresponds to an alphanumeric sequence (four letters and five digits).}
\item{PersonID}{The identifier of the person within the household. NOTE it is not a unique identifier of a person for the whole population. It corresponds to an alphanumeric sequence (five letters and two digits).}
\item{Stratum}{Households are located in geographic strata. There are 119 strata across the city.}
\item{PSU}{Households are clustered in cartographic segments defined as primary sampling units (PSU). There are 1664 PSU and they are nested within strata.}
\item{Zone}{Segments clustered within strata can be located within urban or rural areas along the city.}
\item{Sex}{Sex of the person.}
\item{Income}{Per capita monthly income.}
\item{Expenditure}{Per capita monthly expenditure.}
\item{Employment}{A person's employment status.}
\item{Poverty}{This variable indicates whether the person is poor or not. It depends on income.}
}
}
\source{
\url{https://CRAN.R-project.org/package=TeachingSampling}
}
\usage{
data(BigCity)
}
\description{
This data set corresponds to some socioeconomic variables from 150266 people of a city in a particular year.
}
\references{
Package ‘TeachingSampling’; see \code{\link[TeachingSampling]{BigCity}}
}
\keyword{datasets}
| /man/BigCity.Rd | no_license | cran/BayesSampling | R | false | true | 1,579 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Data.R
\docType{data}
\name{BigCity}
\alias{BigCity}
\title{Full Person-level Population Database}
\format{
A data.frame with 150266 rows and 12 variables:
\describe{
\item{HHID}{The identifier of the household. It corresponds to an alphanumeric sequence (four letters and five digits).}
\item{PersonID}{The identifier of the person within the household. NOTE it is not a unique identifier of a person for the whole population. It corresponds to an alphanumeric sequence (five letters and two digits).}
\item{Stratum}{Households are located in geographic strata. There are 119 strata across the city.}
\item{PSU}{Households are clustered in cartographic segments defined as primary sampling units (PSU). There are 1664 PSU and they are nested within strata.}
\item{Zone}{Segments clustered within strata can be located within urban or rural areas along the city.}
\item{Sex}{Sex of the person.}
\item{Income}{Per capita monthly income.}
\item{Expenditure}{Per capita monthly expenditure.}
\item{Employment}{A person's employment status.}
\item{Poverty}{This variable indicates whether the person is poor or not. It depends on income.}
}
}
\source{
\url{https://CRAN.R-project.org/package=TeachingSampling}
}
\usage{
data(BigCity)
}
\description{
This data set corresponds to some socioeconomic variables from 150266 people of a city in a particular year.
}
\references{
Package ‘TeachingSampling’; see \code{\link[TeachingSampling]{BigCity}}
}
\keyword{datasets}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{taxonomy_about}
\alias{taxonomy_about}
\title{Taxonomy about}
\usage{
taxonomy_about()
}
\value{
Some JSON
}
\description{
Summary information about the OpenTree Taxonomy (OTT)
}
\details{
Return information about the taxonomy, including version.
}
| /man/taxonomy_about.Rd | no_license | jhpoelen/rotl | R | false | false | 310 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{taxonomy_about}
\alias{taxonomy_about}
\title{Taxonomy about}
\usage{
taxonomy_about()
}
\value{
Some JSON
}
\description{
Summary information about the OpenTree Taxonomy (OTT)
}
\details{
Return information about the taxonomy, including version.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trunc.rvine.R
\name{trunclevel}
\alias{trunclevel}
\title{Get Truncation Level}
\usage{
trunclevel(G, overall = FALSE)
}
\arguments{
\item{G}{Vine array.}
\item{overall}{Logical; \code{TRUE} returns the overall truncation level,
\code{FALSE} the truncation level of each column.}
}
\description{
Extract the truncation level of a vine array. Intended for internal use.
}
\examples{
G <- AtoG(CopulaModel::Dvinearray(6))
G <- truncvinemat(G, c(0, 1, 2, 1, 2, 4))
trunclevel(G)
trunclevel(G, TRUE)
}
| /man/trunclevel.Rd | permissive | vincenzocoia/copsupp | R | false | true | 577 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trunc.rvine.R
\name{trunclevel}
\alias{trunclevel}
\title{Get Truncation Level}
\usage{
trunclevel(G, overall = FALSE)
}
\arguments{
\item{G}{Vine array.}
\item{overall}{Logical; \code{TRUE} returns the overall truncation level,
\code{FALSE} the truncation level of each column.}
}
\description{
Extract the truncation level of a vine array. Intended for internal use.
}
\examples{
G <- AtoG(CopulaModel::Dvinearray(6))
G <- truncvinemat(G, c(0, 1, 2, 1, 2, 4))
trunclevel(G)
trunclevel(G, TRUE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_delete_internet_gateway}
\alias{ec2_delete_internet_gateway}
\title{Deletes the specified internet gateway}
\usage{
ec2_delete_internet_gateway(DryRun, InternetGatewayId)
}
\arguments{
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
\item{InternetGatewayId}{[required] The ID of the internet gateway.}
}
\description{
Deletes the specified internet gateway. You must detach the internet
gateway from the VPC before you can delete it.
}
\section{Request syntax}{
\preformatted{svc$delete_internet_gateway(
DryRun = TRUE|FALSE,
InternetGatewayId = "string"
)
}
}
\examples{
\dontrun{
# This example deletes the specified Internet gateway.
svc$delete_internet_gateway(
InternetGatewayId = "igw-c0a643a9"
)
}
}
\keyword{internal}
| /cran/paws.compute/man/ec2_delete_internet_gateway.Rd | permissive | johnnytommy/paws | R | false | true | 1,072 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_delete_internet_gateway}
\alias{ec2_delete_internet_gateway}
\title{Deletes the specified internet gateway}
\usage{
ec2_delete_internet_gateway(DryRun, InternetGatewayId)
}
\arguments{
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
\item{InternetGatewayId}{[required] The ID of the internet gateway.}
}
\description{
Deletes the specified internet gateway. You must detach the internet
gateway from the VPC before you can delete it.
}
\section{Request syntax}{
\preformatted{svc$delete_internet_gateway(
DryRun = TRUE|FALSE,
InternetGatewayId = "string"
)
}
}
\examples{
\dontrun{
# This example deletes the specified Internet gateway.
svc$delete_internet_gateway(
InternetGatewayId = "igw-c0a643a9"
)
}
}
\keyword{internal}
|
# install_binary() should write user-supplied metadata fields into both the
# installed package's DESCRIPTION file and its Meta/package.rds cached copy.
test_that("install_binary metadata", {
pkg <- binary_test_package("foo")  # test helper defined in the package's test setup
libpath <- test_temp_dir()         # test helper: throwaway library directory
metadata <- c("Foo" = "Bar", "Foobar" = "baz")
suppressMessages(
install_binary(pkg, lib = libpath, metadata = metadata, quiet = TRUE)
)
# The metadata must appear in the installed DESCRIPTION ...
dsc <- desc::desc(file.path(libpath, "foo"))
expect_equal(dsc$get("Foo")[[1]], "Bar")
expect_equal(dsc$get("Foobar")[[1]], "baz")
# ... and in the cached DESCRIPTION inside Meta/package.rds.
rds <- readRDS(file.path(libpath, "foo", "Meta", "package.rds"))
dsc2 <- rds$DESCRIPTION
expect_equal(dsc2[["Foo"]], "Bar")
expect_equal(dsc2[["Foobar"]], "baz")
})
# The same metadata guarantee must hold when installing from source through
# an install plan (make_install_plan() + install_package_plan()).
test_that("install_package_plan metadata", {
skip_if_offline()
local_cli_config()
pkg <- source_test_package("foo")  # test helper: path to a source test package
libpath <- test_temp_dir()
# Snapshot the console output of planning + installing; "?nocache" forces a
# fresh build so the metadata is actually applied.
expect_snapshot({
plan <- make_install_plan(
paste0("local::", pkg, "?nocache"), lib = libpath)
plan$metadata[[1]] <- c("Foo" = "Bar", "Foobar" = "baz")
plan$vignettes <- FALSE
install_package_plan(plan, lib = libpath, num_workers = 1)
})
# Metadata must land in the installed DESCRIPTION ...
dsc <- desc::desc(file.path(libpath, "foo"))
expect_equal(dsc$get("Foo")[[1]], "Bar")
expect_equal(dsc$get("Foobar")[[1]], "baz")
# ... and in the cached DESCRIPTION inside Meta/package.rds.
rds <- readRDS(file.path(libpath, "foo", "Meta", "package.rds"))
dsc2 <- rds$DESCRIPTION
expect_equal(dsc2[["Foo"]], "Bar")
expect_equal(dsc2[["Foobar"]], "baz")
})
| /tests/testthat/test-install-metadata.R | permissive | isabella232/pkgdepends | R | false | false | 1,297 | r |
test_that("install_binary metadata", {
pkg <- binary_test_package("foo")
libpath <- test_temp_dir()
metadata <- c("Foo" = "Bar", "Foobar" = "baz")
suppressMessages(
install_binary(pkg, lib = libpath, metadata = metadata, quiet = TRUE)
)
dsc <- desc::desc(file.path(libpath, "foo"))
expect_equal(dsc$get("Foo")[[1]], "Bar")
expect_equal(dsc$get("Foobar")[[1]], "baz")
rds <- readRDS(file.path(libpath, "foo", "Meta", "package.rds"))
dsc2 <- rds$DESCRIPTION
expect_equal(dsc2[["Foo"]], "Bar")
expect_equal(dsc2[["Foobar"]], "baz")
})
test_that("install_package_plan metadata", {
skip_if_offline()
local_cli_config()
pkg <- source_test_package("foo")
libpath <- test_temp_dir()
expect_snapshot({
plan <- make_install_plan(
paste0("local::", pkg, "?nocache"), lib = libpath)
plan$metadata[[1]] <- c("Foo" = "Bar", "Foobar" = "baz")
plan$vignettes <- FALSE
install_package_plan(plan, lib = libpath, num_workers = 1)
})
dsc <- desc::desc(file.path(libpath, "foo"))
expect_equal(dsc$get("Foo")[[1]], "Bar")
expect_equal(dsc$get("Foobar")[[1]], "baz")
rds <- readRDS(file.path(libpath, "foo", "Meta", "package.rds"))
dsc2 <- rds$DESCRIPTION
expect_equal(dsc2[["Foo"]], "Bar")
expect_equal(dsc2[["Foobar"]], "baz")
})
|
makedmat <- function(nnod) {
  ## Design matrix D of all possible assignments of the terminal nodes to
  ## the three partition classes.
  ## nnod: I = number of terminal nodes after a split.
  ## Returns a (3^nnod) x nnod matrix (K rows = possible assignments);
  ## column jj cycles through classes 1..3 in blocks of length 3^(nnod - jj),
  ## so the rows enumerate every assignment exactly once.
  n.assign <- 3^nnod
  cols <- lapply(seq_len(nnod), function(jj) {
    block <- n.assign / (3^jj)
    rep(rep(c(1, 2, 3), each = block), times = n.assign / (3 * block))
  })
  matrix(unlist(cols), nrow = n.assign, ncol = nnod)
}
makedmats <- function(dmat) {
  ## Boundary condition (partition-class cardinality): classes P1 and P2
  ## may not be empty.  Returns D' = the admissible rows of dmat (K' x I).
  ## NOTE: the original filter `sel1 & sel2 != 0` only worked via operator
  ## precedence (`sel1 & (sel2 != 0)`) plus numeric-to-logical coercion of
  ## sel1; the explicit comparisons below have identical behavior without
  ## the fragility.  A single admissible row still drops to a vector, as
  ## before (no drop = FALSE).
  n.p1 <- rowSums(dmat == 1)  # nodes assigned to P1, per row
  n.p2 <- rowSums(dmat == 2)  # nodes assigned to P2, per row
  dmat[n.p1 != 0 & n.p2 != 0, ]
}
| /R/dmats.R | no_license | jclaramunt/quint | R | false | false | 968 | r | makedmat<-function(nnod){
##creates designmatrix D with all possible assignments of the terminal nodes to the three partition classes
##nnod = I = number of terminal nodes after a split
##dmat=K * I matrix: K=number of possible assignments;
rmat<-3^(nnod)
#rmat is total number of rows
dmat<-matrix(unlist(lapply(1:nnod,function(jj,rmat){as.double(gl(3,rmat/(3^jj),rmat))},rmat=rmat)),ncol=nnod,nrow=rmat)
return(dmat)}
makedmats<-function(dmat){
#check of boundary condition: partition class cardinality condition:P1 and P2 may not be empty
#creates D'(dmats): matrix D with admissible assignments(K'); dmats= K' * I matrix;
sel1<-numeric(dim(dmat)[1])
sel2<-numeric(dim(dmat)[1])
#count the assignments to p1 for each row of dmat
sel1<-apply(dmat==1,1,sum)
#count the assignments to p2 for each row of dmat
sel2<-apply(dmat==2,1,sum)
#select the rows for which sel1 & sel2 not equals 0
dmats<-dmat[sel1&sel2!=0,]
return(dmats)}
|
library(PTXQC)
### Name: plot_IDRate
### Title: Plot percent of identified MS/MS for each Raw file.
### Aliases: plot_IDRate
### ** Examples
# Thresholds splitting identification rates into bad / ok / great bands.
id_rate_bad = 20; id_rate_great = 35;
label_ID = c("bad (<20%)" = "red", "ok (...)" = "blue", "great (>35%)" = "green")
# Three fake raw files with random identification percentages.
data = data.frame(fc.raw.file = paste('file', letters[1:3]),
ms.ms.identified.... = rnorm(3, 25, 15))
# Bin each percentage into its band; cut() labels must match names(label_ID).
data$cat = factor(cut(data$ms.ms.identified....,
breaks=c(-1, id_rate_bad, id_rate_great, 100),
labels=names(label_ID)))
plot_IDRate(data, id_rate_bad, id_rate_great, label_ID)
| /data/genthat_extracted_code/PTXQC/examples/plot_IDRate.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 636 | r | library(PTXQC)
### Name: plot_IDRate
### Title: Plot percent of identified MS/MS for each Raw file.
### Aliases: plot_IDRate
### ** Examples
id_rate_bad = 20; id_rate_great = 35;
label_ID = c("bad (<20%)" = "red", "ok (...)" = "blue", "great (>35%)" = "green")
data = data.frame(fc.raw.file = paste('file', letters[1:3]),
ms.ms.identified.... = rnorm(3, 25, 15))
data$cat = factor(cut(data$ms.ms.identified....,
breaks=c(-1, id_rate_bad, id_rate_great, 100),
labels=names(label_ID)))
plot_IDRate(data, id_rate_bad, id_rate_great, label_ID)
|
\name{ks.expo.logistic}
\alias{ks.expo.logistic}
\title{Test of Kolmogorov-Smirnov for the Exponentiated Logistic (EL) distribution}
\description{
The function \code{ks.expo.logistic()} gives the values for the KS test assuming an Exponentiated Logistic (EL) distribution with shape
parameter alpha and scale parameter beta. In addition, optionally, this function
allows one to show a comparative graph between the empirical and theoretical cdfs for a specified data set.
}
\usage{
ks.expo.logistic(x, alpha.est, beta.est,
alternative = c("less", "two.sided", "greater"), plot = FALSE, ...)
}
\arguments{
\item{x}{vector of observations.}
\item{alpha.est}{estimate of the parameter alpha}
\item{beta.est}{estimate of the parameter beta}
\item{alternative}{indicates the alternative hypothesis and must be one of \code{"two.sided"} (default), \code{"less"}, or \code{"greater"}.}
\item{plot}{Logical; if TRUE, the cdf plot is provided. }
\item{...}{additional arguments to be passed to the underlying plot function.}
}
\details{The Kolmogorov-Smirnov test is a goodness-of-fit technique based on the maximum distance between the empirical and theoretical cdfs.}
\value{The function \code{ks.expo.logistic()} carries out the KS test for the Exponentiated Logistic(EL)}
\references{
Ali, M.M., Pal, M. and Woo, J. (2007).
\emph{Some Exponentiated Distributions},
The Korean Communications in Statistics, 14(1), 93-109.
Shirke, D.T., Kumbhar, R.R. and Kundu, D. (2005).
\emph{Tolerance intervals for exponentiated scale family of distributions},
Journal of Applied Statistics, 32, 1067-1074
}
\seealso{
\code{\link{pp.expo.logistic}} for \code{PP} plot and \code{\link{qq.expo.logistic}} for \code{QQ} plot
}
\examples{
## Load data sets
data(dataset2)
## Maximum Likelihood(ML) Estimates of alpha & beta for the data(dataset2)
## Estimates of alpha & beta using 'maxLik' package
## alpha.est = 5.31302, beta.est = 139.04515
ks.expo.logistic(dataset2, 5.31302, 139.04515, alternative = "two.sided", plot = TRUE)
}
\keyword{htest}
| /man/ks.expo.logistic.Rd | no_license | statwonk/reliaR | R | false | false | 2,094 | rd | \name{ks.expo.logistic}
\alias{ks.expo.logistic}
\title{Test of Kolmogorov-Smirnov for the Exponentiated Logistic (EL) distribution}
\description{
The function \code{ks.expo.logistic()} gives the values for the KS test assuming an Exponentiated Logistic (EL) distribution with shape
parameter alpha and scale parameter beta. In addition, optionally, this function
allows one to show a comparative graph between the empirical and theoretical cdfs for a specified data set.
}
\usage{
ks.expo.logistic(x, alpha.est, beta.est,
alternative = c("less", "two.sided", "greater"), plot = FALSE, ...)
}
\arguments{
\item{x}{vector of observations.}
\item{alpha.est}{estimate of the parameter alpha}
\item{beta.est}{estimate of the parameter beta}
\item{alternative}{indicates the alternative hypothesis and must be one of \code{"two.sided"} (default), \code{"less"}, or \code{"greater"}.}
\item{plot}{Logical; if TRUE, the cdf plot is provided. }
\item{...}{additional arguments to be passed to the underlying plot function.}
}
\details{The Kolmogorov-Smirnov test is a goodness-of-fit technique based on the maximum distance between the empirical and theoretical cdfs.}
\value{The function \code{ks.expo.logistic()} carries out the KS test for the Exponentiated Logistic(EL)}
\references{
Ali, M.M., Pal, M. and Woo, J. (2007).
\emph{Some Exponentiated Distributions},
The Korean Communications in Statistics, 14(1), 93-109.
Shirke, D.T., Kumbhar, R.R. and Kundu, D. (2005).
\emph{Tolerance intervals for exponentiated scale family of distributions},
Journal of Applied Statistics, 32, 1067-1074
}
\seealso{
\code{\link{pp.expo.logistic}} for \code{PP} plot and \code{\link{qq.expo.logistic}} for \code{QQ} plot
}
\examples{
## Load data sets
data(dataset2)
## Maximum Likelihood(ML) Estimates of alpha & beta for the data(dataset2)
## Estimates of alpha & beta using 'maxLik' package
## alpha.est = 5.31302, beta.est = 139.04515
ks.expo.logistic(dataset2, 5.31302, 139.04515, alternative = "two.sided", plot = TRUE)
}
\keyword{htest}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/estI.R
\name{estI}
\alias{estI}
\title{Estimation of information matrix}
\usage{
estI(x, theta, lambda, gradient, type)
}
\arguments{
\item{x}{values of influental variable for the link function}
\item{theta}{numeric vector of length four with link function's parameters}
\item{lambda}{[\code{function(theta, x)}]\cr
link function for exponential distribution}
\item{gradient}{[\code{function(x, theta, ...)}]\cr
gradient of link function}
\item{type}{[\code{integer}]\cr
if link function is not given a collection of given link function is available, see \code{\link{linkfun}}}
}
\value{
estimated information matrix
}
\description{
Estimation of information matrix
}
| /man/estI.Rd | no_license | szugat/predfat | R | false | false | 760 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/estI.R
\name{estI}
\alias{estI}
\title{Estimation of information matrix}
\usage{
estI(x, theta, lambda, gradient, type)
}
\arguments{
\item{x}{values of the influential variable for the link function}
\item{theta}{numeric vector of length four with link function's parameters}
\item{lambda}{[\code{function(theta, x)}]\cr
link function for exponential distribution}
\item{gradient}{[\code{function(x, theta, ...)}]\cr
gradient of link function}
\item{type}{[\code{integer}]\cr
if a link function is not given, a collection of predefined link functions is available; see \code{\link{linkfun}}}
}
\value{
estimated information matrix
}
\description{
Estimation of information matrix
}
|
#!/usr/bin/env Rscript
suppressPackageStartupMessages(library(optparse))
# Plots the gates from the FCS and corresponding CLR file following the
# Lymphocyte -> Single cells -> CD4+ ...
# hierarchy.
#for x in RData/pstat5-join/*.RData; do x=`basename $x`; Rscript ~nikolas/Projects/IL2/bin/gating/plot-gates.R --in.file RData/pstat5-join/$x --plot.file CLR/Plots/${x%.RData}.pdf --gate.file CLR/$x ; done
# CLI: --in.file (FCS events), --gate.file (CLR gate membership), --plot.file
# (output PDF path).
option_list <- list(
make_option(c("--in.file"), default=NULL, help = ".RData file"),
make_option(c("--gate.file"), default=NULL, help = ".RData file"),
make_option(c("--plot.file"), default=NULL, help = ".RData file")
)
OptionParser(option_list=option_list) -> option.parser
parse_args(option.parser) -> opt
#in.file: load() is expected to define `fcs.data`
print(basename(opt$in.file))
load(opt$in.file)
print(colnames(fcs.data))
#gate.file: load() is expected to define `CLR` (one row per event)
print(basename(opt$gate.file))
load(opt$gate.file)
print(colnames(CLR))
# The two files must describe the same events, row for row.
if (nrow(fcs.data)!=nrow(CLR)) stop('number of rows of in.file and gate.file do not match!')
#plot.file
print(plot.file <- opt$plot.file)
# Project helpers (smoothPlot, applyTransforms, ...) and fitted transforms.
source('~nikolas/bin/FCS/fcs.R',chdir=T)
print(load('~nikolas/dunwich/Projects/IL2/PSTAT5-CD25-CD45RA-CD4-FOXP3/transform-w1.RData'))
print(names(transforms))
#fcs.data <- applyTransforms(fcs.data, transforms)
# Transform every channel except the scatter channels FSCA/SSCA.
fcs.data <- cbind(fcs.data[,c('FSCA','SSCA')], applyTransforms(fcs.data[,-grep('FSCA|SSCA',colnames(fcs.data))], transforms))
# Smoothed two-dimensional plot of a pair of channels.
# Prints (but does not apply) the 1%/99% quantile limits of each channel,
# then delegates the drawing to smoothPlot() from fcs.R.
f <- function(fcs.data, channels, main, outliers=FALSE) {
    ch.x <- channels[[1]]
    ch.y <- channels[[2]]
    q.x <- quantile(fcs.data[, ch.x], probs = seq(0, 1, .01))
    q.y <- quantile(fcs.data[, ch.y], probs = seq(0, 1, .01))
    print(xlim <- c(q.x[['1%']], q.x[['99%']]))
    print(ylim <- c(q.y[['1%']], q.y[['99%']]))
    smoothPlot(fcs.data[, channels], xlab = ch.x, ylab = ch.y, main = main, outliers = outliers)
}
# Draw the convex hull of the events assigned to a gate on the current plot.
#
# X              two-column matrix of event coordinates (already transformed).
# classification per-row 0/1 vector; 1 marks events inside the gate.
# col            colour for the hull outline.
#
# Prints the in/out-of-gate counts as a side effect; requires an active
# plotting device with a plot already drawn (uses lines()).
plot.gate.chull <- function(X, classification, col) {
    print(table(classification==1))
    X1 <- X[which(classification==1),]
    p <- X1[chull(X1),]
    # Close the polygon by returning to the first vertex only. (The original
    # code appended the entire hull again, retracing every edge twice.)
    p <- rbind(p, p[1, , drop = FALSE])
    lines(p, col=col, lwd=2)
}
pdf(plot.file)
# One panel per gating step, laid out on a 3x2 grid.
par(mfrow=c(3,2), cex.lab=2, cex.main=2, las=1, mar=c(6,6,2,1))
x <- fcs.data
# NOTE(review): iter()/nextElem() come from the "iterators" package, which is
# never attached in this script -- presumably loaded via fcs.R; verify.
figure.labels <- iter(paste(letters,')',sep=''))
# Pattern for each step: plot the current subset `x`, overlay the gate hull
# computed from the FULL fcs.data (CLR columns are row-aligned with
# fcs.data), then narrow `x` to the gated events for the next step.
# Lymphocytes
channels <- c('FSCA','SSCA')
f(x, channels, main='Lymphocytes')
plot.gate.chull(fcs.data[,channels], classification=CLR[,'Lymphocytes'], col='red')
title(nextElem(figure.labels), adj=0)
x <- fcs.data[ which(as.logical(CLR[,'Lymphocytes'])) , ]
# Single cells
channels <- c('SSCH','SSCW')
f(x, channels, main='Single cells', outliers=FALSE)
plot.gate.chull(fcs.data[,channels], classification=CLR[,'Single cells'], col='red')
title(nextElem(figure.labels), adj=0)
x <- fcs.data[ which(as.logical(CLR[,'Single cells'])) , ]
# CD4
channels <- c('CD4','SSCA')
f(x, channels, main='CD4+', outliers=FALSE)
plot.gate.chull(fcs.data[,channels], classification=CLR[,'CD4'], col='red')
title(nextElem(figure.labels), adj=0)
x <- fcs.data[ which(as.logical(CLR[,'CD4'])) , ]
# Memory / Naive (two gates drawn on the same panel)
channels <- c('CD45RA','SSCA')
f(x, channels, main='Memory / Naive', outliers=TRUE)
plot.gate.chull(fcs.data[,channels], classification=CLR[,'Memory'], col='black')
plot.gate.chull(fcs.data[,channels], classification=CLR[,'Naive'], col='red')
title(nextElem(figure.labels), adj=0)
x.naive <- fcs.data[ as.logical(CLR[,'Naive']) , ]
x.memory <- fcs.data[ as.logical(CLR[,'Memory']) , ]
# Naive Eff / Treg
channels <- c('CD25','FOXP3')
f(x.naive, channels, main='Naive Eff / TReg', outliers=TRUE)
plot.gate.chull(fcs.data[,channels], classification=CLR[,'Naive Eff'], col='green')
plot.gate.chull(fcs.data[,channels], classification=CLR[,'Naive Treg'], col='blue')
title(nextElem(figure.labels), adj=0)
# Memory Eff / Treg
channels <- c('CD25','FOXP3')
f(x.memory, channels, main='Memory Eff / TReg', outliers=TRUE)
plot.gate.chull(fcs.data[,channels], classification=CLR[,'Memory Eff'], col='black')
plot.gate.chull(fcs.data[,channels], classification=CLR[,'Memory Treg'], col='red')
title(nextElem(figure.labels), adj=0)
dev.off()
| /IL2/bin/gating/plot-gates.R | no_license | pontikos/PhD_Projects | R | false | false | 3,959 | r | #!/usr/bin/env Rscript
suppressPackageStartupMessages(library(optparse))
# Plots the gates from the FCS and corresponding CLR file following the
# Lymphocyte -> Single cells -> CD4+ ...
# hierarchy.
#for x in RData/pstat5-join/*.RData; do x=`basename $x`; Rscript ~nikolas/Projects/IL2/bin/gating/plot-gates.R --in.file RData/pstat5-join/$x --plot.file CLR/Plots/${x%.RData}.pdf --gate.file CLR/$x ; done
option_list <- list(
make_option(c("--in.file"), default=NULL, help = ".RData file"),
make_option(c("--gate.file"), default=NULL, help = ".RData file"),
make_option(c("--plot.file"), default=NULL, help = ".RData file")
)
OptionParser(option_list=option_list) -> option.parser
parse_args(option.parser) -> opt
#in.file
print(basename(opt$in.file))
load(opt$in.file)
print(colnames(fcs.data))
#gate.file
print(basename(opt$gate.file))
load(opt$gate.file)
print(colnames(CLR))
if (nrow(fcs.data)!=nrow(CLR)) stop('number of rows of in.file and gate.file do not match!')
#plot.file
print(plot.file <- opt$plot.file)
source('~nikolas/bin/FCS/fcs.R',chdir=T)
print(load('~nikolas/dunwich/Projects/IL2/PSTAT5-CD25-CD45RA-CD4-FOXP3/transform-w1.RData'))
print(names(transforms))
#fcs.data <- applyTransforms(fcs.data, transforms)
fcs.data <- cbind(fcs.data[,c('FSCA','SSCA')], applyTransforms(fcs.data[,-grep('FSCA|SSCA',colnames(fcs.data))], transforms))
f <- function(fcs.data, channels, main, outliers=FALSE) {
xquant <- quantile(fcs.data[,channels[[1]]],probs=seq(0,1,.01))
yquant <- quantile(fcs.data[,channels[[2]]],probs=seq(0,1,.01))
print(xlim <- c(xquant[['1%']],xquant[['99%']]))
print(ylim <- c(yquant[['1%']],yquant[['99%']]))
smoothPlot( fcs.data[,channels], xlab=channels[[1]], ylab=channels[[2]], main=main, outliers=outliers )
}
plot.gate.chull <- function(X, classification, col) {
print(table(classification==1))
X1 <- X[which(classification==1),]
p <- X1[chull(X1),]
p <- rbind(p, p)
lines(p, col=col, lwd=2)
}
pdf(plot.file)
par(mfrow=c(3,2), cex.lab=2, cex.main=2, las=1, mar=c(6,6,2,1))
x <- fcs.data
figure.labels <- iter(paste(letters,')',sep=''))
# Lymphocytes
channels <- c('FSCA','SSCA')
f(x, channels, main='Lymphocytes')
plot.gate.chull(fcs.data[,channels], classification=CLR[,'Lymphocytes'], col='red')
title(nextElem(figure.labels), adj=0)
x <- fcs.data[ which(as.logical(CLR[,'Lymphocytes'])) , ]
# Single cells
channels <- c('SSCH','SSCW')
f(x, channels, main='Single cells', outliers=FALSE)
plot.gate.chull(fcs.data[,channels], classification=CLR[,'Single cells'], col='red')
title(nextElem(figure.labels), adj=0)
x <- fcs.data[ which(as.logical(CLR[,'Single cells'])) , ]
# CD4
channels <- c('CD4','SSCA')
f(x, channels, main='CD4+', outliers=FALSE)
plot.gate.chull(fcs.data[,channels], classification=CLR[,'CD4'], col='red')
title(nextElem(figure.labels), adj=0)
x <- fcs.data[ which(as.logical(CLR[,'CD4'])) , ]
# Memory / Naive
channels <- c('CD45RA','SSCA')
f(x, channels, main='Memory / Naive', outliers=TRUE)
plot.gate.chull(fcs.data[,channels], classification=CLR[,'Memory'], col='black')
plot.gate.chull(fcs.data[,channels], classification=CLR[,'Naive'], col='red')
title(nextElem(figure.labels), adj=0)
x.naive <- fcs.data[ as.logical(CLR[,'Naive']) , ]
x.memory <- fcs.data[ as.logical(CLR[,'Memory']) , ]
# Naive Eff / Treg
channels <- c('CD25','FOXP3')
f(x.naive, channels, main='Naive Eff / TReg', outliers=TRUE)
plot.gate.chull(fcs.data[,channels], classification=CLR[,'Naive Eff'], col='green')
plot.gate.chull(fcs.data[,channels], classification=CLR[,'Naive Treg'], col='blue')
title(nextElem(figure.labels), adj=0)
# Memory Eff / Treg
channels <- c('CD25','FOXP3')
f(x.memory, channels, main='Memory Eff / TReg', outliers=TRUE)
plot.gate.chull(fcs.data[,channels], classification=CLR[,'Memory Eff'], col='black')
plot.gate.chull(fcs.data[,channels], classification=CLR[,'Memory Treg'], col='red')
title(nextElem(figure.labels), adj=0)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/imaap_one_call_per_cell.R
\name{imaap_one_call_per_cell}
\alias{imaap_one_call_per_cell}
\title{Reduce to one cell type call per cell}
\usage{
imaap_one_call_per_cell(cell_calling)
}
\arguments{
\item{cell_calling}{The cell annotation output from \code{\link{imaap_marker_drop}}}
}
\value{
A single cell type calling for each cell in the dataset.
}
\description{
Every cell gets only one annotation; that is, all levels of cell annotation are reduced to one.
}
| /man/imaap_one_call_per_cell.Rd | permissive | labsyspharm/IMAAP | R | false | true | 538 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/imaap_one_call_per_cell.R
\name{imaap_one_call_per_cell}
\alias{imaap_one_call_per_cell}
\title{Reduce to one cell type call per cell}
\usage{
imaap_one_call_per_cell(cell_calling)
}
\arguments{
\item{cell_calling}{The cell annotation output from \code{\link{imaap_marker_drop}}}
}
\value{
A single cell type calling for each cell in the dataset.
}
\description{
Every cell gets only one annotation; that is all levels of cell annotation are reduced to one.
}
|
# Explore the distribution of Facebook friends' birthdays by month and by
# day of month, using qplot() histograms and frequency tables.
# NOTE(review): qplot() is from ggplot2, which the original script never
# attached; load it explicitly so the script runs in a fresh session.
library(ggplot2)
setwd('/Users/Warren/Desktop/Udacity/data_analysis_with_R/')  # machine-specific path
fb_friends_birthdays <- read.csv('friends_birthdays.csv')
bdays <- fb_friends_birthdays
# Parse the raw 'Start' column (m/d/Y strings) into Date objects.
bdays$clean_dates <- as.Date(bdays$Start, format = '%m/%d/%Y')
# Birthdays per month.
bdays$month <- format(bdays$clean_dates, '%m')
qplot(x = month, data = bdays)
with(bdays, table(month))
# Birthdays per day of month.
bdays$day <- format(bdays$clean_dates, '%d')
with(bdays, table(day))
qplot(x = day, data = bdays)
| /data_analysis_with_R/fb_birthdays.R | no_license | wlau88/udacity_data_analyst | R | false | false | 421 | r | setwd('/Users/Warren/Desktop/Udacity/data_analysis_with_R/')
fb_friends_birthdays <- read.csv('friends_birthdays.csv')
bdays <- fb_friends_birthdays
bdays$clean_dates <- as.Date(bdays$Start, format = '%m/%d/%Y')
bdays$month <- format(bdays$clean_dates, '%m')
qplot(x = month, data = bdays)
with(bdays, table(month))
bdays$day <- format(bdays$clean_dates, '%d')
with(bdays, table(day))
qplot(x = day, data = bdays)
|
#' Generate public crime data for use with ElasticSynth
#'
#' Downloads Chicago Police Department incident records for violent felonies
#' (FBI codes below "05") from the City of Chicago Socrata API, aggregates
#' monthly counts per police district and offence code, collapses them into
#' six-month periods, and splits districts into treated and donor sets.
#' Requires a network connection plus the dplyr, RSocrata, zoo, lubridate
#' and tidyr packages.
#'
#' @return A list with two data frames: \code{treated_units} (districts
#'   006, 007, 009, 010, 011, 015) and \code{donor_units} (all remaining
#'   districts except 031), each with columns period, district, fbi_code,
#'   countcrimes and time.
#' @export
generate_public_crime_data <- function() {
  
  library(dplyr)
  library(RSocrata)
  library(zoo)
  library(lubridate)
  library(tidyr)
  fbi_code <- "'05'" # 01A = Murder, 02 = CSA, 03 = Robbery, 04A = Assault, 04B = Battery
  # Pull all data with FBI Code less than 05
  url <- sprintf("https://data.cityofchicago.org/resource/6zsd-86xi.json?$select=*&$where=fbi_code<%s", fbi_code)
  violent_felonies <- read.socrata(url)
  # Normalise timestamps: calendar date, then first-of-month date.
  violent_felonies$date_clean <- as.Date(as.POSIXct(violent_felonies$date, format = '%Y-%m-%d %H:%M:%S'))
  violent_felonies$yearmon <- as.Date(as.yearmon(violent_felonies$date_clean))
  # Monthly counts per district: once pooled across all violent felonies
  # (fbi_code = 'VF') and once broken out by individual FBI code.
  violent_felonies_by_district <- violent_felonies %>%
    group_by(yearmon, district, .drop=F) %>%
    summarise(countcrimes = length(id)) %>%
    complete(yearmon, district) %>%
    ungroup() %>%
    mutate(fbi_code = 'VF') %>%
    bind_rows(
      violent_felonies %>%
        # define homicide as homicide + second degree homicide
        mutate(fbi_code = ifelse(fbi_code == '01B', '01A', fbi_code)) %>%
        group_by(yearmon, district, fbi_code, .drop =F) %>%
        summarise(countcrimes = length(id)) %>%
        complete(yearmon, district, fbi_code)
    ) %>%
    filter(yearmon >= '2010-03-01' & yearmon <= '2018-12-01') %>%
    # --- period is six month intervals, Period 1 = March through August, Period 2 = September through February.
    # --- Except for 2017 due to staggered release of SDSCs and introduction of Tier 2 SDSCs before the end of period 2
    mutate(period = ifelse(month(yearmon) %in% 3:8, 1, 2),
           # Jan/Feb belong to the previous year's period 2 (e.g. Jan 2015 -> 2014.2).
           period = ifelse(month(yearmon) %in% 1:2,
                           as.numeric(paste(year(yearmon) - 1, period, sep = '.')),
                           as.numeric(paste(year(yearmon), period, sep = '.'))),
           # +0.9 splits the irregular 2017/2018 sub-periods off (see note above).
           period = ifelse(month(yearmon) %in% 1:2 & year(yearmon) == 2018, period + 0.9, period),
           period = ifelse(month(yearmon) == 2 & year(yearmon) == 2017, period + 0.9, period),
           period = as.numeric(as.character(period)),
           period = as.factor(period),
           district = as.factor(district),
           fbi_code = as.factor(fbi_code)) %>%
    # Average monthly count within each (period, district, code) cell.
    group_by(period, district, fbi_code, .drop=F) %>%
    summarise(countcrimes = mean(countcrimes)) %>%
    ungroup() %>%
    # --- generate time variable
    arrange(district, fbi_code, period) %>%
    group_by(district, fbi_code) %>%
    mutate(countcrimes = ifelse(is.nan(countcrimes), 0, countcrimes),
           time = 1:length(period))
  # Treated units are the SDSC districts; 031 is dropped from both sets.
  treated_units <- subset(violent_felonies_by_district, district %in% c('006', '007', '009', '010', '011', '015'))
  donor_units <- subset(violent_felonies_by_district, !(district %in% c('006', '007', '009', '010', '011', '015', '031')))
  return(list(treated_units = treated_units,
              donor_units = donor_units))
}
| /R/generate_public_crime_data.R | no_license | terryneumann/ElasticSynth | R | false | false | 2,885 | r | #' Generate public crime data for use with ElasticSynth
#'
#' @export
generate_public_crime_data <- function() {
library(dplyr)
library(RSocrata)
library(zoo)
library(lubridate)
library(tidyr)
fbi_code <- "'05'" # 01A = Murder, 02 = CSA, 03 = Robbery, 04A = Assault, 04B = Battery
# Pull all data with FBI Code less than 05
url <- sprintf("https://data.cityofchicago.org/resource/6zsd-86xi.json?$select=*&$where=fbi_code<%s", fbi_code)
violent_felonies <- read.socrata(url)
violent_felonies$date_clean <- as.Date(as.POSIXct(violent_felonies$date, format = '%Y-%m-%d %H:%M:%S'))
violent_felonies$yearmon <- as.Date(as.yearmon(violent_felonies$date_clean))
violent_felonies_by_district <- violent_felonies %>%
group_by(yearmon, district, .drop=F) %>%
summarise(countcrimes = length(id)) %>%
complete(yearmon, district) %>%
ungroup() %>%
mutate(fbi_code = 'VF') %>%
bind_rows(
violent_felonies %>%
# define homicide as homicide + second degree homicide
mutate(fbi_code = ifelse(fbi_code == '01B', '01A', fbi_code)) %>%
group_by(yearmon, district, fbi_code, .drop =F) %>%
summarise(countcrimes = length(id)) %>%
complete(yearmon, district, fbi_code)
) %>%
filter(yearmon >= '2010-03-01' & yearmon <= '2018-12-01') %>%
# --- period is six month intervals, Period 1 = March through August, Period 2 = September through February.
# --- Except for 2017 due to staggered release of SDSCs and introduction of Tier 2 SDSCs before the end of period 2
mutate(period = ifelse(month(yearmon) %in% 3:8, 1, 2),
period = ifelse(month(yearmon) %in% 1:2,
as.numeric(paste(year(yearmon) - 1, period, sep = '.')),
as.numeric(paste(year(yearmon), period, sep = '.'))),
period = ifelse(month(yearmon) %in% 1:2 & year(yearmon) == 2018, period + 0.9, period),
period = ifelse(month(yearmon) == 2 & year(yearmon) == 2017, period + 0.9, period),
period = as.numeric(as.character(period)),
period = as.factor(period),
district = as.factor(district),
fbi_code = as.factor(fbi_code)) %>%
group_by(period, district, fbi_code, .drop=F) %>%
summarise(countcrimes = mean(countcrimes)) %>%
ungroup() %>%
# --- generate time variable
arrange(district, fbi_code, period) %>%
group_by(district, fbi_code) %>%
mutate(countcrimes = ifelse(is.nan(countcrimes), 0, countcrimes),
time = 1:length(period))
treated_units <- subset(violent_felonies_by_district, district %in% c('006', '007', '009', '010', '011', '015'))
donor_units <- subset(violent_felonies_by_district, !(district %in% c('006', '007', '009', '010', '011', '015', '031')))
return(list(treated_units = treated_units,
donor_units = donor_units))
}
|
## Van der Meulen locations ##
# Builds a labelled map of six European cities over a shaded-relief raster
# with rivers, lakes and oceans, projected to EPSG:3034, and saves it as a
# PNG. Requires Natural Earth shapefiles and the SR_HR relief raster in
# data-raw/, plus an OpenCage API key for oc_forward_df().
library(sf)
library(tidyverse)
library(raster)
library(opencage)
library(ggrepel)
# Load data ---------------------------------------------------------------
# All data is st_crs(4326)
rivers <- st_read("data-raw/ne_10m_rivers_lake_centerlines/ne_10m_rivers_lake_centerlines.shp")
lakes <- st_read("data-raw/ne_10m_lakes/ne_10m_lakes.shp")
oceans <- st_read("data-raw/ne_10m_ocean/ne_10m_ocean.shp")
# Crop the relief raster to the area of interest (lon -3..14, lat 44..58).
elev_raster <- raster("data-raw/SR_HR/SR_HR.tif") %>%
  crop(extent(c(-3, 14, 44, 58)))
cities <- c("Antwerp", "Bremen", "Cologne", "Frankfurt", "Strasbourg", "Leiden")
# Use bounds to ensure geocoding is accurate
cities_df <- oc_forward_df(placename = cities,
                           bounds = oc_bbox(0, 48, 11.5, 55))
# Change CRS --------------------------------------------------------------
set_crs <- st_crs(3034)
# Transform sf objects
rivers_proj <- st_transform(rivers, crs = set_crs)
lakes_proj <- st_transform(lakes, crs = set_crs)
oceans_proj <- st_transform(oceans, crs = set_crs)
# Transform raster and cast to data frame
elev_raster_proj <- projectRaster(elev_raster, crs = set_crs$proj4string)
elev_df_proj <- elev_raster_proj %>%
  rasterToPoints() %>%
  data.frame()
names(elev_df_proj) <- c("lng", "lat", "alt")
# Cast to sf, transform, and get coordinates for plotting text
cities_proj <- st_as_sf(cities_df, coords = c("oc_lng", "oc_lat"), crs = 4326) %>%
  st_transform(crs = set_crs) %>%
  mutate(lng = st_coordinates(.)[ , 1],
         lat = st_coordinates(.)[ , 2])
# Bounding box defined in lon/lat, then projected, so the map window matches
# the raster crop.
bounds <- st_bbox(c(xmin = 0,
                    xmax = 11.5,
                    ymax = 48,
                    ymin = 55),
                  crs = st_crs(4326)) %>%
  st_as_sfc() %>%
  st_transform(crs = set_crs) %>%
  st_bbox()
# Layer order: relief raster at the bottom, water features, then city points
# with non-overlapping labels via ggrepel.
ggplot() +
  geom_raster(data = elev_df_proj, aes(lng, lat, fill = alt), alpha = 0.6) +
  scale_fill_gradientn(colors = gray.colors(50, start = 0.6, end = 1)) +
  geom_sf(data = oceans_proj, color = NA, fill = gray(0.8)) +
  geom_sf(data = rivers_proj, color = gray(0.8), size = 0.2) +
  geom_sf(data = lakes_proj, color = gray(0.8), fill = gray(0.8)) +
  geom_sf(data = cities_proj, size = 1) +
  geom_text_repel(data = cities_proj,
                  aes(x = lng, y = lat, label = placename),
                  size = 3) +
  coord_sf(xlim = c(bounds[1], bounds[3]),
           ylim = c(bounds[2], bounds[4]),
           expand = FALSE, datum = NA) +
  theme_void() +
  theme(legend.position = "none")
# File name embeds the EPSG code and today's date.
ggsave(paste0("img/vdm-locations-",
              st_crs(cities_proj)$epsg, "-",
              lubridate::today(), ".png"))
| /vdm-locations.R | no_license | rohit-21/making-maps | R | false | false | 2,618 | r | ## Van der Meulen locations ##
library(sf)
library(tidyverse)
library(raster)
library(opencage)
library(ggrepel)
# Load data ---------------------------------------------------------------
# All data is st_crs(4326)
rivers <- st_read("data-raw/ne_10m_rivers_lake_centerlines/ne_10m_rivers_lake_centerlines.shp")
lakes <- st_read("data-raw/ne_10m_lakes/ne_10m_lakes.shp")
oceans <- st_read("data-raw/ne_10m_ocean/ne_10m_ocean.shp")
elev_raster <- raster("data-raw/SR_HR/SR_HR.tif") %>%
crop(extent(c(-3, 14, 44, 58)))
cities <- c("Antwerp", "Bremen", "Cologne", "Frankfurt", "Strasbourg", "Leiden")
# Use bounds to ensure geocoding is accurate
cities_df <- oc_forward_df(placename = cities,
bounds = oc_bbox(0, 48, 11.5, 55))
# Change CRS --------------------------------------------------------------
set_crs <- st_crs(3034)
# Transform sf objects
rivers_proj <- st_transform(rivers, crs = set_crs)
lakes_proj <- st_transform(lakes, crs = set_crs)
oceans_proj <- st_transform(oceans, crs = set_crs)
# Transform raster and cast to data frame
elev_raster_proj <- projectRaster(elev_raster, crs = set_crs$proj4string)
elev_df_proj <- elev_raster_proj %>%
rasterToPoints() %>%
data.frame()
names(elev_df_proj) <- c("lng", "lat", "alt")
# Cast to sf, transform, and get coordinates for plotting text
cities_proj <- st_as_sf(cities_df, coords = c("oc_lng", "oc_lat"), crs = 4326) %>%
st_transform(crs = set_crs) %>%
mutate(lng = st_coordinates(.)[ , 1],
lat = st_coordinates(.)[ , 2])
bounds <- st_bbox(c(xmin = 0,
xmax = 11.5,
ymax = 48,
ymin = 55),
crs = st_crs(4326)) %>%
st_as_sfc() %>%
st_transform(crs = set_crs) %>%
st_bbox()
ggplot() +
geom_raster(data = elev_df_proj, aes(lng, lat, fill = alt), alpha = 0.6) +
scale_fill_gradientn(colors = gray.colors(50, start = 0.6, end = 1)) +
geom_sf(data = oceans_proj, color = NA, fill = gray(0.8)) +
geom_sf(data = rivers_proj, color = gray(0.8), size = 0.2) +
geom_sf(data = lakes_proj, color = gray(0.8), fill = gray(0.8)) +
geom_sf(data = cities_proj, size = 1) +
geom_text_repel(data = cities_proj,
aes(x = lng, y = lat, label = placename),
size = 3) +
coord_sf(xlim = c(bounds[1], bounds[3]),
ylim = c(bounds[2], bounds[4]),
expand = FALSE, datum = NA) +
theme_void() +
theme(legend.position = "none")
ggsave(paste0("img/vdm-locations-",
st_crs(cities_proj)$epsg, "-",
lubridate::today(), ".png"))
|
# nocov start
# Package load hook: installs two active bindings (tune_symbol, tune_color)
# in the tune namespace so their values are re-evaluated on every access
# (letting options set after load take effect), and registers S3 methods
# lazily so ggplot2/dplyr remain soft dependencies.
.onLoad <- function(libname, pkgname) {
  ns <- rlang::ns_env("tune")
  # Modified version of the cli .onLoad()
  # We can't use cli::symbol$tick because the width of the character
  # looks awful when you output it alongside info / warning characters
  makeActiveBinding(
    "tune_symbol",
    function() {
      # If `cli.unicode` is set we use that
      opt <- getOption("cli.unicode", NULL)
      if (!is.null(opt)) {
        if (isTRUE(opt)) return(tune_symbol_utf8) else return(tune_symbol_ascii)
      }
      # Otherwise we try to auto-detect
      if (cli::is_utf8_output()) {
        tune_symbol_utf8
      } else if (is_latex_output()) {
        tune_symbol_ascii
      } else if (is_windows()) {
        tune_symbol_windows
      } else {
        tune_symbol_ascii
      }
    },
    ns
  )
  # Colour palette switches on the `tidymodels.dark` option; defaults to the
  # light palette when the option is unset or FALSE.
  makeActiveBinding(
    "tune_color",
    function() {
      opt <- getOption("tidymodels.dark", NULL)
      if (!is.null(opt)) {
        if (isTRUE(opt)) {
          return(tune_color_dark)
        } else {
          return(tune_color_light)
        }
      }
      tune_color_light
    },
    ns
  )
  # lazily register autoplot
  s3_register("ggplot2::autoplot", "tune_results")
  # dplyr < 1.0.0 has no dplyr_reconstruct() generic, so each verb must be
  # registered per class; dplyr >= 1.0.0 needs only dplyr_reconstruct().
  if (dplyr_pre_1.0.0()) {
    vctrs::s3_register("dplyr::mutate", "tune_results", method = mutate_tune_results)
    vctrs::s3_register("dplyr::arrange", "tune_results", method = arrange_tune_results)
    vctrs::s3_register("dplyr::filter", "tune_results", method = filter_tune_results)
    vctrs::s3_register("dplyr::rename", "tune_results", method = rename_tune_results)
    vctrs::s3_register("dplyr::select", "tune_results", method = select_tune_results)
    vctrs::s3_register("dplyr::slice", "tune_results", method = slice_tune_results)
    vctrs::s3_register("dplyr::mutate", "resample_results", method = mutate_resample_results)
    vctrs::s3_register("dplyr::arrange", "resample_results", method = arrange_resample_results)
    vctrs::s3_register("dplyr::filter", "resample_results", method = filter_resample_results)
    vctrs::s3_register("dplyr::rename", "resample_results", method = rename_resample_results)
    vctrs::s3_register("dplyr::select", "resample_results", method = select_resample_results)
    vctrs::s3_register("dplyr::slice", "resample_results", method = slice_resample_results)
    vctrs::s3_register("dplyr::mutate", "iteration_results", method = mutate_iteration_results)
    vctrs::s3_register("dplyr::arrange", "iteration_results", method = arrange_iteration_results)
    vctrs::s3_register("dplyr::filter", "iteration_results", method = filter_iteration_results)
    vctrs::s3_register("dplyr::rename", "iteration_results", method = rename_iteration_results)
    vctrs::s3_register("dplyr::select", "iteration_results", method = select_iteration_results)
    vctrs::s3_register("dplyr::slice", "iteration_results", method = slice_iteration_results)
  } else {
    vctrs::s3_register("dplyr::dplyr_reconstruct", "tune_results", method = dplyr_reconstruct_tune_results)
    vctrs::s3_register("dplyr::dplyr_reconstruct", "resample_results", method = dplyr_reconstruct_resample_results)
    vctrs::s3_register("dplyr::dplyr_reconstruct", "iteration_results", method = dplyr_reconstruct_iteration_results)
  }
}
# nocov end
| /R/zzz.R | permissive | rorynolan/tune | R | false | false | 3,271 | r | # nocov start
.onLoad <- function(libname, pkgname) {
ns <- rlang::ns_env("tune")
# Modified version of the cli .onLoad()
# We can't use cli::symbol$tick because the width of the character
# looks awful when you output it alongside info / warning characters
makeActiveBinding(
"tune_symbol",
function() {
# If `cli.unicode` is set we use that
opt <- getOption("cli.unicode", NULL)
if (!is.null(opt)) {
if (isTRUE(opt)) return(tune_symbol_utf8) else return(tune_symbol_ascii)
}
# Otherwise we try to auto-detect
if (cli::is_utf8_output()) {
tune_symbol_utf8
} else if (is_latex_output()) {
tune_symbol_ascii
} else if (is_windows()) {
tune_symbol_windows
} else {
tune_symbol_ascii
}
},
ns
)
makeActiveBinding(
"tune_color",
function() {
opt <- getOption("tidymodels.dark", NULL)
if (!is.null(opt)) {
if (isTRUE(opt)) {
return(tune_color_dark)
} else {
return(tune_color_light)
}
}
tune_color_light
},
ns
)
# lazily register autoplot
s3_register("ggplot2::autoplot", "tune_results")
if (dplyr_pre_1.0.0()) {
vctrs::s3_register("dplyr::mutate", "tune_results", method = mutate_tune_results)
vctrs::s3_register("dplyr::arrange", "tune_results", method = arrange_tune_results)
vctrs::s3_register("dplyr::filter", "tune_results", method = filter_tune_results)
vctrs::s3_register("dplyr::rename", "tune_results", method = rename_tune_results)
vctrs::s3_register("dplyr::select", "tune_results", method = select_tune_results)
vctrs::s3_register("dplyr::slice", "tune_results", method = slice_tune_results)
vctrs::s3_register("dplyr::mutate", "resample_results", method = mutate_resample_results)
vctrs::s3_register("dplyr::arrange", "resample_results", method = arrange_resample_results)
vctrs::s3_register("dplyr::filter", "resample_results", method = filter_resample_results)
vctrs::s3_register("dplyr::rename", "resample_results", method = rename_resample_results)
vctrs::s3_register("dplyr::select", "resample_results", method = select_resample_results)
vctrs::s3_register("dplyr::slice", "resample_results", method = slice_resample_results)
vctrs::s3_register("dplyr::mutate", "iteration_results", method = mutate_iteration_results)
vctrs::s3_register("dplyr::arrange", "iteration_results", method = arrange_iteration_results)
vctrs::s3_register("dplyr::filter", "iteration_results", method = filter_iteration_results)
vctrs::s3_register("dplyr::rename", "iteration_results", method = rename_iteration_results)
vctrs::s3_register("dplyr::select", "iteration_results", method = select_iteration_results)
vctrs::s3_register("dplyr::slice", "iteration_results", method = slice_iteration_results)
} else {
vctrs::s3_register("dplyr::dplyr_reconstruct", "tune_results", method = dplyr_reconstruct_tune_results)
vctrs::s3_register("dplyr::dplyr_reconstruct", "resample_results", method = dplyr_reconstruct_resample_results)
vctrs::s3_register("dplyr::dplyr_reconstruct", "iteration_results", method = dplyr_reconstruct_iteration_results)
}
}
# nocov end
|
#' Learn the skeleton of a Gaussian LCD model.
#'
#' First learns a local undirected graph within each clique of the junction
#' tree, unions the complete graphs over the cliques, removes edges found
#' separated locally, and (optionally) prunes the remaining candidate edges
#' with PC-style conditional-independence tests of growing order.
#'
#' @param tree junction-tree object; \code{tree@cliques[[i]]$vset} gives the
#'   variable names of clique i.
#' @param cov sample covariance matrix with variable names as row names.
#' @param n sample size used by the CI tests.
#' @param p.value significance level for the CI tests.
#' @param drop if TRUE, also test the candidate edges returned by
#'   \code{.get.exed.cand1()} and drop those found independent.
#' @return \code{list(amat, sep.pairs)}: the skeleton adjacency matrix and a
#'   list of "sep.pair" objects recording the separators found.
`learn.skeleton.norm` <- function(tree, cov, n, p.value, drop = TRUE)
{
    validObject(tree)
    local.ug <- c()
    vset <- rownames(cov)
    n.clique <- length(tree@cliques)
    ## Learn a local undirected graph inside every clique.
    for (i in seq_len(n.clique)) {
        idx <- tree@cliques[[i]]$vset
        new.ug <- .get.localug.pc(cov[idx, idx], n, p.value)
        local.ug <- append(local.ug, new.ug)
    }
    ## Start from the union of complete graphs over the cliques ...
    p <- length(vset)
    amat <- matrix(0, p, p)
    rownames(amat) <- colnames(amat) <- vset
    for (i in seq_len(n.clique)) {
        idx <- tree@cliques[[i]]$vset
        amat[idx, idx] <- 1
    }
    diag(amat) <- 0
    ## ... then remove the edges found separated within the local graphs,
    ## keeping a record of each separating pair.
    sep.pairs <- c()
    for (i in seq_along(local.ug)) {
        u <- local.ug[[i]]@u
        v <- local.ug[[i]]@v
        if (amat[u, v] == 1) {
            amat[u, v] <- amat[v, u] <- 0
            sep.pairs <- append(sep.pairs, local.ug[i])
        }
    }
    ## PC-style pruning with conditioning sets of growing size `ord`;
    ## partially adapted from the "pcAlgo" function of the "pcalg" package.
    if (drop) {
        ind <- .get.exed.cand1(tree, amat)
        if (any(ind)) {
            ## drop = FALSE keeps a one-row result a matrix; the original
            ## code collapsed it to a vector, breaking ind[i, 1] below.
            ind <- ind[order(ind[, 1]), , drop = FALSE]
            ord <- 0
            seq_p <- seq_len(p)
            done <- FALSE
            remainingEdgeTests <- nrow(ind)
            while (!done && any(as.logical(amat))) {
                done <- TRUE
                for (i in seq_len(remainingEdgeTests)) {
                    x <- ind[i, 1]
                    y <- ind[i, 2]
                    if (amat[y, x]) {
                        ## Condition on subsets of the neighbours of x
                        ## (excluding y) of size `ord`.
                        nbrsBool <- amat[, x] == 1
                        nbrsBool[y] <- FALSE
                        nbrs <- seq_p[nbrsBool]
                        length_nbrs <- length(nbrs)
                        if (length_nbrs >= ord) {
                            if (length_nbrs > ord)
                                done <- FALSE
                            S <- seq_len(ord)
                            repeat {
                                p.val <- norm.ci.test(cov, n, vset[x], vset[y],
                                                      vset[nbrs[S]])$p.value
                                if (p.val > p.value) {
                                    ## Independence found: delete the edge and
                                    ## record the separator.
                                    amat[x, y] <- amat[y, x] <- 0
                                    pair <- new("sep.pair", u = vset[x],
                                                v = vset[y], s = vset[nbrs[S]])
                                    sep.pairs <- append(sep.pairs, pair)
                                    break
                                }
                                else {
                                    nextSet <- .getNextSet(length_nbrs, ord, S)
                                    if (nextSet$wasLast)
                                        break
                                    S <- nextSet$nextSet
                                }
                            }
                        }
                    }
                }
                ord <- ord + 1
            }
        }
    }
    return(list(amat = amat, sep.pairs = sep.pairs))
}
| /R/learn.skeleton.norm.R | no_license | cran/lcd | R | false | false | 3,991 | r | `learn.skeleton.norm` <- function(tree, cov, n, p.value, drop = TRUE)
{
validObject(tree)
local.ug <- c()
vset <- rownames(cov)
n.clique <- length(tree@cliques)
for(i in 1:n.clique){
idx <- tree@cliques[[i]]$vset
# if (length(idx) >= 10)
new.ug <- .get.localug.pc(cov[idx, idx], n, p.value)
# else
# new.ug <- .get.localug.ic(cov[idx, idx], n, p.value)
local.ug <- append(local.ug, new.ug)
}
p <- length(vset)
amat <- matrix(0, p, p)
rownames(amat) <- colnames(amat) <- vset
n.clique <- length(tree@cliques)
for(i in 1:n.clique){
idx <- tree@cliques[[i]]$vset
amat[idx, idx] <- 1
}
diag(amat) <- 0
sep.pairs <- c()
n.loc.sep <- length(local.ug)
if(n.loc.sep>0)
for(i in 1:n.loc.sep){
u <- local.ug[[i]]@u
v <- local.ug[[i]]@v
if(amat[u,v] == 1){
amat[u,v] <- amat[v,u] <- 0
sep.pairs <- append(sep.pairs, local.ug[i])
}
}
## the following code is partially adapted from the "pcAlgo" function
## from "pcalg" package in R
if (drop) {
ind <- .get.exed.cand1(tree, amat)
if (any(ind)) {
ind <- ind[order(ind[,1]),]
ord <- 0
seq_p <- 1:p
done <- FALSE
remainingEdgeTests <- nrow(ind)
while (!done && any(as.logical(amat))) {
done <- TRUE
for (i in 1:remainingEdgeTests) {
x <- ind[i, 1]
y <- ind[i, 2]
if (amat[y, x]) {
nbrsBool <- amat[, x] == 1
nbrsBool[y] <- FALSE
nbrs <- seq_p[nbrsBool]
length_nbrs <- length(nbrs)
if (length_nbrs >= ord) {
if (length_nbrs > ord)
done <- FALSE
S <- seq(length = ord)
repeat {
p.val <- norm.ci.test(cov, n, vset[x], vset[y],
vset[nbrs[S]])$p.value
if (p.val > p.value) {
amat[x, y] <- amat[y, x] <- 0
pair <- new("sep.pair", u = vset[x],
v = vset[y], s = vset[nbrs[S]])
sep.pairs <- append(sep.pairs, pair)
break
}
else {
nextSet <- .getNextSet(length_nbrs, ord, S)
if (nextSet$wasLast)
break
S <- nextSet$nextSet
}
}
}
}
}
ord <- ord + 1
}
}
## } else {
## if (any(ind)) {
## for(i in 1:nrow(ind)){
## pair <- new("sep.pair", u = vset[ind[i,1]],
## v = vset[ind[i,2]], s = character(0))
## cand <- setdiff(vset[amat[pair@u,]==1], pair@v)
## idx <- c(pair@u, pair@v, cand)
## res <- .get.sep(cov[idx, idx], n, p.value, pair@u, pair@v, cand)
## if(res$seped){
## amat[pair@u, pair@v] <- amat[pair@v, pair@u] <- 0
## sep.pairs <- append(sep.pairs, res$sep)
## }
## }
## }
## }
}
return(list(amat=amat, sep.pairs=sep.pairs))
}
|
numPerPatch213500 <- c(2518,2482)
| /NatureEE-data-archive/Run203121/JAFSdata/JAFSnumPerPatch213500.R | no_license | flaxmans/NatureEE2017 | R | false | false | 34 | r | numPerPatch213500 <- c(2518,2482)
|
% Generated by roxygen2 (4.1.1.9000): do not edit by hand
% Please edit documentation in R/rwirelesscom.R
\name{iqdensityplot}
\alias{iqdensityplot}
\title{IQ Density Plot}
\usage{
iqdensityplot(r, iq = "r")
}
\arguments{
\item{r}{- complex or real valued vector}
\item{iq}{- if iq = "r" (default) then plot density of Re(r) else if iq = "q" then plot density of Im(r)}
}
\description{
A convenience function to plot a density function of a vector containing the in-phase and
quadrature signal (plus noise).
}
\examples{
M=4
Es=1
Eb = Es/log2(M)
Nsymbols=1000
Nbits=log2(M)*Nsymbols
bits <- sample(0:1,Nbits, replace=TRUE)
s <- fqpskmod(bits)
EbNodB=4
No = Eb/(10^(EbNodB/10))
n <- fNo(Nsymbols,No,type="complex")
r <- s+n
iqdensityplot(r)
}
\seealso{
Other rwirelesscom functions: \code{\link{eyediagram}};
\code{\link{f16pskdemod}}; \code{\link{f16pskmod}};
\code{\link{f16qamdemod}}; \code{\link{f16qammod}};
\code{\link{f64qamdemod}}; \code{\link{f64qammod}};
\code{\link{f8pskdemod}}; \code{\link{f8pskmod}};
\code{\link{fNo}}; \code{\link{fbpskdemod}};
\code{\link{fbpskmod}}; \code{\link{fqpskdemod}};
\code{\link{iqscatterplot}}; \code{\link{stemplot}}
}
| /man/iqdensityplot.Rd | no_license | cran/rwirelesscom | R | false | false | 1,162 | rd | % Generated by roxygen2 (4.1.1.9000): do not edit by hand
% Please edit documentation in R/rwirelesscom.R
\name{iqdensityplot}
\alias{iqdensityplot}
\title{IQ Density Plot}
\usage{
iqdensityplot(r, iq = "r")
}
\arguments{
\item{r}{- complex or real valued vector}
\item{iq}{- if iq = "r" (default) then plot density of Re(r) else if iq = "q" then plot density of Im(r)}
}
\description{
A convenience function to plot a density function of a vector containing the in-phase and
quadrature signal (plus noise).
}
\examples{
M=4
Es=1
Eb = Es/log2(M)
Nsymbols=1000
Nbits=log2(M)*Nsymbols
bits <- sample(0:1,Nbits, replace=TRUE)
s <- fqpskmod(bits)
EbNodB=4
No = Eb/(10^(EbNodB/10))
n <- fNo(Nsymbols,No,type="complex")
r <- s+n
}
\seealso{
Other rwirelesscom functions: \code{\link{eyediagram}};
\code{\link{f16pskdemod}}; \code{\link{f16pskmod}};
\code{\link{f16qamdemod}}; \code{\link{f16qammod}};
\code{\link{f64qamdemod}}; \code{\link{f64qammod}};
\code{\link{f8pskdemod}}; \code{\link{f8pskmod}};
\code{\link{fNo}}; \code{\link{fbpskdemod}};
\code{\link{fbpskmod}}; \code{\link{fqpskdemod}};
\code{\link{iqscatterplot}}; \code{\link{stemplot}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-movie.R
\docType{data}
\name{movie_445}
\alias{movie_445}
\title{Lost in Space}
\format{
igraph object
}
\source{
https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/T4HBA3
https://www.imdb.com/title/tt0120738
}
\usage{
movie_445
}
\description{
Interactions of characters in the movie "Lost in Space" (1998)
}
\details{
The networks were built with a movie script parser. Even after multiple manual checks, the data set can still contain minor errors (e.g. typos in character names or wrongly parsed names). This may require some additional manual checks before using the data. Please report any such issues (https://github.com/schochastics/networkdata/issues/)
}
\references{
Kaminski, Jermain; Schober, Michael; Albaladejo, Raymond; Zastupailo, Oleksandr; Hidalgo, César, 2018, Moviegalaxies - Social Networks in Movies, https://doi.org/10.7910/DVN/T4HBA3, Harvard Dataverse, V3
}
\keyword{datasets}
| /man/movie_445.Rd | permissive | schochastics/networkdata | R | false | true | 1,009 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-movie.R
\docType{data}
\name{movie_445}
\alias{movie_445}
\title{Lost in Space}
\format{
igraph object
}
\source{
https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/T4HBA3
https://www.imdb.com/title/tt0120738
}
\usage{
movie_445
}
\description{
Interactions of characters in the movie "Lost in Space" (1998)
}
\details{
The networks were built with a movie script parser. Even after multiple manual checks, the data set can still contain minor errors (e.g. typos in character names or wrongly parsed names). This may require some additional manual checks before using the data. Please report any such issues (https://github.com/schochastics/networkdata/issues/)
}
\references{
Kaminski, Jermain; Schober, Michael; Albaladejo, Raymond; Zastupailo, Oleksandr; Hidalgo, César, 2018, Moviegalaxies - Social Networks in Movies, https://doi.org/10.7910/DVN/T4HBA3, Harvard Dataverse, V3
}
\keyword{datasets}
|
#' HC.R
#'
#' Calculate the Higher Criticism test statistic and p-value.
#'
#' @param test_stats Vector of test statistics for each factor in the set (i.e. marginal
#' test statistic for each SNP in a gene).
#' @param cor_mat d*d matrix of the correlations between all the test statistics in
#' the set, where d is the total number of test statistics in the set.
#' You only need to specify EITHER cor_mat OR pairwise_cors.
#' @param pairwise_cors A vector of all d(d-1)/2 pairwise correlations between the test
#' statistics. You only need to specify EITHER cor_mat OR pairwise_cors.
#'
#' @return A list with the elements:
#' \item{HC}{The observed Higher Criticism test statistic.}
#' \item{HC_pvalue}{The p-value of this observed value, given the size of the set and
#' correlation structure.}
#'
#' @export
#' @examples
#' # Should return statistic = 2.067475 and p_value = 0.2755146
#' set.seed(100)
#' Z_vec <- rnorm(5) + rep(1,5)
#' cor_Z <- matrix(data=0.2, nrow=5, ncol=5)
#' diag(cor_Z) <- 1
#' HC(test_stats=Z_vec, cor_mat=cor_Z)
HC <- function(test_stats, cor_mat=NULL, pairwise_cors=NULL) {

  # Parse inputs, do some error checking.
  param_list <- parse_input(test_stats=test_stats, cor_mat=cor_mat,
                            pairwise_cors=pairwise_cors)
  t_vec <- param_list$t_vec
  pairwise_cors <- param_list$pairwise_cors
  d <- length(t_vec)

  # Marginal two-sided p-values: t^2 ~ chisq(1) under the null.
  # lower.tail=FALSE is numerically safer than 1 - pchisq() for large |t|.
  p_values <- pchisq(t_vec^2, df=1, lower.tail=FALSE)
  i_vec <- seq_len(d)
  HC_stats <- sqrt(d) * (i_vec/d - p_values) / sqrt(p_values*(1-p_values))

  # Observed HC statistic
  h <- max(HC_stats, na.rm=TRUE)

  # A non-positive statistic can always be exceeded, so the p-value is 1.
  if (h <= 0) {
    return( list(HC=0, HC_pvalue=1) )
  }

  # Explicit inverse of the HC objective gives the p-value boundary for each
  # order statistic.  (The previous version pre-allocated HC_p_bounds with
  # rep(NA, d) and then immediately overwrote it, under a comment that
  # mislabeled these as "BJ bounds".)
  HC_p_bounds <- ((2*i_vec+h^2)/d - sqrt((2*i_vec/d+h^2/d)^2 - 4*i_vec^2/d^2 - 4*i_vec^2*h^2/d^3))/(2*(1+h^2/d))
  HC_z_bounds <- qnorm(1-HC_p_bounds/2)
  HC_z_bounds <- sort(HC_z_bounds, decreasing=FALSE)

  # qnorm can't handle more precision than 10^-16, and ebb_crossprob_cor_R
  # can only handle Z up to 8.2, so cap the bounds there.
  HC_z_bounds[HC_z_bounds > 8.2] <- 8.2

  # Send it to the C++.
  if (sum(abs(pairwise_cors)) == 0) {
    # For the independence flag in the C++, just have to send a number < -1.
    HC_corp <- ebb_crossprob_cor_R(d=d, bounds=HC_z_bounds, correlations=rep(-999,2))
  } else {
    HC_corp <- ebb_crossprob_cor_R(d=d, bounds=HC_z_bounds, correlations=pairwise_cors)
  }

  return( list(HC=h, HC_pvalue=HC_corp) )
}
| /GBJ/R/HC.R | no_license | akhikolla/InformationHouse | R | false | false | 2,456 | r | #' HC.R
#'
#' Calculate the Higher Criticism test statistic and p-value.
#'
#' @param test_stats Vector of test statistics for each factor in the set (i.e. marginal
#' test statistic for each SNP in a gene).
#' @param cor_mat d*d matrix of the correlations between all the test statistics in
#' the set, where d is the total number of test statistics in the set.
#' You only need to specify EITHER cor_mat OR pairwise_cors.
#' @param pairwise_cors A vector of all d(d-1)/2 pairwise correlations between the test
#' statistics. You only need to specify EITHER cor_mat OR pairwise_cors.
#'
#' @return A list with the elements:
#' \item{HC}{The observed Higher Criticism test statistic.}
#' \item{HC_pvalue}{The p-value of this observed value, given the size of the set and
#' correlation structure.}
#'
#' @export
#' @examples
#' # Should return statistic = 2.067475 and p_value = 0.2755146
#' set.seed(100)
#' Z_vec <- rnorm(5) + rep(1,5)
#' cor_Z <- matrix(data=0.2, nrow=5, ncol=5)
#' diag(cor_Z) <- 1
#' HC(test_stats=Z_vec, cor_mat=cor_Z)
HC <- function(test_stats, cor_mat=NULL, pairwise_cors=NULL) {

  # Parse inputs, do some error checking.
  param_list <- parse_input(test_stats=test_stats, cor_mat=cor_mat,
                            pairwise_cors=pairwise_cors)
  t_vec <- param_list$t_vec
  pairwise_cors <- param_list$pairwise_cors
  d <- length(t_vec)

  # Marginal two-sided p-values: t^2 ~ chisq(1) under the null.
  # lower.tail=FALSE is numerically safer than 1 - pchisq() for large |t|.
  p_values <- pchisq(t_vec^2, df=1, lower.tail=FALSE)
  i_vec <- seq_len(d)
  HC_stats <- sqrt(d) * (i_vec/d - p_values) / sqrt(p_values*(1-p_values))

  # Observed HC statistic
  h <- max(HC_stats, na.rm=TRUE)

  # A non-positive statistic can always be exceeded, so the p-value is 1.
  if (h <= 0) {
    return( list(HC=0, HC_pvalue=1) )
  }

  # Explicit inverse of the HC objective gives the p-value boundary for each
  # order statistic.  (The previous version pre-allocated HC_p_bounds with
  # rep(NA, d) and then immediately overwrote it, under a comment that
  # mislabeled these as "BJ bounds".)
  HC_p_bounds <- ((2*i_vec+h^2)/d - sqrt((2*i_vec/d+h^2/d)^2 - 4*i_vec^2/d^2 - 4*i_vec^2*h^2/d^3))/(2*(1+h^2/d))
  HC_z_bounds <- qnorm(1-HC_p_bounds/2)
  HC_z_bounds <- sort(HC_z_bounds, decreasing=FALSE)

  # qnorm can't handle more precision than 10^-16, and ebb_crossprob_cor_R
  # can only handle Z up to 8.2, so cap the bounds there.
  HC_z_bounds[HC_z_bounds > 8.2] <- 8.2

  # Send it to the C++.
  if (sum(abs(pairwise_cors)) == 0) {
    # For the independence flag in the C++, just have to send a number < -1.
    HC_corp <- ebb_crossprob_cor_R(d=d, bounds=HC_z_bounds, correlations=rep(-999,2))
  } else {
    HC_corp <- ebb_crossprob_cor_R(d=d, bounds=HC_z_bounds, correlations=pairwise_cors)
  }

  return( list(HC=h, HC_pvalue=HC_corp) )
}
|
# Unit tests for the bdmData constructor, its sigmao/shape accessor and
# replacement functions, and the plot method.
context("bdmData object")

test_that('initialize bdmData', {
    dat <- bdmData(index = runif(10), harvest = 1:10)
    # NOTE(review): expect_is() is deprecated in testthat 3e; switch to
    # expect_s3_class()/expect_s4_class() once bdmData's class system
    # (S3 vs S4) is confirmed.
    expect_is(dat, "bdmData")
})

test_that('sigmao(dat) initialise', {
    # check dimensions: one row per time step, one column per index series
    dat <- bdmData(index = runif(10), harvest = 1:10)
    expect_equal(dim(sigmao(dat)), c(10, 1))
    dat <- bdmData(index = matrix(runif(20), 10, 2), harvest = 1:10)
    expect_equal(dim(sigmao(dat)), c(10, 2))
    # check values: a full matrix is stored as supplied
    sigmao.in <- matrix(runif(20), 10, 2)
    dat <- bdmData(index = matrix(runif(20), 10, 2), harvest = 1:10, sigmao = sigmao.in)
    expect_equal(dim(sigmao(dat)), c(10, 2))
    expect_equal(sigmao(dat)[,1], sigmao.in[,1])
    expect_equal(sigmao(dat)[,2], sigmao.in[,2])
    # a vector of per-index values is recycled down the rows
    sigmao.in <- runif(2)
    dat <- bdmData(index = matrix(runif(20), 10, 2), harvest = 1:10, sigmao = sigmao.in)
    expect_equal(dim(sigmao(dat)), c(10, 2))
    expect_equal(sigmao(dat)[,1], rep(sigmao.in[1],10))
    expect_equal(sigmao(dat)[,2], rep(sigmao.in[2],10))
    # a scalar fills the whole column
    sigmao.in <- runif(1)
    dat <- bdmData(index = runif(10), harvest = 1:10, sigmao = sigmao.in)
    expect_equal(dim(sigmao(dat)), c(10, 1))
    expect_equal(sigmao(dat)[,1], rep(sigmao.in,10))
})

test_that('sigmao(dat) assignment', {
    # assign matrix for >1 index
    dat <- bdmData(index = matrix(runif(20), 10, 2), harvest = 1:10)
    sigmao.in <- matrix(runif(20), 10, 2)
    sigmao(dat) <- sigmao.in
    expect_equal(dim(sigmao(dat)), c(10, 2))
    expect_equal(sigmao(dat)[,1], sigmao.in[,1])
    expect_equal(sigmao(dat)[,2], sigmao.in[,2])
    # assign numeric for >1 index
    dat <- bdmData(index = matrix(runif(20), 10, 2), harvest = 1:10)
    sigmao.in <- runif(2)
    sigmao(dat) <- sigmao.in
    expect_equal(dim(sigmao(dat)), c(10, 2))
    expect_equal(sigmao(dat), matrix(sigmao.in, 10, 2, byrow = TRUE))
    sigmao.in <- runif(1)
    sigmao(dat) <- sigmao.in
    expect_equal(dim(sigmao(dat)), c(10, 2))
    expect_equal(sigmao(dat), matrix(sigmao.in, 10, 2))
    # assign numeric for 1 index
    dat <- bdmData(index = runif(10), harvest = 1:10)
    sigmao.in <- runif(1)
    sigmao(dat) <- sigmao.in
    expect_equal(dim(sigmao(dat)), c(10, 1))
    expect_equal(sigmao(dat), matrix(sigmao.in, 10, 1))
})

test_that('shape(dat) assignment', {
    dat <- bdmData(index = runif(10), harvest = 1:10)
    shape.in <- runif(1, 0.1, 0.9)
    shape(dat) <- shape.in
    # expect_lt() replaces the long-deprecated expect_less_than()
    expect_lt(abs(shape(dat) - shape.in), .Machine$double.eps^0.25)
    # recover the production-shape parameter n and check it reproduces shape.in
    n <- shape(dat, 'n')
    expect_lt(abs((1/n)^(1/(n-1)) - shape.in), .Machine$double.eps^0.25)
})

test_that('plot bdmData', {
    # load some data
    data(albio)
    dat <- bdmData(harvest = albio$catch, index = albio$cpue, time = rownames(albio))
    # plots
    gg <- plot(dat)
    expect_is(gg, "ggplot")
})
| /tests/testthat/test_bdmData.R | no_license | cttedwards/bdm | R | false | false | 2,777 | r |
context("bdmData object")
test_that('initialize bdmData', {
dat <- bdmData(index = runif(10), harvest = 1:10)
expect_is(dat, "bdmData")
})
test_that('sigmao(dat) initialise', {
# check dimensions
dat <- bdmData(index = runif(10), harvest = 1:10)
expect_equal(dim(sigmao(dat)), c(10, 1))
dat <- bdmData(index = matrix(runif(20), 10, 2), harvest = 1:10)
expect_equal(dim(sigmao(dat)), c(10, 2))
# check values
sigmao.in <- matrix(runif(20), 10, 2)
dat <- bdmData(index = matrix(runif(20), 10, 2), harvest = 1:10, sigmao = sigmao.in)
expect_equal(dim(sigmao(dat)), c(10, 2))
expect_equal(sigmao(dat)[,1], sigmao.in[,1])
expect_equal(sigmao(dat)[,2], sigmao.in[,2])
sigmao.in <- runif(2)
dat <- bdmData(index = matrix(runif(20), 10, 2), harvest = 1:10, sigmao = sigmao.in)
expect_equal(dim(sigmao(dat)), c(10, 2))
expect_equal(sigmao(dat)[,1], rep(sigmao.in[1],10))
expect_equal(sigmao(dat)[,2], rep(sigmao.in[2],10))
sigmao.in <- runif(1)
dat <- bdmData(index = runif(10), harvest = 1:10, sigmao = sigmao.in)
expect_equal(dim(sigmao(dat)), c(10, 1))
expect_equal(sigmao(dat)[,1], rep(sigmao.in,10))
})
test_that('sigmao(dat) assignment', {
# assign matrix for >1 index
dat <- bdmData(index = matrix(runif(20), 10, 2), harvest = 1:10)
sigmao.in <- matrix(runif(20), 10, 2)
sigmao(dat) <- sigmao.in
expect_equal(dim(sigmao(dat)), c(10, 2))
expect_equal(sigmao(dat)[,1], sigmao.in[,1])
expect_equal(sigmao(dat)[,2], sigmao.in[,2])
# assign numeric for >1 index
dat <- bdmData(index = matrix(runif(20), 10, 2), harvest = 1:10)
sigmao.in <- runif(2)
sigmao(dat) <- sigmao.in
expect_equal(dim(sigmao(dat)), c(10, 2))
expect_equal(sigmao(dat), matrix(sigmao.in, 10, 2, byrow = TRUE))
sigmao.in <- runif(1)
sigmao(dat) <- sigmao.in
expect_equal(dim(sigmao(dat)), c(10, 2))
expect_equal(sigmao(dat), matrix(sigmao.in, 10, 2))
# assign numeric for 1 index
dat <- bdmData(index = runif(10), harvest = 1:10)
sigmao.in <- runif(1)
sigmao(dat) <- sigmao.in
expect_equal(dim(sigmao(dat)), c(10, 1))
expect_equal(sigmao(dat), matrix(sigmao.in, 10, 1))
})
test_that('shape(dat) assignment', {
dat <- bdmData(index = runif(10), harvest = 1:10)
shape.in <- runif(1, 0.1, 0.9)
shape(dat) <- shape.in
expect_less_than(abs(shape(dat) - shape.in), .Machine$double.eps^0.25)
n <- shape(dat, 'n')
expect_less_than(abs((1/n)^(1/(n-1)) - shape.in), .Machine$double.eps^0.25)
})
test_that('plot bdmData', {
# load some data
data(albio)
dat <- bdmData(harvest = albio$catch, index = albio$cpue, time = rownames(albio))
# plots
gg <- plot(dat)
expect_is(gg, "ggplot")
})
|
library(DSpat)
### Name: dspat
### Title: Fits spatial model to distance sampling data
### Aliases: dspat
### ** Examples

# get example data
data(DSpat.lines)
data(DSpat.obs)
data(DSpat.covariates)

# Fit model with covariates used to create the data
sim.dspat <- dspat(~ river + factor(habitat),
                   study.area = owin(xrange = c(0, 100), yrange = c(0, 100)),
                   obs = DSpat.obs, lines = DSpat.lines, covariates = DSpat.covariates,
                   epsvu = c(4, .1), width = 0.4)
## No test:
# Print
sim.dspat
# Summarize results
summary(sim.dspat)
# Extract coefficients
coef.intensity <- coef(sim.dspat)$intensity
coef.detection <- coef(sim.dspat)$detection
# Extract variance-covariance matrix (inverse information matrix)
J.inv <- vcov(sim.dspat)
# Compute AIC
AIC(sim.dspat)
# Visualize intensity (no. animals per area) and estimate abundance
mu.B <- integrate.intensity(sim.dspat, dimyx = 100)
cat('Abundance = ', round(mu.B$abundance, 0), "\n")
dev.new()
plot(mu.B$lambda, col = gray(1 - c(1:100)/120), main = 'Estimated Intensity')
plot(sim.dspat$model$Q$data, add = TRUE)
plot(owin(poly = sim.dspat$transect), add = TRUE)
plot(sim.dspat$lines.psp, lty = 2, add = TRUE)
# Compute se and confidence interval for abundance without over-dispersion
mu.B <- integrate.intensity(sim.dspat, se = TRUE, dimyx = 100)
cat("Standard Error = ", round(mu.B$precision$se, 0), "\n",
    "95 Percent Conf. Int. = (", round(mu.B$precision$lcl.95, 0), ',',
    round(mu.B$precision$ucl.95, 0), ")", "\n")
# Compute se and confidence interval for abundance with over-dispersion estimate
dev.new()
# The rest of the example has been put into a function to speed up package
# checking. To run it, type do.dspat()
# (BUG FIX: the function was previously defined as do.spat, which did not
# match the do.dspat() name given in this instruction.)
do.dspat <- function()
{
  mu.B <- integrate.intensity(sim.dspat, se = TRUE, od = TRUE, reps = 30, dimyx = 100)
  cat("Standard Error (corrected) = ", round(mu.B$precision.od$se, 0), "\n",
      "95 Percent Conf. Int. (corrected) = (", round(mu.B$precision.od$lcl.95, 0),
      ",", round(mu.B$precision.od$ucl.95, 0), ")", "\n")
  # Fit model with smooth of x and y
  sim.dspat <- dspat(~ s(x) + s(y), study.area = owin(xrange = c(0, 100), yrange = c(0, 100)),
                     obs = DSpat.obs, lines = DSpat.lines, covariates = DSpat.covariates,
                     epsvu = c(1, .01), width = 0.4)
  AIC(sim.dspat)
  # Visualize intensity (no. animals per area) and estimate abundance
  mu.B <- integrate.intensity(sim.dspat, dimyx = 100, se = TRUE)
  cat('Abundance = ', round(mu.B$abundance, 0), "\n")
  cat("Standard Error = ", round(mu.B$precision$se, 0), "\n",
      "95 Percent Conf. Int. = (", round(mu.B$precision$lcl.95, 0),
      ",", round(mu.B$precision$ucl.95, 0), ")", "\n")
  dev.new()
  plot(mu.B$lambda, col = gray(1 - c(1:100)/120), main = 'Estimated Intensity')
  plot(sim.dspat$model$Q$data, add = TRUE)
  plot(owin(poly = sim.dspat$transect), add = TRUE)
  plot(sim.dspat$lines.psp, lty = 2, add = TRUE)
  #
  # Fit model with smooth of x and y with interaction
  #
  sim.dspat <- dspat(~ s(x,y), study.area = owin(xrange = c(0, 100), yrange = c(0, 100)),
                     obs = DSpat.obs, lines = DSpat.lines, covariates = DSpat.covariates,
                     epsvu = c(1, .01), width = 0.4)
  AIC(sim.dspat)
  # Visualize intensity (no. animals per area) and estimate abundance
  mu.B <- integrate.intensity(sim.dspat, dimyx = 100, se = TRUE)
  cat('Abundance = ', round(mu.B$abundance, 0), "\n")
  cat("Standard Error = ", round(mu.B$precision$se, 0), "\n",
      "95 Percent Conf. Int. = (", round(mu.B$precision$lcl.95, 0),
      ",", round(mu.B$precision$ucl.95, 0), ")", "\n")
  dev.new()
  plot(mu.B$lambda, col = gray(1 - c(1:100)/120), main = 'Estimated Intensity')
  plot(sim.dspat$model$Q$data, add = TRUE)
  plot(owin(poly = sim.dspat$transect), add = TRUE)
  plot(sim.dspat$lines.psp, lty = 2, add = TRUE)
}
## End(No test)
| /data/genthat_extracted_code/DSpat/examples/dspat.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 3,725 | r | library(DSpat)
### Name: dspat
### Title: Fits spatial model to distance sampling data
### Aliases: dspat
### ** Examples

# get example data
data(DSpat.lines)
data(DSpat.obs)
data(DSpat.covariates)

# Fit model with covariates used to create the data
sim.dspat <- dspat(~ river + factor(habitat),
                   study.area = owin(xrange = c(0, 100), yrange = c(0, 100)),
                   obs = DSpat.obs, lines = DSpat.lines, covariates = DSpat.covariates,
                   epsvu = c(4, .1), width = 0.4)
## No test:
# Print
sim.dspat
# Summarize results
summary(sim.dspat)
# Extract coefficients
coef.intensity <- coef(sim.dspat)$intensity
coef.detection <- coef(sim.dspat)$detection
# Extract variance-covariance matrix (inverse information matrix)
J.inv <- vcov(sim.dspat)
# Compute AIC
AIC(sim.dspat)
# Visualize intensity (no. animals per area) and estimate abundance
mu.B <- integrate.intensity(sim.dspat, dimyx = 100)
cat('Abundance = ', round(mu.B$abundance, 0), "\n")
dev.new()
plot(mu.B$lambda, col = gray(1 - c(1:100)/120), main = 'Estimated Intensity')
plot(sim.dspat$model$Q$data, add = TRUE)
plot(owin(poly = sim.dspat$transect), add = TRUE)
plot(sim.dspat$lines.psp, lty = 2, add = TRUE)
# Compute se and confidence interval for abundance without over-dispersion
mu.B <- integrate.intensity(sim.dspat, se = TRUE, dimyx = 100)
cat("Standard Error = ", round(mu.B$precision$se, 0), "\n",
    "95 Percent Conf. Int. = (", round(mu.B$precision$lcl.95, 0), ',',
    round(mu.B$precision$ucl.95, 0), ")", "\n")
# Compute se and confidence interval for abundance with over-dispersion estimate
dev.new()
# The rest of the example has been put into a function to speed up package
# checking. To run it, type do.dspat()
# (BUG FIX: the function was previously defined as do.spat, which did not
# match the do.dspat() name given in this instruction.)
do.dspat <- function()
{
  mu.B <- integrate.intensity(sim.dspat, se = TRUE, od = TRUE, reps = 30, dimyx = 100)
  cat("Standard Error (corrected) = ", round(mu.B$precision.od$se, 0), "\n",
      "95 Percent Conf. Int. (corrected) = (", round(mu.B$precision.od$lcl.95, 0),
      ",", round(mu.B$precision.od$ucl.95, 0), ")", "\n")
  # Fit model with smooth of x and y
  sim.dspat <- dspat(~ s(x) + s(y), study.area = owin(xrange = c(0, 100), yrange = c(0, 100)),
                     obs = DSpat.obs, lines = DSpat.lines, covariates = DSpat.covariates,
                     epsvu = c(1, .01), width = 0.4)
  AIC(sim.dspat)
  # Visualize intensity (no. animals per area) and estimate abundance
  mu.B <- integrate.intensity(sim.dspat, dimyx = 100, se = TRUE)
  cat('Abundance = ', round(mu.B$abundance, 0), "\n")
  cat("Standard Error = ", round(mu.B$precision$se, 0), "\n",
      "95 Percent Conf. Int. = (", round(mu.B$precision$lcl.95, 0),
      ",", round(mu.B$precision$ucl.95, 0), ")", "\n")
  dev.new()
  plot(mu.B$lambda, col = gray(1 - c(1:100)/120), main = 'Estimated Intensity')
  plot(sim.dspat$model$Q$data, add = TRUE)
  plot(owin(poly = sim.dspat$transect), add = TRUE)
  plot(sim.dspat$lines.psp, lty = 2, add = TRUE)
  #
  # Fit model with smooth of x and y with interaction
  #
  sim.dspat <- dspat(~ s(x,y), study.area = owin(xrange = c(0, 100), yrange = c(0, 100)),
                     obs = DSpat.obs, lines = DSpat.lines, covariates = DSpat.covariates,
                     epsvu = c(1, .01), width = 0.4)
  AIC(sim.dspat)
  # Visualize intensity (no. animals per area) and estimate abundance
  mu.B <- integrate.intensity(sim.dspat, dimyx = 100, se = TRUE)
  cat('Abundance = ', round(mu.B$abundance, 0), "\n")
  cat("Standard Error = ", round(mu.B$precision$se, 0), "\n",
      "95 Percent Conf. Int. = (", round(mu.B$precision$lcl.95, 0),
      ",", round(mu.B$precision$ucl.95, 0), ")", "\n")
  dev.new()
  plot(mu.B$lambda, col = gray(1 - c(1:100)/120), main = 'Estimated Intensity')
  plot(sim.dspat$model$Q$data, add = TRUE)
  plot(owin(poly = sim.dspat$transect), add = TRUE)
  plot(sim.dspat$lines.psp, lty = 2, add = TRUE)
}
## End(No test)
|
# Load and preprocess the Titanic data for modelling.
#
# Side effects: every assignment below uses `<<-`, so the cleaned data
# frame is written to the GLOBAL variable `training` (the helper
# functions replaceZeroFareByClass/replaceNAPClassByFare read that
# global while the apply() calls below run). The cleaned data frame is
# also returned.
#
# NOTE(review): the file read is "data/test.csv" although the result is
# named `training` -- confirm which split this is meant to prepare.
sub2 <- function() {
    library(randomForest)
    training <<- read.csv("data/test.csv")
    #remove name, ticket and cabin
    training$name <<- NULL
    training$ticket <<- NULL
    training$cabin <<- NULL
    #convert sex and embarked to numeric
    training$sex <<- as.numeric(training$sex)
    training$embarked <<- as.numeric(training$embarked)
    #remove rows where survived is NA and set to factor
    #training <<- training[!is.na(training$survived),]
    #training$survived <<- as.factor(training$survived)
    # replace NA sex with random 1 or 2
    # NOTE(review): sample(c(1,2),1) draws ONE value that is recycled into
    # every missing row -- per-row random draws may have been intended.
    training$sex[is.na(training$sex)]<<- sample(c(1,2),1)
    # replace NA age with mean age (this might be a good place to improve)
    training$age[is.na(training$age)]<<- mean(training$age,na.rm=T)
    #replace NA sibsp with mean sibsp
    training$sibsp[is.na(training$sibsp)]<<- mean(training$sibsp,na.rm=T)
    #replace NA parch with mean parch
    training$parch[is.na(training$parch)]<<- mean(training$parch,na.rm=T)
    # fill in 0.00 for fare based on mean for pclass
    training$fare <<- apply(training,1,function(x) {replaceZeroFareByClass(x)})
    # fill in NA for pclass based on fare
    training$pclass <<- apply(training,1,function(x) {replaceNAPClassByFare(x)})
    return (training)
}
# Impute a zero fare with the mean fare of the passenger's class.
# `passenger` is a single row of the global data frame `training`, as
# produced by apply(training, 1, ...): position 7 holds the fare and
# position 2 the pclass. Reads the global `training`.
replaceZeroFareByClass <- function(passenger) {
    fare <- passenger[7]
    if (fare != 0) {
        return(fare)
    }
    pclass <- passenger[2]
    if (is.na(pclass)) {
        # class unknown: fall back to the overall mean fare
        mean(training$fare)
    } else {
        mean(training$fare[training$pclass == pclass])
    }
}
# Impute a missing passenger class (pclass) from the ticket fare.
#
# `passenger` is a single row of the training data as produced by
# apply(training, 1, ...): position 2 is pclass, position 5 is sibsp and
# position 7 is fare.
#
# Rule of thumb: cheap fares (< 11) -> 3rd class; mid fares (11-29) ->
# 2nd class; expensive fares -> 1st class, unless the passenger travels
# with a large family (sibsp >= 4), which suggests 3rd class.
replaceNAPClassByFare <- function(passenger) {
    if (!is.na(passenger[2])) {
        # pclass already known: keep it
        return(passenger[2])
    }
    fare <- passenger[7]
    if (fare < 11) {
        return(3)
    } else if (fare >= 11 && fare <= 29) {  # scalar test: && instead of vectorized &
        return(2)
    } else if (fare > 29) {
        sibs <- passenger[5]
        if (sibs >= 4) {
            return(3)
        } else {
            return(1)
        }
    }
}
# Train a random forest on the preprocessed training data and predict
# survival for the test set.
#
# Reads "sub2train.csv" / "sub2test.csv" from the working directory.
# The final expression (an assignment) makes the function return the
# test-set predictions invisibly.
prediction <- function() {
    library(randomForest)
    library(plyr)
    training <- read.csv("sub2train.csv")
    training$survived <- as.factor(training$survived)
    testing <- read.csv("sub2test.csv")
    # placeholder column so the test data has the same columns as training
    testing$survived <- 0
    train.rf <- randomForest(survived ~.,data=training)
    trainp<-predict(train.rf,training)
    # training-set accuracy (percent); computed for inspection, not returned
    accuracy<-((length(which(trainp == training$survived))) / length(training$survived)) * 100
    p<-predict(train.rf,testing)
} | /Titanic/submission2/submission2.R | no_license | shannonrush/Contests | R | false | false | 2,288 | r | sub2 <- function() {
    library(randomForest)
    # Side effect: `<<-` writes the cleaned data frame to the GLOBAL
    # variable `training`, which the helper functions below read.
    training <<- read.csv("data/test.csv")
    #remove name, ticket and cabin
    training$name <<- NULL
    training$ticket <<- NULL
    training$cabin <<- NULL
    #convert sex and embarked to numeric
    training$sex <<- as.numeric(training$sex)
    training$embarked <<- as.numeric(training$embarked)
    #remove rows where survived is NA and set to factor
    #training <<- training[!is.na(training$survived),]
    #training$survived <<- as.factor(training$survived)
    # replace NA sex with random 1 or 2
    # NOTE(review): sample(c(1,2),1) draws ONE value that is recycled into
    # every missing row -- per-row random draws may have been intended.
    training$sex[is.na(training$sex)]<<- sample(c(1,2),1)
    # replace NA age with mean age (this might be a good place to improve)
    training$age[is.na(training$age)]<<- mean(training$age,na.rm=T)
    #replace NA sibsp with mean sibsp
    training$sibsp[is.na(training$sibsp)]<<- mean(training$sibsp,na.rm=T)
    #replace NA parch with mean parch
    training$parch[is.na(training$parch)]<<- mean(training$parch,na.rm=T)
    # fill in 0.00 for fare based on mean for pclass
    training$fare <<- apply(training,1,function(x) {replaceZeroFareByClass(x)})
    # fill in NA for pclass based on fare
    training$pclass <<- apply(training,1,function(x) {replaceNAPClassByFare(x)})
    return (training)
}
# Impute a zero fare with the mean fare of the passenger's class.
# `passenger` is a single row of the global data frame `training`, as
# produced by apply(training, 1, ...): position 7 holds the fare and
# position 2 the pclass. Reads the global `training`.
replaceZeroFareByClass <- function(passenger) {
    fare <- passenger[7]
    if (fare != 0) {
        return(fare)
    }
    pclass <- passenger[2]
    if (is.na(pclass)) {
        # class unknown: fall back to the overall mean fare
        mean(training$fare)
    } else {
        mean(training$fare[training$pclass == pclass])
    }
}
# Impute a missing passenger class (pclass) from the ticket fare.
#
# `passenger` is a single row of the training data as produced by
# apply(training, 1, ...): position 2 is pclass, position 5 is sibsp and
# position 7 is fare.
#
# Rule of thumb: cheap fares (< 11) -> 3rd class; mid fares (11-29) ->
# 2nd class; expensive fares -> 1st class, unless the passenger travels
# with a large family (sibsp >= 4), which suggests 3rd class.
replaceNAPClassByFare <- function(passenger) {
    if (!is.na(passenger[2])) {
        # pclass already known: keep it
        return(passenger[2])
    }
    fare <- passenger[7]
    if (fare < 11) {
        return(3)
    } else if (fare >= 11 && fare <= 29) {  # scalar test: && instead of vectorized &
        return(2)
    } else if (fare > 29) {
        sibs <- passenger[5]
        if (sibs >= 4) {
            return(3)
        } else {
            return(1)
        }
    }
}
# Train a random forest on the preprocessed training data and predict
# survival for the test set.
#
# Reads "sub2train.csv" / "sub2test.csv" from the working directory.
# The final expression (an assignment) makes the function return the
# test-set predictions invisibly.
prediction <- function() {
    library(randomForest)
    library(plyr)
    training <- read.csv("sub2train.csv")
    training$survived <- as.factor(training$survived)
    testing <- read.csv("sub2test.csv")
    # placeholder column so the test data has the same columns as training
    testing$survived <- 0
    train.rf <- randomForest(survived ~.,data=training)
    trainp<-predict(train.rf,training)
    # training-set accuracy (percent); computed for inspection, not returned
    accuracy<-((length(which(trainp == training$survived))) / length(training$survived)) * 100
    p<-predict(train.rf,testing)
}
# Assignment 3_4 - Session 3
#Q1: Import the Titanic Dataset from the link Titanic Data Set.
#Perform the following:
# a. Preprocess the passenger names to come up with a list of titles that represent families
# and represent using appropriate visualization graph.
# b. Represent the proportion of people survived from the family size using a graph.
# c. Impute the missing values in Age variable using Mice Library, create two different
#graphs showing Age distribution before and after imputation.

#Solution 1:
#a)
#Importing the titanic dataset.
library(readxl)
titanic <- read_xls("titanic3.xls")

#b) bar chart of survival counts
library(ggplot2)
ggplot(data = titanic) + geom_bar(mapping = aes(x = survived))

#c) impute missing ages with mice
sum(is.na(titanic$age))
# Total 263 missing values in age variable of titanic dataset
#install.packages("mice")
library(mice)
md.pattern(titanic)
mice_imp <- mice(titanic, m = 5, maxit = 40)
# BUG FIX: complete() previously referenced an undefined object
# `mice_imputes`; the imputation object created above is `mice_imp`.
titanic_imp <- complete(mice_imp, 5)
sum(is.na(titanic_imp$age))

#distribution before and after imputation
hist(titanic$age, main='Original Age histogram ', col = "blue")
hist(titanic_imp$age, main="Imputed Age histogram", col="green")
| /Assignment_3_4.R | no_license | sheetalnishad/assignment-3.4 | R | false | false | 1,157 | r | # Assignment 3_4 - Session 3
#Q1: Import the Titanic Dataset from the link Titanic Data Set.
#Perform the following:
# a. Preprocess the passenger names to come up with a list of titles that represent families
# and represent using appropriate visualization graph.
# b. Represent the proportion of people survived from the family size using a graph.
# c. Impute the missing values in Age variable using Mice Library, create two different
#graphs showing Age distribution before and after imputation.

#Solution 1:
#a)
#Importing the titanic dataset.
library(readxl)
titanic <- read_xls("titanic3.xls")

#b) bar chart of survival counts
library(ggplot2)
ggplot(data = titanic) + geom_bar(mapping = aes(x = survived))

#c) impute missing ages with mice
sum(is.na(titanic$age))
# Total 263 missing values in age variable of titanic dataset
#install.packages("mice")
library(mice)
md.pattern(titanic)
mice_imp <- mice(titanic, m = 5, maxit = 40)
# BUG FIX: complete() previously referenced an undefined object
# `mice_imputes`; the imputation object created above is `mice_imp`.
titanic_imp <- complete(mice_imp, 5)
sum(is.na(titanic_imp$age))

#distribution before and after imputation
hist(titanic$age, main='Original Age histogram ', col = "blue")
hist(titanic_imp$age, main="Imputed Age histogram", col="green")
|
# Author:
#   Brandon Dey
#
# Date:
#   9.9.18
#
# Purpose:
#   This script is the tag-a-long .R for ODSC article 3 on IDW geospatial interpolation.
#

#################
## ENVIRONMENT ##
#################

# load libraries
library(tidyverse)
library(rgdal)
library(leaflet)
library(geosphere)
library(directlabels)
library(RColorBrewer)

# get data: two brewery CSV extracts
# (left-assignment replaces the original right-assign `->`, which is
# discouraged style; behavior is identical)
breweries     <- read.csv("./Data/Raw/breweries-brew-pubs-in-the-usa/7160_1.csv")
breweries_new <- read.csv("./Data/Raw/breweries-brew-pubs-in-the-usa/8260_1.csv")
names(breweries)
names(breweries_new)

# explore data
glimpse(breweries)

# report then remove rows with missing values
paste0("Missing values in ", nrow(breweries_new) - na.omit(breweries_new) %>% nrow, " observations of ", nrow(breweries_new))
breweries_new <- na.omit(breweries_new)
# Find epicenter of brewery activity in each state
geographic_average <- function(lon, lat, weight = NULL) {
if (is.null(weight)) {
weight <- rep(1, length(lon))
}
lon <- weighted.mean(lon, w = weight)
lat <- weighted.mean(lat, w = weight)
data.frame(lon = lon, lat = lat)
}
# limit to breweries in the continguous U.S.
breweries_new %>%
filter(between(longitude, -124.446359, -70.6539763) &
between(latitude, 25.8192058, 47.3873012) &
nchar(as.character(state)) == 2) -> breweries_new
breweries_us <- breweries_new
epicenters <- data.frame(state = unique(breweries_us$province), lon = NA, lat = NA, breweries = NA)
epicenters <- filter(epicenters, str_count(state) == 2)
for(s in 1:nrow(epicenters)) {
state <- epicenters[s,1]
s_df <- filter(breweries_us, province == state)
s_epi <- geographic_average(lon = s_df$longitude, lat = s_df$latitude)
s_brs <- nrow(s_df)
epicenters[s, 2] <- s_epi[,1]
epicenters[s, 3] <- s_epi[,2]
epicenters[s, 4] <- s_brs
}
# Find U.S. Brewery Epicenter
geographic_average(lon = breweries_us$longitude,
breweries_us$latitude) -> nat_epicenter
# plot epicenters
ggplot(epicenters,
aes(x = lon, y = lat)) +
xlim(-125, -65) +
ylim(24, 51) +
borders('state',
alpha = 1,
size = 0.5,
fill = "#fec44f") +
# plot breweries
geom_point(data = breweries_us,
aes(x = longitude,
y = latitude),
alpha = .25,
col = "#fff7bc",
size = 1) +
# plot state epicenters
geom_point(col = "#d95f0e",
aes(size = breweries)) +
# plot state labels
geom_text(aes(x = lon,
y = lat,
label = state),
nudge_y = .25) +
labs(title = "The State(s) of Breweries",
size = "Geographic \"Center\" \nof State's Beer Scene\n",
caption = "Figure 1: \nEvery brewery and/or brew pub in the contiguous U.S. \nThe size of each dark orange dot is proportional to the count of breweries and/or brew pubs in that state. \nData Source: https://www.kaggle.com/datafiniti/breweries-brew-pubs-in-the-usa/version/2 ") +
theme_void(base_size = 14) +
theme(plot.title = element_text(hjust = 0.5),
plot.caption = element_text(hjust = 1)) -> gg_us
ggsave(gg_us,
filename = "./Plots/US.jpeg",
dpi = 1200,
width = 11,
height = 6)
# summarize cities
breweries_us %>%
group_by(city, province) %>%
summarize(breweries = n_distinct(name),
lat = mean(latitude), # mean bc some cities include adjacent areas.
lon = mean(longitude)) %>%
ungroup %>%
mutate(state = province) %>%
select(-province) -> city_sum
# join epicenters to compare geographic average to just picking the city with most breweries
city_sum %>% left_join(epicenters,
by = "state") %>%
mutate(lat = lat.x,
lon = lon.x,
lat_geoavg = lat.y,
lon_geoavg = lon.y,
breweries_state = breweries.y,
breweries_city = breweries.x) %>%
select(-contains(".")) -> city_sum
city_sum %>%
filter(nchar(as.character(state)) == 2) -> city_sum
# Plot WI
ggplot(data = filter(city_sum, state == "WI"),
aes(x = lon,
y = lat)) +
borders("state", "WI",
fill = "#203731",
col = "#FFB612") +
# plot cities with breweries
geom_point(aes(x = lon,
y = lat,
size = breweries_city),
alpha = .75,
fill = "#FFB612",
col = "#FFB612") +
# plot epicenter
geom_point(aes(x = lon_geoavg,
y = lat_geoavg),
col = "#d95f0e") +
# Epicenter label
geom_dl(aes(label = "Geographic \"Center\" \n of Beer Scene", x = lon_geoavg, y = lat_geoavg),
method = list(dl.trans(x = x - 1.2), "last.points", cex = 0.8)) +
# MKE label
geom_dl(data = filter(city_sum, city == "Milwaukee" & state == "WI"),
aes(label = "Better Epicenter of \n Beer Scene", x = lon, y = lat), method = list(dl.trans(x = x + 0.5), "last.points", cex = 0.8)) +
labs(caption = "Figure 2: \nEvery location in Wisconsin with a brewery and/or brew pub. \nThe size of each dot is proportional to the count of breweries and/or brew pubs in that city. \nData Source: https://www.kaggle.com/datafiniti/breweries-brew-pubs-in-the-usa/version/2 ") +
scale_size_area() +
theme_void() +
theme(plot.title = element_text(hjust = 0.5),
plot.caption = element_text(hjust = 0)) +
coord_quickmap() +
guides(size = F,
alpha = F,
col = F) -> gg_wi
ggsave(gg_wi,
filename = "./Plots/WI.jpeg",
dpi = 1200,
width = 7,
height = 7)
# Plot OR
ggplot(data = filter(city_sum, state == "OR"),
aes(x = lon,
y = lat)) +
borders("state", "OR",
fill = "#002A86",
col = "#FFEA0F") +
# plot cities with breweries
geom_point(aes(x = lon,
y = lat,
size = breweries_city,
alpha = 1),
col = "#FFEA0F") +
# Portland label
geom_dl(data = filter(city_sum, city == "Portland" & state == "OR"),
aes(label = "Portland", x = lon, y = lat), method = list(dl.trans(x = x , y = y + .5), "last.points", cex = 0.8)) +
labs(caption = "Figure 3: \nEvery location in Oregon with a brewery and/or brew pub. \nThe size of each dot is proportional to the count of breweries and/or brew pubs in that city. \nData Source: https://www.kaggle.com/datafiniti/breweries-brew-pubs-in-the-usa/version/2 ") +
scale_size_area() +
theme_void() +
theme(plot.title = element_text(hjust = 0.5),
plot.caption = element_text(hjust = 0)) +
coord_quickmap() +
guides(size = F,
alpha = F,
col = F) -> gg_or
ggsave(gg_or,
filename = "./Plots/OR.jpeg",
dpi = 1200,
width = 7,
height = 7)
###############################################################################
## DISTANCE MATRIX ############################################################
###############################################################################
# Create a distance matrix of distances between every brewery and the nearest brewery in WI and OR.
# WI
breweries_wi <- breweries_us[breweries_us$province == "WI",]
mat_wi <- distm(breweries_wi[,c("longitude","latitude")],
breweries_wi[,c("longitude","latitude")],
fun = distVincentyEllipsoid) # The shortest distance between two points (i.e., the 'great-circle-distance' or 'as the crow flies'), according to the 'Vincenty (ellipsoid)' method. This method uses an ellipsoid and the results are very accurate. The method is computationally more intensive than the other great-circled methods in this package. # Earth isn't a perfect sphere. It's not. It's eppiloidal.
# convert meters to miles
mat_wi <- mat_wi/1609.344
# don't want to include itself so replace with a big number that'll never be the smallest.
mat_wi[mat_wi == 0] <- 1000000
breweries_wi %>%
mutate(closest_pub = breweries_wi$name[max.col(-mat_wi)],
closest_pub_city = breweries_wi$city[max.col(-mat_wi)],
closest_pub_address = breweries_wi$address[max.col(-mat_wi)],
closest_lat = breweries_wi$latitude[max.col(-mat_wi)],
closest_lon = breweries_wi$longitude[max.col(-mat_wi)]) -> breweries_wi
breweries_wi$miles_to_closest <- distVincentyEllipsoid(p1 = breweries_wi[,c("longitude", "latitude")],
p2 = breweries_wi[,c("closest_lon", "closest_lat")]) / 1609.344
# explore the closest pubs in WI...
ggplot(data = breweries_wi) +
borders("state", "WI",
fill = "#203731",
col = "#FFB612") +
# plot pubs
geom_point(aes(x = longitude,
y = latitude),
fill = "#FFB612",
col = "#FFB612") +
# plot nearest pub
geom_point(aes(x = closest_lon,
y = closest_lat,
size = miles_to_closest),
col = "#d95f0e",
alpha = .5) +
labs(caption = "Figure 4: \nEvery location in Wisconsin with a brewery and/or brew pub. \nThe size of each dot is proportional to the count of breweries and/or brew pubs in that city. \nData Source: https://www.kaggle.com/datafiniti/breweries-brew-pubs-in-the-usa/version/2 ") +
scale_size_area() +
theme_void() +
theme(plot.title = element_text(hjust = 0.5),
plot.caption = element_text(hjust = 0)) +
coord_quickmap() +
guides(size = F,
alpha = F,
col = F)
# Now do the same for OR
breweries_or <- breweries_us[breweries_us$province == "OR",]
# OR distance matrix
mat_or <- distm(breweries_or[,c("longitude","latitude")],
breweries_or[,c("longitude","latitude")],
fun = distVincentyEllipsoid)
mat_or <- mat_or/1609.344 # convert meters to miles
mat_or[mat_or == 0] <- 1000000 # don't want to include itself so replace with a big number that'll never be the smallest.
breweries_or %>%
mutate(closest_pub = breweries_or$name[max.col(-mat_or)],
closest_pub_city = breweries_or$city[max.col(-mat_or)],
closest_pub_address = breweries_or$address[max.col(-mat_or)],
closest_lat = breweries_or$latitude[max.col(-mat_or)],
closest_lon = breweries_or$longitude[max.col(-mat_or)]) -> breweries_or
breweries_or$miles_to_closest <- distVincentyEllipsoid(p1 = breweries_or[,c("longitude", "latitude")],
p2 = breweries_or[,c("closest_lon", "closest_lat")]) / 1609.344
ggplot(data = breweries_or) +
borders("state", "OR",
fill = "#002A86",
col = "#FFEA0F") +
# plot pubs
geom_point(aes(x = longitude,
y = latitude),
fill = "#FFB612",
col = "#FFB612") +
# plot closest pub
geom_point(aes(x = closest_lon,
y = closest_lat,
size = miles_to_closest),
col = "#d95f0e",
alpha = .5) +
# Portland label
geom_dl(data = filter(city_sum, city == "Portland" & state == "OR"),
aes(label = "Portland", x = lon, y = lat), method = list(dl.trans(x = x , y = y + .5), "last.points", cex = 0.8)) +
labs(caption = "Figure 3: \nEvery location in Oregon with a brewery and/or brew pub. \nThe size of each dot is proportional to the count of breweries and/or brew pubs in that city. \nData Source: https://www.kaggle.com/datafiniti/breweries-brew-pubs-in-the-usa/version/2 ") +
scale_size_area() +
theme_void() +
theme(plot.title = element_text(hjust = 0.5),
plot.caption = element_text(hjust = 0)) +
coord_quickmap() +
guides(size = F,
alpha = F,
col = F)
summary(breweries_or$miles_to_closest)
summary(breweries_wi$miles_to_closest)
###############################################################################
## MODEL ######################################################################
###############################################################################
# U.S Shapefile
us <- rgdal::readOGR("./Data/Raw/US shapefile",
"tl_2017_us_state")
# isolate to contiguous U.S.
no_thanks <- c('Alaska', 'American Samoa', 'Puerto Rico', 'Guam',
'Commonwealth of the Northern Mariana Islands United States Virgin Islands',
'Commonwealth of the Northern Mariana Islands',
'United States Virgin Islands', 'Hawaii')
us_cont <- subset(us, !(us@data$NAME %in% no_thanks))
wi <- subset(us, (us@data$NAME %in% "Wisconsin"))
# place a grid around shapefile
grid_us <- makegrid(us_cont,
n = 20000) %>%
SpatialPoints(proj4string = CRS(proj4string(us))) %>%
.[us_cont, ] # subset to contiguous U.S.
makegrid(wi, n = 2000000) %>% SpatialPoints(proj4string = CRS(proj4string(us))) %>%
.[wi, ] -> grid_wi
plot(grid_wi)
# convert the data to a spacial dataframe.
sp::coordinates(breweries_wi) = ~longitude + latitude
# make sure that the projection matches the grid we've built.
proj4string(breweries_wi) <- CRS(proj4string(wi))
warnings()
# fit basic inverse distance model
idw_model <- gstat::idw(
formula = miles_to_closest ~ 1,
locations = breweries_wi,
newdata = grid_wi,
idp = 2)
# extract interpolated predictions
interpolated_results = as.data.frame(idw_model) %>% {# output is defined as a data table
names(.)[1:3] <- c("longitude", "latitude", "miles_to_closest") # give names to the modeled variables
. } %>%
select(longitude, latitude, miles_to_closest)
interpolated_results %>%
head() %>%
knitr::kable()
# plot map with distances a la IDW
# ['#543005','#8c510a','#bf812d','#dfc27d','#f6e8c3','#c7eae5','#80cdc1','#35978f','#01665e','#003c30']
guide_tinker = guide_legend(
title.position = "top",
label.position="bottom",
label.hjust = 0.5,
direction = "horizontal",
keywidth = 1,
nrow = 1 )
colourCount = interpolated_results$miles_to_closest %>% unique() %>% length()
palette = colorRampPalette(brewer.pal(9, "YlGnBu"))(colourCount)
ggplot(interpolated_results,
aes(x = longitude,
y = latitude)) +
geom_raster( aes(fill = miles_to_closest)) +
scale_fill_manual(values = palette,
guide = guide_tinker) +
scale_fill_distiller(palette = 'YlGn',
direction = -1) +
theme_void() +
theme(
text = element_text(family = 'Montserrat'),
legend.justification = c(0,0),
legend.position = c(0,0.02),
legend.title = element_text(size = 10),
legend.text = element_text(size = 8),
legend.box.background = element_rect(fill = '#f0f0f0', color = NA)
) +
labs(fill = "working title") +
borders('state', "WI",
alpha = 0.1,
size = 0.1)
###############################################################################
## GRAVEYARD ##################################################################
###############################################################################
us.cities %>%
mutate(city = substr(name, start = 1, nchar(name)-3)) -> us.cities # create city var by removing state from name
city_sum %>%
left_join(us.cities, by = c("city", "province" = "country.etc")) -> city_sum
# renanme and reorder vars
city_sum %>%
mutate(state = province,
lat = lat.x,
lon = lon.x
) %>%
select(city, state, pop, breweries, lat, lon, capital,-(contains("."))) -> city_sum
# find cities with suspicious lon values
filter(city_sum, lon > 0) %>% arrange(desc(lon))
# fix burlington, WI
city_sum[city_sum$lon > 0 & city_sum$city =="Burlington", 5] <- 42.6762677
city_sum[city_sum$lon > 0 & city_sum$city =="Burlington", 6] <- -88.3422618
# fix sacramento, CA
city_sum[city_sum$lon > 0 & city_sum$city =="Sacramento", 5] <- -38.5725121
city_sum[city_sum$lon > 0 & city_sum$city =="Sacramento", 6] <- -121.4857704
plot(subset(us, (us@data$STUSPS %in% "WI")))
# Get coordinates of every city in every state
cities %>% separate(Geolocation, into = c("lat", "long"), sep = ",") -> cities
cities %>%
mutate(lat = as.numeric(gsub(x = cities$lat, pattern = "\\(", replacement = "")),
long = as.numeric(gsub(x = cities$long, pattern = "\\)", replacement = ""))) -> cities
leaflet(breweries) %>% addTiles('http://{s}.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png',
attribution = 'Map tiles by <a href="http://stamen.com">Stamen Design</a>, <a href="http://creativecommons.org/licenses/by/3.0">CC BY 3.0</a> — Map data © <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a>') %>%
setView( -95.7129, 37.0902, zoom = 4) %>%
addCircles(~long, ~lat, popup = datz$name,
weight = 3,
radius=40,
color="#ffa500", stroke = TRUE, fillOpacity = 0.9) %>%
addLegend("bottomleft", colors= "#ffa500", labels="Locations", title="Pubs-Breweries : USA")
ggplot(interpolated_results, aes(x = lon, y = lat)) +
xlim(-125, -65) + ylim(24, 51) +
theme_void() +
labs(fill = "Temp swing in degrees") +
borders('state', alpha = 0.1, size = 0.1)
| /Spatial Interpolation/1.0 Tutorial for Interpolation - IDW.R | no_license | wskwon/ODSC | R | false | false | 17,104 | r | # Author:
# Brandon Dey
#
# Date:
# 9.9.18
#
# Purpose:
# This script is the tag-a-long .R for ODSC article 3 on IDW geospatial interpolation.
#
#################
## ENVIRONMENT ##
#################
# load libraries
library(tidyverse)
library(rgdal)
library(leaflet)
library(geosphere)
library(directlabels)
library(RColorBrewer)
# get data
#breweries
read.csv("./Data/Raw/breweries-brew-pubs-in-the-usa/7160_1.csv") -> breweries
read.csv("./Data/Raw/breweries-brew-pubs-in-the-usa/8260_1.csv") -> breweries_new
names(breweries)
names(breweries_new)
# explore data
glimpse(breweries)
# remove missing values
paste0("Missing values in ",nrow(breweries_new) - na.omit(breweries_new) %>% nrow, " observations of ", nrow(breweries_new))
breweries_new <- na.omit(breweries_new)
# Find epicenter of brewery activity in each state
geographic_average <- function(lon, lat, weight = NULL) {
if (is.null(weight)) {
weight <- rep(1, length(lon))
}
lon <- weighted.mean(lon, w = weight)
lat <- weighted.mean(lat, w = weight)
data.frame(lon = lon, lat = lat)
}
# limit to breweries in the continguous U.S.
breweries_new %>%
filter(between(longitude, -124.446359, -70.6539763) &
between(latitude, 25.8192058, 47.3873012) &
nchar(as.character(state)) == 2) -> breweries_new
breweries_us <- breweries_new
epicenters <- data.frame(state = unique(breweries_us$province), lon = NA, lat = NA, breweries = NA)
epicenters <- filter(epicenters, str_count(state) == 2)
for(s in 1:nrow(epicenters)) {
state <- epicenters[s,1]
s_df <- filter(breweries_us, province == state)
s_epi <- geographic_average(lon = s_df$longitude, lat = s_df$latitude)
s_brs <- nrow(s_df)
epicenters[s, 2] <- s_epi[,1]
epicenters[s, 3] <- s_epi[,2]
epicenters[s, 4] <- s_brs
}
# Find U.S. Brewery Epicenter
geographic_average(lon = breweries_us$longitude,
breweries_us$latitude) -> nat_epicenter
# plot epicenters
ggplot(epicenters,
aes(x = lon, y = lat)) +
xlim(-125, -65) +
ylim(24, 51) +
borders('state',
alpha = 1,
size = 0.5,
fill = "#fec44f") +
# plot breweries
geom_point(data = breweries_us,
aes(x = longitude,
y = latitude),
alpha = .25,
col = "#fff7bc",
size = 1) +
# plot state epicenters
geom_point(col = "#d95f0e",
aes(size = breweries)) +
# plot state labels
geom_text(aes(x = lon,
y = lat,
label = state),
nudge_y = .25) +
labs(title = "The State(s) of Breweries",
size = "Geographic \"Center\" \nof State's Beer Scene\n",
caption = "Figure 1: \nEvery brewery and/or brew pub in the contiguous U.S. \nThe size of each dark orange dot is proportional to the count of breweries and/or brew pubs in that state. \nData Source: https://www.kaggle.com/datafiniti/breweries-brew-pubs-in-the-usa/version/2 ") +
theme_void(base_size = 14) +
theme(plot.title = element_text(hjust = 0.5),
plot.caption = element_text(hjust = 1)) -> gg_us
ggsave(gg_us,
filename = "./Plots/US.jpeg",
dpi = 1200,
width = 11,
height = 6)
# summarize cities
breweries_us %>%
group_by(city, province) %>%
summarize(breweries = n_distinct(name),
lat = mean(latitude), # mean bc some cities include adjacent areas.
lon = mean(longitude)) %>%
ungroup %>%
mutate(state = province) %>%
select(-province) -> city_sum
# join epicenters to compare geographic average to just picking the city with most breweries
city_sum %>% left_join(epicenters,
by = "state") %>%
mutate(lat = lat.x,
lon = lon.x,
lat_geoavg = lat.y,
lon_geoavg = lon.y,
breweries_state = breweries.y,
breweries_city = breweries.x) %>%
select(-contains(".")) -> city_sum
city_sum %>%
filter(nchar(as.character(state)) == 2) -> city_sum
# Plot WI
ggplot(data = filter(city_sum, state == "WI"),
aes(x = lon,
y = lat)) +
borders("state", "WI",
fill = "#203731",
col = "#FFB612") +
# plot cities with breweries
geom_point(aes(x = lon,
y = lat,
size = breweries_city),
alpha = .75,
fill = "#FFB612",
col = "#FFB612") +
# plot epicenter
geom_point(aes(x = lon_geoavg,
y = lat_geoavg),
col = "#d95f0e") +
# Epicenter label
geom_dl(aes(label = "Geographic \"Center\" \n of Beer Scene", x = lon_geoavg, y = lat_geoavg),
method = list(dl.trans(x = x - 1.2), "last.points", cex = 0.8)) +
# MKE label
geom_dl(data = filter(city_sum, city == "Milwaukee" & state == "WI"),
aes(label = "Better Epicenter of \n Beer Scene", x = lon, y = lat), method = list(dl.trans(x = x + 0.5), "last.points", cex = 0.8)) +
labs(caption = "Figure 2: \nEvery location in Wisconsin with a brewery and/or brew pub. \nThe size of each dot is proportional to the count of breweries and/or brew pubs in that city. \nData Source: https://www.kaggle.com/datafiniti/breweries-brew-pubs-in-the-usa/version/2 ") +
scale_size_area() +
theme_void() +
theme(plot.title = element_text(hjust = 0.5),
plot.caption = element_text(hjust = 0)) +
coord_quickmap() +
guides(size = F,
alpha = F,
col = F) -> gg_wi
ggsave(gg_wi,
filename = "./Plots/WI.jpeg",
dpi = 1200,
width = 7,
height = 7)
# Plot OR
ggplot(data = filter(city_sum, state == "OR"),
aes(x = lon,
y = lat)) +
borders("state", "OR",
fill = "#002A86",
col = "#FFEA0F") +
# plot cities with breweries
geom_point(aes(x = lon,
y = lat,
size = breweries_city,
alpha = 1),
col = "#FFEA0F") +
# Portland label
geom_dl(data = filter(city_sum, city == "Portland" & state == "OR"),
aes(label = "Portland", x = lon, y = lat), method = list(dl.trans(x = x , y = y + .5), "last.points", cex = 0.8)) +
labs(caption = "Figure 3: \nEvery location in Oregon with a brewery and/or brew pub. \nThe size of each dot is proportional to the count of breweries and/or brew pubs in that city. \nData Source: https://www.kaggle.com/datafiniti/breweries-brew-pubs-in-the-usa/version/2 ") +
scale_size_area() +
theme_void() +
theme(plot.title = element_text(hjust = 0.5),
plot.caption = element_text(hjust = 0)) +
coord_quickmap() +
guides(size = F,
alpha = F,
col = F) -> gg_or
ggsave(gg_or,
filename = "./Plots/OR.jpeg",
dpi = 1200,
width = 7,
height = 7)
###############################################################################
## DISTANCE MATRIX ############################################################
###############################################################################
# Create a distance matrix of distances between every brewery and the nearest brewery in WI and OR.
# WI
breweries_wi <- breweries_us[breweries_us$province == "WI",]
mat_wi <- distm(breweries_wi[,c("longitude","latitude")],
breweries_wi[,c("longitude","latitude")],
fun = distVincentyEllipsoid) # The shortest distance between two points (i.e., the 'great-circle-distance' or 'as the crow flies'), according to the 'Vincenty (ellipsoid)' method. This method uses an ellipsoid and the results are very accurate. The method is computationally more intensive than the other great-circled methods in this package. # Earth isn't a perfect sphere. It's not. It's eppiloidal.
# convert meters to miles
mat_wi <- mat_wi/1609.344
# don't want to include itself so replace with a big number that'll never be the smallest.
mat_wi[mat_wi == 0] <- 1000000
breweries_wi %>%
mutate(closest_pub = breweries_wi$name[max.col(-mat_wi)],
closest_pub_city = breweries_wi$city[max.col(-mat_wi)],
closest_pub_address = breweries_wi$address[max.col(-mat_wi)],
closest_lat = breweries_wi$latitude[max.col(-mat_wi)],
closest_lon = breweries_wi$longitude[max.col(-mat_wi)]) -> breweries_wi
breweries_wi$miles_to_closest <- distVincentyEllipsoid(p1 = breweries_wi[,c("longitude", "latitude")],
p2 = breweries_wi[,c("closest_lon", "closest_lat")]) / 1609.344
# explore the closest pubs in WI...
ggplot(data = breweries_wi) +
borders("state", "WI",
fill = "#203731",
col = "#FFB612") +
# plot pubs
geom_point(aes(x = longitude,
y = latitude),
fill = "#FFB612",
col = "#FFB612") +
# plot nearest pub
geom_point(aes(x = closest_lon,
y = closest_lat,
size = miles_to_closest),
col = "#d95f0e",
alpha = .5) +
labs(caption = "Figure 4: \nEvery location in Wisconsin with a brewery and/or brew pub. \nThe size of each dot is proportional to the count of breweries and/or brew pubs in that city. \nData Source: https://www.kaggle.com/datafiniti/breweries-brew-pubs-in-the-usa/version/2 ") +
scale_size_area() +
theme_void() +
theme(plot.title = element_text(hjust = 0.5),
plot.caption = element_text(hjust = 0)) +
coord_quickmap() +
guides(size = F,
alpha = F,
col = F)
# Now do the same for OR
breweries_or <- breweries_us[breweries_us$province == "OR",]
# OR distance matrix
mat_or <- distm(breweries_or[,c("longitude","latitude")],
breweries_or[,c("longitude","latitude")],
fun = distVincentyEllipsoid)
mat_or <- mat_or/1609.344 # convert meters to miles
mat_or[mat_or == 0] <- 1000000 # don't want to include itself so replace with a big number that'll never be the smallest.
breweries_or %>%
mutate(closest_pub = breweries_or$name[max.col(-mat_or)],
closest_pub_city = breweries_or$city[max.col(-mat_or)],
closest_pub_address = breweries_or$address[max.col(-mat_or)],
closest_lat = breweries_or$latitude[max.col(-mat_or)],
closest_lon = breweries_or$longitude[max.col(-mat_or)]) -> breweries_or
breweries_or$miles_to_closest <- distVincentyEllipsoid(p1 = breweries_or[,c("longitude", "latitude")],
p2 = breweries_or[,c("closest_lon", "closest_lat")]) / 1609.344
ggplot(data = breweries_or) +
borders("state", "OR",
fill = "#002A86",
col = "#FFEA0F") +
# plot pubs
geom_point(aes(x = longitude,
y = latitude),
fill = "#FFB612",
col = "#FFB612") +
# plot closest pub
geom_point(aes(x = closest_lon,
y = closest_lat,
size = miles_to_closest),
col = "#d95f0e",
alpha = .5) +
# Portland label
geom_dl(data = filter(city_sum, city == "Portland" & state == "OR"),
aes(label = "Portland", x = lon, y = lat), method = list(dl.trans(x = x , y = y + .5), "last.points", cex = 0.8)) +
labs(caption = "Figure 3: \nEvery location in Oregon with a brewery and/or brew pub. \nThe size of each dot is proportional to the count of breweries and/or brew pubs in that city. \nData Source: https://www.kaggle.com/datafiniti/breweries-brew-pubs-in-the-usa/version/2 ") +
scale_size_area() +
theme_void() +
theme(plot.title = element_text(hjust = 0.5),
plot.caption = element_text(hjust = 0)) +
coord_quickmap() +
guides(size = F,
alpha = F,
col = F)
summary(breweries_or$miles_to_closest)
summary(breweries_wi$miles_to_closest)
###############################################################################
## MODEL ######################################################################
###############################################################################
# U.S Shapefile
us <- rgdal::readOGR("./Data/Raw/US shapefile",
"tl_2017_us_state")
# isolate to contiguous U.S.
no_thanks <- c('Alaska', 'American Samoa', 'Puerto Rico', 'Guam',
'Commonwealth of the Northern Mariana Islands United States Virgin Islands',
'Commonwealth of the Northern Mariana Islands',
'United States Virgin Islands', 'Hawaii')
us_cont <- subset(us, !(us@data$NAME %in% no_thanks))
wi <- subset(us, (us@data$NAME %in% "Wisconsin"))
# place a grid around shapefile
grid_us <- makegrid(us_cont,
n = 20000) %>%
SpatialPoints(proj4string = CRS(proj4string(us))) %>%
.[us_cont, ] # subset to contiguous U.S.
makegrid(wi, n = 2000000) %>% SpatialPoints(proj4string = CRS(proj4string(us))) %>%
.[wi, ] -> grid_wi
plot(grid_wi)
# convert the data to a spacial dataframe.
sp::coordinates(breweries_wi) = ~longitude + latitude
# make sure that the projection matches the grid we've built.
proj4string(breweries_wi) <- CRS(proj4string(wi))
warnings()
# fit basic inverse distance model
idw_model <- gstat::idw(
formula = miles_to_closest ~ 1,
locations = breweries_wi,
newdata = grid_wi,
idp = 2)
# extract interpolated predictions
interpolated_results = as.data.frame(idw_model) %>% {# output is defined as a data table
names(.)[1:3] <- c("longitude", "latitude", "miles_to_closest") # give names to the modeled variables
. } %>%
select(longitude, latitude, miles_to_closest)
interpolated_results %>%
head() %>%
knitr::kable()
# plot map with distances a la IDW
# ['#543005','#8c510a','#bf812d','#dfc27d','#f6e8c3','#c7eae5','#80cdc1','#35978f','#01665e','#003c30']
guide_tinker = guide_legend(
title.position = "top",
label.position="bottom",
label.hjust = 0.5,
direction = "horizontal",
keywidth = 1,
nrow = 1 )
colourCount = interpolated_results$miles_to_closest %>% unique() %>% length()
palette = colorRampPalette(brewer.pal(9, "YlGnBu"))(colourCount)
ggplot(interpolated_results,
aes(x = longitude,
y = latitude)) +
geom_raster( aes(fill = miles_to_closest)) +
scale_fill_manual(values = palette,
guide = guide_tinker) +
scale_fill_distiller(palette = 'YlGn',
direction = -1) +
theme_void() +
theme(
text = element_text(family = 'Montserrat'),
legend.justification = c(0,0),
legend.position = c(0,0.02),
legend.title = element_text(size = 10),
legend.text = element_text(size = 8),
legend.box.background = element_rect(fill = '#f0f0f0', color = NA)
) +
labs(fill = "working title") +
borders('state', "WI",
alpha = 0.1,
size = 0.1)
###############################################################################
## GRAVEYARD ##################################################################
###############################################################################
us.cities %>%
mutate(city = substr(name, start = 1, nchar(name)-3)) -> us.cities # create city var by removing state from name
city_sum %>%
left_join(us.cities, by = c("city", "province" = "country.etc")) -> city_sum
# renanme and reorder vars
city_sum %>%
mutate(state = province,
lat = lat.x,
lon = lon.x
) %>%
select(city, state, pop, breweries, lat, lon, capital,-(contains("."))) -> city_sum
# find cities with suspicious lon values
filter(city_sum, lon > 0) %>% arrange(desc(lon))
# fix burlington, WI
city_sum[city_sum$lon > 0 & city_sum$city =="Burlington", 5] <- 42.6762677
city_sum[city_sum$lon > 0 & city_sum$city =="Burlington", 6] <- -88.3422618
# fix sacramento, CA
city_sum[city_sum$lon > 0 & city_sum$city =="Sacramento", 5] <- -38.5725121
city_sum[city_sum$lon > 0 & city_sum$city =="Sacramento", 6] <- -121.4857704
plot(subset(us, (us@data$STUSPS %in% "WI")))
# Get coordinates of every city in every state
cities %>% separate(Geolocation, into = c("lat", "long"), sep = ",") -> cities
cities %>%
mutate(lat = as.numeric(gsub(x = cities$lat, pattern = "\\(", replacement = "")),
long = as.numeric(gsub(x = cities$long, pattern = "\\)", replacement = ""))) -> cities
leaflet(breweries) %>% addTiles('http://{s}.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png',
attribution = 'Map tiles by <a href="http://stamen.com">Stamen Design</a>, <a href="http://creativecommons.org/licenses/by/3.0">CC BY 3.0</a> — Map data © <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a>') %>%
setView( -95.7129, 37.0902, zoom = 4) %>%
addCircles(~long, ~lat, popup = datz$name,
weight = 3,
radius=40,
color="#ffa500", stroke = TRUE, fillOpacity = 0.9) %>%
addLegend("bottomleft", colors= "#ffa500", labels="Locations", title="Pubs-Breweries : USA")
ggplot(interpolated_results, aes(x = lon, y = lat)) +
xlim(-125, -65) + ylim(24, 51) +
theme_void() +
labs(fill = "Temp swing in degrees") +
borders('state', alpha = 0.1, size = 0.1)
|
context("deploy")
# setup ---------------------------------------------------
# should connect with env vars
test_conn_1 <- connect(prefix = "TEST_1")
test_conn_2 <- connect(prefix = "TEST_2")
# NOTE(review): integration tests against a live Connect server; they run in
# file order and share state, so individual tests are not independent.
# The `cont1_*` values below are populated via `<<-` inside
# "bundle_dir deploys" and consumed by the image / vanity / misc tests.
cont1_name <- uuid::UUIDgenerate()
cont1_title <- "Test Content 1"
cont1_guid <- NULL
cont1_bundle <- NULL
cont1_content <- NULL
# bundle ---------------------------------------------------
test_that("bundle_static deploys", {
  bnd <- bundle_static(path = rprojroot::find_package_root_file("tests/testthat/examples/static/test.png"))
  uniq_id <- uuid::UUIDgenerate()
  deployed <- deploy(test_conn_1, bnd, uniq_id)
  expect_true(validate_R6_class(bnd, "Bundle"))
  expect_true(validate_R6_class(deployed, "Content"))
  # deploying the same bundle to the same name again should also succeed
  deployed2 <- deploy(test_conn_1, bnd, uniq_id)
  expect_true(validate_R6_class(deployed2, "Content"))
})
test_that("bundle_dir deploys", {
  dir_path <- rprojroot::find_package_root_file("tests/testthat/examples/static")
  tmp_file <- fs::file_temp(pattern = "bundle", ext = ".tar.gz")
  bund <- bundle_dir(path = dir_path, filename = tmp_file)
  expect_equal(tmp_file, bund$path)
  # with a name / title
  tsk <- deploy(connect = test_conn_1, bundle = bund, name = cont1_name, title = cont1_title)
  # record the deployed content for the tests further down this file
  cont1_guid <<- tsk$get_content()$guid
  cont1_content <<- tsk
  # how should we test that deployment happened?
  expect_true(validate_R6_class(tsk, "Content"))
  expect_equal(tsk$get_content()$name, cont1_name)
  expect_equal(tsk$get_content()$title, cont1_title)
  expect_true(validate_R6_class(tsk, "Task"))
  expect_gt(nchar(tsk$get_task()$task_id), 0)
  # with a guid
  tsk2 <- deploy(connect = test_conn_1, bundle = bund, guid = cont1_guid)
  expect_true(validate_R6_class(tsk2, "Content"))
  expect_equal(tsk2$get_content()$name, cont1_name)
  expect_equal(tsk2$get_content()$title, cont1_title)
  expect_equal(tsk2$get_content()$guid, cont1_guid)
})
test_that("bundle_path deploys", {
  tar_path <- rprojroot::find_package_root_file("tests/testthat/examples/static.tar.gz")
  bund <- bundle_path(path = tar_path)
  expect_equal(tar_path, as.character(bund$path))
  # deploy to a new endpoint
  tsk <- deploy(connect = test_conn_1, bundle = bund)
  # how should we test that deployment happened?
  expect_true(validate_R6_class(tsk, "Content"))
})
# deploy ---------------------------------------------------
test_that("strange name re-casing does not break things", {
  # no expectations here: the test passes as long as repeated deploys with
  # re-cased names do not error
  bnd <- bundle_static(path = rprojroot::find_package_root_file("tests/testthat/examples/static/test.png"))
  testname <- "test_Test_45"
  deploy1 <- deploy(test_conn_1, bnd, testname)
  deploy2 <- deploy(test_conn_1, bnd, testname)
  testname2 <- "test_Test"
  deployA <- deploy(test_conn_1, bnd, testname2)
  deployB <- deploy(test_conn_1, bnd, testname2)
})
test_that(".pre_deploy hook works", {
  scoped_experimental_silence()
  bnd <- bundle_static(path = rprojroot::find_package_root_file("tests/testthat/examples/static/test.png"))
  # the hook expression runs before activation, with `content` and `bundle_id`
  # in scope for the expression
  deployed <- deploy(test_conn_1, bnd, uuid::UUIDgenerate(), .pre_deploy = {
    content %>% set_vanity_url(glue::glue("pre_deploy_{bundle_id}"))
  })
  active_bundle <- deployed$get_content_remote()$bundle_id
  expect_equal(
    get_vanity_url(deployed)$vanity$path_prefix,
    as.character(glue::glue("/pre_deploy_{active_bundle}/"))
  )
})
# image ---------------------------------------------------
test_that("set_image_path works", {
  scoped_experimental_silence()
  img_path <- rprojroot::find_package_root_file("tests/testthat/examples/logo.png")
  res <- set_image_path(cont1_content, img_path)
  expect_true(validate_R6_class(res, "Content"))
})
test_that("get_image works", {
  scoped_experimental_silence()
  img_path <- rprojroot::find_package_root_file("tests/testthat/examples/logo.png")
  tmp_img <- fs::file_temp(pattern = "img", ext = ".png")
  get_image(cont1_content, tmp_img)
  expect_identical(
    readBin(img_path, "raw"),
    readBin(tmp_img, "raw")
  )
  # works again (i.e. does not append data)
  get_image(cont1_content, tmp_img)
  expect_identical(
    readBin(img_path, "raw"),
    readBin(tmp_img, "raw")
  )
  # works with no path
  auto_path <- get_image(cont1_content)
  expect_identical(
    readBin(img_path, "raw"),
    readBin(auto_path, "raw")
  )
  expect_identical(fs::path_ext(auto_path), "png")
})
test_that("has_image works with an image", {
  scoped_experimental_silence()
  expect_true(has_image(cont1_content))
})
test_that("delete_image works", {
  scoped_experimental_silence()
  # from above
  img_path <- rprojroot::find_package_root_file("tests/testthat/examples/logo.png")
  tmp_img <- fs::file_temp(pattern = "img", ext = ".png")
  # retains the image at the path
  expect_false(fs::file_exists(tmp_img))
  expect_true(validate_R6_class(delete_image(cont1_content, tmp_img), "Content"))
  expect_true(fs::file_exists(tmp_img))
  expect_identical(
    readBin(img_path, "raw"),
    readBin(tmp_img, "raw")
  )
  expect_false(has_image(cont1_content))
  # works again - i.e. if no image available
  expect_true(validate_R6_class(delete_image(cont1_content), "Content"))
})
test_that("has_image works with no image", {
  scoped_experimental_silence()
  expect_false(has_image(cont1_content))
})
test_that("get_image returns NA if no image", {
  scoped_experimental_silence()
  tmp_img <- fs::file_temp(pattern = "img", ext = ".png")
  response <- get_image(cont1_content, tmp_img)
  expect_false(identical(tmp_img, response))
  expect_true(is.na(response))
})
test_that("set_image_url works", {
  # need to find a reliable image URL that is small
  # ... and we are willing to take a dependency on...
  # or... we could use the Connect instance itself :p
  skip("not implemented yet")
})
test_that("set_image_webshot works", {
  skip("currently broken")
  scoped_experimental_silence()
  res <- set_image_webshot(cont1_content)
  expect_true(validate_R6_class(res, "Content"))
})
# vanity_url ---------------------------------------------------
test_that("set_vanity_url works", {
  scoped_experimental_silence()
  res <- set_vanity_url(cont1_content, cont1_name)
  expect_true(validate_R6_class(res, "Vanity"))
  expect_equal(res$get_vanity()$path_prefix, paste0("/", cont1_name, "/"))
  # updating the vanity URL replaces the previous one
  res2 <- set_vanity_url(cont1_content, paste0(cont1_name, "update"))
  expect_true(validate_R6_class(res2, "Vanity"))
  expect_equal(res2$get_vanity()$path_prefix, paste0("/", cont1_name, "update/"))
})
test_that("get_vanity_url works", {
  scoped_experimental_silence()
  tmp_content_name <- uuid::UUIDgenerate()
  tmp_content_prep <- content_ensure(test_conn_1, name = tmp_content_name)
  tmp_content <- Content$new(connect = test_conn_1, content = tmp_content_prep)
  # without a vanity
  curr_vanity <- get_vanity_url(tmp_content)
  expect_true(validate_R6_class(curr_vanity, "Content"))
  expect_error(validate_R6_class(curr_vanity, "Vanity"), regexp = "R6 Vanity")
  # with a vanity
  res <- set_vanity_url(tmp_content, tmp_content_name)
  existing_vanity <- get_vanity_url(tmp_content)
  expect_true(validate_R6_class(existing_vanity, "Vanity"))
  expect_equal(existing_vanity$get_vanity()$path_prefix, paste0("/", tmp_content_name, "/"))
})
# misc functions ---------------------------------------------------
test_that("poll_task works and returns its input", {
  expect_message(
    res <- poll_task(cont1_content)
  )
  expect_equal(res, cont1_content)
})
test_that("download_bundle works", {
  bnd <- download_bundle(content_item(test_conn_1, cont1_guid))
  expect_true(validate_R6_class(bnd, "Bundle"))
})
test_that("download_bundle throws an error for undeployed content", {
  cont_prep <- content_ensure(test_conn_1)
  cont <- content_item(test_conn_1, cont_prep$guid)
  expect_error(
    download_bundle(cont),
    "This content has no bundle_id"
  )
})
test_that("dashboard_url resolves properly", {
  cont <- content_item(test_conn_1, cont1_guid)
  dash_url <- dashboard_url(cont)
  skip("not yet tested")
})
test_that("deployment timestamps respect timezone", {
  bnd <- bundle_static(path = rprojroot::find_package_root_file("tests/testthat/examples/static/test.png"))
  myc <- deploy(test_conn_1, bnd)
  myc_guid <- myc$get_content()$guid
  # will fail without the png package
  invisible(tryCatch(test_conn_1$GET_URL(myc$get_url()), error = function(e){}))
  allusg <- get_usage_static(test_conn_1, content_guid = myc_guid)
  # we just did this, so it should be less than 1 minute ago...
  # (really protecting against being off by hours b/c of timezone differences)
  expect_true(any((Sys.time() - allusg$time) < lubridate::make_difftime(60, "seconds")))
})
| /tests/integrated/test-deploy.R | no_license | slodge/connectapi | R | false | false | 8,616 | r | context("deploy")
# setup ---------------------------------------------------
# should connect with env vars
# NOTE(review): presumably TEST_1_*/TEST_2_* environment variables supply the
# server/API key -- confirm against connect()'s documentation.
test_conn_1 <- connect(prefix = "TEST_1")
test_conn_2 <- connect(prefix = "TEST_2")
# NOTE(review): integration tests against a live Connect server; they run in
# file order and share state, so individual tests are not independent.
# The `cont1_*` values below are populated via `<<-` inside
# "bundle_dir deploys" and consumed by the image / vanity / misc tests.
cont1_name <- uuid::UUIDgenerate()
cont1_title <- "Test Content 1"
cont1_guid <- NULL
cont1_bundle <- NULL
cont1_content <- NULL
# bundle ---------------------------------------------------
test_that("bundle_static deploys", {
  bnd <- bundle_static(path = rprojroot::find_package_root_file("tests/testthat/examples/static/test.png"))
  uniq_id <- uuid::UUIDgenerate()
  deployed <- deploy(test_conn_1, bnd, uniq_id)
  expect_true(validate_R6_class(bnd, "Bundle"))
  expect_true(validate_R6_class(deployed, "Content"))
  # deploying the same bundle to the same name again should also succeed
  deployed2 <- deploy(test_conn_1, bnd, uniq_id)
  expect_true(validate_R6_class(deployed2, "Content"))
})
test_that("bundle_dir deploys", {
  dir_path <- rprojroot::find_package_root_file("tests/testthat/examples/static")
  tmp_file <- fs::file_temp(pattern = "bundle", ext = ".tar.gz")
  bund <- bundle_dir(path = dir_path, filename = tmp_file)
  expect_equal(tmp_file, bund$path)
  # with a name / title
  tsk <- deploy(connect = test_conn_1, bundle = bund, name = cont1_name, title = cont1_title)
  # record the deployed content for the tests further down this file
  cont1_guid <<- tsk$get_content()$guid
  cont1_content <<- tsk
  # how should we test that deployment happened?
  expect_true(validate_R6_class(tsk, "Content"))
  expect_equal(tsk$get_content()$name, cont1_name)
  expect_equal(tsk$get_content()$title, cont1_title)
  expect_true(validate_R6_class(tsk, "Task"))
  expect_gt(nchar(tsk$get_task()$task_id), 0)
  # with a guid
  tsk2 <- deploy(connect = test_conn_1, bundle = bund, guid = cont1_guid)
  expect_true(validate_R6_class(tsk2, "Content"))
  expect_equal(tsk2$get_content()$name, cont1_name)
  expect_equal(tsk2$get_content()$title, cont1_title)
  expect_equal(tsk2$get_content()$guid, cont1_guid)
})
test_that("bundle_path deploys", {
  tar_path <- rprojroot::find_package_root_file("tests/testthat/examples/static.tar.gz")
  bund <- bundle_path(path = tar_path)
  expect_equal(tar_path, as.character(bund$path))
  # deploy to a new endpoint
  tsk <- deploy(connect = test_conn_1, bundle = bund)
  # how should we test that deployment happened?
  expect_true(validate_R6_class(tsk, "Content"))
})
# deploy ---------------------------------------------------
test_that("strange name re-casing does not break things", {
  # no expectations here: the test passes as long as repeated deploys with
  # re-cased names do not error
  bnd <- bundle_static(path = rprojroot::find_package_root_file("tests/testthat/examples/static/test.png"))
  testname <- "test_Test_45"
  deploy1 <- deploy(test_conn_1, bnd, testname)
  deploy2 <- deploy(test_conn_1, bnd, testname)
  testname2 <- "test_Test"
  deployA <- deploy(test_conn_1, bnd, testname2)
  deployB <- deploy(test_conn_1, bnd, testname2)
})
test_that(".pre_deploy hook works", {
  scoped_experimental_silence()
  bnd <- bundle_static(path = rprojroot::find_package_root_file("tests/testthat/examples/static/test.png"))
  # the hook expression runs before activation, with `content` and `bundle_id`
  # in scope for the expression
  deployed <- deploy(test_conn_1, bnd, uuid::UUIDgenerate(), .pre_deploy = {
    content %>% set_vanity_url(glue::glue("pre_deploy_{bundle_id}"))
  })
  active_bundle <- deployed$get_content_remote()$bundle_id
  expect_equal(
    get_vanity_url(deployed)$vanity$path_prefix,
    as.character(glue::glue("/pre_deploy_{active_bundle}/"))
  )
})
# image ---------------------------------------------------
test_that("set_image_path works", {
  scoped_experimental_silence()
  img_path <- rprojroot::find_package_root_file("tests/testthat/examples/logo.png")
  res <- set_image_path(cont1_content, img_path)
  expect_true(validate_R6_class(res, "Content"))
})
test_that("get_image works", {
  scoped_experimental_silence()
  img_path <- rprojroot::find_package_root_file("tests/testthat/examples/logo.png")
  tmp_img <- fs::file_temp(pattern = "img", ext = ".png")
  get_image(cont1_content, tmp_img)
  expect_identical(
    readBin(img_path, "raw"),
    readBin(tmp_img, "raw")
  )
  # works again (i.e. does not append data)
  get_image(cont1_content, tmp_img)
  expect_identical(
    readBin(img_path, "raw"),
    readBin(tmp_img, "raw")
  )
  # works with no path
  auto_path <- get_image(cont1_content)
  expect_identical(
    readBin(img_path, "raw"),
    readBin(auto_path, "raw")
  )
  expect_identical(fs::path_ext(auto_path), "png")
})
test_that("has_image works with an image", {
  scoped_experimental_silence()
  expect_true(has_image(cont1_content))
})
test_that("delete_image works", {
  scoped_experimental_silence()
  # from above
  img_path <- rprojroot::find_package_root_file("tests/testthat/examples/logo.png")
  tmp_img <- fs::file_temp(pattern = "img", ext = ".png")
  # retains the image at the path
  expect_false(fs::file_exists(tmp_img))
  expect_true(validate_R6_class(delete_image(cont1_content, tmp_img), "Content"))
  expect_true(fs::file_exists(tmp_img))
  expect_identical(
    readBin(img_path, "raw"),
    readBin(tmp_img, "raw")
  )
  expect_false(has_image(cont1_content))
  # works again - i.e. if no image available
  expect_true(validate_R6_class(delete_image(cont1_content), "Content"))
})
test_that("has_image works with no image", {
  scoped_experimental_silence()
  expect_false(has_image(cont1_content))
})
test_that("get_image returns NA if no image", {
  scoped_experimental_silence()
  tmp_img <- fs::file_temp(pattern = "img", ext = ".png")
  response <- get_image(cont1_content, tmp_img)
  expect_false(identical(tmp_img, response))
  expect_true(is.na(response))
})
test_that("set_image_url works", {
  # need to find a reliable image URL that is small
  # ... and we are willing to take a dependency on...
  # or... we could use the Connect instance itself :p
  skip("not implemented yet")
})
test_that("set_image_webshot works", {
  skip("currently broken")
  scoped_experimental_silence()
  res <- set_image_webshot(cont1_content)
  expect_true(validate_R6_class(res, "Content"))
})
# vanity_url ---------------------------------------------------
test_that("set_vanity_url works", {
  scoped_experimental_silence()
  res <- set_vanity_url(cont1_content, cont1_name)
  expect_true(validate_R6_class(res, "Vanity"))
  expect_equal(res$get_vanity()$path_prefix, paste0("/", cont1_name, "/"))
  # updating the vanity URL replaces the previous one
  res2 <- set_vanity_url(cont1_content, paste0(cont1_name, "update"))
  expect_true(validate_R6_class(res2, "Vanity"))
  expect_equal(res2$get_vanity()$path_prefix, paste0("/", cont1_name, "update/"))
})
test_that("get_vanity_url works", {
  scoped_experimental_silence()
  tmp_content_name <- uuid::UUIDgenerate()
  tmp_content_prep <- content_ensure(test_conn_1, name = tmp_content_name)
  tmp_content <- Content$new(connect = test_conn_1, content = tmp_content_prep)
  # without a vanity
  curr_vanity <- get_vanity_url(tmp_content)
  expect_true(validate_R6_class(curr_vanity, "Content"))
  expect_error(validate_R6_class(curr_vanity, "Vanity"), regexp = "R6 Vanity")
  # with a vanity
  res <- set_vanity_url(tmp_content, tmp_content_name)
  existing_vanity <- get_vanity_url(tmp_content)
  expect_true(validate_R6_class(existing_vanity, "Vanity"))
  expect_equal(existing_vanity$get_vanity()$path_prefix, paste0("/", tmp_content_name, "/"))
})
# misc functions ---------------------------------------------------
test_that("poll_task works and returns its input", {
  expect_message(
    res <- poll_task(cont1_content)
  )
  expect_equal(res, cont1_content)
})
test_that("download_bundle works", {
  bnd <- download_bundle(content_item(test_conn_1, cont1_guid))
  expect_true(validate_R6_class(bnd, "Bundle"))
})
test_that("download_bundle throws an error for undeployed content", {
  cont_prep <- content_ensure(test_conn_1)
  cont <- content_item(test_conn_1, cont_prep$guid)
  expect_error(
    download_bundle(cont),
    "This content has no bundle_id"
  )
})
test_that("dashboard_url resolves properly", {
  cont <- content_item(test_conn_1, cont1_guid)
  dash_url <- dashboard_url(cont)
  skip("not yet tested")
})
test_that("deployment timestamps respect timezone", {
  bnd <- bundle_static(path = rprojroot::find_package_root_file("tests/testthat/examples/static/test.png"))
  myc <- deploy(test_conn_1, bnd)
  myc_guid <- myc$get_content()$guid
  # will fail without the png package
  invisible(tryCatch(test_conn_1$GET_URL(myc$get_url()), error = function(e){}))
  allusg <- get_usage_static(test_conn_1, content_guid = myc_guid)
  # we just did this, so it should be less than 1 minute ago...
  # (really protecting against being off by hours b/c of timezone differences)
  expect_true(any((Sys.time() - allusg$time) < lubridate::make_difftime(60, "seconds")))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dplyr.R
\name{mutate.SummarizedExperiment}
\alias{mutate.SummarizedExperiment}
\title{Create or transform variables}
\usage{
\method{mutate}{SummarizedExperiment}(.data, axis, ...)
}
\arguments{
\item{.data}{SummarizedExperiment to mutate}
\item{axis}{The axis to perform the operation on. Either row or col.}
\item{...}{Name-value pairs of expressions, each with length 1 or the same
length as the number of rows/cols in row- or colData. The name of each
argument will
be the name of a new variable, and the value will be its corresponding value.
Use a NULL value in mutate to drop a variable. New variables overwrite
existing variables of the same name.
The arguments in ... are automatically quoted and evaluated in the context
of the data frame.
They support unquoting and splicing. See vignette("programming") for an
introduction to these concepts.}
}
\value{
A SummarizedExperiment after the mutate operation
}
\description{
mutate() adds new variables and preserves existing ones;
it preserves the number of rows/cols of the input.
New variables overwrite existing variables of the same name.
}
\examples{
#Change the treatment time from hours to minutes
data(seq_se)
seq_se \%>\% mutate(col, time = (time * 60))
}
| /man/mutate.SummarizedExperiment.Rd | permissive | martijnvanattekum/cleanse | R | false | true | 1,312 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dplyr.R
\name{mutate.SummarizedExperiment}
\alias{mutate.SummarizedExperiment}
\title{Create or transform variables}
\usage{
\method{mutate}{SummarizedExperiment}(.data, axis, ...)
}
\arguments{
\item{.data}{SummarizedExperiment to mutate}
\item{axis}{The axis to perform the operation on. Either row or col.}
\item{...}{Name-value pairs of expressions, each with length 1 or the same
length as the number of rows/cols in row- or colData. The name of each
argument will
be the name of a new variable, and the value will be its corresponding value.
Use a NULL value in mutate to drop a variable. New variables overwrite
existing variables of the same name.
The arguments in ... are automatically quoted and evaluated in the context
of the data frame.
They support unquoting and splicing. See vignette("programming") for an
introduction to these concepts.}
}
\value{
A SummarizedExperiment after the mutate operation
}
\description{
mutate() adds new variables and preserves existing ones;
it preserves the number of rows/cols of the input.
New variables overwrite existing variables of the same name.
}
\examples{
#Change the treatment time from hours to minutes
data(seq_se)
seq_se \%>\% mutate(col, time = (time * 60))
}
|
# Build the BRC Shop Price Index (SPI) annual-change series from the master
# workbook, attach publication-embargo dates, and drop rows that are still
# embargoed. Requires readxl, xts, dplyr and an `embargoes` data frame plus a
# LastDayInMonth() helper already in the workspace.
#setwd("Z:/Monitors/spi/Data/All SPI Data/ForDataCollation")

spi_master_path <- "Z:/Monitors/spi/Data/All SPI Data/SPIMaster.xlsx"

# Read a single column of the "Annual change" sheet (row 2 to the end) and
# convert the proportions to percentages.
read_spi_annual_change <- function(col_idx) {
  read_excel(spi_master_path,
             sheet = "Annual change",
             range = cell_limits(c(2, col_idx), c(NA, col_idx)),
             col_names = FALSE,
             col_types = "numeric") * 100
}

SPI_All       <- read_spi_annual_change(3)
SPI_Food      <- read_spi_annual_change(4)
SPI_NF        <- read_spi_annual_change(5)
SPI_Clothes   <- read_spi_annual_change(10)
SPI_Furniture <- read_spi_annual_change(11)
SPI_Elect     <- read_spi_annual_change(12)
SPI_DIY       <- read_spi_annual_change(13)
SPI_Books     <- read_spi_annual_change(14)
SPI_HB        <- read_spi_annual_change(15)
SPI_ONF       <- read_spi_annual_change(16)
SPI_Fresh     <- read_spi_annual_change(7)
SPI_Ambient   <- read_spi_annual_change(8)

spi_df <- cbind(SPI_All, SPI_Ambient, SPI_Books, SPI_Clothes, SPI_DIY, SPI_Elect, SPI_Food, SPI_Fresh, SPI_Furniture, SPI_HB, SPI_NF, SPI_ONF)
# Drop the last 7 rows (presumably trailing incomplete months -- TODO confirm).
spi_df <- head(spi_df, -7)
colnames(spi_df) <- c("SPI_All", "SPI_Ambient", "SPI_Books", "SPI_Clothes", "SPI_DIY", "SPI_Elect", "SPI_Food", "SPI_Fresh", "SPI_Furniture", "SPI_HB", "SPI_NF", "SPI_ONF")

# Embargo dates for observations 144-157, taken from the external `embargoes`
# table (must already be loaded).
spi_embargo <- data.frame(
  id = as.numeric(144:157),
  embargo = as.Date(embargoes$SPI_embargo, "%d/%m/%y")
)

spi_df$id <- as.numeric(row.names(spi_df))
spi_all_show <- merge(spi_df, spi_embargo, by = "id", all = TRUE)
spi_all_show <- spi_all_show[order(spi_all_show$id), ]

# Monthly series starting December 2006.
dates <- seq(as.Date("2006-12-01"), length = nrow(spi_all_show), by = "months")
spi_all_show <- xts(x = spi_all_show, order.by = dates)
spi_all_showdf <- data.frame(date = index(spi_all_show), coredata(spi_all_show))
spi_all_showdf$embargo <- as.Date(spi_all_showdf$embargo)

spi_all_embargo_df <- spi_all_showdf %>%
  filter(
    # `&` binds tighter than `|`, so this keeps rows whose date AND embargo
    # have both passed, or pre-Feb-2019 rows with no embargo recorded.
    # The parentheses make the original precedence explicit.
    (spi_all_showdf$date <= Sys.Date() & spi_all_showdf$embargo <= Sys.Date()) |
      (spi_all_showdf$date <= "2019-01-31" & is.na(spi_all_showdf$embargo))
  )

# Normalise all dates to the last day of the month.
spi_all_embargo_df$date <- as.Date(LastDayInMonth(spi_all_embargo_df$date))
dates <- seq(as.Date("2006-12-01"), length = nrow(spi_all_embargo_df), by = "months")
dates <- LastDayInMonth(dates)
spi_all_embargo_xts <- xts(x = spi_all_embargo_df, order.by = dates)
spi_all_embargo_xts <- xts(x=spi_all_embargo_df, order.by = dates) | /BRCSPIData.R | no_license | BRCRetailInsight/DatabaseBuild | R | false | false | 3,579 | r |
#setwd("Z:/Monitors/spi/Data/All SPI Data/ForDataCollation")
SPI_All=read_excel("Z:/Monitors/spi/Data/All SPI Data/SPIMaster.xlsx", sheet = "Annual change", range = cell_limits(c(2, 3), c(NA, 3)),col_names = FALSE,col_types="numeric")*100
SPI_Food=read_excel("Z:/Monitors/spi/Data/All SPI Data/SPIMaster.xlsx", sheet = "Annual change", range = cell_limits(c(2, 4), c(NA, 4)),col_names = FALSE,col_types="numeric")*100
SPI_NF=read_excel("Z:/Monitors/spi/Data/All SPI Data/SPIMaster.xlsx", sheet = "Annual change", range = cell_limits(c(2, 5), c(NA, 5)),col_names = FALSE,col_types="numeric")*100
SPI_Clothes=read_excel("Z:/Monitors/spi/Data/All SPI Data/SPIMaster.xlsx", sheet = "Annual change", range = cell_limits(c(2, 10), c(NA, 10)),col_names = FALSE,col_types="numeric")*100
SPI_Furniture=read_excel("Z:/Monitors/spi/Data/All SPI Data/SPIMaster.xlsx", sheet = "Annual change", range = cell_limits(c(2, 11), c(NA, 11)),col_names = FALSE,col_types="numeric")*100
SPI_Elect=read_excel("Z:/Monitors/spi/Data/All SPI Data/SPIMaster.xlsx", sheet = "Annual change", range = cell_limits(c(2, 12), c(NA, 12)),col_names = FALSE,col_types="numeric")*100
SPI_DIY=read_excel("Z:/Monitors/spi/Data/All SPI Data/SPIMaster.xlsx", sheet = "Annual change", range = cell_limits(c(2, 13), c(NA, 13)),col_names = FALSE,col_types="numeric")*100
SPI_Books=read_excel("Z:/Monitors/spi/Data/All SPI Data/SPIMaster.xlsx", sheet = "Annual change", range = cell_limits(c(2, 14), c(NA, 14)),col_names = FALSE,col_types="numeric")*100
SPI_HB=read_excel("Z:/Monitors/spi/Data/All SPI Data/SPIMaster.xlsx", sheet = "Annual change", range = cell_limits(c(2, 15), c(NA, 15)),col_names = FALSE,col_types="numeric")*100
SPI_ONF=read_excel("Z:/Monitors/spi/Data/All SPI Data/SPIMaster.xlsx", sheet = "Annual change", range = cell_limits(c(2, 16), c(NA, 16)),col_names = FALSE,col_types="numeric")*100
SPI_Fresh=read_excel("Z:/Monitors/spi/Data/All SPI Data/SPIMaster.xlsx", sheet = "Annual change", range = cell_limits(c(2, 7), c(NA, 7)),col_names = FALSE,col_types="numeric")*100
SPI_Ambient=read_excel("Z:/Monitors/spi/Data/All SPI Data/SPIMaster.xlsx", sheet = "Annual change", range = cell_limits(c(2, 8), c(NA, 8)),col_names = FALSE,col_types="numeric")*100
spi_df <- cbind(SPI_All, SPI_Ambient, SPI_Books, SPI_Clothes, SPI_DIY, SPI_Elect, SPI_Food, SPI_Fresh, SPI_Furniture, SPI_HB, SPI_NF, SPI_ONF)
spi_df <- head(spi_df, -7)
colnames(spi_df) <- c("SPI_All", "SPI_Ambient", "SPI_Books", "SPI_Clothes", "SPI_DIY", "SPI_Elect", "SPI_Food", "SPI_Fresh", "SPI_Furniture", "SPI_HB", "SPI_NF", "SPI_ONF")
spi_embargo <- data.frame(
id = as.numeric(144:157),
embargo = as.Date(embargoes$SPI_embargo,"%d/%m/%y")
)
spi_df$id <- as.numeric(row.names(spi_df))
spi_all_show <- merge(spi_df, spi_embargo, by = "id", all = TRUE)
spi_all_show <- spi_all_show[order(spi_all_show$id),]
dates <- seq(as.Date("2006-12-01"), length=nrow(spi_all_show), by="months")
spi_all_show <- xts(x=spi_all_show, order.by=dates)
spi_all_showdf <- data.frame(date = index(spi_all_show), coredata(spi_all_show))
spi_all_showdf$embargo <- as.Date(spi_all_showdf$embargo)
spi_all_embargo_df <- spi_all_showdf %>%
filter(spi_all_showdf$date <= Sys.Date() & spi_all_showdf$embargo <= Sys.Date() | (spi_all_showdf$date <= "2019-01-31" & is.na(spi_all_showdf$embargo)))
spi_all_embargo_df$date <- as.Date(LastDayInMonth(spi_all_embargo_df$date))
dates <- seq(as.Date("2006-12-01"), length=nrow(spi_all_embargo_df), by="months")
dates <- LastDayInMonth(dates)
spi_all_embargo_xts <- xts(x=spi_all_embargo_df, order.by = dates) |
source("load_data.R")

df <- load_data()

# Histogram of global active power, drawn identically on whichever graphics
# device is currently active (screen or PNG).
# NOTE(review): "killowatts" in the original axis label was a typo for
# "kilowatts"; fixed here.
plot_global_active_power <- function(data) {
  par(mfrow = c(1, 1), mfcol = c(1, 1))
  hist(data$Global_active_power,
       col = "red",
       xlab = "Global Active Power (kilowatts)",
       ylab = "Frequency",
       main = "Global Active Power")
}

# Plot to the screen device first (the calls can't be combined because we
# can't set a size on dev.copy).
plot_global_active_power(df)

# Then render the same plot to a 480x480 PNG file.
png("plot1.png", width = 480, height = 480)
plot_global_active_power(df)
dev.off()
| /plot1.R | no_license | aviaz/ExData_Plotting1 | R | false | false | 531 | r | source("load_data.R")
df <- load_data()

# Histogram of global active power, drawn identically on whichever graphics
# device is currently active (screen or PNG).
# NOTE(review): "killowatts" in the original axis label was a typo for
# "kilowatts"; fixed here.
plot_global_active_power <- function(data) {
  par(mfrow = c(1, 1), mfcol = c(1, 1))
  hist(data$Global_active_power,
       col = "red",
       xlab = "Global Active Power (kilowatts)",
       ylab = "Frequency",
       main = "Global Active Power")
}

# Plot to the screen device first (the calls can't be combined because we
# can't set a size on dev.copy).
plot_global_active_power(df)

# Then render the same plot to a 480x480 PNG file.
png("plot1.png", width = 480, height = 480)
plot_global_active_power(df)
dev.off()
|
setwd('/Users/brycedietrich/finding_fauci_replication/')
library(MASS)
library(stargazer)
#re-estimate table 2
image_results<-read.csv('data/final_celebrity_results.csv',as.is=T)
#make fox baseline
image_results$network2<-factor(image_results$network,levels=c('fox','cnn','msnbc'))
#set week 9 to zero so the intercept is meaningful
image_results$week2<-image_results$week-9
#negative binomial regressions with offsets
mod1<-glm.nb(fauci~network2+offset(log(cc)),data=image_results)
mod2<-glm.nb(fauci~network2*week2+offset(log(cc)),data=image_results)
#subset image data to only include complete cases
image_results<-image_results[names(residuals(mod2)),]
#create ID
image_results$id<-paste(image_results$show,image_results$week,image_results$year,sep='_')
#load text restuls and create ID
text_results<-read.csv('data/final_caption_results.csv',as.is=T)
text_results$id<-paste(text_results$show,text_results$week,text_results$year,sep='_')
#merge results
results<-merge(image_results,text_results[,c('id','death_text','health_text')])
#set week 9 to zero so the intercept is meaningful
results$week2<-results$week-9
#make fox baseline
results$network2<-factor(results$network,levels=c('fox','cnn','msnbc'))
#create binary variable discussed on page 13 in the main text
my_shows<-unique(results$show)
for(my_show in my_shows){
results[results$show==my_show,'mentions_health2']<-ifelse(results[results$show==my_show,'health_text']>median(results[results$show==my_show,'health_text'],na.rm=T),1,0)
results[results$show==my_show,'mentions_death2']<-ifelse(results[results$show==my_show,'death_text']>median(results[results$show==my_show,'death_text'],na.rm=T),1,0)
}
#estimate negative binomial regression with offset
mod1<-glm.nb(fauci~network2*mentions_death2*mentions_health2+offset(log(cc)),data=results)
#export table
stargazer(mod1,intercept.bottom = F,order=c(1,2,3,4,5,6,8,7,9,10,11,12),title="Table 5: Are Dr. Anthony Fauci's Appearances Condidtioned on the Text?",dep.var.labels=c('Fauci Appearances'),covariate.labels=c('Constant','CNN','MSNBC',"'Death' Mentions","'Health' Mentions","CNN X 'Death' Mentions","CNN X 'Health' Mentions","MSNBC X 'Death' Mentions","MSNBC X 'Health' Mentions","'Death' Mentions X 'Health' Mentions","CNN X 'Death' Mentions X 'Health' Mentions","MSNBC X 'Death' Mentions X 'Health' Mentions"),type='html',out='output/table5.html')
| /code/table5.R | no_license | brycejdietrich/finding_fauci_replication | R | false | false | 2,387 | r | setwd('/Users/brycedietrich/finding_fauci_replication/')
# Replication code for Table 5: negative binomial models testing whether
# Dr. Fauci's on-screen appearances are conditioned on the caption text.
library(MASS)
library(stargazer)

# Image-based appearance counts per show-week (as used for Table 2).
image_results <- read.csv('data/final_celebrity_results.csv', as.is = TRUE)
# Make Fox the baseline network.
image_results$network2 <- factor(image_results$network, levels = c('fox', 'cnn', 'msnbc'))
# Set week 9 to zero so the intercept is meaningful.
image_results$week2 <- image_results$week - 9

# Fit the week-interaction model only to recover the rows with complete data.
# (The original script also fit the additive network-only model here, but that
# fit was never used before being overwritten below, so it is dropped.)
mod2 <- glm.nb(fauci ~ network2 * week2 + offset(log(cc)), data = image_results)
# Subset image data to only include complete cases.
image_results <- image_results[names(residuals(mod2)), ]

# Create a show-week-year ID for merging.
image_results$id <- paste(image_results$show, image_results$week, image_results$year, sep = '_')

# Load text (caption) results and create a matching ID.
text_results <- read.csv('data/final_caption_results.csv', as.is = TRUE)
text_results$id <- paste(text_results$show, text_results$week, text_results$year, sep = '_')

# Merge image and caption results on the ID.
results <- merge(image_results, text_results[, c('id', 'death_text', 'health_text')])

# Set week 9 to zero so the intercept is meaningful.
results$week2 <- results$week - 9
# Make Fox the baseline network.
results$network2 <- factor(results$network, levels = c('fox', 'cnn', 'msnbc'))

# Create the binary (above show-specific median) indicators discussed on
# page 13 of the main text.
my_shows <- unique(results$show)
for (my_show in my_shows) {
  show_rows <- results$show == my_show
  results[show_rows, 'mentions_health2'] <- ifelse(
    results[show_rows, 'health_text'] > median(results[show_rows, 'health_text'], na.rm = TRUE),
    1, 0
  )
  results[show_rows, 'mentions_death2'] <- ifelse(
    results[show_rows, 'death_text'] > median(results[show_rows, 'death_text'], na.rm = TRUE),
    1, 0
  )
}

# Negative binomial regression with a closed-caption exposure offset.
mod1 <- glm.nb(fauci ~ network2 * mentions_death2 * mentions_health2 + offset(log(cc)), data = results)

# Export the regression table.
# NOTE(review): "Condidtioned" in the original title was a typo for
# "Conditioned"; fixed here.
stargazer(mod1,
          intercept.bottom = FALSE,
          order = c(1, 2, 3, 4, 5, 6, 8, 7, 9, 10, 11, 12),
          title = "Table 5: Are Dr. Anthony Fauci's Appearances Conditioned on the Text?",
          dep.var.labels = c('Fauci Appearances'),
          covariate.labels = c('Constant', 'CNN', 'MSNBC', "'Death' Mentions", "'Health' Mentions", "CNN X 'Death' Mentions", "CNN X 'Health' Mentions", "MSNBC X 'Death' Mentions", "MSNBC X 'Health' Mentions", "'Death' Mentions X 'Health' Mentions", "CNN X 'Death' Mentions X 'Health' Mentions", "MSNBC X 'Death' Mentions X 'Health' Mentions"),
          type = 'html',
          out = 'output/table5.html')
stargazer(mod1,intercept.bottom = F,order=c(1,2,3,4,5,6,8,7,9,10,11,12),title="Table 5: Are Dr. Anthony Fauci's Appearances Condidtioned on the Text?",dep.var.labels=c('Fauci Appearances'),covariate.labels=c('Constant','CNN','MSNBC',"'Death' Mentions","'Health' Mentions","CNN X 'Death' Mentions","CNN X 'Health' Mentions","MSNBC X 'Death' Mentions","MSNBC X 'Health' Mentions","'Death' Mentions X 'Health' Mentions","CNN X 'Death' Mentions X 'Health' Mentions","MSNBC X 'Death' Mentions X 'Health' Mentions"),type='html',out='output/table5.html')
|
library(quanteda.textplots)## Semantic network
library(quanteda)
library(RColorBrewer)
library(dplyr)
mention_network <- function(df, top_n = 50){
dd <- lapply(df$mentions_screen_name, data.frame)
tmp <- list()
for(i in 1:length(dd)){
tmp[[i]] <- data.frame(from = df$screen_name[[i]], to = dd[[i]])
}
tmp <- do.call(rbind.data.frame, tmp)
colnames(tmp) <- c("from", "to")
df.network <- tmp
df.network <-df.network[complete.cases(df.network), ]
df.network$from <- paste0("@", df.network$from)
df.network$to <- paste0("@", df.network$to)
df.network$communication <- paste(df.network$from, df.network$to)
hash_dfm <- dfm(df.network$communication)
toptag <- names(topfeatures(hash_dfm, top_n)) # Most important mentions_screen_name; we dont want to plot every hashtag
tag_fcm <- fcm(hash_dfm) # Feature-occurance matrix which shows the network structure
topgat_fcm <- fcm_select(tag_fcm, pattern = toptag) # Filter results so that we plot only 50 top mentions_screen_name
##draw semantic network plot
quanteda.textplots::textplot_network(topgat_fcm, min_freq = 1, edge_color = "grey",vertex_color ="#538797")
}
| /R/mention_network.R | no_license | ossisirkka/ComTxt | R | false | false | 1,152 | r |
library(quanteda.textplots)## Semantic network
library(quanteda)
library(RColorBrewer)
library(dplyr)
mention_network <- function(df, top_n = 50){
dd <- lapply(df$mentions_screen_name, data.frame)
tmp <- list()
for(i in 1:length(dd)){
tmp[[i]] <- data.frame(from = df$screen_name[[i]], to = dd[[i]])
}
tmp <- do.call(rbind.data.frame, tmp)
colnames(tmp) <- c("from", "to")
df.network <- tmp
df.network <-df.network[complete.cases(df.network), ]
df.network$from <- paste0("@", df.network$from)
df.network$to <- paste0("@", df.network$to)
df.network$communication <- paste(df.network$from, df.network$to)
hash_dfm <- dfm(df.network$communication)
toptag <- names(topfeatures(hash_dfm, top_n)) # Most important mentions_screen_name; we dont want to plot every hashtag
tag_fcm <- fcm(hash_dfm) # Feature-occurance matrix which shows the network structure
topgat_fcm <- fcm_select(tag_fcm, pattern = toptag) # Filter results so that we plot only 50 top mentions_screen_name
##draw semantic network plot
quanteda.textplots::textplot_network(topgat_fcm, min_freq = 1, edge_color = "grey",vertex_color ="#538797")
}
|
\name{mletype1}
\alias{mletype1}
\title{Computing the maximum likelihood estimator (MLE) for the parameters of the statistical model fitted to a progressive type-I interval censoring scheme.}
\description{Computes the MLE of for the parameters of the model fitted to a progressive type-I interval censoring scheme with likelihood function
\deqn{l(\Theta)=\log L(\Theta) \propto \sum_{i=1}^{m}X_i \log \bigl[F(t_{i}{{;}}\Theta)-F(t_{i-1}{{;}}\Theta)\bigr]+\sum_{i=1}^{m}R_i\bigl[1-F(t_{i}{{;}}\Theta)\bigr],}
in which \eqn{F(.;\Theta)} is the family cumulative distribution function for \eqn{\Theta=(\theta_1,\dots,\theta_k)^T} provided that
\eqn{F(t_{0};\Theta)=0}.
}
\usage{mletype1(plan, param, start, cdf.expression = FALSE, pdf.expression = TRUE, cdf, pdf,
method = "Nelder-Mead", lb = 0, ub = Inf, level = 0.05)}
\arguments{
\item{plan}{Censoring plan for progressive type-I interval censoring scheme. It must be given as a \code{data.frame} that includes vector of upper bounds of the censoring times \code{T}, vector of number of failed items \code{X}, and vector of removed items in each interval \code{R}.}
\item{param}{Vector of the of the family parameter's names.}
\item{start}{Vector of the initial values.}
\item{cdf.expression}{Logical. That is \code{TRUE}, if there is a closed form expression for the cumulative distribution function.}
\item{pdf.expression}{Logical. That is \code{TRUE}, if there is a closed form expression for the probability density function.}
\item{cdf}{Expression of the cumulative distribution function.}
\item{pdf}{Expression of the probability density function.}
\item{method}{The method for the numerically optimization that includes one of \code{CG}, \code{Nelder-Mead}, \code{BFGS}, \code{L-BFGS-B}, \code{SANN}.}
\item{lb}{Lower bound of the family's support. That is zero by default.}
\item{ub}{Upper bound of the family's support. That is \code{Inf} by default.}
\item{level}{Significance level for constructing asymptotic confidence interval That is \code{0.05} by default for constructing a \code{95\%} confidence interval.}
}
\value{MLE, standard error of MLE, and asymptotic confidence interval for MLE.}
%\references{}
\author{Mahdi Teimouri}
\examples{
data(plasma, package="bccp")
plan <- data.frame(T = plasma$upper, X = plasma$X, P = plasma$P, R = plasma$R)
param <- c("lambda","beta")
mle <- c(1.4, 0.05)
pdf <- quote( lambda*(1-exp( -(x*beta)))^(lambda-1)*beta*exp( -(x*beta)) )
cdf <- quote( (1-exp( -(x*beta)))^lambda )
lb <- 0
ub <- Inf
level <- 0.05
mletype1(plan = plan, param = param, start = mle, cdf.expression = FALSE, pdf.expression = TRUE,
cdf = cdf, pdf = pdf, method = "Nelder-Mead", lb = lb, ub = ub, level = level)
}
| /man/mletype1.Rd | no_license | cran/bccp | R | false | false | 2,778 | rd | \name{mletype1}
\alias{mletype1}
\title{Computing the maximum likelihood estimator (MLE) for the parameters of the statistical model fitted to a progressive type-I interval censoring scheme.}
\description{Computes the MLE of for the parameters of the model fitted to a progressive type-I interval censoring scheme with likelihood function
\deqn{l(\Theta)=\log L(\Theta) \propto \sum_{i=1}^{m}X_i \log \bigl[F(t_{i}{{;}}\Theta)-F(t_{i-1}{{;}}\Theta)\bigr]+\sum_{i=1}^{m}R_i\bigl[1-F(t_{i}{{;}}\Theta)\bigr],}
in which \eqn{F(.;\Theta)} is the family cumulative distribution function for \eqn{\Theta=(\theta_1,\dots,\theta_k)^T} provided that
\eqn{F(t_{0};\Theta)=0}.
}
\usage{mletype1(plan, param, start, cdf.expression = FALSE, pdf.expression = TRUE, cdf, pdf,
method = "Nelder-Mead", lb = 0, ub = Inf, level = 0.05)}
\arguments{
\item{plan}{Censoring plan for progressive type-I interval censoring scheme. It must be given as a \code{data.frame} that includes vector of upper bounds of the censoring times \code{T}, vector of number of failed items \code{X}, and vector of removed items in each interval \code{R}.}
\item{param}{Vector of the of the family parameter's names.}
\item{start}{Vector of the initial values.}
\item{cdf.expression}{Logical. That is \code{TRUE}, if there is a closed form expression for the cumulative distribution function.}
\item{pdf.expression}{Logical. That is \code{TRUE}, if there is a closed form expression for the probability density function.}
\item{cdf}{Expression of the cumulative distribution function.}
\item{pdf}{Expression of the probability density function.}
\item{method}{The method for the numerically optimization that includes one of \code{CG}, \code{Nelder-Mead}, \code{BFGS}, \code{L-BFGS-B}, \code{SANN}.}
\item{lb}{Lower bound of the family's support. That is zero by default.}
\item{ub}{Upper bound of the family's support. That is \code{Inf} by default.}
\item{level}{Significance level for constructing asymptotic confidence interval That is \code{0.05} by default for constructing a \code{95\%} confidence interval.}
}
\value{MLE, standard error of MLE, and asymptotic confidence interval for MLE.}
%\references{}
\author{Mahdi Teimouri}
\examples{
data(plasma, package="bccp")
plan <- data.frame(T = plasma$upper, X = plasma$X, P = plasma$P, R = plasma$R)
param <- c("lambda","beta")
mle <- c(1.4, 0.05)
pdf <- quote( lambda*(1-exp( -(x*beta)))^(lambda-1)*beta*exp( -(x*beta)) )
cdf <- quote( (1-exp( -(x*beta)))^lambda )
lb <- 0
ub <- Inf
level <- 0.05
mletype1(plan = plan, param = param, start = mle, cdf.expression = FALSE, pdf.expression = TRUE,
cdf = cdf, pdf = pdf, method = "Nelder-Mead", lb = lb, ub = ub, level = level)
}
|
# Read data file
householdData <- read.table("household_power_consumption.txt", sep = ";", header = TRUE)
# Create Date-Time variable by concatenating date and time, and then converting it to a date-time field
householdData$DateTime <- paste(householdData$Date, householdData$Time)
householdData$DateTime <- strptime(householdData$DateTime, format="%d/%m/%Y %H:%M:%S")
# Filter (subset) on DateTime between 01-02-2077 and 02-02-2007
requiredHouseholdData <- subset(householdData, DateTime >= strptime("01/02/2007 00:00:00", "%d/%m/%Y %H:%M:%S") & DateTime < strptime("03/02/2007 00:00:00", "%d/%m/%Y %H:%M:%S"))
# Convert Global Active Power to a number
# No need to take out NAs (?) as there are none in the Global Active Power column for these dates
requiredHouseholdData$Global_active_power <- as.numeric(as.character(requiredHouseholdData$Global_active_power))
# Create png-device
png(filename="plot2.png", width=480, height=480, units="px")
# Create Plot, but without the data
plot(requiredHouseholdData$DateTime, requiredHouseholdData$Global_active_power, type="n", xlab="", ylab="Global Active Power (kilowatts)")
# Add the data through the lines() function
lines(requiredHouseholdData$DateTime, requiredHouseholdData$Global_active_power)
# Close png-device
dev.off() | /plot2.R | no_license | vanderq/ExData_Plotting1 | R | false | false | 1,281 | r | # Read data file
householdData <- read.table("household_power_consumption.txt", sep = ";", header = TRUE)
# Create Date-Time variable by concatenating date and time, and then converting it to a date-time field
householdData$DateTime <- paste(householdData$Date, householdData$Time)
householdData$DateTime <- strptime(householdData$DateTime, format="%d/%m/%Y %H:%M:%S")
# Filter (subset) on DateTime between 01-02-2077 and 02-02-2007
requiredHouseholdData <- subset(householdData, DateTime >= strptime("01/02/2007 00:00:00", "%d/%m/%Y %H:%M:%S") & DateTime < strptime("03/02/2007 00:00:00", "%d/%m/%Y %H:%M:%S"))
# Convert Global Active Power to a number
# No need to take out NAs (?) as there are none in the Global Active Power column for these dates
requiredHouseholdData$Global_active_power <- as.numeric(as.character(requiredHouseholdData$Global_active_power))
# Create png-device
png(filename="plot2.png", width=480, height=480, units="px")
# Create Plot, but without the data
plot(requiredHouseholdData$DateTime, requiredHouseholdData$Global_active_power, type="n", xlab="", ylab="Global Active Power (kilowatts)")
# Add the data through the lines() function
lines(requiredHouseholdData$DateTime, requiredHouseholdData$Global_active_power)
# Close png-device
dev.off() |
source <- "D:/exdata_data_household_power_consumption/household_power_consumption.txt"
data <- read.table(dsource, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subSetData$Global_active_power)
globalReactivePower <- as.numeric(subSetData$Global_reactive_power)
voltage <- as.numeric(subSetData$Voltage)
sm1 <- as.numeric(subSetData$Sub_metering_1)
sm2 <- as.numeric(subSetData$Sub_metering_2)
sm3 <- as.numeric(subSetData$Sub_metering_3)
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power", cex=0.2)
plot(datetime, voltage, type="l", xlab="datetime", ylab="Voltage")
plot(datetime, sm1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, sm2, type="l", col="red")
lines(datetime, sm3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=, lwd=2.5, col=c("black", "red", "blue"), bty="o")
plot(datetime, globalReactivePower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off() | /plot4.R | no_license | Praveen-Raaj-T/Exploratory-Data-Analysis- | R | false | false | 1,256 | r | source <- "D:/exdata_data_household_power_consumption/household_power_consumption.txt"
data <- read.table(dsource, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subSetData$Global_active_power)
globalReactivePower <- as.numeric(subSetData$Global_reactive_power)
voltage <- as.numeric(subSetData$Voltage)
sm1 <- as.numeric(subSetData$Sub_metering_1)
sm2 <- as.numeric(subSetData$Sub_metering_2)
sm3 <- as.numeric(subSetData$Sub_metering_3)
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power", cex=0.2)
plot(datetime, voltage, type="l", xlab="datetime", ylab="Voltage")
plot(datetime, sm1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, sm2, type="l", col="red")
lines(datetime, sm3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=, lwd=2.5, col=c("black", "red", "blue"), bty="o")
plot(datetime, globalReactivePower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off() |
rm(list = ls())
library(Daniel)
library(dplyr)
library(nnet)
CalcCImultinom <- function(fit)
{
s <- summary(fit)
coef <- s$coefficients
ses <- s$standard.errors
ci.1 <- coef[1,2] + c(-1, 1)*1.96*ses[1, 2]
ci.2 <- coef[2,2] + c(-1, 1)*1.96*ses[2, 2]
return(rbind(ci.1,ci.2))
}
#key
# A, B,C,D,E,F - betaE[2] = 1.25, 1.5, 1.75, 2, 2.25, 2.5
# A,B,C, D, E,F - betaU = 2,3,4,5,6,7
patt <- "EC"
beta0 <- c(-6, -5)
betaE <- c(log(2.5), log(2.25))
betaU <- c(log(4), log(1/1.5))
sigmaU <- 1
n.sample <- 50000
n.sim <- 1000
AllY <- matrix(nr = n.sim, nc = 3)
sace.diff1 <- sace.diff2 <- ace.diff1 <- ace.diff2 <-
sace.or1 <- sace.or2 <- ace.or1 <- ace.or2 <-
or.approx1 <- or.approx2 <- or.approx.true1 <- or.approx.true2 <-
pop.never.s1 <- pop.never.s2 <- vector(length = n.sim)
ci1 <- ci2 <- matrix(nr = n.sim, nc = 2)
for (j in 1:n.sim)
{
CatIndex(j)
# Simulate genetic score
U <- rnorm(n.sample, 0, sd = sigmaU)
#### Calcualte probabilites for each subtype with and without the exposure ####
e1E0 <- exp(beta0[1] + betaU[1]*U)
e1E1 <- exp(beta0[1] + betaE[1] + betaU[1]*U)
e2E0 <- exp(beta0[2] + betaU[2]*U)
e2E1 <- exp(beta0[2] + betaE[2] + betaU[2]*U)
prE0Y1 <- e1E0/(1 + e1E0 + e2E0)
prE0Y2 <- e2E0/(1 + e1E0 + e2E0)
prE1Y1 <- e1E1/(1 + e1E1 + e2E1)
prE1Y2 <- e2E1/(1 + e1E1 + e2E1)
probsE0 <- cbind(prE0Y1, prE0Y2, 1 - prE0Y1 - prE0Y2)
probsE1 <- cbind(prE1Y1, prE1Y2, 1 - prE1Y1 - prE1Y2)
# Simulate subtypes #
Yctrl <- Ytrt <- vector(length = n.sample)
X <- rbinom(n = n.sample, 1, 0.5)
for (i in 1:n.sample)
{
Yctrl[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE0[i, ])
Ytrt[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE1[i, ])
}
Y <- (1-X)*Yctrl + X*Ytrt
AllY[j, ] <- table(Y)
Y1ctrl <- Yctrl==1
Y1trt <- Ytrt==1
Y2ctrl <- Yctrl==2
Y2trt <- Ytrt==2
pop.never.s1[j] <- mean(Y1ctrl==0 & Y1trt==0)
pop.never.s2[j] <- mean(Y2ctrl==0 & Y2trt==0)
# estimate causal parameters
sace.diff1[j] <- mean((Y1trt - Y1ctrl)[Y2ctrl==0 & Y2trt==0])
sace.diff2[j]<- mean((Y2trt - Y2ctrl)[Y1ctrl==0 & Y1trt==0])
ace.diff1[j] <- mean((Y1trt[Y2trt==0 & X==1]) - mean(Y1ctrl[Y2ctrl==0 & X==0]))
ace.diff2[j] <- mean((Y2trt[Y1trt==0 & X==1]) - mean(Y2ctrl[Y1ctrl==0 & X==0]))
# Ypo <- c(Yctrl, Ytrt)
# Upo <- rep(U,2)
# Xpo <- rep(x = c(0,1), each = n.sample)
# fit.full.po <- multinom(Ypo~ Xpo + Upo)
# fit.po <- multinom(Ypo~ Xpo)
fit <- multinom(Y~ X)
cis <- CalcCImultinom(fit)
ci1[j, ] <- cis[1, ]
ci2[j, ] <- cis[2, ]
Y1only <- Y[Y<2]
X1only <- X[Y<2]
U1only <-U[Y<2]
Y2only <- Y[Y!=1]
X2only <- X[Y!=1]
U2only <-U[Y!=1]
Y2only[Y2only>0] <- 1
vec.for.or.1only <- c(sum((1 - Y1only) * (1 - X1only)) , sum(Y1only * (1 - X1only)),
sum((1 - Y1only) * X1only), sum(Y1only*X1only))
vec.for.or.2only <- c(sum((1 - Y2only) * (1 - X2only)) , sum(Y2only * (1 - X2only)),
sum((1 - Y2only) * X2only), sum(Y2only*X2only))
ace.or1[j] <- CalcOR(vec.for.or.1only)
ace.or2[j] <- CalcOR(vec.for.or.2only)
Y1only.sace <- Y[Ytrt <2 & Yctrl < 2]
X1only.sace <- X[Ytrt <2 & Yctrl < 2]
U1only.sace <-U[Ytrt <2 & Yctrl < 2]
Y2only.sace <- Y[Ytrt!=1 & Y1ctrl!=1]
X2only.sace <- X[Ytrt!=1 & Y1ctrl!=1]
U2only.sace <-U[Ytrt!=1 & Y1ctrl!=1]
Y2only.sace[Y2only.sace>0] <- 1
vec.for.or.sace1 <- c(sum((1 - Y1only.sace) * (1 - X1only.sace)) , sum(Y1only.sace * (1 - X1only.sace)),
sum((1 - Y1only.sace) * X1only.sace), sum(Y1only.sace*X1only.sace))
vec.for.or.sace2 <- c(sum((1 - Y2only.sace) * (1 - X2only.sace)) , sum(Y2only.sace * (1 - X2only.sace)),
sum((1 - Y2only.sace) * X2only.sace), sum(Y2only.sace*X2only.sace))
sace.or1[j] <- CalcOR(vec.for.or.sace1)
sace.or2[j] <- CalcOR(vec.for.or.sace2)
Y1 <- Y==1
Y2 <- Y==2
fit.logistic.Y1 <- glm(Y1 ~ X, family = "binomial")
fit.logistic.true.Y1 <- glm(Y1 ~ X + U, family = "binomial")
fit.logistic.Y2 <- glm(Y2 ~ X, family = "binomial")
fit.logistic.true.Y2 <- glm(Y2 ~ X + U, family = "binomial")
or.approx1[j] <- exp(coef(fit.logistic.Y1)[2])
or.approx.true1[j] <- exp(coef(fit.logistic.true.Y1)[2])
or.approx2[j] <- exp(coef(fit.logistic.Y2)[2])
or.approx.true2[j] <- exp(coef(fit.logistic.true.Y2)[2])
}
save.image(paste0("CMPEn50krareScen19a",patt,".RData"))
| /Simulations/Scripts/R/Rare/Scenario 19a/CMPEn50KrareScen19aEC.R | no_license | yadevi/CausalMPE | R | false | false | 4,224 | r | rm(list = ls())
library(Daniel)
library(dplyr)
library(nnet)
CalcCImultinom <- function(fit)
{
s <- summary(fit)
coef <- s$coefficients
ses <- s$standard.errors
ci.1 <- coef[1,2] + c(-1, 1)*1.96*ses[1, 2]
ci.2 <- coef[2,2] + c(-1, 1)*1.96*ses[2, 2]
return(rbind(ci.1,ci.2))
}
#key
# A, B,C,D,E,F - betaE[2] = 1.25, 1.5, 1.75, 2, 2.25, 2.5
# A,B,C, D, E,F - betaU = 2,3,4,5,6,7
patt <- "EC"
beta0 <- c(-6, -5)
betaE <- c(log(2.5), log(2.25))
betaU <- c(log(4), log(1/1.5))
sigmaU <- 1
n.sample <- 50000
n.sim <- 1000
AllY <- matrix(nr = n.sim, nc = 3)
sace.diff1 <- sace.diff2 <- ace.diff1 <- ace.diff2 <-
sace.or1 <- sace.or2 <- ace.or1 <- ace.or2 <-
or.approx1 <- or.approx2 <- or.approx.true1 <- or.approx.true2 <-
pop.never.s1 <- pop.never.s2 <- vector(length = n.sim)
ci1 <- ci2 <- matrix(nr = n.sim, nc = 2)
for (j in 1:n.sim)
{
CatIndex(j)
# Simulate genetic score
U <- rnorm(n.sample, 0, sd = sigmaU)
#### Calcualte probabilites for each subtype with and without the exposure ####
e1E0 <- exp(beta0[1] + betaU[1]*U)
e1E1 <- exp(beta0[1] + betaE[1] + betaU[1]*U)
e2E0 <- exp(beta0[2] + betaU[2]*U)
e2E1 <- exp(beta0[2] + betaE[2] + betaU[2]*U)
prE0Y1 <- e1E0/(1 + e1E0 + e2E0)
prE0Y2 <- e2E0/(1 + e1E0 + e2E0)
prE1Y1 <- e1E1/(1 + e1E1 + e2E1)
prE1Y2 <- e2E1/(1 + e1E1 + e2E1)
probsE0 <- cbind(prE0Y1, prE0Y2, 1 - prE0Y1 - prE0Y2)
probsE1 <- cbind(prE1Y1, prE1Y2, 1 - prE1Y1 - prE1Y2)
# Simulate subtypes #
Yctrl <- Ytrt <- vector(length = n.sample)
X <- rbinom(n = n.sample, 1, 0.5)
for (i in 1:n.sample)
{
Yctrl[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE0[i, ])
Ytrt[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE1[i, ])
}
Y <- (1-X)*Yctrl + X*Ytrt
AllY[j, ] <- table(Y)
Y1ctrl <- Yctrl==1
Y1trt <- Ytrt==1
Y2ctrl <- Yctrl==2
Y2trt <- Ytrt==2
pop.never.s1[j] <- mean(Y1ctrl==0 & Y1trt==0)
pop.never.s2[j] <- mean(Y2ctrl==0 & Y2trt==0)
# estimate causal parameters
sace.diff1[j] <- mean((Y1trt - Y1ctrl)[Y2ctrl==0 & Y2trt==0])
sace.diff2[j]<- mean((Y2trt - Y2ctrl)[Y1ctrl==0 & Y1trt==0])
ace.diff1[j] <- mean((Y1trt[Y2trt==0 & X==1]) - mean(Y1ctrl[Y2ctrl==0 & X==0]))
ace.diff2[j] <- mean((Y2trt[Y1trt==0 & X==1]) - mean(Y2ctrl[Y1ctrl==0 & X==0]))
# Ypo <- c(Yctrl, Ytrt)
# Upo <- rep(U,2)
# Xpo <- rep(x = c(0,1), each = n.sample)
# fit.full.po <- multinom(Ypo~ Xpo + Upo)
# fit.po <- multinom(Ypo~ Xpo)
fit <- multinom(Y~ X)
cis <- CalcCImultinom(fit)
ci1[j, ] <- cis[1, ]
ci2[j, ] <- cis[2, ]
Y1only <- Y[Y<2]
X1only <- X[Y<2]
U1only <-U[Y<2]
Y2only <- Y[Y!=1]
X2only <- X[Y!=1]
U2only <-U[Y!=1]
Y2only[Y2only>0] <- 1
vec.for.or.1only <- c(sum((1 - Y1only) * (1 - X1only)) , sum(Y1only * (1 - X1only)),
sum((1 - Y1only) * X1only), sum(Y1only*X1only))
vec.for.or.2only <- c(sum((1 - Y2only) * (1 - X2only)) , sum(Y2only * (1 - X2only)),
sum((1 - Y2only) * X2only), sum(Y2only*X2only))
ace.or1[j] <- CalcOR(vec.for.or.1only)
ace.or2[j] <- CalcOR(vec.for.or.2only)
Y1only.sace <- Y[Ytrt <2 & Yctrl < 2]
X1only.sace <- X[Ytrt <2 & Yctrl < 2]
U1only.sace <-U[Ytrt <2 & Yctrl < 2]
Y2only.sace <- Y[Ytrt!=1 & Y1ctrl!=1]
X2only.sace <- X[Ytrt!=1 & Y1ctrl!=1]
U2only.sace <-U[Ytrt!=1 & Y1ctrl!=1]
Y2only.sace[Y2only.sace>0] <- 1
vec.for.or.sace1 <- c(sum((1 - Y1only.sace) * (1 - X1only.sace)) , sum(Y1only.sace * (1 - X1only.sace)),
sum((1 - Y1only.sace) * X1only.sace), sum(Y1only.sace*X1only.sace))
vec.for.or.sace2 <- c(sum((1 - Y2only.sace) * (1 - X2only.sace)) , sum(Y2only.sace * (1 - X2only.sace)),
sum((1 - Y2only.sace) * X2only.sace), sum(Y2only.sace*X2only.sace))
sace.or1[j] <- CalcOR(vec.for.or.sace1)
sace.or2[j] <- CalcOR(vec.for.or.sace2)
Y1 <- Y==1
Y2 <- Y==2
fit.logistic.Y1 <- glm(Y1 ~ X, family = "binomial")
fit.logistic.true.Y1 <- glm(Y1 ~ X + U, family = "binomial")
fit.logistic.Y2 <- glm(Y2 ~ X, family = "binomial")
fit.logistic.true.Y2 <- glm(Y2 ~ X + U, family = "binomial")
or.approx1[j] <- exp(coef(fit.logistic.Y1)[2])
or.approx.true1[j] <- exp(coef(fit.logistic.true.Y1)[2])
or.approx2[j] <- exp(coef(fit.logistic.Y2)[2])
or.approx.true2[j] <- exp(coef(fit.logistic.true.Y2)[2])
}
save.image(paste0("CMPEn50krareScen19a",patt,".RData"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/STAR2bSMRT_NRXN.R
\name{STAR2bSMRT_NRXN}
\alias{STAR2bSMRT_NRXN}
\title{STAR2bSMRT_NRXN
the main function of STAR2bSMRT specially designed for NRXN1 alpha splicing
identification}
\usage{
STAR2bSMRT_NRXN(genomeDir, genomeFasta, LRphqv = NULL, LRflnc = NULL,
LRnfl = NULL, SR1, SR2 = NULL, useSJout = TRUE,
adjustNCjunc = FALSE, thresSR, thresDis, outputDir,
fixedMatchedLS = FALSE, fuzzyMatch = 100, chrom = NULL, s = 0,
e = Inf, cores = 10)
}
\arguments{
\item{genomeDir}{character value indicating the directory of STAR genome
index for both STARlong and STARshort read mapping}
\item{genomeFasta}{character value indicating the fasta file of genome
reference}
\item{SR1}{character value indicating the short read file in fastq format:
single-end or paired-end R1}
\item{SR2}{character value indicating the short read file in fastq format:
paired-end R2}
\item{useSJout}{boolean value indicating whether to use the STARshort
generated SJ.out.tab for splicing junction. If FALSE, STAR2bSMRT infer
the splicing junction from bam files. By default, FALSE.}
\item{adjustNCjunc}{boolean value indicating whether to minimize the
non-canonical junction sites.}
\item{thresSR}{a vector of integers indicating the searching range for the
number of short reads which support the splicing junction sites.}
\item{thresDis}{a vector of integers indicating the searching range for the
tolerance distance between short read-derived splicing junction and long
read-derived junction. STAR2bSMRT will correct the long read-derived
junction to the short read-derived junction, if more short reads than
defined thresSR support that short read-derived junction, and the distance
between long and short read junctions is shorter than the defined thresDis.}
\item{outputDir}{character value indicating the direcotry where results are
saved.}
\item{fixedMatchedLS}{boolean value indicating how often the distance is
calculate betwen long read and short read-derived junction sites. If TRUE,
only calculated once at the very beginning, which may save running time;
otherwise, calculate repeatly after every long read correction.
By default, FALSE.}
\item{fuzzyMatch}{integer value indicating the distance for fuzzyMatch}
\item{chrom}{character value indicating the chromosome of interest. By default,
STAR2bSMRT works on the whole genome.}
\item{s}{integeter value indicating the start position of the transcript of
interest. This is useful for target Isoseq sequencing.}
\item{e}{integeter value indicating the end position of the transcript of
interest. This is useful for target Isoseq sequencing.}
\item{cores}{integer value indicating the number of cores for parallel computing}
\item{phqv}{character value indicating the Isoseq polished high QV trascripts
in fasta/fastq, where
read counts for each transcript consensus should be saved in transcript names}
\item{flnc}{character value indicating the Isoseq full-length non-chimeric
reads in fasta/fastq format}
\item{nfl}{character value indicating the Isoseq non-full-length reads in
fasta/fastq format}
}
\description{
STAR2bSMRT_NRXN
the main function of STAR2bSMRT specially designed for NRXN1 alpha splicing
identification
}
| /man/STAR2bSMRT_NRXN.Rd | no_license | zhushijia/STAR2bSMRT | R | false | true | 3,292 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/STAR2bSMRT_NRXN.R
\name{STAR2bSMRT_NRXN}
\alias{STAR2bSMRT_NRXN}
\title{STAR2bSMRT_NRXN
the main function of STAR2bSMRT specially designed for NRXN1 alpha splicing
identification}
\usage{
STAR2bSMRT_NRXN(genomeDir, genomeFasta, LRphqv = NULL, LRflnc = NULL,
LRnfl = NULL, SR1, SR2 = NULL, useSJout = TRUE,
adjustNCjunc = FALSE, thresSR, thresDis, outputDir,
fixedMatchedLS = FALSE, fuzzyMatch = 100, chrom = NULL, s = 0,
e = Inf, cores = 10)
}
\arguments{
\item{genomeDir}{character value indicating the directory of STAR genome
index for both STARlong and STARshort read mapping}
\item{genomeFasta}{character value indicating the fasta file of genome
reference}
\item{SR1}{character value indicating the short read file in fastq format:
single-end or paired-end R1}
\item{SR2}{character value indicating the short read file in fastq format:
paired-end R2}
\item{useSJout}{boolean value indicating whether to use the STARshort
generated SJ.out.tab for splicing junction. If FALSE, STAR2bSMRT infer
the splicing junction from bam files. By default, FALSE.}
\item{adjustNCjunc}{boolean value indicating whether to minimize the
non-canonical junction sites.}
\item{thresSR}{a vector of integers indicating the searching range for the
number of short reads which support the splicing junction sites.}
\item{thresDis}{a vector of integers indicating the searching range for the
tolerance distance between short read-derived splicing junction and long
read-derived junction. STAR2bSMRT will correct the long read-derived
junction to the short read-derived junction, if more short reads than
defined thresSR support that short read-derived junction, and the distance
between long and short read junctions is shorter than the defined thresDis.}
\item{outputDir}{character value indicating the direcotry where results are
saved.}
\item{fixedMatchedLS}{boolean value indicating how often the distance is
calculate betwen long read and short read-derived junction sites. If TRUE,
only calculated once at the very beginning, which may save running time;
otherwise, calculate repeatly after every long read correction.
By default, FALSE.}
\item{fuzzyMatch}{integer value indicating the distance for fuzzyMatch}
\item{chrom}{character value indicating the chromosome of interest. By default,
STAR2bSMRT works on the whole genome.}
\item{s}{integeter value indicating the start position of the transcript of
interest. This is useful for target Isoseq sequencing.}
\item{e}{integeter value indicating the end position of the transcript of
interest. This is useful for target Isoseq sequencing.}
\item{cores}{integer value indicating the number of cores for parallel computing}
\item{phqv}{character value indicating the Isoseq polished high QV trascripts
in fasta/fastq, where
read counts for each transcript consensus should be saved in transcript names}
\item{flnc}{character value indicating the Isoseq full-length non-chimeric
reads in fasta/fastq format}
\item{nfl}{character value indicating the Isoseq non-full-length reads in
fasta/fastq format}
}
\description{
STAR2bSMRT_NRXN
the main function of STAR2bSMRT specially designed for NRXN1 alpha splicing
identification
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TCGADownload.R
\name{GDCdownload}
\alias{GDCdownload}
\title{Download GDC data}
\usage{
GDCdownload(query, token.file, method = "api", directory = "GDCdata",
chunks.per.download = NULL)
}
\arguments{
\item{query}{A query for GDCquery function}
\item{token.file}{Token file to download controled data (only for method = "client")}
\item{method}{Uses the API (POST method) or gdc client tool. Options "api", "client".
API is faster, but the data might get corrupted in the download, and it might need to be executed again}
\item{directory}{Directory/Folder where the data was downloaded. Default: GDCdata}
\item{chunks.per.download}{This will make the API method only download n (chunks.per.download) files at a time.
This may reduce the download problems when the data size is too large. Expected a integer number (example chunks.per.download = 6)}
}
\value{
Shows the output from the GDC transfer tools
}
\description{
Uses GDC API or GDC transfer tool to download gdc data
The user can use query argument
The data from query will be save in a folder: project/data.category
}
\examples{
query <- GDCquery(project = "TCGA-ACC",
data.category = "Copy number variation",
legacy = TRUE,
file.type = "hg19.seg",
barcode = c("TCGA-OR-A5LR-01A-11D-A29H-01", "TCGA-OR-A5LJ-10A-01D-A29K-01"))
# data will be saved in GDCdata/TCGA-ACC/legacy/Copy_number_variation/Copy_number_segmentation
GDCdownload(query, method = "api")
query <- GDCquery(project = "TARGET-AML",
data.category = "Transcriptome Profiling",
data.type = "miRNA Expression Quantification",
workflow.type = "BCGSC miRNA Profiling",
barcode = c("TARGET-20-PARUDL-03A-01R","TARGET-20-PASRRB-03A-01R"))
# data will be saved in:
# example_data_dir/TARGET-AML/harmonized/Transcriptome_Profiling/miRNA_Expression_Quantification
GDCdownload(query, method = "client", directory = "example_data_dir")
query <- GDCquery(project = "TCGA-COAD", data.category = "Clinical")
GDCdownload(query, chunks.per.download = 200)
\dontrun{
acc.gbm <- GDCquery(project = c("TCGA-ACC","TCGA-GBM"),
data.category = "Transcriptome Profiling",
data.type = "Gene Expression Quantification",
workflow.type = "HTSeq - Counts")
GDCdownload(acc.gbm, method = "api", directory = "example", chunks.per.download = 50)
}
}
| /man/GDCdownload.Rd | no_license | Juggernaut93/TCGAbiolinks | R | false | true | 2,553 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TCGADownload.R
\name{GDCdownload}
\alias{GDCdownload}
\title{Download GDC data}
\usage{
GDCdownload(query, token.file, method = "api", directory = "GDCdata",
chunks.per.download = NULL)
}
\arguments{
\item{query}{A query for GDCquery function}
\item{token.file}{Token file to download controled data (only for method = "client")}
\item{method}{Uses the API (POST method) or gdc client tool. Options "api", "client".
API is faster, but the data might get corrupted in the download, and it might need to be executed again}
\item{directory}{Directory/Folder where the data was downloaded. Default: GDCdata}
\item{chunks.per.download}{This will make the API method only download n (chunks.per.download) files at a time.
This may reduce the download problems when the data size is too large. Expected a integer number (example chunks.per.download = 6)}
}
\value{
Shows the output from the GDC transfer tools
}
\description{
Uses GDC API or GDC transfer tool to download gdc data
The user can use query argument
The data from query will be save in a folder: project/data.category
}
\examples{
query <- GDCquery(project = "TCGA-ACC",
data.category = "Copy number variation",
legacy = TRUE,
file.type = "hg19.seg",
barcode = c("TCGA-OR-A5LR-01A-11D-A29H-01", "TCGA-OR-A5LJ-10A-01D-A29K-01"))
# data will be saved in GDCdata/TCGA-ACC/legacy/Copy_number_variation/Copy_number_segmentation
GDCdownload(query, method = "api")
query <- GDCquery(project = "TARGET-AML",
data.category = "Transcriptome Profiling",
data.type = "miRNA Expression Quantification",
workflow.type = "BCGSC miRNA Profiling",
barcode = c("TARGET-20-PARUDL-03A-01R","TARGET-20-PASRRB-03A-01R"))
# data will be saved in:
# example_data_dir/TARGET-AML/harmonized/Transcriptome_Profiling/miRNA_Expression_Quantification
GDCdownload(query, method = "client", directory = "example_data_dir")
query <- GDCquery(project = "TCGA-COAD", data.category = "Clinical")
GDCdownload(query, chunks.per.download = 200)
\dontrun{
acc.gbm <- GDCquery(project = c("TCGA-ACC","TCGA-GBM"),
data.category = "Transcriptome Profiling",
data.type = "Gene Expression Quantification",
workflow.type = "HTSeq - Counts")
GDCdownload(acc.gbm, method = "api", directory = "example", chunks.per.download = 50)
}
}
|
library(ggplot2)
ggplot(diamonds) # if only the dataset is known.
ggplot(diamonds, aes(x=carat)) # if only X-axis is known. The Y-axis can be specified in respective geoms.
ggplot(diamonds, aes(x=carat, y=price)) # if both X and Y axes are fixed for all layers.
ggplot(diamonds, aes(x=carat, color=cut)) # Each category of the 'cut' variable will now have a distinct color, once a geom is added.
ggplot(diamonds, aes(x=carat), color="steelblue")
ggplot(diamonds, aes(x=carat, y=price, color=cut)) + geom_point() + geom_smooth()
# Adding scatterplot geom (layer1) and smoothing geom (layer2).
ggplot(diamonds) + geom_point(aes(x=carat, y=price, color=cut)) + geom_smooth(aes(x=carat, y=price, color=cut)) # Same as above but specifying the aesthetics inside the geoms.
ggplot(diamonds) + geom_point(aes(x=carat, y=price, color=cut)) + geom_smooth(aes(x=carat, y=price)) # Remove color from geom_smooth
ggplot(diamonds, aes(x=carat, y=price)) + geom_point(aes(color=cut)) + geom_smooth() # same but simpler
ggplot(diamonds, aes(x=carat, y=price, color=cut, shape=color)) + geom_point()
library(ggplot2)
gg <- ggplot(diamonds, aes(x=carat, y=price, color=cut)) + geom_point() + labs(title="Scatterplot", x="Carat", y="Price") # add axis lables and plot title.
print(gg)
gg1 <- gg + theme(plot.title=element_text(size=30, face="bold"),
axis.text.x=element_text(size=15),
axis.text.y=element_text(size=15),
axis.title.x=element_text(size=25),
axis.title.y=element_text(size=25)) +
scale_color_discrete(name="Cut of diamonds")# add title and axis text, change legend title.
print(gg1)
scale_fill_continuous(name="legend title")
gg1 + facet_wrap( ~ cut, ncol=3)
gg1 + facet_wrap(color ~ cut)
gg1 + facet_wrap(color ~ cut, scales="free")
gg1 + facet_grid(color ~ cut)
#it needs time variable in order to create a plot
library(ggfortify)
install.packages("ggfortify")
autoplot(AirPassengers) + labs(title="AirPassengers")# where AirPassengers is a 'ts' object
install.packages("zoo")
data(economics, package="ggplot2")
economics <- data.frame(economics)
ggplot(economics) + geom_line(aes(x=date, y=pce, color="pcs")) + geom_line(aes(x=date, y=unemploy,
col="unemploy")) + scale_color_discrete(name="Legend") + labs(title="Economics")
#CAN BE USEFUL FOR THE COURSEWORK###################################################
plot1 <- ggplot(mtcars, aes(x=cyl)) + geom_bar() + labs(title="Frequency bar chart") # Y axis derived from counts of X item
print(plot1)
df <- data.frame(var=c("a", "b", "c"), nums=c(1:3))
plot2 <- ggplot(df, aes(x=var, y=nums)) + geom_bar(stat = "identity") # Y axis is explicit. 'stat=identity'
print(plot2)
library(gridExtra)
grid.arrange(plot1, plot2, ncol=2)
#################################################################################
df <- data.frame(var=c("a", "b", "c"), nums=c(1:3))
ggplot(df, aes(x=var, y=nums)) + geom_bar(stat = "identity") + coord_flip() + labs(title="Coordinates are flipped")
ggplot(diamonds, aes(x=carat, y=price, color=cut)) + geom_point() + geom_smooth() +
coord_cartesian(ylim=c(0, 10000)) + labs(title="Coord_cartesian zoomed in!")
ggplot(diamonds, aes(x=carat, y=price, color=cut)) + geom_point() + geom_smooth() + ylim(c(0, 10000)) +
labs(title="Datapoints deleted: Note the change in smoothing lines!")
#> Warning messages:
#> 1: Removed 5222 rows containing non-finite values
#> (stat_smooth).
#> 2: Removed 5222 rows containing missing values
ggplot(diamonds, aes(x=price, y=price+runif(nrow(diamonds), 100, 10000), color=cut)) + geom_point() + geom_smooth() + coord_equal()
#> (geom_point).
ggplot(diamonds, aes(x=carat, y=price, color=cut)) + geom_point() + geom_smooth() +theme_linedraw() + labs(title="LINEDRAW Theme")
p1 <- ggplot(diamonds, aes(x=carat, y=price, color=cut)) + geom_point() + geom_smooth() + theme(legend.position="none") + labs(title="legend.position='none'") # remove legend
p2 <- ggplot(diamonds, aes(x=carat, y=price, color=cut)) + geom_point() + geom_smooth() + theme(legend.position="top") + labs(title="legend.position='top'") # legend at top
p3 <- ggplot(diamonds, aes(x=carat, y=price, color=cut)) + geom_point() + geom_smooth() + labs(title="legend.position='coords inside plot'") + theme(legend.justification=c(1,0), legend.position=c(1,0)) # legend inside the plot.
grid.arrange(p1, p2, p3, ncol=3) # arrange
ggplot(mtcars, aes(x=cyl)) + geom_bar(fill='darkgoldenrod2') +
theme(panel.background = element_rect(fill = 'steelblue'),
panel.grid.major = element_line(colour = "firebrick", size=3),
panel.grid.minor = element_line(colour = "blue", size=1))
ggplot(mtcars, aes(x=cyl)) + geom_bar(fill="firebrick") + theme(plot.background=element_rect(fill="steelblue"),
plot.margin = unit(c(2, 4, 1, 3), "cm"))
library(grid)
my_grob = grobTree(textGrob("This text is at x=0.1 and y=0.9, relative!\n Anchor point is at 0,0", x=0.1, y=0.9, hjust=0,
gp=gpar(col="firebrick", fontsize=25, fontface="bold")))
ggplot(mtcars, aes(x=cyl)) + geom_bar() + annotation_custom(my_grob) + labs(title="Annotation Example")
plot1 <- ggplot(mtcars, aes(x=cyl)) + geom_bar()
ggsave("myggplot.png") # saves the last plot.
ggsave("myggplot.png", plot=plot1) # save a stored ggplot | /Data Analytics- Coursework 1/practical2.R | no_license | kamiada/Data-Analytics---part-1 | R | false | false | 5,636 | r | library(ggplot2)
ggplot(diamonds) # if only the dataset is known.
ggplot(diamonds, aes(x=carat)) # if only X-axis is known. The Y-axis can be specified in respective geoms.
ggplot(diamonds, aes(x=carat, y=price)) # if both X and Y axes are fixed for all layers.
ggplot(diamonds, aes(x=carat, color=cut)) # Each category of the 'cut' variable will now have a distinct color, once a geom is added.
ggplot(diamonds, aes(x=carat), color="steelblue")
ggplot(diamonds, aes(x=carat, y=price, color=cut)) + geom_point() + geom_smooth()
# Adding scatterplot geom (layer1) and smoothing geom (layer2).
ggplot(diamonds) + geom_point(aes(x=carat, y=price, color=cut)) + geom_smooth(aes(x=carat, y=price, color=cut)) # Same as above but specifying the aesthetics inside the geoms.
ggplot(diamonds) + geom_point(aes(x=carat, y=price, color=cut)) + geom_smooth(aes(x=carat, y=price)) # Remove color from geom_smooth
ggplot(diamonds, aes(x=carat, y=price)) + geom_point(aes(color=cut)) + geom_smooth() # same but simpler
ggplot(diamonds, aes(x=carat, y=price, color=cut, shape=color)) + geom_point()
library(ggplot2)
gg <- ggplot(diamonds, aes(x=carat, y=price, color=cut)) + geom_point() + labs(title="Scatterplot", x="Carat", y="Price") # add axis lables and plot title.
print(gg)
gg1 <- gg + theme(plot.title=element_text(size=30, face="bold"),
axis.text.x=element_text(size=15),
axis.text.y=element_text(size=15),
axis.title.x=element_text(size=25),
axis.title.y=element_text(size=25)) +
scale_color_discrete(name="Cut of diamonds")# add title and axis text, change legend title.
print(gg1)
scale_fill_continuous(name="legend title")
gg1 + facet_wrap( ~ cut, ncol=3)
gg1 + facet_wrap(color ~ cut)
gg1 + facet_wrap(color ~ cut, scales="free")
gg1 + facet_grid(color ~ cut)
#it needs time variable in order to create a plot
library(ggfortify)
install.packages("ggfortify")
autoplot(AirPassengers) + labs(title="AirPassengers")# where AirPassengers is a 'ts' object
install.packages("zoo")
data(economics, package="ggplot2")
economics <- data.frame(economics)
ggplot(economics) + geom_line(aes(x=date, y=pce, color="pcs")) + geom_line(aes(x=date, y=unemploy,
col="unemploy")) + scale_color_discrete(name="Legend") + labs(title="Economics")
#CAN BE USEFUL FOR THE COURSEWORK###################################################
plot1 <- ggplot(mtcars, aes(x=cyl)) + geom_bar() + labs(title="Frequency bar chart") # Y axis derived from counts of X item
print(plot1)
df <- data.frame(var=c("a", "b", "c"), nums=c(1:3))
plot2 <- ggplot(df, aes(x=var, y=nums)) + geom_bar(stat = "identity") # Y axis is explicit. 'stat=identity'
print(plot2)
library(gridExtra)
grid.arrange(plot1, plot2, ncol=2)
#################################################################################
df <- data.frame(var=c("a", "b", "c"), nums=c(1:3))
ggplot(df, aes(x=var, y=nums)) + geom_bar(stat = "identity") + coord_flip() + labs(title="Coordinates are flipped")
ggplot(diamonds, aes(x=carat, y=price, color=cut)) + geom_point() + geom_smooth() +
coord_cartesian(ylim=c(0, 10000)) + labs(title="Coord_cartesian zoomed in!")
ggplot(diamonds, aes(x=carat, y=price, color=cut)) + geom_point() + geom_smooth() + ylim(c(0, 10000)) +
labs(title="Datapoints deleted: Note the change in smoothing lines!")
#> Warning messages:
#> 1: Removed 5222 rows containing non-finite values
#> (stat_smooth).
#> 2: Removed 5222 rows containing missing values
ggplot(diamonds, aes(x=price, y=price+runif(nrow(diamonds), 100, 10000), color=cut)) + geom_point() + geom_smooth() + coord_equal()
#> (geom_point).
ggplot(diamonds, aes(x=carat, y=price, color=cut)) + geom_point() + geom_smooth() +theme_linedraw() + labs(title="LINEDRAW Theme")
p1 <- ggplot(diamonds, aes(x=carat, y=price, color=cut)) + geom_point() + geom_smooth() + theme(legend.position="none") + labs(title="legend.position='none'") # remove legend
p2 <- ggplot(diamonds, aes(x=carat, y=price, color=cut)) + geom_point() + geom_smooth() + theme(legend.position="top") + labs(title="legend.position='top'") # legend at top
p3 <- ggplot(diamonds, aes(x=carat, y=price, color=cut)) + geom_point() + geom_smooth() + labs(title="legend.position='coords inside plot'") + theme(legend.justification=c(1,0), legend.position=c(1,0)) # legend inside the plot.
grid.arrange(p1, p2, p3, ncol=3) # arrange
ggplot(mtcars, aes(x=cyl)) + geom_bar(fill='darkgoldenrod2') +
theme(panel.background = element_rect(fill = 'steelblue'),
panel.grid.major = element_line(colour = "firebrick", size=3),
panel.grid.minor = element_line(colour = "blue", size=1))
ggplot(mtcars, aes(x=cyl)) + geom_bar(fill="firebrick") + theme(plot.background=element_rect(fill="steelblue"),
plot.margin = unit(c(2, 4, 1, 3), "cm"))
library(grid)
my_grob = grobTree(textGrob("This text is at x=0.1 and y=0.9, relative!\n Anchor point is at 0,0", x=0.1, y=0.9, hjust=0,
gp=gpar(col="firebrick", fontsize=25, fontface="bold")))
ggplot(mtcars, aes(x=cyl)) + geom_bar() + annotation_custom(my_grob) + labs(title="Annotation Example")
plot1 <- ggplot(mtcars, aes(x=cyl)) + geom_bar()
ggsave("myggplot.png") # saves the last plot.
ggsave("myggplot.png", plot=plot1) # save a stored ggplot |
/Ejemplos Sonia/Promedio_Trafico.R | permissive | eledero/RDatelligence | R | false | false | 862 | r | ||
### Model 1
# a ~ Cue
# sv ~ 1
### modelSpec is a list containing:
# 1. The parameters to fit, and the factors they depend on
# 2. constants in the model
# 3. The factors from (1), and their levels
modelSpec = list('variablePars'=list('a' = 'condition',
'm' = 1,
't0' = 1,
'eta1' = 1,
'sv' = 1,
'sz' = 1),
'constants'=c('z'=0.5, 's'=1, 'eta2'=-Inf, 'st0'=0),
'condition'=c('SPD', 'ACC'),
'learningRule'= 'Qlearning')
obj <- objRLDDMMultiCond
### transformLearningRate is a function transforming
### "global" parameters to trial-by-trial values, dependent
### on the condition
transformLearningRate <- function(pars, condition) {
# "Declare"
eta1 <- eta2 <- rep(pars[['eta1']], length(condition))
return(list(eta1=eta1, eta2=eta2))
}
### the following function gets trial-by-trial DDM pars
transformDDMPars <- function(pars, condition, delta_ev) {
### Gets trial-by-trial DDM parameters ###
nTrials = length(condition)
a <- v <- t0 <- z <- sv <- sz <- s <- rep(NA, nTrials)
# all current models have no variability in sz, sv, s, t0
t0 = rep(pars[['t0']], nTrials)
z = rep(pars[['z']], nTrials)
sv <- rep(pars[['sv']], nTrials)
sz <- rep(pars[['sz']], nTrials)
s <- rep(pars[['s']], nTrials)
# all models assume a linear relation between delta_ev and v
v = delta_ev*pars[['m']]
# a differs by condition
a[condition=='SPD'] <- pars[['a.SPD']]
a[condition=='ACC'] <- pars[['a.ACC']]
# rescale z from [0, 1] to [0, a]
z = z*a
sv = sv
return(list(t0=t0, a=a, v=v, z=z, sz=sz, sv=sv, s=s, st0=pars[['st0']]))
}
| /analysis/models/old_models/model1szsv.R | permissive | StevenM1/RLDDM | R | false | false | 1,799 | r | ### Model 1
# a ~ Cue
# sv ~ 1
### modelSpec is a list containing:
# 1. The parameters to fit, and the factors they depend on
# 2. constants in the model
# 3. The factors from (1), and their levels
modelSpec = list('variablePars'=list('a' = 'condition',
'm' = 1,
't0' = 1,
'eta1' = 1,
'sv' = 1,
'sz' = 1),
'constants'=c('z'=0.5, 's'=1, 'eta2'=-Inf, 'st0'=0),
'condition'=c('SPD', 'ACC'),
'learningRule'= 'Qlearning')
obj <- objRLDDMMultiCond
### transformLearningRate is a function transforming
### "global" parameters to trial-by-trial values, dependent
### on the condition
transformLearningRate <- function(pars, condition) {
# "Declare"
eta1 <- eta2 <- rep(pars[['eta1']], length(condition))
return(list(eta1=eta1, eta2=eta2))
}
### the following function gets trial-by-trial DDM pars
transformDDMPars <- function(pars, condition, delta_ev) {
### Gets trial-by-trial DDM parameters ###
nTrials = length(condition)
a <- v <- t0 <- z <- sv <- sz <- s <- rep(NA, nTrials)
# all current models have no variability in sz, sv, s, t0
t0 = rep(pars[['t0']], nTrials)
z = rep(pars[['z']], nTrials)
sv <- rep(pars[['sv']], nTrials)
sz <- rep(pars[['sz']], nTrials)
s <- rep(pars[['s']], nTrials)
# all models assume a linear relation between delta_ev and v
v = delta_ev*pars[['m']]
# a differs by condition
a[condition=='SPD'] <- pars[['a.SPD']]
a[condition=='ACC'] <- pars[['a.ACC']]
# rescale z from [0, 1] to [0, a]
z = z*a
sv = sv
return(list(t0=t0, a=a, v=v, z=z, sz=sz, sv=sv, s=s, st0=pars[['st0']]))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ranges.R
\name{wkb_ranges}
\alias{wkb_ranges}
\alias{wkt_ranges}
\alias{wksxp_ranges}
\alias{wkb_feature_ranges}
\alias{wkt_feature_ranges}
\alias{wksxp_feature_ranges}
\title{Extract ranges information}
\usage{
wkb_ranges(wkb, na.rm = FALSE, finite = FALSE)
wkt_ranges(wkt, na.rm = FALSE, finite = FALSE)
wksxp_ranges(wksxp, na.rm = FALSE, finite = FALSE)
wkb_feature_ranges(wkb, na.rm = FALSE, finite = FALSE)
wkt_feature_ranges(wkt, na.rm = FALSE, finite = FALSE)
wksxp_feature_ranges(wksxp, na.rm = FALSE, finite = FALSE)
}
\arguments{
\item{wkb}{A \code{list()} of \code{\link[=raw]{raw()}} vectors, such as that
returned by \code{\link[sf:st_as_binary]{sf::st_as_binary()}}.}
\item{na.rm}{Pass \code{TRUE} to not consider missing (nan) values}
\item{finite}{Pass \code{TRUE} to only consider finite
(non-missing, non-infinite) values.}
\item{wkt}{A character vector containing well-known text.}
\item{wksxp}{A \code{list()} of classed objects}
}
\value{
A data.frame with columns:
\itemize{
\item \code{xmin}, \code{ymin}, \code{zmin}, and \code{mmin}: Minimum coordinate values
\item \code{xmax}, \code{ymax}, \code{zmax}, and \code{mmax}: Maximum coordinate values
}
}
\description{
This is intended to behave the same as \code{\link[=range]{range()}}, returning the
minimum and maximum x, y, z, and m coordinate values.
}
\examples{
wkt_ranges("POINT (30 10)")
}
| /wkutils/man/wkb_ranges.Rd | no_license | akhikolla/InformationHouse | R | false | true | 1,460 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ranges.R
\name{wkb_ranges}
\alias{wkb_ranges}
\alias{wkt_ranges}
\alias{wksxp_ranges}
\alias{wkb_feature_ranges}
\alias{wkt_feature_ranges}
\alias{wksxp_feature_ranges}
\title{Extract ranges information}
\usage{
wkb_ranges(wkb, na.rm = FALSE, finite = FALSE)
wkt_ranges(wkt, na.rm = FALSE, finite = FALSE)
wksxp_ranges(wksxp, na.rm = FALSE, finite = FALSE)
wkb_feature_ranges(wkb, na.rm = FALSE, finite = FALSE)
wkt_feature_ranges(wkt, na.rm = FALSE, finite = FALSE)
wksxp_feature_ranges(wksxp, na.rm = FALSE, finite = FALSE)
}
\arguments{
\item{wkb}{A \code{list()} of \code{\link[=raw]{raw()}} vectors, such as that
returned by \code{\link[sf:st_as_binary]{sf::st_as_binary()}}.}
\item{na.rm}{Pass \code{TRUE} to not consider missing (nan) values}
\item{finite}{Pass \code{TRUE} to only consider finite
(non-missing, non-infinite) values.}
\item{wkt}{A character vector containing well-known text.}
\item{wksxp}{A \code{list()} of classed objects}
}
\value{
A data.frame with columns:
\itemize{
\item \code{xmin}, \code{ymin}, \code{zmin}, and \code{mmin}: Minimum coordinate values
\item \code{xmax}, \code{ymax}, \code{zmax}, and \code{mmax}: Maximum coordinate values
}
}
\description{
This is intended to behave the same as \code{\link[=range]{range()}}, returning the
minimum and maximum x, y, z, and m coordinate values.
}
\examples{
wkt_ranges("POINT (30 10)")
}
|
\name{dl3}
\alias{dl3}
\title{Function to return the DL3 hydrologic indicator statistic for a given data frame}
\usage{
dl3(qfiletempf, pref = "mean")
}
\arguments{
\item{qfiletempf}{data frame containing a "discharge"
column containing daily flow values}
\item{pref}{string containing a "mean" or "median"
preference}
}
\value{
dl3 numeric containing the mean of the annual minimum
7-day average flows for the given data frame
}
\description{
This function accepts a data frame that contains a column
named "discharge" and calculates the mean of the annual
minimum 7-day average flows for the entire record
}
\examples{
load_data<-paste(system.file(package="HITHATStats"),"/data/obs_data.csv",sep="")
qfiletempf<-read.csv(load_data)
dl3(qfiletempf)
}
| /R/RProjects/HITHATStats/man/dl3.Rd | no_license | jlthomps/EflowStats | R | false | false | 774 | rd | \name{dl3}
\alias{dl3}
\title{Function to return the DL3 hydrologic indicator statistic for a given data frame}
\usage{
dl3(qfiletempf, pref = "mean")
}
\arguments{
\item{qfiletempf}{data frame containing a "discharge"
column containing daily flow values}
\item{pref}{string containing a "mean" or "median"
preference}
}
\value{
dl3 numeric containing the mean of the annual minimum
7-day average flows for the given data frame
}
\description{
This function accepts a data frame that contains a column
named "discharge" and calculates the mean of the annual
minimum 7-day average flows for the entire record
}
\examples{
load_data<-paste(system.file(package="HITHATStats"),"/data/obs_data.csv",sep="")
qfiletempf<-read.csv(load_data)
dl3(qfiletempf)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualizeassignTab.R
\name{visualizeassignTab}
\alias{visualizeassignTab}
\title{UI elements for visualization and group reassignment}
\usage{
visualizeassignTab()
}
\description{
UI elements for visualization and group reassignment
}
| /man/visualizeassignTab.Rd | no_license | mpeeples2008/NAA_analytical_dashboard | R | false | true | 313 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualizeassignTab.R
\name{visualizeassignTab}
\alias{visualizeassignTab}
\title{UI elements for visualization and group reassignment}
\usage{
visualizeassignTab()
}
\description{
UI elements for visualization and group reassignment
}
|
predict.frontier <- function( object, newdata = NULL, asInData = TRUE, ... ) {
if( is.null( newdata ) ) {
pred <- fitted( object, asInData = asInData )
} else {
if( !is.data.frame( newdata ) ) {
stop( "argument 'newdata' must be of class data.frame")
}
estCall <- object$call
estFunc <- as.character( estCall[[ 1 ]] )
estArg <- as.list( estCall )[ -1 ]
estArg$data <- newdata
estArg$maxit <- 0
estArg$startVal <- object$mleParam
estNew <- suppressWarnings( do.call( estFunc, estArg ) )
pred <- fitted( estNew, asInData = asInData )
}
return( pred )
}
| /R/predict.frontier.R | no_license | cran/frontier | R | false | false | 618 | r | predict.frontier <- function( object, newdata = NULL, asInData = TRUE, ... ) {
if( is.null( newdata ) ) {
pred <- fitted( object, asInData = asInData )
} else {
if( !is.data.frame( newdata ) ) {
stop( "argument 'newdata' must be of class data.frame")
}
estCall <- object$call
estFunc <- as.character( estCall[[ 1 ]] )
estArg <- as.list( estCall )[ -1 ]
estArg$data <- newdata
estArg$maxit <- 0
estArg$startVal <- object$mleParam
estNew <- suppressWarnings( do.call( estFunc, estArg ) )
pred <- fitted( estNew, asInData = asInData )
}
return( pred )
}
|
interface("transformacoes") | /execucoes/transformacoes.R | no_license | acgabriel3/bi_arbo | R | false | false | 29 | r |
interface("transformacoes") |
library("SimMultiCorrData")
context("Simulate using correlation method 2")
skip_on_cran()
options(scipen = 999)
tol <- 1e-5
set.seed(1234)
n <- 25
cstart1 <- runif(n, min = -2, max = 2)
cstart2 <- runif(n, min = -1, max = 1)
cstart3 <- runif(n, min = -0.5, max = 0.5)
cstartF <- cbind(cstart1, cstart2, cstart3)
set.seed(1234)
cstart1 <- runif(n, min = -2, max = 2)
cstart2 <- runif(n, min = -1, max = 1)
cstart3 <- runif(n, min = -1, max = 1)
cstart4 <- runif(n, min = -0.025, max = 0.025)
cstart5 <- runif(n, min = -0.025, max = 0.025)
cstartP <- cbind(cstart1, cstart2, cstart3, cstart4, cstart5)
L <- calc_theory("Logistic", c(0, 1))
Six <- list(seq(1.7, 1.8, 0.01))
marginal <- list(0.3)
lam <- 0.5
pois_eps <- 0.0001
size <- 2
prob <- 0.75
mu <- size * (1 - prob)/prob
nb_eps <- 0.0001
Rey <- matrix(0.4, 4, 4)
diag(Rey) <- 1
test_that("works for 0 continuous, 1 ordinal, 1 Poisson, 1 NB", {
expect_equal(all.equal(rcorrvar2(k_cat = 1, k_pois = 1, k_nb = 1,
marginal = marginal, support = list(c(0, 1)), lam = lam,
pois_eps = pois_eps, size = size, nb_eps = nb_eps,
prob = prob, rho = Rey[1:3, 1:3])$maxerr, 0.007967684,
tolerance = tol, check.attributes = FALSE), TRUE)
expect_equal(all.equal(rcorrvar2(k_cat = 1, k_pois = 1, k_nb = 1,
marginal = marginal, support = list(c(0, 1)), lam = lam,
pois_eps = pois_eps, size = size, nb_eps = nb_eps,
mu = mu, rho = Rey[1:3, 1:3])$maxerr, 0.007967684,
tolerance = tol, check.attributes = FALSE), TRUE)
expect_equal(all.equal(rcorrvar2(k_cat = 1, k_pois = 1, k_nb = 1,
marginal = marginal, support = list(c(0, 1)), lam = lam,
pois_eps = pois_eps, size = size, nb_eps = nb_eps,
prob = prob, rho = Rey[1:3, 1:3], errorloop = TRUE)$maxerr, 0.0009919255,
tolerance = tol, check.attributes = FALSE), TRUE)
})
test_that("works for Fleishman method: 1 continuous, 1 ordinal, 1 Poisson,
1 NB", {
expect_equal(all.equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 1, k_nb = 1,
method = "Fleishman", means = L[1], vars = L[2]^2, skews = L[3],
skurts = L[4], marginal = marginal, support = list(c(0, 1)),
lam = lam, pois_eps = pois_eps, size = size, nb_eps = nb_eps,
prob = prob, rho = Rey)$constants[1, "c3"], 0.03605955,
tolerance = tol, check.attributes = FALSE), TRUE)
expect_equal(all.equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 1, k_nb = 1,
method = "Fleishman", means = L[1], vars = L[2]^2, skews = L[3],
skurts = L[4], marginal = marginal, support = list(c(0, 1)),
lam = lam, pois_eps = pois_eps, size = size, nb_eps = nb_eps, mu = mu,
rho = Rey, cstart = list(cstartF))$constants[1, "c3"], 0.03605955,
tolerance = tol, check.attributes = FALSE), TRUE)
expect_equal(all.equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 1, k_nb = 1,
method = "Fleishman", means = L[1], vars = L[2]^2, skews = L[3],
skurts = L[4], marginal = marginal, support = list(c(0, 1)),
lam = lam, pois_eps = pois_eps, size = size, nb_eps = nb_eps, prob = prob,
rho = Rey, errorloop = TRUE)$constants[1, "c3"], 0.03605955,
tolerance = tol, check.attributes = FALSE), TRUE)
})
test_that("works for Fleishman method: 1 continuous, 0 ordinal, 1 Poisson,
1 NB", {
expect_equal(all.equal(rcorrvar2(k_cont = 1, k_cat = 0, k_pois = 1, k_nb = 1,
method = "Fleishman", means = L[1], vars = L[2]^2, skews = L[3],
skurts = L[4], lam = lam, pois_eps = pois_eps, size = size,
nb_eps = nb_eps, prob = prob, rho = Rey[1:3, 1:3])$constants[1, "c3"],
0.03605955, tolerance = tol, check.attributes = FALSE), TRUE)
expect_equal(all.equal(rcorrvar2(k_cont = 1, k_cat = 0, k_pois = 1, k_nb = 1,
method = "Fleishman", means = L[1], vars = L[2]^2, skews = L[3],
skurts = L[4], lam = lam, pois_eps = pois_eps, size = size,
nb_eps = nb_eps, mu = mu, rho = Rey[1:3, 1:3])$constants[1, "c3"],
0.03605955, tolerance = tol, check.attributes = FALSE), TRUE)
expect_equal(all.equal(rcorrvar2(k_cont = 1, k_cat = 0, k_pois = 1, k_nb = 1,
method = "Fleishman", means = L[1], vars = L[2]^2, skews = L[3],
skurts = L[4], lam = lam, pois_eps = pois_eps, size = size,
nb_eps = nb_eps, prob = prob, rho = Rey[1:3, 1:3],
errorloop = TRUE)$constants[1, "c3"], 0.03605955,
tolerance = tol, check.attributes = FALSE), TRUE)
})
test_that("works for Fleishman method: 1 continuous, 1 ordinal, 0 Poisson,
1 NB", {
expect_equal(all.equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 0, k_nb = 1,
method = "Fleishman", means = L[1], vars = L[2]^2, skews = L[3],
skurts = L[4], marginal = marginal, support = list(c(0, 1)),
size = size, nb_eps = nb_eps, prob = prob,
rho = Rey[1:3, 1:3])$constants[1, "c3"], 0.03605955,
tolerance = tol, check.attributes = FALSE), TRUE)
expect_equal(all.equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 0, k_nb = 1,
method = "Fleishman", means = L[1], vars = L[2]^2, skews = L[3],
skurts = L[4], marginal = marginal, support = list(c(0, 1)),
size = size, nb_eps = nb_eps, mu = mu,
rho = Rey[1:3, 1:3])$constants[1, "c3"], 0.03605955,
tolerance = tol, check.attributes = FALSE), TRUE)
expect_equal(all.equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 0, k_nb = 1,
method = "Fleishman", means = L[1], vars = L[2]^2, skews = L[3],
skurts = L[4], marginal = marginal, support = list(c(0, 1)),
size = size, nb_eps = nb_eps, prob = prob, rho = Rey[1:3, 1:3],
errorloop = TRUE)$constants[1, "c3"], 0.03605955,
tolerance = tol, check.attributes = FALSE), TRUE)
})
test_that("works for Fleishman method: 1 continuous, 1 ordinal, 1 Poisson,
0 NB", {
expect_equal(all.equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 1, k_nb = 0,
method = "Fleishman", means = L[1], vars = L[2]^2, skews = L[3],
skurts = L[4], marginal = marginal, support = list(c(0, 1)),
lam = lam, pois_eps = pois_eps, rho = Rey[1:3, 1:3])$constants[1, "c3"],
0.03605955, tolerance = tol, check.attributes = FALSE), TRUE)
expect_equal(all.equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 1, k_nb = 0,
method = "Fleishman", means = L[1], vars = L[2]^2, skews = L[3],
skurts = L[4], marginal = marginal, support = list(c(0, 1)),
lam = lam, pois_eps = pois_eps, rho = Rey[1:3, 1:3],
errorloop = TRUE)$constants[1, "c3"], 0.03605955,
tolerance = tol, check.attributes = FALSE), TRUE)
})
test_that("works for Polynomial method: 1 continuous, 1 ordinal, 1 Poisson,
1 NB", {
expect_equal(all.equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 1, k_nb = 1,
method = "Polynomial", means = L[1], vars = L[2]^2, skews = L[3],
skurts = L[4], fifths = L[5], sixths = L[6], Six = Six,
marginal = marginal, support = list(c(0, 1)), lam = lam,
pois_eps = pois_eps, size = size, prob = prob, nb_eps = nb_eps,
rho = Rey)$constants[1, "c5"], 0.0000006124845,
tolerance = tol, check.attributes = FALSE), TRUE)
expect_equal(all.equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 1, k_nb = 1,
method = "Polynomial", means = L[1], vars = L[2]^2, skews = L[3],
skurts = L[4], fifths = L[5], sixths = L[6], Six = Six,
cstart = list(cstartP), marginal = marginal, support = list(c(0, 1)),
lam = lam, pois_eps = pois_eps, size = size, nb_eps = nb_eps,
mu = mu, rho = Rey)$constants[1, "c5"], 0.0000006125703,
tolerance = tol, check.attributes = FALSE), TRUE)
expect_equal(all.equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 1, k_nb = 1,
method = "Polynomial", means = L[1], vars = L[2]^2, skews = L[3],
skurts = L[4], fifths = L[5], sixths = L[6], Six = Six,
marginal = marginal, support = list(c(0, 1)), lam = lam,
pois_eps = pois_eps, size = size, prob = prob, nb_eps = nb_eps,
rho = Rey, errorloop = TRUE)$constants[1, "c5"],
0.0000006124845, tolerance = tol, check.attributes = FALSE), TRUE)
})
test_that("works for Polynomial method: 1 continuous, 0 ordinal, 1 Poisson,
1 NB", {
expect_equal(all.equal(rcorrvar2(k_cont = 1, k_cat = 0, k_pois = 1, k_nb = 1,
method = "Polynomial", means = L[1], vars = L[2]^2, skews = L[3],
skurts = L[4], fifths = L[5], sixths = L[6], Six = Six,
lam = lam, pois_eps = pois_eps, size = size, nb_eps = nb_eps, prob = prob,
rho = Rey[1:3, 1:3])$constants[1, "c5"], 0.0000006124845,
tolerance = tol, check.attributes = FALSE), TRUE)
expect_equal(all.equal(rcorrvar2(k_cont = 1, k_cat = 0, k_pois = 1, k_nb = 1,
method = "Polynomial", means = L[1], vars = L[2]^2, skews = L[3],
skurts = L[4], fifths = L[5], sixths = L[6], Six = Six,
lam = lam, pois_eps = pois_eps, size = size, nb_eps = nb_eps, mu = mu,
rho = Rey[1:3, 1:3])$constants[1, "c5"], 0.0000006124845,
tolerance = tol, check.attributes = FALSE), TRUE)
expect_equal(all.equal(rcorrvar2(k_cont = 1, k_cat = 0, k_pois = 1, k_nb = 1,
method = "Polynomial", means = L[1], vars = L[2]^2, skews = L[3],
skurts = L[4], fifths = L[5], sixths = L[6], Six = Six,
lam = lam, pois_eps = pois_eps, size = size, nb_eps = nb_eps,
prob = prob, rho = Rey[1:3, 1:3], errorloop = TRUE)$constants[1, "c5"],
0.0000006124845, tolerance = tol, check.attributes = FALSE), TRUE)
})
test_that("works for Polynomial method: 1 continuous, 1 ordinal, 0 Poisson,
1 NB", {
expect_equal(all.equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 0, k_nb = 1,
method = "Polynomial", means = L[1], vars = L[2]^2, skews = L[3],
skurts = L[4], fifths = L[5], sixths = L[6], Six = Six,
marginal = marginal, support = list(c(0, 1)), size = size, nb_eps = nb_eps,
prob = prob, rho = Rey[1:3, 1:3])$constants[1, "c5"], 0.0000006124845,
tolerance = tol, check.attributes = FALSE), TRUE)
expect_equal(all.equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 0, k_nb = 1,
method = "Polynomial", means = L[1], vars = L[2]^2, skews = L[3],
skurts = L[4], fifths = L[5], sixths = L[6], Six = Six,
marginal = marginal, support = list(c(0, 1)), size = size,
nb_eps = nb_eps, mu = mu, rho = Rey[1:3, 1:3])$constants[1, "c5"],
0.0000006124845, tolerance = tol, check.attributes = FALSE), TRUE)
expect_equal(all.equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 0, k_nb = 1,
method = "Polynomial", means = L[1], vars = L[2]^2, skews = L[3],
skurts = L[4], fifths = L[5], sixths = L[6], Six = Six,
marginal = marginal, support = list(c(0, 1)), size = size,
nb_eps = nb_eps, prob = prob, rho = Rey[1:3, 1:3],
errorloop = TRUE)$constants[1, "c5"], 0.0000006124845,
tolerance = tol, check.attributes = FALSE), TRUE)
})
test_that("works for Polynomial method: 1 continuous, 1 ordinal, 1 Poisson,
0 NB", {
expect_equal(all.equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 1, k_nb = 0,
method = "Polynomial", means = L[1], vars = L[2]^2, skews = L[3],
skurts = L[4], fifths = L[5], sixths = L[6], Six = Six,
marginal = marginal, support = list(c(0, 1)), lam = lam,
pois_eps = pois_eps, rho = Rey[1:3, 1:3])$constants[1, "c5"],
0.0000006124845, tolerance = tol, check.attributes = FALSE), TRUE)
expect_equal(all.equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 1, k_nb = 0,
method = "Polynomial", means = L[1], vars = L[2]^2, skews = L[3],
skurts = L[4], fifths = L[5], sixths = L[6], Six = Six,
marginal = marginal, support = list(c(0, 1)), lam = lam,
pois_eps = pois_eps, rho = Rey[1:3, 1:3],
errorloop = TRUE)$constants[1, "c5"], 0.0000006124845,
tolerance = tol, check.attributes = FALSE), TRUE)
})
Rey2 <- matrix(0.4, 5, 5)
diag(Rey2) <- 1
test_that("works for Polynomial method: same continuous distribution", {
expect_equal(all.equal(rcorrvar2(k_cont = 2, k_cat = 1, k_pois = 1, k_nb = 1,
method = "Polynomial", means = rep(L[1], 2), vars = rep(L[2]^2, 2),
skews = rep(L[3], 2), skurts = rep(L[4], 2), fifths = rep(L[5], 2),
sixths = rep(L[6], 2), Six = list(1.75, 1.75),
marginal = marginal, support = list(c(0, 1)), lam = lam,
pois_eps = pois_eps, size = size, prob = prob, nb_eps = nb_eps,
rho = Rey2)$constants[1, "c5"], 0.0000006124845,
tolerance = tol, check.attributes = FALSE), TRUE)
expect_equal(all.equal(rcorrvar2(k_cont = 2, k_cat = 1, k_pois = 1, k_nb = 1,
method = "Polynomial", means = rep(L[1], 2), vars = rep(L[2]^2, 2),
skews = rep(L[3], 2), skurts = rep(L[4], 2), fifths = rep(L[5], 2),
sixths = rep(L[6], 2), Six = list(1.75, 1.75), marginal = marginal,
support = list(c(0, 1)), lam = lam, pois_eps = pois_eps, size = size,
prob = prob, nb_eps = nb_eps, rho = Rey2,
errorloop = TRUE)$constants[1, "c5"], 0.0000006124845,
tolerance = tol, check.attributes = FALSE), TRUE)
})
| /tests/testthat/test-rcorrvar2.R | no_license | shaoyoucheng/SimMultiCorrData | R | false | false | 12,640 | r | library("SimMultiCorrData")
context("Simulate using correlation method 2")
skip_on_cran()
options(scipen = 999)
tol <- 1e-5
set.seed(1234)
n <- 25
cstart1 <- runif(n, min = -2, max = 2)
cstart2 <- runif(n, min = -1, max = 1)
cstart3 <- runif(n, min = -0.5, max = 0.5)
cstartF <- cbind(cstart1, cstart2, cstart3)
set.seed(1234)
cstart1 <- runif(n, min = -2, max = 2)
cstart2 <- runif(n, min = -1, max = 1)
cstart3 <- runif(n, min = -1, max = 1)
cstart4 <- runif(n, min = -0.025, max = 0.025)
cstart5 <- runif(n, min = -0.025, max = 0.025)
cstartP <- cbind(cstart1, cstart2, cstart3, cstart4, cstart5)
L <- calc_theory("Logistic", c(0, 1))
Six <- list(seq(1.7, 1.8, 0.01))
marginal <- list(0.3)
lam <- 0.5
pois_eps <- 0.0001
size <- 2
prob <- 0.75
mu <- size * (1 - prob)/prob
nb_eps <- 0.0001
Rey <- matrix(0.4, 4, 4)
diag(Rey) <- 1
test_that("works for 0 continuous, 1 ordinal, 1 Poisson, 1 NB", {
  # expect_equal() forwards `tolerance` and `check.attributes` on to
  # all.equal() (testthat 2e), so comparing the value directly -- instead of
  # expect_equal(all.equal(...), TRUE) -- keeps the same pass/fail behaviour
  # while reporting the actual numeric difference on failure.
  # Negative binomial parameterized by `prob`.
  expect_equal(rcorrvar2(k_cat = 1, k_pois = 1, k_nb = 1,
    marginal = marginal, support = list(c(0, 1)), lam = lam,
    pois_eps = pois_eps, size = size, nb_eps = nb_eps,
    prob = prob, rho = Rey[1:3, 1:3])$maxerr, 0.007967684,
    tolerance = tol, check.attributes = FALSE)
  # The `mu` parameterization must give the same maximum correlation error.
  expect_equal(rcorrvar2(k_cat = 1, k_pois = 1, k_nb = 1,
    marginal = marginal, support = list(c(0, 1)), lam = lam,
    pois_eps = pois_eps, size = size, nb_eps = nb_eps,
    mu = mu, rho = Rey[1:3, 1:3])$maxerr, 0.007967684,
    tolerance = tol, check.attributes = FALSE)
  # The error-correction loop should shrink the maximum error.
  expect_equal(rcorrvar2(k_cat = 1, k_pois = 1, k_nb = 1,
    marginal = marginal, support = list(c(0, 1)), lam = lam,
    pois_eps = pois_eps, size = size, nb_eps = nb_eps,
    prob = prob, rho = Rey[1:3, 1:3], errorloop = TRUE)$maxerr, 0.0009919255,
    tolerance = tol, check.attributes = FALSE)
})
# NOTE: expect_equal() forwards `tolerance`/`check.attributes` to all.equal()
# (testthat 2e), so the direct comparisons below are equivalent to the old
# expect_equal(all.equal(...), TRUE) pattern but give informative failures.
test_that("works for Fleishman method: 1 continuous, 1 ordinal, 1 Poisson,
    1 NB", {
  # Check the third Fleishman constant (c3) fitted for the Logistic target.
  expect_equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 1, k_nb = 1,
    method = "Fleishman", means = L[1], vars = L[2]^2, skews = L[3],
    skurts = L[4], marginal = marginal, support = list(c(0, 1)),
    lam = lam, pois_eps = pois_eps, size = size, nb_eps = nb_eps,
    prob = prob, rho = Rey)$constants[1, "c3"], 0.03605955,
    tolerance = tol, check.attributes = FALSE)
  # `mu` parameterization with user-supplied starting values (cstartF).
  expect_equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 1, k_nb = 1,
    method = "Fleishman", means = L[1], vars = L[2]^2, skews = L[3],
    skurts = L[4], marginal = marginal, support = list(c(0, 1)),
    lam = lam, pois_eps = pois_eps, size = size, nb_eps = nb_eps, mu = mu,
    rho = Rey, cstart = list(cstartF))$constants[1, "c3"], 0.03605955,
    tolerance = tol, check.attributes = FALSE)
  # The error-correction loop must not change the fitted constants.
  expect_equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 1, k_nb = 1,
    method = "Fleishman", means = L[1], vars = L[2]^2, skews = L[3],
    skurts = L[4], marginal = marginal, support = list(c(0, 1)),
    lam = lam, pois_eps = pois_eps, size = size, nb_eps = nb_eps, prob = prob,
    rho = Rey, errorloop = TRUE)$constants[1, "c3"], 0.03605955,
    tolerance = tol, check.attributes = FALSE)
})
test_that("works for Fleishman method: 1 continuous, 0 ordinal, 1 Poisson,
    1 NB", {
  expect_equal(rcorrvar2(k_cont = 1, k_cat = 0, k_pois = 1, k_nb = 1,
    method = "Fleishman", means = L[1], vars = L[2]^2, skews = L[3],
    skurts = L[4], lam = lam, pois_eps = pois_eps, size = size,
    nb_eps = nb_eps, prob = prob, rho = Rey[1:3, 1:3])$constants[1, "c3"],
    0.03605955, tolerance = tol, check.attributes = FALSE)
  expect_equal(rcorrvar2(k_cont = 1, k_cat = 0, k_pois = 1, k_nb = 1,
    method = "Fleishman", means = L[1], vars = L[2]^2, skews = L[3],
    skurts = L[4], lam = lam, pois_eps = pois_eps, size = size,
    nb_eps = nb_eps, mu = mu, rho = Rey[1:3, 1:3])$constants[1, "c3"],
    0.03605955, tolerance = tol, check.attributes = FALSE)
  expect_equal(rcorrvar2(k_cont = 1, k_cat = 0, k_pois = 1, k_nb = 1,
    method = "Fleishman", means = L[1], vars = L[2]^2, skews = L[3],
    skurts = L[4], lam = lam, pois_eps = pois_eps, size = size,
    nb_eps = nb_eps, prob = prob, rho = Rey[1:3, 1:3],
    errorloop = TRUE)$constants[1, "c3"], 0.03605955,
    tolerance = tol, check.attributes = FALSE)
})
test_that("works for Fleishman method: 1 continuous, 1 ordinal, 0 Poisson,
    1 NB", {
  expect_equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 0, k_nb = 1,
    method = "Fleishman", means = L[1], vars = L[2]^2, skews = L[3],
    skurts = L[4], marginal = marginal, support = list(c(0, 1)),
    size = size, nb_eps = nb_eps, prob = prob,
    rho = Rey[1:3, 1:3])$constants[1, "c3"], 0.03605955,
    tolerance = tol, check.attributes = FALSE)
  expect_equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 0, k_nb = 1,
    method = "Fleishman", means = L[1], vars = L[2]^2, skews = L[3],
    skurts = L[4], marginal = marginal, support = list(c(0, 1)),
    size = size, nb_eps = nb_eps, mu = mu,
    rho = Rey[1:3, 1:3])$constants[1, "c3"], 0.03605955,
    tolerance = tol, check.attributes = FALSE)
  expect_equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 0, k_nb = 1,
    method = "Fleishman", means = L[1], vars = L[2]^2, skews = L[3],
    skurts = L[4], marginal = marginal, support = list(c(0, 1)),
    size = size, nb_eps = nb_eps, prob = prob, rho = Rey[1:3, 1:3],
    errorloop = TRUE)$constants[1, "c3"], 0.03605955,
    tolerance = tol, check.attributes = FALSE)
})
test_that("works for Fleishman method: 1 continuous, 1 ordinal, 1 Poisson,
    0 NB", {
  expect_equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 1, k_nb = 0,
    method = "Fleishman", means = L[1], vars = L[2]^2, skews = L[3],
    skurts = L[4], marginal = marginal, support = list(c(0, 1)),
    lam = lam, pois_eps = pois_eps, rho = Rey[1:3, 1:3])$constants[1, "c3"],
    0.03605955, tolerance = tol, check.attributes = FALSE)
  expect_equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 1, k_nb = 0,
    method = "Fleishman", means = L[1], vars = L[2]^2, skews = L[3],
    skurts = L[4], marginal = marginal, support = list(c(0, 1)),
    lam = lam, pois_eps = pois_eps, rho = Rey[1:3, 1:3],
    errorloop = TRUE)$constants[1, "c3"], 0.03605955,
    tolerance = tol, check.attributes = FALSE)
})
# NOTE: expect_equal() forwards `tolerance`/`check.attributes` to all.equal()
# (testthat 2e), so the direct comparisons below are equivalent to the old
# expect_equal(all.equal(...), TRUE) pattern but give informative failures.
test_that("works for Polynomial method: 1 continuous, 1 ordinal, 1 Poisson,
    1 NB", {
  # Check the fifth polynomial constant (c5) fitted for the Logistic target.
  expect_equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 1, k_nb = 1,
    method = "Polynomial", means = L[1], vars = L[2]^2, skews = L[3],
    skurts = L[4], fifths = L[5], sixths = L[6], Six = Six,
    marginal = marginal, support = list(c(0, 1)), lam = lam,
    pois_eps = pois_eps, size = size, prob = prob, nb_eps = nb_eps,
    rho = Rey)$constants[1, "c5"], 0.0000006124845,
    tolerance = tol, check.attributes = FALSE)
  # `mu` parameterization with user-supplied starting values (cstartP).
  expect_equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 1, k_nb = 1,
    method = "Polynomial", means = L[1], vars = L[2]^2, skews = L[3],
    skurts = L[4], fifths = L[5], sixths = L[6], Six = Six,
    cstart = list(cstartP), marginal = marginal, support = list(c(0, 1)),
    lam = lam, pois_eps = pois_eps, size = size, nb_eps = nb_eps,
    mu = mu, rho = Rey)$constants[1, "c5"], 0.0000006125703,
    tolerance = tol, check.attributes = FALSE)
  # The error-correction loop must not change the fitted constants.
  expect_equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 1, k_nb = 1,
    method = "Polynomial", means = L[1], vars = L[2]^2, skews = L[3],
    skurts = L[4], fifths = L[5], sixths = L[6], Six = Six,
    marginal = marginal, support = list(c(0, 1)), lam = lam,
    pois_eps = pois_eps, size = size, prob = prob, nb_eps = nb_eps,
    rho = Rey, errorloop = TRUE)$constants[1, "c5"],
    0.0000006124845, tolerance = tol, check.attributes = FALSE)
})
test_that("works for Polynomial method: 1 continuous, 0 ordinal, 1 Poisson,
    1 NB", {
  expect_equal(rcorrvar2(k_cont = 1, k_cat = 0, k_pois = 1, k_nb = 1,
    method = "Polynomial", means = L[1], vars = L[2]^2, skews = L[3],
    skurts = L[4], fifths = L[5], sixths = L[6], Six = Six,
    lam = lam, pois_eps = pois_eps, size = size, nb_eps = nb_eps, prob = prob,
    rho = Rey[1:3, 1:3])$constants[1, "c5"], 0.0000006124845,
    tolerance = tol, check.attributes = FALSE)
  expect_equal(rcorrvar2(k_cont = 1, k_cat = 0, k_pois = 1, k_nb = 1,
    method = "Polynomial", means = L[1], vars = L[2]^2, skews = L[3],
    skurts = L[4], fifths = L[5], sixths = L[6], Six = Six,
    lam = lam, pois_eps = pois_eps, size = size, nb_eps = nb_eps, mu = mu,
    rho = Rey[1:3, 1:3])$constants[1, "c5"], 0.0000006124845,
    tolerance = tol, check.attributes = FALSE)
  expect_equal(rcorrvar2(k_cont = 1, k_cat = 0, k_pois = 1, k_nb = 1,
    method = "Polynomial", means = L[1], vars = L[2]^2, skews = L[3],
    skurts = L[4], fifths = L[5], sixths = L[6], Six = Six,
    lam = lam, pois_eps = pois_eps, size = size, nb_eps = nb_eps,
    prob = prob, rho = Rey[1:3, 1:3], errorloop = TRUE)$constants[1, "c5"],
    0.0000006124845, tolerance = tol, check.attributes = FALSE)
})
test_that("works for Polynomial method: 1 continuous, 1 ordinal, 0 Poisson,
    1 NB", {
  expect_equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 0, k_nb = 1,
    method = "Polynomial", means = L[1], vars = L[2]^2, skews = L[3],
    skurts = L[4], fifths = L[5], sixths = L[6], Six = Six,
    marginal = marginal, support = list(c(0, 1)), size = size, nb_eps = nb_eps,
    prob = prob, rho = Rey[1:3, 1:3])$constants[1, "c5"], 0.0000006124845,
    tolerance = tol, check.attributes = FALSE)
  expect_equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 0, k_nb = 1,
    method = "Polynomial", means = L[1], vars = L[2]^2, skews = L[3],
    skurts = L[4], fifths = L[5], sixths = L[6], Six = Six,
    marginal = marginal, support = list(c(0, 1)), size = size,
    nb_eps = nb_eps, mu = mu, rho = Rey[1:3, 1:3])$constants[1, "c5"],
    0.0000006124845, tolerance = tol, check.attributes = FALSE)
  expect_equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 0, k_nb = 1,
    method = "Polynomial", means = L[1], vars = L[2]^2, skews = L[3],
    skurts = L[4], fifths = L[5], sixths = L[6], Six = Six,
    marginal = marginal, support = list(c(0, 1)), size = size,
    nb_eps = nb_eps, prob = prob, rho = Rey[1:3, 1:3],
    errorloop = TRUE)$constants[1, "c5"], 0.0000006124845,
    tolerance = tol, check.attributes = FALSE)
})
test_that("works for Polynomial method: 1 continuous, 1 ordinal, 1 Poisson,
    0 NB", {
  expect_equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 1, k_nb = 0,
    method = "Polynomial", means = L[1], vars = L[2]^2, skews = L[3],
    skurts = L[4], fifths = L[5], sixths = L[6], Six = Six,
    marginal = marginal, support = list(c(0, 1)), lam = lam,
    pois_eps = pois_eps, rho = Rey[1:3, 1:3])$constants[1, "c5"],
    0.0000006124845, tolerance = tol, check.attributes = FALSE)
  expect_equal(rcorrvar2(k_cont = 1, k_cat = 1, k_pois = 1, k_nb = 0,
    method = "Polynomial", means = L[1], vars = L[2]^2, skews = L[3],
    skurts = L[4], fifths = L[5], sixths = L[6], Six = Six,
    marginal = marginal, support = list(c(0, 1)), lam = lam,
    pois_eps = pois_eps, rho = Rey[1:3, 1:3],
    errorloop = TRUE)$constants[1, "c5"], 0.0000006124845,
    tolerance = tol, check.attributes = FALSE)
})
# 5x5 target correlation for the two-continuous-variable case below.
Rey2 <- matrix(0.4, 5, 5)
diag(Rey2) <- 1
test_that("works for Polynomial method: same continuous distribution", {
  # Two identical continuous (Logistic) marginals must reproduce the same
  # c5 constant, with and without the error-correction loop.  As above,
  # expect_equal() forwards `tolerance`/`check.attributes` to all.equal().
  expect_equal(rcorrvar2(k_cont = 2, k_cat = 1, k_pois = 1, k_nb = 1,
    method = "Polynomial", means = rep(L[1], 2), vars = rep(L[2]^2, 2),
    skews = rep(L[3], 2), skurts = rep(L[4], 2), fifths = rep(L[5], 2),
    sixths = rep(L[6], 2), Six = list(1.75, 1.75),
    marginal = marginal, support = list(c(0, 1)), lam = lam,
    pois_eps = pois_eps, size = size, prob = prob, nb_eps = nb_eps,
    rho = Rey2)$constants[1, "c5"], 0.0000006124845,
    tolerance = tol, check.attributes = FALSE)
  expect_equal(rcorrvar2(k_cont = 2, k_cat = 1, k_pois = 1, k_nb = 1,
    method = "Polynomial", means = rep(L[1], 2), vars = rep(L[2]^2, 2),
    skews = rep(L[3], 2), skurts = rep(L[4], 2), fifths = rep(L[5], 2),
    sixths = rep(L[6], 2), Six = list(1.75, 1.75), marginal = marginal,
    support = list(c(0, 1)), lam = lam, pois_eps = pois_eps, size = size,
    prob = prob, nb_eps = nb_eps, rho = Rey2,
    errorloop = TRUE)$constants[1, "c5"], 0.0000006124845,
    tolerance = tol, check.attributes = FALSE)
})
|
# Plot 2: Global Active Power over time for 2007-02-01 and 2007-02-02.
dataFile <- "household_power_consumption.txt"
# The power-consumption file marks missing measurements with "?"; mapping
# those to NA at read time avoids coercion warnings in as.numeric() below.
# TRUE/FALSE spelled out (the T/F shortcuts are reassignable).
data <- read.table(dataFile, header = TRUE, sep = ";",
                   stringsAsFactors = FALSE, dec = ".", na.strings = "?")
# Keep only the two days of interest (Date is a d/m/Y character string).
subSetData <- data[data$Date %in% c("1/2/2007", "2/2/2007"), ]
# Combine date and time into POSIXlt timestamps for the x-axis.
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep = " "),
                     "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subSetData$Global_active_power)
png("plot2.png", width = 480, height = 480)
plot(datetime, globalActivePower, type = "l", xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off() | /plot2.R | no_license | Chrisgarr77/ExData_Plotting1 | R | false | false | 496 | r | dataFile <- "household_power_consumption.txt"
data <- read.table(dataFile, header =T, sep = ";", stringsAsFactors=F, dec= ".")
subSetData <- data[data$Date %in% c("1/2/2007", "2/2/2007"), ]
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep= " "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subSetData$Global_active_power)
png("plot2.png", width = 480, height= 480)
plot(datetime, globalActivePower, type= "l", xlab= "", ylab= "Global Active Power (kilowatts)")
dev.off() |
# Make plots of the mean number of gene sequences per species for tandems that
# are split or not-split across orthogroups.
# NOTE(review): the hard-coded absolute setwd() makes this script
# machine-specific; consider running it from Results/Orthofinder instead.
setwd("/Users/tomkono/Dropbox/GitHub/Maize_Tandem_Evolution/Results/Orthofinder")
# Read in the data
# File naming: <genotype>_<True|False tandem>_<Single|Multi orthogroup>.
# Column V3 (used below) appears to hold comma-separated per-species gene
# counts -- confirm against the OGGenes file format.
b_false_single_genes <- read.table("B73_False_Single_OGGenes.txt", header=FALSE)
b_true_single_genes <- read.table("B73_True_Single_OGGenes.txt", header=FALSE)
b_false_multi_genes <- read.table("B73_False_Multi_OGGenes.txt", header=FALSE)
b_true_multi_genes <- read.table("B73_True_Multi_OGGenes.txt", header=FALSE)
p_false_single_genes <- read.table("PH207_False_Single_OGGenes.txt", header=FALSE)
p_true_single_genes <- read.table("PH207_True_Single_OGGenes.txt", header=FALSE)
p_false_multi_genes <- read.table("PH207_False_Multi_OGGenes.txt", header=FALSE)
p_true_multi_genes <- read.table("PH207_True_Multi_OGGenes.txt", header=FALSE)
# Parse a comma-separated string of counts and return their mean.
# Accepts factor input as well (coerced to character first).
comma_mean <- function(string) {
    values <- as.numeric(unlist(strsplit(as.character(string), ",")))
    mean(values)
}
# Get means for each of the partitions.  vapply() (rather than sapply())
# guarantees a numeric vector result even for an empty or degenerate input
# column; iteration semantics over character/factor input are the same.
b_false_single_means <- vapply(b_false_single_genes$V3, comma_mean, numeric(1))
b_true_single_means <- vapply(b_true_single_genes$V3, comma_mean, numeric(1))
b_false_multi_means <- vapply(b_false_multi_genes$V3, comma_mean, numeric(1))
b_true_multi_means <- vapply(b_true_multi_genes$V3, comma_mean, numeric(1))
p_false_single_means <- vapply(p_false_single_genes$V3, comma_mean, numeric(1))
p_true_single_means <- vapply(p_true_single_genes$V3, comma_mean, numeric(1))
p_false_multi_means <- vapply(p_false_multi_genes$V3, comma_mean, numeric(1))
p_true_multi_means <- vapply(p_true_multi_genes$V3, comma_mean, numeric(1))
# Make some plots.  All four figures are the same overlaid pair of log-scale
# histograms, so the shared drawing code lives in one helper.
# `blue` is drawn first (it sets the axes via `ymax`); `red` is overlaid
# semi-transparently; `labels` names (blue, red) in the legend.
plot_mean_hists <- function(outfile, blue, red, labels, title, ymax) {
    blue_col <- rgb(0, 0, 1, 0.5)
    red_col <- rgb(1, 0, 0, 0.5)
    pdf(file=outfile, 6, 6)
    hist(
        log(blue),
        breaks=20,
        col=blue_col,
        xlab="log(Mean Number of Genes per Species in Orthogroup)",
        ylab="Count",
        main=title,
        ylim=c(0, ymax))
    hist(
        log(red),
        breaks=20,
        col=red_col,
        add=TRUE)
    legend(
        "topright",
        labels,
        fill=c(blue_col, red_col))
    dev.off()
}
# B73: not-split vs. split orthogroups.
plot_mean_hists(
    "B73_GenesPerSp_SplitOGs.pdf",
    blue=c(b_false_single_means, b_true_single_means),
    red=c(b_false_multi_means, b_true_multi_means),
    labels=c("Not Split", "Split"),
    title="Distribution of Genes per Species in\nOrthogroups With B73 Tandem Duplicates",
    ymax=250)
# B73: true vs. false tandems.
plot_mean_hists(
    "B73_GenesPerSp_TrueFalse.pdf",
    blue=c(b_true_multi_means, b_true_single_means),
    red=c(b_false_multi_means, b_false_single_means),
    labels=c("True Tandem", "False Tandem"),
    title="Distribution of Genes per Species in\nOrthogroups With B73 Tandem Duplicates",
    ymax=250)
# PH207: not-split vs. split orthogroups.
plot_mean_hists(
    "PH207_GenesPerSp_SplitOGs.pdf",
    blue=c(p_false_single_means, p_true_single_means),
    red=c(p_false_multi_means, p_true_multi_means),
    labels=c("Not Split", "Split"),
    title="Distribution of Genes per Species in\nOrthogroups With PH207 Tandem Duplicates",
    ymax=400)
# PH207: true vs. false tandems.
plot_mean_hists(
    "PH207_GenesPerSp_TrueFalse.pdf",
    blue=c(p_true_multi_means, p_true_single_means),
    red=c(p_false_multi_means, p_false_single_means),
    labels=c("True Tandem", "False Tandem"),
    title="Distribution of Genes per Species in\nOrthogroups With PH207 Tandem Duplicates",
    ymax=400)
| /Scripts/Plotting/Tandem_GenesPerSpecies_OGs.R | no_license | TomJKono/Maize_Tandem_Evolution | R | false | false | 4,141 | r | # Make plots of the mean number of gene sequences per species for tandems that
# are split or not-split across orthogroups.
setwd("/Users/tomkono/Dropbox/GitHub/Maize_Tandem_Evolution/Results/Orthofinder")
# Read in the data
b_false_single_genes <- read.table("B73_False_Single_OGGenes.txt", header=FALSE)
b_true_single_genes <- read.table("B73_True_Single_OGGenes.txt", header=FALSE)
b_false_multi_genes <- read.table("B73_False_Multi_OGGenes.txt", header=FALSE)
b_true_multi_genes <- read.table("B73_True_Multi_OGGenes.txt", header=FALSE)
p_false_single_genes <- read.table("PH207_False_Single_OGGenes.txt", header=FALSE)
p_true_single_genes <- read.table("PH207_True_Single_OGGenes.txt", header=FALSE)
p_false_multi_genes <- read.table("PH207_False_Multi_OGGenes.txt", header=FALSE)
p_true_multi_genes <- read.table("PH207_True_Multi_OGGenes.txt", header=FALSE)
# Define a function to return a numeric vector of means
comma_mean <- function(string) {
string <- as.character(string)
counts <- unlist(strsplit(string, ","))
counts <- as.numeric(counts)
return(mean(counts))
}
# Get means for each of the partitions
b_false_single_means <- sapply(b_false_single_genes$V3, comma_mean)
b_true_single_means <- sapply(b_true_single_genes$V3, comma_mean)
b_false_multi_means <- sapply(b_false_multi_genes$V3, comma_mean)
b_true_multi_means <- sapply(b_true_multi_genes$V3, comma_mean)
p_false_single_means <- sapply(p_false_single_genes$V3, comma_mean)
p_true_single_means <- sapply(p_true_single_genes$V3, comma_mean)
p_false_multi_means <- sapply(p_false_multi_genes$V3, comma_mean)
p_true_multi_means <- sapply(p_true_multi_genes$V3, comma_mean)
# Make some plots
pdf(file="B73_GenesPerSp_SplitOGs.pdf", 6, 6)
single <- c(b_false_single_means, b_true_single_means)
multi <- c(b_false_multi_means, b_true_multi_means)
hist(
log(single),
breaks=20,
col=rgb(0, 0, 1, 0.5),
xlab="log(Mean Number of Genes per Species in Orthogroup)",
ylab="Count",
main="Distribution of Genes per Species in\nOrthogroups With B73 Tandem Duplicates",
ylim=c(0, 250))
hist(
log(multi),
breaks=20,
col=rgb(1, 0, 0, 0.5),
add=TRUE)
legend(
"topright",
c("Not Split", "Split"),
fill=c(rgb(0, 0, 1, 0.5), rgb(1, 0, 0, 0.5)))
dev.off()
pdf(file="B73_GenesPerSp_TrueFalse.pdf", 6, 6)
truetand <- c(b_true_multi_means, b_true_single_means)
falsetand <- c(b_false_multi_means, b_false_single_means)
hist(
log(truetand),
breaks=20,
col=rgb(0, 0, 1, 0.5),
xlab="log(Mean Number of Genes per Species in Orthogroup)",
ylab="Count",
main="Distribution of Genes per Species in\nOrthogroups With B73 Tandem Duplicates",
ylim=c(0, 250))
hist(
log(falsetand),
breaks=20,
col=rgb(1, 0, 0, 0.5),
add=TRUE)
legend(
"topright",
c("True Tandem", "False Tandem"),
fill=c(rgb(0, 0, 1, 0.5), rgb(1, 0, 0, 0.5)))
dev.off()
pdf(file="PH207_GenesPerSp_SplitOGs.pdf", 6, 6)
single <- c(p_false_single_means, p_true_single_means)
multi <- c(p_false_multi_means, p_true_multi_means)
hist(
log(single),
breaks=20,
col=rgb(0, 0, 1, 0.5),
xlab="log(Mean Number of Genes per Species in Orthogroup)",
ylab="Count",
main="Distribution of Genes per Species in\nOrthogroups With PH207 Tandem Duplicates",
ylim=c(0, 400))
hist(
log(multi),
breaks=20,
col=rgb(1, 0, 0, 0.5),
add=TRUE)
legend(
"topright",
c("Not Split", "Split"),
fill=c(rgb(0, 0, 1, 0.5), rgb(1, 0, 0, 0.5)))
dev.off()
pdf(file="PH207_GenesPerSp_TrueFalse.pdf", 6, 6)
truetand <- c(p_true_multi_means, p_true_single_means)
falsetand <- c(p_false_multi_means, p_false_single_means)
hist(
log(truetand),
breaks=20,
col=rgb(0, 0, 1, 0.5),
xlab="log(Mean Number of Genes per Species in Orthogroup)",
ylab="Count",
main="Distribution of Genes per Species in\nOrthogroups With PH207 Tandem Duplicates",
ylim=c(0, 400))
hist(
log(falsetand),
breaks=20,
col=rgb(1, 0, 0, 0.5),
add=TRUE)
legend(
"topright",
c("True Tandem", "False Tandem"),
fill=c(rgb(0, 0, 1, 0.5), rgb(1, 0, 0, 0.5)))
dev.off()
|
# Build the modelling data set for Alexandrium_3fc: one row per sample with
# a presence/absence response and standardized environmental covariates.
mydataset <- asv.count.HAB10.enviro %>%
  # Sample IDs encode site/month/year (plus a trailing token `z`).
  separate(sample, into = c("Site", "Month", "Year", "z")) %>%
  # Oct-Mar counts as Winter, Apr-Sep as Summer.
  mutate(Season = ifelse(Month %in% c("10","11","12","1","2","3"), "Winter", "Summer")) %>%
  # `x` is the detection indicator; rename it to something meaningful.
  dplyr::rename("presence" = "x") %>%
  # Drop low-pH samples, then keep only the target taxon.
  filter(pH > 7.5) %>%
  filter(Taxon == "Alexandrium_3fc") %>%
  # Z-score the covariates so the normal(0, 1) priors below are comparable
  # across predictors.
  mutate(TempStd = (Temperature - mean(Temperature))/sd(Temperature),
         pHStd = (pH - mean(pH))/sd(pH),
         SalinityStd = (Salinity - mean(Salinity))/sd(Salinity))
# All 16 candidate models share the same data, binomial likelihood, weakly
# informative normal(0, 1) priors, and sampler settings; only the formula
# (and stan_glm vs. stan_glmer) differs, so the shared pieces live in one
# helper.
#
# @param formula model formula for `presence`.
# @param mixed   TRUE for stan_glmer() (random effects), FALSE for stan_glm().
fit_presence <- function(formula, mixed = TRUE) {
  fitter <- if (mixed) stan_glmer else stan_glm
  fitter(formula,
         data = mydataset,
         family = "binomial",
         prior_intercept = normal(0, 1),
         prior = normal(0, 1),
         iter = 1000,
         chains = 4)
}
# Random slopes for one covariate at a time.
arm.fit1 <- fit_presence(presence ~ (1 + pHStd | Season))
arm.fit2 <- fit_presence(presence ~ (1 + TempStd | Season))
arm.fit3 <- fit_presence(presence ~ (1 + SalinityStd | Season))
# Fixed effect plus a random slope for a second covariate.
arm.fit4 <- fit_presence(presence ~ TempStd + (1 + pHStd | Season))
arm.fit5 <- fit_presence(presence ~ pHStd + (1 + TempStd | Season))
arm.fit6 <- fit_presence(presence ~ pHStd + SalinityStd + (1 + TempStd | Season))
# Fixed-effects-only candidates (stan_glm).
arm.fit7 <- fit_presence(presence ~ pHStd + TempStd, mixed = FALSE)
arm.fit8 <- fit_presence(presence ~ pHStd * TempStd, mixed = FALSE)
arm.fit9 <- fit_presence(presence ~ pHStd + SalinityStd, mixed = FALSE)
# Random intercept / random slope variations on pH and salinity.
arm.fit10 <- fit_presence(presence ~ pHStd + SalinityStd + (1 | Season))
arm.fit11 <- fit_presence(presence ~ pHStd + (1 + SalinityStd | Season))
arm.fit12 <- fit_presence(presence ~ SalinityStd + (1 + pHStd | Season))
arm.fit13 <- fit_presence(presence ~ pHStd + (0 + SalinityStd | Season))
arm.fit14 <- fit_presence(presence ~ SalinityStd + (0 + pHStd | Season))
# Temperature-only random-effect structures.
arm.fit15 <- fit_presence(presence ~ 0 + (1 + TempStd | Season))
arm.fit16 <- fit_presence(presence ~ 1 + (0 + TempStd | Season))
# Model comparison: rank all candidates by WAIC.  mget() fetches the fitted
# objects by name directly, avoiding the map(as.symbol) %>% map(eval) dance.
modList <- paste0("arm.fit", 1:16)
names(modList) <- modList
mget(modList, envir = globalenv()) %>%
  map(waic) %>%
  loo_compare()
# Diagnostics for the selected model (arm.fit15, saved below as "BestModel").
plot(arm.fit15)
# Posterior fitted presence vs. temperature, faceted by season, with 50%/95%
# uncertainty ribbons over 1000 posterior draws.
mydataset %>%
  add_fitted_draws(arm.fit15, n = 1000) %>%
  ggplot(aes(x = Temperature, y = presence, color = Season)) +
  geom_point() +
  facet_grid(~ Season, scales = "free_x") +
  stat_lineribbon(aes(y = .value), .width = c(.95, .5)) +
  scale_fill_brewer()
# Persist the selected model for downstream use.
saveRDS(arm.fit15, file = "BayesianLogisticModels_Environmental/Alexandrium_3fc_BestModel.RDS")
#PREDERROR(arm.fit15, mydataset, "presence")
| /Manuscript/BayesianLogisticModels_Environmental/Alexandrium_3fc_models.R | no_license | ramongallego/Harmful.Algae.eDNA | R | false | false | 6,181 | r | mydataset <- asv.count.HAB10.enviro %>%
separate(sample, into = c("Site", "Month", "Year", "z")) %>%
mutate(Season = ifelse(Month %in% c("10","11","12","1","2","3"), "Winter", "Summer")) %>%
dplyr::rename("presence" = "x") %>%
filter(pH > 7.5) %>%
filter(Taxon == "Alexandrium_3fc") %>%
mutate(TempStd = (Temperature - mean(Temperature))/sd(Temperature),
pHStd = (pH - mean(pH))/sd(pH),
SalinityStd = (Salinity - mean(Salinity))/sd(Salinity))
arm.fit1 <- stan_glmer(presence ~ (1 + pHStd | Season),
data = mydataset,
family = "binomial",
prior_intercept = normal(0, 1),
prior = normal(0,1),
iter = 1000,
chains = 4)
arm.fit2 <- stan_glmer(presence ~ (1 + TempStd | Season),
data = mydataset,
family = "binomial",
prior_intercept = normal(0, 1),
prior = normal(0,1),
iter = 1000,
chains = 4)
arm.fit3 <- stan_glmer(presence ~ (1 + SalinityStd | Season),
data = mydataset,
family = "binomial",
prior_intercept = normal(0, 1),
prior = normal(0,1),
iter = 1000,
chains = 4)
arm.fit4 <- stan_glmer(presence ~ TempStd + (1 + pHStd | Season),
data = mydataset,
family = "binomial",
prior_intercept = normal(0, 1),
prior = normal(0,1),
iter = 1000,
chains = 4)
arm.fit5 <- stan_glmer(presence ~ pHStd + (1 + TempStd | Season),
data = mydataset,
family = "binomial",
prior_intercept = normal(0, 1),
prior = normal(0,1),
iter = 1000,
chains = 4)
arm.fit6 <- stan_glmer(presence ~ pHStd + SalinityStd + (1 + TempStd | Season),
data = mydataset,
family = "binomial",
prior_intercept = normal(0, 1),
prior = normal(0,1),
iter = 1000,
chains = 4)
arm.fit7 <- stan_glm(presence ~ pHStd + TempStd,
data = mydataset,
family = "binomial",
prior_intercept = normal(0, 1),
prior = normal(0,1),
iter = 1000,
chains = 4)
arm.fit8 <- stan_glm(presence ~ pHStd * TempStd,
data = mydataset,
family = "binomial",
prior_intercept = normal(0, 1),
prior = normal(0,1),
iter = 1000,
chains = 4)
arm.fit9 <- stan_glm(presence ~ pHStd + SalinityStd,
data = mydataset,
family = "binomial",
prior_intercept = normal(0, 1),
prior = normal(0,1),
iter = 1000,
chains = 4)
arm.fit10 <- stan_glmer(presence ~ pHStd + SalinityStd + (1 | Season),
data = mydataset,
family = "binomial",
prior_intercept = normal(0, 1),
prior = normal(0,1),
iter = 1000,
chains = 4)
arm.fit11 <- stan_glmer(presence ~ pHStd + (1 + SalinityStd | Season),
data = mydataset,
family = "binomial",
prior_intercept = normal(0, 1),
prior = normal(0,1),
iter = 1000,
chains = 4)
arm.fit12 <- stan_glmer(presence ~ SalinityStd + (1 + pHStd | Season),
data = mydataset,
family = "binomial",
prior_intercept = normal(0, 1),
prior = normal(0,1),
iter = 1000,
chains = 4)
arm.fit13 <- stan_glmer(presence ~ pHStd + (0 + SalinityStd | Season),
data = mydataset,
family = "binomial",
prior_intercept = normal(0, 1),
prior = normal(0,1),
iter = 1000,
chains = 4)
arm.fit14 <- stan_glmer(presence ~ SalinityStd + (0 + pHStd | Season),
data = mydataset,
family = "binomial",
prior_intercept = normal(0, 1),
prior = normal(0,1),
iter = 1000,
chains = 4)
arm.fit15 <- stan_glmer(presence ~ 0 + (1 + TempStd | Season),
data = mydataset,
family = "binomial",
prior_intercept = normal(0, 1),
prior = normal(0,1),
iter = 1000,
chains = 4)
arm.fit16 <- stan_glmer(presence ~ 1 + (0 + TempStd | Season),
data = mydataset,
family = "binomial",
prior_intercept = normal(0, 1),
prior = normal(0,1),
iter = 1000,
chains = 4)
#model compare
modList <- paste0("arm.fit", 1:16)
names(modList) <- paste0("arm.fit", 1:16)
modList %>%
map(as.symbol) %>%
map(eval) %>%
map(waic) %>%
loo_compare()
# Diagnostics for the model preferred by the WAIC comparison above (arm.fit15).
plot(arm.fit15)
# Posterior fitted draws vs. observed presence, faceted by season.
# NOTE(review): tidybayes has since renamed add_fitted_draws() to
# add_epred_draws() -- confirm the installed tidybayes version supports this.
mydataset %>%
  add_fitted_draws(arm.fit15, n = 1000) %>%
  ggplot(aes(x = Temperature, y = presence, color = Season)) +
  geom_point() +
  facet_grid(~ Season, scales = "free_x") +
  stat_lineribbon(aes(y = .value), .width = c(.95, .5)) +
  scale_fill_brewer()
# Persist the chosen model for downstream scripts.
saveRDS(arm.fit15, file = "BayesianLogisticModels_Environmental/Alexandrium_3fc_BestModel.RDS")
#PREDERROR(arm.fit15, mydataset, "presence")
|
# Interactive map of FLRT survey tracks around Falmouth, MA.
library(sf)
library(ggmap)
library(ggplot2)
library(leaflet)
library(readxl)
library(dplyr)
library(stringr)
library(tidyr)
library(lubridate)
library(janitor)
# FIX: removed a second, redundant library(ggplot2) call that appeared here.
# Nip-sighting records; not referenced below -- kept for interactive use? TODO confirm
nips <- read.csv('data/nip_data.csv')
# Loads survey track objects (flrt_nonrandom_surv1, flrt_random_surv) into the workspace.
load(file = "data/flrt_data.rdata")
# Falmouth bounding box (lon/lat pairs); unused by the map below -- TODO confirm
falmouth <- c(-70.693531,41.53, -70.448287, 41.625940)
leaflet() %>%
  setView(lng = -70.617672, lat = 41.564279, zoom = 12) %>%
  addTiles() %>%
  addPolylines(data = flrt_nonrandom_surv1, color = "black", popup = ~paste("Nips found:", as.character(count))) %>%
  addPolylines(data = flrt_random_surv, color = "red")
| /R/make_leaflet.R | no_license | merrend/FLRT | R | false | false | 577 | r | library(sf)
library(ggmap)
library(ggplot2)
library(leaflet)
library(readxl)
library(dplyr)
library(stringr)
library(tidyr)
library(lubridate)
library(janitor)
# FIX: removed a second, redundant library(ggplot2) call that appeared here.
# Nip-sighting records; not referenced below -- kept for interactive use? TODO confirm
nips <- read.csv('data/nip_data.csv')
# Loads survey track objects (flrt_nonrandom_surv1, flrt_random_surv) into the workspace.
load(file = "data/flrt_data.rdata")
# Falmouth bounding box (lon/lat pairs); unused by the map below -- TODO confirm
falmouth <- c(-70.693531,41.53, -70.448287, 41.625940)
leaflet() %>%
  setView(lng = -70.617672, lat = 41.564279, zoom = 12) %>%
  addTiles() %>%
  addPolylines(data = flrt_nonrandom_surv1, color = "black", popup = ~paste("Nips found:", as.character(count))) %>%
  addPolylines(data = flrt_random_surv, color = "red")
find_bargain_f <-
  function(f_psf,
           f_by_fleet,
           numbers_at_age,
           lh,
           new_psfad_catch,
           fleet
  ){
    # Objective function for an optimiser: sum of squared log deviations
    # between the catch implied by candidate fishing mortality `f_psf` for
    # `fleet` and the observed catch for that fleet.
    # NOTE(review): `lh` is accepted but never used in this body -- kept for
    # interface compatibility with callers passing a fixed argument list.
    new_f <- f_by_fleet
    # Overwrite the focal fleet's F with the candidate value.
    new_f$f[new_f$gear_type == fleet] <- f_psf
    # Fleet-level F scaled by age-specific selectivity.
    new_f <- new_f %>%
      mutate(effective_f = f * selectivity)
    # Total F at age/quarter across fleets. (`effective_f` is already on
    # `new_f`, so the original's redundant second mutate was dropped.)
    total_f_at_age <- new_f %>%
      ungroup() %>%
      group_by(age,int_quarter) %>%
      dplyr::summarise(f = sum(effective_f))
    # Baranov catch equation at age.
    catch <- numbers_at_age %>%
      left_join(total_f_at_age, by = c('age','int_quarter')) %>%
      mutate(catch = (f/(f + m)) * b_at_age * (1 - exp(-(f + m))))
    # Apportion catch to fleets in proportion to each fleet's share of total F
    # (pmax guards division by zero when no fleet fishes an age/quarter).
    catch_by_fleet <- new_f %>%
      left_join(catch %>% select(age,catch, int_quarter), by = c('age', 'int_quarter')) %>%
      group_by(age, int_quarter) %>%
      mutate(total_f_at_a = pmax(1e-6,sum(effective_f))) %>%
      ungroup() %>%
      mutate(catch_by_fleet = (effective_f / (total_f_at_a)) * catch) %>%
      group_by(gear_type,int_quarter) %>%
      dplyr::summarise(catch = sum(catch_by_fleet))
    psfad_catch <- catch_by_fleet$catch[catch_by_fleet$gear_type == fleet]
    obs_psfad_catch <- new_psfad_catch$new_catch[new_psfad_catch$gear_type == fleet]
    # Squared log deviations; the small constant guards log(0).
    ss <- sum((log(psfad_catch + 1e-6) - log(obs_psfad_catch + 1e-6))^2)
    return(ss)
} | /functions/find_bargain_f.R | no_license | DanOvando/coasean-tuna | R | false | false | 1,395 | r | find_bargain_f <-
function(f_psf,
f_by_fleet,
numbers_at_age,
lh,
new_psfad_catch,
fleet
){
new_f <- f_by_fleet
# f_psf_frame <- data_frame(int_quarter = 1:4, gear_type = 'PS-FAD', f_psf = f_psf)
new_f$f[new_f$gear_type == fleet] <- f_psf
new_f <- new_f %>%
mutate(effective_f = f * selectivity)
total_f_at_age <- new_f %>%
ungroup() %>%
mutate(effective_f = f * selectivity) %>%
group_by(age,int_quarter) %>%
dplyr::summarise(f = sum(effective_f))
catch <- numbers_at_age %>%
left_join(total_f_at_age, by = c('age','int_quarter')) %>%
mutate(catch = (f/(f + m)) * b_at_age * (1 - exp(-(f + m))))
catch_by_fleet <- new_f %>%
left_join(catch %>% select(age,catch, int_quarter), by = c('age', 'int_quarter')) %>%
group_by(age, int_quarter) %>%
mutate(total_f_at_a = pmax(1e-6,sum(effective_f))) %>%
ungroup() %>%
mutate(catch_by_fleet = (effective_f / (total_f_at_a)) * catch) %>%
group_by(gear_type,int_quarter) %>%
dplyr::summarise(catch = sum(catch_by_fleet))
psfad_catch <- catch_by_fleet$catch[catch_by_fleet$gear_type == fleet]
obs_psfad_catch <- new_psfad_catch$new_catch[new_psfad_catch$gear_type == fleet]
ss <- sum((log(psfad_catch + 1e-6) - log(obs_psfad_catch + 1e-6))^2)
return(ss)
} |
#
# Ordinal Logistic Regression
# -----------------------------------------------------
# Steve Miller
# Date: 1 April 2020
# Let's get started with updating/installing some packages -----
# Install only if missing; an unconditional install.packages() re-downloads on every run.
if (!requireNamespace("ordinal", quietly = TRUE)) install.packages("ordinal")
library(tidyverse)
library(post8000r)
library(ordinal)
# Let's revisit an old data frame from earlier in the semester.
gss_spending
?gss_spending
# First, let's do some recoding.
# collegeed = if respondent has an undergraduate or graduate degree.
# pid7 = recoding that top category of other party supporters to be missing.
# natfare_f: declaring that the natfare variable is an ordered factor.
# You gotta do this for ordinal analyses.
# Recode: collegeed = 1 if BA or higher (degree >= 3); partyid == 7
# ("other party") set to NA; natfare declared an ordered factor, which
# clm() requires for an ordinal response.
gss_spending %>%
  mutate(collegeed = ifelse(degree >= 3, 1, 0),
         pid7 = ifelse(partyid == 7, NA, partyid),
         natfare_f = ordered(natfare)) -> gss_spending
# Let's assume we want to model attitudes toward welfare spending among white people as a function of these things:
# age, sex (whether respondent is a woman), college education, income, partisanship (D to R), and ideology (L to C).
# Cumulative link (ordinal logistic) model, white respondents only (race == 1).
M2 <- clm(natfare_f ~ age + sex + collegeed + rincom16 + pid7 + polviews, data=subset(gss_spending, race == 1))
summary(M2)
# Prelim takeaways:
# women are less likely than men to think about spending more on welfare.
# Predictable effects of partisanship and ideology.
# I tend to use very general language on coefficient interpretation for ordinal models, but if you want something more exact, here it is.
# Observe the coefficient for polviews is ~-.269, as a logit.
# Thus, the likelihood (natural logged odds) of observing a 1 versus a 0 or -1 decreases by about -.269 for a unit increase in polviews.
# Related: the likelihood (natural logged odds) of observing a 0 versus a -1 decreases by about -.269 for a unit increase in polviews.
# ON THRESHOLDS:
# I'm generally loathe to talk about these things. They're not typically parameters of interest for how you're probably using an ordinal model.
# However, you'll want to provide them anyway.
# These thresholds or "cut points" are natural logged odds between two variables.
# So, in this case: the "coefficient" reading -1|0 is the natural logged odds of being a -1 versus a 0 or 1.
# The "coefficient" reading 0|1 is the natural logged odds of being a -1 or 0 versus a 1.
# The "|" is kind of misleading, especially if you're used to it as a strict logical operator.
# In this case, the "|" is like a cumulative cut point, or a way of saying it is.
# Let's talk a bit about what's happening here. We call ordinal logistic regression an extension of (binary) logistic regression because:
# 1) it's in spirit *multiple* (binary) logistic regressions of
# 2) the natural logged odds of appearing in a category or below it.
# However, we are assuming the lines are in parallel to each other, separated by the thresholds
# So, in this case, think of M2 as kind of like two logistic regressions, each with identical betas.
# logit(p(y == -1)) = -2.87 + B*X and logit(p(y <= 0)) = -1.4221 + B*X.
# You should, at least in spirit, care about the proportional odds assumption that the slopes are the same at every level.
# There are any number of ways of testing this and I *really* wish there was a Brant test add-on for the ordinal package.
# There isn't (i.e. it's there for the polr function in MASS, which I eschewed here).
# Instead, you can do a nominal test, which is the ordinal package's way of saying "likelihood ratio test."
# Think of this as a test of the hypothesis that relaxing the proportional odds (PO) assumption of parallel lines across all levels of the response provides a better model fit.
# If the p < .05, you reject the hypothesis that relaxing the PO assumption does not improve model fit.
# In other words, one or more of the covariates may have non-constant effects at all levels.
# Likelihood-ratio ("nominal") test: does relaxing proportional odds improve fit?
nominal_test(M2)
# You can interpret the above in a few ways:
# You can use this as a call for a multinomial model. This might even be advisable in this context.
# You could spin me a ball of yarn that with just three categories, awkwardly given to the respondent, that this is really an unstructured response.
# OR: you can allow the effects to vary at all levels.
# You do this by specifying a nominal call in the clm function.
# Here, we'll do it for just age and sex.
# Relax the proportional-odds assumption for age and sex via nominal=.
M3 <- clm(natfare_f ~ collegeed + rincom16 + pid7 + polviews, nominal = ~ age + sex, data=subset(gss_spending, race == 1))
summary(M3) # Notice there's no single coefficient for age and sex. It's in the intercepts/thresholds.
nominal_test(M3)
# Here's a better idea, while also upfront confessing I'm doing this stream of consciousness.
# Let's note that the nature of the response (-1, 0, 1) is probably wanting a multinomial solution notwithstanding the order we want to impose on it.
# Instead, let's make an index of three variables: natheal, natfare, and natsoc
# Think of this as an index on attitudes toward social spending (broadly defined). Higher values = more support for more social spending
# (or, technically, that the respondent thinks we're spending too little)
# Build an additive social-spending index (higher = more support for spending).
# BUG FIX: the original created `y_f`, but every later reference (table(),
# the M4 formula, summary(M4)) uses `y_ord`, so the script failed with
# "object 'y_ord' not found". The ordered factor is now named `y_ord`.
gss_spending %>%
  mutate(y = natheal + natfare + natsoc,
         y_ord = ordered(y)) -> gss_spending
# Here's what our variable looks like:
table(gss_spending$y_ord)
# Let's try this again
M4 <- clm(y_ord ~ age + sex + collegeed + rincom16 + pid7 + polviews, data=subset(gss_spending, race == 1))
summary(M4)
nominal_test(M4)
# Much betta https://66.media.tumblr.com/6437f1bc98d5d0952a1edd19b9e4241e/1932ca80ea201e4f-5d/s640x960/a558c99f1fa3f6d0377ccfc48966917a8a94c8f2.gif
# You can do the same thing and the same interpretation of the coefficient output as you did above.
# More values in the DV, though, mean more thresholds to sift through.
# HOT #take coming up: I'm of the mentality you should always run an ordinal logistic regression if that's the DV you're handed.
# I will throw something at you if you try running an OLS on a five-item Likert because that's just not the data you have.
# But I kind of hate them, and I would forgive you for hating them too, because communicating them is a chore.
# OLS has a straightforward interpretation. Binary DVs are really straightforward as well.
# However, the PO assumption can be restrictive and there are a lot of moving pieces from the model output.
# Your audience may not have the appetite for it.
# In other words, be prepared to communicate your statistical model graphically.
# In the ordinal package, this is the predict() function and think about using it with hypothetical data.
# For example, let's create a simple data frame that has all our right-hand side values, but we'll have three variables of partisanship.
# These will be the strong Ds (0), pure indies who don't lean one way or another (3), and the strong Rs (6).
# Everything else is at a typical value (a median).
# Hypothetical profiles: strong Democrat (0), pure independent (3), and
# strong Republican (6), with all other covariates held at sample medians.
newdat <- tibble(age = median(gss_spending$age, na.rm=T),
                 collegeed = 0,
                 sex = 0,
                 pid7 = c(0, 3, 6),
                 polviews = median(gss_spending$polviews, na.rm=T),
                 rincom16 = median(gss_spending$rincom16, na.rm=T))
# Alrightie, this code is convoluted as hell, and it's why I prefer Bayes for ordinal models.
# But that's in two weeks.
# Oh god, here we go...
# Predicted probabilities (with 95% intervals) for each response category
# across the three partisanship profiles in newdat, plotted faceted by category.
predict(M2, newdata = newdat, se.fit=T) %>% # get predictions with standard errors.
  # This is a list of two matrices
  # Let's coerce it to two data frames while also begrudging that I have to do this.
  map(~as.data.frame(.)) %>% # god purrr is awesome
  # There's a hidden rowname in here. It's going to somewhat coincide with the values of pid7
  # Let's extract it
  map(~rownames_to_column(.)) %>%
  # Now let's make these two data frames into one data frame.
  # Importantly, obj is going to tell me whether it's a prediction or a standard error around the prediction
  map2_df(names(.), ~mutate(.x,obj=.y)) %>%
  # alrightie... okay. See that rowname variable? I know that's the pid7 values of 0, 3, and 6.
  # However, the clm predict doesn't save those. Let's tell them for what they are.
  rename(pid7 = rowname) %>%
  # It also delightfully thinks it's a character. So, let's humor it and overwrite it.
  mutate(pid7 = rep(c("Strong Democrat", "Independent", "Strong Republican"), 2),
         # Make it a factor in order it appears. You'll thank me later for this.
         pid7 = forcats::fct_inorder(pid7)) %>%
  # okay, tidyr::gather() is going to have to do some heavy lifting here.
  gather(var, val, -pid7, -obj) %>%
  # Importantly, I needed this longer because I want my -1, 0, and 1s (as responses) to be "long."
  # so, now this made it "longer" while still giving me a glimpse as to what's my fit and what's my se.fit
  # See that's in the obj column? Let's group_split and bind_cols to get them next to each other
  group_split(obj) %>%
  bind_cols() %>%
  # voila! I have everything I need now
  # Now, let's have some fun and create a column called upr and lwr creating bounds around the estimate
  rename(fit = val,
         se = val1) %>%
  mutate(upr = fit + 1.96*se,
         lwr = fit - 1.96*se) %>%
  ggplot(.,aes(pid7, fit, ymax=upr, ymin=lwr)) +
  geom_pointrange() +
  # Oh god help me I never do anything the easy way...
  facet_wrap(~var, labeller=labeller(var = c("-1" = "Spends Too Much",
                                             "0" = "Spending About Right",
                                             "1" = "Spending Too Little"))) +
  labs(title = "Attitudes Toward Spending on Welfare, by Partisanship",
       x = "Partisanship", y = "Predicted Probability of the Response (with 95% Intervals)",
       caption = "Source: General Social Survey, 2018. Note: for pedagogical use in my grad methods class. Stay out of my mentions.",
       subtitle = "Increasing partisanship (with the GOP) increases the likelihood of the spend too much or spend about right response, but decreases the likelihood of the\nspend too little response. You knew this.")
# ^ Consider this a preview for the quantities of interest week, that's coming up next.
# Basically: regression modeling is story-telling as well, in a way.
# You, the story-teller, just have more work to do with ordinal models, even as the ordinal model may faithfully capture the underlying distribution of the DV.
# With that in mind, I want to give you an "out" of a kind.
# This will touch on some of the readings you had this week (and even earlier in the semester) on whether you can treat your ordinal DV as continuous.
# My rule of thumb:
# 3-5: hard no.
# 7: I'm listening...
# 10+: f*ck it, just go for it, provided there's no natural clumping of responses on some extreme in the distribution.
# ^ The more thorough interpretation: with more values on a still truncated (ordinal) scale, you can start to think of the differences as "equally spaced out."
# In which case, the OLS model is informative, if technically wrong.
# You'll remember it performed well enough in the lecture in which I explicitly simulated the data, even if it was discernibly off the true parameters.
# No one is going to give you too much grief and I won't either, but you may want to consider some form of heteroskedasticity correction to be safe.
# ^ On the above point in the distribution of responses on a granular ordinal scale. Remember the bribe-taking prompt in the US from the World Values Survey?
# This was the justifiability of taking a bribe on a 1-10 scale.
# It has 10 responses, but almost all of them are at 1.
# In other words, don't treat that as interval below:
# Distribution of the 1-10 bribe-justifiability item: heavily right-skewed,
# illustrating a granular ordinal scale that should NOT be treated as interval.
usa_justifbribe %>%
  group_by(justifbribe) %>%
  count() %>%
  na.omit %>%
  ggplot(.,aes(as.factor(justifbribe), n)) +
  geom_bar(stat="identity", alpha=0.8, color="black") +
  scale_x_discrete(labels=c("Never Justifiable", "2", "3", "4",
                            "5", "6", "7", "8", "9", "Always Justifiable")) +
  scale_y_continuous(labels = scales::comma) +
  geom_text(aes(label=n), vjust=-.5, colour="black",
            position=position_dodge(.9), size=4) +
  labs(y = "Number of Observations in Particular Response",
       x = "",
       title = "The Justifiability of Taking a Bribe in the U.S., 1995-2011",
       caption = "Data: World Values Survey, 1995-2011",
       subtitle = "There are just 10 different responses in this variable with a huge right skew. I wouldn't ask for a mean of this.")
# You may not even want to think of it as ordinal. With noisy as hell data like this, as I mentioned in that session, you'll probably just want to embrace
# the noisiness and estimate it as a binary DV of 1 versus not 1.
# What about in our y variable from model 4?
# Compare the ordinal fit (M4) with an OLS fit (M5) on the same raw index y.
summary(M4)
summary(M5 <- lm(y ~ age + sex + collegeed + rincom16 + pid7 + polviews, data=subset(gss_spending, race == 1)))
broom::tidy(M4)
broom::tidy(M5)
# ^ off, technically wrong, but not so wrong.
# Recall the assumptions of the ordinal model of the underlying latent variable. This is why OLS is performing better here
# than it performed with the binary model.
# What about something bigger, like the sumnatsoc variable?
# NOTE(review): sumnatsoc is never created in this script -- presumably it
# ships with the gss_spending data in post8000r. TODO confirm.
table(gss_spending$sumnatsoc)
summary(M6 <- clm(ordered(sumnatsoc) ~ age + sex + collegeed + rincom16 + pid7 + polviews, data=subset(gss_spending, race == 1)))
summary(M7 <- lm(sumnatsoc ~ age + sex + collegeed + rincom16 + pid7 + polviews, data=subset(gss_spending, race == 1)))
# Similar performance. No one is going to yell too much at you for doing an OLS on a technically ordinal item that has like 22 different values.
# But, maybe consider some kind of heteroskedasticity correction.
| /lab-scripts/ordinal-logistic-regression-lab.R | permissive | anhnguyendepocen/post8000 | R | false | false | 13,569 | r | #
# Ordinal Logistic Regression
# -----------------------------------------------------
# Steve Miller
# Date: 1 April 2020
# Let's get started with updating/installing some packages -----
# Install only if missing; an unconditional install.packages() re-downloads on every run.
if (!requireNamespace("ordinal", quietly = TRUE)) install.packages("ordinal")
library(tidyverse)
library(post8000r)
library(ordinal)
# Let's revisit an old data frame from earlier in the semester.
gss_spending
?gss_spending
# First, let's do some recoding.
# collegeed = if respondent has an undergraduate or graduate degree.
# pid7 = recoding that top category of other party supporters to be missing.
# natfare_f: declaring that the natfare variable is an ordered factor.
# You gotta do this for ordinal analyses.
gss_spending %>%
mutate(collegeed = ifelse(degree >= 3, 1, 0),
pid7 = ifelse(partyid == 7, NA, partyid),
natfare_f = ordered(natfare)) -> gss_spending
# Let's assume we want to model attitudes toward welfare spending among white people as a function of these things:
# age, sex (whether respondent is a woman), college education, income, partisanship (D to R), and ideology (L to C).
M2 <- clm(natfare_f ~ age + sex + collegeed + rincom16 + pid7 + polviews, data=subset(gss_spending, race == 1))
summary(M2)
# Prelim takeaways:
# women are less likely than men to think about spending more on welfare.
# Predictable effects of partisanship and ideology.
# I tend to use very general language on coefficient interpretation for ordinal models, but if you want something more exact, here it is.
# Observe the coefficient for polviews is ~-.269, as a logit.
# Thus, the likelihood (natural logged odds) of observing a 1 versus a 0 or -1 decreases by about -.269 for a unit increase in polviews.
# Related: the likelihood (natural logged odds) of observing a 0 versus a -1 decreases by about -.269 for a unit increase in polviews.
# ON THRESHOLDS:
# I'm generally loathe to talk about these things. They're not typically parameters of interest for how you're probably using an ordinal model.
# However, you'll want to provide them anyway.
# These thresholds or "cut points" are natural logged odds between two variables.
# So, in this case: the "coefficient" reading -1|0 is the natural logged odds of being a -1 versus a 0 or 1.
# The "coefficient" reading 0|1 is the natural logged odds of being a -1 or 0 versus a 1.
# The "|" is kind of misleading, especially if you're used to it as a strict logical operator.
# In this case, the "|" is like a cumulative cut point, or a way of saying it is.
# Let's talk a bit about what's happening here. We call ordinal logistic regression an extension of (binary) logistic regression because:
# 1) it's in spirit *multiple* (binary) logistic regressions of
# 2) the natural logged odds of appearing in a category or below it.
# However, we are assuming the lines are in parallel to each other, separated by the thresholds
# So, in this case, think of M2 as kind of like two logistic regressions, each with identical betas.
# logit(p(y == -1)) = -2.87 + B*X and logit(p(y <= 0)) = -1.4221 + B*X.
# You should, at least in spirit, care about the proportional odds assumption that the slopes are the same at every level.
# There are any number of ways of testing this and I *really* wish there was a Brant test add-on for the ordinal package.
# There isn't (i.e. it's there for the polr function in MASS, which I eschewed here).
# Instead, you can do a nominal test, which is the ordinal package's way of saying "likelihood ratio test."
# Think of this as a test of the hypothesis that relaxing the proportional odds (PO) assumption of parallel lines across all levels of the response provides a better model fit.
# If the p < .05, you reject the hypothesis that relaxing the PO assumption does not improve model fit.
# In other words, one or more of the covariates may have non-constant effects at all levels.
nominal_test(M2)
# You can interpret the above in a few ways:
# You can use this as a call for a multinomial model. This might even be advisable in this context.
# You could spin me a ball of yarn that with just three categories, awkwardly given to the respondent, that this is really an unstructured response.
# OR: you can allow the effects to vary at all levels.
# You do this by specifying a nominal call in the clm function.
# Here, we'll do it for just age and sex.
M3 <- clm(natfare_f ~ collegeed + rincom16 + pid7 + polviews, nominal = ~ age + sex, data=subset(gss_spending, race == 1))
summary(M3) # Notice there's no single coefficient for age and sex. It's in the intercepts/thresholds.
nominal_test(M3)
# Here's a better idea, while also upfront confessing I'm doing this stream of consciousness.
# Let's note that the nature of the response (-1, 0, 1) is probably wanting a multinomial solution notwithstanding the order we want to impose on it.
# Instead, let's make an index of three variables: natheal, natfare, and natsoc
# Think of this as an index on attitudes toward social spending (broadly defined). Higher values = more support for more social spending
# (or, technically, that the respondent thinks we're spending too little)
# Build an additive social-spending index (higher = more support for spending).
# BUG FIX: the original created `y_f`, but every later reference (table(),
# the M4 formula, summary(M4)) uses `y_ord`, so the script failed with
# "object 'y_ord' not found". The ordered factor is now named `y_ord`.
gss_spending %>%
  mutate(y = natheal + natfare + natsoc,
         y_ord = ordered(y)) -> gss_spending
# Here's what our variable looks like:
table(gss_spending$y_ord)
# Let's try this again
M4 <- clm(y_ord ~ age + sex + collegeed + rincom16 + pid7 + polviews, data=subset(gss_spending, race == 1))
summary(M4)
nominal_test(M4)
# Much betta https://66.media.tumblr.com/6437f1bc98d5d0952a1edd19b9e4241e/1932ca80ea201e4f-5d/s640x960/a558c99f1fa3f6d0377ccfc48966917a8a94c8f2.gif
# You can do the same thing and the same interpretation of the coefficient output as you did above.
# More values in the DV, though, mean more thresholds to sift through.
# HOT #take coming up: I'm of the mentality you should always run an ordinal logistic regression if that's the DV you're handed.
# I will throw something at you if you try running an OLS on a five-item Likert because that's just not the data you have.
# But I kind of hate them, and I would forgive you for hating them too, because communicating them is a chore.
# OLS has a straightforward interpretation. Binary DVs are really straightforward as well.
# However, the PO assumption can be restrictive and there are a lot of moving pieces from the model output.
# Your audience may not have the appetite for it.
# In other words, be prepared to communicate your statistical model graphically.
# In the ordinal package, this is the predict() function and think about using it with hypothetical data.
# For example, let's create a simple data frame that has all our right-hand side values, but we'll have three variables of partisanship.
# These will be the strong Ds (0), pure indies who don't lean one way or another (3), and the strong Rs (6).
# Everything else is at a typical value (a median).
newdat <- tibble(age = median(gss_spending$age, na.rm=T),
collegeed = 0,
sex = 0,
pid7 = c(0, 3, 6),
polviews = median(gss_spending$polviews, na.rm=T),
rincom16 = median(gss_spending$rincom16, na.rm=T))
# Alrightie, this code is convoluted as hell, and it's why I prefer Bayes for ordinal models.
# But that's in two weeks.
# Oh god, here we go...
predict(M2, newdata = newdat, se.fit=T) %>% # get predictions with standard errors.
# This is a list of two matrices
# Let's coerce it to two data frames while also begrudging that I have to do this.
map(~as.data.frame(.)) %>% # god purrr is awesome
# There's a hiden rowname in here. It's going to somewhat coincide with the values of pid7
# Let's extract it
map(~rownames_to_column(.)) %>%
# Now let's make these two data frames into one data frame.
# Importantly, obj is going to tell me whether it's a prediction or a standard error around the prediction
map2_df(names(.), ~mutate(.x,obj=.y)) %>%
# alrightie... okay. See that rowname variable? I know that's the pid7 values of 0, 3, and 6.
# However, the clm predict doesn't save those. Let's tell them for what they are.
rename(pid7 = rowname) %>%
# It also delightfully thinks it's a character. So, let's humor it and overwrite it.
mutate(pid7 = rep(c("Strong Democrat", "Independent", "Strong Republican"), 2),
# Make it a factor in order it appears. You'll thank me later for this.
pid7 = forcats::fct_inorder(pid7)) %>%
# okay, tidyr::gather() is going to have to do some heavy lifting here.
gather(var, val, -pid7, -obj) %>%
# Importantly, I needed this longer because I want my -1, 0, and 1s (as responses) to be "long."
# so, now this made it "longer" while still giving me a glimpse as to what's my fit and what's my se.fit
# See that's in the obj column? Let's group_split and bind_cols to get them next to each other
group_split(obj) %>%
bind_cols() %>%
# voila! I have everything I need now
# Now, let's have some fun and create a column called upr and lwr creating bounds around the estimate
rename(fit = val,
se = val1) %>%
mutate(upr = fit + 1.96*se,
lwr = fit - 1.96*se) %>%
ggplot(.,aes(pid7, fit, ymax=upr, ymin=lwr)) +
geom_pointrange() +
# Oh god help me I never do anything the easy way...
facet_wrap(~var, labeller=labeller(var = c("-1" = "Spends Too Much",
"0" = "Spending About Right",
"1" = "Spending Too Little"))) +
labs(title = "Attitudes Toward Spending on Welfare, by Partisanship",
x = "Partisanship", y = "Predicted Probability of the Response (with 95% Intervals)",
caption = "Source: General Social Survey, 2018. Note: for pedagogical use in my grad methods class. Stay out of my mentions.",
subtitle = "Increasing partisanship (with the GOP) increases the likelihood of the spend too much or spend about right response, but decreases the likelihood of the\nspend too little response. You knew this.")
# ^ Consider this a preview for the quantities of interest week, that's coming up next.
# Basically: regression modeling is story-telling as well, in a way.
# You, the story-teller, just have more work to do with ordinal models, even as the ordinal model may faithfully capture the underlying distribution of the DV.
# With that in mind, I want to give you an "out" of a kind.
# This will touch on some of the readings you had this week (and even earlier in the semester) on whether you can treat your ordinal DV as continuous.
# My rule of thumb:
# 3-5: hard no.
# 7: I'm listening...
# 10+: f*ck it, just go for it, provided there's no natural clumping of responses on some extreme in the distribution.
# ^ The more thorough interpretation: with more values on a still truncated (ordinal) scale, you can start to think of the differences as "equally spaced out."
# In which case, the OLS model is informative, if technically wrong.
# You'll remember it performed well enough in the lecture in which I explicitly simulated the data, even if it was discernibly off the true parameters.
# No one is going to give you too much grief and I won't either, but you may want to consider some form of heteroskedasticity correction to be safe.
# ^ On the above point in the distribution of responses on a granular ordinal scale. Remember the bribe-taking prompt in the US from the World Values Survey?
# This was the justifiability of taking a bribe on a 1-10 scale.
# It has 10 responses, but almost all of them are at 1.
# In other words, don't treat that as interval below:
usa_justifbribe %>%
group_by(justifbribe) %>%
count() %>%
na.omit %>%
ggplot(.,aes(as.factor(justifbribe), n)) +
geom_bar(stat="identity", alpha=0.8, color="black") +
scale_x_discrete(labels=c("Never Justifiable", "2", "3", "4",
"5", "6", "7", "8", "9", "Always Justifiable")) +
scale_y_continuous(labels = scales::comma) +
geom_text(aes(label=n), vjust=-.5, colour="black",
position=position_dodge(.9), size=4) +
labs(y = "Number of Observations in Particular Response",
x = "",
title = "The Justifiability of Taking a Bribe in the U.S., 1995-2011",
caption = "Data: World Values Survey, 1995-2011",
subtitle = "There are just 10 different responses in this variable with a huge right skew. I wouldn't ask for a mean of this.")
# You may not even want to think of it as ordinal. With noisy as hell data like this, as I mentioned in that session, you'll probably just want to embrace
# the noisiness and estimate it as a binary DV of 1 versus not 1.
# What about in our y variable from model 4?
summary(M4)
summary(M5 <- lm(y ~ age + sex + collegeed + rincom16 + pid7 + polviews, data=subset(gss_spending, race == 1)))
broom::tidy(M4)
broom::tidy(M5)
# ^ off, technically wrong, but not so wrong.
# Recall the assumptions of the ordinal model of the underlying latent variable. This is why OLS is performing better here
# than it performed with the binary model.
# What about something bigger, like the sumnatsoc variable?
table(gss_spending$sumnatsoc)
summary(M6 <- clm(ordered(sumnatsoc) ~ age + sex + collegeed + rincom16 + pid7 + polviews, data=subset(gss_spending, race == 1)))
summary(M7 <- lm(sumnatsoc ~ age + sex + collegeed + rincom16 + pid7 + polviews, data=subset(gss_spending, race == 1)))
# Similar performance. No one is going to yell too much at you for doing an OLS on a technically ordinal item that has like 22 different values.
# But, maybe consider some kind of heteroskedasticity correction.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/marketplacecatalog_operations.R
\name{marketplacecatalog_list_entities}
\alias{marketplacecatalog_list_entities}
\title{Provides the list of entities of a given type}
\usage{
marketplacecatalog_list_entities(
Catalog,
EntityType,
FilterList = NULL,
Sort = NULL,
NextToken = NULL,
MaxResults = NULL,
OwnershipType = NULL
)
}
\arguments{
\item{Catalog}{[required] The catalog related to the request. Fixed value: \code{AWSMarketplace}}
\item{EntityType}{[required] The type of entities to retrieve.}
\item{FilterList}{An array of filter objects. Each filter object contains two attributes,
\code{filterName} and \code{filterValues}.}
\item{Sort}{An object that contains two attributes, \code{SortBy} and \code{SortOrder}.}
\item{NextToken}{The value of the next token, if it exists. Null if there are no more
results.}
\item{MaxResults}{Specifies the upper limit of the elements on a single page. If a value
isn't provided, the default value is 20.}
\item{OwnershipType}{Filters the returned entities by ownership type, e.g. \code{SELF} (entities you own) or \code{SHARED} (entities shared with you).}
}
\description{
Provides the list of entities of a given type.
See \url{https://www.paws-r-sdk.com/docs/marketplacecatalog_list_entities/} for full documentation.
}
\keyword{internal}
| /cran/paws.cost.management/man/marketplacecatalog_list_entities.Rd | permissive | paws-r/paws | R | false | true | 1,254 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/marketplacecatalog_operations.R
\name{marketplacecatalog_list_entities}
\alias{marketplacecatalog_list_entities}
\title{Provides the list of entities of a given type}
\usage{
marketplacecatalog_list_entities(
Catalog,
EntityType,
FilterList = NULL,
Sort = NULL,
NextToken = NULL,
MaxResults = NULL,
OwnershipType = NULL
)
}
\arguments{
\item{Catalog}{[required] The catalog related to the request. Fixed value: \code{AWSMarketplace}}
\item{EntityType}{[required] The type of entities to retrieve.}
\item{FilterList}{An array of filter objects. Each filter object contains two attributes,
\code{filterName} and \code{filterValues}.}
\item{Sort}{An object that contains two attributes, \code{SortBy} and \code{SortOrder}.}
\item{NextToken}{The value of the next token, if it exists. Null if there are no more
results.}
\item{MaxResults}{Specifies the upper limit of the elements on a single page. If a value
isn't provided, the default value is 20.}
\item{OwnershipType}{}
}
\description{
Provides the list of entities of a given type.
See \url{https://www.paws-r-sdk.com/docs/marketplacecatalog_list_entities/} for full documentation.
}
\keyword{internal}
|
## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse of 'x'; NULL until a caller stores one via setinverse(),
  # and invalidated whenever set() replaces the underlying matrix.
  cached_inv <- NULL
  set <- function(y) {
    x <<- y
    cached_inv <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) cached_inv <<- inverse
  getinverse <- function() cached_inv
  # Expose the closure environment as a plain list of accessor functions.
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## cacheSolve: computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and the
## matrix has not changed), the cached inverse is returned instead of being
## recomputed.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  # Cache miss: invert the stored matrix and remember the result.
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
| /cachematrix.R | no_license | SidGarcia/ProgrammingAssignment2 | R | false | false | 1,129 | r | ## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setinverse <- function(inverse) i <<- inverse
getinverse <- function() i
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not changed), then cacheSolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinverse()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
i
}
|
# server.R
# Shiny server setup: loads the electoral-college data, joins on state codes,
# derives a votes-per-100K-people ratio, and feeds the plot builders sourced
# from scripts/.
library(dplyr)
# Read in data
source('./scripts/build_map.R')
source('./scripts/build_scatter.R')
df <- read.csv('./data/electoral_college.csv', stringsAsFactors = FALSE)
state_codes <- read.csv('./data/state_codes.csv', stringsAsFactors = FALSE)
# Join together state.codes and df
# (left join keeps every electoral-college row even without a state-code match)
joined_data <- left_join(df, state_codes, by="state")
# Compute the electoral votes per 100K people in each state
joined_data <- joined_data %>% mutate(ratio = votes/population * 100000)
# Start shinyServer
shinyServer(function(input, output) {
# Render a plotly object that returns your map
output$map <- renderPlotly({
return(build_map(joined_data, input$mapvar))
})
output$scatter <- renderPlotly(({
return(build_scatter(joined_data, input$search))
}))
}) | /exercise-5/server.R | permissive | rsr3rs/ch16-shiny | R | false | false | 807 | r | # server.R
library(dplyr)
# Read in data
source('./scripts/build_map.R')
source('./scripts/build_scatter.R')
df <- read.csv('./data/electoral_college.csv', stringsAsFactors = FALSE)
state_codes <- read.csv('./data/state_codes.csv', stringsAsFactors = FALSE)
# Join together state.codes and df
joined_data <- left_join(df, state_codes, by="state")
# Compute the electoral votes per 100K people in each state
joined_data <- joined_data %>% mutate(ratio = votes/population * 100000)
# Start shinyServer
shinyServer(function(input, output) {
# Render a plotly object that returns your map
output$map <- renderPlotly({
return(build_map(joined_data, input$mapvar))
})
output$scatter <- renderPlotly(({
return(build_scatter(joined_data, input$search))
}))
}) |
# Unit tests for dst::reduction(), which folds a vector with a binary
# function supplied by name through the 'f' argument.
context("Vector reduction")
library(dst)
test_that("reduction", {
# T1 Apply reduction to a numeric vector
# Left fold with "-": ((1 - 2) - 3) - 4 == -8
result <- reduction(c(1,2,3,4), f="-")
expect_equal(result, -8)
# T2 Apply reduction to a logical vector
# Any 0 in the input makes the "&" fold FALSE
result <- reduction(c(1,0,1,1,0), f="&")
expect_equal(result, FALSE)
#T3 Apply reduction to a string vector
# paste() joins with its default single-space separator
result <- reduction(c("a", "b", "c"), f="paste")
expect_equal(result, "a b c")
}) | /tests/testthat/test_reduction.R | no_license | RAPLER/dst-1 | R | false | false | 420 | r | context("Vector reduction")
library(dst)
test_that("reduction", {
# T1 Apply reduction to a numeric vector
result <- reduction(c(1,2,3,4), f="-")
expect_equal(result, -8)
# T2 Apply reduction to a logical vector
result <- reduction(c(1,0,1,1,0), f="&")
expect_equal(result, FALSE)
#T3 Apply reduction to a string vector
result <- reduction(c("a", "b", "c"), f="paste")
expect_equal(result, "a b c")
}) |
# Exploratory debugging notes: checking whether EcoSimR::cooc_null_model()
# runs are reproducible with and without saved seeds.
############# Examples from the documentation of cooc_null_model ####################################
library(EcoSimR)
library(tidyverse)
## Example is not identical if we do not save seed
finchMod <- cooc_null_model(dataWiFinches, algo="sim9",nReps=10000,burn_in = 500)
finch2 <- cooc_null_model(dataWiFinches, algo="sim9",nReps=10000,burn_in = 500)
identical(finchMod, finch2)
## Example that is repeatable with a saved seed
finchMod <- cooc_null_model(dataWiFinches, algo="sim1",saveSeed = TRUE)
a <- mean(finchMod$Sim)
## Run the model with the seed saved
finchMod <- cooc_null_model(dataWiFinches, algo="sim1",saveSeed=T)
## Check model output
b <- mean(finchMod$Sim)
## So much for the documentation, these are still not identical.
identical(a, b)
## This doesn't even run, just throws an error
## reproduce_model(finchMod$Sim)
## Not even sure why this is included, but it is not identical
finchMod <- cooc_null_model(dataWiFinches, algo="sim1")
mean(finchMod$Sim)
## reproduce_model(finchMod$Sim)
############################ Example from Kari's code ###########################
# Toy 6x4 random presence/absence matrix used by the Sorenson helper below.
prac <- matrix(rbinom(24, 1, .5), ncol = 4)
pracdf <- as.data.frame(prac)
names(pracdf) <- c('a','b','c','d')
get_sorenson_matrix <- function(cooccurrence_df) {
  # Pairwise Sorenson dissimilarity between the columns (species) of a 0/1
  # co-occurrence table: (b + c) / (2a + b + c), where a = shared presences
  # and b, c = presences unique to each member of the pair.
  n_sp <- ncol(cooccurrence_df)
  shared <- matrix(nrow = n_sp, ncol = n_sp)    # 'a' counts per pair
  unshared <- matrix(nrow = n_sp, ncol = n_sp)  # row i: 'b' = in i but not j
  for (i in seq_len(n_sp)) {
    # Restrict to sites where species i is present; shared presences with
    # species j are then the remaining 1-entries in column j.
    present_i <- subset(cooccurrence_df, cooccurrence_df[, i] == 1)
    a <- colSums(present_i[, i] == present_i)
    shared[i, ] <- a
    unshared[i, ] <- nrow(present_i) - a
  }
  # b + c is symmetric, so fill both triangles in one pass.
  dissimilarity <- matrix(nrow = n_sp, ncol = n_sp)
  for (i in seq_len(n_sp)) {
    for (j in seq_len(n_sp)) {
      b_plus_c <- unshared[i, j] + unshared[j, i]
      dissimilarity[i, j] <- b_plus_c
      dissimilarity[j, i] <- b_plus_c
    }
  }
  as_data_frame(dissimilarity / ((2 * shared) + dissimilarity))
}
prac_sor <- get_sorenson_matrix(pracdf)
df <- as.data.frame(prac_sor)
## These are identical:
n1 <- cooc_null_model(df, algo = "sim9", nReps = 1000, saveSeed = FALSE)$Randomized.Data
n2 <- cooc_null_model(df, algo = "sim9", nReps = 1000, saveSeed = FALSE)$Randomized.Data
identical(n1, n2)
## All identical except time stamp:
n1 <- cooc_null_model(df, algo = "sim9", nReps = 1000, saveSeed = FALSE)
n2 <- cooc_null_model(df, algo = "sim9", nReps = 1000, saveSeed = FALSE)
map2(n1, n2, identical)
## Randomized.Data is not identical with sim1:
n1 <- cooc_null_model(df, algo = "sim1", nReps = 1000, saveSeed = FALSE)$Randomized.Data
n2 <- cooc_null_model(df, algo = "sim1", nReps = 1000, saveSeed = FALSE)$Randomized.Data
identical(n1, n2)
## Okay, so things are looking pretty hokey at this point.... Examining cooc_null_model shows completely different dispatch methods for sim9 vs the rest:
cooc_null_model
## Calling sim9 routine directly, we find that these are still identical
n1 <- sim9(df, algo = "sim9", metric = "c_score")$Randomized.Data
n2 <- sim9(df, algo = "sim9", metric = "c_score")$Randomized.Data
identical(n1,n2)
## So time to dig into code for sim9
sim9
## Looks like it calls sim9_single, whatever that is. Stripping out that part of the code, we
## see sim9_single is also giving identical values on each call:
# NOTE(review): 'df' is reassigned here to EcoSimR's bundled speciesData,
# shadowing the Sorenson data frame built above.
df <- speciesData
metricF <- get("c_score")
Obs <- metricF(as.matrix(speciesData))
msim <- speciesData[rowSums(speciesData) > 0, ]
n1 <- sim9_single(msim)
n2 <- sim9_single(msim)
identical(n1, n2)
ex1 <- matrix(rbinom(100, 1, 0.5), nrow = 10)
## This is not expected, or at least it doesn't occur with the default data of the function:
identical(sim9_single(ex1), sim9_single(ex1))
## So, what's special about df? Perhaps something in the conversions of speciesData is causing this...
| /debug_null.R | no_license | karinorman/richness_decomposition | R | false | false | 3,914 | r | ############# Examples from the documentation of cooc_null_model ####################################
library(EcoSimR)
library(tidyverse)
## Example is not identical if we do not save seed
finchMod <- cooc_null_model(dataWiFinches, algo="sim9",nReps=10000,burn_in = 500)
finch2 <- cooc_null_model(dataWiFinches, algo="sim9",nReps=10000,burn_in = 500)
identical(finchMod, finch2)
## Example that is repeatable with a saved seed
finchMod <- cooc_null_model(dataWiFinches, algo="sim1",saveSeed = TRUE)
a <- mean(finchMod$Sim)
## Run the model with the seed saved
finchMod <- cooc_null_model(dataWiFinches, algo="sim1",saveSeed=T)
## Check model output
b <- mean(finchMod$Sim)
## So much for the documentation, these are still not identical.
identical(a, b)
## This doesn't even run, just throws an error
## reproduce_model(finchMod$Sim)
## Not even sure why this is included, but it is not identical
finchMod <- cooc_null_model(dataWiFinches, algo="sim1")
mean(finchMod$Sim)
## reproduce_model(finchMod$Sim)
############################ Example from Kari's code ###########################
prac <- matrix(rbinom(24, 1, .5), ncol = 4)
pracdf <- as.data.frame(prac)
names(pracdf) <- c('a','b','c','d')
get_sorenson_matrix <- function(cooccurrence_df) {
a_matrix <-
matrix(nrow = ncol(cooccurrence_df),
ncol = ncol(cooccurrence_df))
bc_matrix <-
matrix(nrow = ncol(cooccurrence_df),
ncol = ncol(cooccurrence_df))
for (i in 1:ncol(cooccurrence_df)) {
df <- subset(cooccurrence_df, cooccurrence_df[, i] == 1)
a = colSums(df[, i] == df)
a_matrix[i, ] <- a
b = dim(df)[1] - a
bc_matrix[i, ] <- b
}
dissimilarity <-
matrix(nrow = ncol(cooccurrence_df),
ncol = ncol(cooccurrence_df))
for (i in 1:ncol(cooccurrence_df)) {
for (j in 1:ncol(cooccurrence_df)) {
b_c <- bc_matrix[i, j] + bc_matrix[j, i]
dissimilarity[i, j] <- b_c
dissimilarity[j, i] <- b_c
}
}
sorenson <- dissimilarity / ((2 * a_matrix) + dissimilarity)
return(as_data_frame(sorenson))
}
prac_sor <- get_sorenson_matrix(pracdf)
df <- as.data.frame(prac_sor)
## These are identical:
n1 <- cooc_null_model(df, algo = "sim9", nReps = 1000, saveSeed = FALSE)$Randomized.Data
n2 <- cooc_null_model(df, algo = "sim9", nReps = 1000, saveSeed = FALSE)$Randomized.Data
identical(n1, n2)
## All identical except time stamp:
n1 <- cooc_null_model(df, algo = "sim9", nReps = 1000, saveSeed = FALSE)
n2 <- cooc_null_model(df, algo = "sim9", nReps = 1000, saveSeed = FALSE)
map2(n1, n2, identical)
## Randomized.Data is not identical with sim1:
n1 <- cooc_null_model(df, algo = "sim1", nReps = 1000, saveSeed = FALSE)$Randomized.Data
n2 <- cooc_null_model(df, algo = "sim1", nReps = 1000, saveSeed = FALSE)$Randomized.Data
identical(n1, n2)
## Okay, so things are looking pretty hokey at this point.... Examining cooc_null_model shows completely different dispatch methods for sim9 vs the rest:
cooc_null_model
## Calling sim9 routine directly, we find that these are still identical
n1 <- sim9(df, algo = "sim9", metric = "c_score")$Randomized.Data
n2 <- sim9(df, algo = "sim9", metric = "c_score")$Randomized.Data
identical(n1,n2)
## So time to dig into code for sim9
sim9
## Looks like it calls sim9_single, whatever that is. Stipping out that part of the code, we
## see sim9_single is also giving identical values on each call:
df <- speciesData
metricF <- get("c_score")
Obs <- metricF(as.matrix(speciesData))
msim <- speciesData[rowSums(speciesData) > 0, ]
n1 <- sim9_single(msim)
n2 <- sim9_single(msim)
identical(n1, n2)
ex1 <- matrix(rbinom(100, 1, 0.5), nrow = 10)
## This is not expected, or at least it doesn't occur with the default data of the function:
identical(sim9_single(ex1), sim9_single(ex1))
## So, what's special about df? Perhaps something in the conversions of speciesData is causing this...
|
testlist <- list(x = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(diceR:::indicator_matrix,testlist)
str(result) | /diceR/inst/testfiles/indicator_matrix/libFuzzer_indicator_matrix/indicator_matrix_valgrind_files/1609958887-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 234 | r | testlist <- list(x = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(diceR:::indicator_matrix,testlist)
str(result) |
# 7_cloud_fill.R: fills clouds in Landsat imagery for each site, path/row,
# and five-year window, running fills in parallel via foreach/doParallel.
source('0_settings.R')
library(stringr)
library(foreach)
library(doParallel)
# n_cpus is expected to come from 0_settings.R
registerDoParallel(n_cpus)
# Per-scene lookup of whether topographic correction should be applied.
scene_topocorr_key <- read.csv('Scene_topocorr_key.csv')
overwrite <- TRUE
reprocess <- TRUE
verbose <- TRUE
algorithm <- 'CLOUD_REMOVE_FAST'
#algorithm <- 'simple'
# Five consecutive 5-year windows; sensors_bydate lists the eligible Landsat
# sensors within each window (the final window omits L7E).
start_dates <- as.Date(c('1988/1/1',
'1993/1/1',
'1998/1/1',
'2003/1/1',
'2008/1/1'))
end_dates <- as.Date(c('1992/12/31',
'1997/12/31',
'2002/12/31',
'2007/12/31',
'2012/12/31'))
sensors_bydate <- list(c('L4T', 'L5T', 'L7E', 'L8E'),
c('L4T', 'L5T', 'L7E', 'L8E'),
c('L4T', 'L5T', 'L7E', 'L8E'),
c('L4T', 'L5T', 'L7E', 'L8E'),
c('L4T', 'L5T', 'L8E'))
# Alternative (coarser) window configurations kept for reference:
# start_dates <- as.Date(c('1988/1/1', '1998/1/1', '2008/1/1'))
# end_dates <- as.Date(c('1992/12/31', '2002/12/31', '2012/12/31'))
# sensors_bydate <- list(c('L4T', 'L5T', 'L7E', 'L8E'),
# c('L4T', 'L5T', 'L7E', 'L8E'),
# c('L4T', 'L5T', 'L8E'))
# start_dates <- as.Date(c('1998/1/1', '2008/1/1'))
# end_dates <- as.Date(c('2002/12/31', '2012/12/31'))
# sensors_bydate <- list(c('L4T', 'L5T', 'L7E', 'L8E'),
# c('L4T', 'L5T', 'L8E'))
stopifnot(length(start_dates) == length(end_dates))
stopifnot(length(start_dates) == length(sensors_bydate))
output_dir <- file.path(prefix, 'Landsat', 'Cloud_Filled')
# Build parallel vectors with one entry per site x WRS path/row combination,
# discovered from the image folder names under each site directory.
sitecodes_rep <- c()
base_dirs <- c()
wrspaths <- c()
wrsrows <- c()
tcs <- c()
for (sitecode in sitecodes) {
this_base_dir <- file.path(prefix, 'Landsat', sitecode)
# Folder names look like 'PPP-RRR_YYYY-DDD_<sensor>'; keep unique path/rows.
image_dirs <- dir(this_base_dir, pattern='^[0-9]{3}-[0-9]{3}_[0-9]{4}-[0-9]{3}_((LE)|(LT))[4578]$')
wrspathrows <- unique(str_extract(image_dirs, '^[0-9]{3}-[0-9]{3}_'))
these_wrspaths <- as.numeric(gsub('[-]', '', str_extract(wrspathrows, '^[0-9]{3}-')))
these_wrsrows <- as.numeric(gsub('[_-]', '', str_extract(wrspathrows, '-[0-9]{3}_')))
# Look up the topographic-correction flag for each site/path/row.
# NOTE(review): inside with(), 'sitecode' resolves to the key table's own
# column of that name (not the loop variable) -- presumably intended, but
# easy to misread; confirm the key table's column names.
tc_key_rows <- match(paste(sitecode, these_wrspaths, these_wrsrows),
with(scene_topocorr_key, paste(sitecode, wrspath, wrsrow)))
new_tcs <- scene_topocorr_key$do_tc[tc_key_rows]
tcs <- c(tcs, new_tcs)
wrspaths <- c(wrspaths, these_wrspaths)
wrsrows <- c(wrsrows, these_wrsrows)
base_dirs <- c(base_dirs, rep(this_base_dir, length(these_wrspaths)))
sitecodes_rep <- c(sitecodes_rep, rep(sitecode, length(these_wrspaths)))
}
# Manual-debugging shortcuts for stepping through one iteration by hand:
# sitecode <- sitecodes_rep[1]
# base_dir <- base_dirs[1]
# wrspath <- wrspaths[1]
# wrsrow <- wrsrows[1]
#
# start_date <- start_dates[1]
# end_date <- end_dates[1]
stopifnot(length(sitecodes_rep) == length(base_dirs))
stopifnot(length(sitecodes_rep) == length(wrspaths))
stopifnot(length(sitecodes_rep) == length(wrsrows))
stopifnot(length(sitecodes_rep) == length(tcs))
# Nested foreach: outer loop over site/path/row, inner over time windows.
# %:% composes the two loops; %dopar% runs iterations on registered workers.
foreach (sitecode=iter(sitecodes_rep), base_dir=iter(base_dirs),
wrspath=iter(wrspaths), wrsrow=iter(wrsrows), tc=iter(tcs),
.inorder=FALSE) %:%
# NOTE(review): end_date=(end_dates) lacks iter(), unlike every other
# argument; foreach iterates plain vectors too, but confirm the Date class
# survives iteration -- consider iter(end_dates) for consistency.
foreach (start_date=iter(start_dates), end_date=(end_dates),
sensors=iter(sensors_bydate),
.packages=c('teamlucc', 'raster', 'sp'),
.inorder=FALSE) %dopar% {
# Output files are named after the window midpoint: SITE_PPP-RRR_YYYY-DDD_cf
mid_date <- (end_date - start_date)/2 + start_date
out_base <- file.path(output_dir,
paste0(sitecode,
sprintf('_%03i-%03i_', wrspath, wrsrow),
format(mid_date, '%Y-%j'), '_cf'))
# NOTE(review): '%Y/%d/%m' puts day before month -- verify this ordering
# is intentional for the status string.
status_line <- paste0(sitecode, ' ', wrspath, '/', wrsrow, ' (',
format(start_date, '%Y/%d/%m'), ' - ',
format(end_date, '%Y/%d/%m'), ')')
output_file <- paste0(out_base, ext)
# Skip or fail on an existing output per the reprocess/overwrite flags.
if (file_test('-f', output_file)) {
if (!reprocess) return()
if (!overwrite) stop(paste(output_file, 'already exists'))
}
# Set a separate raster temp dir for each worker, so that temp
# files can be cleared after each iteration
rasterOptions(tmpdir=paste0(tempdir(), '_raster'))
# A failed fill is logged and skipped rather than aborting the whole run.
tryCatch(cf <- auto_cloud_fill(base_dir, wrspath, wrsrow, start_date,
end_date, out_name=out_base, tc=tc,
sensors=sensors, n_cpus=1,
overwrite=overwrite, verbose=verbose,
DN_min=-100, DN_max=16000,
algorithm=algorithm, byblock=FALSE),
error=function(e) {
print(paste(status_line, 'FAILED'))
})
removeTmpFiles(h=0)
}
| /srcPool/7_cloud_fill.R | no_license | landsat/Landsat_Processing | R | false | false | 4,829 | r | source('0_settings.R')
library(stringr)
library(foreach)
library(doParallel)
registerDoParallel(n_cpus)
scene_topocorr_key <- read.csv('Scene_topocorr_key.csv')
overwrite <- TRUE
reprocess <- TRUE
verbose <- TRUE
algorithm <- 'CLOUD_REMOVE_FAST'
#algorithm <- 'simple'
start_dates <- as.Date(c('1988/1/1',
'1993/1/1',
'1998/1/1',
'2003/1/1',
'2008/1/1'))
end_dates <- as.Date(c('1992/12/31',
'1997/12/31',
'2002/12/31',
'2007/12/31',
'2012/12/31'))
sensors_bydate <- list(c('L4T', 'L5T', 'L7E', 'L8E'),
c('L4T', 'L5T', 'L7E', 'L8E'),
c('L4T', 'L5T', 'L7E', 'L8E'),
c('L4T', 'L5T', 'L7E', 'L8E'),
c('L4T', 'L5T', 'L8E'))
# start_dates <- as.Date(c('1988/1/1', '1998/1/1', '2008/1/1'))
# end_dates <- as.Date(c('1992/12/31', '2002/12/31', '2012/12/31'))
# sensors_bydate <- list(c('L4T', 'L5T', 'L7E', 'L8E'),
# c('L4T', 'L5T', 'L7E', 'L8E'),
# c('L4T', 'L5T', 'L8E'))
# start_dates <- as.Date(c('1998/1/1', '2008/1/1'))
# end_dates <- as.Date(c('2002/12/31', '2012/12/31'))
# sensors_bydate <- list(c('L4T', 'L5T', 'L7E', 'L8E'),
# c('L4T', 'L5T', 'L8E'))
stopifnot(length(start_dates) == length(end_dates))
stopifnot(length(start_dates) == length(sensors_bydate))
output_dir <- file.path(prefix, 'Landsat', 'Cloud_Filled')
sitecodes_rep <- c()
base_dirs <- c()
wrspaths <- c()
wrsrows <- c()
tcs <- c()
for (sitecode in sitecodes) {
this_base_dir <- file.path(prefix, 'Landsat', sitecode)
image_dirs <- dir(this_base_dir, pattern='^[0-9]{3}-[0-9]{3}_[0-9]{4}-[0-9]{3}_((LE)|(LT))[4578]$')
wrspathrows <- unique(str_extract(image_dirs, '^[0-9]{3}-[0-9]{3}_'))
these_wrspaths <- as.numeric(gsub('[-]', '', str_extract(wrspathrows, '^[0-9]{3}-')))
these_wrsrows <- as.numeric(gsub('[_-]', '', str_extract(wrspathrows, '-[0-9]{3}_')))
tc_key_rows <- match(paste(sitecode, these_wrspaths, these_wrsrows),
with(scene_topocorr_key, paste(sitecode, wrspath, wrsrow)))
new_tcs <- scene_topocorr_key$do_tc[tc_key_rows]
tcs <- c(tcs, new_tcs)
wrspaths <- c(wrspaths, these_wrspaths)
wrsrows <- c(wrsrows, these_wrsrows)
base_dirs <- c(base_dirs, rep(this_base_dir, length(these_wrspaths)))
sitecodes_rep <- c(sitecodes_rep, rep(sitecode, length(these_wrspaths)))
}
# sitecode <- sitecodes_rep[1]
# base_dir <- base_dirs[1]
# wrspath <- wrspaths[1]
# wrsrow <- wrsrows[1]
#
# start_date <- start_dates[1]
# end_date <- end_dates[1]
stopifnot(length(sitecodes_rep) == length(base_dirs))
stopifnot(length(sitecodes_rep) == length(wrspaths))
stopifnot(length(sitecodes_rep) == length(wrsrows))
stopifnot(length(sitecodes_rep) == length(tcs))
foreach (sitecode=iter(sitecodes_rep), base_dir=iter(base_dirs),
wrspath=iter(wrspaths), wrsrow=iter(wrsrows), tc=iter(tcs),
.inorder=FALSE) %:%
foreach (start_date=iter(start_dates), end_date=(end_dates),
sensors=iter(sensors_bydate),
.packages=c('teamlucc', 'raster', 'sp'),
.inorder=FALSE) %dopar% {
mid_date <- (end_date - start_date)/2 + start_date
out_base <- file.path(output_dir,
paste0(sitecode,
sprintf('_%03i-%03i_', wrspath, wrsrow),
format(mid_date, '%Y-%j'), '_cf'))
status_line <- paste0(sitecode, ' ', wrspath, '/', wrsrow, ' (',
format(start_date, '%Y/%d/%m'), ' - ',
format(end_date, '%Y/%d/%m'), ')')
output_file <- paste0(out_base, ext)
if (file_test('-f', output_file)) {
if (!reprocess) return()
if (!overwrite) stop(paste(output_file, 'already exists'))
}
# Set a separate raster temp dir for each worker, so that temp
# files can be cleared after each iteration
rasterOptions(tmpdir=paste0(tempdir(), '_raster'))
tryCatch(cf <- auto_cloud_fill(base_dir, wrspath, wrsrow, start_date,
end_date, out_name=out_base, tc=tc,
sensors=sensors, n_cpus=1,
overwrite=overwrite, verbose=verbose,
DN_min=-100, DN_max=16000,
algorithm=algorithm, byblock=FALSE),
error=function(e) {
print(paste(status_line, 'FAILED'))
})
removeTmpFiles(h=0)
}
|
# --reform ref3.json
tc.wincmd <- function(tc.fn, tc.dir, tc.cli, taxyear=2013, reform.fn=NULL, reform.plans.dir=NULL){
  # Build a Windows system command that will call the Tax-Calculator CLI. See:
  # https://pslmodels.github.io/Tax-Calculator/
  # CAUTION: must use full dir names, not relative to working directory
  # 2013 is the FIRST possible tax year that Tax-Calculator will do
  #
  # tc.fn:            input CSV file name (within tc.dir)
  # tc.dir:           directory holding the input file, ending in "/"
  # tc.cli:           path to the Tax-Calculator command-line executable
  # taxyear:          tax year to simulate (>= 2013)
  # reform.fn:        optional reform JSON file name; NULL means current law
  # reform.plans.dir: directory holding reform.fn, ending in "/"
  #
  # Returns the full command string to pass to system().
  tc.infile.fullpath <- shQuote(paste0(tc.dir, tc.fn))
  # BUG FIX: str_sub(tc.dir, 1, -1) returned the string unchanged (index -1
  # is the last character, inclusive), so the trailing "/" was never removed
  # as the comment intended. Strip it -- only when present -- before quoting.
  tc.outdir <- shQuote(sub("/$", "", tc.dir)) # must remove trailing "/"
  reformstring <- NULL
  if(!is.null(reform.fn)) reformstring <- paste0("--reform", " ", shQuote(paste0(reform.plans.dir, reform.fn)))
  # paste0() silently drops a NULL reformstring, leaving a harmless double
  # space in the command; preserved for compatibility with existing callers.
  cmd <- paste0(tc.cli, " ", tc.infile.fullpath, " ", taxyear, " ", reformstring, " ", "--dump --outdir ", tc.outdir)
  return(cmd)
}
# glimpse(synprep$tc.base)
altruns.dir <- paste0(globals$tc.dir, "altruns/")
# write tcbase to a file, because the Tax-Calculator CLI reads a csv file
# maybe use temp file?
tc.fn <- "tcbase.csv"
write_csv(synprep$tc.base, paste0(altruns.dir, tc.fn))
# reform.fullname <- "D:/Dropbox/RPrograms PC/OSPC/EvaluateWtdSynFile/tax_plans/rate_cut.json"
reform.plans.dir <- "D:/Dropbox/RPrograms PC/OSPC/EvaluateWtdSynFile/tax_plans/"
# Pick ONE reform plan; each assignment overwrites the previous, so only the
# last one (EITC.json) is actually used below.
reform.fn <- "rate_cut.json"
reform.fn <- "toprate.json"
reform.fn <- "EITC.json"
cmd <- tc.wincmd(tc.fn=tc.fn, tc.dir=altruns.dir, tc.cli=globals$tc.cli, reform.fn=reform.fn, reform.plans.dir=reform.plans.dir)
cmd # a good idea to look at the command
a <- proc.time()
system(cmd) # CAUTION: this will overwrite any existing output file that had same input filename!
proc.time() - a # it can easily take 5-10 minutes depending on the size of the input file
# Expected CLI output name pattern, e.g.: tcbase-13-#-rate_cut-#.csv
# NOTE(review): str_remove treats ".csv"/".json" as regexes ('.' matches any
# character) -- harmless for these names, but worth knowing.
tc.outfn <- paste0(str_remove(basename(tc.fn), ".csv"), "-", 13, "-#-", str_remove(basename(reform.fn), ".json"), "-#.csv")
tc.outfn
tc.output <- read_csv(paste0(altruns.dir, tc.outfn),
col_types = cols(.default= col_double()),
n_max=-1)
glimpse(tc.output)
quantile(tc.output$RECID)
saveRDS(tc.output, paste0(altruns.dir, str_remove(basename(reform.fn), ".json"), ".rds"))
| /misc/run_tax_reforms.r | no_license | donboyd5/EvaluateWtdSynFile | R | false | false | 2,160 | r |
# --reform ref3.json
tc.wincmd <- function(tc.fn, tc.dir, tc.cli, taxyear=2013, reform.fn=NULL, reform.plans.dir=NULL){
# Build a Windows system command that will call the Tax-Calculator CLI. See:
# https://pslmodels.github.io/Tax-Calculator/
# CAUTION: must use full dir names, not relative to working directory
# 2013 is the FIRST possible tax year that Tax-Calculator will do
tc.infile.fullpath <- shQuote(paste0(paste0(tc.dir, tc.fn)))
tc.outdir <- shQuote(str_sub(tc.dir, 1, -1)) # must remove trailing "/"
reformstring <- NULL
if(!is.null(reform.fn)) reformstring <- paste0("--reform", " ", shQuote(paste0(paste0(reform.plans.dir, reform.fn))))
cmd <- paste0(tc.cli, " ", tc.infile.fullpath, " ", taxyear, " ", reformstring, " ", "--dump --outdir ", tc.outdir)
return(cmd)
}
# glimpse(synprep$tc.base)
altruns.dir <- paste0(globals$tc.dir, "altruns/")
# write tcbase to a file, because the Tax-Calculator CLI reads a csv file
# maybe use temp file?
tc.fn <- "tcbase.csv"
write_csv(synprep$tc.base, paste0(altruns.dir, tc.fn))
# reform.fullname <- "D:/Dropbox/RPrograms PC/OSPC/EvaluateWtdSynFile/tax_plans/rate_cut.json"
reform.plans.dir <- "D:/Dropbox/RPrograms PC/OSPC/EvaluateWtdSynFile/tax_plans/"
reform.fn <- "rate_cut.json"
reform.fn <- "toprate.json"
reform.fn <- "EITC.json"
cmd <- tc.wincmd(tc.fn=tc.fn, tc.dir=altruns.dir, tc.cli=globals$tc.cli, reform.fn=reform.fn, reform.plans.dir=reform.plans.dir)
cmd # a good idea to look at the command
a <- proc.time()
system(cmd) # CAUTION: this will overwrite any existing output file that had same input filename!
proc.time() - a # it can easily take 5-10 minutes depending on the size of the input file
# tcbase-13-#-rate_cut-#.csv
tc.outfn <- paste0(str_remove(basename(tc.fn), ".csv"), "-", 13, "-#-", str_remove(basename(reform.fn), ".json"), "-#.csv")
tc.outfn
tc.output <- read_csv(paste0(altruns.dir, tc.outfn),
col_types = cols(.default= col_double()),
n_max=-1)
glimpse(tc.output)
quantile(tc.output$RECID)
saveRDS(tc.output, paste0(altruns.dir, str_remove(basename(reform.fn), ".json"), ".rds"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_lines.R
\name{read_lines}
\alias{read_lines}
\title{Read Lines by Giving the File Encoding}
\usage{
read_lines(file, ..., encoding = "unknown", fileEncoding = "")
}
\arguments{
\item{file}{a connection object or character string}
\item{\dots}{arguments passed to \code{\link{readLines}}}
\item{encoding}{passed to \code{\link{readLines}}.}
\item{fileEncoding}{The name of the encoding to be assumed. Passed as
\code{encoding} to \code{\link{file}}, see there.}
}
\description{
Read Lines by Giving the File Encoding
}
| /man/read_lines.Rd | permissive | KWB-R/fakin.path.app | R | false | true | 604 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_lines.R
\name{read_lines}
\alias{read_lines}
\title{Read Lines by Giving the File Encoding}
\usage{
read_lines(file, ..., encoding = "unknown", fileEncoding = "")
}
\arguments{
\item{file}{a connection object or character string}
\item{\dots}{arguments passed to \code{\link{readLines}}}
\item{encoding}{passed to \code{\link{readLines}}.}
\item{fileEncoding}{The name of the encoding to be assumed. Passed as
\code{encoding} to \code{\link{file}}, see there.}
}
\description{
Read Lines by Giving the File Encoding
}
|
#' Import Lato Font
#'
#' \code{import_lato} makes the included Lato font available in R. This process
#' only needs to be completed once.
#'
#' @return Invisibly returns \code{NULL}; called for its side effect of
#'   registering the bundled Lato fonts with \pkg{extrafont}.
#' @rdname lato_font
#' @export
#'
import_lato <- function() {
  # The fonts ship inside this package under inst/fonts/lato.
  lato_path <- system.file("fonts", "lato", package = "lato")
  # Suppress font_import()'s warnings (e.g. about already-registered fonts).
  # The original nested suppressWarnings(suppressWarnings(...)) was
  # redundant; a single layer has identical effect.
  suppressWarnings(
    extrafont::font_import(lato_path, prompt = FALSE)
  )
  message(
    sprintf(
      "\nYou should also install Lato fonts on your system.\nThey can be found in [%s]",
      lato_path
    )
  )
}
#' \code{is_lato_imported} checks if Lato has been imported
#'
#' @return \code{TRUE} if any font family registered with \pkg{extrafont}
#'   has "Lato" in its name, \code{FALSE} otherwise.
#' @rdname lato_font
#' @export
is_lato_imported <- function() {
  # Scan extrafont's registration table for a Lato family entry.
  family_names <- extrafont::fonttable()$FamilyName
  any(grepl("Lato", family_names))
}
| /R/lato_font.R | permissive | waldnerf/lato | R | false | false | 761 | r | #' Import Lato Font
#'
#' \code{import_lato} makes the included Lato font available in R. This process
#' only needs to be completed once.
#'
#' @rdname lato_font
#' @export
#'
import_lato <- function() {
lato_path <- system.file("fonts", "lato", package = "lato")
suppressWarnings(
suppressWarnings(
extrafont::font_import(lato_path, prompt = FALSE)
)
)
message(
sprintf(
"\nYou should also install Lato fonts on your system.\nThey can be found in [%s]",
lato_path
)
)
}
#' \code{is_lato_imported} checks if Lato has been imported
#' @rdname lato_font
#' @export
is_lato_imported <- function() {
ft <- extrafont::fonttable()
any(grepl("Lato", ft$FamilyName))
}
|
# sentiment.R: compute per-post sentiment for scraped Reddit posts, attach it
# to the data frame, aggregate to a daily mean, and plot the time series.
df <- read.csv('data_scrape/reddit_posts_all.csv',
               sep="\t",
               encoding="UTF-8",
               stringsAsFactors=FALSE)
# install.packages("sentimentr")
# NOTE(review): installing on every run is slow; install fmsb once outside
# the script, or guard with requireNamespace().
install.packages("fmsb")
library(sentimentr)
library(tidytext)
library(syuzhet)
library(fmsb)
library(dplyr)
library(ggplot2)
library(plotly)
# ------------------------------------------- COMPUTE SENTIMENT
# Average sentiment (and sd) per post body, via sentimentr::sentiment_by().
sentiment=sentiment_by(df$body)
summary(sentiment$ave_sentiment)
# plot histogram of sentiment
qplot(sentiment$ave_sentiment,
      geom="histogram",
      binwidth=0.1,
      main="Posts Sentiment Histogram")
# add sentiment column to dataframe
df$ave_sentiment=sentiment$ave_sentiment
df$sd_sentiment=sentiment$sd
# save df to csv file
write.csv(df,
          "df_with_sentiment.csv",
          row.names = TRUE)
# plot sentiment in time (timestamp data)
plot(df$timestamp, df$ave_sentiment)
# create date column without hours (timestamp's first 10 chars = YYYY-MM-DD)
df$date <- substr(df$timestamp, 1, 10)
# group sentiment by day
df_sentiment_by_day <- aggregate(ave_sentiment ~ date, df, mean)
df_sentiment_by_day$date <- as.Date(df_sentiment_by_day$date)
# save it to a csv file
# BUG FIX: this previously wrote the full per-post 'df', so the saved file
# (and the plot rebuilt from it below) held per-post rows, not daily means.
write.csv(df_sentiment_by_day,
          "daily_avg_sentiment.csv",
          row.names = TRUE)
# read csv and plot time series of daily, average sentiment
df_sentiment_by_day <- read.csv("daily_avg_sentiment.csv")
df_sentiment_by_day$date <- as.Date(df_sentiment_by_day$date)
# Reference columns inside aes() (not df$...) so ggplot evaluates them in
# the data context.
ggplot(df_sentiment_by_day, aes(x=date, y=ave_sentiment, group=1)) +
  geom_point()+
  geom_line() +
  scale_x_date() +
  xlab("") +
  ylab("Average Sentiment")
# Plot the daily average-sentiment time series.
#
# df:          data frame with 'date' and 'ave_sentiment' columns; if NULL,
#              falls back to the saved "daily_avg_sentiment.csv".
# use_default: if TRUE, ignore 'df' and reload "daily_avg_sentiment.csv".
#
# Returns a ggplot object.
plot_sentiment <- function(df = NULL, use_default = FALSE) {
  library(ggplot2)
  # BUG FIX: calling plot_sentiment() with the default df = NULL used to
  # crash on df$date below; treat a NULL df like use_default = TRUE.
  if (use_default || is.null(df)) {
    df <- read.csv("daily_avg_sentiment.csv")
  }
  df$date <- as.Date(df$date)
  # Reference columns inside aes() (not df$...) so ggplot evaluates them in
  # the data context.
  ggplot(df, aes(x = date, y = ave_sentiment, group = 1)) +
    geom_point() +
    geom_line() +
    scale_x_date() +
    xlab("") +
    ylab("Average Sentiment")
}
plot_sentiment(df)
| /sentiment.R | no_license | blawok/usa-iran-conflict | R | false | false | 2,011 | r | df <- read.csv('data_scrape/reddit_posts_all.csv',
sep="\t",
encoding="UTF-8",
stringsAsFactors=FALSE)
# install.packages("sentimentr")
install.packages("fmsb")
library(sentimentr)
library(tidytext)
library(syuzhet)
library(fmsb)
library(dplyr)
library(ggplot2)
library(plotly)
# ------------------------------------------- COMPUTE SENTIMENT
sentiment=sentiment_by(df$body)
summary(sentiment$ave_sentiment)
# plot histogram of sentiment
qplot(sentiment$ave_sentiment,
geom="histogram",
binwidth=0.1,
main="Posts Sentiment Histogram")
# add sentiment column to dataframe
df$ave_sentiment=sentiment$ave_sentiment
df$sd_sentiment=sentiment$sd
# save df to csv file
write.csv(df,
"df_with_sentiment.csv",
row.names = TRUE)
# plot sentiment in time (timestamp data)
plot(df$timestamp, df$ave_sentiment)
# create date column without hours
df$date <- substr(df$timestamp, 1, 10)
# group sentiment by day
df_sentiment_by_day <- aggregate(ave_sentiment ~ date, df, mean)
df_sentiment_by_day$date <- as.Date(df_sentiment_by_day$date)
# save it to a csv file
write.csv(df,
"daily_avg_sentiment.csv",
row.names = TRUE)
# read csv and plot time series of daily, average sentiment
df_sentiment_by_day <- read.csv("daily_avg_sentiment.csv")
df_sentiment_by_day$date <- as.Date(df_sentiment_by_day$date)
ggplot(df_sentiment_by_day, aes(x=df_sentiment_by_day$date, y=df_sentiment_by_day$ave_sentiment,group=1)) +
geom_point()+
geom_line() +
scale_x_date() +
xlab("") +
ylab("Average Sentiment")
# sentiment ploting function
plot_sentiment <- function(df = NULL, use_default = FALSE) {
library(ggplot2)
if (use_default) {
df <- read.csv("daily_avg_sentiment.csv")
}
df$date <- as.Date(df$date)
ggplot(df, aes(x=df$date, y=df$ave_sentiment, group=1)) +
geom_point()+
geom_line() +
scale_x_date() +
xlab("") +
ylab("Average Sentiment")
}
plot_sentiment(df)
|
# plot2.R -- line chart of household global active power over the two target
# days (2007-02-01 and 2007-02-02), saved as a 480x480 PNG.
# NOTE(review): machine-specific working directory; adjust before running.
setwd("C:/Users/user/Documents/R/Coursera/ExData_Plotting1")
# '?' encodes missing values in this dataset; nrows covers the full file.
data_full <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data1 <- subset(data_full, Date %in% c("1/2/2007","2/2/2007"))
data1$Date <- as.Date(data1$Date, format="%d/%m/%Y")
# Combine date and time into a single POSIXct timestamp for the x axis.
datetime <- paste(as.Date(data1$Date), data1$Time)
data1$Datetime <- as.POSIXct(datetime)
## Plot 2
with(data1, {
plot(Global_active_power~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
})
# Copy the screen device to a PNG; the file is finalized by dev.off() below.
dev.copy(png, file="plot2.png", height=480, width=480)
dev.off() | /plot2.R | no_license | snehabb/ExData_Plotting1 | R | false | false | 635 | r | setwd("C:/Users/user/Documents/R/Coursera/ExData_Plotting1")
data_full <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data1 <- subset(data_full, Date %in% c("1/2/2007","2/2/2007"))
data1$Date <- as.Date(data1$Date, format="%d/%m/%Y")
datetime <- paste(as.Date(data1$Date), data1$Time)
data1$Datetime <- as.POSIXct(datetime)
## Plot 2
with(data1, {
plot(Global_active_power~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
})
dev.copy(png, file="plot2.png", height=480, width=480)
dev.off() |
# Manual test script for read_xml_dwa_m150(): resolves network paths to two
# sample DWA-M 150 XML files and (when run by hand) parses one of them.
library("kwb.utils")
library("xml2")
# resolve() expands the <wp>/<root> placeholders against the earlier entries.
paths <- resolve(list(
wp = "//poseidon/projekte$/AUFTRAEGE/RELIABLE_SEWER/Data-Work packages",
root = "<wp>/T_11_Data_Collection/20_Sofia/05_Exchange/2017_11_08_from_JK",
xml_1 = "<root>/BEFDSS_Beispieldaten_20171103.xml",
xml_2 = "<root>/Muster_M150_Typ-B.xml"
))
# M A I N ----------------------------------------------------------------------
# Wrapped in if (FALSE) so sourcing the file does not touch the network
# share; run the body manually when testing.
if (FALSE)
{
content <- read_xml_dwa_m150(xml = safePath(paths$xml_2))
}
| /inst/extdata/test_read_xml.R | permissive | KWB-R/kwb.dwa.m150 | R | false | false | 470 | r | library("kwb.utils")
library("xml2")
paths <- resolve(list(
wp = "//poseidon/projekte$/AUFTRAEGE/RELIABLE_SEWER/Data-Work packages",
root = "<wp>/T_11_Data_Collection/20_Sofia/05_Exchange/2017_11_08_from_JK",
xml_1 = "<root>/BEFDSS_Beispieldaten_20171103.xml",
xml_2 = "<root>/Muster_M150_Typ-B.xml"
))
# M A I N ----------------------------------------------------------------------
if (FALSE)
{
content <- read_xml_dwa_m150(xml = safePath(paths$xml_2))
}
|
# This is going to be a large function that loads/processes datasets
#' Cluster an expression matrix and return the resulting Seurat object
#' @param df is a data frame
#' @return the Seurat object produced by dataMatrixToCluster.seurat, with
#'   FindClusters and RunUMAP applied on top
#' @export
getSeuratObject <- function(df) {
  require(Seurat)
  require(singleCellSeq)
  # Cluster the matrix, then layer on cluster labels and a UMAP embedding.
  seurat_obj <- dataMatrixToCluster.seurat(df)
  seurat_obj <- FindClusters(seurat_obj)
  seurat_obj <- RunUMAP(seurat_obj)
  seurat_obj
}
#'
#' Load the Chung expression data from Synapse (matrix syn11967840, cell
#' annotations syn11967839), filter to recognized human gene symbols, drop
#' all-zero genes, and build a clustered Seurat object.
#' Requires a configured synapseclient login (via reticulate) and network
#' access.
#'
#' @return a list with 'data' (expression matrix, genes as rownames),
#'   'annote' (per-cell annotations keyed by cell name), and 'seurat'
#'   (the clustered object from getSeuratObject).
#'@export
loadChung<-function(){
require(singleCellSeq)
library(reticulate)
require(tidyverse)
synapse <- import("synapseclient")
syn <- synapse$Synapse()
syn$login()
#define variables for RMd
syn_file<-'syn11967840'
annotation_file <-'syn11967839'
analysis_dir<-"syn12494570"
#define matrix
samp.tab<-read.table(syn$get(syn_file)$path,header=T,as.is=TRUE,sep='\t')%>%dplyr::select(-c(Gene.ID_1,Gene.ID_2))%>%dplyr::rename(Gene="Gene.Symbol")
# Keep only genes whose symbol appears in org.Hs.eg.db.
require(org.Hs.eg.db)
all.gn<-unique(unlist(as.list(org.Hs.egSYMBOL)))
samp.tab <- samp.tab%>%filter(Gene%in%all.gn)
# Drop genes with zero counts in every cell.
allz<-which(apply(samp.tab%>%dplyr::select(-Gene),1,function(x) all(x==0)))
if(length(allz)>0)
samp.tab<-samp.tab[-allz,]
#need to remove the gene column
samp.mat<-samp.tab%>%dplyr::select(-Gene)
rownames(samp.mat) <- make.names(samp.tab$Gene,unique=TRUE)
#define any cell specific annotations
# NOTE(review): 'syn11967839' is hard-coded here although annotation_file
# above holds the same id -- use the variable to keep them in sync.
at<-read.table(syn$get('syn11967839')$path,sep='\t',header=T)%>%dplyr::select(Cell,CellType="CELL_TYPE_TSNE",Time,Sample)
rownames(at)<-at$Cell
at<-at%>%dplyr::select(-Cell)
return(list(data=samp.mat,annote=at,seurat=getSeuratObject(samp.mat)))
}
#' Load simulated single-cell data.
#'
#' Stub -- not yet implemented; the empty body returns NULL.
#'@export
loadSims<-function(){
}
#' Load the Chang lymph-node single-cell dataset from Synapse.
#'
#' Downloads the expression matrix, keeps only genes with a known human
#' symbol, drops all-zero genes, derives per-cell annotations from the
#' column names and returns the matrix, annotations and a Seurat object.
#'@export
loadChang<-function(){
  require(dplyr)
  require(tidyr)
  require(singleCellSeq)
  library(reticulate)
  # Log in to Synapse via the python client
  synapse <- import("synapseclient")
  syn <- synapse$Synapse()
  syn$login()
  #define variables for RMd
  syn_file<-'syn12045100'
  analysis_dir<-"syn12118521" # NOTE(review): defined but unused below
  analysis_file=paste(syn_file,'analysis.html',sep='_') # NOTE(review): defined but unused below
  #define matrix: drop gene-id/type columns, keep the gene symbol
  samp.tab<-read.table(syn$get(syn_file)$path,header=T,as.is=TRUE)%>%dplyr::select(-c(gene_id,gene_type))%>%dplyr::rename(Gene="gene_name")
  # Keep only genes with a known human symbol
  require(org.Hs.eg.db)
  all.gn<-unique(unlist(as.list(org.Hs.egSYMBOL)))
  samp.tab <- samp.tab%>%filter(Gene%in%all.gn)
  # Drop genes with zero counts in every cell
  allz<-which(apply(samp.tab%>%dplyr::select(-Gene),1,function(x) all(x==0)))
  if(length(allz)>0)
    samp.tab<-samp.tab[-allz,]
  #need to remove the gene column
  samp.mat<-samp.tab%>%dplyr::select(-Gene)
  print(dim(samp.mat))
  rownames(samp.mat) <- make.names(samp.tab$Gene,unique=TRUE)
  #define any cell specific annotations, parsed from the column names
  # NOTE(review): colnames(samp.tab) still contains the "Gene" column, which
  # yields one spurious annotation row; the trailing [-1,] appears to drop it
  # and assumes "Gene" is the first column -- confirm against the input file.
  cell.annotations<-data.frame(
    Patient=as.factor(sapply(colnames(samp.tab), function(x) gsub("LN","",unlist(strsplit(x,split='_'))[1]))),
    IsPooled=as.factor(sapply(colnames(samp.tab),function(x) unlist(strsplit(x,split='_'))[2]=="Pooled")),
    IsTumor=as.factor(sapply(colnames(samp.tab),function(x) length(grep('LN',x))==0)))[-1,]
  return(list(data=samp.mat,annote=cell.annotations,seurat=getSeuratObject(samp.mat)))
}
| /R/loadData.R | no_license | Sage-Bionetworks/single-cell-seq | R | false | false | 2,961 | r | # This is going to be a large function that loads/processes datasets
#' Build a clustered Seurat object with a UMAP embedding from a data matrix.
#' @param df A data frame / matrix of expression values.
#' @export
getSeuratObject <- function(df) {
  require(Seurat)
  require(singleCellSeq)
  # Cluster the matrix, then find clusters and compute the UMAP embedding
  sobj <- dataMatrixToCluster.seurat(df)
  sobj <- FindClusters(sobj)
  sobj <- RunUMAP(sobj)
  sobj
}
#' Load the Chung et al. single-cell dataset from Synapse.
#'
#' Downloads the expression matrix and the cell annotation table, keeps only
#' genes with a known human symbol, drops genes that are zero in every cell,
#' and returns the matrix, per-cell annotations and a clustered Seurat object.
#'
#' @return A list with elements \code{data} (expression matrix),
#'   \code{annote} (per-cell annotations) and \code{seurat} (Seurat object).
#'@export
loadChung<-function(){
  require(singleCellSeq)
  library(reticulate)
  require(tidyverse)
  # Log in to Synapse via the python client
  synapse <- import("synapseclient")
  syn <- synapse$Synapse()
  syn$login()
  # Synapse IDs of the expression matrix and the cell annotation table
  syn_file <- 'syn11967840'
  annotation_file <- 'syn11967839'
  # Expression matrix: drop the duplicate gene-id columns, keep the symbol
  samp.tab <- read.table(syn$get(syn_file)$path, header=TRUE, as.is=TRUE, sep='\t') %>%
    dplyr::select(-c(Gene.ID_1, Gene.ID_2)) %>%
    dplyr::rename(Gene="Gene.Symbol")
  # Keep only genes with a known human symbol
  require(org.Hs.eg.db)
  all.gn <- unique(unlist(as.list(org.Hs.egSYMBOL)))
  samp.tab <- samp.tab %>% filter(Gene %in% all.gn)
  # Drop genes with zero counts in every cell
  allz <- which(apply(samp.tab %>% dplyr::select(-Gene), 1, function(x) all(x == 0)))
  if (length(allz) > 0)
    samp.tab <- samp.tab[-allz, ]
  # Move gene symbols from a column into (unique) row names
  samp.mat <- samp.tab %>% dplyr::select(-Gene)
  rownames(samp.mat) <- make.names(samp.tab$Gene, unique=TRUE)
  # Per-cell annotations; was a hard-coded 'syn11967839' -- reuse annotation_file
  at <- read.table(syn$get(annotation_file)$path, sep='\t', header=TRUE) %>%
    dplyr::select(Cell, CellType="CELL_TYPE_TSNE", Time, Sample)
  rownames(at) <- at$Cell
  at <- at %>% dplyr::select(-Cell)
  return(list(data=samp.mat, annote=at, seurat=getSeuratObject(samp.mat)))
}
#' Load simulated single-cell data.
#'
#' Stub -- not yet implemented; the empty body returns NULL.
#'@export
loadSims<-function(){
}
#' Load the Chang lymph-node single-cell dataset from Synapse.
#'
#' Downloads the expression matrix, keeps only genes with a known human
#' symbol, drops all-zero genes, derives per-cell annotations from the
#' column names and returns the matrix, annotations and a Seurat object.
#'@export
loadChang<-function(){
  require(dplyr)
  require(tidyr)
  require(singleCellSeq)
  library(reticulate)
  # Log in to Synapse via the python client
  synapse <- import("synapseclient")
  syn <- synapse$Synapse()
  syn$login()
  #define variables for RMd
  syn_file<-'syn12045100'
  analysis_dir<-"syn12118521" # NOTE(review): defined but unused below
  analysis_file=paste(syn_file,'analysis.html',sep='_') # NOTE(review): defined but unused below
  #define matrix: drop gene-id/type columns, keep the gene symbol
  samp.tab<-read.table(syn$get(syn_file)$path,header=T,as.is=TRUE)%>%dplyr::select(-c(gene_id,gene_type))%>%dplyr::rename(Gene="gene_name")
  # Keep only genes with a known human symbol
  require(org.Hs.eg.db)
  all.gn<-unique(unlist(as.list(org.Hs.egSYMBOL)))
  samp.tab <- samp.tab%>%filter(Gene%in%all.gn)
  # Drop genes with zero counts in every cell
  allz<-which(apply(samp.tab%>%dplyr::select(-Gene),1,function(x) all(x==0)))
  if(length(allz)>0)
    samp.tab<-samp.tab[-allz,]
  #need to remove the gene column
  samp.mat<-samp.tab%>%dplyr::select(-Gene)
  print(dim(samp.mat))
  rownames(samp.mat) <- make.names(samp.tab$Gene,unique=TRUE)
  #define any cell specific annotations, parsed from the column names
  # NOTE(review): colnames(samp.tab) still contains the "Gene" column, which
  # yields one spurious annotation row; the trailing [-1,] appears to drop it
  # and assumes "Gene" is the first column -- confirm against the input file.
  cell.annotations<-data.frame(
    Patient=as.factor(sapply(colnames(samp.tab), function(x) gsub("LN","",unlist(strsplit(x,split='_'))[1]))),
    IsPooled=as.factor(sapply(colnames(samp.tab),function(x) unlist(strsplit(x,split='_'))[2]=="Pooled")),
    IsTumor=as.factor(sapply(colnames(samp.tab),function(x) length(grep('LN',x))==0)))[-1,]
  return(list(data=samp.mat,annote=cell.annotations,seurat=getSeuratObject(samp.mat)))
}
|
# Plot 3: energy sub-metering channels for the two-day window, to Plot3.png.
# Data_Load.R is expected to define Date_time and Sub_metering_1..3.
source("Data_Load.R")
png(filename = "Plot3.png", width = 480, height = 480, units = "px")
# Draw sub-metering 1 as the base plot, then overlay channels 2 and 3.
# (A redundant lines() call that re-drew Sub_metering_1 on top of itself
# has been removed.)
plot(Date_time, Sub_metering_1, type = "l", col = "black", xlab = "", ylab = "Energy sub metering")
lines(Date_time, Sub_metering_2, col = "red")
lines(Date_time, Sub_metering_3, col = "blue")
legend("topright", col = c("black", "red", "blue"), c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lwd = 1)
# NOTE(review): these axis positions assume the x values are an observation
# index (1..2880); if Date_time is a date-time vector the tick labels will
# fall outside the data range -- confirm against Data_Load.R.
axis(side = 1, at = c(1, 1441, 2880), labels = c("Thu", "Fri", "Sat"))
dev.off()
| /plot3.R | no_license | rakeshas/Exploratory_Data_Analysis_Course_Project_1 | R | false | false | 531 | r | source("Data_Load.R")
png(filename = "Plot3.png", width = 480, height = 480, units = "px")
# Draw sub-metering 1 as the base plot, then overlay channels 2 and 3.
# (A redundant lines() call that re-drew Sub_metering_1 on top of itself
# has been removed.)
plot(Date_time, Sub_metering_1, type = "l", col = "black", xlab = "", ylab = "Energy sub metering")
lines(Date_time, Sub_metering_2, col = "red")
lines(Date_time, Sub_metering_3, col = "blue")
legend("topright", col = c("black", "red", "blue"), c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lwd = 1)
# NOTE(review): these axis positions assume the x values are an observation
# index (1..2880); if Date_time is a date-time vector the tick labels will
# fall outside the data range -- confirm against Data_Load.R.
axis(side = 1, at = c(1, 1441, 2880), labels = c("Thu", "Fri", "Sat"))
dev.off()
|
# Plots to go in the paper
# All plots MUST be in Lat Long
library(raster)
library(rasterVis)
library(rgdal)
# Project helper functions (data loaders, classification, tracking statistics)
source("functions/loadAllAncils.R")
source("functions/loadVeg.R") # Returns myveg = fractional veg cover for each pft tile
source("functions/loadOtherAncils.R")
source("functions/makeBoxes.R")
source("functions/vegPrep.R") # Returns allveg = vegetation classess (1 to 6) AND veg classes intersected with zones (i.e. boxes)
source("functions/patches.R")
source("functions/mcsStats.R")
source("functions/popStats.R")
source("functions/initiations.R")
source("functions/makeLines.R")
source("getMyData.R")
source("trackCheck.R")
# source("mcsIntensity.R")
# Machine-specific data/result directories: local Mac vs linux workstation
if (Sys.info()[["sysname"]] == "Darwin"){
    indatadir <- "/Users/ajh235/Work/DataLocal/ModelData/WAFR/"
    dlresultsdir <- "/Users/ajh235/Work/DataLocal/Projects/InternalSabbatical/Results/"
    resultsdir <- "/Users/ajh235/Work/Projects/InternalSabbatical/Results/"
    scratchdir <- "/Users/ajh235/Work/Scratch/"
} else {
    indatadir <- "/data/local/hadhy/ModelData/WAFR/"
    dlresultsdir <- "/data/local/hadhy/Projects/InternalSabbatical/Results/"
    resultsdir <- "/home/h02/hadhy/Projects/InternalSabbatical/Results/"
    scratchdir <- "/data/local/hadhy/Scratch/"
    require(PP,lib.loc="/project/ukmo/rhel6/R")
}
# Keep raster temporary files on the scratch disk; avoid writing to disk
rasterOptions(tmpdir=scratchdir, todisk=F)
# Analysis settings used throughout the script below
timestep <- "10min" # "avg"
threshold <- 1000
myproj <- "ll" # "rp"
models <- c("rb5216.4km.std", "rb5216.4km.50k", "rb5216.4km.300k") # [2:3]
id <- "s" # c("s","w","y")#[2:3]
# Load the 10-minute large-scale rain data and pull out the standard 4 km run
mydata <- getMyData(timestep=timestep, var="lsrain", overwrite=F)
rb5216.4km.std <- mydata[[2]]
land_simple <- readOGR(dsn=paste(indatadir,"ancils",sep=""), layer="land_ll") # Lat Long
# Load ancillaries BEFORE rasterizing: 'mylandfrac' is the template raster for
# rasterize() below (it was previously used before being defined).
myveg <- loadVeg(model.nm=models[1], proj=myproj, overwrite=F)
mylandfrac <- loadOtherAncils(model.nm=models[1], ancil="landfrac", proj=myproj, overwrite=F)
myorog <- loadOtherAncils(model.nm=models[1], ancil="orog", proj=myproj,overwrite=F)
# Get boxes made in RP, but projected to LatLong
spp <- readOGR(dsn="/Users/ajh235/Work/DataLocal/ModelData/WAFR/ancils", layer="boxes_rp2ll")
spp.r <- rasterize(spp, mylandfrac)
allveg <- vegPrep(model.nm=models[1], id=id[1], myproj=myproj, myveg, myorog, mylandfrac, land_simple, spp, spp.r, plots=F, vegThreshold=0.3, overwrite=T) # return(mycl, mycl.z) and creates pdf plots
# Vegetation classification raster (classes 1..7) returned by vegPrep()
mycl <- allveg[[1]]
# Lookup table mapping class IDs to landcover names, plot colours and order
myLUT <- data.frame(ID=c(1,2,3,4,5,6,7), Landcover=factor(c("tree", "grass", "sparse", "boundary", "boundary, tree", "boundary, grass", "orography"), levels=c("tree", "grass", "sparse", "boundary", "boundary, tree", "boundary, grass", "orography")[c(3,2,6,4,5,1,7)]), Colours=c("dark green", "yellow", "orange", "sienna", "yellow green", "gold", "dark grey"), plotOrder=c(4,2,1,3,5,6,7))
# Attach the landcover names to the raster's factor levels
mycl.f <- as.factor(mycl)
ftab <- levels(mycl.f)[[1]]
ftab$Name <- myLUT$Landcover
levels(mycl.f) <- ftab
# Plot model domains
# 12km
e12km <- extent(c(xmin=-21.99668292, xmax=14.06752545, ymin=-0.18893839, ymax=23.96993351))
land12k.pol <- as(extent(e12km), "SpatialPolygons")
land12k.pol <- SpatialPolygonsDataFrame(land12k.pol, data=data.frame(id=1))
writeOGR(land12k.pol, dsn="/Users/ajh235/Work/DataLocal/ModelData/WAFR/ancils/km12/", layer="extent_12km_ll", driver="ESRI Shapefile", check_exists=T, overwrite_layer=T)
# 4km
e4km <- extent(c(xmin=-20.62620937, xmax=12.58290222, ymin=1.29328735, ymax=22.85308582))
land4k.pol <- as(extent(e4km), "SpatialPolygons")
land4k.pol <- SpatialPolygonsDataFrame(land4k.pol, data=data.frame(id=1))
writeOGR(land4k.pol, dsn="/Users/ajh235/Work/DataLocal/ModelData/WAFR/ancils/km4", layer="extent_4km_ll", driver="ESRI Shapefile", check_exists=T, overwrite_layer=T)
# Plot Vegetation classes w/ all boundary classes
png("../../Results/Vegetation_classes2.png", width=1000, height=600)
print(
    levelplot(mycl.f, maxpixels=600000, par.settings=rasterTheme(region=myLUT$Colours), xlab=NULL, ylab=NULL, xlim=c(-12,10), ylim=c(4,18), main="Vegetation classes and zones") + # , scales=list(draw=FALSE), xlim=c(-24,15), ylim=c(-1,26),
        latticeExtra::layer(sp.polygons(land_simple, col="black", lty=2)) +
        latticeExtra::layer(sp.polygons(spp)) +
        latticeExtra::layer(sp.text(loc=coordinates(spp), txt=1:nrow(spp@data), cex=3)) +
        latticeExtra::layer(sp.polygons(land12k.pol)) +
        latticeExtra::layer(sp.polygons(land4k.pol))
    )
dev.off()
# Plot Vegetation classes w/ ONE boundary class
# Reduced lookup table: the three boundary classes (4,5,6) become a single
# class 4, so only IDs 1,2,3,4,7 remain
myLUT <- data.frame(ID=c(1,2,3,4,7), Landcover=factor(c("tree", "grass", "sparse", "boundary", "orography"), levels=c("tree", "grass", "sparse", "boundary", "orography")[c(3,2,4,1,5)]), Colours=c("dark green", "yellow", "orange", "sienna", "dark grey"), plotOrder=c(4,2,1,3,5))
# Collapse boundary sub-classes 5 and 6 into class 4
mycl.1b <- mycl
mycl.1b[mycl.1b == 5] <- 4
mycl.1b[mycl.1b == 6] <- 4
mycl.1bf <- as.factor(mycl.1b)
ftab <- myLUT[myLUT$ID %in% levels(mycl.f)[[1]]$ID, ]
levels(mycl.1bf) <- ftab
png("../../Results/Vegetation_classes_1bnd.png", width=1000, height=600)
print(
    levelplot(mycl.1bf, maxpixels=600000, par.settings=rasterTheme(region=myLUT$Colours), xlab=NULL, ylab=NULL, xlim=c(-12,10), ylim=c(4,18), main="Vegetation classes and zones") + # , scales=list(draw=FALSE), xlim=c(-24,15), ylim=c(-1,26),
        latticeExtra::layer(sp.polygons(land_simple, col="black", lty=2)) +
        latticeExtra::layer(sp.polygons(spp)) +
        latticeExtra::layer(sp.text(loc=coordinates(spp), txt=1:nrow(spp@data), cex=3)) +
        latticeExtra::layer(sp.polygons(land12k.pol)) +
        latticeExtra::layer(sp.polygons(land4k.pol))
    )
dev.off()
# Plot afternoon initiations
# NOTE(review): 'results' is not defined in this script -- presumably created
# by one of the source()d files (trackCheck.R?); confirm before running.
aftinit <- results[results$class == 'generation' & (as.numeric(format(results$timestep, "%H")) >= 16 & as.numeric(format(results$timestep, "%H")) <= 17),c("x","y")]
initpts_rp <- SpatialPoints(aftinit, CRS("+proj=ob_tran +o_proj=longlat +o_lon_p=175.3000030517578 +o_lat_p=77.4000015258789 +lon_0=180 +ellps=sphere"))
# Reproject afternoon initiation points
initpts_ll <- spTransform(initpts_rp, CRSobj=CRS("+init=epsg:4326"), use_ob_tran=T)
# Keep only points that fall on a classified (non-NA) vegetation cell
initpts_ll <- initpts_ll[!is.na(extract(mycl.f, initpts_ll)),]
# NOTE(review): 'fdat' (colour table) and 'land_rp2ll' below are undefined in
# this script -- likely 'ftab'/'myLUT' and a reprojected land outline were
# intended; confirm before running.
png("../../Results/Vegetation_AfternoonInitiations.png", width=1000, height=600)
print(
    levelplot(mycl.1bf, att="Landcover", maxpixels=600000, main="Afternoon (16-18Z) MCS Initiations Over Vegetation Classes", xlim=c(-18,11), ylim=c(4,20), xlab=NULL, ylab=NULL, col.regions=as.character(fdat$Colours)) + # scales=list(draw=FALSE), xlim=c(-3,10), ylim=c(12,20)
        latticeExtra::layer(sp.polygons(land_rp2ll, lty=2)) +
        latticeExtra::layer(sp.points(initpts_ll, pch="+", cex=4, col="black")) #+
    )
dev.off()
# Plot POP and MCS precipitation statistics
source("patches_plot2.R")
# Get Veg classes in rotated pole
ancils <- loadAllAncils(myproj="rp", nBndClass=1, model="rb5216.4km.std", overwrite=F)
# Unpack the ancillary list (positions as returned by loadAllAncils)
mycl <- ancils[[4]]
mycl.z <- ancils[[10]]
mylandfrac <- ancils[[2]]
land_simple <- ancils[[9]]
spp.r <- ancils[[8]][[1]]
sppa <- ancils[[8]][[3]]
# MCS intense precipitation
#diurnalcycle2(rb5216.4km.std, type="all", patch=F, model.nm="rb5216.4km.std", id="s", spp.r=spp.r, sppa=sppa, mycl=mycl, land_simple, overwrite=F) # Creates pdf plots
diurnalcycle2(rb5216.4km.std, type="intense", patch=F, model.nm="rb5216.4km.std", id="s", spp.r=spp.r, sppa=sppa, mycl=mycl, land_simple, overwrite=F) # Creates pdf plots
| /paperplots.R | no_license | claretandy/MCS-Veg-Interactions | R | false | false | 7,314 | r | # Plots to go in the paper
# All plots MUST be in Lat Long
library(raster)
library(rasterVis)
library(rgdal)
source("functions/loadAllAncils.R")
source("functions/loadVeg.R") # Returns myveg = fractional veg cover for each pft tile
source("functions/loadOtherAncils.R")
source("functions/makeBoxes.R")
source("functions/vegPrep.R") # Returns allveg = vegetation classess (1 to 6) AND veg classes intersected with zones (i.e. boxes)
source("functions/patches.R")
source("functions/mcsStats.R")
source("functions/popStats.R")
source("functions/initiations.R")
source("functions/makeLines.R")
source("getMyData.R")
source("trackCheck.R")
# source("mcsIntensity.R")
if (Sys.info()[["sysname"]] == "Darwin"){
indatadir <- "/Users/ajh235/Work/DataLocal/ModelData/WAFR/"
dlresultsdir <- "/Users/ajh235/Work/DataLocal/Projects/InternalSabbatical/Results/"
resultsdir <- "/Users/ajh235/Work/Projects/InternalSabbatical/Results/"
scratchdir <- "/Users/ajh235/Work/Scratch/"
} else {
indatadir <- "/data/local/hadhy/ModelData/WAFR/"
dlresultsdir <- "/data/local/hadhy/Projects/InternalSabbatical/Results/"
resultsdir <- "/home/h02/hadhy/Projects/InternalSabbatical/Results/"
scratchdir <- "/data/local/hadhy/Scratch/"
require(PP,lib.loc="/project/ukmo/rhel6/R")
}
rasterOptions(tmpdir=scratchdir, todisk=F)
timestep <- "10min" # "avg"
threshold <- 1000
myproj <- "ll" # "rp"
models <- c("rb5216.4km.std", "rb5216.4km.50k", "rb5216.4km.300k") # [2:3]
id <- "s" # c("s","w","y")#[2:3]
# Get precip data
# Load the 10-minute large-scale rain data and pull out the standard 4 km run
mydata <- getMyData(timestep=timestep, var="lsrain", overwrite=F)
rb5216.4km.std <- mydata[[2]]
land_simple <- readOGR(dsn=paste(indatadir,"ancils",sep=""), layer="land_ll") # Lat Long
# Load ancillaries BEFORE rasterizing: 'mylandfrac' is the template raster for
# rasterize() below (it was previously used before being defined).
myveg <- loadVeg(model.nm=models[1], proj=myproj, overwrite=F)
mylandfrac <- loadOtherAncils(model.nm=models[1], ancil="landfrac", proj=myproj, overwrite=F)
myorog <- loadOtherAncils(model.nm=models[1], ancil="orog", proj=myproj,overwrite=F)
# Get boxes made in RP, but projected to LatLong
spp <- readOGR(dsn="/Users/ajh235/Work/DataLocal/ModelData/WAFR/ancils", layer="boxes_rp2ll")
spp.r <- rasterize(spp, mylandfrac)
allveg <- vegPrep(model.nm=models[1], id=id[1], myproj=myproj, myveg, myorog, mylandfrac, land_simple, spp, spp.r, plots=F, vegThreshold=0.3, overwrite=T) # return(mycl, mycl.z) and creates pdf plots
mycl <- allveg[[1]]
myLUT <- data.frame(ID=c(1,2,3,4,5,6,7), Landcover=factor(c("tree", "grass", "sparse", "boundary", "boundary, tree", "boundary, grass", "orography"), levels=c("tree", "grass", "sparse", "boundary", "boundary, tree", "boundary, grass", "orography")[c(3,2,6,4,5,1,7)]), Colours=c("dark green", "yellow", "orange", "sienna", "yellow green", "gold", "dark grey"), plotOrder=c(4,2,1,3,5,6,7))
mycl.f <- as.factor(mycl)
ftab <- levels(mycl.f)[[1]]
ftab$Name <- myLUT$Landcover
levels(mycl.f) <- ftab
# Plot model domains
# 12km
e12km <- extent(c(xmin=-21.99668292, xmax=14.06752545, ymin=-0.18893839, ymax=23.96993351))
land12k.pol <- as(extent(e12km), "SpatialPolygons")
land12k.pol <- SpatialPolygonsDataFrame(land12k.pol, data=data.frame(id=1))
writeOGR(land12k.pol, dsn="/Users/ajh235/Work/DataLocal/ModelData/WAFR/ancils/km12/", layer="extent_12km_ll", driver="ESRI Shapefile", check_exists=T, overwrite_layer=T)
# 4km
e4km <- extent(c(xmin=-20.62620937, xmax=12.58290222, ymin=1.29328735, ymax=22.85308582))
land4k.pol <- as(extent(e4km), "SpatialPolygons")
land4k.pol <- SpatialPolygonsDataFrame(land4k.pol, data=data.frame(id=1))
writeOGR(land4k.pol, dsn="/Users/ajh235/Work/DataLocal/ModelData/WAFR/ancils/km4", layer="extent_4km_ll", driver="ESRI Shapefile", check_exists=T, overwrite_layer=T)
# Plot Vegetation classes w/ all boundary classes
png("../../Results/Vegetation_classes2.png", width=1000, height=600)
print(
levelplot(mycl.f, maxpixels=600000, par.settings=rasterTheme(region=myLUT$Colours), xlab=NULL, ylab=NULL, xlim=c(-12,10), ylim=c(4,18), main="Vegetation classes and zones") + # , scales=list(draw=FALSE), xlim=c(-24,15), ylim=c(-1,26),
latticeExtra::layer(sp.polygons(land_simple, col="black", lty=2)) +
latticeExtra::layer(sp.polygons(spp)) +
latticeExtra::layer(sp.text(loc=coordinates(spp), txt=1:nrow(spp@data), cex=3)) +
latticeExtra::layer(sp.polygons(land12k.pol)) +
latticeExtra::layer(sp.polygons(land4k.pol))
)
dev.off()
# Plot Vegetation classes w/ ONE boundary class
myLUT <- data.frame(ID=c(1,2,3,4,7), Landcover=factor(c("tree", "grass", "sparse", "boundary", "orography"), levels=c("tree", "grass", "sparse", "boundary", "orography")[c(3,2,4,1,5)]), Colours=c("dark green", "yellow", "orange", "sienna", "dark grey"), plotOrder=c(4,2,1,3,5))
mycl.1b <- mycl
mycl.1b[mycl.1b == 5] <- 4
mycl.1b[mycl.1b == 6] <- 4
mycl.1bf <- as.factor(mycl.1b)
ftab <- myLUT[myLUT$ID %in% levels(mycl.f)[[1]]$ID, ]
levels(mycl.1bf) <- ftab
png("../../Results/Vegetation_classes_1bnd.png", width=1000, height=600)
print(
levelplot(mycl.1bf, maxpixels=600000, par.settings=rasterTheme(region=myLUT$Colours), xlab=NULL, ylab=NULL, xlim=c(-12,10), ylim=c(4,18), main="Vegetation classes and zones") + # , scales=list(draw=FALSE), xlim=c(-24,15), ylim=c(-1,26),
latticeExtra::layer(sp.polygons(land_simple, col="black", lty=2)) +
latticeExtra::layer(sp.polygons(spp)) +
latticeExtra::layer(sp.text(loc=coordinates(spp), txt=1:nrow(spp@data), cex=3)) +
latticeExtra::layer(sp.polygons(land12k.pol)) +
latticeExtra::layer(sp.polygons(land4k.pol))
)
dev.off()
# Plot afternoon initiations
aftinit <- results[results$class == 'generation' & (as.numeric(format(results$timestep, "%H")) >= 16 & as.numeric(format(results$timestep, "%H")) <= 17),c("x","y")]
initpts_rp <- SpatialPoints(aftinit, CRS("+proj=ob_tran +o_proj=longlat +o_lon_p=175.3000030517578 +o_lat_p=77.4000015258789 +lon_0=180 +ellps=sphere"))
# Reproject afternoon initiation points
initpts_ll <- spTransform(initpts_rp, CRSobj=CRS("+init=epsg:4326"), use_ob_tran=T)
initpts_ll <- initpts_ll[!is.na(extract(mycl.f, initpts_ll)),]
png("../../Results/Vegetation_AfternoonInitiations.png", width=1000, height=600)
print(
levelplot(mycl.1bf, att="Landcover", maxpixels=600000, main="Afternoon (16-18Z) MCS Initiations Over Vegetation Classes", xlim=c(-18,11), ylim=c(4,20), xlab=NULL, ylab=NULL, col.regions=as.character(fdat$Colours)) + # scales=list(draw=FALSE), xlim=c(-3,10), ylim=c(12,20)
latticeExtra::layer(sp.polygons(land_rp2ll, lty=2)) +
latticeExtra::layer(sp.points(initpts_ll, pch="+", cex=4, col="black")) #+
)
dev.off()
# Plot POP and MCS precipitation statistics
source("patches_plot2.R")
# Get Veg classes in rotated pole
ancils <- loadAllAncils(myproj="rp", nBndClass=1, model="rb5216.4km.std", overwrite=F)
mycl <- ancils[[4]]
mycl.z <- ancils[[10]]
mylandfrac <- ancils[[2]]
land_simple <- ancils[[9]]
spp.r <- ancils[[8]][[1]]
sppa <- ancils[[8]][[3]]
# MCS intense precipitation
#diurnalcycle2(rb5216.4km.std, type="all", patch=F, model.nm="rb5216.4km.std", id="s", spp.r=spp.r, sppa=sppa, mycl=mycl, land_simple, overwrite=F) # Creates pdf plots
diurnalcycle2(rb5216.4km.std, type="intense", patch=F, model.nm="rb5216.4km.std", id="s", spp.r=spp.r, sppa=sppa, mycl=mycl, land_simple, overwrite=F) # Creates pdf plots
|
data <- read.table("Data.txt", header= TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
subsetdata <- data[data$Date %in% c("1/2/2007","2/2/2007"),]
GlobalActivePower <- as.numeric(subsetdata$Global_active_power)
GlobalReactivePower <- as.numeric(subsetdata$Global_reactive_power)
voltage <- as.numeric(subsetdata$Voltage)
subMetering1 <- as.numeric(subsetdata$Sub_metering_1)
subMetering2 <- as.numeric(subsetdata$Sub_metering_2)
subMetering3 <- as.numeric(subsetdata$Sub_metering_3)
timeseries <- strptime(paste(subsetdata$Date, subsetdata$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
plot(timeseries, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(timeseries, subMetering2, type="l", col="red")
lines(timeseries, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
| /plot3.R | no_license | MohamedElashri/ExData_Plotting1 | R | false | false | 890 | r | data <- read.table("Data.txt", header= TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
subsetdata <- data[data$Date %in% c("1/2/2007","2/2/2007"),]
GlobalActivePower <- as.numeric(subsetdata$Global_active_power)
GlobalReactivePower <- as.numeric(subsetdata$Global_reactive_power)
voltage <- as.numeric(subsetdata$Voltage)
subMetering1 <- as.numeric(subsetdata$Sub_metering_1)
subMetering2 <- as.numeric(subsetdata$Sub_metering_2)
subMetering3 <- as.numeric(subsetdata$Sub_metering_3)
timeseries <- strptime(paste(subsetdata$Date, subsetdata$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
plot(timeseries, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(timeseries, subMetering2, type="l", col="red")
lines(timeseries, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
|
internds <- function(x) {
  # Tabulate the internal edges of a phylo tree: for each edge whose child is
  # an internal node, report the parent (anc), child (dec), branch length
  # (len), the branching time of the parent node (time) and a label column
  # repeating the child node number.
  ntips <- length(x$tip.label)
  is.internal <- x$edge[, 2] > ntips
  parents <- x$edge[is.internal, 1]
  children <- x$edge[is.internal, 2]
  # branching.times() gives node ages; name them by node number so each
  # edge's parent can be looked up by name below
  node.times <- as.numeric(branching.times(x))
  names(node.times) <- (ntips + 1):(ntips + x$Nnode)
  data.frame(anc = parents,
             dec = children,
             len = x$edge.length[is.internal],
             time = node.times[match(parents, names(node.times))],
             label = children)
}
| /iteRates/R/internds.R | no_license | ingted/R-Examples | R | false | false | 382 | r | internds <-
function(x){
#return internal edges and their lengths, labeled by the descendant node
n <- length(x$tip.label)
intin<-x$edge[,2]>n
anc <- x$edge[intin,1]
time <- as.numeric(branching.times(x))
names(time) <- (n+1):(n+x$Nnode)
data.frame(anc=x$edge[intin,1],dec=x$edge[intin,2],len=x$edge.length[intin],time=time[match(anc,names(time))],label=x$edge[intin,2])
}
|
/PlaneWave.R | no_license | aneves76/R-VSWF | R | false | false | 3,458 | r | ||
# model file model-001a.bug fits an intercept (mean) only model with known
# variance corresponding R file model-001.r
# Note: dnorm in BUGS/JAGS is parameterised by precision (= 1/variance), so
# 0.00023 below is the fixed observation precision, not the variance.
data{
  # sample size derived from the observed vector (named 'length')
  n <- length(length)
}
model{
  # likelihood: each observation is normal around a common mean mu
  for (i in 1:n) {
    length[i] ~ dnorm(mu, 0.00023)
  }
  # vague normal prior on the mean (precision 1e-04)
  mu ~ dnorm(0, 1e-04)
}
| /inst/models/model-001a.bugs.R | no_license | jmcurran/jaggR | R | false | false | 246 | r | # model file model-001a.bug fits an intercept (mean) only model with known
# variance corresponding R file model-001.r
# Note: dnorm in BUGS/JAGS is parameterised by precision (= 1/variance), so
# 0.00023 below is the fixed observation precision, not the variance.
data{
  # sample size derived from the observed vector (named 'length')
  n <- length(length)
}
model{
  # likelihood: each observation is normal around a common mean mu
  for (i in 1:n) {
    length[i] ~ dnorm(mu, 0.00023)
  }
  # vague normal prior on the mean (precision 1e-04)
  mu ~ dnorm(0, 1e-04)
}
|
#' Run PCA on the main data
#'
#' This function takes an object of class iCellR and runs PCA on the main or
#' imputed data.
#' @param x An object of class iCellR.
#' @param data.type Which data slot to use: "main" or "imputed", default = "main".
#' @param method Choose from "base.mean.rank" or "gene.model", default is "base.mean.rank". If "gene.model" is chosen you need to provide gene.list.
#' @param top.rank A number taking the top genes ranked by base mean, default = 500.
#' @param plus.log.value A number added to each value in the matrix before log transformation to avoid Inf numbers, default = 0.1.
#' @param batch.norm If TRUE the data will be normalized based on the genes in gene.list or top ranked genes.
#' @param gene.list A character vector of genes to be used for PCA when method = "gene.model".
#' @return An object of class iCellR with the pca.info, pca.data and opt.pcs attributes filled in.
#' @examples
#' \dontrun{
#' my.obj <- run.pca(my.obj, method = "gene.model", gene.list = "my_model_genes.txt")
#' }
#' @export
run.pca <- function (x = NULL,
                     data.type = "main",
                     method = "base.mean.rank",
                     top.rank = 500,
                     plus.log.value = 0.1,
                     batch.norm = FALSE,
                     gene.list = "character") {
  if ("iCellR" != class(x)[1]) {
    stop("x should be an object of class iCellR")
  }
  ## Select the expression matrix to work on
  if (data.type == "main") {
    DATA <- x@main.data
  }
  if (data.type == "imputed") {
    DATA <- x@imputed.data
  }
  # Gene selection 1: top genes ranked by base mean, then log-transformed
  if (method == "base.mean.rank") {
    raw.data.order <- DATA[order(rowMeans(DATA), decreasing = TRUE), ]
    topGenes <- head(raw.data.order, top.rank)
    TopNormLogScale <- log(topGenes + plus.log.value)
  }
  # Gene selection 2: user-supplied gene model
  if (method == "gene.model") {
    if (gene.list[1] == "character") {
      stop("please provide gene names for clustering")
    }
    topGenes <- subset(DATA, rownames(DATA) %in% gene.list)
    if (!batch.norm) {
      TopNormLogScale <- log(topGenes + plus.log.value)
    } else {
      # Library-size normalization, scaled to the mean library size.
      # NOTE: the result is intentionally NOT log-transformed, matching the
      # original behaviour (a dead log() assignment that was immediately
      # overwritten has been removed).
      libSiz <- colSums(topGenes)
      norm.facts <- as.numeric(libSiz) / mean(as.numeric(libSiz))
      TopNormLogScale <- as.data.frame(sweep(as.matrix(topGenes), 2, norm.facts, `/`))
    }
  }
  # PCA on the selected genes; cells are columns, so 'rotation' holds the
  # per-cell principal component scores
  counts.pca <- prcomp(TopNormLogScale, center = TRUE, scale. = TRUE)
  attributes(x)$pca.info <- counts.pca
  attributes(x)$pca.data <- data.frame(counts.pca$rotation)
  # Heuristic for the optimal number of PCs: count of sdev values greater
  # than twice the mean sdev, plus one
  DATA <- counts.pca$sdev
  OPTpcs <- mean(DATA) * 2
  OPTpcs <- (DATA > OPTpcs)
  OPTpcs <- length(OPTpcs[OPTpcs == TRUE]) + 1
  attributes(x)$opt.pcs <- OPTpcs
  return(x)
}
| /R/F012.run.pca.R | no_license | weiliuyuan/iCellR | R | false | false | 3,141 | r | #' Run PCA on the main data
#'
#' This function takes an object of class iCellR and runs PCA on the main or
#' imputed data.
#' @param x An object of class iCellR.
#' @param data.type Which data slot to use: "main" or "imputed", default = "main".
#' @param method Choose from "base.mean.rank" or "gene.model", default is "base.mean.rank". If "gene.model" is chosen you need to provide gene.list.
#' @param top.rank A number taking the top genes ranked by base mean, default = 500.
#' @param plus.log.value A number added to each value in the matrix before log transformation to avoid Inf numbers, default = 0.1.
#' @param batch.norm If TRUE the data will be normalized based on the genes in gene.list or top ranked genes.
#' @param gene.list A character vector of genes to be used for PCA when method = "gene.model".
#' @return An object of class iCellR with the pca.info, pca.data and opt.pcs attributes filled in.
#' @examples
#' \dontrun{
#' my.obj <- run.pca(my.obj, method = "gene.model", gene.list = "my_model_genes.txt")
#' }
#' @export
run.pca <- function (x = NULL,
                     data.type = "main",
                     method = "base.mean.rank",
                     top.rank = 500,
                     plus.log.value = 0.1,
                     batch.norm = FALSE,
                     gene.list = "character") {
  if ("iCellR" != class(x)[1]) {
    stop("x should be an object of class iCellR")
  }
  ## Select the expression matrix to work on
  if (data.type == "main") {
    DATA <- x@main.data
  }
  if (data.type == "imputed") {
    DATA <- x@imputed.data
  }
  # Gene selection 1: top genes ranked by base mean, then log-transformed
  if (method == "base.mean.rank") {
    raw.data.order <- DATA[order(rowMeans(DATA), decreasing = TRUE), ]
    topGenes <- head(raw.data.order, top.rank)
    TopNormLogScale <- log(topGenes + plus.log.value)
  }
  # Gene selection 2: user-supplied gene model
  if (method == "gene.model") {
    if (gene.list[1] == "character") {
      stop("please provide gene names for clustering")
    }
    topGenes <- subset(DATA, rownames(DATA) %in% gene.list)
    if (!batch.norm) {
      TopNormLogScale <- log(topGenes + plus.log.value)
    } else {
      # Library-size normalization, scaled to the mean library size.
      # NOTE: the result is intentionally NOT log-transformed, matching the
      # original behaviour (a dead log() assignment that was immediately
      # overwritten has been removed).
      libSiz <- colSums(topGenes)
      norm.facts <- as.numeric(libSiz) / mean(as.numeric(libSiz))
      TopNormLogScale <- as.data.frame(sweep(as.matrix(topGenes), 2, norm.facts, `/`))
    }
  }
  # PCA on the selected genes; cells are columns, so 'rotation' holds the
  # per-cell principal component scores
  counts.pca <- prcomp(TopNormLogScale, center = TRUE, scale. = TRUE)
  attributes(x)$pca.info <- counts.pca
  attributes(x)$pca.data <- data.frame(counts.pca$rotation)
  # Heuristic for the optimal number of PCs: count of sdev values greater
  # than twice the mean sdev, plus one
  DATA <- counts.pca$sdev
  OPTpcs <- mean(DATA) * 2
  OPTpcs <- (DATA > OPTpcs)
  OPTpcs <- length(OPTpcs[OPTpcs == TRUE]) + 1
  attributes(x)$opt.pcs <- OPTpcs
  return(x)
}
|
# Data ingest, coding and cleansing
#- setup, echo = FALSE
knitr::opts_chunk$set(echo = TRUE, message = FALSE, warning = FALSE)
#- libraries
library(tidyverse)
library(haven)
library(here)
library(labelled)
#' Helper function to generate regexes from question numbers, such that they match
#' the variety of question names in the dataset.
match_questions <- function(q_number, prefix = "Q", suffix = "[a-zA-Z]?$") {
  # Concatenate prefix + question number + suffix into a regex per question
  sprintf("%s%s%s", prefix, q_number, suffix)
}
#' # Serialise for other scripts
if (file.exists(here("data", "nzl_coded.RDS"))) {
  # Reuse the cached coded dataset if a previous run already produced it
  nzl_clean <- readRDS(here("data", "nzl_coded.RDS"))
} else {
  #' # Data ingest
  #'
  #' Read SPSS data file, since all of the factor levels are already somewhat coded in
  #' the SPSS metadata.
  nzl_raw <- read_spss(here("data", "WVS_Wave_7_New_Zealand_Spss_v1.4.sav"),
                       user_na = TRUE)
  #' # Variable coding
  #'
  #' Decide which of our questions are ordinal, on a 10-point scale (and
  #' therefore may end up being treated as continuous), continuous or nominal.
  q_numbers <- lst(
    ordinal = c(1:6, 27:47, 51:55, 58:89, 113:118, 121, 131:138, 141:143, 146:148,
                169:172, 196:199, 201:208, 221, 222, 224:239, 253, 255:259, 275:278, 287),
    scale_10_point = c(48, 49, 50, 90, 106:110, 112, 120, 158:164, 176:195, 240:252,
                       288),
    continuous = c(261, 262, 270),
    # These are sort-of ordinal but will need manual coding. They are currently
    # treated as categorical:
    badly_ordered = c(119, 221, 222, 254),
    # Everything else is nominal (including binary)
    nominal = (1:290)[!(1:290 %in% c(ordinal, scale_10_point, continuous))]
  )
  #' Some of the variable names have suffixes in the NZ dataset. So, we build
  #' regexes for each variable name so that we can use them with tidyselect.
  q_names <- map(q_numbers, match_questions)
  #' Code the factor levels according to their labels in SPSS. For our ordinal
  #' variables set ordered = TRUE, otherwise leave them unordered. The
  #' 10-point-scale variables do not need any recoding
  nzl_coded <- nzl_raw %>%
    # Remove existing labels
    zap_labels() %>%
    # Convert all types of missing values to NA
    mutate(across(starts_with("Q"), ~ifelse(.x < 0, NA, .x))) %>%
    # Restore labels from SPSS data
    copy_labels_from(nzl_raw) %>%
    drop_unused_value_labels() %>%
    # Covert labels to factors ready for analysis
    mutate(
      across(matches(q_names$ordinal), as_factor, ordered = TRUE),
      across(matches(q_names$nominal), as_factor)) %>%
    # Remove label metadata so that result is a plain tibble/data frame.
    zap_labels()
  #' # Other data cleaning
  nzl_clean <- nzl_coded %>%
    # Fix "number of children" variable, Q274.
    # Q274 was coded as a factor above, and as.integer() on a factor returns
    # the internal level codes rather than the printed values (R FAQ 7.10);
    # convert via as.character() first. suppressWarnings() silences the NA
    # coercion warning for the "No children" label, which the TRUE branch
    # already handles.
    mutate(Q274 = if_else(Q274 == "No children",
                          0L,
                          suppressWarnings(as.integer(as.character(Q274)))))
  # Serialise
  saveRDS(nzl_clean, here("data", "nzl_coded.RDS"))
}
| /R/data_in.R | no_license | gardiners/wvs-nz | R | false | false | 2,942 | r | # Data ingest, coding and cleansing
#- setup, echo = FALSE
knitr::opts_chunk$set(echo = TRUE, message = FALSE, warning = FALSE)
#- libraries
library(tidyverse)
library(haven)
library(here)
library(labelled)
#' Build a regex that matches a WVS question column name by number.
#'
#' @param q_number Question number(s); vectorised.
#' @param prefix String prepended to each number (default "Q").
#' @param suffix Regex tail; the default tolerates an optional single-letter
#'   suffix (e.g. "Q94A") as found in the NZ dataset, anchored at end of name.
#' @return Character vector of regexes, one per question number.
match_questions <- function(q_number, prefix = "Q", suffix = "[a-zA-Z]?$") {
sprintf("%s%s%s", prefix, q_number, suffix)
}
#' # Serialise for other scripts
# Cache: if a coded RDS already exists, load it and skip the (slow) SPSS
# ingest + recoding pipeline below.
if(file.exists(here("data", "nzl_coded.RDS"))) {
nzl_clean <- readRDS(here("data", "nzl_coded.RDS"))
} else {
#' # Data ingest
#'
#' Read SPSS data file, since all of the factor levels are already somewhat coded in
#' the SPSS metadata.
# user_na = TRUE keeps SPSS user-defined missing codes (negative values here)
# so they can be converted to NA explicitly below.
nzl_raw <- read_spss(here("data", "WVS_Wave_7_New_Zealand_Spss_v1.4.sav"),
user_na = TRUE)
#' # Variable coding
#'
#' Decide which of our questions are ordinal, on a 10-point scale (and
#' therefore may end up being treated as continuous), continuous or nominal.
# lst() (tibble) lets later elements reference earlier ones (see `nominal`).
q_numbers <- lst(
ordinal = c(1:6, 27:47, 51:55, 58:89, 113:118, 121, 131:138, 141:143, 146:148,
169:172, 196:199, 201:208, 221, 222, 224:239, 253, 255:259, 275:278, 287),
scale_10_point = c(48, 49, 50, 90, 106:110, 112, 120, 158:164, 176:195, 240:252,
288),
continuous = c(261, 262, 270),
# These are sort-of ordinal but will need manual coding. They are currently
# treated as categorical:
badly_ordered = c(119, 221, 222, 254),
# NOTE(review): 221 and 222 appear in both `ordinal` and `badly_ordered`;
# because `nominal` excludes only ordinal/scale/continuous, they end up
# coded as ordered factors below -- confirm this is intended.
# Everything else is nominal (including binary)
nominal = (1:290)[!(1:290 %in% c(ordinal, scale_10_point, continuous))]
)
#' Some of the variable names have suffixes in the NZ dataset. So, we build
#' regexes for each variable name so that we can use them with tidyselect.
q_names <- map(q_numbers, match_questions)
#' Code the factor levels according to their labels in SPSS. For our ordinal
#' variables set ordered = TRUE, otherwise leave them unordered. The
#' 10-point-scale variables do not need any recoding
nzl_coded <- nzl_raw %>%
# Remove existing labels
zap_labels() %>%
# Convert all types of missing values to NA
# (the SPSS missing codes in this file are all negative)
mutate(across(starts_with("Q"), ~ifelse(.x < 0, NA, .x))) %>%
# Restore labels from SPSS data
copy_labels_from(nzl_raw) %>%
drop_unused_value_labels() %>%
# Convert labels to factors ready for analysis
mutate(
across(matches(q_names$ordinal), as_factor, ordered = TRUE),
across(matches(q_names$nominal), as_factor)) %>%
# Remove label metadata so that result is a plain tibble/data frame.
zap_labels()
#' # Other data cleaning
nzl_clean <- nzl_coded %>%
# Fix "number of children" variable, Q274
# ("No children" would otherwise break the as.integer() coercion)
mutate(Q274 = if_else(Q274 == "No children",
0L,
as.integer(Q274)))
# Serialise
# Write the cache consumed by the fast path at the top of this block.
saveRDS(nzl_clean, here("data", "nzl_coded.RDS"))
}
|
## assignment
# plot4.R: draw a 2x2 panel of time-series plots for two days of household
# power-consumption data and write it to plot4.png.
# NOTE(review): filter() and mutate() below are dplyr verbs, but no
# library(dplyr) call appears in this file -- the script only runs if dplyr
# is already attached by the caller.
# read data
electric_data <- read.table("household_power_consumption.txt",sep=';',header=TRUE)
# clean data
# Dates arrive as "d/m/Y" strings; convert so the two target days can be
# selected by Date comparison.
electric_data$Date <- as.Date(electric_data$Date,"%d/%m/%Y")
electric_data_filter <- filter(electric_data,(Date=='2007-02-01') | (Date =='2007-02-02'))
electric_data_filter <- mutate(electric_data_filter, datetimeStr = paste(Date, Time))
electric_data_filter$datetime <- strptime(electric_data_filter$datetimeStr , "%Y-%m-%d %H:%M:%S")
# plot data:
png('plot4.png')
par(mfrow=c(2,2), mar=c(5,5,2,2))
# plot 1
# as.numeric(as.character(...)) is needed because read.table() without
# na.strings = "?" leaves the measurement columns as factors.
with(electric_data_filter, plot(datetime, as.numeric((as.character(electric_data_filter$Global_active_power))),
type='n',
xlab="",
ylab='Global Active Power (kilowatts)'))
with(electric_data_filter, lines(datetime, as.numeric((as.character(electric_data_filter$Global_active_power)))))
# plot 2:
with(electric_data_filter, plot(datetime, as.numeric((as.character(electric_data_filter$Voltage))),
type='n',
xlab="datetime",
ylab='Voltage'))
with(electric_data_filter, lines(datetime, as.numeric((as.character(electric_data_filter$Voltage)))))
# plot 3:
with(electric_data_filter, plot(datetime, Sub_metering_1,
type='n',
xlab="",
ylab='Energy sub metering',
ylim=c(0,40)))
with(electric_data_filter, lines(datetime, as.numeric(as.character(Sub_metering_1))))
with(electric_data_filter, lines(datetime, as.numeric(as.character(Sub_metering_2)), col='red'))
with(electric_data_filter, lines(datetime, Sub_metering_3, col='blue'))
legend("topright", box.col = "transparent", col =c("black", "red", "blue"), pch=c(NA,NA,NA),lty=c(1,1,1), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# plot 4:
with(electric_data_filter, plot(datetime, as.numeric((as.character(electric_data_filter$Global_reactive_power))),
type='n',
xlab="datetime",
ylab='Global_reactive_power'))
with(electric_data_filter, lines(datetime, as.numeric((as.character(electric_data_filter$Global_reactive_power)))))
dev.off()
dev.off() | /plot4.R | no_license | ThomasPfiffner/ExData_Plotting1 | R | false | false | 2,368 | r | ## assignment
# plot4.R: draw a 2x2 panel of time-series plots for two days of household
# power-consumption data and write it to plot4.png.
# Fix: the original called dplyr::filter()/mutate() without library(dplyr),
# so the script failed unless dplyr happened to be attached. Base-R
# equivalents are used instead; the resulting data frame and plots are
# unchanged.
# read data
electric_data <- read.table("household_power_consumption.txt",sep=';',header=TRUE)
# clean data: parse "d/m/Y" dates so the two target days can be selected
electric_data$Date <- as.Date(electric_data$Date,"%d/%m/%Y")
# keep only the two days of interest (base-R replacement for dplyr::filter;
# the character dates are coerced to Date by ==.Date)
electric_data_filter <- subset(electric_data, Date == '2007-02-01' | Date == '2007-02-02')
# build a POSIXlt datetime from the date and time columns (replaces mutate)
electric_data_filter$datetimeStr <- paste(electric_data_filter$Date, electric_data_filter$Time)
electric_data_filter$datetime <- strptime(electric_data_filter$datetimeStr , "%Y-%m-%d %H:%M:%S")
# plot data:
png('plot4.png')
par(mfrow=c(2,2), mar=c(5,5,2,2))
# plot 1
# as.numeric(as.character(...)) is needed because read.table() without
# na.strings = "?" leaves the measurement columns as factors.
with(electric_data_filter, plot(datetime, as.numeric((as.character(electric_data_filter$Global_active_power))),
type='n',
xlab="",
ylab='Global Active Power (kilowatts)'))
with(electric_data_filter, lines(datetime, as.numeric((as.character(electric_data_filter$Global_active_power)))))
# plot 2:
with(electric_data_filter, plot(datetime, as.numeric((as.character(electric_data_filter$Voltage))),
type='n',
xlab="datetime",
ylab='Voltage'))
with(electric_data_filter, lines(datetime, as.numeric((as.character(electric_data_filter$Voltage)))))
# plot 3:
with(electric_data_filter, plot(datetime, Sub_metering_1,
type='n',
xlab="",
ylab='Energy sub metering',
ylim=c(0,40)))
with(electric_data_filter, lines(datetime, as.numeric(as.character(Sub_metering_1))))
with(electric_data_filter, lines(datetime, as.numeric(as.character(Sub_metering_2)), col='red'))
with(electric_data_filter, lines(datetime, Sub_metering_3, col='blue'))
legend("topright", box.col = "transparent", col =c("black", "red", "blue"), pch=c(NA,NA,NA),lty=c(1,1,1), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# plot 4:
with(electric_data_filter, plot(datetime, as.numeric((as.character(electric_data_filter$Global_reactive_power))),
type='n',
xlab="datetime",
ylab='Global_reactive_power'))
with(electric_data_filter, lines(datetime, as.numeric((as.character(electric_data_filter$Global_reactive_power)))))
dev.off()
## makeCacheMatrix: build a "matrix with cached inverse" object.
## Returns a list of closures sharing the matrix `x` and cached inverse `m`:
##   setmatrix(y)   -- replace the matrix and invalidate the cache
##   getmatrix()    -- return the current matrix
##   setinverse(v)  -- store a computed inverse in the cache
##   getinverse()   -- return the cached inverse, or NULL if not yet set
##   getenv()       -- return an environment whose parent holds x and m
##                     (kept for backward compatibility; e.g.
##                      parent.env(xMat$getenv())$m inspects the cache)
## Fix: removed the unused locals `evn` and `y <- NULL` from the original.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL # cached inverse; NULL means "not computed yet"
setmatrix <- function(y) {
x <<- y
m <<- NULL # a new matrix invalidates any cached inverse
}
getmatrix <- function() x
# NOTE: the parameter is named `solve` in the original interface (it shadows
# base::solve inside this closure); kept so keyword calls still work.
setinverse <- function(solve) m <<- solve
getinverse <- function() m
getenv <- function() environment()
list(setmatrix = setmatrix, getmatrix = getmatrix,
setinverse = setinverse,
getinverse = getinverse,
getenv = getenv)
}
## cacheSolve: return the inverse of the matrix wrapped by makeCacheMatrix,
## computing it at most once and caching the result in the wrapper.
##
## Fixes to the original:
##  * default `xMat = m()` referenced an undefined `m` -- removed.
##  * the compute path sat inside `if (!is.null(m))`, so a cold cache
##    returned NULL invisibly and the inverse was never computed; the
##    compute path now runs exactly when the cache is empty.
##  * removed the dead inner branch that called the nonexistent `xMat$get()`
##    and a zero-argument `setmatrix()` (both would error).
##
## @param xMat list produced by makeCacheMatrix().
## @param ... extra arguments forwarded to solve() (e.g. tol).
## @return the matrix inverse; also stored via xMat$setinverse().
cacheSolve <- function(xMat, ...) {
m <- xMat$getinverse()
# Cache hit: reuse the previously computed inverse.
if(!is.null(m)) {
return(m)
}
# Cache miss: compute, store, and return the inverse.
y <- xMat$getmatrix()
m <- solve(y, ...)
xMat$setinverse(m)
m # return the inverse
}
| /CacheMatrix.R | no_license | alalapre/ProgrammingAssignment2 | R | false | false | 1,197 | r | makeCacheMatrix <- function(x = matrix()) {
# Example input: Insert matrix e.g x<-matrix(rnorm(64),8,8)
## To check cached values:
# xMat<-makeCacheMatrix(x) # Run the function
# parent.env(xMat$getenv())$m # Check the cached mean
# environment(xMat$getmean) # refer to environment of "m"
# (Body of makeCacheMatrix -- the signature line is fused into the dataset
# metadata line above. Builds a list of closures sharing the matrix `x` and
# the cached inverse `m` through this function's environment.)
m<-NULL
# NOTE(review): `evn` and `y` below are assigned but never used.
evn <- environment()
y<-NULL
# Replace the stored matrix and invalidate the cached inverse.
setmatrix<-function(y){
x<<-y
m<<-NULL
}
getmatrix<-function() x
# NOTE(review): the parameter name `solve` shadows base::solve here.
setinverse<-function(solve) m<<- solve
getinverse<-function() m
# Returns getenv's own (empty) environment; its parent frame holds x and m.
getenv<- function() environment()
# Public accessor list -- the object's interface.
list (setmatrix=setmatrix, getmatrix = getmatrix,
setinverse = setinverse,
getinverse = getinverse,
getenv = getenv)
}
## cacheSolve: return the inverse of the matrix wrapped by makeCacheMatrix,
## computing it at most once and caching the result in the wrapper.
##
## Fixes to the original:
##  * default `xMat = m()` referenced an undefined `m` -- removed.
##  * the compute path sat inside `if (!is.null(m))`, so a cold cache
##    returned NULL invisibly and the inverse was never computed; the
##    compute path now runs exactly when the cache is empty.
##  * removed the dead inner branch that called the nonexistent `xMat$get()`
##    and a zero-argument `setmatrix()` (both would error).
##
## @param xMat list produced by makeCacheMatrix().
## @param ... extra arguments forwarded to solve() (e.g. tol).
## @return the matrix inverse; also stored via xMat$setinverse().
cacheSolve <- function(xMat, ...) {
m <- xMat$getinverse()
# Cache hit: reuse the previously computed inverse.
if(!is.null(m)) {
return(m)
}
# Cache miss: compute, store, and return the inverse.
y <- xMat$getmatrix()
m <- solve(y, ...)
xMat$setinverse(m)
m # return the inverse
}
|
#
# Course 4 Week 1 Project, J. Flipse, 9 Feb 2018
#
# plot1.R: download the household power-consumption archive, subset two days
# in February 2007, and write a histogram of Global Active Power to plot1.png.
#
# Source files:
URL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
td = getwd() # Extract to working directory (td)
tf = tempfile(tmpdir=td, fileext=".zip") # Create placeholder file
download.file(URL, tf) # Download ZIP file to td
library(plyr)
library(dplyr)
library(data.table)
# Get the zip files name & path (zipF), then unzip all to the working directory
zipF <- list.files(path = td, pattern = "*.zip", full.names = TRUE)
ldply(.data = zipF, .fun = unzip, exdir = td)
#
# The dataset has 2,075,259 rows and 9 columns. First calculate a rough estimate of how much memory
# the dataset will require in memory before reading into R. Make sure your computer has enough memory
# (most modern computers should be fine).
#
numRows <- 2075259
numCols <- 9
neededMB <- round(numRows*numCols*8/2^{20},2) # rows x cols x 8 bytes, in MiB
# > neededMB
# [1] 142.5 MB required ==> this is a low memory need, therefore no need to subset data into memory
######## Load Data ########
# NOTE(review): without na.strings = "?" the measurement columns are read as
# factors, hence the as.numeric(as.character(...)) conversions below.
dtPower <- read.table(file.path(td, "household_power_consumption.txt"),sep = ";", header = TRUE)
# Restrict data between 2007-02-01 to 2007-02-02 (src fmt: "16/12/2006")
dtPower$dt <- as.Date(dtPower$Date,"%d/%m/%Y")
date1 <- c("2007-02-01"); date2 <- c("2007-02-02")
dt2007 <- subset(dtPower, dt>=date1 & dt <=date2)
rm(dtPower) # Free memory
#
# Create histogram of Global Active Power vs. Frequency
#
hist(as.numeric(as.character(dt2007$Global_active_power)),col = "red", main = "Global Active Power",
xlab = "Global Active Power (kilowats)")
#
# Transfer to PNG file "plot1.png"
#
png("plot1.png") # Turn on PNG device - write file in working directory (default)
# re-run the plot
hist(as.numeric(as.character(dt2007$Global_active_power)),col = "red", main = "Global Active Power",
xlab = "Global Active Power (kilowats)")
# Close the PNG device
dev.off()
| /plot1.R | no_license | jflipse/Exploratory-Data-Analysis-Project-1 | R | false | false | 2,002 | r | #
# Course 4 Week 1 Project, J. Flipse, 9 Feb 2018
#
# plot1.R: download the household power-consumption archive, subset two days
# in February 2007, and write a histogram of Global Active Power to plot1.png.
#
# Source files:
URL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
td = getwd() # Extract to working directory (td)
tf = tempfile(tmpdir=td, fileext=".zip") # Create placeholder file
download.file(URL, tf) # Download ZIP file to td
library(plyr)
library(dplyr)
library(data.table)
# Get the zip files name & path (zipF), then unzip all to the working directory
zipF <- list.files(path = td, pattern = "*.zip", full.names = TRUE)
ldply(.data = zipF, .fun = unzip, exdir = td)
#
# The dataset has 2,075,259 rows and 9 columns. First calculate a rough estimate of how much memory
# the dataset will require in memory before reading into R. Make sure your computer has enough memory
# (most modern computers should be fine).
#
numRows <- 2075259
numCols <- 9
neededMB <- round(numRows*numCols*8/2^{20},2) # rows x cols x 8 bytes, in MiB
# > neededMB
# [1] 142.5 MB required ==> this is a low memory need, therefore no need to subset data into memory
######## Load Data ########
# NOTE(review): without na.strings = "?" the measurement columns are read as
# factors, hence the as.numeric(as.character(...)) conversions below.
dtPower <- read.table(file.path(td, "household_power_consumption.txt"),sep = ";", header = TRUE)
# Restrict data between 2007-02-01 to 2007-02-02 (src fmt: "16/12/2006")
dtPower$dt <- as.Date(dtPower$Date,"%d/%m/%Y")
date1 <- c("2007-02-01"); date2 <- c("2007-02-02")
dt2007 <- subset(dtPower, dt>=date1 & dt <=date2)
rm(dtPower) # Free memory
#
# Create histogram of Global Active Power vs. Frequency
#
hist(as.numeric(as.character(dt2007$Global_active_power)),col = "red", main = "Global Active Power",
xlab = "Global Active Power (kilowats)")
#
# Transfer to PNG file "plot1.png"
#
png("plot1.png") # Turn on PNG device - write file in working directory (default)
# re-run the plot
hist(as.numeric(as.character(dt2007$Global_active_power)),col = "red", main = "Global Active Power",
xlab = "Global Active Power (kilowats)")
# Close the PNG device
dev.off()
|
# Bar chart of journal license counts from the doaj_seal data frame.
# NOTE(review): the title says "by Country" but the x aesthetic is
# JnlLicense -- confirm the intended grouping variable.
# Style note: aes() conventionally takes bare column names (aes(JnlLicense))
# rather than df$col references.
ggplot(data=doaj_seal, aes(doaj_seal$JnlLicense, fill=doaj_seal$JnlLicense)) + stat_count() + labs(x="License", y="Count", title = "DOAJ Seal by Country")
| /data/insert_plot2.txt | permissive | AuthorCarpentry/FSCI-2019 | R | false | false | 155 | txt | ggplot(data=doaj_seal, aes(doaj_seal$JnlLicense, fill=doaj_seal$JnlLicense)) + stat_count() + labs(x="License", y="Count", title = "DOAJ Seal by Country")
|
# getx: simulate `ny` days of `na` exposure observations each under one of
# four distributional scenarios, and summarise each day by quantiles.
#   ny        - number of days (columns of the simulated matrix)
#   na        - observations per day (rows)
#   argvals1  - quantile probabilities for x1 and the functional smooth
#   argvalslr - quantile probabilities for the regression matrix (defaults to argvals1)
#   typex     - "shift", "longr", "longl", or "wide" (see branches below)
#   mean1,sd1 - parameters of the truncated-normal base distribution
#   rate1     - only referenced by commented-out rexp() alternatives below
#   ns1       - spline order forwarded to getxfn()
# Returns list(x1 = per-day quantiles, xall = raw na x ny matrix,
#   xfn/basis1 = functional smooth of |x1|, xM = per-day means,
#   xREG = per-day quantiles at argvalslr).
# Depends on truncnorm::rtruncnorm and the sibling getxfn().
getx <- function(ny, na, argvals1, argvalslr = NULL, typex = "shift", mean1 = 15, sd1 = 1.5, rate1 = 1/2, ns1 = 10) {
# Specify number of arguments/quantiles
# Specify total number of x (quantiles X days)
N <- na * ny
#x1 <- matrix(nrow = na, ncol = ny)
# For shift in distribution
if(typex == "shift") {
# Define shift, varies for each day
#rates1 <- rep(rexp(ny, rate = rate1), each = na)
rates1 <- rep(runif(ny, min = -5, max = 20), each = na)
# Base dist truncnorm, plus rates vary for each day
x0 <- rtruncnorm(N, a = 0, mean = mean1, sd = sd1)
xall <- matrix(x0 + rates1, nrow = na, byrow = F)
# For long right tail
} else if (typex == "longr") {
# Find base distribution
x1 <- rtruncnorm(N, a = 0, mean = mean1, sd = sd1)
med <- median(x1)
# NOTE(review): the computed median is immediately overwritten with 15.
med <- 15
# Add in right tail
# q3 <- quantile(x1, probs = 0.75)
# What to scale right tail by
#adds <- rexp(ny, rate = rate1)
adds <- runif(ny, min = 2, max = 6)
# Same for each day
x0 <- rep(adds, each = na)
# Do not scale lower values
x0 <- (x1 > med) * ((x1 - med) * x0)
# Find xall
xall <- matrix(x0 + x1, nrow = na, byrow = F)
# For shifted left tail
} else if (typex == "longl") {
# Find base distribution
x1 <- rtruncnorm(N, a = 0, mean = mean1, sd = sd1)
med <- median(x1)
# NOTE(review): the computed median is immediately overwritten with 15.
med <- 15
# Add in left tail
# q3 <- quantile(x1, probs = 0.25)
# What to scale left tail by
#adds <- rexp(ny, rate = 1 / rate1)
adds <- runif(ny, min = 0, max = 2)
# Same for each day
x0 <- rep(adds, each = na)
# Do not scale lower values
x0 <- (x1 < med) * ((-x1 + med) * -x0)
# Find xall
xall <- matrix(x0 + x1, nrow = na, byrow = F)
# For increased variance
} else if (typex == "wide") {
# Find standard deviations
sd2 <- rep(runif(ny, min = 0.3, max = 6), each = na)
# Get x as truncnorm
# NOTE(review): uses the literal 15 rather than mean1, unlike the other
# branches -- confirm intended.
x0 <- rtruncnorm(N, a = 0, mean = 15, sd = sd2)
# Make matrix
xall <- matrix(x0, nrow = na, byrow = F)
} else {
stop("typex not recognized")
}
# Find mean
xM <- apply(xall, 2, mean)
# xall <- sweep(xall, 2, xM)
# Find quantiles
x1 <- apply(xall, 2, quantile, probs = argvals1)
#x1 <- abs(x1)
# Get quantiles for regression
if(is.null(argvalslr)) {
argvalslr <- argvals1
}
xREG <- apply(xall, 2, quantile, probs = argvalslr)
# Get functional x for plot
xfn1 <- getxfn(abs(x1), argvals1, ns1)
xfn <- xfn1$xfn
basis1 <- xfn1$basis1
# Get outcome
#return(list(x1 = x1, xall = xall, xM = xM))
return(list(x1 = x1, xall = xall, xfn = xfn, basis1 = basis1, xM = xM, xREG = xREG))
}
# getxfn: smooth a quantile matrix into a functional data object on [0, 1].
#   xvar1    - matrix of values to smooth (quantile levels x days)
#   argvals1 - argument values (quantile probabilities) for the smooth
#   ns1      - B-spline order (norder) of the basis
# Returns list(xfn = fd object from smooth.basis, basis1 = the basis used).
# Depends on fda::create.bspline.basis and fda::smooth.basis.
getxfn <- function(xvar1, argvals1, ns1 = 15) {
bspline <- create.bspline.basis(c(0, 1), norder = ns1)
smoothed <- smooth.basis(argvals1, xvar1, bspline)
list(xfn = smoothed$fd, basis1 = bspline)
}
# getbeta: build the true coefficient function beta(x) over quantile levels.
#   type  - shape of the curve:
#             "constant" -- flat at `val`
#             "x2"       -- parabola, larger at both tails of [0, 1]
#             "low"      -- decaying exponential, larger at low quantiles
#             "high"     -- growing exponential, larger at high quantiles
#   val   - vertical offset of the curve
#   scale - multiplicative rescaling applied to the whole curve
# Returns a function of x (vector of quantile levels in [0, 1]); errors on
# an unrecognised `type` when the returned function is first evaluated.
getbeta <- function(type, val = 0, scale = 1) {
function(x) {
base <- switch(type,
constant = rep(val, length.out = length(x)),
x2 = val + 1 / 4 * (x - 0.5)^2,
low = val + 1 / 10 * exp(x * -7),
high = val + 1 / 10000 * exp(x * 7),
stop("Beta type not recognized")
)
# rescale for appropriately sized beta
base * scale
}
}
# gety: generate one outcome per day from the simulated exposures.
#   argvals1 - quantile grid at which the coefficient function is evaluated
#   betaM    - coefficient on the daily mean xM
#   betaf    - coefficient FUNCTION of quantile level (see getbeta)
#   x1       - list from getx() (uses x1$x1 quantiles and x1$xM means)
#   disttype - "norm" (Gaussian noise, sd = sd1) or "pois" (Poisson, log link)
#   beta0    - intercept on the linear predictor
# Returns numeric vector y1, one value per day (column of x1$x1).
gety <- function(argvals1, betaM, betaf, x1, disttype, beta0 = 0, sd1 = 0.01) {
xvar1 <- x1$x1
xM <- x1$xM
# Get values of beta at x
# If functional beta is truth
# NOTE(review): class(betaf) == "function" -- is.function() would be safer.
if(class(betaf) == "function") {
beta1 <- betaf(argvals1)
# find linear function of x and beta
# Riemann-style approximation: mean over the grid of beta(q) * x(q).
linf <- rowSums(sweep(t(xvar1), 2, beta1, "*"))
linf <- linf * 1 / length(beta1)
#linf <- apply(linf, 2, function(x) auc(argvals1, x))
# If other is truth
} else{
stop("Beta must be a function")
# NOTE(review): everything below in this branch is unreachable (dead code
# after stop()); it also references beta1, which is never defined here.
nums <- as.numeric(names(betaf))/100
xhold <- apply(x1$xall, 2, quantile, probs = nums)
if(length(nums) > 1) {
xhold <- t(xhold)
linf <- rowSums(sweep(xhold, 2, betaf, "*"))
linf <- linf * 1 / length(beta1)
}else{
linf <- betaf * xhold
}
}
# Add in median and beta0
linf <- linf + xM * betaM + beta0
# For normally dist outcome
if(disttype == "norm") {
# get additive error
eps <- rnorm(ncol(xvar1), sd = sd1)
# compute y
y1 <- linf + eps
# For count outcome
} else if(disttype == "pois") {
# Find mean
mu <- exp(linf)
# Get poisson
y1 <- rpois(length(mu), mu)
}
# NOTE(review): if disttype is neither "norm" nor "pois", y1 is undefined
# and this final expression errors.
y1
}
# simout: one simulation replicate -- generate outcomes via gety(), then fit
# univariate and multivariate GLMs of y on the exposure quantiles (x1$xREG).
#   x1       - exposure object from getx()
#   typeb    - beta type string (see getbeta); a numeric typeb is treated as
#              a literal coefficient (passed straight through as betaf)
#   std      - if TRUE, center/scale the quantile regressors before fitting
# Returns list(y1, betaf, beta2 = univariate coef summaries (est/se/stat/p),
#   beta3 = multivariate coef summaries, betaN = NULL (penalized fit
#   disabled), x1, argvals1).
simout <- function(x1, argvals1, betaM, typeb, disttype = "norm", sd1 = 0.01, argvalslr = argvals1, val1 = 1, std = F, quants = F, scale1 = 1, beta0 = 0,...) {
# Get function of beta
if(class(typeb) != "numeric") {
betaf <- getbeta(typeb, val = val1, scale = scale1)
} else {
betaf <- typeb
}
# Generate y
y1 <- gety(argvals1, betaM, betaf, x1, disttype, beta0, sd1)
# Get functional x
#xfn <- x1$xfn
#ns1 <- x1$basis1$nbasis
xmat <- t(x1$xREG)
# Standardize?
if(std) {
mn1 <- apply(xmat, 2, mean)
# NOTE(review): this overwrites the sd1 argument (the error sd); harmless
# here because y1 was already generated above, but confusing.
sd1 <- apply(xmat, 2, sd)
xmat <- sweep(xmat, 2, mn1, "-")
xmat <- sweep(xmat, 2, sd1, "/")
}
#xmat <- xmat / length(argvals1)
dat1 <- data.frame(y1, xmat)
# do multivariate regression
colnames(dat1) <- c("y", paste0("x", seq(1, ncol(xmat))))
eqn1 <- paste0("y ~", paste(colnames(dat1)[-1], collapse = "+"))
beta2 <- matrix(nrow = (ncol(dat1) - 1), ncol = 4)
# Depending on type of regression
if(disttype == "norm") {
fam <- "gaussian"
}else if(disttype == "pois") {
fam <- "poisson"
}
# NOTE(review): eval(eqn1) just yields the formula string; glm coerces it.
# as.formula(eqn1) would be more explicit.
beta3 <- summary(glm(eval(eqn1), data = dat1, family = fam))$coef[-1, ]
# Do univariate regression
for(i in 2 : ncol(dat1)) {
eqn1 <- paste("y ~", colnames(dat1)[i])
beta2[i- 1, ] <- summary(glm(eval(eqn1), data = dat1, family = fam))$coef[-1, ]
}
#betaN <- newbeta(x1 = x1, y = y1, argvals2 = argvals1, fam = fam, std = std)
betaN <- NULL
rownames(beta2) <- argvalslr
rownames(beta3) <- argvalslr
#freg1 <- fRegress(y1 ~ x1$xfn)
# Save output
list(y1 = y1, betaf = betaf, beta2 = beta2, beta3 = beta3, betaN = betaN, x1 = x1, argvals1 = argvals1)
#list(y1 = y1, betaf = betaf, fmod1 = fmod1, beta2 = beta2, beta3 = beta3, basis1 = x1$basis1)
}
# newbeta: lasso fit (glmnet, alpha = 1) of y on the quantile matrix after
# removing the per-day mean trend from each quantile column. Currently
# unused -- simout() leaves its call commented out and sets betaN = NULL.
#   x1      - exposure object from getx() (uses x1$x1 and x1$xM)
#   argvals2 - accepted but unused in the body
# Returns list(coefficient data.frame padded with three NA columns to match
# the 4-column GLM summaries, coefficient on the mean term).
newbeta <- function(x1, y1, argvals2, fam, std = F) {
xmat <- t(x1$x1)
# `med` holds the per-day means (xM), despite the name.
med <- x1$xM
lm1 <- function(x) {
lm(x ~ med)$resid
}
# get residuals
xmat <- apply(xmat, 2, lm1)
# Standardize?
if(std) {
mn1 <- apply(xmat, 2, mean)
sd1 <- apply(xmat, 2, sd)
xmat <- sweep(xmat, 2, mn1, "-")
xmat <- sweep(xmat, 2, sd1, "/")
}
#xmat <- xmat / length(argvals1)
dat1 <- data.frame(y1, med, xmat)
# do multivariate regression
colnames(dat1) <- c("y", "median1", paste0("x", seq(1, ncol(xmat))))
#eqn1 <- paste0("~", paste(colnames(dat1)[-c(1, 2)], collapse = "+"))
#p1 <- penalized(y, penalized = xmat, unpenalized = med, data = dat1, lambda1 = 10)
xs <- as.matrix(data.frame(med, xmat))
# lasso alpha = 1
p1 <- cv.glmnet(xs, y1, family = fam, alpha = 1, standardize = F)
# Coefficients at the CV-optimal lambda: row 1 = intercept, row 2 = mean term.
coefp <- coef(p1, s = "lambda.min")
med <- coefp[2, ]
coefp <- data.frame(coefp[-c(1, 2),],NA, NA, NA)
list(coefp, med)
}
# flm: scalar-on-function linear model via fda::fRegress with three terms --
# a constant intercept, a scalar daily-mean term (xM), and the functional
# exposure term (xfn).
#   x1 - exposure object from getx() (uses x1$xfn and x1$xM)
#   y1 - scalar outcome vector, one value per day
# Returns list(freg = fitted fRegress object, betafstd = standard errors
# from fRegress.stderr under an iid error variance estimate).
flm <- function(x1, y1) {
xfn <- x1$xfn
xM <- x1$xM
ny <- length(y1)
# Get beta
# The intercept must be constant for a scalar response
betabasis1 <- create.constant.basis(c(0, 1))
betafd1 <- fd(0, betabasis1)
betafdPar1 <- fdPar(betafd1)
# Coefficient function for xfn uses the same basis as the data smooth.
betafd2 <- with(xfn, fd(basisobj=basis, fdnames=fdnames))
# convert to an fdPar object
betafdPar2 <- fdPar(betafd2)
betalist <- list(const=betafdPar1, xM = betafdPar1, xfn=betafdPar2)
# Get x
xfdlist <- list(const=rep(1, ny), xM = xM, xfn=xfn)
# Do functional regression
fd1 <- fRegress(y1, xfdlist, betalist)
# Find corresponding CIs
yhatfdobj <- fd1$yhatfdobj
errmat <- y1 - yhatfdobj
# iid error model: covariance = sigmae * I.
sigmae <- as.numeric(var(errmat))
diag1 <- diag(1, length(y1))
std1 <- fRegress.stderr(fd1, diag1, diag1 * sigmae)
list(freg = fd1, betafstd = std1)
}
# fglm1: scalar-on-function Poisson regression via fda.usc::fregre.glm,
# adjusting for the daily mean xM, with order-ns1 B-spline bases for both
# the data (basis.x) and the coefficient (basis.b).
#   x1 - exposure object from getx() (uses x1$x1 quantiles and x1$xM)
# Returns the fitted fregre.glm object.
fglm1 <- function(x1, y1, argvals1, ns1) {
xM <- x1$xM
# Formula terms: "x" is the functional covariate, xM the scalar adjuster.
form1 <- formula(y1 ~ x + xM)
basisx <- create.bspline.basis(c(0, 1), norder = ns1)
basisb <- create.bspline.basis(c(0, 1), norder = ns1)
#basis1 <- x1$basis1
basx <- list(x = basisx)
basb <- list(x = basisb)
# Days become rows of the fdata object, hence the transpose.
xfn <- fdata(t(x1$x1), argvals = argvals1)
y1 <- data.frame(y1, xM)
dat1 <- list("x" = xfn, "df" = y1)
fre1 <- fregre.glm(form1, family = "poisson", data = dat1,
basis.x = basx, basis.b = basb, CV = F)
fre1
}
# fglm: scalar-on-function quasi-Poisson regression via refund::pfr.
# NOTE(review): this is the old refund pfr() interface (y as first argument,
# funcs =, kz/kb =); recent refund versions use a formula interface.
# NOTE(review): argvals1 is accepted but unused here.
fglm <- function(x1, y1, argvals1, ns1) {
pfr1 <- pfr(y1, funcs = t(x1$x1), kz = ns1, nbasis = ns1, kb = ns1, family = "quasipoisson" )
pfr1
}
# runsim: run simout() for each scenario and collect true and estimated
# betas in long format for plotting.
#   x1use - list of exposure objects from getx(), indexed by xs1[, 2]
#   xs1   - two-column scenario table: col 1 = label, col 2 = index into x1use
#   ts1   - vector of beta types (see getbeta), one per scenario
#   cn    - column names applied to each collected coefficient table
#   lb1,ub1 - bounds forwarded to formfull()
# Returns list(xfull = tidied estimates, datb = true beta curves,
#   med = penalized median coefficients (empty while betaN is disabled),
#   simout1 = raw simout() results).
# NOTE(review): the defaults argvals1 = argvals2 and argvalslr = ag1
# reference global objects not defined in this file; formfull() at the end
# is also defined elsewhere in the project.
runsim <- function(x1use, xs1, ts1, cn, lb1 = -.5, ub1 = 0.5,
argvals1 = argvals2, argvalslr = ag1, scaleb = 1, betaM1 = 0,
val1 = 0, disttype1 = "pois", std1 = T, sd2 = 0.01, beta0 = 0) {
#specify output
# med starts with a sentinel 0 that is dropped (med[-1]) before returning.
med <- 0
t1 <- vector()
simout1 <- list()
for(i in 1 : length(ts1)) {
# specify beta and x
ti1 <- ts1[i]
xi1 <- xs1[i, 1]
xi2 <- xs1[i, 2]
# get betas
gb1 <- getbeta(ti1, val = val1[i],scale = scaleb[i])
betas <- gb1(argvals1)
# format beta data
# Scenario label "x-label : beta-type" used as the Type1 factor level.
t1[i] <- paste(xi1, ":", ti1)
type1 <- rep(t1[i], length(betas))
data1 <- data.frame(argvals1, betas, type1)
colnames(data1) <- c("quant", "beta", "Type1")
# NOTE(review): datb/xfull are grown by full_join inside the loop
# (quadratic); fine at this scale.
if(i == 1) {
datb <- data1
}else{
datb <- full_join(datb, data1)
}
#inflate xs
xuse1 <- x1use[[xi2]]
# nanograms
#xuse1$xall <- xuse1$xall * 1000
simout1[[i]] <- simout(xuse1, argvals1, betaM = betaM1,
argvalslr = argvalslr,
typeb = ti1, sd1 = sd2, disttype = disttype1, val1 = val1[i],
quants = F, std = std1, scale1 = scaleb[i], beta0 = beta0)
sim1 <- simout1[[i]]
# Univariate estimates: rownames of beta2 are the quantile levels.
x <- as.numeric(rownames(sim1$beta2))
type1 <- rep(t1[i], length(x))
type2 <- rep("Univariate", length(x))
x1 <- data.frame(x, sim1$beta2, type1, type2)
colnames(x1) <- cn
x1$Reg <- as.character(x1$Reg)
if(i == 1) {
xfull <- x1
}else{
xfull <- full_join(x1, xfull)
}
# Multivariate estimates from the joint GLM fit.
x <- as.numeric(rownames(sim1$beta3))
type2 <- rep("Multivariate", length(x))
x2 <- data.frame(x, sim1$beta3, type1, type2)
colnames(x2) <- cn
xfull <- full_join(x2, xfull)
# Add in new betas
# Penalized (lasso) estimates -- skipped while simout() sets betaN = NULL.
if(!is.null(sim1$betaN)) {
x <- argvals1
type2 <- rep("Penalized", length(x))
x2 <- data.frame(x, sim1$betaN[[1]], type1, type2)
colnames(x2) <- cn
xfull <- full_join(x2, xfull)
med <- c(med, sim1$betaN[[2]])
}
}
xfull$Type1 <- factor(xfull$Type1, levels = t1)
datb$Type1 <- factor(datb$Type1, levels = t1)
med <- med[-1]
xfull <- formfull(xfull, lb1, ub1)
list(xfull = xfull, datb = datb, med = med, simout1 = simout1)
}
| /simstudy/sim_study_fn.R | no_license | kralljr/sheds-dist | R | false | false | 11,383 | r | getx <- function(ny, na, argvals1, argvalslr = NULL, typex = "shift", mean1 = 15, sd1 = 1.5, rate1 = 1/2, ns1 = 10) {
# (Body of getx -- the signature line is fused into the dataset metadata
# line above. Simulates ny days of na exposure observations under one of
# four scenarios and summarises each day by quantiles; see the first copy
# of this function earlier in the file for full parameter notes.)
# Specify number of arguments/quantiles
# Specify total number of x (quantiles X days)
N <- na * ny
#x1 <- matrix(nrow = na, ncol = ny)
# For shift in distribution
if(typex == "shift") {
# Define shift, varies for each day
#rates1 <- rep(rexp(ny, rate = rate1), each = na)
rates1 <- rep(runif(ny, min = -5, max = 20), each = na)
# Base dist truncnorm, plus rates vary for each day
x0 <- rtruncnorm(N, a = 0, mean = mean1, sd = sd1)
xall <- matrix(x0 + rates1, nrow = na, byrow = F)
# For long right tail
} else if (typex == "longr") {
# Find base distribution
x1 <- rtruncnorm(N, a = 0, mean = mean1, sd = sd1)
med <- median(x1)
# NOTE(review): the computed median is immediately overwritten with 15.
med <- 15
# Add in right tail
# q3 <- quantile(x1, probs = 0.75)
# What to scale right tail by
#adds <- rexp(ny, rate = rate1)
adds <- runif(ny, min = 2, max = 6)
# Same for each day
x0 <- rep(adds, each = na)
# Do not scale lower values
x0 <- (x1 > med) * ((x1 - med) * x0)
# Find xall
xall <- matrix(x0 + x1, nrow = na, byrow = F)
# For shifted left tail
} else if (typex == "longl") {
# Find base distribution
x1 <- rtruncnorm(N, a = 0, mean = mean1, sd = sd1)
med <- median(x1)
# NOTE(review): the computed median is immediately overwritten with 15.
med <- 15
# Add in left tail
# q3 <- quantile(x1, probs = 0.25)
# What to scale left tail by
#adds <- rexp(ny, rate = 1 / rate1)
adds <- runif(ny, min = 0, max = 2)
# Same for each day
x0 <- rep(adds, each = na)
# Do not scale lower values
x0 <- (x1 < med) * ((-x1 + med) * -x0)
# Find xall
xall <- matrix(x0 + x1, nrow = na, byrow = F)
# For increased variance
} else if (typex == "wide") {
# Find standard deviations
sd2 <- rep(runif(ny, min = 0.3, max = 6), each = na)
# Get x as truncnorm
# NOTE(review): uses the literal 15 rather than mean1, unlike the other
# branches -- confirm intended.
x0 <- rtruncnorm(N, a = 0, mean = 15, sd = sd2)
# Make matrix
xall <- matrix(x0, nrow = na, byrow = F)
} else {
stop("typex not recognized")
}
# Find mean
xM <- apply(xall, 2, mean)
# xall <- sweep(xall, 2, xM)
# Find quantiles
x1 <- apply(xall, 2, quantile, probs = argvals1)
#x1 <- abs(x1)
# Get quantiles for regression
if(is.null(argvalslr)) {
argvalslr <- argvals1
}
xREG <- apply(xall, 2, quantile, probs = argvalslr)
# Get functional x for plot
xfn1 <- getxfn(abs(x1), argvals1, ns1)
xfn <- xfn1$xfn
basis1 <- xfn1$basis1
# Get outcome
#return(list(x1 = x1, xall = xall, xM = xM))
return(list(x1 = x1, xall = xall, xfn = xfn, basis1 = basis1, xM = xM, xREG = xREG))
}
# getxfn: smooth a quantile matrix into a functional data object on [0, 1].
#   xvar1    - matrix of values to smooth (quantile levels x days)
#   argvals1 - argument values (quantile probabilities) for the smooth
#   ns1      - B-spline order (norder) of the basis
# Returns list(xfn = fd object from smooth.basis, basis1 = the basis used).
# Depends on fda::create.bspline.basis and fda::smooth.basis.
getxfn <- function(xvar1, argvals1, ns1 = 15) {
bspline <- create.bspline.basis(c(0, 1), norder = ns1)
smoothed <- smooth.basis(argvals1, xvar1, bspline)
list(xfn = smoothed$fd, basis1 = bspline)
}
# getbeta: build the true coefficient function beta(x) over quantile levels.
#   type  - shape of the curve:
#             "constant" -- flat at `val`
#             "x2"       -- parabola, larger at both tails of [0, 1]
#             "low"      -- decaying exponential, larger at low quantiles
#             "high"     -- growing exponential, larger at high quantiles
#   val   - vertical offset of the curve
#   scale - multiplicative rescaling applied to the whole curve
# Returns a function of x (vector of quantile levels in [0, 1]); errors on
# an unrecognised `type` when the returned function is first evaluated.
getbeta <- function(type, val = 0, scale = 1) {
function(x) {
base <- switch(type,
constant = rep(val, length.out = length(x)),
x2 = val + 1 / 4 * (x - 0.5)^2,
low = val + 1 / 10 * exp(x * -7),
high = val + 1 / 10000 * exp(x * 7),
stop("Beta type not recognized")
)
# rescale for appropriately sized beta
base * scale
}
}
# gety: generate one outcome per day from the simulated exposures.
#   argvals1 - quantile grid at which the coefficient function is evaluated
#   betaM    - coefficient on the daily mean xM
#   betaf    - coefficient FUNCTION of quantile level (see getbeta)
#   x1       - list from getx() (uses x1$x1 quantiles and x1$xM means)
#   disttype - "norm" (Gaussian noise, sd = sd1) or "pois" (Poisson, log link)
#   beta0    - intercept on the linear predictor
# Returns numeric vector y1, one value per day (column of x1$x1).
gety <- function(argvals1, betaM, betaf, x1, disttype, beta0 = 0, sd1 = 0.01) {
xvar1 <- x1$x1
xM <- x1$xM
# Get values of beta at x
# If functional beta is truth
# NOTE(review): class(betaf) == "function" -- is.function() would be safer.
if(class(betaf) == "function") {
beta1 <- betaf(argvals1)
# find linear function of x and beta
# Riemann-style approximation: mean over the grid of beta(q) * x(q).
linf <- rowSums(sweep(t(xvar1), 2, beta1, "*"))
linf <- linf * 1 / length(beta1)
#linf <- apply(linf, 2, function(x) auc(argvals1, x))
# If other is truth
} else{
stop("Beta must be a function")
# NOTE(review): everything below in this branch is unreachable (dead code
# after stop()); it also references beta1, which is never defined here.
nums <- as.numeric(names(betaf))/100
xhold <- apply(x1$xall, 2, quantile, probs = nums)
if(length(nums) > 1) {
xhold <- t(xhold)
linf <- rowSums(sweep(xhold, 2, betaf, "*"))
linf <- linf * 1 / length(beta1)
}else{
linf <- betaf * xhold
}
}
# Add in median and beta0
linf <- linf + xM * betaM + beta0
# For normally dist outcome
if(disttype == "norm") {
# get additive error
eps <- rnorm(ncol(xvar1), sd = sd1)
# compute y
y1 <- linf + eps
# For count outcome
} else if(disttype == "pois") {
# Find mean
mu <- exp(linf)
# Get poisson
y1 <- rpois(length(mu), mu)
}
# NOTE(review): if disttype is neither "norm" nor "pois", y1 is undefined
# and this final expression errors.
y1
}
# simout: one simulation replicate -- generate outcomes via gety(), then fit
# univariate and multivariate GLMs of y on the exposure quantiles (x1$xREG).
#   x1       - exposure object from getx()
#   typeb    - beta type string (see getbeta); a numeric typeb is treated as
#              a literal coefficient (passed straight through as betaf)
#   std      - if TRUE, center/scale the quantile regressors before fitting
# Returns list(y1, betaf, beta2 = univariate coef summaries (est/se/stat/p),
#   beta3 = multivariate coef summaries, betaN = NULL (penalized fit
#   disabled), x1, argvals1).
simout <- function(x1, argvals1, betaM, typeb, disttype = "norm", sd1 = 0.01, argvalslr = argvals1, val1 = 1, std = F, quants = F, scale1 = 1, beta0 = 0,...) {
# Get function of beta
if(class(typeb) != "numeric") {
betaf <- getbeta(typeb, val = val1, scale = scale1)
} else {
betaf <- typeb
}
# Generate y
y1 <- gety(argvals1, betaM, betaf, x1, disttype, beta0, sd1)
# Get functional x
#xfn <- x1$xfn
#ns1 <- x1$basis1$nbasis
xmat <- t(x1$xREG)
# Standardize?
if(std) {
mn1 <- apply(xmat, 2, mean)
# NOTE(review): this overwrites the sd1 argument (the error sd); harmless
# here because y1 was already generated above, but confusing.
sd1 <- apply(xmat, 2, sd)
xmat <- sweep(xmat, 2, mn1, "-")
xmat <- sweep(xmat, 2, sd1, "/")
}
#xmat <- xmat / length(argvals1)
dat1 <- data.frame(y1, xmat)
# do multivariate regression
colnames(dat1) <- c("y", paste0("x", seq(1, ncol(xmat))))
eqn1 <- paste0("y ~", paste(colnames(dat1)[-1], collapse = "+"))
beta2 <- matrix(nrow = (ncol(dat1) - 1), ncol = 4)
# Depending on type of regression
if(disttype == "norm") {
fam <- "gaussian"
}else if(disttype == "pois") {
fam <- "poisson"
}
# NOTE(review): eval(eqn1) just yields the formula string; glm coerces it.
# as.formula(eqn1) would be more explicit.
beta3 <- summary(glm(eval(eqn1), data = dat1, family = fam))$coef[-1, ]
# Do univariate regression
for(i in 2 : ncol(dat1)) {
eqn1 <- paste("y ~", colnames(dat1)[i])
beta2[i- 1, ] <- summary(glm(eval(eqn1), data = dat1, family = fam))$coef[-1, ]
}
#betaN <- newbeta(x1 = x1, y = y1, argvals2 = argvals1, fam = fam, std = std)
betaN <- NULL
rownames(beta2) <- argvalslr
rownames(beta3) <- argvalslr
#freg1 <- fRegress(y1 ~ x1$xfn)
# Save output
list(y1 = y1, betaf = betaf, beta2 = beta2, beta3 = beta3, betaN = betaN, x1 = x1, argvals1 = argvals1)
#list(y1 = y1, betaf = betaf, fmod1 = fmod1, beta2 = beta2, beta3 = beta3, basis1 = x1$basis1)
}
# newbeta: lasso fit (glmnet, alpha = 1) of y on the quantile matrix after
# removing the per-day mean trend from each quantile column. Currently
# unused -- simout() leaves its call commented out and sets betaN = NULL.
#   x1      - exposure object from getx() (uses x1$x1 and x1$xM)
#   argvals2 - accepted but unused in the body
# Returns list(coefficient data.frame padded with three NA columns to match
# the 4-column GLM summaries, coefficient on the mean term).
newbeta <- function(x1, y1, argvals2, fam, std = F) {
xmat <- t(x1$x1)
# `med` holds the per-day means (xM), despite the name.
med <- x1$xM
lm1 <- function(x) {
lm(x ~ med)$resid
}
# get residuals
xmat <- apply(xmat, 2, lm1)
# Standardize?
if(std) {
mn1 <- apply(xmat, 2, mean)
sd1 <- apply(xmat, 2, sd)
xmat <- sweep(xmat, 2, mn1, "-")
xmat <- sweep(xmat, 2, sd1, "/")
}
#xmat <- xmat / length(argvals1)
dat1 <- data.frame(y1, med, xmat)
# do multivariate regression
colnames(dat1) <- c("y", "median1", paste0("x", seq(1, ncol(xmat))))
#eqn1 <- paste0("~", paste(colnames(dat1)[-c(1, 2)], collapse = "+"))
#p1 <- penalized(y, penalized = xmat, unpenalized = med, data = dat1, lambda1 = 10)
xs <- as.matrix(data.frame(med, xmat))
# lasso alpha = 1
p1 <- cv.glmnet(xs, y1, family = fam, alpha = 1, standardize = F)
# Coefficients at the CV-optimal lambda: row 1 = intercept, row 2 = mean term.
coefp <- coef(p1, s = "lambda.min")
med <- coefp[2, ]
coefp <- data.frame(coefp[-c(1, 2),],NA, NA, NA)
list(coefp, med)
}
# flm: scalar-on-function linear model via fda::fRegress with three terms --
# a constant intercept, a scalar daily-mean term (xM), and the functional
# exposure term (xfn).
#   x1 - exposure object from getx() (uses x1$xfn and x1$xM)
#   y1 - scalar outcome vector, one value per day
# Returns list(freg = fitted fRegress object, betafstd = standard errors
# from fRegress.stderr under an iid error variance estimate).
flm <- function(x1, y1) {
xfn <- x1$xfn
xM <- x1$xM
ny <- length(y1)
# Get beta
# The intercept must be constant for a scalar response
betabasis1 <- create.constant.basis(c(0, 1))
betafd1 <- fd(0, betabasis1)
betafdPar1 <- fdPar(betafd1)
# Coefficient function for xfn uses the same basis as the data smooth.
betafd2 <- with(xfn, fd(basisobj=basis, fdnames=fdnames))
# convert to an fdPar object
betafdPar2 <- fdPar(betafd2)
betalist <- list(const=betafdPar1, xM = betafdPar1, xfn=betafdPar2)
# Get x
xfdlist <- list(const=rep(1, ny), xM = xM, xfn=xfn)
# Do functional regression
fd1 <- fRegress(y1, xfdlist, betalist)
# Find corresponding CIs
yhatfdobj <- fd1$yhatfdobj
errmat <- y1 - yhatfdobj
# iid error model: covariance = sigmae * I.
sigmae <- as.numeric(var(errmat))
diag1 <- diag(1, length(y1))
std1 <- fRegress.stderr(fd1, diag1, diag1 * sigmae)
list(freg = fd1, betafstd = std1)
}
# fglm1: scalar-on-function Poisson regression via fda.usc::fregre.glm,
# adjusting for the daily mean xM, with order-ns1 B-spline bases for both
# the data (basis.x) and the coefficient (basis.b).
#   x1 - exposure object from getx() (uses x1$x1 quantiles and x1$xM)
# Returns the fitted fregre.glm object.
fglm1 <- function(x1, y1, argvals1, ns1) {
xM <- x1$xM
# Formula terms: "x" is the functional covariate, xM the scalar adjuster.
form1 <- formula(y1 ~ x + xM)
basisx <- create.bspline.basis(c(0, 1), norder = ns1)
basisb <- create.bspline.basis(c(0, 1), norder = ns1)
#basis1 <- x1$basis1
basx <- list(x = basisx)
basb <- list(x = basisb)
# Days become rows of the fdata object, hence the transpose.
xfn <- fdata(t(x1$x1), argvals = argvals1)
y1 <- data.frame(y1, xM)
dat1 <- list("x" = xfn, "df" = y1)
fre1 <- fregre.glm(form1, family = "poisson", data = dat1,
basis.x = basx, basis.b = basb, CV = F)
fre1
}
# fglm: scalar-on-function quasi-Poisson regression via refund::pfr.
# NOTE(review): this is the old refund pfr() interface (y as first argument,
# funcs =, kz/kb =); recent refund versions use a formula interface.
# NOTE(review): argvals1 is accepted but unused here.
fglm <- function(x1, y1, argvals1, ns1) {
pfr1 <- pfr(y1, funcs = t(x1$x1), kz = ns1, nbasis = ns1, kb = ns1, family = "quasipoisson" )
pfr1
}
# runsim: run simout() for each scenario and collect true and estimated
# betas in long format for plotting.
#   x1use - list of exposure objects from getx(), indexed by xs1[, 2]
#   xs1   - two-column scenario table: col 1 = label, col 2 = index into x1use
#   ts1   - vector of beta types (see getbeta), one per scenario
#   cn    - column names applied to each collected coefficient table
#   lb1,ub1 - bounds forwarded to formfull()
# Returns list(xfull = tidied estimates, datb = true beta curves,
#   med = penalized median coefficients (empty while betaN is disabled),
#   simout1 = raw simout() results).
# NOTE(review): the defaults argvals1 = argvals2 and argvalslr = ag1
# reference global objects not defined in this file; formfull() at the end
# is also defined elsewhere in the project.
runsim <- function(x1use, xs1, ts1, cn, lb1 = -.5, ub1 = 0.5,
argvals1 = argvals2, argvalslr = ag1, scaleb = 1, betaM1 = 0,
val1 = 0, disttype1 = "pois", std1 = T, sd2 = 0.01, beta0 = 0) {
#specify output
# med starts with a sentinel 0 that is dropped (med[-1]) before returning.
med <- 0
t1 <- vector()
simout1 <- list()
for(i in 1 : length(ts1)) {
# specify beta and x
ti1 <- ts1[i]
xi1 <- xs1[i, 1]
xi2 <- xs1[i, 2]
# get betas
gb1 <- getbeta(ti1, val = val1[i],scale = scaleb[i])
betas <- gb1(argvals1)
# format beta data
# Scenario label "x-label : beta-type" used as the Type1 factor level.
t1[i] <- paste(xi1, ":", ti1)
type1 <- rep(t1[i], length(betas))
data1 <- data.frame(argvals1, betas, type1)
colnames(data1) <- c("quant", "beta", "Type1")
# NOTE(review): datb/xfull are grown by full_join inside the loop
# (quadratic); fine at this scale.
if(i == 1) {
datb <- data1
}else{
datb <- full_join(datb, data1)
}
#inflate xs
xuse1 <- x1use[[xi2]]
# nanograms
#xuse1$xall <- xuse1$xall * 1000
simout1[[i]] <- simout(xuse1, argvals1, betaM = betaM1,
argvalslr = argvalslr,
typeb = ti1, sd1 = sd2, disttype = disttype1, val1 = val1[i],
quants = F, std = std1, scale1 = scaleb[i], beta0 = beta0)
sim1 <- simout1[[i]]
# Univariate estimates: rownames of beta2 are the quantile levels.
x <- as.numeric(rownames(sim1$beta2))
type1 <- rep(t1[i], length(x))
type2 <- rep("Univariate", length(x))
x1 <- data.frame(x, sim1$beta2, type1, type2)
colnames(x1) <- cn
x1$Reg <- as.character(x1$Reg)
if(i == 1) {
xfull <- x1
}else{
xfull <- full_join(x1, xfull)
}
# Multivariate estimates from the joint GLM fit.
x <- as.numeric(rownames(sim1$beta3))
type2 <- rep("Multivariate", length(x))
x2 <- data.frame(x, sim1$beta3, type1, type2)
colnames(x2) <- cn
xfull <- full_join(x2, xfull)
# Add in new betas
# Penalized (lasso) estimates -- skipped while simout() sets betaN = NULL.
if(!is.null(sim1$betaN)) {
x <- argvals1
type2 <- rep("Penalized", length(x))
x2 <- data.frame(x, sim1$betaN[[1]], type1, type2)
colnames(x2) <- cn
xfull <- full_join(x2, xfull)
med <- c(med, sim1$betaN[[2]])
}
}
xfull$Type1 <- factor(xfull$Type1, levels = t1)
datb$Type1 <- factor(datb$Type1, levels = t1)
med <- med[-1]
xfull <- formfull(xfull, lb1, ub1)
list(xfull = xfull, datb = datb, med = med, simout1 = simout1)
}
|
#' Perform splicing QTL analysis
#'
#' Parallelization across tested clusters is achieved using foreach/doMC, so the number of threads that will be used is determined by the cores argument passed to registerDoMC.
#'
#' @param counts An [introns] x [samples] matrix of counts. The rownames must be of the form chr:start:end:cluid. If the counts file comes from the leafcutter clustering code this should be the case already.
#' @param geno A [SNPs] x [samples] numeric matrix of the genotypes, typically encoded as 0,1,2, although in principle scaling shouldn't matter.
#' @param geno_meta SNP metadata, as a data.frame. Rows correspond to SNPs, must have a CHROM (with values e.g. chr15) and POS (position) column.
#' @param snps_within Window from center of cluster in which to test SNPs.
#' @param max_cluster_size Don't test clusters with more introns than this (NOTE: not present in the function signature below; confirm it is forwarded via \code{...} or remove this entry)
#' @param min_samples_per_intron Ignore introns used (i.e. at least one supporting read) in fewer than n samples
#' @param min_samples_per_group Require this many samples in each group to have at least min_coverage reads
#' @param min_coverage Require min_samples_per_group samples in each group to have at least this many reads
#' @param timeout Maximum time (in seconds) allowed for a single optimization run
#' @param debug Turn on to see output from rstan.
#' @return A per cluster list of results. For each cluster this is a list over tested SNPs. SNPs that were not tested will be represented by a string saying why.
#' @import foreach
#' @importFrom R.utils evalWithTimeout
#' @export
splicing_qtl_bnb=function(counts,geno,geno_meta,snps_within=1e4,min_samples_per_intron=5,min_coverage=20,min_samples_per_group=8,timeout=10,debug=F,...) {
  # Map each intron (row of `counts`) to its "chr:cluster" id.
  introns=leafcutter:::get_intron_meta(rownames(counts))
  cluster_ids=paste(introns$chr,introns$clu,sep = ":")
  clusters_to_test=unique(cluster_ids)
  if (!debug) {
    # Suppress rstan/optimizer chatter. Register the un-sink with on.exit
    # so the console is restored even if an error propagates out of the
    # loop (previously the trailing sink() was skipped on error, leaving
    # all subsequent output silently discarded).
    sink(file="/dev/null")
    on.exit(sink(), add=TRUE)
  }
  res=foreach (clu=clusters_to_test, .errorhandling = if (debug) "stop" else "pass") %dopar% {
    print(clu)
    # Samples x introns count matrix for this cluster.
    cluster_counts=t(counts[ cluster_ids==clu, ])
    sample_counts=rowSums(cluster_counts)
    samples_to_use=sample_counts>0
    # Scalar condition: use short-circuiting || rather than elementwise |
    # (R >= 4.3 errors if || receives a vector, and || is correct here).
    if (sum(samples_to_use)<=1 || sum(sample_counts>=min_coverage)<=min_samples_per_group ) return("no samples_to_use")
    cluster_introns=introns[ cluster_ids %in% clu, ]
    #m=mean(cluster_introns$middle)
    #cis_snps = which( (abs( geno_meta$POS - m ) < snps_within) & (geno_meta$CHROM==cluster_introns$chr[1]) )
    # Candidate SNPs: within snps_within of the cluster's intron span,
    # on the cluster's chromosome.
    cis_snps = which( ( (min(cluster_introns$start) - snps_within) < geno_meta$POS ) & ( geno_meta$POS < (max(cluster_introns$end) + snps_within)) & (geno_meta$CHROM==cluster_introns$chr[1]) )
    # Keep introns observed (>0 reads) in at least min_samples_per_intron samples.
    introns_to_use=colSums(cluster_counts[samples_to_use,]>0)>=min_samples_per_intron
    cluster_counts=cluster_counts[,introns_to_use]
    if (sum(introns_to_use)<=1) return("<=1 usable introns")
    sample_counts=sample_counts[samples_to_use]
    cluster_counts=cluster_counts[samples_to_use,]
    #pcs_here=pcs[samples_to_use,,drop=F]
    # The null fit does not depend on the SNP, so it is fitted once per
    # cluster and reused across SNPs via this cache.
    cached_fit_null=NULL
    clures=foreach (cis_snp = cis_snps, .errorhandling = if (debug) "stop" else "pass") %do% {
      xh=as.numeric(geno[cis_snp,])
      if (length(unique(xh)) <= 1) return("Only one genotype")
      ta=table(xh[sample_counts>=min_coverage])
      if ( sum(ta >= min_samples_per_group) <= 1)
        return("not enough valid samples")
      if ( sum(introns_to_use)<2 )
        return("almost all ys/sample_counts is 0 or 1")
      # Scalar && (short-circuit) instead of elementwise &.
      if (debug && !is.null(cached_fit_null)) cat("Using cached null fit.\n")
      # NOTE(review): evalWithTimeout is deprecated in recent R.utils in
      # favour of withTimeout -- confirm the pinned R.utils version still
      # exports it before changing the call.
      res <- R.utils::evalWithTimeout( { bnb_glm(xh,cluster_counts,fit_null=cached_fit_null,...) }, timeout=timeout, onTimeout="silent" )
      if (is.null(res)) "timeout" else {
        cached_fit_null=res$fit_null
        res
      }
    }
    names(clures)=as.character(cis_snps)
    clures
  }
  names(res)=clusters_to_test
  res
}
| /leafcutter/R/splicing_qtl_bnb.R | no_license | lpantano/leafcutter | R | false | false | 3,974 | r | #' Perform splicing QTL analysis
#'
#' Parallelization across tested clusters is achieved using foreach/doMC, so the number of threads that will be used is determined by the cores argument passed to registerDoMC.
#'
#' @param counts An [introns] x [samples] matrix of counts. The rownames must be of the form chr:start:end:cluid. If the counts file comes from the leafcutter clustering code this should be the case already.
#' @param geno A [SNPs] x [samples] numeric matrix of the genotypes, typically encoded as 0,1,2, although in principle scaling shouldn't matter.
#' @param geno_meta SNP metadata, as a data.frame. Rows correspond to SNPs, must have a CHROM (with values e.g. chr15) and POS (position) column.
#' @param snps_within Window from center of cluster in which to test SNPs.
#' @param max_cluster_size Don't test clusters with more introns than this (NOTE: not present in the function signature below; confirm it is forwarded via \code{...} or remove this entry)
#' @param min_samples_per_intron Ignore introns used (i.e. at least one supporting read) in fewer than n samples
#' @param min_samples_per_group Require this many samples in each group to have at least min_coverage reads
#' @param min_coverage Require min_samples_per_group samples in each group to have at least this many reads
#' @param timeout Maximum time (in seconds) allowed for a single optimization run
#' @param debug Turn on to see output from rstan.
#' @return A per cluster list of results. For each cluster this is a list over tested SNPs. SNPs that were not tested will be represented by a string saying why.
#' @import foreach
#' @importFrom R.utils evalWithTimeout
#' @export
splicing_qtl_bnb=function(counts,geno,geno_meta,snps_within=1e4,min_samples_per_intron=5,min_coverage=20,min_samples_per_group=8,timeout=10,debug=F,...) {
  # Map each intron (row of `counts`) to its "chr:cluster" id.
  introns=leafcutter:::get_intron_meta(rownames(counts))
  cluster_ids=paste(introns$chr,introns$clu,sep = ":")
  clusters_to_test=unique(cluster_ids)
  if (!debug) {
    # Suppress rstan/optimizer chatter. Register the un-sink with on.exit
    # so the console is restored even if an error propagates out of the
    # loop (previously the trailing sink() was skipped on error, leaving
    # all subsequent output silently discarded).
    sink(file="/dev/null")
    on.exit(sink(), add=TRUE)
  }
  res=foreach (clu=clusters_to_test, .errorhandling = if (debug) "stop" else "pass") %dopar% {
    print(clu)
    # Samples x introns count matrix for this cluster.
    cluster_counts=t(counts[ cluster_ids==clu, ])
    sample_counts=rowSums(cluster_counts)
    samples_to_use=sample_counts>0
    # Scalar condition: use short-circuiting || rather than elementwise |
    # (R >= 4.3 errors if || receives a vector, and || is correct here).
    if (sum(samples_to_use)<=1 || sum(sample_counts>=min_coverage)<=min_samples_per_group ) return("no samples_to_use")
    cluster_introns=introns[ cluster_ids %in% clu, ]
    #m=mean(cluster_introns$middle)
    #cis_snps = which( (abs( geno_meta$POS - m ) < snps_within) & (geno_meta$CHROM==cluster_introns$chr[1]) )
    # Candidate SNPs: within snps_within of the cluster's intron span,
    # on the cluster's chromosome.
    cis_snps = which( ( (min(cluster_introns$start) - snps_within) < geno_meta$POS ) & ( geno_meta$POS < (max(cluster_introns$end) + snps_within)) & (geno_meta$CHROM==cluster_introns$chr[1]) )
    # Keep introns observed (>0 reads) in at least min_samples_per_intron samples.
    introns_to_use=colSums(cluster_counts[samples_to_use,]>0)>=min_samples_per_intron
    cluster_counts=cluster_counts[,introns_to_use]
    if (sum(introns_to_use)<=1) return("<=1 usable introns")
    sample_counts=sample_counts[samples_to_use]
    cluster_counts=cluster_counts[samples_to_use,]
    #pcs_here=pcs[samples_to_use,,drop=F]
    # The null fit does not depend on the SNP, so it is fitted once per
    # cluster and reused across SNPs via this cache.
    cached_fit_null=NULL
    clures=foreach (cis_snp = cis_snps, .errorhandling = if (debug) "stop" else "pass") %do% {
      xh=as.numeric(geno[cis_snp,])
      if (length(unique(xh)) <= 1) return("Only one genotype")
      ta=table(xh[sample_counts>=min_coverage])
      if ( sum(ta >= min_samples_per_group) <= 1)
        return("not enough valid samples")
      if ( sum(introns_to_use)<2 )
        return("almost all ys/sample_counts is 0 or 1")
      # Scalar && (short-circuit) instead of elementwise &.
      if (debug && !is.null(cached_fit_null)) cat("Using cached null fit.\n")
      # NOTE(review): evalWithTimeout is deprecated in recent R.utils in
      # favour of withTimeout -- confirm the pinned R.utils version still
      # exports it before changing the call.
      res <- R.utils::evalWithTimeout( { bnb_glm(xh,cluster_counts,fit_null=cached_fit_null,...) }, timeout=timeout, onTimeout="silent" )
      if (is.null(res)) "timeout" else {
        cached_fit_null=res$fit_null
        res
      }
    }
    names(clures)=as.character(cis_snps)
    clures
  }
  names(res)=clusters_to_test
  res
}
|
getepc <- function() {
  # Download the UCI household power consumption archive and read the
  # semicolon-separated data file it contains into a data frame.
  zipfile <- tempfile()
  src <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(src, zipfile, method = "curl")
  epc <- read.csv2(unz(zipfile, "household_power_consumption.txt"))
  unlink(zipfile)
  epc
}
# Acquire the full electric power consumption data set
power_all <- getepc()
# Parse the date/time columns with lubridate and build a combined timestamp
require(lubridate)
power_all$Date <- dmy(power_all$Date)
power_all$Time <- hms(power_all$Time)
power_all$timeMark <- power_all$Date + power_all$Time
# Keep only the two days of interest: 2007-02-01 and 2007-02-02
day_first <- ymd("2007-02-01")
day_last <- ymd("2007-02-02")
power_sub <- subset(power_all, Date >= day_first & Date <= day_last)
# Measurement columns come in as text; coerce each to numeric
num_cols <- c("Global_active_power", "Sub_metering_1", "Sub_metering_2",
              "Sub_metering_3", "Voltage", "Global_reactive_power")
for (cl in num_cols) {
  power_sub[[cl]] <- as.numeric(as.character(power_sub[[cl]]))
}
# Prepare plot 3: the three sub-metering series over time
png("plot3.png", width = 480, height = 480)
plot(power_sub$timeMark, power_sub$Sub_metering_1,
     type = "l",
     xlab = "",
     ylab = "Energy sub metering")
lines(power_sub$timeMark, power_sub$Sub_metering_2,
      col = "red")
lines(power_sub$timeMark, power_sub$Sub_metering_3,
      col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"),
       lty = "solid")
dev.off()
| /plot3.R | no_license | hoffsite/ExData_Plotting1 | R | false | false | 1,653 | r | getepc <- function() {
# function to acquire electric power consumption data frame
temp <- tempfile()
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url,temp,method="curl")
data <- read.csv2(unz(temp,"household_power_consumption.txt"))
unlink(temp)
data
}
# Acquire the full electric power consumption data set
power_all <- getepc()
# Parse the date/time columns with lubridate and build a combined timestamp
require(lubridate)
power_all$Date <- dmy(power_all$Date)
power_all$Time <- hms(power_all$Time)
power_all$timeMark <- power_all$Date + power_all$Time
# Keep only the two days of interest: 2007-02-01 and 2007-02-02
day_first <- ymd("2007-02-01")
day_last <- ymd("2007-02-02")
power_sub <- subset(power_all, Date >= day_first & Date <= day_last)
# Measurement columns come in as text; coerce each to numeric
num_cols <- c("Global_active_power", "Sub_metering_1", "Sub_metering_2",
              "Sub_metering_3", "Voltage", "Global_reactive_power")
for (cl in num_cols) {
  power_sub[[cl]] <- as.numeric(as.character(power_sub[[cl]]))
}
# Prepare plot 3: the three sub-metering series over time
png("plot3.png", width = 480, height = 480)
plot(power_sub$timeMark, power_sub$Sub_metering_1,
     type = "l",
     xlab = "",
     ylab = "Energy sub metering")
lines(power_sub$timeMark, power_sub$Sub_metering_2,
      col = "red")
lines(power_sub$timeMark, power_sub$Sub_metering_3,
      col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"),
       lty = "solid")
dev.off()
|
"merror.pairs" <- function(df,labels=names(df))
{
pairs(df,xlim=range(df,na.rm=TRUE),ylim=range(df,na.rm=TRUE),
upper.panel=panel.merror,lower.panel=NULL,labels=labels)
}
| /R/merror.pairs.R | no_license | cran/merror | R | false | false | 177 | r | "merror.pairs" <- function(df,labels=names(df))
{
pairs(df,xlim=range(df,na.rm=TRUE),ylim=range(df,na.rm=TRUE),
upper.panel=panel.merror,lower.panel=NULL,labels=labels)
}
|
#' @title Convert capture counts to table of capture classes
#'
#' @description Converts a vector of capture counts into a two-column matrix consisting of all capture classes and the individuals associated with each class.
#'
#' @param counts a vector of capture count data
#'
#' @return A two-column matrix with the first column specifiying the capture class (where all individuals in class i were caught i times) and the second column specifying the number of individuals in this capture class.
#'
#' The data can be used as the data argument for any of the model-fitting functions implemented in capwire
#'
#' @references Miller C. R., P. Joyce and L.P. Waits. 2005. A new method for estimating the size of small populations from genetic mark-recapture data. Molecular Ecology 14:1991-2005.
#'
#' Pennell, M. W., C. R. Stansbury, L. P. Waits and C. R. Miller. 2013. Capwire: a R package for estimating population census size from non-invasive genetic sampling. Molecular Ecology Resources 13:154-157.
#'
#' @seealso \code{\link{fitTirm}}, \code{\link{fitEcm}}
#'
#' @author Matthew W. Pennell
#'
#' @export buildClassTable
#'
#' @examples
#'
#' ## create a vector of capture counts
#'
#' counts <- c(1,1,1,1,1,2,2,3,3,4,5)
#'
#' ## build table
#'
#' d <- buildClassTable(counts)
#' d
#'
buildClassTable <- function(counts){
  # Convert a vector of per-individual capture counts into a two-column
  # matrix of capture classes: column 1 ("capture.class") is the number of
  # times captured, column 2 ("n.ind") the number of individuals captured
  # that many times.
  #
  # Accept any numeric vector, including integer vectors (e.g. output of
  # table() or seq()); the previous inherits(counts, "numeric") test
  # rejected integer input even though it is valid capture data.
  if (!is.numeric(counts))
    stop("counts needs to be a numeric vector")
  classes <- sort(unique(counts))
  # vapply pins the result type, unlike sapply which can change shape.
  n.ind <- vapply(classes, function(k) sum(counts == k), numeric(1))
  res <- cbind(classes, n.ind)
  colnames(res) <- c("capture.class", "n.ind")
  res
}
## Validate a capwire data object: must be a two-column matrix or data.frame.
check.capwire.data <- function(x){
  is.tabular <- inherits(x, "matrix") || inherits(x, "data.frame")
  if (!is.tabular)
    stop("data must be entered as either a 'data.frame' or 'matrix'")
  if (ncol(x) != 2)
    stop("data should include exactly two columns")
}
## Expand a capture-class table into raw per-individual counts plus summary
## totals: s = total captures, t = number of distinct individuals. Classes
## with a zero capture count are dropped.
get.sampling.info <- function(d){
  obs <- unlist(lapply(seq_len(nrow(d)), function(i) rep(d[i, 1], d[i, 2])))
  obs <- obs[obs > 0]
  list(counts = obs, s = sum(obs), t = length(obs))
}
| /R/capwire-utils.R | no_license | mwpennell/capwire | R | false | false | 2,168 | r |
#' @title Convert capture counts to table of capture classes
#'
#' @description Converts a vector of capture counts into a two-column matrix consisting of all capture classes and the individuals associated with each class.
#'
#' @param counts a vector of capture count data
#'
#' @return A two-column matrix with the first column specifiying the capture class (where all individuals in class i were caught i times) and the second column specifying the number of individuals in this capture class.
#'
#' The data can be used as the data argument for any of the model-fitting functions implemented in capwire
#'
#' @references Miller C. R., P. Joyce and L.P. Waits. 2005. A new method for estimating the size of small populations from genetic mark-recapture data. Molecular Ecology 14:1991-2005.
#'
#' Pennell, M. W., C. R. Stansbury, L. P. Waits and C. R. Miller. 2013. Capwire: a R package for estimating population census size from non-invasive genetic sampling. Molecular Ecology Resources 13:154-157.
#'
#' @seealso \code{\link{fitTirm}}, \code{\link{fitEcm}}
#'
#' @author Matthew W. Pennell
#'
#' @export buildClassTable
#'
#' @examples
#'
#' ## create a vector of capture counts
#'
#' counts <- c(1,1,1,1,1,2,2,3,3,4,5)
#'
#' ## build table
#'
#' d <- buildClassTable(counts)
#' d
#'
buildClassTable <- function(counts){
  # Convert a vector of per-individual capture counts into a two-column
  # matrix of capture classes: column 1 ("capture.class") is the number of
  # times captured, column 2 ("n.ind") the number of individuals captured
  # that many times.
  #
  # Accept any numeric vector, including integer vectors (e.g. output of
  # table() or seq()); the previous inherits(counts, "numeric") test
  # rejected integer input even though it is valid capture data.
  if (!is.numeric(counts))
    stop("counts needs to be a numeric vector")
  classes <- sort(unique(counts))
  # vapply pins the result type, unlike sapply which can change shape.
  n.ind <- vapply(classes, function(k) sum(counts == k), numeric(1))
  res <- cbind(classes, n.ind)
  colnames(res) <- c("capture.class", "n.ind")
  res
}
## Validate a capwire data object: must be a two-column matrix or data.frame.
check.capwire.data <- function(x){
  is.tabular <- inherits(x, "matrix") || inherits(x, "data.frame")
  if (!is.tabular)
    stop("data must be entered as either a 'data.frame' or 'matrix'")
  if (ncol(x) != 2)
    stop("data should include exactly two columns")
}
## Expand a capture-class table into raw per-individual counts plus summary
## totals: s = total captures, t = number of distinct individuals. Classes
## with a zero capture count are dropped.
get.sampling.info <- function(d){
  obs <- unlist(lapply(seq_len(nrow(d)), function(i) rep(d[i, 1], d[i, 2])))
  obs <- obs[obs > 0]
  list(counts = obs, s = sum(obs), t = length(obs))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.