content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
#' pcabootAG
#'
#' @description Produces confidence intervals for the eigenvector components
#'   and eigenvalues of a given data set.
#' @details Confidence intervals are produced using bootstrap methods as well
#'   as theoretical large-sample results from Anderson and Girshick.
#'
#' @param data the data set (numeric matrix or data frame) to run the
#'   principal component analysis on
#' @param alpha decimal value to express the desired 100(1-alpha) percent
#'   confidence level
#' @param iter the number of iterations to be used in the bootstrap sampling
#' @param cov logical; \code{TRUE} uses the covariance matrix,
#'   \code{FALSE} the correlation matrix
#'
#' @return The function automatically plots density histograms for each
#'   eigenvalue and a boxplot of estimates for the first two principal
#'   components (eigenvectors). It also returns bootstrap confidence
#'   intervals for all eigenvector components and eigenvalues as well as
#'   Anderson-Girshick theoretical confidence intervals for the eigenvalues.
#'   The intervals are listed in order for each eigenvalue; components are
#'   listed by vector in order.
#' @export
#'
#' @examples \dontrun{ pcabootAG(MATH4793MOSER::welder, alpha = 0.15, iter = 2000, cov = TRUE)}
pcabootAG <- function (data, alpha = 0.05, iter = 1000, cov = FALSE){
  p <- ncol(data)  # number of variables = number of eigenvalues/eigenvectors
  vals <- eigestimate(mat = data, iter = iter, cov = cov)
  # One density-shaded histogram per bootstrap eigenvalue distribution.
  for (i in seq_len(p)) {
    h <- hist(vals$lambdas[, i], plot = FALSE)
    den <- h$density / max(h$density)  # scale densities to [0, 1] for the fill colors
    hist(vals$lambdas[, i], freq = FALSE,
         col = rgb(0, den, 1 - den^2, 0.7),
         # BUG FIX: the original used expression(), which displayed a literal
         # subscript "i"; bquote() substitutes the current value of i, matching
         # the (already correct) xlab.
         main = bquote("Bootstrap distribution of " ~ widehat(lambda)[.(i)]),
         xlab = bquote(widehat(lambda)[.(i)]))
  }
  # Group labels for the 2*p components of the first two eigenvectors; each
  # label is repeated once per bootstrap iteration (column-wise layout).
  labs <- c(paste("e1", seq_len(p)), paste("e2", seq_len(p)))
  grp <- matrix(rep(labs, each = iter), nrow = iter, ncol = 2 * p,
                byrow = FALSE)  # renamed from `cat`, which masked base::cat
  boxplot(vals$vectors ~ grp,
          xlab = "Eigenvector Components",
          ylab = "Value of Component")
  # Percentile bootstrap CIs for each eigenvalue.
  lambcis <- vector()
  for (i in seq_len(p)) {
    ci <- quantile(vals$lambdas[, i], c(alpha/2, 1 - alpha/2))
    lambcis <- c(lambcis, ci)
  }
  # Percentile bootstrap CIs for every eigenvector component (p^2 of them).
  vectcis <- vector()
  for (i in seq_len(p^2)) {
    ci <- quantile(vals$vectors[, i], c(alpha/2, 1 - alpha/2))
    vectcis <- c(vectcis, ci)
  }
  # Anderson-Girshick large-sample CIs for the eigenvalues.
  # NOTE(review): the "sample size" used here is the number of bootstrap
  # replicates (rows of vals$lambdas), not the number of observations in
  # `data` -- confirm this matches the intended A-G formula.
  aglambcis <- vector()
  for (i in seq_len(p)) {
    n <- length(vals$lambdas[, i])
    z <- qnorm(1 - alpha/2)
    lbar <- mean(vals$lambdas[, i])
    ci <- c(lbar / (1 + z * sqrt(2/n)), lbar / (1 - z * sqrt(2/n)))
    aglambcis <- c(aglambcis, ci)
  }
  list(BootLambdas = lambcis, BootVectors = vectcis, AGLambdas = aglambcis)
}
# Bootstrap statistic for boot::boot(): eigenvalues of the sample covariance
# matrix computed from the resampled rows of `mat`.
#
# mat     -- numeric data matrix (rows = observations)
# indices -- row indices drawn by the bootstrap for this replicate
# returns -- numeric vector of eigenvalues, in decreasing order
lambdaestimatecov <- function(mat, indices){
  resampled <- mat[indices, ]
  eigen(cov(resampled))$values
}
# Bootstrap statistic for boot::boot(): eigenvector matrix of the sample
# covariance matrix computed from the resampled rows of `mat`.
#
# mat     -- numeric data matrix (rows = observations)
# indices -- row indices drawn by the bootstrap for this replicate
# returns -- p x p matrix whose columns are the eigenvectors
vectorestimatecov <- function(mat, indices){
  resampled <- mat[indices, ]
  eigen(cov(resampled))$vectors
}
# Bootstrap statistic for boot::boot(): eigenvalues of the sample correlation
# matrix computed from the resampled rows of `mat`.
#
# mat     -- numeric data matrix (rows = observations)
# indices -- row indices drawn by the bootstrap for this replicate
# returns -- numeric vector of eigenvalues, in decreasing order
lambdaestimatecor <- function(mat, indices){
  resampled <- mat[indices, ]
  eigen(cor(resampled))$values
}
# Bootstrap statistic for boot::boot(): eigenvector matrix of the sample
# correlation matrix computed from the resampled rows of `mat`.
#
# mat     -- numeric data matrix (rows = observations)
# indices -- row indices drawn by the bootstrap for this replicate
# returns -- p x p matrix whose columns are the eigenvectors
vectorestimatecor <- function(mat, indices){
  resampled <- mat[indices, ]
  eigen(cor(resampled))$vectors
}
# Run the bootstrap for both the eigenvalue and eigenvector statistics.
#
# mat  -- data matrix to resample rows from
# iter -- number of bootstrap replicates
# cov  -- logical; TRUE uses the covariance-based statistics, otherwise the
#         correlation-based statistics
# returns a list with `lambdas` (iter x p matrix of bootstrap eigenvalues)
#   and `vectors` (iter x p^2 matrix of vectorized bootstrap eigenvectors),
#   both taken from the $t replicate matrix of boot::boot().
eigestimate <- function(mat, iter, cov){
  dat <- mat
  # if/else replaces the original pair of independent `== TRUE` / `== FALSE`
  # tests, which left `out`/`out2` undefined for any non-logical `cov`.
  if (isTRUE(cov)) {
    out <- boot::boot(dat, lambdaestimatecov, R = iter)$t
    out2 <- boot::boot(dat, vectorestimatecov, R = iter)$t
  } else {
    out <- boot::boot(dat, lambdaestimatecor, R = iter)$t
    out2 <- boot::boot(dat, vectorestimatecor, R = iter)$t
  }
  list(lambdas = out, vectors = out2)
}
|
/R/pcabootAG.R
|
permissive
|
jacob-moser/MATH4793MOSER
|
R
| false
| false
| 3,680
|
r
|
#' pcabootAG
#'
#' @description this function produces confidence intervals for the eigenvector components and eigenvalues of a given data set
#' @details This function produces confidence intervals using bootstrap methods as well as theoretical large sample results from Anderson and Girshick
#'
#' @param data the data set to run the principal component analysis on
#' @param alpha decimal value to express the desired 100(1-alpha) percent confidence level
#' @param iter the number of iterations to be used in the bootstrap sampling
#' @param cov True or false value to indicate use of covariance or correlation matrix where true uses the covariance matrix
#'
#' @return the function automatically plots density histograms for each eigenvalue and a boxplot of estimates for the first two principal components (eigenvectors). It also returns bootstrap confidence intervals for all eigenvector components and eigenvalues as well as Anderson Girshick theoretical confidence intervals for the eigenvalues. The intervals are listed in order on the command line for each eigenvalue. Components are listed by vector in order.
#' @export
#'
#' @examples \dontrun{ pcabootAG(MATH4793MOSER::welder, alpha = 0.15, iter = 2000, cov = TRUE)}
pcabootAG <- function (data, alpha = 0.05, iter = 1000, cov = FALSE){
p <- length(data[1,])
vals <-eigestimate(mat = data, iter = iter, cov = cov)
for(i in 1:p){
h <- hist(vals$lambdas[,i], plot = FALSE)
d <- h$density
den <- d/max(d)
hist(vals$lambdas[,i], freq = FALSE,
col = rgb(0,den,1-den^2, 0.7),
main = expression(paste("Bootstrap distribution of ", widehat(lambda)[i])), xlab = bquote(widehat(lambda)[.(i)]))
}
labs <- vector()
one <- vector()
for(i in 1:p){
labs <- c(labs,paste("e1",i))
one <- c(one,1)
}
for(i in 1:p){
labs <- c(labs,paste("e2",i))
one <- c(one,1)
}
labs <- rep(labs, iter*one)
cat <- matrix(labs,nr = iter, nc = 2*p, byrow = FALSE)
boxplot(vals$vectors ~ cat,
xlab = "Eigenvector Components",
ylab = "Value of Component")
lambcis <- vector()
for(i in 1:p){
ci <- quantile(vals$lambdas[,i], c(alpha/2, 1-alpha/2))
lambcis <- c(lambcis, ci)
}
vectcis <- vector()
for(i in 1:p^2){
ci <- quantile(vals$vectors[,i], c(alpha/2, 1-alpha/2))
vectcis <- c(vectcis, ci)
}
aglambcis <- vector()
for(i in 1:p){
ci <- c(mean(vals$lambdas[,i])/(1 + qnorm(1-alpha/2)*sqrt(2/length(vals$lambdas[,i]))),mean(vals$lambdas[,i])/(1 -
qnorm(1-alpha/2)*sqrt(2/length(vals$lambdas[,i]))))
aglambcis <- c(aglambcis,ci)
}
list(BootLambdas = lambcis, BootVectors = vectcis, AGLambdas = aglambcis)
}
lambdaestimatecov <- function(mat, indices){
S <- cov(mat[indices,])
E <- eigen(S)
lambda <-E$values
return(c(lambda))
}
vectorestimatecov <- function(mat, indices){
S <- cov(mat[indices,])
E <- eigen(S)
e <- E$vectors
return(e)
}
lambdaestimatecor <- function(mat, indices){
R <- cor(mat[indices,])
E <- eigen(R)
lambda <-E$values
return(c(lambda))
}
vectorestimatecor <- function(mat, indices){
R <- cor(mat[indices,])
E <- eigen(R)
e <- E$vectors
return(e)
}
eigestimate <- function(mat, iter, cov){
dat <- mat
if(cov == TRUE){
out <-boot::boot(dat, lambdaestimatecov, R = iter)$t
out2 <- boot::boot(dat,vectorestimatecov, R = iter)$t
}
if(cov == FALSE){
out <- boot::boot(dat, lambdaestimatecor, R = iter)$t
out2 <- boot::boot(dat,vectorestimatecor, R = iter)$t
}
list(lambdas = out, vectors = out2)
}
|
## Regression tests for RUVg (remove unwanted variation via control genes).
## Requires Bioconductor's RUVSeq; intended to be run as a package test.
library(RUVSeq)
## Simulated count matrix: 10 genes x 10 samples of Poisson(10) counts;
## all 10 genes are used as "negative controls" below.
mat <- matrix(data=rpois(100, lambda=10), ncol=10)
rownames(mat) <- paste("gene", 1:10, sep="")
es <- newSeqExpressionSet(mat)
## dimension of W
ks <- 1:5
## matrix
r1 <- lapply(ks, function(k) RUVg(mat, 1:10, k))
print(sapply(r1, function(x) dim(x$W)))
## W (unwanted-variation factors) must have exactly k columns for each k.
stopifnot(all(lapply(r1, function(x) dim(x$W)[2])==ks))
## already logged data
r1b <- lapply(ks, function(k) RUVg(log(mat+1), 1:10, k))
r1c <- lapply(ks, function(k) RUVg(log(mat+1), 1:10, k, isLog=TRUE))
r1d <- lapply(ks, function(k) RUVg(mat, 1:10, k, round=FALSE))
## Raw counts and pre-logged input (with isLog=TRUE) must yield the same W.
stopifnot(all(sapply(ks, function(i) all(r1[[i]]$W==r1c[[i]]$W))))
stopifnot(all(sapply(ks, function(i) all(r1d[[i]]$W==r1c[[i]]$W))))
## Unrounded normalized counts must match the log-scale output up to 1e-8.
stopifnot(all(sapply(ks, function(i) all(log(r1d[[i]]$normalizedCounts+1)-r1c[[i]]$normalizedCounts<1e-8))))
## SeqExpressionSet
r2 <- lapply(ks, function(k) RUVg(es, rownames(es)[1:10], k))
print(sapply(r2, function(x) dim(pData(x))))
## For the S4 interface, W lands in pData and must again have k columns.
stopifnot(all(lapply(r2, function(x) dim(pData(x))[2])==ks))
## check handling of zeros
mat <- matrix(data=rpois(100, lambda=2), ncol=10)
rownames(mat) <- paste("gene", 1:10, sep="")
r3 <- RUVg(mat, 1:10, k=1)
print(table(mat==0))
print(table(r3$normalizedCounts==0))
|
/tests/RUVg.R
|
no_license
|
drisso/RUVSeq
|
R
| false
| false
| 1,188
|
r
|
library(RUVSeq)
mat <- matrix(data=rpois(100, lambda=10), ncol=10)
rownames(mat) <- paste("gene", 1:10, sep="")
es <- newSeqExpressionSet(mat)
## dimension of W
ks <- 1:5
## matrix
r1 <- lapply(ks, function(k) RUVg(mat, 1:10, k))
print(sapply(r1, function(x) dim(x$W)))
stopifnot(all(lapply(r1, function(x) dim(x$W)[2])==ks))
## already logged data
r1b <- lapply(ks, function(k) RUVg(log(mat+1), 1:10, k))
r1c <- lapply(ks, function(k) RUVg(log(mat+1), 1:10, k, isLog=TRUE))
r1d <- lapply(ks, function(k) RUVg(mat, 1:10, k, round=FALSE))
stopifnot(all(sapply(ks, function(i) all(r1[[i]]$W==r1c[[i]]$W))))
stopifnot(all(sapply(ks, function(i) all(r1d[[i]]$W==r1c[[i]]$W))))
stopifnot(all(sapply(ks, function(i) all(log(r1d[[i]]$normalizedCounts+1)-r1c[[i]]$normalizedCounts<1e-8))))
## SeqExpressionSet
r2 <- lapply(ks, function(k) RUVg(es, rownames(es)[1:10], k))
print(sapply(r2, function(x) dim(pData(x))))
stopifnot(all(lapply(r2, function(x) dim(pData(x))[2])==ks))
## check handling of zeros
mat <- matrix(data=rpois(100, lambda=2), ncol=10)
rownames(mat) <- paste("gene", 1:10, sep="")
r3 <- RUVg(mat, 1:10, k=1)
print(table(mat==0))
print(table(r3$normalizedCounts==0))
|
## code to prepare `earthquakes` dataset goes here
library(magrittr)
## Read the raw tab-separated file, writing a CSV copy as a side effect
## (readr::write_csv() returns its input invisibly, so raw_data keeps the data).
raw_data <- readr::read_delim("data-raw/earthquakes.tsv", "\t") %>%
readr::write_csv(file.path("data-raw", "raw_data.csv"))
## Clean location names and dates (package helpers), then keep the
## analysis columns only.
earthquakes <- raw_data %>%
eq_location_clean() %>%
eq_clean_data() %>%
dplyr::select(date,
country,
area,
region,
location_name,
latitude,
longitude,
mag,
total_deaths)
## NOTE(review): both save() and use_data() persist the dataset; use_data()
## alone is the conventional step for package data -- confirm the .rda copy
## in data-raw/ is needed.
save(earthquakes, file = "data-raw/earthquakes.rda")
usethis::use_data(earthquakes, overwrite = TRUE)
|
/data-raw/setup.R
|
permissive
|
odiliameneses/earthquakes
|
R
| false
| false
| 607
|
r
|
## code to prepare `earthquakes` dataset goes here
library(magrittr)
raw_data <- readr::read_delim("data-raw/earthquakes.tsv", "\t") %>%
readr::write_csv(file.path("data-raw", "raw_data.csv"))
earthquakes <- raw_data %>%
eq_location_clean() %>%
eq_clean_data() %>%
dplyr::select(date,
country,
area,
region,
location_name,
latitude,
longitude,
mag,
total_deaths)
save(earthquakes, file = "data-raw/earthquakes.rda")
usethis::use_data(earthquakes, overwrite = TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reproDataCheck.R
\name{reproDataCheck}
\alias{reproDataCheck}
\title{Checks if an object can be used to perform reproduction toxicity data analysis}
\usage{
reproDataCheck(data, diagnosis.plot = TRUE)
}
\arguments{
\item{data}{any object}
\item{diagnosis.plot}{if \code{TRUE}, produces a diagnosis plot}
}
\value{
The function returns a \code{data.frame} similar to the one returned
by \code{\link{survDataCheck}}, except that it may contain the following
additional error \code{id}s:
\itemize{
\item \code{NreproInteger}: column \code{Nrepro} contains values of class other than \code{integer}
\item \code{Nrepro0T0}: \code{Nrepro} is not 0 at time 0 for each concentration and each replicate
\item \code{Nsurvt0Nreprotp1P}: at a given time \eqn{T}, the number of
alive individuals is null and the number of collected offspring is not null
for the same replicate and the same concentration at time \eqn{T+1}
}
}
\description{
The \code{reproDataCheck} function can be used to check if an object
containing data from a reproduction toxicity assay meets the expectations
of the function \code{\link{reproData}}.
}
\details{
Since in morse reproduction data sets are a special case of survival data
sets, \code{reproDataCheck} performs the same verifications as
\code{\link{survDataCheck}}, plus additional ones that are specific to
reproduction data.
}
\note{
If an error of type \code{dataframeExpected} or \code{missingColumn}
is detected, the function
\code{reproDataCheck} is stopped. When no error is detected the
\code{reproDataCheck}
function returns an empty dataframe.
}
\examples{
# Run the check data function
data(copper)
reproDataCheck(copper)
# Now we insert an error in the data set, by setting a non-zero number of
# offspring at some time, although there is no surviving individual in the
# replicate from the previous time point.
copper[148, "Nrepro"] <- as.integer(1)
reproDataCheck(copper)
}
\seealso{
\code{\link{reproData}}
}
\keyword{check}
|
/man/reproDataCheck.Rd
|
no_license
|
cran/morse
|
R
| false
| true
| 2,047
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reproDataCheck.R
\name{reproDataCheck}
\alias{reproDataCheck}
\title{Checks if an object can be used to perform reproduction toxicity data analysis}
\usage{
reproDataCheck(data, diagnosis.plot = TRUE)
}
\arguments{
\item{data}{any object}
\item{diagnosis.plot}{if \code{TRUE}, produces a diagnosis plot}
}
\value{
The function returns a \code{data.frame} similar to the one returned
by \code{\link{survDataCheck}}, except that it may contain the following
additional error \code{id}s:
\itemize{
\item \code{NreproInteger}: column \code{Nrepro} contains values of class other than \code{integer}
\item \code{Nrepro0T0}: \code{Nrepro} is not 0 at time 0 for each concentration and each replicate
\item \code{Nsurvt0Nreprotp1P}: at a given time \eqn{T}, the number of
alive individuals is null and the number of collected offspring is not null
for the same replicate and the same concentration at time \eqn{T+1}
}
}
\description{
The \code{reproDataCheck} function can be used to check if an object
containing data from a reproduction toxicity assay meets the expectations
of the function \code{\link{reproData}}.
}
\details{
Since in morse' reproduction data sets are a special case of survival data sets,
\code{reproDataCheck} performs the same verifications than
\code{\link{survDataCheck}} plus additional ones that are specific to
reproduction data.
}
\note{
If an error of type \code{dataframeExpected} or \code{missingColumn}
is detected, the function
\code{reproDataCheck} is stopped. When no error is detected the
\code{reproDataCheck}
function returns an empty dataframe.
}
\examples{
# Run the check data function
data(copper)
reproDataCheck(copper)
# Now we insert an error in the data set, by setting a non-zero number of
# offspring at some time, although there is no surviving individual in the
# replicate from the previous time point.
copper[148, "Nrepro"] <- as.integer(1)
reproDataCheck(copper)
}
\seealso{
\code{\link{reproData}}
}
\keyword{check}
|
"_________________________________NAIVE BAYES_________________________________________"
## Naive Bayes ham/spam SMS classifier on a bag-of-words model (tm + e1071).
#Library
library(tm)
library(textcat)
setwd("D:\\STUDY PROCESS\\Excelr\\Assignments\\Pending\\Navie Bayes")
# Question Ham And Spam Mail Filtering
message <- read.csv("sms_raw_NB.csv",stringsAsFactors = F)
head(message)
message$type = factor(message$type)
str(message)
library("textcat")
# NOTE(review): textcat() is applied to message$type (the ham/spam label),
# not message$text -- presumably the text column was intended; confirm.
table(textcat(x = message$type),message$type)# Clear that latvian messages are spam
# from this we can directly comment that all latvian message are spam
# Build a corpus (tm's text container) from the message text.
msg_cps <- Corpus(VectorSource(message$text))
str(msg_cps)
# Cleaning pipeline: lowercase, then strip numbers, English stopwords,
# punctuation, and extra whitespace.
clean_cps <-tm_map(msg_cps,tolower)
clean_cps <- tm_map(clean_cps, removeNumbers)
clean_cps <- tm_map(clean_cps, removeWords, stopwords(kind = "en"))
clean_cps <- tm_map(clean_cps, removePunctuation)
clean_cps <- tm_map(clean_cps, stripWhitespace)
inspect(clean_cps[1])
# Document Term Matrix (rows = messages, columns = terms)
msg.dtm = DocumentTermMatrix(clean_cps)
# Splitting the data to train and test (70/30 split, seeded for reproducibility)
nrow(message)
set.seed(101);split <- sample(1:nrow(message),nrow(message)*.7,F)
tr_msg = message[split, ] # about 70%
te_msg = message[-split, ] # the rest
# then split the document-term matrix
dtm_train = msg.dtm[split, ]
dtm_test = msg.dtm[-split, ]
# and finally the corpus
cor_train = clean_cps[split]
cor_test = clean_cps[-split]
round(prop.table(table(tr_msg$type))*100)
library(wordcloud)
windows()
wordcloud(cor_train,
min.freq=10,max.words = 300,
random.order = F,colors = ifelse(message$type=="spam","red","blue"))
# Blue words for Ham messages, and red words are most frequently used in Spam messages
# Reduce dimensionality: keep only terms appearing at least 10 times in training
freq_terms = findFreqTerms(x = dtm_train, lowfreq = 10)
dtm_train_ = DocumentTermMatrix(cor_train, list(dictionary=freq_terms))
dtm_test_ = DocumentTermMatrix(cor_test, list(dictionary=freq_terms))
# We reduced the dimension
dim(dtm_test_);dim(dtm_test)
# Convert term counts to a Yes/No presence factor
# (e1071::naiveBayes treats factor features categorically).
c_counts = function(x) {
x = ifelse(x > 0, 1, 0)
x = factor(x, levels = c(0, 1), labels=c("No", "Yes"))
return (x)
}
dtm_train_ = apply(dtm_train_, MARGIN=2, c_counts);table(dtm_train_) #run once
dtm_test_ = apply(dtm_test_, MARGIN=2, c_counts);table(dtm_test_) #run once
# Model Selection ----
library(e1071)
# Model 1
model_1 = naiveBayes(dtm_train_, tr_msg$type)
pred_1 = predict(model_1,dtm_test_)
table(Predicted=pred_1,Actual=te_msg$type)
efficiency1 = mean(pred_1==te_msg$type);efficiency1
# I am satisfied with my efficiency, i.e. 0.9736
|
/NavieBayes/neoNavieBayes.R
|
no_license
|
neoaman/Assignments_ExcelR_Solutions
|
R
| false
| false
| 2,576
|
r
|
"_________________________________NAIVE BAYES_________________________________________"
#Library
library(tm)
library(textcat)
setwd("D:\\STUDY PROCESS\\Excelr\\Assignments\\Pending\\Navie Bayes")
# Question Ham And Spam Mail Filtering
message <- read.csv("sms_raw_NB.csv",stringsAsFactors = F)
head(message)
message$type = factor(message$type)
str(message)
library("textcat")
table(textcat(x = message$type),message$type)# Clear that latvian messages are spam
# from this we can directly comment that all latvian message are spam
# As in text mining we build carpus for message
msg_cps <- Corpus(VectorSource(message$text))
str(msg_cps)
clean_cps <-tm_map(msg_cps,tolower)
clean_cps <- tm_map(clean_cps, removeNumbers)
clean_cps <- tm_map(clean_cps, removeWords, stopwords(kind = "en"))
clean_cps <- tm_map(clean_cps, removePunctuation)
clean_cps <- tm_map(clean_cps, stripWhitespace)
inspect(clean_cps[1])
# Document Term Metrix
msg.dtm = DocumentTermMatrix(clean_cps)
# Splitting the data to train and test
nrow(message)
set.seed(101);split <- sample(1:nrow(message),nrow(message)*.7,F)
tr_msg = message[split, ] # about 70%
te_msg = message[-split, ] # the rest
# then split the document-term matrix
dtm_train = msg.dtm[split, ]
dtm_test = msg.dtm[-split, ]
# and finally the corpus
cor_train = clean_cps[split]
cor_test = clean_cps[-split]
round(prop.table(table(tr_msg$type))*100)
library(wordcloud)
windows()
wordcloud(cor_train,
min.freq=10,max.words = 300,
random.order = F,colors = ifelse(message$type=="spam","red","blue"))
# Blue words for Ham messages, and red words are most frequently used in Spam messages
# Lets reduce the number of column base on the lowest frequency
freq_terms = findFreqTerms(x = dtm_train, lowfreq = 10)
dtm_train_ = DocumentTermMatrix(cor_train, list(dictionary=freq_terms))
dtm_test_ = DocumentTermMatrix(cor_test, list(dictionary=freq_terms))
# We reduced the dimension
dim(dtm_test_);dim(dtm_test)
c_counts = function(x) {
x = ifelse(x > 0, 1, 0)
x = factor(x, levels = c(0, 1), labels=c("No", "Yes"))
return (x)
}
dtm_train_ = apply(dtm_train_, MARGIN=2, c_counts);table(dtm_train_) #run once
dtm_test_ = apply(dtm_test_, MARGIN=2, c_counts);table(dtm_test_) #run once
# Model Selection ----
library(e1071)
# Model 1
model_1 = naiveBayes(dtm_train_, tr_msg$type)
pred_1 = predict(model_1,dtm_test_)
table(Predicted=pred_1,Actual=te_msg$type)
efficiency1 = mean(pred_1==te_msg$type);efficiency1
# I am sertisfied with my efficiency i.e. 0.9736
|
## Distributed reduction tests for pbdDMAT, driven by the pbdTEST harness.
## Must be launched under MPI (e.g. via mpirun); not runnable standalone.
library(pbdTEST)
settings(mpi=TRUE)
.BLDIM <- 2
# Same seed on every rank (diff=FALSE) so all processes hold identical data.
comm.set.seed(seed=1234, diff=FALSE)
### --------------------------------------
module("Reductions")
M <- 250
N <- 250
# M<- N<- 10
BL <- 4
x <- matrix(rnorm(M*N, 10, 100), M, N)
dx <- as.ddmatrix(x, BL)
# Perturb one entry so dy differs from dx in exactly one position.
x[1,1] <- x[1,1] + 1
dy <- as.ddmatrix(x, BL)
submodule("any, all, ==")
# Each test() compares the distributed result `a` with the expected value `b`.
test("all(dx==dx)", {
a <- all(dx==dx)
b <- TRUE
})
test("any(dx==dx)", {
a <- any(dx==dx)
b <- TRUE
})
# dx and dy differ in one entry, so all() is FALSE but any() is TRUE.
test("!all(dx==dB)", {
a <- !all(dx==dy)
b <- TRUE
})
test("any(dx==dB)", {
a <- any(dx==dy)
b <- TRUE
})
collect()
# ---------------------------------------------------
# Tests
# ---------------------------------------------------
# Compare serial reductions on the global matrix `A` against the distributed
# equivalents on the global ddmatrix `dx`, printing TRUE for each agreement.
#
# NOTE: `A` and `dx` are globals assigned by the caller before each call;
# the `.` argument is unused (kept to preserve the original signature).
tests <- function(.)
{
  # Row/column sums
  rs1 <- rowSums(A)
  rs2 <- as.vector(rowSums(dx))
  # BUG FIX: the original printed all.equal(rs2, rs2) -- a tautology that
  # always reported TRUE and never actually checked rowSums.
  comm.print(all.equal(rs1, rs2))
  out1 <- colSums(A)
  out2 <- as.vector(colSums(dx))
  comm.print(all.equal(out1, out2))
  # Row/column means
  rs1 <- rowMeans(A)
  rs2 <- as.vector(rowMeans(dx))
  comm.print(all.equal(rs1, rs2))
  out1 <- colMeans(A)
  out2 <- as.vector(colMeans(dx))
  comm.print(all.equal(out1, out2))
  # Whole-matrix reductions
  out1 <- sum(A)
  out2 <- sum(dx)
  comm.print(all.equal(out1, out2))
  out1 <- prod(A)
  out2 <- prod(dx)
  comm.print(all.equal(out1, out2))
  out1 <- diag(A)
  out2 <- diag(dx)
  comm.print(all.equal(out1, out2))
  out1 <- mean(A)
  out2 <- mean(dx)
  comm.print(all.equal(out1, out2))
  # Column standard deviations
  out1 <- apply(A, MARGIN=2, FUN=sd)
  out2 <- as.vector(sd(dx))
  comm.print(all.equal(out1, out2))
}
comm.print("-------Reductions-------")
# Run the reduction checks on three shapes: square, single column, and
# single row -- degenerate dimensions exercise distribution edge cases.
comm.print("    Square")
A <- matrix(rnorm(M*N, 10, 100), M, N)
dx <- as.ddmatrix(A, BL)
tests()
comm.print("    Column")
A <- matrix(rnorm(M*1, 10, 100), M, 1)
dx <- as.ddmatrix(A, BL)
tests()
comm.print("    Row")
A <- matrix(rnorm(1*N, 10, 100), 1, N)
dx <- as.ddmatrix(A, BL)
tests()
# Shut down the MPI communicator cleanly.
finalize()
|
/pbdDMAT/inst/tests/10_reductions.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 2,117
|
r
|
library(pbdTEST)
settings(mpi=TRUE)
.BLDIM <- 2
comm.set.seed(seed=1234, diff=FALSE)
### --------------------------------------
module("Reductions")
M <- 250
N <- 250
# M<- N<- 10
BL <- 4
x <- matrix(rnorm(M*N, 10, 100), M, N)
dx <- as.ddmatrix(x, BL)
x[1,1] <- x[1,1] + 1
dy <- as.ddmatrix(x, BL)
submodule("any, all, ==")
test("all(dx==dx)", {
a <- all(dx==dx)
b <- TRUE
})
test("any(dx==dx)", {
a <- any(dx==dx)
b <- TRUE
})
test("!all(dx==dB)", {
a <- !all(dx==dy)
b <- TRUE
})
test("any(dx==dB)", {
a <- any(dx==dy)
b <- TRUE
})
collect()
# ---------------------------------------------------
# Tests
# ---------------------------------------------------
tests <- function(.)
{
rs1 <- rowSums(A)
rs2 <- as.vector(rowSums(dx))
comm.print(all.equal(rs2, rs2))
out1 <- colSums(A)
out2 <- as.vector(colSums(dx))
comm.print(all.equal(out1, out2))
rs1 <- rowMeans(A)
rs2 <- as.vector(rowMeans(dx))
comm.print(all.equal(rs1, rs2))
# if (!is.logical(all.equal(rs1, rs2))){
# comm.print(rs1)
# comm.print(rs2)
# }
out1 <- colMeans(A)
out2 <- as.vector(colMeans(dx))
comm.print(all.equal(out1, out2))
# if (!is.logical(all.equal(out1, out2))){
# comm.print(out1)
# comm.print(out2)
# }
out1 <- sum(A)
out2 <- sum(dx)
comm.print(all.equal(out1, out2))
out1 <- prod(A)
out2 <- prod(dx)
comm.print(all.equal(out1, out2))
out1 <- diag(A)
out2 <- diag(dx)
comm.print(all.equal(out1, out2))
# if (!is.logical(all.equal(out1, out2))){
# comm.print(out1)
# comm.print(out2)
# }
out1 <- mean(A)
out2 <- mean(dx)
comm.print(all.equal(out1, out2))
out1 <- apply(A, MARGIN=2, FUN=sd)
out2 <- as.vector(sd(dx))
comm.print(all.equal(out1, out2))
}
comm.print("-------Reductions-------")
comm.print(" Square")
A <- matrix(rnorm(M*N, 10, 100), M, N)
dx <- as.ddmatrix(A, BL)
tests()
comm.print(" Column")
A <- matrix(rnorm(M*1, 10, 100), M, 1)
dx <- as.ddmatrix(A, BL)
tests()
comm.print(" Row")
A <- matrix(rnorm(1*N, 10, 100), 1, N)
dx <- as.ddmatrix(A, BL)
tests()
finalize()
|
## Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
#Read the file
data<-read.table("household_power_consumption.txt",header = TRUE,sep = ";")
#Format the Date
data$Date <- as.Date(data$Date,"%d/%m/%Y")
#Subset the data with Date of "2007-02-01" & "2007-02-02"
subdata<-data[which(data$Date=="2007-02-01" | data$Date=="2007-02-02"),]
#Create Plot1 and output
# as.character() first guards against a factor column; any non-numeric
# entries (missing-value markers) become NA with a coercion warning.
subdata$Global_active_power<-as.numeric(as.character(subdata$Global_active_power))
Global_active_power<-subdata$Global_active_power
# 480x480 px PNG output file.
png("plot1.png",480,480)
hist(Global_active_power,main="Global Active Power", col="red", xlab="Global Active Power(kilowatts)")
dev.off()
|
/plot1.R
|
no_license
|
ewoliver/ExData_Plotting1
|
R
| false
| false
| 603
|
r
|
#Read the file
data<-read.table("household_power_consumption.txt",header = TRUE,sep = ";")
#Format the Date
data$Date <- as.Date(data$Date,"%d/%m/%Y")
#Subset the data with Date of "2007-02-01" & "2007-02-02"
subdata<-data[which(data$Date=="2007-02-01" | data$Date=="2007-02-02"),]
#Create Plot1 and output
subdata$Global_active_power<-as.numeric(as.character(subdata$Global_active_power))
Global_active_power<-subdata$Global_active_power
png("plot1.png",480,480)
hist(Global_active_power,main="Global Active Power", col="red", xlab="Global Active Power(kilowatts)")
dev.off()
|
## Mean of a pollutant across monitor CSV files.
##
## directory -- path to the folder of monitor CSV files
## pollutant -- name of the column to average (e.g. "sulfate")
## id        -- integer positions into the (sorted) file listing to include;
##              defaults to all 332 monitors
## returns a numeric scalar: the NA-stripped mean of the pollutant column
## over all selected files.
pollutantmean <- function(directory, pollutant, id = 1:332) {
  ## Full paths of every file in the directory (list.files() sorts them,
  ## so `id` indexes files in alphabetical order).
  files <- list.files(directory ,full.names = TRUE)
  ## Read each requested file and stack once; avoids the O(n^2) cost of
  ## growing a data frame with rbind() inside a loop.
  dat <- do.call(rbind, lapply(files[id], read.csv))
  ## calulate the mean without the NAs of the pollutant
  mean(dat[, pollutant], na.rm = TRUE)
}
|
/pollutantmean.R
|
no_license
|
Hippoplex/specdata
|
R
| false
| false
| 487
|
r
|
pollutantmean <- function(directory, pollutant, id = 1:332) {
##create a character vector of the names of files in the named directory.
files <- list.files(directory ,full.names = TRUE)
##create data frame to load data into
dat <- data.frame()
##create a 'for loop' to enter data into the data frame
for (i in id) {
dat <- rbind(dat, read.csv(files[i]))
}
## calulate the mean without the NAs of the pollutant
mean(dat[, pollutant], na.rm = TRUE)
}
|
################################################################################
# #
# WBP Demographic Model LAI data #
# #
# AUTHOR: Elizabeth Pansing #
# DATA SOURCE: R. Keane, USFS RMRS Missoula Fire Lab #
# DESCRIPTION: .... The following explores LAI values for whitebark pine #
# forests collected by R. Keane and colleagues at 6 sites: #
# 1) Bear Overlook, 2) Musgrove Creek, 3) Beaver Ridge, #
# 4) Snowbowl, 5) Coyote Meadows, 6) Smith Creek. All sites #
# are in the Northern Rocky Mtns. Each site experienced #
# prescribed fire of various intensities (although some #
# plots within sites were controls) and some were thinned. #
# Plots have been monitored at various time points since. #
################################################################################
#TODO: WTF is sel?
#TODO: WTF is conf?
#TODO: Why are some values of burn "" whereas others are "unburned"?
#TODO: What are the different fuels treatments (none, nutcracker, enhancement, treatment)
#TODO: What are the different planting treatments?
# NOTE(review): rm(list = ls()) wipes the interactive workspace whenever this
# script is sourced; avoid in shared scripts.
rm(list = ls())
options(stringsAsFactors = FALSE)
options(scipen = 999)  # suppress scientific notation in printed output
################################################################################
#---------------------------------------|---------------------------------------
# Load libraries
#---------------------------------------|---------------------------------------
################################################################################
suppressMessages(library(tidyverse))
################################################################################
#---------------------------------------|---------------------------------------
# Establish directories
#---------------------------------------|---------------------------------------
################################################################################
# NOTE(review): absolute user-specific path -- breaks on any other machine;
# consider here::here() or relative paths.
proj_dir <- '/Users/elizabethpansing/Box/PhD/Code/WBP Demographic Model Master/WBPdemographicModel/'
rda_dir <- "Rda/"
data_dir <- "Data/"
code_dir <- "Code/"
model_file <- "Pansing et al. MPM Model Final? Gamma fire.R"
figures_dir <- "Figures/Final?/"
export_table_dir <- "Exported Data Tables/"
################################################################################
#---------------------------------------|---------------------------------------
# Load LAI data
#---------------------------------------|---------------------------------------
################################################################################
lai <- read.csv(paste0(proj_dir, "Data/LAI.csv"))
# Short machine-friendly column names; `sa` appears to identify the study
# area (six sites, checked below), other codes are unexplained -- see TODOs.
colnames(lai) <- c("sa", "date", "burn_date", "plot", "subplot", "subsubplot", "vp", "lai", "sel", "conf", "notes", "burn", "fuels", "planting")
str(lai)
# Parse measurement dates and derive the year for later grouping.
lai$date <- lubridate::ymd(as.character(lai$date))
lai <- lai %>% mutate(year = lubridate::year(date))
#---------------------------------------|---------------------------------------
# Explore to make sure data match design
#---------------------------------------|---------------------------------------
nrow(lai)
length(unique(lai$sa)) # should be 6
unique(lai$sa)
sum(is.na(lai$sa))
sum(lai$sa == "")
lai %>% filter(., sa == "")
which(lai$sa == "")
# Drop records that carry an empty study-area code.
lai <- lai %>% filter(., sa != "")
nrow(lai)
length(unique(lai$sa)) # should be 6
unique(lai$sa)
# B = table(lai$subplot, lai$plot, lai$sa)
# C = *Coyote Meadows
# M = *Musgrove Creek
# R =
# S = *Smith Creek
##
# Per-column sanity checks: value ranges, unique codes, and missingness.
range(lai$year)
sum(is.na(lai$year))
unique(lai$burn_date)
sum(is.na(lai$burn_date))
sum(is.na(lai$burn_date))/nrow(lai) # proportion of records without a burn date
unique(lai$plot)
sum(is.na(lai$plot))
unique(lai$subplot)
sum(is.na(lai$subplot))
unique(lai$subsubplot)
sum(is.na(lai$subsubplot))
unique(lai$vp)
sum(is.na(lai$vp))
unique(lai$sel)
sum(is.na(lai$sel))
unique(lai$conf)
sum(is.na(lai$conf))
unique(lai$notes)
unique(lai$burn)
sum(lai$burn == "")
lai[which(lai$burn == ""),]
# Cross-check consistency of blank burn status vs missing burn dates.
sum(lai$burn == "" & is.na(lai$burn_date))/sum(lai$burn == "") # all entries with "" burn status have NA burn dates
sum(lai$burn == "" & is.na(lai$burn_date))/sum(is.na(lai$burn_date)) # not all entries with NA burn dates have "" burn status
lai[which(is.na(lai$burn_date) & !(lai$burn == "")),] # all have burn status == "unburned"
unique(lai$fuels)
# none, "", nutcracker, enhancement, treatment
sum(is.na(lai$fuels))
sum(lai$fuels == "")
unique(lai$planting)
sum(is.na(lai$planting))
sum(lai$planting == "")
table(lai[, c("burn")], useNA = "always")
table(lai[, c("fuels")], useNA = "always")
table(lai[, c("planting")], useNA = "always")
sum(is.na(lai$burn) & is.na(lai$fuels))
####################################################
# Earliest measurement year per study area (baseline survey subset).
lai_first <- lai %>%
dplyr::group_by(., sa) %>%
dplyr::filter(., year == min(year)) %>%
dplyr::ungroup()
R <- lai %>%
filter(., sa == "R")
Rfirst <- lai_first %>%
filter(., sa == 'R')
table(Rfirst$subplot, Rfirst$plot)
table(R$subplot, R$plot)
table( R$fuels, R$planting, R$burn)
table(lai[,c("burn","fuels", "planting")], useNA = "always")
## According to Keane & Parsons 2010, the plot and subplot (my own names) determine
# the treatment. So creating a "treatment" variable that is a combination of plot and subplot
lai <- lai %>%
mutate(., treatment = paste0(plot, subplot))
## Look at data
# Mean LAI over time per treatment at site "B".
# NOTE(review): summarise_at()/funs() are deprecated in current dplyr;
# prefer summarise(across(lai, mean)) when the script is next touched.
lai %>%
filter(., sa == "B") %>%
group_by(., treatment, date) %>%
summarise_at(., vars(lai), funs(mean)) %>%
ggplot(aes(x = date, y = lai, col = as.factor(treatment)))+
geom_point()
# Example, Beaver Ridge
# Treatment areas are 1a, 2a, 3a, etc. which correspond to control, nutcracker openings no burn,
# nut
|
/Code/Keane LAI data .R
|
no_license
|
erpansing/WBPdemographicModel
|
R
| false
| false
| 6,057
|
r
|
################################################################################
# #
# WBP Demographic Model LAI data #
# #
# AUTHOR: Elizabeth Pansing #
# DATA SOURCE: R. Keane, USFS RMRS Missoula Fire Lab #
# DESCRIPTION: .... The following explores LAI values for whitebark pine #
# forests collected by R. Keane and colleagues at 6 sites: #
# 1) Bear Overlook, 2) Musgrove Creek, 3) Beaver Ridge, #
# 4) Snowbowl, 5) Coyote Meadows, 6) Smith Creek. All sites #
# are in the Northern Rocky Mtns. Each site experienced #
# prescribed fire of various intensities (although some #
# plots within sites were controls) and some were thinned. #
# Plots have been monitored at various time points since. #
################################################################################
#TODO: WTF is sel?
#TODO: WTF is conf?
#TODO: Why are some values of burn "" whereas others are "unburned"?
#TODO: What are the different fuels treatments (none, nutcracker, enhancement, treatment)
#TODO: What are the different planting treatments?
# Reset the workspace and set session-wide options for this analysis script.
rm(list = ls())
options(stringsAsFactors = FALSE)
options(scipen = 999)
################################################################################
#---------------------------------------|---------------------------------------
#  Load libraries
#---------------------------------------|---------------------------------------
################################################################################
suppressMessages(library(tidyverse))
################################################################################
#---------------------------------------|---------------------------------------
#  Establish directories
#---------------------------------------|---------------------------------------
################################################################################
# NOTE(review): these are machine-specific absolute paths; only proj_dir is
# actually used below.
proj_dir <- '/Users/elizabethpansing/Box/PhD/Code/WBP Demographic Model Master/WBPdemographicModel/'
rda_dir <- "Rda/"
data_dir <- "Data/"
code_dir <- "Code/"
model_file <- "Pansing et al. MPM Model Final? Gamma fire.R"
figures_dir <- "Figures/Final?/"
export_table_dir <- "Exported Data Tables/"
################################################################################
#---------------------------------------|---------------------------------------
#  Load LAI data
#---------------------------------------|---------------------------------------
################################################################################
# Read the raw LAI export and assign short column names
# (sa = study area; vp = viewpoint; sel/conf = unknown, see TODOs above).
lai <- read.csv(paste0(proj_dir, "Data/LAI.csv"))
colnames(lai) <- c("sa", "date", "burn_date", "plot", "subplot", "subsubplot", "vp", "lai", "sel", "conf", "notes", "burn", "fuels", "planting")
str(lai)
# Parse the measurement date and derive a year column.
lai$date <- lubridate::ymd(as.character(lai$date))
lai <- lai %>% mutate(year = lubridate::year(date))
#---------------------------------------|---------------------------------------
#  Explore to make sure data match design
#---------------------------------------|---------------------------------------
# Sanity checks: record counts, study-area codes, and empty-string IDs.
nrow(lai)
length(unique(lai$sa)) # should be 6
unique(lai$sa)
sum(is.na(lai$sa))
sum(lai$sa == "")
lai %>% filter(., sa == "")
which(lai$sa == "")
# Drop rows with a blank study-area code.
lai <- lai %>% filter(., sa != "")
nrow(lai)
length(unique(lai$sa)) # should be 6
unique(lai$sa)
# B = table(lai$subplot, lai$plot, lai$sa)
# C = *Coyote Meadows
# M = *Musgrove Creek
# R =
# S = *Smith Creek
##
# Column-by-column inspection: value ranges, unique levels, and NA counts.
range(lai$year)
sum(is.na(lai$year))
unique(lai$burn_date)
sum(is.na(lai$burn_date))
sum(is.na(lai$burn_date))/nrow(lai)
unique(lai$plot)
sum(is.na(lai$plot))
unique(lai$subplot)
sum(is.na(lai$subplot))
unique(lai$subsubplot)
sum(is.na(lai$subsubplot))
unique(lai$vp)
sum(is.na(lai$vp))
unique(lai$sel)
sum(is.na(lai$sel))
unique(lai$conf)
sum(is.na(lai$conf))
unique(lai$notes)
unique(lai$burn)
sum(lai$burn == "")
lai[which(lai$burn == ""),]
sum(lai$burn == "" & is.na(lai$burn_date))/sum(lai$burn == "") # all entries with "" burn status have NA burn dates
sum(lai$burn == "" & is.na(lai$burn_date))/sum(is.na(lai$burn_date)) # not all entries with NA burn dates have "" burn status
lai[which(is.na(lai$burn_date) & !(lai$burn == "")),] # all have burn status == "unburned"
unique(lai$fuels)
# none, "", nutcracker, enhancement, treatment
sum(is.na(lai$fuels))
sum(lai$fuels == "")
unique(lai$planting)
sum(is.na(lai$planting))
sum(lai$planting == "")
# Cross-tabulate each treatment field, including NA counts.
table(lai[, c("burn")], useNA = "always")
table(lai[, c("fuels")], useNA = "always")
table(lai[, c("planting")], useNA = "always")
sum(is.na(lai$burn) & is.na(lai$fuels))
####################################################
# Keep only each study area's earliest measurement year.
lai_first <- lai %>%
dplyr::group_by(., sa) %>%
dplyr::filter(., year == min(year)) %>%
dplyr::ungroup()
# Subset study area "R": full record and first-year-only record.
R <- lai %>%
filter(., sa == "R")
Rfirst <- lai_first %>%
filter(., sa == 'R')
table(Rfirst$subplot, Rfirst$plot)
table(R$subplot, R$plot)
table( R$fuels, R$planting, R$burn)
table(lai[,c("burn","fuels", "planting")], useNA = "always")
## According to Keane & Parsons 2010, the plot and subplot (my own names) determine
# the treatment. So creating a "treatment" variable that is a combination of plot and subplot
## Build a treatment identifier: per Keane & Parsons 2010, plot + subplot
## jointly determine the treatment applied to each measurement location.
lai <- lai %>%
  mutate(., treatment = paste0(plot, subplot))
## Look at data: mean LAI per treatment over time at study area "B".
## FIX: funs() is deprecated (dplyr >= 0.8); a purrr-style lambda list is the
## documented replacement and, being a single unnamed function, it keeps the
## summarised column named "lai" (required by the y aesthetic below).
lai %>%
  filter(., sa == "B") %>%
  group_by(., treatment, date) %>%
  summarise_at(., vars(lai), list(~ mean(.))) %>%
  ggplot(aes(x = date, y = lai, col = as.factor(treatment)))+
  geom_point()
# Example, Beaver Ridge
# Treatment areas are 1a, 2a, 3a, etc. which correspond to control, nutcracker openings no burn,
# nut
|
library(tidyverse)
#Random sample of 10000 SNPs from the full data set
# NOTE(review): the sample() call below is not seeded, so the SNP subset
# differs between runs — confirm whether reproducibility is required.
data.geno = read_csv("Rice_44K_genotypes.csv.gz",
na=c("NA","00"))
data.geno = data.geno %>% select(-`6_17160794_1`)
names(data.geno)[1] = "ID"
data.geno.10000 = data.geno[,c(1,sample(2:ncol(data.geno),10000))]
# Convert genotype strings to numeric codes (one factor level per genotype).
geno.numeric = data.geno.10000[,-1] %>%
lapply(factor) %>%
as.data.frame() %>%
data.matrix()
# Impute missing genotypes with the per-SNP (column) mean.
geno.numeric.fill =
apply(geno.numeric, 2, function(x) {
x[is.na(x)] <- mean(x, na.rm=T)
x})
#PCA of first 10 PCs
geno.pca = prcomp(geno.numeric.fill,
rank.=10)
# Percent variance explained per PC, for the scree plot below.
pcvar = geno.pca$sdev^2
pcvar.pct = tibble(pctvar=pcvar/sum(pcvar) * 100,
PC=1:length(pcvar))
ggplot(data = pcvar.pct[1:10,], aes(x = PC, y = pctvar)) +
geom_bar(stat = "identity")
#Population structure visible from PC1 and PC2 & PC2 and PC3
PCs = as_tibble(geno.pca$x) %>%
mutate(ID=data.geno.10000$ID) %>%
select(ID, everything())
ggplot(data = PCs, aes(x = PC1, y = PC2)) +
geom_point()
ggplot(data = PCs, aes(x = PC3, y = PC2)) +
geom_point()
# Join phenotypes onto the PC scores by sample ID.
data.pheno = read_csv("RiceDiversity.44K.MSU6.Phenotypes.csv")
data.pheno.pca = full_join(data.pheno, PCs, by = c("NSFTVID"="ID"))
# PCA plots to explore if subgroups vary by Amylose content, Pericarp color, or Region.
ggplot(data = data.pheno.pca, aes(x = PC1, y = PC2)) +
geom_point(aes(color = `Amylose content`))
ggplot(data = data.pheno.pca, aes(x = PC1, y = PC2)) +
geom_point(aes(color = `Pericarp color`))
ggplot(data = data.pheno.pca, aes(x = PC1, y = PC2)) +
geom_point(aes(color = `Region`)) +
scale_color_brewer(type="qual", palette = "Set1")
ggplot(data = data.pheno.pca, aes(x = PC3, y = PC2)) +
geom_point(aes(color = `Region`)) +
scale_color_brewer(type="qual", palette = "Set1")
# Build the fastStructure .str input: two rows per individual (one per allele),
# with six leading metadata columns all filled with the sample ID.
data.geno.10000.fs = matrix("",nrow=nrow(data.geno.10000)*2,ncol=ncol(data.geno.10000)-1+6)
for (i in 1:nrow(data.geno.10000)) {
data.geno.10000.fs[(i-1)*2+1,1:6] <- data.geno.10000[[i,1]]
data.geno.10000.fs[(i-1)*2+2,1:6] <- data.geno.10000[[i,1]]
data.geno.10000.fs[(i-1)*2+1,-1:-6] <- substr(data.geno.10000[i,-1],1,1)
data.geno.10000.fs[(i-1)*2+2,-1:-6] <- substr(data.geno.10000[i,-1],2,2)
}
# fastStructure's missing-data code is -9.
data.geno.10000.fs[is.na(data.geno.10000.fs)] = -9
write.table(data.geno.10000.fs,file="rice.data.fastStructure.input.str", col.names = FALSE, row.names = FALSE, quote = FALSE)
# Companion plink-style .fam file (no pedigree, sex, or phenotype info).
fam = tibble(
FID=data.geno.10000$ID,
IID=data.geno.10000$ID,
PID=0,
MID=0,
Sex=0,
Ptype=-9)
write.table(fam,file="rice.data.fastStructure.input.fam",col.names = FALSE, row.names = FALSE, quote = FALSE)
# Companion .bim file: chromosome, SNP ID, cM, position, and the two alleles.
bim = data.geno.10000.fs[,-1:-6]
colnames(bim) = colnames(data.geno.10000)[-1]
bim[bim=="-9"] = NA
# NOTE(review): assumes every SNP has exactly two observed alleles; a
# monomorphic SNP would make apply() return a ragged list and break t(bim).
bim = apply(bim,2,function(x) unique(na.omit(x)))
bim = t(bim) %>%
as_tibble() %>%
mutate(SNP_ID=colnames(bim), cM=0)
bim = bim %>%
separate(SNP_ID,into = c("chromosome","position"),sep="_",remove=FALSE) %>%
select(chromosome, SNP_ID, cM, position, allele1=V1, allele2=V2)
write.table(bim,file="rice.data.fastStructure.input.bim",col.names = FALSE, row.names = FALSE, quote = FALSE)
#fastStructure population assignment
system("python /usr/local/src/fastStructure/structure.py -K 4 --input=rice.data.fastStructure.input --output=rice.fastStructure.out --format=str")
# Read the K=4 admixture proportions and relabel columns.
fs_results = read_delim("rice.fastStructure.out.4.meanQ", delim=" ", col_names = FALSE, col_types = 'nnnn')
fs_results = fs_results %>%
mutate(ID=data.geno.10000$ID) %>%
select(ID, pop1=X1, pop2=X2, pop3=X3, pop4=X4)
# Assign each individual to its highest-proportion population.
fs_results$assignedPop = apply(fs_results[,-1], 1, which.max)
fs_results$maxPr = apply(fs_results[,2:5],1,max)
fs_results = fs_results %>%
arrange(assignedPop,desc(maxPr)) %>%
mutate(plot.order=row_number())
fs_results_long = fs_results %>% pivot_longer(pop1:pop4,
names_to="population",
values_to="proportion")
# Stacked-bar admixture plot, individuals ordered by assigned population.
fs_results_long %>%
ggplot(aes(x=plot.order, y=proportion, color=population, fill=population)) +
geom_col() +
ylab("genome proportion") +
scale_color_brewer(type="div") + scale_fill_brewer(type="div")
fs_results = fs_results %>% mutate(assignedPop=as.character(assignedPop))
geno.pca.pop = full_join(fs_results, PCs, by = "ID")
# PCA scatterplots colored by fastStructure assignment.
ggplot(data = geno.pca.pop, aes(x = PC1, y = PC2)) +
geom_point(aes(color = assignedPop))
ggplot(data = geno.pca.pop, aes(x = PC3, y = PC2)) +
geom_point(aes(color = assignedPop))
# GWAs of Flowering time variation
library(statgenGWAS)
pheno.geno.pca.pop = left_join(geno.pca.pop, data.pheno, by=c("ID" = "NSFTVID"))
colnames(pheno.geno.pca.pop) = make.names(colnames(pheno.geno.pca.pop))
# Histogram and Boxplot of 4 populations made by fastStructure
ggplot(data=pheno.geno.pca.pop, aes(x=Flowering.time.at.Aberdeen)) +
geom_histogram(binwidth = 10) +
facet_wrap(facets= ~ assignedPop) +
ggtitle("Flowering time at Aberdeen by Population")
ggplot(data=pheno.geno.pca.pop, aes(x=Flowering.time.at.Aberdeen)) +
geom_boxplot() +
facet_wrap(facets= ~ assignedPop) +
ggtitle("Flowering time at Aberdeen by Population")
# ANOVA of Flowering time between populations
aov2 = aov(Flowering.time.at.Aberdeen ~ assignedPop,data=pheno.geno.pca.pop)
summary(aov2) # p-value of 0.0523
# GWAS
# Reload the full (unsampled) genotype matrix for the GWAS itself.
data.geno = read_csv("Rice_44K_genotypes.csv",
na=c("NA","00")) %>%
rename(ID=X1) %>%
dplyr::select(-`6_17160794_1`)
data.geno = data.geno %>% as.data.frame() %>% column_to_rownames("ID")
# Marker map: chromosome and position parsed from the SNP names.
data.map = data.frame(SNP=colnames(data.geno))
data.map = data.map %>%
separate(SNP, c("chr","pos"), sep = "_", remove = FALSE, convert = TRUE) %>%
column_to_rownames("SNP")
data.pheno.small = data.pheno %>%
set_names(make.names(colnames(.))) %>% # fixes the names
dplyr::rename(genotype=NSFTVID) %>%
select(genotype, where(is.numeric)) %>%
as.data.frame()
data.cv = geno.pca.pop %>%
as.data.frame() %>%
column_to_rownames("ID")
gData.rice = createGData(geno=data.geno, map = data.map, pheno = data.pheno.small, covar = data.cv)
gData.rice.recode = gData.rice %>% codeMarkers(verbose = TRUE)
data.kinship = kinship(gData.rice.recode$markers)
# All-zero kinship matrix = "no relatedness correction" baseline.
nullmat = matrix(0, ncol=413,nrow=413, dimnames = dimnames(data.kinship))
gwas.noCorrection = runSingleTraitGwas(gData = gData.rice.recode,
traits = "Flowering.time.at.Aberdeen",
kin = nullmat)
summary(gwas.noCorrection)
plot(gwas.noCorrection, plotType = "qq")
plot(gwas.noCorrection, plotType = "manhattan")
# GWAS with the first four PCs as covariates (structure correction).
gwas.PCA = runSingleTraitGwas(gData = gData.rice.recode,
traits = "Flowering.time.at.Aberdeen",
kin = nullmat,
covar = c("PC1", "PC2", "PC3", "PC4"))
summary(gwas.PCA)
plot(gwas.PCA, plotType = "qq")
plot(gwas.PCA, plotType = "manhattan")
# GWAS with the kinship matrix (mixed-model correction).
gwas.K = runSingleTraitGwas(gData = gData.rice.recode,
traits = "Flowering.time.at.Aberdeen",
kin = data.kinship)
summary(gwas.K)
plot(gwas.K, plotType = "qq")
plot(gwas.K, plotType = "manhattan")
sigSnps = gwas.K$signSnp[[1]]
head(arrange(sigSnps, pValue), 10)
# SNP: 6_9531374 has the highest significance, located within the LOC_Os06g16590 gene.
|
/GWAS of Indica and Japonica rice varieties.R
|
no_license
|
ian-whaling/GWAS-of-Indica-and-Japonica-rice-varieties
|
R
| false
| false
| 7,420
|
r
|
library(tidyverse)
#Random sample of 10000 SNPs from the full data set
# NOTE(review): the sample() call below is not seeded, so the SNP subset
# differs between runs — confirm whether reproducibility is required.
data.geno = read_csv("Rice_44K_genotypes.csv.gz",
na=c("NA","00"))
data.geno = data.geno %>% select(-`6_17160794_1`)
names(data.geno)[1] = "ID"
data.geno.10000 = data.geno[,c(1,sample(2:ncol(data.geno),10000))]
# Convert genotype strings to numeric codes (one factor level per genotype).
geno.numeric = data.geno.10000[,-1] %>%
lapply(factor) %>%
as.data.frame() %>%
data.matrix()
# Impute missing genotypes with the per-SNP (column) mean.
geno.numeric.fill =
apply(geno.numeric, 2, function(x) {
x[is.na(x)] <- mean(x, na.rm=T)
x})
#PCA of first 10 PCs
geno.pca = prcomp(geno.numeric.fill,
rank.=10)
# Percent variance explained per PC, for the scree plot below.
pcvar = geno.pca$sdev^2
pcvar.pct = tibble(pctvar=pcvar/sum(pcvar) * 100,
PC=1:length(pcvar))
ggplot(data = pcvar.pct[1:10,], aes(x = PC, y = pctvar)) +
geom_bar(stat = "identity")
#Population structure visible from PC1 and PC2 & PC2 and PC3
PCs = as_tibble(geno.pca$x) %>%
mutate(ID=data.geno.10000$ID) %>%
select(ID, everything())
ggplot(data = PCs, aes(x = PC1, y = PC2)) +
geom_point()
ggplot(data = PCs, aes(x = PC3, y = PC2)) +
geom_point()
# Join phenotypes onto the PC scores by sample ID.
data.pheno = read_csv("RiceDiversity.44K.MSU6.Phenotypes.csv")
data.pheno.pca = full_join(data.pheno, PCs, by = c("NSFTVID"="ID"))
# PCA plots to explore if subgroups vary by Amylose content, Pericarp color, or Region.
ggplot(data = data.pheno.pca, aes(x = PC1, y = PC2)) +
geom_point(aes(color = `Amylose content`))
ggplot(data = data.pheno.pca, aes(x = PC1, y = PC2)) +
geom_point(aes(color = `Pericarp color`))
ggplot(data = data.pheno.pca, aes(x = PC1, y = PC2)) +
geom_point(aes(color = `Region`)) +
scale_color_brewer(type="qual", palette = "Set1")
ggplot(data = data.pheno.pca, aes(x = PC3, y = PC2)) +
geom_point(aes(color = `Region`)) +
scale_color_brewer(type="qual", palette = "Set1")
# Build the fastStructure .str input: two rows per individual (one per allele),
# with six leading metadata columns all filled with the sample ID.
data.geno.10000.fs = matrix("",nrow=nrow(data.geno.10000)*2,ncol=ncol(data.geno.10000)-1+6)
for (i in 1:nrow(data.geno.10000)) {
data.geno.10000.fs[(i-1)*2+1,1:6] <- data.geno.10000[[i,1]]
data.geno.10000.fs[(i-1)*2+2,1:6] <- data.geno.10000[[i,1]]
data.geno.10000.fs[(i-1)*2+1,-1:-6] <- substr(data.geno.10000[i,-1],1,1)
data.geno.10000.fs[(i-1)*2+2,-1:-6] <- substr(data.geno.10000[i,-1],2,2)
}
# fastStructure's missing-data code is -9.
data.geno.10000.fs[is.na(data.geno.10000.fs)] = -9
write.table(data.geno.10000.fs,file="rice.data.fastStructure.input.str", col.names = FALSE, row.names = FALSE, quote = FALSE)
# Companion plink-style .fam file (no pedigree, sex, or phenotype info).
fam = tibble(
FID=data.geno.10000$ID,
IID=data.geno.10000$ID,
PID=0,
MID=0,
Sex=0,
Ptype=-9)
write.table(fam,file="rice.data.fastStructure.input.fam",col.names = FALSE, row.names = FALSE, quote = FALSE)
# Companion .bim file: chromosome, SNP ID, cM, position, and the two alleles.
bim = data.geno.10000.fs[,-1:-6]
colnames(bim) = colnames(data.geno.10000)[-1]
bim[bim=="-9"] = NA
# NOTE(review): assumes every SNP has exactly two observed alleles; a
# monomorphic SNP would make apply() return a ragged list and break t(bim).
bim = apply(bim,2,function(x) unique(na.omit(x)))
bim = t(bim) %>%
as_tibble() %>%
mutate(SNP_ID=colnames(bim), cM=0)
bim = bim %>%
separate(SNP_ID,into = c("chromosome","position"),sep="_",remove=FALSE) %>%
select(chromosome, SNP_ID, cM, position, allele1=V1, allele2=V2)
write.table(bim,file="rice.data.fastStructure.input.bim",col.names = FALSE, row.names = FALSE, quote = FALSE)
#fastStructure population assignment
system("python /usr/local/src/fastStructure/structure.py -K 4 --input=rice.data.fastStructure.input --output=rice.fastStructure.out --format=str")
# Read the K=4 admixture proportions and relabel columns.
fs_results = read_delim("rice.fastStructure.out.4.meanQ", delim=" ", col_names = FALSE, col_types = 'nnnn')
fs_results = fs_results %>%
mutate(ID=data.geno.10000$ID) %>%
select(ID, pop1=X1, pop2=X2, pop3=X3, pop4=X4)
# Assign each individual to its highest-proportion population.
fs_results$assignedPop = apply(fs_results[,-1], 1, which.max)
fs_results$maxPr = apply(fs_results[,2:5],1,max)
fs_results = fs_results %>%
arrange(assignedPop,desc(maxPr)) %>%
mutate(plot.order=row_number())
fs_results_long = fs_results %>% pivot_longer(pop1:pop4,
names_to="population",
values_to="proportion")
# Stacked-bar admixture plot, individuals ordered by assigned population.
fs_results_long %>%
ggplot(aes(x=plot.order, y=proportion, color=population, fill=population)) +
geom_col() +
ylab("genome proportion") +
scale_color_brewer(type="div") + scale_fill_brewer(type="div")
fs_results = fs_results %>% mutate(assignedPop=as.character(assignedPop))
geno.pca.pop = full_join(fs_results, PCs, by = "ID")
# PCA scatterplots colored by fastStructure assignment.
ggplot(data = geno.pca.pop, aes(x = PC1, y = PC2)) +
geom_point(aes(color = assignedPop))
ggplot(data = geno.pca.pop, aes(x = PC3, y = PC2)) +
geom_point(aes(color = assignedPop))
# GWAs of Flowering time variation
library(statgenGWAS)
pheno.geno.pca.pop = left_join(geno.pca.pop, data.pheno, by=c("ID" = "NSFTVID"))
colnames(pheno.geno.pca.pop) = make.names(colnames(pheno.geno.pca.pop))
# Histogram and Boxplot of 4 populations made by fastStructure
ggplot(data=pheno.geno.pca.pop, aes(x=Flowering.time.at.Aberdeen)) +
geom_histogram(binwidth = 10) +
facet_wrap(facets= ~ assignedPop) +
ggtitle("Flowering time at Aberdeen by Population")
ggplot(data=pheno.geno.pca.pop, aes(x=Flowering.time.at.Aberdeen)) +
geom_boxplot() +
facet_wrap(facets= ~ assignedPop) +
ggtitle("Flowering time at Aberdeen by Population")
# ANOVA of Flowering time between populations
aov2 = aov(Flowering.time.at.Aberdeen ~ assignedPop,data=pheno.geno.pca.pop)
summary(aov2) # p-value of 0.0523
# GWAS
# Reload the full (unsampled) genotype matrix for the GWAS itself.
data.geno = read_csv("Rice_44K_genotypes.csv",
na=c("NA","00")) %>%
rename(ID=X1) %>%
dplyr::select(-`6_17160794_1`)
data.geno = data.geno %>% as.data.frame() %>% column_to_rownames("ID")
# Marker map: chromosome and position parsed from the SNP names.
data.map = data.frame(SNP=colnames(data.geno))
data.map = data.map %>%
separate(SNP, c("chr","pos"), sep = "_", remove = FALSE, convert = TRUE) %>%
column_to_rownames("SNP")
data.pheno.small = data.pheno %>%
set_names(make.names(colnames(.))) %>% # fixes the names
dplyr::rename(genotype=NSFTVID) %>%
select(genotype, where(is.numeric)) %>%
as.data.frame()
data.cv = geno.pca.pop %>%
as.data.frame() %>%
column_to_rownames("ID")
gData.rice = createGData(geno=data.geno, map = data.map, pheno = data.pheno.small, covar = data.cv)
gData.rice.recode = gData.rice %>% codeMarkers(verbose = TRUE)
data.kinship = kinship(gData.rice.recode$markers)
# All-zero kinship matrix = "no relatedness correction" baseline.
nullmat = matrix(0, ncol=413,nrow=413, dimnames = dimnames(data.kinship))
gwas.noCorrection = runSingleTraitGwas(gData = gData.rice.recode,
traits = "Flowering.time.at.Aberdeen",
kin = nullmat)
summary(gwas.noCorrection)
plot(gwas.noCorrection, plotType = "qq")
plot(gwas.noCorrection, plotType = "manhattan")
# GWAS with the first four PCs as covariates (structure correction).
gwas.PCA = runSingleTraitGwas(gData = gData.rice.recode,
traits = "Flowering.time.at.Aberdeen",
kin = nullmat,
covar = c("PC1", "PC2", "PC3", "PC4"))
summary(gwas.PCA)
plot(gwas.PCA, plotType = "qq")
plot(gwas.PCA, plotType = "manhattan")
# GWAS with the kinship matrix (mixed-model correction).
gwas.K = runSingleTraitGwas(gData = gData.rice.recode,
traits = "Flowering.time.at.Aberdeen",
kin = data.kinship)
summary(gwas.K)
plot(gwas.K, plotType = "qq")
plot(gwas.K, plotType = "manhattan")
sigSnps = gwas.K$signSnp[[1]]
head(arrange(sigSnps, pValue), 10)
# SNP: 6_9531374 has the highest significance, located within the LOC_Os06g16590 gene.
|
# libraries ----
library(tidyverse)
library(here)
library(glue)
# variables ----
# CSV listing the MPAs (marine protected areas) to generate reports for.
mpas_csv <- here("data/mpas.csv")
# make MPAs ----
# col_types = cols() silences readr's column-specification message.
mpas <- read_csv(mpas_csv, col_types=cols()) %>%
arrange(mpa_name)
# Render the report(s) for a single MPA.
#
# mpa_id, mpa_name: one row of the global `mpas` tibble.
# formats: any subset of c("html", "pdf"); defaults to HTML only.
#
# Side effects: prints a progress message and writes
# docs/mpa_<mpa_id>.<ext> via rmarkdown::render("mpa.Rmd").
# Relies on the global `mpas` tibble for the progress counter.
make_mpa <- function(mpa_id, mpa_name, formats=c("html")){
  # Progress message, e.g. "03 of 12 MPAs: Some Name (some_id)"
  row_label <- sprintf("%02d", which(mpa_id == mpas$mpa_id))
  message(row_label, " of ", nrow(mpas), " MPAs: ", mpa_name, " (", mpa_id, ")")
  # Render one output format of mpa.Rmd into docs/.
  render_format <- function(ext) {
    rmarkdown::render(
      input = "mpa.Rmd",
      params = list(
        mpa_id = mpa_id),
      output_file = paste0("docs/mpa_", mpa_id, ".", ext))
  }
  if ("html" %in% formats)
    render_format("html")
  if ("pdf" %in% formats)
    render_format("pdf")
}
# walk through all sites to render html
# pwalk() calls make_mpa(mpa_id, mpa_name) once per row of `mpas`.
mpas %>%
select(mpa_id, mpa_name) %>%
pwalk(make_mpa)
|
/make.R
|
permissive
|
mci/mpatlas-report4r
|
R
| false
| false
| 966
|
r
|
# libraries ----
library(tidyverse)
library(here)
library(glue)
# variables ----
# CSV listing the MPAs (marine protected areas) to generate reports for.
mpas_csv <- here("data/mpas.csv")
# make MPAs ----
# col_types = cols() silences readr's column-specification message.
mpas <- read_csv(mpas_csv, col_types=cols()) %>%
arrange(mpa_name)
# Render the report(s) for one MPA; relies on the global `mpas` tibble
# for the progress counter. Writes docs/mpa_<mpa_id>.<format>.
make_mpa <- function(mpa_id, mpa_name, formats=c("html")){
# mpa_id = mpas$mpa_id[1]
# show message of progress
i_row <- sprintf("%02d", which(mpa_id == mpas$mpa_id))
message(glue("{i_row} of {nrow(mpas)} MPAs: {mpa_name} ({mpa_id})"))
# render html
if ("html" %in% formats)
rmarkdown::render(
input = "mpa.Rmd",
params = list(
mpa_id = mpa_id),
output_file = glue("docs/mpa_{mpa_id}.html"))
# render pdf (skipped under the default formats = c("html"))
if ("pdf" %in% formats)
rmarkdown::render(
input = "mpa.Rmd",
params = list(
mpa_id = mpa_id),
output_file = glue("docs/mpa_{mpa_id}.pdf"))
}
# walk through all sites to render html
# pwalk() calls make_mpa(mpa_id, mpa_name) once per row of `mpas`.
mpas %>%
select(mpa_id, mpa_name) %>%
pwalk(make_mpa)
|
###############################################################################
## Applies mirror boundary conditions to a 1d or 2d object. Implemented in R ##
## version 3.4.3. Last update: Fuks 180203                                   ##
###############################################################################
# Pads `indata` with `ypadding` mirrored rows above/below and `xpadding`
# mirrored columns left/right. The edge row/column itself is not repeated
# (position -k maps to k; position n+k maps to n-k+1).
#
# indata   : numeric vector or matrix (a vector is treated as an n x 1 matrix)
# ypadding : rows of padding added on each vertical side
# xpadding : columns of padding added on each horizontal side
#
# Returns the padded matrix; when xpadding == 0 the result is flattened to a
# plain numeric vector (column-major), preserving the original behaviour.
# NOTE: like the original, each padding value is reduced by at most one when it
# reaches the object size; padding more than one past the size is unsupported.
mirrorbound <- function(indata, ypadding, xpadding){
  indata <- as.matrix(indata)
  ny <- dim(indata)[1] # Object dimensions
  nx <- dim(indata)[2]
  if(xpadding >= nx){ # Reduces size of padding if equal to or larger than object
    xpadding <- xpadding-1
  }
  # BUG FIX: the original duplicated the x-padding check here; the second
  # guard must cap the *y* padding against the number of rows.
  if(ypadding >= ny){
    ypadding <- ypadding-1
  }
  outdata <- matrix(0, ny+2*ypadding, nx+2*xpadding)
  yiter <- 0
  xiter <- 0
  kloop <- c((-ypadding):(ny+ypadding))[-(ypadding+1)] # Loop vectors excluding "0" position
  lloop <- c((-xpadding):(nx+xpadding))[-(xpadding+1)]
  for(k in kloop){ # Loops through y
    yout <- k # Position in the original matrix
    if(k < 0){
      yout <- -k # Mirrored above the top edge
    }
    if(k > ny){
      yout <- 2*ny-k+1 # Mirrored below the bottom edge
    }
    yiter <- yiter+1
    for(l in lloop){ # Loops through x
      xout <- l # Position in the original matrix
      if(l < 0){
        xout <- -l
      }
      if(l > nx){
        xout <- 2*nx-l+1
      }
      xiter <- xiter+1
      outdata[yiter, xiter] <- indata[yout, xout] # Fills the padded matrix
    }
    xiter <- 0
  }
  if(xpadding == 0){
    outdata <- as.numeric(outdata) # 1-d objects are returned as plain vectors
  }
  return(outdata)
}
|
/mirrorbound.r
|
no_license
|
jonas-raposinha/R-trend-correct
|
R
| false
| false
| 1,578
|
r
|
###############################################################################
## Applies mirror boundary conditions to a 1d or 2d object. Implemented in R ##
## version 3.4.3. Last update: Fuks 180203                                   ##
###############################################################################
# Pads `indata` with `ypadding` mirrored rows above/below and `xpadding`
# mirrored columns left/right. The edge row/column itself is not repeated
# (position -k maps to k; position n+k maps to n-k+1).
#
# indata   : numeric vector or matrix (a vector is treated as an n x 1 matrix)
# ypadding : rows of padding added on each vertical side
# xpadding : columns of padding added on each horizontal side
#
# Returns the padded matrix; when xpadding == 0 the result is flattened to a
# plain numeric vector (column-major), preserving the original behaviour.
# NOTE: like the original, each padding value is reduced by at most one when it
# reaches the object size; padding more than one past the size is unsupported.
mirrorbound <- function(indata, ypadding, xpadding){
  indata <- as.matrix(indata)
  ny <- dim(indata)[1] # Object dimensions
  nx <- dim(indata)[2]
  if(xpadding >= nx){ # Reduces size of padding if equal to or larger than object
    xpadding <- xpadding-1
  }
  # BUG FIX: the original duplicated the x-padding check here; the second
  # guard must cap the *y* padding against the number of rows.
  if(ypadding >= ny){
    ypadding <- ypadding-1
  }
  outdata <- matrix(0, ny+2*ypadding, nx+2*xpadding)
  yiter <- 0
  xiter <- 0
  kloop <- c((-ypadding):(ny+ypadding))[-(ypadding+1)] # Loop vectors excluding "0" position
  lloop <- c((-xpadding):(nx+xpadding))[-(xpadding+1)]
  for(k in kloop){ # Loops through y
    yout <- k # Position in the original matrix
    if(k < 0){
      yout <- -k # Mirrored above the top edge
    }
    if(k > ny){
      yout <- 2*ny-k+1 # Mirrored below the bottom edge
    }
    yiter <- yiter+1
    for(l in lloop){ # Loops through x
      xout <- l # Position in the original matrix
      if(l < 0){
        xout <- -l
      }
      if(l > nx){
        xout <- 2*nx-l+1
      }
      xiter <- xiter+1
      outdata[yiter, xiter] <- indata[yout, xout] # Fills the padded matrix
    }
    xiter <- 0
  }
  if(xpadding == 0){
    outdata <- as.numeric(outdata) # 1-d objects are returned as plain vectors
  }
  return(outdata)
}
|
### Set WD
# NOTE(review): setwd() makes the script machine-specific; kept for
# compatibility with the original deployment layout.
setwd('/media/FD/RMISC/DaphStats/')
# Get the data
source('./preproc.R')
# Drop the leading identifier columns from each table produced by preproc.R.
TABLES <- list(atcd = atcd2[-(1:4)], tech=tech2[-(1:5)], post=post2[-(1:7)]) #2 to get the modified names
tablenames <- names(TABLES)
# FIX: spell out FALSE — the single-letter alias F is reassignable in R.
dir.create('../DATA/', showWarnings = FALSE)
save.image('../DATA/DBs.RData', safe = TRUE)
## Update the data
source('./WEB_launchQA.R')
## Copy the files
# Replace the web server's DATA folder with the freshly saved one.
unlink('/media/FD/WEB/TEST2/current/DATA', recursive = TRUE)
file.copy('../DATA', '/media/FD/WEB/TEST2/current/', recursive = TRUE, copy.mode = TRUE)
|
/legacy/web/DBWEB_get_data.R
|
no_license
|
antoine-lizee/Promonto-stats
|
R
| false
| false
| 523
|
r
|
### Set WD
# NOTE(review): setwd() makes the script machine-specific; kept for
# compatibility with the original deployment layout.
setwd('/media/FD/RMISC/DaphStats/')
# Get the data
source('./preproc.R')
# Drop the leading identifier columns from each table produced by preproc.R.
TABLES <- list(atcd = atcd2[-(1:4)], tech=tech2[-(1:5)], post=post2[-(1:7)]) #2 to get the modified names
tablenames <- names(TABLES)
# FIX: spell out FALSE — the single-letter alias F is reassignable in R.
dir.create('../DATA/', showWarnings = FALSE)
save.image('../DATA/DBs.RData', safe = TRUE)
## Update the data
source('./WEB_launchQA.R')
## Copy the files
# Replace the web server's DATA folder with the freshly saved one.
unlink('/media/FD/WEB/TEST2/current/DATA', recursive = TRUE)
file.copy('../DATA', '/media/FD/WEB/TEST2/current/', recursive = TRUE, copy.mode = TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/webpower.R
\name{wp.mc.chisq.diff}
\alias{wp.mc.chisq.diff}
\title{Statistical Power Analysis for SEM Based on Chi-square Difference Test}
\usage{
wp.mc.chisq.diff(full.model.pop, full.model,
reduced.model, N=100, R=1000, alpha=0.05)
}
\arguments{
\item{full.model.pop}{Full model (under the alternative hypothesis) with population parameters.}
\item{full.model}{Full model (under the alternative hypothesis) lavaan specification.}
\item{reduced.model}{Reduced model (under the null hypothesis) lavaan specification.}
\item{N}{Sample size.}
\item{R}{Number of Monte Carlo replications.}
\item{alpha}{significance level chosen for the test. It equals 0.05 by default.}
}
\value{
An object of the power analysis.
\item{power}{Statistical power.}
\item{df}{Degrees of freedom}
\item{chi.diff}{Chi-square differences between the reduced model and the full model}
}
\description{
This function is for SEM power analysis based on the chi-square difference test.
}
\examples{
\donttest{
set.seed(20220722)
full.model.pop <-'
y1 ~ 0.4*x
y2 ~ 0.5*x + 0.2*y1
y3 ~ 0.4*x
y4 ~ 0.4*y1 + 0.4*y2 + 0.4*y3
y1 ~~ 0.84*y1
y2 ~~ 0.61*y2
y3 ~~ 0.84*y3
y4 ~~ 0.27*y4
'
full.model <-'
y1 ~ x
y2 ~ x + y1
y3 ~ x
y4 ~ y1 + y2 + y3
'
reduced.model <-'
y1 ~ x
y2 ~ x
y3 ~ x
y4 ~ y1 + y3
'
wp.mc.chisq.diff(full.model.pop, full.model, reduced.model)
}
}
\references{
Demidenko, E. (2007). Sample size determination for logistic regression revisited. Statistics in medicine, 26(18), 3385-3397.
Zhang, Z., & Yuan, K.-H. (2018). Practical Statistical Power Analysis Using Webpower and R (Eds). Granger, IN: ISDSA Press.
}
|
/man/wp.mc.chisq.diff.Rd
|
no_license
|
cran/WebPower
|
R
| false
| true
| 1,748
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/webpower.R
\name{wp.mc.chisq.diff}
\alias{wp.mc.chisq.diff}
\title{Statistical Power Analysis for SEM Based on Chi-square Difference Test}
\usage{
wp.mc.chisq.diff(full.model.pop, full.model,
reduced.model, N=100, R=1000, alpha=0.05)
}
\arguments{
\item{full.model.pop}{Full model (under the alternative hypothesis) with population parameters.}
\item{full.model}{Full model (under the alternative hypothesis) lavaan specification.}
\item{reduced.model}{Reduced model (under the null hypothesis) lavaan specification.}
\item{N}{Sample size.}
\item{R}{Number of Monte Carlo replications.}
\item{alpha}{significance level chosen for the test. It equals 0.05 by default.}
}
\value{
An object of the power analysis.
\item{power}{Statistical power.}
\item{df}{Degrees of freedom}
\item{chi.diff}{Chi-square differences between the reduced model and the full model}
}
\description{
This function is for SEM power analysis based on the chi-square difference test.
}
\examples{
\donttest{
set.seed(20220722)
full.model.pop <-'
y1 ~ 0.4*x
y2 ~ 0.5*x + 0.2*y1
y3 ~ 0.4*x
y4 ~ 0.4*y1 + 0.4*y2 + 0.4*y3
y1 ~~ 0.84*y1
y2 ~~ 0.61*y2
y3 ~~ 0.84*y3
y4 ~~ 0.27*y4
'
full.model <-'
y1 ~ x
y2 ~ x + y1
y3 ~ x
y4 ~ y1 + y2 + y3
'
reduced.model <-'
y1 ~ x
y2 ~ x
y3 ~ x
y4 ~ y1 + y3
'
wp.mc.chisq.diff(full.model.pop, full.model, reduced.model)
}
}
\references{
Demidenko, E. (2007). Sample size determination for logistic regression revisited. Statistics in medicine, 26(18), 3385-3397.
Zhang, Z., & Yuan, K.-H. (2018). Practical Statistical Power Analysis Using Webpower and R (Eds). Granger, IN: ISDSA Press.
}
|
# Auto-generated fuzz-test case: feeds fixed integer vectors into the internal
# IntervalSurgeon:::rcpp_pile C++ routine and prints the result's structure.
testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837635476L, 1632068659L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result)
|
/IntervalSurgeon/inst/testfiles/rcpp_pile/AFL_rcpp_pile/rcpp_pile_valgrind_files/1609860290-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 729
|
r
|
# Auto-generated fuzz-test case: feeds fixed integer vectors into the internal
# IntervalSurgeon:::rcpp_pile C++ routine and prints the result's structure.
testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837635476L, 1632068659L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result)
|
71ff1be93a9cb17f222962f65f056dac query33_query08_1344.qdimacs 954 2223
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query33_query08_1344/query33_query08_1344.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 70
|
r
|
71ff1be93a9cb17f222962f65f056dac query33_query08_1344.qdimacs 954 2223
|
library(highcharter)
### Name: hc_theme_ggplot2
### Title: ggplot2 theme for highcharts
### Aliases: hc_theme_ggplot2
### ** Examples
# Render the built-in demo chart with the ggplot2-look theme applied.
highcharts_demo() %>%
hc_add_theme(hc_theme_ggplot2())
|
/data/genthat_extracted_code/highcharter/examples/hc_theme_ggplot2.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 203
|
r
|
library(highcharter)
### Name: hc_theme_ggplot2
### Title: ggplot2 theme for highcharts
### Aliases: hc_theme_ggplot2
### ** Examples
# Render the built-in demo chart with the ggplot2-look theme applied.
highcharts_demo() %>%
hc_add_theme(hc_theme_ggplot2())
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 28942
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 28942
c
c Input Parameter (command line, file):
c input filename QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#134.A#48.c#.w#9.s#27.asp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 9829
c no.of clauses 28942
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 28942
c
c QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#134.A#48.c#.w#9.s#27.asp.qdimacs 9829 28942 E1 [] 0 134 9695 28942 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#134.A#48.c#.w#9.s#27.asp/ctrl.e#1.a#3.E#134.A#48.c#.w#9.s#27.asp.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 732
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 28942
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 28942
c
c Input Parameter (command line, file):
c input filename QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#134.A#48.c#.w#9.s#27.asp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 9829
c no.of clauses 28942
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 28942
c
c QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#134.A#48.c#.w#9.s#27.asp.qdimacs 9829 28942 E1 [] 0 134 9695 28942 NONE
|
# Est.PI: estimate each taxon's population proportion (pi) with confidence
# intervals for one or more groups of taxa count data, using a
# Dirichlet-multinomial model fit by both maximum likelihood (MLE) and
# method of moments (MoM).
#
# Arguments:
#   group.data - list of count matrices / data frames (rows = samples,
#                columns = taxa), one element per group.
#   conf       - confidence level for the interval bounds (default .95).
#
# Returns: list(MLE=..., MOM=...), where each element contains
#   params - data frame with columns Taxa, Group, PI, SE, Upper, Lower
#   theta  - per-group overdispersion (theta) estimate and its standard error.
Est.PI <-
function(group.data, conf=.95){
if(missing(group.data))
stop("group.data is missing.")
# Check the number of groups
numGroups <- length(group.data)
# Make sure we have the same columns
taxaCounts <- sapply(group.data, ncol)
numTaxa <- taxaCounts[1]
if(any(taxaCounts != numTaxa)){
# formatDataSets is a package-level helper (defined elsewhere) that aligns
# the taxa columns across all groups.
warning("Group columns do not match, running formatDataSets.")
group.data <- formatDataSets(group.data)
}
# Make sure we have group names; fall back to "Data Set 1", "Data Set 2", ...
if(is.null(names(group.data))){
grpNames <- paste("Data Set", 1:numGroups)
}else{
grpNames <- names(group.data)
}
# Calculate the pi and error bars for each group.
# tempParam columns (named later): 1=Taxa, 2=Group, 3=PI, 4=SE, 5=Upper, 6=Lower.
# "...1" objects hold the MLE results, "...2" objects hold the MoM results.
allParamsMLE <- data.frame(matrix(0, 0, 6))
allParamsMOM <- data.frame(matrix(0, 0, 6))
thetaMLE <- data.frame(matrix(0, numGroups, 3))
thetaMOM <- data.frame(matrix(0, numGroups, 3))
for(i in 1:numGroups){
tempData <- group.data[[i]]
# Check the data has samples
numSub <- nrow(tempData)
if(numSub < 1)
stop("At least one data set in group.data is empty")
tempParam1 <- data.frame(matrix(0, ncol(tempData), 6))
tempParam2 <- data.frame(matrix(0, ncol(tempData), 6))
tempParam2[,2] <- grpNames[i]
tempParam1[,2] <- grpNames[i]
# Check for taxa with 0 column sums (add 1 to everything if this happens)
# so the Dirichlet-multinomial fit does not degenerate on all-zero taxa.
badTaxa <- which(colSums(tempData) == 0)
if(length(badTaxa) != 0)
tempData <- tempData + 1
# Handle having 1 sample: pi reduces to the observed proportions, no SE is
# available, and the CI collapses to the point estimate.
if(numSub == 1){
tempParam1[,1] <- colnames(tempData)
tempParam1[,3] <- unlist(tempData[1,]/sum(tempData))
tempParam1[,4] <- NA
tempParam1[,5] <- tempParam1[,3]
tempParam1[,6] <- tempParam1[,3]
tempParam1 <- tempParam1[order(tempParam1[,1]),]
tempTheta1 <- c(0, NA)
tempParam2 <- tempParam1
tempTheta2 <- tempTheta1
}else{
# Get the MoM and MLE for every taxa.
# NOTE(review): assumes dirmult.summary() returns one row per taxon plus a
# final theta row, with columns MLE, se.MLE, MoM, se.MOM -- confirm against
# the dirmult package documentation.
fsum <- dirmult::dirmult.summary(tempData, dirmult::dirmult(tempData, trace=FALSE))
tempTheta <- fsum[nrow(fsum),]
fsum <- fsum[-nrow(fsum),]
# Turn the summary into a data frame we can plot from
tempParam1[,1] <- rownames(fsum)
tempParam1[,3] <- fsum$MLE
tempParam1[,4] <- fsum$se.MLE
tempTheta1 <- tempTheta[,2:3]
tempParam2[,1] <- rownames(fsum)
tempParam2[,3] <- fsum$MoM
tempParam2[,4] <- fsum$se.MOM
tempTheta2 <- tempTheta[,4:5]
# Calc Upper and Lower bounds for CI: t quantile when the smallest group has
# fewer than 30 subjects, normal quantile otherwise.
# NOTE(review): minSubj is loop-invariant; recomputing it each iteration is
# redundant but harmless.
minSubj <- min(sapply(group.data, function(x) nrow(x)))
if(minSubj < 30){
val <- stats::qt(0.5 + conf *0.5, df=minSubj-1)
}else{
val <- stats::qnorm(0.5 + conf*0.5)
}
tempParam1[,5] <- tempParam1[,3] + val*tempParam1[,4]
tempParam1[,6] <- tempParam1[,3] - val*tempParam1[,4]
tempParam2[,5] <- tempParam2[,3] + val*tempParam2[,4]
tempParam2[,6] <- tempParam2[,3] - val*tempParam2[,4]
}
# Save outside of loop
allParamsMLE <- rbind(allParamsMLE, tempParam1)
thetaMLE[i,] <- c(grpNames[i], tempTheta1)
allParamsMOM <- rbind(allParamsMOM, tempParam2)
thetaMOM[i,] <- c(grpNames[i], tempTheta2)
}
colnames(allParamsMLE) <- c("Taxa", "Group", "PI", "SE", "Upper", "Lower")
colnames(thetaMLE) <- c("Group", colnames(tempTheta1))
colnames(allParamsMOM) <- c("Taxa", "Group", "PI", "SE", "Upper", "Lower")
colnames(thetaMOM) <- c("Group", colnames(tempTheta2))
# Make sure none of our error bars go over 100 or below 0 (pi lives in [0, 1])
allParamsMLE$Upper <- ifelse(allParamsMLE$Upper > 1, 1, allParamsMLE$Upper)
allParamsMLE$Lower <- ifelse(allParamsMLE$Lower < 0, 0, allParamsMLE$Lower)
allParamsMOM$Upper <- ifelse(allParamsMOM$Upper > 1, 1, allParamsMOM$Upper)
allParamsMOM$Lower <- ifelse(allParamsMOM$Lower < 0, 0, allParamsMOM$Lower)
# Factor the data so it stays in the right order when tabulated/plotted
allParamsMLE$Group <- factor(allParamsMLE$Group, levels=grpNames)
allParamsMLE$Taxa <- factor(allParamsMLE$Taxa, levels=unique(colnames(group.data[[1]])))
allParamsMOM$Group <- factor(allParamsMOM$Group, levels=grpNames)
allParamsMOM$Taxa <- factor(allParamsMOM$Taxa, levels=unique(colnames(group.data[[1]])))
MLE <- list(params=allParamsMLE, theta=thetaMLE)
MOM <- list(params=allParamsMOM, theta=thetaMOM)
return(list(MLE=MLE, MOM=MOM))
}
|
/R/Est.PI.R
|
no_license
|
cran/HMP
|
R
| false
| false
| 4,227
|
r
|
# Est.PI: estimate each taxon's population proportion (pi) with confidence
# intervals for one or more groups of taxa count data, using a
# Dirichlet-multinomial model fit by both maximum likelihood (MLE) and
# method of moments (MoM).
#
# Arguments:
#   group.data - list of count matrices / data frames (rows = samples,
#                columns = taxa), one element per group.
#   conf       - confidence level for the interval bounds (default .95).
#
# Returns: list(MLE=..., MOM=...), where each element contains
#   params - data frame with columns Taxa, Group, PI, SE, Upper, Lower
#   theta  - per-group overdispersion (theta) estimate and its standard error.
Est.PI <-
function(group.data, conf=.95){
if(missing(group.data))
stop("group.data is missing.")
# Check the number of groups
numGroups <- length(group.data)
# Make sure we have the same columns
taxaCounts <- sapply(group.data, ncol)
numTaxa <- taxaCounts[1]
if(any(taxaCounts != numTaxa)){
# formatDataSets is a package-level helper (defined elsewhere) that aligns
# the taxa columns across all groups.
warning("Group columns do not match, running formatDataSets.")
group.data <- formatDataSets(group.data)
}
# Make sure we have group names; fall back to "Data Set 1", "Data Set 2", ...
if(is.null(names(group.data))){
grpNames <- paste("Data Set", 1:numGroups)
}else{
grpNames <- names(group.data)
}
# Calculate the pi and error bars for each group.
# tempParam columns (named later): 1=Taxa, 2=Group, 3=PI, 4=SE, 5=Upper, 6=Lower.
# "...1" objects hold the MLE results, "...2" objects hold the MoM results.
allParamsMLE <- data.frame(matrix(0, 0, 6))
allParamsMOM <- data.frame(matrix(0, 0, 6))
thetaMLE <- data.frame(matrix(0, numGroups, 3))
thetaMOM <- data.frame(matrix(0, numGroups, 3))
for(i in 1:numGroups){
tempData <- group.data[[i]]
# Check the data has samples
numSub <- nrow(tempData)
if(numSub < 1)
stop("At least one data set in group.data is empty")
tempParam1 <- data.frame(matrix(0, ncol(tempData), 6))
tempParam2 <- data.frame(matrix(0, ncol(tempData), 6))
tempParam2[,2] <- grpNames[i]
tempParam1[,2] <- grpNames[i]
# Check for taxa with 0 column sums (add 1 to everything if this happens)
# so the Dirichlet-multinomial fit does not degenerate on all-zero taxa.
badTaxa <- which(colSums(tempData) == 0)
if(length(badTaxa) != 0)
tempData <- tempData + 1
# Handle having 1 sample: pi reduces to the observed proportions, no SE is
# available, and the CI collapses to the point estimate.
if(numSub == 1){
tempParam1[,1] <- colnames(tempData)
tempParam1[,3] <- unlist(tempData[1,]/sum(tempData))
tempParam1[,4] <- NA
tempParam1[,5] <- tempParam1[,3]
tempParam1[,6] <- tempParam1[,3]
tempParam1 <- tempParam1[order(tempParam1[,1]),]
tempTheta1 <- c(0, NA)
tempParam2 <- tempParam1
tempTheta2 <- tempTheta1
}else{
# Get the MoM and MLE for every taxa.
# NOTE(review): assumes dirmult.summary() returns one row per taxon plus a
# final theta row, with columns MLE, se.MLE, MoM, se.MOM -- confirm against
# the dirmult package documentation.
fsum <- dirmult::dirmult.summary(tempData, dirmult::dirmult(tempData, trace=FALSE))
tempTheta <- fsum[nrow(fsum),]
fsum <- fsum[-nrow(fsum),]
# Turn the summary into a data frame we can plot from
tempParam1[,1] <- rownames(fsum)
tempParam1[,3] <- fsum$MLE
tempParam1[,4] <- fsum$se.MLE
tempTheta1 <- tempTheta[,2:3]
tempParam2[,1] <- rownames(fsum)
tempParam2[,3] <- fsum$MoM
tempParam2[,4] <- fsum$se.MOM
tempTheta2 <- tempTheta[,4:5]
# Calc Upper and Lower bounds for CI: t quantile when the smallest group has
# fewer than 30 subjects, normal quantile otherwise.
# NOTE(review): minSubj is loop-invariant; recomputing it each iteration is
# redundant but harmless.
minSubj <- min(sapply(group.data, function(x) nrow(x)))
if(minSubj < 30){
val <- stats::qt(0.5 + conf *0.5, df=minSubj-1)
}else{
val <- stats::qnorm(0.5 + conf*0.5)
}
tempParam1[,5] <- tempParam1[,3] + val*tempParam1[,4]
tempParam1[,6] <- tempParam1[,3] - val*tempParam1[,4]
tempParam2[,5] <- tempParam2[,3] + val*tempParam2[,4]
tempParam2[,6] <- tempParam2[,3] - val*tempParam2[,4]
}
# Save outside of loop
allParamsMLE <- rbind(allParamsMLE, tempParam1)
thetaMLE[i,] <- c(grpNames[i], tempTheta1)
allParamsMOM <- rbind(allParamsMOM, tempParam2)
thetaMOM[i,] <- c(grpNames[i], tempTheta2)
}
colnames(allParamsMLE) <- c("Taxa", "Group", "PI", "SE", "Upper", "Lower")
colnames(thetaMLE) <- c("Group", colnames(tempTheta1))
colnames(allParamsMOM) <- c("Taxa", "Group", "PI", "SE", "Upper", "Lower")
colnames(thetaMOM) <- c("Group", colnames(tempTheta2))
# Make sure none of our error bars go over 100 or below 0 (pi lives in [0, 1])
allParamsMLE$Upper <- ifelse(allParamsMLE$Upper > 1, 1, allParamsMLE$Upper)
allParamsMLE$Lower <- ifelse(allParamsMLE$Lower < 0, 0, allParamsMLE$Lower)
allParamsMOM$Upper <- ifelse(allParamsMOM$Upper > 1, 1, allParamsMOM$Upper)
allParamsMOM$Lower <- ifelse(allParamsMOM$Lower < 0, 0, allParamsMOM$Lower)
# Factor the data so it stays in the right order when tabulated/plotted
allParamsMLE$Group <- factor(allParamsMLE$Group, levels=grpNames)
allParamsMLE$Taxa <- factor(allParamsMLE$Taxa, levels=unique(colnames(group.data[[1]])))
allParamsMOM$Group <- factor(allParamsMOM$Group, levels=grpNames)
allParamsMOM$Taxa <- factor(allParamsMOM$Taxa, levels=unique(colnames(group.data[[1]])))
MLE <- list(params=allParamsMLE, theta=thetaMLE)
MOM <- list(params=allParamsMOM, theta=thetaMOM)
return(list(MLE=MLE, MOM=MOM))
}
|
## Webscrapping Trial Run: POST a name search to the OFAC sanctions search
## site and parse the results table from the returned HTML.
library(rvest)
library(httr)
# Submit the search form; `col` holds the HTTP response for the results page.
col <- POST(url="https://sanctionssearch.ofac.treas.gov/",
encode="form",
body=list(Name="NAME OF ENTITY",
search="Search"))
col_html <- read_html(col)
# fill = F: expect a well-formed table; error rather than pad ragged rows.
col_table <- html_table(col_html,fill=F)
## Run selenium through docker
# BUG FIX: the original called remDr$open() before RSelenium was loaded and
# before any object named remDr existed, and discarded the rsDriver() result.
# Create the driver first, then open its client.
library(RSelenium)
driver <- rsDriver()
remDr <- driver$client
remDr$open()
|
/Side Projects/Webscrapping_screenshot.R
|
no_license
|
remembrance1/Essential-R-Codes
|
R
| false
| false
| 365
|
r
|
## Webscrapping Trial Run: POST a name search to the OFAC sanctions search
## site and parse the results table from the returned HTML.
library(rvest)
library(httr)
# Submit the search form; `col` holds the HTTP response for the results page.
col <- POST(url="https://sanctionssearch.ofac.treas.gov/",
encode="form",
body=list(Name="NAME OF ENTITY",
search="Search"))
col_html <- read_html(col)
# fill = F: expect a well-formed table; error rather than pad ragged rows.
col_table <- html_table(col_html,fill=F)
## Run selenium through docker
# BUG FIX: the original called remDr$open() before RSelenium was loaded and
# before any object named remDr existed, and discarded the rsDriver() result.
# Create the driver first, then open its client.
library(RSelenium)
driver <- rsDriver()
remDr <- driver$client
remDr$open()
|
# Sentiment analysis of product reviews: normalize review text, compute
# tf-idf weighted AFINN sentiment scores per review, aggregate per product,
# and correlate sentiment with the star rating.
library(tm)
library(wordcloud2)
library(plyr)
library(ggplot2)
library(dplyr)
library(tidytext)
library(writexl)
setwd("C:\\Users\\Lynn\\Dropbox\\BA\\17Fa\\BUAN 6357\\Assignment\\Assignment_2")
test <- read.csv('FewProducts.csv', stringsAsFactors=FALSE)
# AFINN lexicon: V1 = word, V2 = integer sentiment value.
afinn <- read.table('AFINN-111.txt', stringsAsFactors=FALSE)
# Q1: normalize the raw review text.
review1 <- gsub("(\n|<br />)"," ",test$Text)
# BUG FIX: the original called stripWhitespace(test$Text) here, silently
# discarding the newline/<br /> replacement made on the previous line.
# Continue the pipeline from review1 instead.
review1 <- stripWhitespace(review1)
review1 <- removePunctuation(review1)
review2 <- sapply(review1, tolower)
review2 <- stemDocument(review2, language = "english")
# Build one regex alternation matching any English stopword as a whole word.
stopwords_regex <- paste(stopwords('en'), collapse = '\\b|\\b')
stopwords_regex <- paste0('\\b', stopwords_regex, '\\b')
review3 <- stringr::str_replace_all(review2, stopwords_regex, '')
normalized_review <- cbind(test[, c(1:3, 10)], review3)
colnames(normalized_review)[4:5] <- c("Review", "Normalized Review")
# Q7: per-review sentiment score = sum over words of tf-idf * AFINN value.
forcalcu_review <- cbind(test[, 1], review2)
colnames(forcalcu_review)[1] <- "Id"
row.names(forcalcu_review) <- NULL
forcalcu_review <- data.frame(forcalcu_review)
forcalcu_review$review2 <- as.character(forcalcu_review$review2)
# Per-review word counts.
words <- forcalcu_review %>%
unnest_tokens(word, review2) %>%
group_by(Id, word) %>%
tally()
total_words <- aggregate(words$n, by=list(Id = words$Id), FUN=sum)
review_words <- left_join(words, total_words, by = c("Id" = "Id"))
colnames(review_words)[4] <- "total"
review_tf_idf <- review_words %>%
bind_tf_idf(word, Id, n)
score_review_words <- left_join(review_tf_idf, afinn, by = c("word" = "V1"))
# Words absent from the lexicon contribute zero sentiment.
score_review_words$V2[is.na(score_review_words$V2)] <- 0
score_review_words$score <- score_review_words$tf_idf*score_review_words$V2
srw_table <- aggregate(score_review_words$score, by=list(Id = score_review_words$Id), FUN=sum)
srw_table$Id <- as.integer(srw_table$Id)
score_word <- cbind(test[, c(1:3, 7)], srw_table[,2])
colnames(score_word)[5] <- c("Sentiment Score")
score_word$`Sentiment Score` <- as.numeric(as.character(score_word$`Sentiment Score`))
# Q8: per-product review count and average sentiment.
test_score_word <- score_word[c("ProductId", "Sentiment Score")]
sum_product <- ddply(test_score_word, .(ProductId), summarize,
Number_of_Reviews=length(ProductId),
Average_Score=mean(`Sentiment Score`))
# Q9: products with the six highest distinct review counts.
temp <- sum_product[order(sum_product$Number_of_Reviews, decreasing = T), ]
top_reviews <- unique(temp$Number_of_Reviews)[1:6]
top_table <- temp[as.character(temp$Number_of_Reviews) %in% as.character(top_reviews) == TRUE, ]
# Q10: correlate sentiment score with star rating for the top products.
top_product <- score_word[score_word$ProductId %in% top_table$ProductId == TRUE,]
ggplot(top_product)+geom_point(aes(`Sentiment Score`, Score))+facet_grid(ProductId ~ .)
result <- by(top_product[,4:5], top_product$ProductId, function(top_product) {cor(top_product$`Sentiment Score`, top_product$Score)})
corr_df <- as.data.frame(as.matrix(result))
corr_df <- t(corr_df)
# (sic) "Correaltion" typo kept to match the recorded output below.
rownames(corr_df) <- "Correaltion"
corr_df
# B000KV61FC B0013NUGDE B001EO5Q64 B0026RQTGE B002QWP89S B003GTR8IO B005K4Q37A B007M83302 B0090X8IPM
# Correaltion 0.04272221 -0.0229633 0.1100829 0.01581265 -0.0255755 -0.006800036 0.0574318 -0.09418172 0.05572269
|
/02_Natural Language Processing/for_tf_idf.R
|
no_license
|
xyLynn/RShiny
|
R
| false
| false
| 3,104
|
r
|
# Sentiment analysis of product reviews: normalize review text, compute
# tf-idf weighted AFINN sentiment scores per review, aggregate per product,
# and correlate sentiment with the star rating.
library(tm)
library(wordcloud2)
library(plyr)
library(ggplot2)
library(dplyr)
library(tidytext)
library(writexl)
setwd("C:\\Users\\Lynn\\Dropbox\\BA\\17Fa\\BUAN 6357\\Assignment\\Assignment_2")
test <- read.csv('FewProducts.csv', stringsAsFactors=FALSE)
# AFINN lexicon: V1 = word, V2 = integer sentiment value.
afinn <- read.table('AFINN-111.txt', stringsAsFactors=FALSE)
# Q1: normalize the raw review text.
review1 <- gsub("(\n|<br />)"," ",test$Text)
# BUG FIX: the original called stripWhitespace(test$Text) here, silently
# discarding the newline/<br /> replacement made on the previous line.
# Continue the pipeline from review1 instead.
review1 <- stripWhitespace(review1)
review1 <- removePunctuation(review1)
review2 <- sapply(review1, tolower)
review2 <- stemDocument(review2, language = "english")
# Build one regex alternation matching any English stopword as a whole word.
stopwords_regex <- paste(stopwords('en'), collapse = '\\b|\\b')
stopwords_regex <- paste0('\\b', stopwords_regex, '\\b')
review3 <- stringr::str_replace_all(review2, stopwords_regex, '')
normalized_review <- cbind(test[, c(1:3, 10)], review3)
colnames(normalized_review)[4:5] <- c("Review", "Normalized Review")
# Q7: per-review sentiment score = sum over words of tf-idf * AFINN value.
forcalcu_review <- cbind(test[, 1], review2)
colnames(forcalcu_review)[1] <- "Id"
row.names(forcalcu_review) <- NULL
forcalcu_review <- data.frame(forcalcu_review)
forcalcu_review$review2 <- as.character(forcalcu_review$review2)
# Per-review word counts.
words <- forcalcu_review %>%
unnest_tokens(word, review2) %>%
group_by(Id, word) %>%
tally()
total_words <- aggregate(words$n, by=list(Id = words$Id), FUN=sum)
review_words <- left_join(words, total_words, by = c("Id" = "Id"))
colnames(review_words)[4] <- "total"
review_tf_idf <- review_words %>%
bind_tf_idf(word, Id, n)
score_review_words <- left_join(review_tf_idf, afinn, by = c("word" = "V1"))
# Words absent from the lexicon contribute zero sentiment.
score_review_words$V2[is.na(score_review_words$V2)] <- 0
score_review_words$score <- score_review_words$tf_idf*score_review_words$V2
srw_table <- aggregate(score_review_words$score, by=list(Id = score_review_words$Id), FUN=sum)
srw_table$Id <- as.integer(srw_table$Id)
score_word <- cbind(test[, c(1:3, 7)], srw_table[,2])
colnames(score_word)[5] <- c("Sentiment Score")
score_word$`Sentiment Score` <- as.numeric(as.character(score_word$`Sentiment Score`))
# Q8: per-product review count and average sentiment.
test_score_word <- score_word[c("ProductId", "Sentiment Score")]
sum_product <- ddply(test_score_word, .(ProductId), summarize,
Number_of_Reviews=length(ProductId),
Average_Score=mean(`Sentiment Score`))
# Q9: products with the six highest distinct review counts.
temp <- sum_product[order(sum_product$Number_of_Reviews, decreasing = T), ]
top_reviews <- unique(temp$Number_of_Reviews)[1:6]
top_table <- temp[as.character(temp$Number_of_Reviews) %in% as.character(top_reviews) == TRUE, ]
# Q10: correlate sentiment score with star rating for the top products.
top_product <- score_word[score_word$ProductId %in% top_table$ProductId == TRUE,]
ggplot(top_product)+geom_point(aes(`Sentiment Score`, Score))+facet_grid(ProductId ~ .)
result <- by(top_product[,4:5], top_product$ProductId, function(top_product) {cor(top_product$`Sentiment Score`, top_product$Score)})
corr_df <- as.data.frame(as.matrix(result))
corr_df <- t(corr_df)
# (sic) "Correaltion" typo kept to match the recorded output below.
rownames(corr_df) <- "Correaltion"
corr_df
# B000KV61FC B0013NUGDE B001EO5Q64 B0026RQTGE B002QWP89S B003GTR8IO B005K4Q37A B007M83302 B0090X8IPM
# Correaltion 0.04272221 -0.0229633 0.1100829 0.01581265 -0.0255755 -0.006800036 0.0574318 -0.09418172 0.05572269
|
# Driver script: run the xgboost cross-validation solver (gbm_cv.R) over one
# or more candidate predictor sets and random seeds, accumulating the
# cross-validated MAE results in mae_res.
library (ggvis)
library (tidyr)
# Input/output locations and run configuration; these globals are read by the
# sourced solver script below.
rdata_file <- '../train_agg3.RData'
rtest_file <- '../test_agg3.RData'
run_id_pref <- 'csv_out/xgbm_allvars_100pct'
solver_script <- '../dave/gbm_cv.R'
create_submission <- FALSE
cv_frac_trn <- 1.0
tcheck.print <- TRUE
set_rain_thresh <- 65
# Accumulators filled in by the loops below.
mae_res <- data.frame()
run_time <- numeric()
# Candidate predictor sets (radar reflectivity / polarimetric variables).
# Only `all` is used in this run; `kaggle` and `nonpolar` are kept so that
# cs_list can be switched without retyping them.
kaggle <- c("rd"
, "Ref", "Ref_5x5_50th", "Ref_5x5_90th"
, "RefComposite", "RefComposite_5x5_50th", "RefComposite_5x5_90th"
, "Zdr", "Zdr_5x5_50th", "Zdr_5x5_90th"
, "nrec", "naRef"
, "Ref_rz", "Kdp", "Kdp_rk", "rr_Katsumata_ref", "rr_refzdr", "rr_kdpzdr"
)
nonpolar <- c("rd"
, "Ref", "Ref_5x5_50th", "Ref_5x5_90th"
, "nrec", "naRef"
, "Ref_rz", "rr_Katsumata_ref"
)
all <- c("rd"
, "Ref", "Ref_5x5_10th", "Ref_5x5_50th", "Ref_5x5_90th"
, "RefComposite", "RefComposite_5x5_10th", "RefComposite_5x5_50th", "RefComposite_5x5_90th"
, "RhoHV", "RhoHV_5x5_10th", "RhoHV_5x5_50th", "RhoHV_5x5_90th"
, "Zdr", "Zdr_5x5_10th", "Zdr_5x5_50th", "Zdr_5x5_90th"
, "Kdp", "Kdp_5x5_10th", "Kdp_5x5_50th", "Kdp_5x5_90th"
, "nrec", "naRef", "naRefC", "naRho", "naZdr", "naKdp"
, "Ref_rz", "Ref_rz_comp", "Kdp_rk", "rr_Katsumata_ref", "rr_Katsumata_ref_comp"
, "rr_refzdr", "rr_refzdr_comp", "rr_kdpzdr", "Ref2", "RefComposite2", "Zdr2"
, "Kdp2", "rd_Ref", "rd_RefComposite", "rd_Kdp"
)
cs_list <- list( all = all)
# Outer loop over random seeds (the other seeds are left commented out).
for (set_seed in c(99)) { #} c(1999, 2015, 7, 86, 99)) {
run_id <- paste( run_id_pref, set_seed, sep="_")
mae_base <- -1
for (i in 1:length(cs_list)) {
print( cs_list[i])
set_cs <- cs_list[[i]]
# The solver reads the globals set above (run_id, set_cs, set_seed, ...) and
# is assumed to create time_df, mae_xgb, mae_cv_test and mae_cv_trn --
# TODO confirm against gbm_cv.R.
source (solver_script)
elapsed <- sum( time_df$delta )
# The first iteration establishes the baseline MAE; later rows report the
# delta of mae_cv_test against that baseline.
mae_base <- ifelse( mae_base > 0 , mae_base, mae_cv_test)
mae_res <- rbind( mae_res, data.frame( seed=set_seed, xSet=names(cs_list[i])
, xvars=paste(set_cs, collapse = ",")
, mae_xgb, mae_cv_test, mae_cv_trn
, delta = mae_cv_test - mae_base
, elapsed))
run_time <- c(run_time, elapsed )
}
}
print(mae_res)
# mae_res %>% group_by(seed) %>%
# ggvis( ~xSet, ~delta) %>% layer_points(fill = ~as.factor(seed))
#mae_res %>% filter( xSet == 'rf_mpk') %>% ggvis( ~as.character(seed), ~delta) %>% layer_points()
# mae_res %>% group_by(seed) %>%
# ggvis( ~rain_thresh, ~mae_cv_test ) %>%
# layer_points( fill = ~as.factor(seed)) %>%
# layer_lines ( stroke = ~as.factor(seed))
|
/dave/run_xgbm.R
|
no_license
|
dsdaveh/weddingcap_rain
|
R
| false
| false
| 2,733
|
r
|
# Driver script: run the xgboost cross-validation solver (gbm_cv.R) over one
# or more candidate predictor sets and random seeds, accumulating the
# cross-validated MAE results in mae_res.
library (ggvis)
library (tidyr)
# Input/output locations and run configuration; these globals are read by the
# sourced solver script below.
rdata_file <- '../train_agg3.RData'
rtest_file <- '../test_agg3.RData'
run_id_pref <- 'csv_out/xgbm_allvars_100pct'
solver_script <- '../dave/gbm_cv.R'
create_submission <- FALSE
cv_frac_trn <- 1.0
tcheck.print <- TRUE
set_rain_thresh <- 65
# Accumulators filled in by the loops below.
mae_res <- data.frame()
run_time <- numeric()
# Candidate predictor sets (radar reflectivity / polarimetric variables).
# Only `all` is used in this run; `kaggle` and `nonpolar` are kept so that
# cs_list can be switched without retyping them.
kaggle <- c("rd"
, "Ref", "Ref_5x5_50th", "Ref_5x5_90th"
, "RefComposite", "RefComposite_5x5_50th", "RefComposite_5x5_90th"
, "Zdr", "Zdr_5x5_50th", "Zdr_5x5_90th"
, "nrec", "naRef"
, "Ref_rz", "Kdp", "Kdp_rk", "rr_Katsumata_ref", "rr_refzdr", "rr_kdpzdr"
)
nonpolar <- c("rd"
, "Ref", "Ref_5x5_50th", "Ref_5x5_90th"
, "nrec", "naRef"
, "Ref_rz", "rr_Katsumata_ref"
)
all <- c("rd"
, "Ref", "Ref_5x5_10th", "Ref_5x5_50th", "Ref_5x5_90th"
, "RefComposite", "RefComposite_5x5_10th", "RefComposite_5x5_50th", "RefComposite_5x5_90th"
, "RhoHV", "RhoHV_5x5_10th", "RhoHV_5x5_50th", "RhoHV_5x5_90th"
, "Zdr", "Zdr_5x5_10th", "Zdr_5x5_50th", "Zdr_5x5_90th"
, "Kdp", "Kdp_5x5_10th", "Kdp_5x5_50th", "Kdp_5x5_90th"
, "nrec", "naRef", "naRefC", "naRho", "naZdr", "naKdp"
, "Ref_rz", "Ref_rz_comp", "Kdp_rk", "rr_Katsumata_ref", "rr_Katsumata_ref_comp"
, "rr_refzdr", "rr_refzdr_comp", "rr_kdpzdr", "Ref2", "RefComposite2", "Zdr2"
, "Kdp2", "rd_Ref", "rd_RefComposite", "rd_Kdp"
)
cs_list <- list( all = all)
# Outer loop over random seeds (the other seeds are left commented out).
for (set_seed in c(99)) { #} c(1999, 2015, 7, 86, 99)) {
run_id <- paste( run_id_pref, set_seed, sep="_")
mae_base <- -1
for (i in 1:length(cs_list)) {
print( cs_list[i])
set_cs <- cs_list[[i]]
# The solver reads the globals set above (run_id, set_cs, set_seed, ...) and
# is assumed to create time_df, mae_xgb, mae_cv_test and mae_cv_trn --
# TODO confirm against gbm_cv.R.
source (solver_script)
elapsed <- sum( time_df$delta )
# The first iteration establishes the baseline MAE; later rows report the
# delta of mae_cv_test against that baseline.
mae_base <- ifelse( mae_base > 0 , mae_base, mae_cv_test)
mae_res <- rbind( mae_res, data.frame( seed=set_seed, xSet=names(cs_list[i])
, xvars=paste(set_cs, collapse = ",")
, mae_xgb, mae_cv_test, mae_cv_trn
, delta = mae_cv_test - mae_base
, elapsed))
run_time <- c(run_time, elapsed )
}
}
print(mae_res)
# mae_res %>% group_by(seed) %>%
# ggvis( ~xSet, ~delta) %>% layer_points(fill = ~as.factor(seed))
#mae_res %>% filter( xSet == 'rf_mpk') %>% ggvis( ~as.character(seed), ~delta) %>% layer_points()
# mae_res %>% group_by(seed) %>%
# ggvis( ~rain_thresh, ~mae_cv_test ) %>%
# layer_points( fill = ~as.factor(seed)) %>%
# layer_lines ( stroke = ~as.factor(seed))
|
# install.packages("ggdendro")
# install.packages("knitr")
# Driver script for a FAUST (Full Annotation Using Shape-constrained Trees)
# analysis of a single flowWorkspace gating set, configured through
# /opt/faust/faust_configurations.yaml.
# Load required libraries
suppressPackageStartupMessages({
library(configr) # NOT Required for Faust - Helps with user configurations
library(flowWorkspaceData) # Required for Faust
library(flowWorkspace) # Required for Faust
# library(ggdendro) # NOT Required for Faust
library(scamp) # Required for FAUST
library(ggplot2) # Required for FAUST
library(cowplot) # Required for FAUST
# library(knitr) # NOT Required for FAUST
library(dplyr) # Required for FAUST
library(tidyr) # Required for FAUST
library(faust) # Required for FAUST
})
# Install and load user configurations
user_configurations_file_path <- file.path("/opt", "faust", "faust_configurations.yaml")
user_configurations <- read.config(file=user_configurations_file_path)
# Each required setting must be present and changed from the shipped
# "PLEASE_CHANGE_ME" sentinel. The five copy-pasted checks of the original
# are collapsed into one validation loop; the error text is unchanged.
required_settings <- c("annotations_approved", "depth_score_threshold",
"experimental_unit", "selection_quantile", "starting_node")
for (setting_name in required_settings) {
if ((!setting_name %in% names(user_configurations))
|| identical(user_configurations[[setting_name]], "PLEASE_CHANGE_ME")) {
stop(sprintf("The user MUST provide an explicit value in `faust_configurations.yaml` for the setting `%s`", setting_name))
}
}
# Load data sets into gating set
directories <- list.dirs(path=file.path("/opt", "faust", "input_files"), full.names=TRUE, recursive=FALSE)
# BUG FIX: the original tested lengths(directories) != 1. For a character
# vector lengths() returns 1 per element, so the guard never fired when more
# than one workspace directory was present. length() counts the directories.
if (length(directories) != 1) {
stop("Only one workspace can be provided for FAUST execution. Please make sure that the `input_files` directory contains only one workspace directory and try running FAUST again.")
}
dataset_file_path <- directories[1]
gating_set <- load_gs(dataset_file_path)
active_channels_in <- markernames(gating_set)
# Select the channel boundaries: a 2 x nChannels Low/High matrix with a zero
# low bound and a fixed 3500 high bound for every active channel.
channel_bounds_in <- matrix(0, nrow=2, ncol=length(active_channels_in))
colnames(channel_bounds_in) <- active_channels_in
rownames(channel_bounds_in) <- c("Low","High")
channel_bounds_in["High",] <- 3500
# Set the FAUST project path for execution
faust_processing_directory <- file.path("/opt", "faust", "output_files")
# showWarnings = FALSE: re-running against an existing output directory is fine.
dir.create(faust_processing_directory, recursive = TRUE, showWarnings = FALSE)
print("=======================================================================")
print("Beginning FAUST execution - This may take awhile!")
print("=======================================================================")
# Perform FAUST Analysis with a revised depth score
faust(
gatingSet = gating_set,
experimentalUnit = user_configurations$experimental_unit,
activeChannels = active_channels_in,
channelBounds = channel_bounds_in,
startingCellPop = user_configurations$starting_node,
projectPath = faust_processing_directory,
depthScoreThreshold = user_configurations$depth_score_threshold,
selectionQuantile = user_configurations$selection_quantile,
debugFlag = FALSE,
#set this to the number of threads you want to use on your system
threadNum = parallel::detectCores() / 2 - 1,
seedValue = 271828,
annotationsApproved = user_configurations$annotations_approved
)
|
/run_faust.R
|
no_license
|
FredHutch/FAUST_local
|
R
| false
| false
| 4,032
|
r
|
# install.packages("ggdendro")
# install.packages("knitr")
# Driver script for a FAUST (Full Annotation Using Shape-constrained Trees)
# analysis of a single flowWorkspace gating set, configured through
# /opt/faust/faust_configurations.yaml.
# Load required libraries
suppressPackageStartupMessages({
library(configr) # NOT Required for Faust - Helps with user configurations
library(flowWorkspaceData) # Required for Faust
library(flowWorkspace) # Required for Faust
# library(ggdendro) # NOT Required for Faust
library(scamp) # Required for FAUST
library(ggplot2) # Required for FAUST
library(cowplot) # Required for FAUST
# library(knitr) # NOT Required for FAUST
library(dplyr) # Required for FAUST
library(tidyr) # Required for FAUST
library(faust) # Required for FAUST
})
# Install and load user configurations
user_configurations_file_path <- file.path("/opt", "faust", "faust_configurations.yaml")
user_configurations <- read.config(file=user_configurations_file_path)
# Each required setting must be present and changed from the shipped
# "PLEASE_CHANGE_ME" sentinel. The five copy-pasted checks of the original
# are collapsed into one validation loop; the error text is unchanged.
required_settings <- c("annotations_approved", "depth_score_threshold",
"experimental_unit", "selection_quantile", "starting_node")
for (setting_name in required_settings) {
if ((!setting_name %in% names(user_configurations))
|| identical(user_configurations[[setting_name]], "PLEASE_CHANGE_ME")) {
stop(sprintf("The user MUST provide an explicit value in `faust_configurations.yaml` for the setting `%s`", setting_name))
}
}
# Load data sets into gating set
directories <- list.dirs(path=file.path("/opt", "faust", "input_files"), full.names=TRUE, recursive=FALSE)
# BUG FIX: the original tested lengths(directories) != 1. For a character
# vector lengths() returns 1 per element, so the guard never fired when more
# than one workspace directory was present. length() counts the directories.
if (length(directories) != 1) {
stop("Only one workspace can be provided for FAUST execution. Please make sure that the `input_files` directory contains only one workspace directory and try running FAUST again.")
}
dataset_file_path <- directories[1]
gating_set <- load_gs(dataset_file_path)
active_channels_in <- markernames(gating_set)
# Select the channel boundaries: a 2 x nChannels Low/High matrix with a zero
# low bound and a fixed 3500 high bound for every active channel.
channel_bounds_in <- matrix(0, nrow=2, ncol=length(active_channels_in))
colnames(channel_bounds_in) <- active_channels_in
rownames(channel_bounds_in) <- c("Low","High")
channel_bounds_in["High",] <- 3500
# Set the FAUST project path for execution
faust_processing_directory <- file.path("/opt", "faust", "output_files")
# showWarnings = FALSE: re-running against an existing output directory is fine.
dir.create(faust_processing_directory, recursive = TRUE, showWarnings = FALSE)
print("=======================================================================")
print("Beginning FAUST execution - This may take awhile!")
print("=======================================================================")
# Perform FAUST Analysis with a revised depth score
faust(
gatingSet = gating_set,
experimentalUnit = user_configurations$experimental_unit,
activeChannels = active_channels_in,
channelBounds = channel_bounds_in,
startingCellPop = user_configurations$starting_node,
projectPath = faust_processing_directory,
depthScoreThreshold = user_configurations$depth_score_threshold,
selectionQuantile = user_configurations$selection_quantile,
debugFlag = FALSE,
#set this to the number of threads you want to use on your system
threadNum = parallel::detectCores() / 2 - 1,
seedValue = 271828,
annotationsApproved = user_configurations$annotations_approved
)
|
# Plot 6: total PM2.5 emissions from motor vehicles (type == "ON-ROAD") in
# Baltimore City (fips 24510) vs Los Angeles County (fips 06037), 1999-2008.
# Expects `neidf` (and ggplot2) to be provided by "Load data.R".
source("Load data.R")
# Filter the data with respect to Baltimore and Los Angeles.
plot6_subdata <- subset(neidf, fips %in% c("24510","06037") & type == "ON-ROAD")
# Sum the total emissions per year for each city.
tot_plot6 <- aggregate(Emissions~(year+fips), plot6_subdata, sum)
colnames(tot_plot6)[2] <- "City"
# Replace fips codes with readable city names.
tot_plot6$City[tot_plot6$City == "06037"] <- "Los Angeles"
tot_plot6$City[tot_plot6$City == "24510"] <- "Baltimore"
#Which city has seen greater changes over time in motor vehicle emissions?
png("plot6.png")
g <- ggplot(data = tot_plot6, aes(x = factor(year), y = Emissions, fill = City ))
# BUG FIX: wrap the plot in print(); ggplot objects are only drawn by
# auto-printing at an interactive top level, so the original produced an
# empty PNG when this script was source()d (see R FAQ 7.22).
print(
g + geom_bar(stat = "identity") + facet_wrap(~City) +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5))+
xlab("year") +
ylab(expression('Total PM'[2.5]*" Emissions")) +
ggtitle('Total Emissions from motor vehicle (type=ON-ROAD) \n in Baltimore City vs Los Angeles from 1999-2008')
)
dev.off()
|
/plot6.R
|
no_license
|
haripriyarishikesh/Exploratory-Data-Analysis-Course-Project-2
|
R
| false
| false
| 955
|
r
|
# Plot 6: total PM2.5 emissions from motor vehicles (type == "ON-ROAD") in
# Baltimore City (fips 24510) vs Los Angeles County (fips 06037), 1999-2008.
# Expects `neidf` (and ggplot2) to be provided by "Load data.R".
source("Load data.R")
# Filter the data with respect to Baltimore and Los Angeles.
plot6_subdata <- subset(neidf, fips %in% c("24510","06037") & type == "ON-ROAD")
# Sum the total emissions per year for each city.
tot_plot6 <- aggregate(Emissions~(year+fips), plot6_subdata, sum)
colnames(tot_plot6)[2] <- "City"
# Replace fips codes with readable city names.
tot_plot6$City[tot_plot6$City == "06037"] <- "Los Angeles"
tot_plot6$City[tot_plot6$City == "24510"] <- "Baltimore"
#Which city has seen greater changes over time in motor vehicle emissions?
png("plot6.png")
g <- ggplot(data = tot_plot6, aes(x = factor(year), y = Emissions, fill = City ))
# BUG FIX: wrap the plot in print(); ggplot objects are only drawn by
# auto-printing at an interactive top level, so the original produced an
# empty PNG when this script was source()d (see R FAQ 7.22).
print(
g + geom_bar(stat = "identity") + facet_wrap(~City) +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5))+
xlab("year") +
ylab(expression('Total PM'[2.5]*" Emissions")) +
ggtitle('Total Emissions from motor vehicle (type=ON-ROAD) \n in Baltimore City vs Los Angeles from 1999-2008')
)
dev.off()
|
##### Version:2
##### Update Date:2020-02-12
# Variant annotation/interpretation pipeline driver: takes a VCF or an
# ANNOVAR avinput file from the command line, runs ANNOVAR plus several
# in-house annotation stages (oncoKB, CGI, prediction summary, VAF), then a
# final Python "layering" step on the combined output.
setwd("~/137_share/147_backup/VIP/")
annovar_path<-"~/137_share/147_backup/annovar/"
interpretor_path<-"~/137_share/147_backup/interpretation/"
## check required packages
# if(!requireNamespace("data.table", quietly = TRUE)){
# install.packages("data.table")
# }else if(!requireNamespace("vcfR", quietly = TRUE)){
# install.packages("vcfR")
# }else if(!requireNamespace("BiocManager", quietly = TRUE)){
# install.packages("BiocManager")
# }else if(!requireNamespace("Rsamtools", quietly = TRUE)){
# BiocManager::install("Rsamtools")
# }else if(!requireNamespace("BSgenome", quietly = TRUE)){
# BiocManager::install("BSgenome")
# }else{
#
# }
library(data.table)
source(paste0(interpretor_path,"script/functions.R"))
# Command-line arguments of the form -vcf=FILE / -avinput=FILE and -out=FILE.
args <- commandArgs(trailingOnly = TRUE)
#args="-avinput=media/pathogenic_table_WPlcEpR/pathogenic_table.txt -out=media/pathogenic_table_WPlcEpR/pathogenic_table_WPlcEpR.txt"
# args <-"-vcf=00228512_OCPv1.vcf -out=00228512_OCPv1.txt"
# NOTE(review): splitting on " " supports the single-string test args above;
# commandArgs() already yields one element per flag, in which case the split
# is a no-op. Also, `options` shadows base::options in this script.
options<-unlist(strsplit(args,split=" "))
input_vcf<-unlist(strsplit(options[grep(options,pattern="-vcf")],split="="))[2]
input_avinput<-unlist(strsplit(options[grep(options,pattern="-avinput")],split="="))[2]
output<-unlist(strsplit(options[grep(options,pattern="-out")],split="="))[2]
if(length(input_vcf)==0&length(input_avinput)==0){ ### no input file
cat("please input a vcf or avinput file!\n")
}else{
# VCF input: decompose to an avinput first (prepare_my_avinput.R reads the
# tmp_output_avinput path set here).
if(length(input_vcf)!=0){
cat(paste0("Input vcf: ",input_vcf,"\n"))
# NOTE(review): pattern ".vcf$" has an unescaped dot, so it matches any
# character before "vcf" at end of name -- harmless for normal filenames.
tmp_output_avinput <- gsub(input_vcf,pattern = ".vcf$",replacement = ".decompose.avinput")
cat("---prepare avinput-----------------------------\n")
source(paste0(interpretor_path,"script/prepare_my_avinput.R"))
cat(paste0("Create avinput: ",tmp_output_avinput,"\n"))
tmp_annovar<-gsub(input_vcf,pattern = ".vcf$",replacement = "_annovar")
}else if(length(input_avinput)!=0){
cat(paste0("Input avinput: ",input_avinput,"\n"))
tmp_output_avinput <- input_avinput
tmp_annovar <- gsub(input_avinput,pattern = ".avinput$",replacement = "_annovar")
}else{
# NOTE(review): unreachable -- the outer condition guarantees at least one
# of input_vcf / input_avinput is non-empty here.
cat("please input a vcf or avinput file!")
}
# Run ANNOVAR over the avinput with the fixed hg19 protocol list.
cat("---run annovar---------------------------------\n")
system(paste0("perl ",annovar_path,"table_annovar.pl ",
tmp_output_avinput," ",
annovar_path,"humandb/ -buildver hg19 -out ",
tmp_annovar," -remove -protocol refGene,avsnp150,ClinGen_annotation,gnomad211_genome,Taiwan_Biobank,LOVD_all,clinvar_20191125,cosmic90_coding,dbnsfp35a,CIVIC_annotation,OCP_ver2 -operation g,f,f,f,f,f,f,f,f,f,f -nastring ."))
annovar_result<-paste0(tmp_annovar,".hg19_multianno.txt")
cat(paste0("Create annovar result: ",annovar_result,"\n"))
#### testing data ####################
# annovar_result="../../../KD/interpretation/02-13528-4_02-13528-4_RNA_Non-Filtered_2018-05-01_14_27_22_annovar.hg19_multianno.txt"
# tmp_output_avinput<-"../../../KD/interpretation/02-13528-4_02-13528-4_RNA_Non-Filtered_2018-05-01_14_27_22.decompose.avinput"
# output<-"../../../KD/interpretation/02-13528-4_02_test_ver4.txt"
######################################
# The sourced annotation stages below operate on `target` (and the other
# globals set above) and are assumed to write the final table to `output` --
# TODO confirm against the individual stage scripts.
cat("---annotate oncoKB-----------------------------\n")
target <- fread(annovar_result,sep = "\t",stringsAsFactors = F,data.table = F)
source(paste0(interpretor_path,"script/annotate_oncoKB.R"))
cat("---annotate CGI -------------------------------\n")
source(paste0(interpretor_path,"script/annotate_CGI.R"))
cat("---summarize prediction -----------------------\n")
source(paste0(interpretor_path,"script/summarize_prediction.R"))
cat("---annotate VAF--------------------------------\n")
source(paste0(interpretor_path,"script/annotateVAF.R"))
cat(paste0("Create final result: ",output,"\n"))
cat("---Layering -----------------------------------\n")
system(paste0("python3 ",interpretor_path,"script/pipeline.py --input ",output," --output ",gsub(output,pattern = ".txt$",replacement = "")))
cat("Job finished!\n")
}
|
/my_annotation_pipeline.R
|
no_license
|
cadilac7882/NCKUH_interpretion
|
R
| false
| false
| 4,009
|
r
|
##### Version:2
##### Update Date:2020-02-12
# Driver script for a variant interpretation pipeline:
#   1. Parse -vcf / -avinput / -out command-line options.
#   2. Run ANNOVAR (table_annovar.pl) on the input.
#   3. Source a series of annotation scripts (oncoKB, CGI, prediction
#      summary, VAF) that communicate through the global environment.
#   4. Call a Python "layering" step on the final table.
setwd("~/137_share/147_backup/VIP/")
# Fixed install locations for ANNOVAR and the interpretation scripts.
annovar_path<-"~/137_share/147_backup/annovar/"
interpretor_path<-"~/137_share/147_backup/interpretation/"
## check required packages
# if(!requireNamespace("data.table", quietly = TRUE)){
# install.packages("data.table")
# }else if(!requireNamespace("vcfR", quietly = TRUE)){
# install.packages("vcfR")
# }else if(!requireNamespace("BiocManager", quietly = TRUE)){
# install.packages("BiocManager")
# }else if(!requireNamespace("Rsamtools", quietly = TRUE)){
# BiocManager::install("Rsamtools")
# }else if(!requireNamespace("BSgenome", quietly = TRUE)){
# BiocManager::install("BSgenome")
# }else{
#
# }
library(data.table)
# NOTE(review): functions.R presumably defines helpers used by the sourced
# annotation scripts below — confirm against that file.
source(paste0(interpretor_path,"script/functions.R"))
# Options arrive as "-key=value" tokens, e.g. "-vcf=sample.vcf -out=sample.txt".
args <- commandArgs(trailingOnly = TRUE)
#args="-avinput=media/pathogenic_table_WPlcEpR/pathogenic_table.txt -out=media/pathogenic_table_WPlcEpR/pathogenic_table_WPlcEpR.txt"
# args <-"-vcf=00228512_OCPv1.vcf -out=00228512_OCPv1.txt"
options<-unlist(strsplit(args,split=" "))
# Each extraction below splits "-key=value" on "=" and takes element [2]
# (the value). If an option is absent the result has length 0.
# NOTE(review): grep(pattern="-vcf") also matches any token containing
# "-vcf"; tokens are assumed to be well-formed "-key=value" pairs.
input_vcf<-unlist(strsplit(options[grep(options,pattern="-vcf")],split="="))[2]
input_avinput<-unlist(strsplit(options[grep(options,pattern="-avinput")],split="="))[2]
output<-unlist(strsplit(options[grep(options,pattern="-out")],split="="))[2]
if(length(input_vcf)==0&length(input_avinput)==0){ ### no input file
cat("please input a vcf or avinput file!\n")
}else{
# VCF input takes precedence: convert it to an avinput file first.
if(length(input_vcf)!=0){
cat(paste0("Input vcf: ",input_vcf,"\n"))
# NOTE(review): "." in the pattern is an unescaped regex any-char;
# "\\.vcf$" would be stricter. Same applies to the gsub calls below.
tmp_output_avinput <- gsub(input_vcf,pattern = ".vcf$",replacement = ".decompose.avinput")
cat("---prepare avinput-----------------------------\n")
# prepare_my_avinput.R is expected to read input_vcf and write
# tmp_output_avinput (shared via the global environment).
source(paste0(interpretor_path,"script/prepare_my_avinput.R"))
cat(paste0("Create avinput: ",tmp_output_avinput,"\n"))
tmp_annovar<-gsub(input_vcf,pattern = ".vcf$",replacement = "_annovar")
}else if(length(input_avinput)!=0){
# avinput supplied directly: skip the VCF decomposition step.
cat(paste0("Input avinput: ",input_avinput,"\n"))
tmp_output_avinput <- input_avinput
tmp_annovar <- gsub(input_avinput,pattern = ".avinput$",replacement = "_annovar")
}else{
cat("please input a vcf or avinput file!")
}
cat("---run annovar---------------------------------\n")
# Annotate against hg19 with 11 protocols (one gene-based "g", the rest
# filter-based "f"); missing annotations are filled with ".".
system(paste0("perl ",annovar_path,"table_annovar.pl ",
tmp_output_avinput," ",
annovar_path,"humandb/ -buildver hg19 -out ",
tmp_annovar," -remove -protocol refGene,avsnp150,ClinGen_annotation,gnomad211_genome,Taiwan_Biobank,LOVD_all,clinvar_20191125,cosmic90_coding,dbnsfp35a,CIVIC_annotation,OCP_ver2 -operation g,f,f,f,f,f,f,f,f,f,f -nastring ."))
# table_annovar.pl writes <out>.hg19_multianno.txt by convention.
annovar_result<-paste0(tmp_annovar,".hg19_multianno.txt")
cat(paste0("Create annovar result: ",annovar_result,"\n"))
#### testing data ####################
# annovar_result="../../../KD/interpretation/02-13528-4_02-13528-4_RNA_Non-Filtered_2018-05-01_14_27_22_annovar.hg19_multianno.txt"
# tmp_output_avinput<-"../../../KD/interpretation/02-13528-4_02-13528-4_RNA_Non-Filtered_2018-05-01_14_27_22.decompose.avinput"
# output<-"../../../KD/interpretation/02-13528-4_02_test_ver4.txt"
######################################
cat("---annotate oncoKB-----------------------------\n")
# `target` is read by the sourced annotation scripts below; they are
# expected to progressively annotate it and eventually write `output`.
target <- fread(annovar_result,sep = "\t",stringsAsFactors = F,data.table = F)
source(paste0(interpretor_path,"script/annotate_oncoKB.R"))
cat("---annotate CGI -------------------------------\n")
source(paste0(interpretor_path,"script/annotate_CGI.R"))
cat("---summarize prediction -----------------------\n")
source(paste0(interpretor_path,"script/summarize_prediction.R"))
cat("---annotate VAF--------------------------------\n")
source(paste0(interpretor_path,"script/annotateVAF.R"))
cat(paste0("Create final result: ",output,"\n"))
cat("---Layering -----------------------------------\n")
# Final Python step; --output is the output path with the ".txt" suffix removed.
system(paste0("python3 ",interpretor_path,"script/pipeline.py --input ",output," --output ",gsub(output,pattern = ".txt$",replacement = "")))
cat("Job finished!\n")
}
|
##### Book name: Beginning R
##### Author: Larry Pace
##### Published: 2012
##### Chapter 1: Getting R and Getting Started
##### Working with Data in R
# Vectors are most common data type in R, must be homogenous (all same data types--
# character, numeric or logical--but if mixed may be coerced (forced) into single
# type
# Everything is an object; no required declarations so just type and begin using
x <- c(1, 2, 3, 4, "Pi")
x
# MODE is the storage class of the object, not the modal value in a series
mode(x)
# Building vectors uses the c function--combine, not concatenate (cat())
x <- c(1:10)
#The colon (:) creates a sequence, which can also be created with seq(1:10)
#How many elements are in the x vector?
length(x)
# Assign the numeric value 10 to y and see how many elements are in it
y <- 10
length(y)
# Add the two vectors together; what happens?
x + y
# R recycles the y element to add it to each element in the x vector
# Note how R handles recycling when vector length is odd
y <- c(0,1)
y
x + y
y <- c(1, 3, 5)
y
x + y
##### Order of operations and complex numbers
2 + 3 * x
# Note the differences
(2 + 3) * x
sqrt(x)
# Modulo (remainder) divide operation
x %% 4
# R does complex numbers
y <- 3 + 2i
Re(y) # Real part of the complex number
Im(y) #Imaginary part of the complex number
x * y
##### Four ways to create a sequence vector
x <- c(1:10)
x
y <- 1:10
y
z <- seq(10)
z
# Note: This creates a vector with 1 value REPEATED 10x, not 1-10, as above
a <- rep(1, 10)
a
##### R sequences all indexes starting with 1, not 0 like C/C++ or other languages
##### Adding elements (members) to a vector
x
# Concat (really, combine) the sequence 11-15 to the end of vector x
x <- c(x, 11:15)
x
##### Benford's Distribution: predicting the first digit in a string of numbers
# Build the vectors
P = c(0.301, 0.176, 0.125, 0.097, 0.079, 0.067, 0.058, 0.051, 0.046)
V = c(1, 2, 3, 4, 5, 6, 7, 8, 9)
P
V
# Discrete probability (additive) where each member of V is multiplied by corresponding member of P
# and summed for each pairing until totaled--implicit looping benefit within R
sum(V * P)
# Find the variance of the discrete probability distribution, again with implicit looping through the vectors
# Loop through vector V: for each value, subtract the vector's mean and then square the difference
Dev <- (V - mean(V)) ^ 2
Dev
#Multiply the stddev by the probability in another implicit loop through both vectors
sum(Dev * P)
#Calculate the standard deviation of the same data
stdev <- sqrt(sum(Dev * P))
stdev
##### In R, a matrix is a vector but a vector is not a one-column or one-row matrix
x <- 1:10
# Create a matrix of the x vector sequence 1-10 in two rows and five columns in a DEFAULT by-column format
x <- matrix(x, 2, 5)
x
# This would also do the same matrix without using another variable
y <- matrix(1:10, 2, 5)
y
##### Referring to matrix rows and columns
colnames(x) <- c("A", "B", "C", "D", "E")
x
x[1, "C"]
x[1, 2]
x[ , 1]
x[1, ]
x[2, "E"]
##### More with Benford's Distribution, this time with a Matrix
# Create the ACCTDATA vector first
acctdata <- c(1, 132, 86.7,
2, 50, 50.7,
3, 32, 36.0,
4, 20, 27.9,
5, 19, 22.8,
6, 11, 19.3,
7, 10, 16.7,
8, 9, 14.7,
9, 5, 13.2)
acctdata
# Convert the vector into a MATRIX
acctdata <- matrix(acctdata, 9, 3, byrow = TRUE)
# Name the columns of the MATRIX
colnames(acctdata) <- c("digit", "actual", "expected")
# Show me the matrix
acctdata
# We are going to write the chi-square analysis to see if the Observed fits the Expected
# Another advantage of implicit looping, this one line...
# 1) Substracts each value in COL 3 from each value in COL 2
# 2) Squares the difference between those values
# 3) Divides that squared difference by the matching value in COL 3
# 4) Sums all of the divided values
chisquare <- sum((acctdata[, 2] - acctdata[ , 3]) ^ 2 / acctdata[ , 3])
chisquare
# Matrix transposition, multiplication and inversion
# Create two small matrices to work with
A <- matrix(c(6, 1,
0, -3,
-1, -2), 3, 2, byrow = TRUE)
B <- matrix(c(4, 2,
0, 1,
-5, -1), 3, 2, byrow = TRUE)
A
B
# Matrix addition
A + B
# Matrix subtraction
A - B
# Matrix component-by-component multiplication, not matrix multiplication
A * B
# Transpose matrix A
t(A)
# Matrix inversion: only done with square matrixes (same # rows and cols)
# Inversion handled with solve() function
A <- matrix(c(4, 0, 5,
0, 1, -6,
3, 0, 4), 3, 3, byrow = TRUE)
#Next command finds the inverse of matrix A
B <- solve(A)
A %*% B #Actual matrix multiplication
B %*% A
# Possible to have one-row or column matrix by adding drop = FALSE to index
# Print the entire matrix
A
# Print column one as a vector
A[ , 1]
# Print row one as a vector
A[1, ]
# Print row one as a one-row matrix
A[1, , drop = FALSE]
# Print col one as a one-col matrix
A[ , 1, drop = FALSE]
# Lists (which won't be used much as the Data Frame is the data structure of choice for
# most statistical analyses)
address <- list("Sam Johnson", "16902 W. 83rd Terr.", "Shawnee", "KS", 66219)
address
# Quick refresher on retrieving list elements; pull my name
address[[1]]
# Also pulls my name, but this time as a data slice
address[1]
# Data Frames
# Data Frames must have the same kind of data in each column
# Data Frames have rows and columns
# Data Frames can have a combination of numeric, character or logical data
# Think of Data Frame like an Excel spreadsheet
# Most of the time we'll be importing data into Data Frames, not building in R
# Get ready for the lapply() function to use when certain functions don't work with Data Frames
# Building a simple Data Frame from vectors
people <- c("Kim", "Bob", "Ted", "Sue", "Liz", "Amanda", "Tricia", "Jonathan", "Luis", "Isabel")
scores <- c(17, 19, 24, 25, 16, 15, 23, 24, 29, 17)
people
scores
# Create the Data Frame from the People and Scores vectors
quiz_scores <- data.frame(people, scores)
quiz_scores
# We don't need the individual People and Scores vectors anymore, so let's remove
rm(people, scores)
# Shouldn't be there as individual vector; error
people
# Should find in the quiz_scores Data Frame, though
quiz_scores
# Like a matrix, can get individual columns and rows by indexing
# But this pulls a data slice as a vector, not the column
quiz_scores[ , 2]
# This pulls the column only
quiz_scores[2]
quiz_scores$people
# If we attach() the Data Frame to the search path, we have immediate vector access to each column
attach(quiz_scores)
people
scores
# More sophisticated manipulation with cbind() (column bind) and rbind() (row bind)
# to bring Data Frames together
# Creating a Data Frame in the R Data Editor
# Invoke the Data Frame and initialize the columns with their types
Min_Wage <- data.frame(Year = numeric(), Value = numeric())
# Open the R Data Editor on the Min_Wage Data Frame
Min_Wage <- edit(Min_Wage)
# Show the Data Frame
Min_Wage
# Reading a CSV file into a Data Frame
# Taken from http://www.infoplease.com/ipa/A0104652.html
# R does not like numbers as column headers; placed Yr at beginning of each year
# NOTE: I pulled the US averages
percapita <- read.csv("c:\\r\\data\\uspercapita.csv", header = TRUE)
head(percapita)
class(percapita)
# R defaults the read-in data as a Data Frame
# To get the column averages, use the colMeans() function; means() has been deprecated
# In this case, get the column averages for columns 2 through 9
colMeans(percapita[2:9]) # Returning NA b/c some missing data?
# summary() function returns frequency counts
summary(percapita[2:9])
# If I'd have stored summary in an object, I would have had access to the summary information
summaryPerCapita = summary(percapita[2:9])
summaryPerCapita
class(summaryPerCapita)
# Handling missing data in R
# To remove NAs (really ignore), such as in stats, use na.rm = TRUE
# Fill a vector
x <- c(1, 2, 3, 4, 5, 6, NA, 8, 9, 10)
# Show the vector
x
# Vector average; mean() returns NA (not an error) when the vector contains NA
mean(x)
# This version works because it ignores the NA
mean(x, na.rm = TRUE)
# NA is still in the vector, though
x
##### Flow control: Looping, conditionals, and branching
# Looping
# R offers 3 types of explicit loops
# Conditional statements (if-then-else, for example) let us execute a branch of code when certain conditions are satisfied
# R offers the standard arithmetic operators, which follow the PEMDAS order of operation
# +, -, *, /, ^ or ** for exponentiation, %% modulus (remainder), and %/% for integer (truncated) division
# Comparison operators are the usual, but unlike SAS, do not have letter equivalents
# >, <, >=, <=, == equal to, != not equal to
# Operators evaluate to TRUE and FALSE (note case is important!)
# Logical operators also evaluate to TRUE and FALSE
# R provides vectorized and unvectorized versions of the "and" and "or" operators
# & and (vectorized), && logical and (unvectorized), | logical or (vectorized), || logical or (unvectorized), ! logical not
# Looking at vectorized AND
x <- 0:2
y <- 2:0
# Checks each pair (0,2), (1,1), (2,0)
(x < 1) & (y > 1)
# Unvectorized checks only the first value in each vector, L to R,
# returning only the first logical result
(x < 1) && (y > 1)
!y == x
identical(x, y) # identical tests for exact equality
##### Input and Output
# R defaults to 7 digits for numerical data, which can be changed with the options() function
getOption("digits")
# Note digits = 4 includes numbers larger than 0, in the 1s, 10s columns, etc.
# pi will be 3.142; digits is not just after the decimal
options(digits = 4)
pi
# Prompting for feedback from the keyboard with readline()
size <- readline("How many digits do you want to display?")
pi
options(digits = size)
pi
# Reading data using the scan() function; similar to read.table()
# Scan opens a channel to enter data at the command line until nothing more to enter and stores in the object
x <- scan()
# Scan() is the slightly harder way to do x <- c(1, 2, 3, 4, 5) or x <- seq(1:5) or x <- 1:5
##### Understanding the R environment
# Everything to R is an object; functions have names too
# CV is a function of my design that is calculating coefficient of variance
CV <- function(x) sd(x) / mean(x)
# Print x
x
# Print the coefficient of variance of x with my CV function
CV(x)
##### Implementing Program Flow
# Three basic loops: for, while, and repeat
# Remember, though, that vectorized implicit looping is far better and more effective
# The for loop
i <- c(1:5)
for(n in i) print(n * 10)
# The above loop was unnecessary, as looping through i can be done implicitly
print(i * 10)
# For loops can be used with lists and vectors
carbrands <- c("Honda", "Toyota", "Ford", "GM", "BMW", "Fiat")
for(brand in carbrands) {
print(brand)
}
# Next two are functionally equivalent
carbrands
print(carbrands)
##### While and Repeat loops
# Most R programmers try to avoid these and use explicit for loops if required
# While loops
even <- 0
while(even < 10) {
even <- even + 2
print(even)
}
# Repeat loop, where a condition is met to break out of the loop
i <- 1
repeat
{
print(i)
i <- i + 1
if(i > 5)
{
break
}
}
##### Avoiding explicit looping; using the apply() function family
# Note that median is not applicable to the data frame
median(percapita[2:9])
# The lapply() function allows us to find the median for each column in the data frame
# lapply() returns a list
lapply(percapita[2:9], median)
# While lapply() has ugly output, wrapping a DATAFRAME around the output can help
as.data.frame(lapply(percapita[2:9], median))
# lapply() is pretty ugly output; try apply() with three arguments (data, 1 = rows or 2 = columns, called function)
# apply(the percapita data frame, columns, median of each column)
apply(percapita[2:9], 2, median)
# tapply() applies a function to arrays of variable length (ragged arrays), defined by a vector
# Three stats classes of differing size and their scores, in three vectors
class1 <- c(17, 18, 12, 13, 15, 14, 20, 11, 16, 17)
class2 <- c(18, 15, 16, 19, 20, 20, 19, 17, 14)
class3 <- c(17, 16, 15, 18, 11, 10)
# Combine the vectors into a Class list
classes <- list(class1, class2, class3)
# lapply() returns a list
lapply(classes, length)
# sapply() (simplify apply) tries to return a bit more table-like structure
sapply(classes, length)
# We can also use sapply() to apply the mean to the list
sapply(classes, mean)
sapply(classes, summary)
##### A simple expense report
# Invoke the Data Frame and initialize the columns with their types
# See page 40
##### Generating Pythagorean triples with a, b, and c where a2 + b2 = c2
# Generate a Pythagorean triple (a, b, c with a^2 + b^2 = c^2) from a pair
# of generator values x = c(s, t) using Euclid's formula:
#   a = t^2 - s^2, b = 2 * s * t, c = s^2 + t^2
# For a triple with positive legs, t must be greater than s > 0.
# Prints the triple and invisibly returns it as c(a, b, c) so callers can
# use the result programmatically (previously the function returned NULL).
pythag <- function(x)
{
  s <- x[1]
  t <- x[2]
  a <- t^2 - s^2
  b <- 2 * s * t
  c <- s^2 + t^2
  cat("The Pythagorean triple is: ", a, b, c, "\n")
  invisible(c(a, b, c))
}
input <- scan()
##### Writing reusable functions
# Calculating the confidence interval of a single mean
# Confidence interval for a single mean, using the t distribution.
#
# NOTE(review): this name shadows stats::confint(); kept unchanged for
# backward compatibility with existing callers of this script.
#
# x     - numeric vector of sample observations
# alpha - significance level; confidence level is (1 - alpha) * 100%
#
# Prints the mean, standard error, and limits; invisibly returns them as a
# list (previously the function returned NULL). The unused `conflevel`
# local from the original was removed.
confint <- function(x, alpha = .05)
{
  # Standard error of the mean
  stderr <- sd(x) / sqrt(length(x))
  # Two-tailed critical t value with n - 1 degrees of freedom
  tcrit <- qt(1 - alpha / 2, length(x) - 1)
  margin <- stderr * tcrit
  lower <- mean(x) - margin
  upper <- mean(x) + margin
  cat("Mean: ", mean(x), "Std. Error: ", stderr, "\n")
  cat("Lower Limit: ", lower, "\n")
  cat("Upper Limit: ", upper, "\n")
  invisible(list(mean = mean(x), stderr = stderr,
                 lower = lower, upper = upper))
}
##### Avoiding loops with vectorized operations
# Understanding where vectorized implicit looping is available can save a ton of time and code
# This example is Euler's formula to find prime numbers, first in a function call
# Formula is x^2 - x + 41
# Euler's prime-generating polynomial: evaluates n^2 - n + 41 for each
# element of x and prints one line per value.
#
# BUG FIX: the original definition had no braces, so the function body was
# only flush.console() and the for loop executed immediately at top level
# (against the variable x in the calling environment) instead of running
# when TryIt() is called.
TryIt <- function(x)
{
  flush.console()
  for (n in x)
  {
    result <- n^2 - n + 41
    cat("For x =", n, "Result is", result, "\n")
  }
}
# Implicit looping makes this much, much easier
# Re-set x as a vector again for code purposes only
x <- 0:50
y <- x^2 - x + 41
y
##### Vectorizing if-else statements with ifelse()
x <- -5:5
x
# Square roots of negative numbers will have Not a Number (NaN) outputs
sqrt(x)
# Do ifelse() inside the sqrt function to leave NAs alone
sqrt(ifelse(x >= 0, x, NA))
# Moving ifelse to the outside creates NaNs again
ifelse(x >= 0, sqrt(x), NA)
##### Chapter 4: Summary statistics
# Mean, median and mode
# Remember that the mode() function returns the storage class of the object, not the most common value in a vector
# Summary() is the six-number summary of a vector
# colMeans() gives all of the column averages of the supplied object, typically a data frame
# Put percapita in the search stream
attach(percapita)
head(percapita)
summary(percapita[2:9])
colMeans(percapita[2:9])
mean(percapita[2:9])
# Median is NOT vectorized, so have to apply() wrapper
# apply(percapita cols 2-9, 2 = columns, using the median() function
apply(percapita[2:9], 2, median)
# Can also use apply with the quantile function to find the median and other quantiles (percentiles)
apply(percapita[2:9], 2, quantile)
# Add a specific quantile to one column of data to find its value
quantile(percapita$Yr2005, 0.75)
# R has no built-in method for finding mode
# Use the table and sort functions to identify the modal value
# Manually check frequency count of values; 36421 is the only repeat, thus the modal value
sort(table(Yr2010))
apply(percapita[2:9], 2, mean, trim = .5)
x <- c(0:10, 50)
xm <- mean(x)
c(xm, mean(x, trim = 0.1))
##### Measuring location via standard scores
# scale() is the z-score function call
# Accepting the defaults uses mean as center of measure and sddev as scaling value
zYr2010 <- scale(Yr2010)
zYr2010
# Let's make sure the defaults of stddev = 1 and mean are true
mean(zYr2010)
# Obviously sd is not vectorized
apply(zYr2010, 2, sd)
# This works, too, just prints to the console; doesn't save the results to another object
scale(percapita[2:9])
##### Measuring variability
# Many ways to measure spread, or dispersion: Variance, std dev,
# range, mean absolute deviation, median absolute deviation, IQR,
# coefficient of variation
# Variance and Standard Deviation
# Get the variance and stddev for Yr2010
var(Yr2010)
# Are var and sd vectorized? YES, but get a variance-covariance matrix
var(percapita[2:9])
sd(Yr2010)
# R recommends using sapply wrapper because sd is deprecated
sd(percapita[2:9])
sapply(percapita[2:9], sd)
##### Range
# Most commonly defined as the difference between the highest and lowest
# values in a data vector. R gives you the high and low value; you do the rest
# This uses the WEIGHTS data we created in Chapter 3
range(weights)
# Width of the data: distance between the largest and smallest values in x.
range.diff <- function(x) diff(range(x))
range.diff(weights)
##### Mean and Median Absolute Deviations
# mad() function where we can specify measure of central tendency
# by supplying an argument
mad(weights)
mad(weights, center = mean(weights))
sd(weights)
# Because these values are relatively close to each other, the
# data may be symmetrical. Let's find out with a histogram
hist(weights)
##### IQR
# Difference between 3rd and 1st quartiles, a view of the middle 50% of the data
IQR(weights)
# My first real function: IQRVALS finds the LOW and HIGH values of an IQR'd data set
# Compare the LOW and HIGH VALS to the MIN and MAX of the data set to determine whether
# using the IQR values is even required
# Remember to use apply() if more than one column passed in
# Tukey fences for a numeric vector: Q1 - 1.5*IQR and Q3 + 1.5*IQR.
# If either fence falls outside the observed data range, recommends using
# the data's own min/max instead. Prints the recommendation and invisibly
# returns the computed values (previously the function returned NULL).
iqrvals <- function(x) {
  # Observed extremes of the data
  minx <- min(x)
  maxx <- max(x)
  # Interquartile range and the 1.5x multiplier for the fences
  iqrx <- IQR(x)
  iqrmult <- 1.5 * iqrx
  # quantile() returns a named value ("25%"/"75%"); drop the name so the
  # invisible return is clean. cat() output is unaffected.
  lowiqrval <- unname(quantile(x, 0.25) - iqrmult)
  highiqrval <- unname(quantile(x, 0.75) + iqrmult)
  # The fences are only useful if both lie inside the data range.
  use_iqr <- !(lowiqrval < minx || highiqrval > maxx)
  if (!use_iqr)
    cat("Use data min:", minx, "and max: ", maxx, "\n")
  else
    cat("Low IQR value: ", lowiqrval, "and High IQR value: ", highiqrval, "\n")
  invisible(list(low = lowiqrval, high = highiqrval, use_iqr = use_iqr))
}
##### Coefficient of variation
# Measuring the standard deviation relative to the size of either the sample or population
# mean. Often used as measure of relative risk, showing variability relative to size of
# average; good for rates of return. Not built in to R but can easily be turned into a
# function.
# CV is simply the standard deviation divided by the mean of the data (sample or population)
# Coefficient of variation: the standard deviation expressed as a fraction
# of the mean, giving a scale-free measure of relative variability.
CV <- function(x) {
  sd(x) / mean(x)
}
CV(weights) #0.15258
##### Covariance and Correlation
# Covariance is the positive, zero, or negative numerator value in calculating correlation.
# Correlation is the value between -1 and 1 that indicates the directional relationship
# of change between two variables
# Hypothetical advertising and sales figures
region <- c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
ads <- c(5.5, 5.8, 6.9, 7.2, 6.3, 6.5, 6.4, 5.7, 6.1, 6.8)
sales <- c(101, 110, 120, 125, 117, 118, 120, 108, 115, 116)
adsales <- data.frame(region, ads, sales)
adsales
cov(ads, sales)
cor(ads, sales)
##### Measuring symmetry (or lack thereof) p. 74
# Examples use the PSYCH package, which we cannot pull down right now
# Button ticket submitted 7/11/14 re: how to mirror CRAN download site
# Skewness (data balance to one side or another, positive (left) and negative (right)-skewed)
# and kurtosis (peaks, such platykurtic (flat) and leptokurtic (tall) or meso- (just right)
# Functions not available in Base R packages, but skew() and kurtosi()
# available in PSYCH
##### Chapter 5: Creating tables and graphs p. 77
# Frequency distributions and tables
# Already seen that table() produces a simple frequency distribution
# Using base R faithful data set
# Print the first few rows with headers
head(faithful)
# Pull waiting out of data frame into its own vector
waiting <- faithful[ , 2]
# Range of waiting = time between eruptions
range(waiting)
# Create a basic frequency distribution with table()--too many values to be very helpful
table(waiting)
# Create intervals (bins) to better view the data
# Not exact science though my rule of thumb is 2^x bins
# based on number of data points; for example, 272 eruptions
# would be 2^8 or 2^9, so we need 8 or 9 bins, probably 8
# Bins are 8 * 7.5 = 60, the width of our data representation
# Use sequence function to build bins vector
bins <- seq(40, 100, by = 7.5)
bins
# CUT uses the bins to divide the WAITING data into intervals;
# right = FALSE defines whether the intervals should be closed on the right side
wait_time <- cut(waiting, bins, right = FALSE)
# TABLE produces a tabular view of the data in a row-like format
table(wait_time)
wait_time
# CBIND converts the TABLE function into a more vertical columnar view
cbind(table(wait_time))
# table() with two variables gives a cross-tabulation, whether
# quantitative or qualitative variables
##### Pie charts and bar charts (bar plots in R parlance) p. 79
# While both can be used for representing the same kind of data
# humans are more inclined to bar charts; use them over pie charts
# wherever possible for nominal (Repulican or Democrat) or ordinal data
# (1st place, 2nd place, etc.)
# Using DuPont's data on car colors in 2012 through table editor
car_colors <- data.frame(color = factor(), percentage = numeric())
car_colors <- edit(car_colors)
car_colors
# Attach the object to the search chain
attach(car_colors)
# Create the base pie with ugly pastels by default
pie(percentage)
# Add a new set of colors with a colors vector
piecolors = c("#C0C0C0", "black", "white", "#696969", "red", "blue", "brown", "green", "#F0E68C")
piecolors
# Rebuild the pie chart with the actual colors represented
pie(percentage, col = piecolors)
# Let's present the actual names of the pieces of pie
names(percentage) = c("Silver", "Black", "White", "Gray", "Red", "Blue", "Brown", "Green", "Other")
names(percentage)
# Rebuild the pie with the new names
# This would work, too, without creating a new object:
# pie(percentage,
col = c("Silver", "Black", "White", "Gray", "Red", "Blue", "Brown", "Green", "Other"),
main = "Pie Graph of Car Color Preferences")
pie(percentage, col = piecolors, main = "Pie Graph of Car Color Preferences")
# Bar charts p. 83
# Mostly a change in the function call; not going to reproduce all of the previous code
barplot(percentage, col = piecolors, main = "Bar Chart of Car Color Preferences")
# Boxplots
# Also called box plots or box-and-whisker plots; popularized by statistician John Tukey
# Represents five-number summary of summary()
# Relative lengths of whiskers indicates skew; IQR = length of box; median = line in box
# If distribution relatively unskewed median be close to center of box and whiskers roughly
# same length; outliers represented by small circles
test1 <- c(72.41, 72.73, 71.63, 70.26, 77.98, 83.48, 87.25, 84.25)
test2 <- c(92.71, 86.35, 87.80, 107.92, 58.54, 91.93, 103.85, 101.56)
test3 <- c(73.63, 90.97, 44.59, 67.04, 78.57, 82.36, 72.96, 78.55)
quizzes <- data.frame(test1, test2, test3)
boxplot(quizzes)
detach(percapita)
##### Chapter 6: Discrete probability distributions
# Discrete probabilities must satisfy these conditions:
# 1) Probabilities sum to 1
# 2) Probability of any one outcome is between 0 and 1
# 3) List of outcomes is exhaustive (can be no others) and mutually exclusive (no outcome in more than 1 category)
# Jonathan sells cars and has tracked his sales on Saturday
saturday_sales <- data.frame(numsold = c(0, 1, 2, 3, 4, 5), prob = c(0.6, 0.15, 0.1, 0.08, 0.05, 0.02))
attach(saturday_sales)
# Calculate the mean and variance given the probability distribution
mu <- sum(numsold * prob)
variance <- sum((numsold - mu)^ 2 * prob)
standarddev <- sqrt(variance)
# Bernoulli processes have only two outcomes per trial, each of which is
# independent, such as the toss of a coin, a free throw attempt, or the
# selection of the correct answer on a multiple choice question
# One outcome is a SUCCESS and the other FAILURE.
x <- c(0, 1, 2, 3, 4, 5)
# DBINOM() is the density or mass function with probabilities for each outcome
# Other options include PBINOM (cumulative density or mass function for the distribution),
# QBINOM (Quantile function or reverse lookup to find the value of the random variable
# associated with any probability, RBINOM (Random function to generate random samples from
# the probability distribution))
# Show the results in a column (CBIND) of the density binomial distribution of vector x
cbind(dbinom(x, size = 5, prob = .5))
# MicroSort technique to increase chances of having a specific gendered child, where
# probability is .91 for female and .76 for male
# Probability that of 10 families exactly 8 will have a male?
dbinom(8, size = 10, prob = .76)
# Of 10 families, probability that all 10 will have a male child
dbinom(10, size = 10, prob = .76)
# Probability that <=6 families will have a male child
xvec <- 0:6
sum(dbinom(xvec, 10, .76))
# Or use PBINOM for cumulative probability of <=6 families having male child
# If use XVEC here function returns cumulative probability of each vector member!
pbinom(6, 10, .76)
# Want to generate 100 samples of size N = 10 with p = .76; RBINOM generates this
# set of a random binomial distribution
randombinom <- rbinom(100, 10, .76)
# Can find confidence intervals through quantiles of the distribution at 95% empirical
# confidence. This example notes that of 10 families sample, between 5 and 10 will have
# a male child with 95% confidence
quantile(randombinom, .025) # 5 is lower confidence limit
quantile(randombinom, .975) # 10 is high confidence limit
# Chapter 7 Computing Normal Probabilities
# Learn early that the distribution of sample means, regardless of the shape of the
# parent distribution, approaches a normal distribution as sample size increases.
# The SCALE() function produces z-scores for any set of data, which gives us
# SCALE is a combination stat; gives the location of the raw score relative to
# the mean and the number of standard deviations the raw score is away from
# the mean, which makes z-scores both descriptive and inferential
# Most often we are interested in finding the areas under the normal curve
# (or assuming the curve is normal, as Karl Pearson told us, even though he
# eventually regretted the term 'normal').
# DNORM can be used to draw a graph of a normal distribution
# We'll put two curves on the same plot, too
# Build a sequence starting at 0 to 40 going up by .5
xaxis <- seq(0, 40, .5)
# Show me xaxis
xaxis
# Build two normal distributions with the same mean but different stdev
y1 <- dnorm(xaxis, 20, 3)
y2 <- dnorm(xaxis, 20, 6)
# Plot the curves
plot(xaxis, y2, type = "l", main = "Comparing Two Normal Distributions")
# POINTS adds the second line to the graph and TYPE = "l" tells R to plot lines
points(xaxis, y1, type = "l", col = "red")
# Different normal distributions have different amounts of spread based on the
# standard deviation. It's often helpful to convert a normal distribution to
# the standard normal distribution and work with z-scores, which have a mean
# of 0 and stdev of 1
# Finding probabilities using the PNORM function
# PNORM() function finds a left-tailed probability, where the critical value
# of z for a 95% confidence interval is +/-1.96, and the area between those
# values is 95% of the standard normal distribution. PNORM defaults to mean
# of 0 and stdev 1; these defaults can be changed as arguments in the call.
# Finding a left-tailed probability
# This is the CUMULATIVE probability under the standard normal distribution
# up to a z-score of 1.96.
pnorm(1.96) # This says 97.5% of the area lies to the left while 2.5% is to the right
# Finding the area between two z-scores
# Is simply a matter of subtraction
pnorm(1.96) - pnorm(-1.96)
# Finding a right-tailed probability
# Is just as easy; take the complement of the left-tailed probability
# BUG FIX: the original called prob(), which does not exist in R; pnorm()
# is the standard normal CDF used throughout this chapter.
1 - pnorm(1.96)
# You took a standardized test with a mean of 500 and stdev of 100 and scored a 720.
# How'd you do? How much better than what % of test takers?
# First standardize your score against the test
scale(720, 500, 100)
pnorm(2.2) # Your score is better than 98.6% of other test takers
# With PNORM, given a z-score, it returns the left-tailed probability in the sample
# that the value is higher than
# Finding critical values using the QNORM function
# With QNORM, given a probability, it returns the equivalent z-score
# of the value
qnorm(.975)
# To find a one-tailed critical value...
# Subtract the alpha level (.05 = standard) from 1
qnorm(1 - .05)
# Using RNORM to generate random samples
# RNORM(sample size, mean, stdev) generates a random sample from a normal
# distribution. Omitting MEAN and STDEV results in a mean of 0 and stdev 1.
samples <- rnorm(50, 100, 15)
samples
hist(samples) # Not exactly "normal"
# What if we bump the sample size to 1000?
sample2 <- rnorm(1000, 50, 15)
hist(sample2) # Much more "normal"-looking
# R does not include a one-sample z-test
# One-sample z test statistic: how many standard errors the sample mean
# lies from the hypothesized population mean mu.
#   xbar  - sample values (or an already-computed mean; mean() of a scalar
#           is the scalar itself)
#   mu    - hypothesized population mean
#   stdev - population standard deviation
#   n     - sample size
ztest <- function(xbar, mu, stdev, n) {
  standard_error <- stdev / sqrt(n)
  (mean(xbar) - mu) / standard_error
}
# Generate 100 weights (mean 187, sd 17) for adult males who exercise
# NOTE: rnorm() is random, so the numbers below differ on every run unless
# set.seed() is called first
exmen <- rnorm(100, 187, 17)
# Let's compare that group with weights of all adult males, averaging 191 pounds
ztest(exmen, 191, sd(exmen), length(exmen)) # -4.852295 for one run; -2.403974 for another
# This returns a z-score, so convert it to a left-tailed probability
pnorm(-2.403974) # ~.0081; much lower than .05 so we could reject the NULL hypothesis
# that there is no difference in the weights of exercising adult males and average
# adult male
# The above could be wrapped into a single line of code
pnorm(ztest(exmen, 191, sd(exmen), length(exmen)))
# Chapter 8 Creating confidence intervals
# A confidence interval (or interval estimate) is a range of possible values used
# to estimate the true value of a population parameter, which is associated with
# a level of confidence, such as 90%, 95% or 99%. The confidence level is the complement
# to our alpha level, so these three confidence levels correspond to alpha levels of
# .1, .05, and .01
#
# The general idea behind a confidence interval is that we have an estimate of a parameter
# and we calculate a margin of error, adding the margin to the estimate to get an upper
# limit and subtracting the margin for a lower limit
# If our confidence interval for a population mean has a lower limit of 164.13 and an
# upper limit of 180.97, we can say we are 95% confident that the true population mean
# (for men who exercise, for example) is contained within that range.
#
# INCORRECT:
# 1) There is a 95% chance (.95 probability) that the true value of the mean is between those values
# 2) 95% of the sample means fall between those numbers
# Let's build a sample size function!
# E = Desired margin of error
# sigma = stdev
# alpha = complement of confidence level
# Estimate the sample size needed for a desired margin of error.
# E: desired margin of error, sigma: (estimated) population standard deviation,
# alpha: complement of the confidence level (default .05 -> 95% confidence).
# Prints the recommendation and invisibly returns the estimated sample size
# so the value can also be captured programmatically.
sampsize.est <- function(E, sigma, alpha = .05) {
  # qnorm(alpha / 2) is the NEGATIVE critical value, but squaring below makes
  # the sign irrelevant, so this matches the usual qnorm(1 - alpha / 2) form
  n <- ((qnorm(alpha / 2) * sigma) / E) ^ 2
  estsize <- ceiling(n) # Round up to the next whole observation
  cat("For a desired margin of error of:", E, "the required sample size is:", estsize, "\n")
  invisible(estsize)
}
# Does it work with our random sample of adult male exercisers?
sampsize.est(5, sd(exmen)) # 49 required to be 95% confident that the true pop mean is +/-5 pounds
sampsize.est(2.5, sd(exmen)) # 195
# Is there a general rule of thumb for sample size here?
# Yes, about 4x the sample size for each 1/2 of margin of error
sampsize.est(1.25, sd(exmen)) # 780
sampsize.est(10, sd(exmen)) # Predict 12, actually 13
sampsize.est(7.5, sd(exmen)) # 1 1/2 original MOE = 1/2x sample, predict 24; actual 22
sampsize.est(5, sd(exmen), alpha = .01) # NOTE alpha change; sample needed = 85
sampsize.est(5, sd(exmen), alpha = .1) # NOTE alpha change; sample needed = 35
# Confidence intervals for the mean using the t distribution
# When we don't know the population standard deviation we use the sample
# standard deviation as a reasonable estimate. We also use the t distribution
# instead of the normal distribution to calculate the confidence interval.
# Confidence interval for the mean using the t distribution (sigma unknown).
# x: numeric data vector; alpha: significance level (default .05);
# two.tailed: FALSE gives a one-sided interval (conflevel uses 1 - alpha
# rather than 1 - alpha / 2).
# Prints the interval and invisibly returns a list (mean, stderr, lower,
# upper) so results can be reused programmatically.
confint.mean <- function(x, alpha = .05, two.tailed = TRUE)
{
  cat("\t", "Confidence Interval for the Mean", "\n")
  cat("Mean: ", mean(x), "\n")
  df <- length(x) - 1
  conflevel <- ifelse(two.tailed == TRUE, 1 - alpha / 2, 1 - alpha)
  stderr <- sd(x) / sqrt(length(x))
  tcrit <- qt(conflevel, df)
  margin <- stderr * tcrit
  lower <- mean(x) - margin
  upper <- mean(x) + margin
  if(two.tailed == FALSE) {
    cat("You are doing a one-tailed test.", "\n")
    cat("If your test is left-tailed, the lower bound", "\n")
    cat("is negative infinity. If your test is right-tailed", "\n")
    cat("the upper bound is infinity.", "\n")
    cat("Either add the margin", margin, "to or subtract it from", "\n")
    cat("the sample mean as appropriate.", "\n")
    # BUG FIX: the two bounds below were swapped in the original. A
    # left-tailed test has an UPPER bound of mean + margin; a right-tailed
    # test has a LOWER bound of mean - margin.
    cat("For a left-tailed test, the upper bound is", upper, ".", "\n")
    cat("For a right-tailed test, the lower bound is", lower, ".", "\n")
  }
  cat("Mean: ", mean(x), "Std. Error: ", stderr, "\n")
  cat("Lower Limit: ", lower, "\n")
  cat("Upper Limit: ", upper, "\n")
  invisible(list(mean = mean(x), stderr = stderr, lower = lower, upper = upper))
}
confint.mean(exmen)
t.test(exmen)
confint.mean(exmen, two.tailed = FALSE)
##### Chapter 12: Correlation and Regression
# Covariance and Pearson product-moment correlation are cov() and cor(), respectively
# Create a matrix of weights and heights
weights <- c(237.1, 220.6, 214.5, 213.3, 209.4, 204.6, 201.5, 198.0,
193.8, 191.1, 189.1, 186.6, 179.3, 176.7, 175.8, 175.2,
174.8, 173.3, 172.9, 170.1, 169.8, 169.1, 166.8, 166.1,
164.7, 164.2, 162.4, 161.9, 156.3, 152.6, 151.8, 151.3,
151.0, 144.2, 144.1, 139.0, 137.4, 137.1, 135.0, 119.5)
y <- sort(weights)
y
x <- sort(rnorm(40, 70, 6))
x
matrix <- cbind(x, y)
head(matrix)
cov(x, y)
cor(x, y)
plot(x, y, xlab = "height", ylab = "weight", main = "Weights and Heights")
abline(lm(y ~ x))
##### Three or more variables in a correlation analysis
# Let's add resting heart rate
z <- rnorm(40, 80, 10)
z <- sort(z)
z
matrix <- cbind(x, y, z)
cov(matrix)
cor(matrix)
var(x)
cov(x, y)
cov(x, z)
cor(x, z)
# Computations to calculate effect size
# Suppose the primary study reported a t-test
# value for differences between 2 groups. Then,
# where tes = T Effect Size where
# t = t-value, n.1 = first group size (n), and
# n.2 = second group size (n)
tes(t = 1.74, n.1 = 30, n.2 = 31)
# Or, more simply,
tes(1.74, 30, 31) # Produces effect sizes for MeanDifference, Correlation, LogOdds, Fishers z, and gives the total sample size
# Predicting milk prices
milk.prices <- read.csv("C:\\R\\Data\\RBook\\milkprices.csv", header = TRUE, stringsAsFactors = FALSE)
plot(milk.prices$Index, milk.prices$Average)
abline(lm(milk.prices$Average ~ milk.prices$Index)) # Doesn't look very linear
results <- lm(milk.prices$Average ~ milk.prices$Index)
summary(results)
# Would a quadratic formula be a better fit if we square the index?
milk.prices$indexsq <- milk.prices$Index ^ 2
results <- lm(milk.prices$Average ~ milk.prices$Index + milk.prices$indexsq)
summary(results)
milk.prices$predicted <- predict(results)
plot(milk.prices$Index, milk.prices$Average); lines(milk.prices$predicted)
# Chapter 14: Logistic Regression p.201
# Univariate and multiple regression is great for continuous dependent variable prediction. Sometimes we want to determine
# the outcome of a binary (dichotomous) outcome, where 1 = success and 0 = failure. Fisher developed discriminant
# analysis to determine group membership, but the technique works ONLY WITH CONTINUOUS VARIABLES. Logistic regression
# uses both continuous and binary predictors, a major advantage.
#
# Examples of dichotomous outcomes include the presence or absence of defects, attendance and absenteeism, and student retention
# versus dropout. Logistic regression seeks out the dichotomous outcome answer, where p = the proportion of 1s (successes) and q is
# the proportion of 0s (failures), in this case q = 1 - p. Correlation and regression is problematic with dichotomous outcomes because
# they may return values < 0 and > 1. The logistic curve, on the other hand, has a very nice property of being asymptotic to 0 and 1
# and always lying between 0 and 1.
#
# Probability can be converted to odds, where if p is the probability of success, the odds in favor of success are: p / (1 - p).
#
# Odds can be converted to probabilities, too. If the odds are 5:1, then the probability of success is 5/6, or 83%. Odds can be greater
# than 1; for example, if the probability of rain is .25 then the odds of rain are .25 / .75 = .33, but the odds against rain are
# .75 / .25 = 3, or 3:1.
# Logistic regression works with LOGIT, the natural logarithm of the odds. Logistic regression allows us to model the probability of
# "success" as a function of the logistic curve, which is never less than 0 and never greater than 1.
# Logistic Regression with One Dichotomous Predictor
# 50 pairs of observations, which represent binge drinking amongst 25 men and 25 college students.
gender <- c(rep(1, 25), rep(0, 25))
binge <- c(rep(1, 12), rep(0, 13), rep(1, 5), rep(0, 20))
bingedata <- data.frame(gender, binge)
binge.table <- table(bingedata)
chisq.test(table(bingedata)) # p = .07325, approaching significance; could be better with larger sample
# What are the proportions of men and women binge drinking?
men.binge.prop <- binge.table["1", "1"] / sum(binge.table["1", "0"], binge.table["1", "1"]) # .48 or 48%
women.binge.prop <- binge.table["0", "1"] / sum(binge.table["0", "0"], binge.table["0", "1"]) # .2 or 20%
# What are the odds of men and women being binge drinkers?
odds.men <- men.binge.prop / (1 - men.binge.prop) # .9231
odds.women <- women.binge.prop / (1 - women.binge.prop) # .25 or 1:4
# The INTERCEPT term for women is the log(odds)
log(odds.women) # -1.386294
# The INTERCEPT term for men
log(odds.men) # -.08001771
# The slope will be the difference between the two log(odds) INTERCEPT values
b1 <- log(odds.men) - log(odds.women)
b1 # 1.306252
# Let R do the logistic regression now
results <- glm(binge ~ gender, family = "binomial")
summary(results) # Note the (Intercept) is the WOMEN's INTERCEPT we calculated and the gender ESTIMATE
# is the B1 slope we calculated as the difference between MEN and WOMEN. Unlike the CHI-SQUARE test the
# GLM summary notes that GENDER is significant to the model.
# Logistic Regression with a Continuous Predictor p.205
# The book example reviewed a liberal arts college's returning sophomores and their retention rate, which
# was only 62%. The goal was to increase retention by predicting the likelihood of returning when understanding
# what drives students to stay or leave after their freshman year. The single best predictor turned out to be
# the student's interest in the school in the first place; this was marked by whether the student sent SAT scores
# to the school 1st on their list. Another big predictor was, of course, high school GPA; past behavior is a
# good predictor of future behavior in this case.
# A T-TEST between HSGPA ~ Retained found a very significant difference in HSGPA between those who returned for their sophomore
# and those who did not (were not retained). Side note: Would an ANOVA do this for all of the variables, including any
# potential interactions?
#
# The results were calculated with results <- glm(Retained ~ HSGPA, family = "binomial"); summary(results).
# The results$fitted.values were plotted against the HSGPA with:
# predicted <= results$fitted.values
# plot(HSGPA, predicted)
# The plot found that students with HSGPAs < 3.0 had less than a .50 chance of staying on, while those with HSGPAs > 4.0
# had about .70 probability of returning for their sophomore year.
# Logistic regression with multiple predictors
# Working with the most recent Lionshare data; can logistic regression tell us the probability of a participant being at 70% IRR?
lion.orig <- read.csv("c:\\r\\data\\Lionshare JPM Data 03282014.csv", stringsAsFactors = TRUE)
lion.orig$begin_date <- as.Date(as.character(lion.orig$begin_date), format = "%m/%d/%Y")
lion.orig$Eligibility_Date <- as.Date(as.character(lion.orig$Eligibility_Date), format = "%m/%d/%Y")
lion.orig$end.IRR.70 <- factor(ifelse(lion.orig$End_IRR < .7, "Less than 70%", "Greater than 70%"))
# Drop unnecessary columns
lion.orig$Change_To_IRR <- NULL
lion.orig$Chg_IRR <- NULL
lion.orig$Begin_IRR <- NULL
lion.orig$End_IRR <- NULL
lion.orig$part <- NULL
lion.orig$snp500_3Yr <- NULL
lion.orig$snp500_5Yr <- NULL
lion.orig$Three_Year_PROR <- NULL
lion.orig$Five_Year_PROR <- NULL
lion.orig$OTTR_2009 <- NULL
lion.orig$OTTR_2011 <- NULL
summary(lion.orig)
# Lots of NA values in the set; let's only use complete rows
lion.samp <- na.omit(lion.orig)
# Get 1000 records to work with; first randomize the data
# Don't need to do this with only 1,044 rows of complete data
# set.seed(12345)
# lion.rand <- lion.orig[order(runif(nrow(lion.orig))), ]
#
# # Pull 1000 records for use
# lion.samp <- lion.rand[1000:2000, ]
# Dump the original files because we don't need them now
# rm(lion.orig, lion.rand)
# str(lion.samp)
# CUT? begin_date (col 3), snp500_3yr (5), snp500_5yr (6), Eligibility_Date (34)
# GROUP? Number_of_Investments (14), AGE (30)
# Add a descriptor column for Number of Investments
inv.summary <- summary(lion.samp$Number_of_Investments)
lion.samp$inv.group <- ifelse(lion.samp$Number_of_Investments <= inv.summary["1st Qu."], "1Q",
ifelse(lion.samp$Number_of_Investments <= inv.summary["Median"], "Med",
ifelse(lion.samp$Number_of_Investments <= inv.summary["3rd Qu."], "3Q","4Q")))
lion.samp$inv.group <- factor(lion.samp$inv.group, levels = c("1Q", "Med", "3Q", "4Q"))
# Add age groupings by GENERATION and BY 10s
lion.samp$generation <- factor(ifelse(lion.samp$AGE <= 33, "Millennial",
ifelse(lion.samp$AGE <= 46, "Gen X",
ifelse(lion.samp$AGE <= 57, "Early Boomer", "Late Boomer"))), levels = c("Millennial", "Gen X", "Late Boomer", "Early Boomer"))
lion.samp$age.cohort <- factor(ifelse(lion.samp$AGE < 30, "< 30",
ifelse(lion.samp$AGE < 40, "30-39",
ifelse(lion.samp$AGE < 50, "40-49",
ifelse(lion.samp$AGE < 60, "50-59", "60+")))), levels = c("< 30", "30-39", "40-49", "50-59", "60+"))
# Add a Y/N variable for participants who are CONTRIBUTING, not how much
lion.samp$Pretax_Contrib <- factor(ifelse(lion.samp$PRETAX_CONTRB_PCT == 0, "Zero",
ifelse(lion.samp$PRETAX_CONTRB_PCT <= 4,"<= 4.0",
ifelse(lion.samp$PRETAX_CONTRB_PCT <= 7,"<= 7.0", "> 7.0"))), levels = c("Zero", "<= 4.0", "<= 7.0", "> 7.0"))
lion.samp$comp.group <- factor(ifelse(lion.samp$ANNUAL_COMP_AMT <= 45000, "Low",
ifelse(lion.samp$ANNUAL_COMP_AMT <= 115000, "Moderate", "High")), levels = c("Low", "Moderate", "High"))
# Full logistic model run of available variables
lion.irr.mdl <- glm(end.IRR.70 ~ begin_date + segment + Ao1_Managed_Accounts + Personal_Asset_Manager + Ao1_S_and_I_Experience + Personalized_Messaging
+ After_Tax_Source + Catch_Up_Contributions + Roth_Source + QDIA + Number_of_Investments + Auto_Rebalancing + Auto_Enrollment
+ Auto_Increase + db_available + NQ_Available + Brokerage + Target_Date_Funds + Re_Enrollment + Company_Stock + Auto_Enrolled +
EE_Match + ER_Match1 + Dream_Machine_Interactive + ANNUAL_COMP_AMT + AGE + GENDER_CD + PRETAX_CONTRB_PCT +
Eligibility_Date + Strategy + Hardship_Count + Loan_Count +
generation + age.cohort + Pretax_Contrib + comp.group, data = lion.samp, family = "binomial")
lion.irr.mdl <- glm(end.IRR.70 ~ begin_date + segment + Ao1_Managed_Accounts + Ao1_S_and_I_Experience + After_Tax_Source
+ Auto_Rebalancing + Auto_Enrollment + Auto_Increase + NQ_Available + EE_Match + generation +
GENDER_CD + Eligibility_Date + Strategy + Hardship_Count + Loan_Count + Pretax_Contrib + comp.group
, data = lion.samp, family = "binomial")
summary(lion.irr.mdl)
# --- dataset-extraction metadata residue (not R code); commented out so the file parses ---
# /PaceBeginningR.R | no_license | lenexajayhawk/R-Scripts | R | false | false | 45941 | r
##### Book name: Beginning R
##### Author: Larry Pace
##### Published: 2012
##### Chapter 1: Getting R and Getting Started
##### Working with Data in R
# Vectors are most common data type in R, must be homogenous (all same data types--
# character, numeric or logical--but if mixed may be coerced (forced) into single
# type
# Everything is an object; no required declarations so just type and begin using
x <- c(1, 2, 3, 4, "Pi")
x
# MODE is the storage class of the object, not the modal value in a series
mode(x)
# Building vectors uses the c function--combine, not concatenate (cat())
x <- c(1:10)
#The colon (:) creates a sequence, which can also be created with seq(1:10)
#How many elements are in the x vector?
length(x)
# Assign the numeric value 10 to y and see how many elements are in it
y <- 10
length(y)
# Add the two vectors together; what happens?
x + y
# R recycles the y element to add it to each element in the x vector
# Note how R handles recycling when vector length is odd
y <- c(0,1)
y
x + y
y <- c(1, 3, 5)
y
x + y
##### Order of operations and complex numbers
2 + 3 * x
# Note the differences
(2 + 3) * x
sqrt(x)
# Modulo (remainder) divide operation
x %% 4
# R does complex numbers
y <- 3 + 2i
Re(y) # Real part of the complex number
Im(y) #Imaginary part of the complex number
x * y
##### Four ways to create a sequence vector
x <- c(1:10)
x
y <- 1:10
y
z <- seq(10)
z
# Note: This creates a vector with 1 value REPEATED 10x, not 1-10, as above
a <- rep(1, 10)
a
##### R sequences all indexes starting with 1, not 0 like C/C++ or other languages
##### Adding elements (members) to a vector
x
# Concat (really, combine) the sequence 11-15 to the end of vector x
x <- c(x, 11:15)
x
##### Benford's Distribution: predicting the first digit in a string of numbers
# Build the vectors
P = c(0.301, 0.176, 0.125, 0.097, 0.079, 0.067, 0.058, 0.051, 0.046)
V = c(1, 2, 3, 4, 5, 6, 7, 8, 9)
P
V
# Discrete probability (additive) where each member of V is multiplied by corresponding member of P
# and summed for each pairing until totaled--implicit looping benefit within R
sum(V * P)
# Find the variance of the discrete probability distribution, again with implicit looping through the vectors
# Loop through vector V for each value substract the vector's mean and then square the difference
Dev <- (V - mean(V)) ^ 2
Dev
#Multiply the stddev by the probability in another implicit loop through both vectors
sum(Dev * P)
#Calculate the standard deviation of the same data
stdev <- sqrt(sum(Dev * P))
stdev
##### In R, a matrix is a vector but a vector is not a one-column or one-row matrix
x <- 1:10
# Create a matrix of the x vector sequence 1-10 in two rows and five columns in a DEFAULT by-column format
x <- matrix(x, 2, 5)
x
# This would also do the same matrix without using another variable
y <- matrix(1:10, 2, 5)
y
##### Referring to matrix rows and columns
colnames(x) <- c("A", "B", "C", "D", "E")
x
x[1, "C"]
x[1, 2]
x[ , 1]
x[1, ]
x[2, "E"]
##### More with Benford's Distribution, this time with a Matrix
# Create the ACCTDATA vector first
acctdata <- c(1, 132, 86.7,
2, 50, 50.7,
3, 32, 36.0,
4, 20, 27.9,
5, 19, 22.8,
6, 11, 19.3,
7, 10, 16.7,
8, 9, 14.7,
9, 5, 13.2)
acctdata
# Convert the vector into a MATRIX
acctdata <- matrix(acctdata, 9, 3, byrow = TRUE)
# Name the columns of the MATRIX
colnames(acctdata) <- c("digit", "actual", "expected")
# Show me the matrix
acctdata
# We are going to write the chi-square analysis to see if the Observed fits the Expected
# Another advantage of implicit looping, this one line...
# 1) Substracts each value in COL 3 from each value in COL 2
# 2) Squares the difference between those values
# 3) Divides that squared difference by the matching value in COL 3
# 4) Sums all of the divided values
chisquare <- sum((acctdata[, 2] - acctdata[ , 3]) ^ 2 / acctdata[ , 3])
chisquare
# Matrix transposition, multiplication and inversion
# Create two small matrices to work with
A <- matrix(c(6, 1,
0, -3,
-1, -2), 3, 2, byrow = TRUE)
B <- matrix(c(4, 2,
0, 1,
-5, -1), 3, 2, byrow = TRUE)
A
B
# Matrix addition
A + B
# Matrix subtraction
A - B
# Matrix component-by-component multiplication, not matrix multiplication
A * B
# Transpose matrix A
t(A)
# Matrix inversion: only done with square matrixes (same # rows and cols)
# Inversion handled with solve() function
A <- matrix(c(4, 0, 5,
0, 1, -6,
3, 0, 4), 3, 3, byrow = TRUE)
#Next command finds the inverse of matrix A
B <- solve(A)
A %*% B #Actual matrix multiplication
B %*% A
# Possible to have one-row or column matrix by adding drop = FALSE to index
# Print the entire matrix
A
# Print column one as a vector
A[ , 1]
# Print row one as a vector
A[1, ]
# Print row one as a one-row matrix
A[1, , drop = FALSE]
# Print col one as a one-col matrix
A[ , 1, drop = FALSE]
# Lists (which won't be used much as the Data Frame is the data structure of choice for
# most statistical analyses)
address <- list("Sam Johnson", "16902 W. 83rd Terr.", "Shawnee", "KS", 66219)
address
# Quick refresher on retrieving list elements; pull my name
address[[1]]
# Also pulls my name, but this time as a data slice
address[1]
# Data Frames
# Data Frames must have the same kind of data in each column
# Data Frames have rows and columns
# Data Frames can have a combination of numeric, character or logical data
# Think of Data Frame like an Excel spreadsheet
# Most of the time we'll be importing data into Data Frames, not building in R
# Get ready for the lapply() function to use when certain functions don't work with Data Frames
# Building a simple Data Frame from vectors
people <- c("Kim", "Bob", "Ted", "Sue", "Liz", "Amanda", "Tricia", "Jonathan", "Luis", "Isabel")
scores <- c(17, 19, 24, 25, 16, 15, 23, 24, 29, 17)
people
scores
# Create the Data Frame from the People and Scores vectors
quiz_scores <- data.frame(people, scores)
quiz_scores
# We don't need the individual People and Scores vectors anymore, so let's remove
rm(people, scores)
# Shouldn't be there as individual vector; error
people
# Should find in the quiz_scores Data Frame, though
quiz_scores
# Like a matrix, can get individual columns and rows by indexing
# But this pulls a data slice as a vector, not the column
quiz_scores[ , 2]
# This pulls the column only
quiz_scores[2]
quiz_scores$people
# If we attach() the Data Frame to the search path, we have immediate vector access to each column
attach(quiz_scores)
people
scores
# More sophisticated manipulation with cbind() (column bind) and rbind() (row bind)
# to bring Data Frames together
# Creating a Data Frame in the R Data Editor
# Invoke the Data Frame and initialize the columns with their types
Min_Wage <- data.frame(Year = numeric(), Value = numeric())
# Open the R Data Editor on the Min_Wage Data Frame
Min_Wage <- edit(Min_Wage)
# Show the Data Frame
Min_Wage
# Reading a CSV file into a Data Frame
# Taken from http://www.infoplease.com/ipa/A0104652.html
# R does not like numbers as column headers; placed Yr at beginning of each year
# NOTE: I pulled the US averages
percapita <- read.csv("c:\\r\\data\\uspercapita.csv", header = TRUE)
head(percapita)
class(percapita)
# R defaults the read-in data as a Data Frame
# To get the column averages, use the colMeans() function; means() has been deprecated
# In this case, get the column averages for columns 2 through 9
colMeans(percapita[2:9]) # Returning NA b/c some missing data?
# summary() function returns frequency counts
summary(percapita[2:9])
# If I'd have stored summary in an object, I would have had access to the summary information
summaryPerCapita = summary(percapita[2:9])
summaryPerCapita
class(summaryPerCapita)
# Handling missing data in R
# To remove NAs (really ignore), such as in stats, use na.rm = TRUE
# Fill a vector
x <- c(1, 2, 3, 4, 5, 6, NA, 8, 9, 10)
# Show the vector
x
# Vector average, which should error out
mean(x)
# This version works because it ignores the NA
mean(x, na.rm = TRUE)
# NA is still in the vector, though
x
##### Flow control: Looping, conditionals, and branching
# Looping
# R offers 3 types of explicit loops
# Conditional statements (if-then-else, for example) let us execute a branch of code when certain conditions are satisfied
# R offers the standard arithmetic operators, which follow the PEMDAS order of operation
# +, -, *, /, ^ or ** for exponentiation, %% modulus (remainder), and %/% for integer (truncated) division
# Comparison operators are the usual, but unlike SAS, do not have letter equivalents
# >, <, >=, <=, == equal to, != not equal to
# Operators evaluate to TRUE and FALSE (note case is important!)
# Logical operators also evaluate to TRUE and FALSE
# R provides vectorized and unvectorized versions of the "and" and "or" operators
# & and (vectorized), && logical and (unvectorized), | logical or (vectorized), || logical or (unvectorized), ! logical not
# Looking at vectorized AND
x <- 0:2
y <- 2:0
# Checks each pair (0,2), (1,1), (2,0)
(x < 1) & (y > 1)
# Unvectorized checks only the first value in each vector, L to R,
# returning only the first logical result
(x < 1) && (y > 1)
!y == x
identical(x, y) # identical tests for exact equality
##### Input and Output
# R defaults to 7 digits for numerical data, which can be changed with the options() function
getOption("digits")
# Note digits = 4 includes numbers larger than 0, in the 1s, 10s columns, etc.
# pi will be 3.142; digits is not just after the decimal
options(digits = 4)
pi
# Prompting for feedback from the keyboard with readline()
size <- readline("How many digits do you want to display?")
pi
options(digits = size)
pi
# Reading data using the scan() function; similar to read.table()
# Scan opens a channel to enter data at the command line until nothing more to enter and stores in the object
x <- scan()
# Scan() is the slightly harder way to do x <- c(1, 2, 3, 4, 5) or x <- seq(1:5) or x <- 1:5
##### Understanding the R environment
# Everything to R is an object; functions have names too
# CV is a function of my design that is calculating coefficient of variance
CV <- function(x) sd(x) / mean(x)
# Print x
x
# Print the coefficient of variance of x with my CV function
CV(x)
##### Implementing Program Flow
# Three basic loops: for, while, and repeat
# Remember, though, that vectorized implicit looping is far better and more effective
# The for loop
i <- c(1:5)
for(n in i) print(n * 10)
# The above loop was unnecessary, as looping through i can be done implicitly
print(i * 10)
# For loops can be used with lists and vectors
carbrands <- c("Honda", "Toyota", "Ford", "GM", "BMW", "Fiat")
for(brand in carbrands) {
print(brand)
}
# Next two are functionally equivalent
carbrands
print(carbrands)
##### While and Repeat loops
# Most R programmers try to avoid these and use explicit for loops if required
# While loops
even <- 0
while(even < 10) {
even <- even + 2
print(even)
}
# Repeat loop, where a condition is met to break out of the loop
i <- 1
repeat
{
print(i)
i <- i + 1
if(i > 5)
{
break
}
}
##### Avoiding explicit looping; using the apply() function family
# Note that median is not applicable to the data frame
median(percapita[2:9])
# The lapply() function allows us to find the median for each column in the data frame
# lapply() returns a list
lapply(percapita[2:9], median)
# While lapply() has ugly output, wrapping a DATAFRAME around the output can help
as.data.frame(lapply(percapita[2:9], median))
# lapply() is pretty ugly output; try apply() with three arguments (data, 1 = rows or 2 = columns, called function)
# apply(the percapita data frame, columns, median of each column)
apply(percapita[2:9], 2, median)
# tapply() applies a function to arrays of variable length (ragged arrays), defined by a vector
# Three stats classes of differing size and their scores, in three vectors
class1 <- c(17, 18, 12, 13, 15, 14, 20, 11, 16, 17)
class2 <- c(18, 15, 16, 19, 20, 20, 19, 17, 14)
class3 <- c(17, 16, 15, 18, 11, 10)
# Combine the vectors into a Class list
classes <- list(class1, class2, class3)
# lapply() returns a list
lapply(classes, length)
# sapply() (simplify apply) tries to return a bit more table-like structure
sapply(classes, length)
# We can also use sapply() to apply the mean to the list
sapply(classes, mean)
sapply(classes, summary)
##### A simple expense report
# Invoke the Data Frame and initialize the columns with their types
# See page 40
##### Generating Pythagorean triples with a, b, and c where a2 + b2 = c2
# Print a Pythagorean triple (a, b, c with a^2 + b^2 = c^2) generated from
# Euclid's formula, given x = c(s, t). For an all-positive triple, t > s > 0.
pythag <- function(x)
{
  s <- x[1]
  t <- x[2]
  leg_a <- t ^ 2 - s ^ 2
  leg_b <- 2 * s * t
  hyp <- s ^ 2 + t ^ 2
  cat("The Pythagorean triple is: ", leg_a, leg_b, hyp, "\n")
}
input <- scan()
##### Writing reusable functions
# Calculating the confidence interval of a single mean
# Two-sided confidence interval of a single mean using the t distribution.
# x: numeric vector of sample data; alpha: significance level (default .05).
# Prints the mean, standard error, and the lower/upper limits of the
# 100 * (1 - alpha)% confidence interval.
confint <- function(x, alpha = .05)
{
  n <- length(x)
  xbar <- mean(x)
  se <- sd(x) / sqrt(n)
  half_width <- se * qt(1 - alpha / 2, n - 1)
  cat("Mean: ", xbar, "Std. Error: ", se, "\n")
  cat("Lower Limit: ", xbar - half_width, "\n")
  cat("Upper Limit: ", xbar + half_width, "\n")
}
##### Avoiding loops with vectorized operations
# Understanding where vectorized implicit looping is available can save a ton of time and code
# This example is Euler's formula to find prime numbers, first in a function call
# Formula is x^2 - x + 41
# Euler's prime-generating polynomial: for each n in x, print n^2 - n + 41.
# BUG FIX: the original had no braces, so the function body consisted of
# flush.console() only and the for loop executed once at the top level
# against the GLOBAL x, not the argument. Braces put the loop inside the
# function as intended.
TryIt <- function(x)
{
  flush.console()
  for (n in x)
  {
    result <- n^2 - n + 41
    cat("For x =", n, "Result is", result, "\n")
  }
}
# Implicit looping makes this much, much easier
# Re-set x as a vector again for code purposes only
x <- 0:50
y <- x^2 - x + 41
y
##### Vectorizing if-else statements with ifelse()
x <- -5:5
x
# Square roots of negative numbers will have Not a Number (NaN) outputs
sqrt(x)
# Do ifelse() inside the sqrt function to leave NAs alone
sqrt(ifelse(x >= 0, x, NA))
# Moving ifelse to the outside creates NaNs again
ifelse(x >= 0, sqrt(x), NA)
##### Chapter 4: Summary statistics
# Mean, median and mode
# Remember that the mode() function returns the storage class of the object, not the most common value in a vector
# Summary() is the six-number summary of a vector
# colMeans() gives all of the column averages of the supplied object, typically a data frame
# Put percapita in the search stream
attach(percapita)
head(percapita)
summary(percapita[2:9])
colMeans(percapita[2:9])
mean(percapita[2:9])
# Median is NOT vectorized, so have to apply() wrapper
# apply(percapita cols 2-9, 2 = columns, using the median() function
apply(percapita[2:9], 2, median)
# Can also use apply with the quantile function to find the median and other quantiles (percentiles)
apply(percapita[2:9], 2, quantile)
# Add a specific quantile to one column of data to find its value
quantile(percapita$Yr2005, 0.75)
# R has no built-in method for finding mode
# Use the table and sort functions to identify the modal value
# Manually check frequency count of values; 36421 is the only repeat, thus the modal value
sort(table(Yr2010))
apply(percapita[2:9], 2, mean, trim = .5)
x <- c(0:10, 50)
xm <- mean(x)
c(xm, mean(x, trim = 0.1))
##### Measuring location via standard scores
# scale() is the z-score function call
# Accepting the defaults uses mean as center of measure and sddev as scaling value
zYr2010 <- scale(Yr2010)
zYr2010
# Let's make sure the defaults of stddev = 1 and mean are true
mean(zYr2010)
# Obviously sd is not vectorized
apply(zYr2010, 2, sd)
# This works, too, just prints to the console; doesn't save the results to another object
scale(percapita[2:9])
##### Measuring variability
# Many ways to measure spread, or dispersion: Variance, std dev,
# range, mean absolute deviation, median absolute deviation, IQR,
# coefficient of variation
# Variance and Standard Deviation
# Get the variance and stddev for Yr2010
var(Yr2010)
# Are var and sd vectorized? YES, but get a variance-covariance matrix
var(percapita[2:9])
sd(Yr2010)
# R recommends using sapply wrapper because sd is deprecated
sd(percapita[2:9])
sapply(percapita[2:9], sd)
##### Range
# Most commonly defined as the difference between the highest and lowest
# values in a data vector. R gives you the high and low value; you do the rest
# This uses the WEIGHTS data we created in Chapter 3
range(weights)
range.diff <- function(x) max(x) - min(x)
range.diff(weights)
##### Mean and Median Absolute Deviations
# mad() function where we can specify measure of central tendency
# by supplying an argument
mad(weights)
mad(weights, center = mean(weights))
sd(weights)
# Because these values are relatively close to each other, the
# data may be symmetrical. Let's find out with a histogram
hist(weights)
##### IQR
# Difference between 3rd and 1st quartiles, a view of the middle 50% of the data
IQR(weights)
# My first real function: IQRVALS finds the LOW and HIGH values of an IQR'd data set
# Compare the LOW and HIGH VALS to the MIN and MAX of the data set to determine whether
# using the IQR values is even required
# Remember to use apply() if more than one column passed in
# IQRVALS: report the Tukey 1.5 * IQR fences for a numeric vector. If either
# fence falls outside the observed data range, the data's own min/max are
# recommended instead. Prints the recommendation and invisibly returns the
# recommended bounds as c(lower = ..., upper = ...) for programmatic use.
# Remember to use apply() if more than one column passed in
iqrvals <- function(x) {
  # Observed extremes of the data
  minx <- min(x)
  maxx <- max(x)
  # Interquartile range and the 1.5 * IQR fence multiplier
  iqrx <- IQR(x)
  iqrmult <- 1.5 * iqrx
  # Tukey fences: Q1 - 1.5 * IQR and Q3 + 1.5 * IQR
  lowiqrval <- quantile(x, 0.25) - iqrmult
  highiqrval <- quantile(x, 0.75) + iqrmult
  # Don't use IFELSE because returns NULL for argument that isn't used
  # Fall back to the data's min/max when a fence lies outside the data
  if (lowiqrval < minx || highiqrval > maxx) {
    cat("Use data min:", minx, "and max: ", maxx, "\n")
    invisible(c(lower = minx, upper = maxx))
  } else {
    cat("Low IQR value: ", lowiqrval, "and High IQR value: ", highiqrval, "\n")
    invisible(c(lower = unname(lowiqrval), upper = unname(highiqrval)))
  }
}
##### Coefficient of variation
# Measuring the standard deviation relative to the size of either the sample or population
# mean. Often used as measure of relative risk, showing variability relative to size of
# average; good for rates of return. Not built in to R but can easily be turned into a
# function.
# CV is simply the standard deviation divided by the mean of the data (sample or population).
# na.rm is forwarded to both sd() and mean() so the ratio can ignore missing
# values when requested; the default FALSE preserves the original behavior
# (any NA in the input yields NA out).
CV <- function(x, na.rm = FALSE) sd(x, na.rm = na.rm) / mean(x, na.rm = na.rm)
CV(weights) #0.15258
##### Covariance and Correlation
# Covariance is the positive, zero, or negative numerator value in calculating correlation.
# Correlation is the value between -1 and 1 that indicates the directional relationship
# of change between two variables
# Hypothetical advertising and sales figures
region <- c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
ads <- c(5.5, 5.8, 6.9, 7.2, 6.3, 6.5, 6.4, 5.7, 6.1, 6.8)
sales <- c(101, 110, 120, 125, 117, 118, 120, 108, 115, 116)
adsales <- data.frame(region, ads, sales)
adsales
cov(ads, sales)
cor(ads, sales)
##### Measuring symmetry (or lack thereof) p. 74
# Examples use the PSYCH package, which we cannot pull down right now
# Button ticket submitted 7/11/14 re: how to mirror CRAN download site
# Skewness (data balance to one side or another; positive skew = long right
# tail, negative skew = long left tail)
# and kurtosis (peaks, such platykurtic (flat) and leptokurtic (tall) or meso- (just right)
# Functions not available in Base R packages, but skew() and kurtosi()
# available in PSYCH
##### Chapter 5: Creating tables and graphs p. 77
# Frequency distributions and tables
# Already seen that table() produces a simple frequency distribution
# Using base R faithful data set
# Print the first few rows with headers
head(faithful)
# Pull waiting out of data frame into its own vector (column 2 of faithful)
waiting <- faithful[ , 2]
# Range of waiting = time between eruptions
range(waiting)
# Create a basic frequency distribution with table()--too many values to be very helpful
table(waiting)
# Create intervals (bins) to better view the data
# Not exact science though my rule of thumb is 2^x bins
# based on number of data points; for example, 272 eruptions
# would be 2^8 or 2^9, so we need 8 or 9 bins, probably 8
# Bins are 8 * 7.5 = 60, the width of our data representation
# Use sequence function to build bins vector
bins <- seq(40, 100, by = 7.5)
bins
# CUT uses the bins to divide the WAITING data into intervals;
# right = FALSE defines whether the intervals should be closed on the right side
wait_time <- cut(waiting, bins, right = FALSE)
# TABLE produces a tabular view of the data in a row-like format
table(wait_time)
wait_time
# CBIND converts the TABLE function into a more vertical columnar view
cbind(table(wait_time))
# table() with two variables gives a cross-tabulation, whether
# quantitative or qualitative variables
##### Pie charts and bar charts (bar plots in R parlance) p. 79
# While both can be used for representing the same kind of data
# humans are more inclined to bar charts; use them over pie charts
# wherever possible for nominal (Republican or Democrat) or ordinal data
# (1st place, 2nd place, etc.)
# Using DuPont's data on car colors in 2012 through table editor
car_colors <- data.frame(color = factor(), percentage = numeric())
car_colors <- edit(car_colors) # interactive: opens R's spreadsheet editor
car_colors
# Attach the object to the search chain (remember to detach() when done)
attach(car_colors)
# Create the base pie with ugly pastels by default
pie(percentage)
# Add a new set of colors with a colors vector
piecolors <- c("#C0C0C0", "black", "white", "#696969", "red", "blue", "brown", "green", "#F0E68C")
piecolors
# Rebuild the pie chart with the actual colors represented
pie(percentage, col = piecolors)
# Let's present the actual names of the pieces of pie
names(percentage) <- c("Silver", "Black", "White", "Gray", "Red", "Blue", "Brown", "Green", "Other")
names(percentage)
# Rebuild the pie with the new names
# This would work, too, without creating a new object:
# (the continuation lines below were previously left uncommented, which made
# the stray trailing ")" a top-level syntax error -- now fully commented out)
# pie(percentage,
#     col = c("Silver", "Black", "White", "Gray", "Red", "Blue", "Brown", "Green", "Other"),
#     main = "Pie Graph of Car Color Preferences")
pie(percentage, col = piecolors, main = "Pie Graph of Car Color Preferences")
# Bar charts p. 83
# Mostly a change in the function call; not going to reproduce all of the previous code
barplot(percentage, col = piecolors, main = "Bar Chart of Car Color Preferences")
# Boxplots
# Also called box plots or box-and-whisker plots; popularized by statistician John Tukey
# Represents five-number summary of summary()
# Relative lengths of whiskers indicates skew; IQR = length of box; median = line in box
# If distribution relatively unskewed median be close to center of box and whiskers roughly
# same length; outliers represented by small circles
test1 <- c(72.41, 72.73, 71.63, 70.26, 77.98, 83.48, 87.25, 84.25)
test2 <- c(92.71, 86.35, 87.80, 107.92, 58.54, 91.93, 103.85, 101.56)
test3 <- c(73.63, 90.97, 44.59, 67.04, 78.57, 82.36, 72.96, 78.55)
quizzes <- data.frame(test1, test2, test3)
boxplot(quizzes)
# NOTE(review): percapita was presumably attached in an earlier chapter --
# confirm; also note car_colors (attached above) is never detached.
detach(percapita)
##### Chapter 6: Discrete probability distributions
# Discrete probabilities must satisfy these conditions:
# 1) Probabilities sum to 1
# 2) Probability of any one outcome is between 0 and 1
# 3) List of outcomes is exhaustive (can be no others) and mutually exclusive (no outcome in more than 1 category)
# Jonathan sells cars and has tracked his sales on Saturday
saturday_sales <- data.frame(numsold = c(0, 1, 2, 3, 4, 5), prob = c(0.6, 0.15, 0.1, 0.08, 0.05, 0.02))
attach(saturday_sales)
# Calculate the mean and variance given the probability distribution
# (expected value: sum of outcome * probability)
mu <- sum(numsold * prob)
variance <- sum((numsold - mu)^ 2 * prob)
standarddev <- sqrt(variance)
# Bernoulli processes have only two outcomes per trial, each of which is
# independent, such as the toss of a coin, a free throw attempt, or the
# selection of the correct answer on a multiple choice question
# One outcome is a SUCCESS and the other FAILURE.
x <- c(0, 1, 2, 3, 4, 5)
# DBINOM() is the density or mass function with probabilities for each outcome
# Other options include PBINOM (cumulative density or mass function for the distribution),
# QBINOM (Quantile function or reverse lookup to find the value of the random variable
# associated with any probability, RBINOM (Random function to generate random samples from
# the probability distribution))
# Show the results in a column (CBIND) of the density binomial distribution of vector x
cbind(dbinom(x, size = 5, prob = .5))
# MicroSort technique to increase chances of having a specific gendered child, where
# probability is .91 for femail and .76 for male
# Probability that of 10 families exactly 8 will have a male?
dbinom(8, size = 10, prob = .76)
# Of 10 families, probability that all 10 will have a male child
dbinom(10, size = 10, prob = .76)
# Probability that <=6 families will have a male child
xvec <- 0:6
sum(dbinom(xvec, 10, .76))
# Or use PBINOM for cumulative probability of <=6 families having male child
# If use XVEC here function returns cumulative probability of each vector member!
pbinom(6, 10, .76)
# Want to generate 100 samples of size N = 10 with p = .76; RBINOM generates this
# set of a random binomial distribution
randombinom <- rbinom(100, 10, .76)
# Can find confidence intervals through quantiles of the distribution at 95% empirical
# confidence. This example notes that of 10 families sample, between 5 and 10 will have
# a male child with 95% confidence
quantile(randombinom, .025) # 5 is lower confidence limit
quantile(randombinom, .975) # 10 is high confidence limit
# Chapter 7 Computing Normal Probabilities
# Learn early that the distribution of sample means, regardless of the shape of the
# parent distribution, approaches a normal distribution as sample size increases.
# The SCALE() function produces z-scores for any set of data, which gives us
# SCALE is a combination stat; gives the location of the raw score relative to
# the mean and the number of standard deviations the raw score is away from
# the mean, which makes z-scores both descriptive and inferential
# Most often we are interested in finding the areas under the normal curve
# (or assuming the curve is normal, as Karl Pearson told us, even though he
# eventually regretted the term 'normal').
# DNORM can be used to draw a graph of a normal distribution
# We'll put two curves on the same plot, too
# Build a sequence starting at 0 to 40 going up by .5
xaxis <- seq(0, 40, .5)
# Show me xaxis
xaxis
# Build two normal distributions with the same mean but different stdev
y1 <- dnorm(xaxis, 20, 3)
y2 <- dnorm(xaxis, 20, 6)
# Plot the curves (the flatter y2 first so the taller y1 fits in its window)
plot(xaxis, y2, type = "l", main = "Comparing Two Normal Distributions")
# POINTS adds the second line to the graph and TYPE = "l" tells R to plot lines
points(xaxis, y1, type = "l", col = "red")
# Different normal distributions have different amounts of spread based on the
# standard deviation. It's often helpful to convert a normal distribution to
# the standard normal distribution and work with z-scores, which have a mean
# of 0 and stdev of 1
# Finding probabilities using the PNORM function
# PNORM() function finds a left-tailed probability, where the critical value
# of z for a 95% confidence interval is +/-1.96, and the area between those
# values is 95% of the standard normal distribution. PNORM defaults to mean
# of 0 and stdev 1; these defaults can be changed as arguments in the call.
# Finding a left-tailed probability
# This is the CUMULATIVE probability under the standard normal distribution
# up to a z-score of 1.96.
pnorm(1.96) # This says 97.5% of the area lies to the left while 2.5% is to the right
# Finding the area between two z-scores
# Is simply a matter of subtraction
pnorm(1.96) - pnorm(-1.96)
# Finding a right-tailed probability
# Is just as easy; take the complement of the left-tailed probability.
# (The original called a nonexistent prob() function; pnorm() is the
# correct left-tail CDF to complement.)
1 - pnorm(1.96)
# You took a standardized test with a mean of 500 and stdev of 100 and scored a 720.
# How'd you do? How much better than what % of test takers?
# First standardize your score against the test: (720 - 500) / 100 = 2.2
scale(720, 500, 100)
pnorm(2.2) # Your score is better than 98.6% of other test takers
# With PNORM, given a z-score, it returns the left-tailed probability in the sample
# that the value is higher than
# Finding critical values using the QNORM function
# With QNORM, given a probability, it returns the equivalent z-score
# of the value (qnorm is the inverse of pnorm)
qnorm(.975)
# To find a one-tailed critical value...
# Subtract the alpha level (.05 = standard) from 1
qnorm(1 - .05)
# Using RNORM to generate random samples
# RNORM(sample size, mean, stdev) generates a random sample from a normal
# distribution. Omitting MEAN and STDEV results in a mean of 0 and stdev 1.
samples <- rnorm(50, 100, 15)
samples
hist(samples) # Not exactly "normal"
# What if we bump the sample size to 1000?
sample2 <- rnorm(1000, 50, 15)
hist(sample2) # Much more "normal"-looking
# R ships no built-in one-sample z-test, so roll our own.
# xbar: the sample data (or a single observed mean); mu: hypothesized
# population mean; stdev: standard deviation; n: sample size.
# Returns the z statistic for the sample mean.
ztest <- function(xbar, mu, stdev, n) {
  # Standardize the sample mean against the hypothesized mean
  (mean(xbar) - mu) / (stdev / sqrt(n))
}
# Generate 100 weights for adult males who exercise
exmen <- rnorm(100, 187, 17)
# Let's compare that group with weights of all adult males, averaging 191 pounds
ztest(exmen, 191, sd(exmen), length(exmen)) # -4.852295 for one run; -2.403974 for another
# This returns a z-score, so convert it to a probability
pnorm(-2.403974) # ~.0081; lower than .05 so we could reject the NULL hypothesis
# that there is no difference in the weights of exercising adult males and average
# adult male
# The above could be wrapped into a single line of code
pnorm(ztest(exmen, 191, sd(exmen), length(exmen)))
# Chapter 8 Creating confidence intervals
# A confidence interval (or interval estimate) is a range of possible values used
# to estimate the true value of a population parameter, which is associated with
# a level of confidence, such as 90%, 95% or 99%. The confidence level is the complement
# to our alpha level, so these three confidence levels correspond to alpha levels of
# .1, .05, and .01
#
# The general idea behind a confidence interval is that we have an estimate of a parameter
# and we calculate a margin of error, adding the margin to the estimate to get an upper
# limit and subtracting the margin for a lower limit
# If our confidence interval for a population mean has a lower limit of 164.13 and an
# upper limit of 180.97, we can say we are 95% confident that the true population mean
# (for men who exercise, for example) is contained within that range.
#
# INCORRECT interpretations:
# 1) There is a 95% chance (.95 probability) that the true value of the mean is between those values
# 2) 95% of the sample means fall between those numbers
# Let's build a sample size function!
# E = Desired margin of error
# sigma = stdev
# alpha = complement of confidence level
# Prints the required sample size and also returns it invisibly so the
# result can be captured programmatically.
sampsize.est <- function(E, sigma, alpha = .05) {
  # qnorm(1 - alpha/2) is the positive two-tailed critical z value; the
  # original used qnorm(alpha/2), whose negative sign vanished when squared.
  n <- ((qnorm(1 - alpha / 2) * sigma) / E) ^ 2
  estsize <- ceiling(n) # Round UP: a fractional observation isn't possible
  cat("For a desired margin of error of:", E, "the required sample size is:", estsize, "\n")
  invisible(estsize)
}
# Does it work with our random sample of adult male exercisers?
sampsize.est(5, sd(exmen)) # 49 required to be 95% confident that the true pop mean is +/-5 pounds
sampsize.est(2.5, sd(exmen)) # 195
# Is there a general rule of thumb for sample size here?
# Yes, about 4x the sample size for each 1/2 of margin of error
sampsize.est(1.25, sd(exmen)) # 780
sampsize.est(10, sd(exmen)) # Predict 12, actually 13
sampsize.est(7.5, sd(exmen)) # 1 1/2 original MOE = 1/2x sample, predict 24; actual 22
sampsize.est(5, sd(exmen), alpha = .01) # NOTE alpha change; sample needed = 85
sampsize.est(5, sd(exmen), alpha = .1) # NOTE alpha change; sample needed = 35
# Confidence intervals for the mean using the t distribution
# When we don't know the population standard deviation we use the sample
# standard deviation as a reasonable estimate. We also use the t distribution
# instead of the normal distribution to calculate the confidence interval.
# confint.mean: t-based confidence interval for the mean of x.
# alpha = significance level; two.tailed = FALSE computes the one-sided
# critical value and prints guidance for one-sided bounds.
# Prints the interval and returns c(lower, upper) invisibly so the limits
# can be used programmatically.
# Fix vs. original: the one-tailed guidance printed `lower` labeled as the
# left-tailed UPPER bound and `upper` as the right-tailed LOWER bound --
# these were swapped. The left-tailed upper bound is mean + margin (upper);
# the right-tailed lower bound is mean - margin (lower).
confint.mean <- function(x, alpha = .05, two.tailed = TRUE)
{
  cat("\t", "Confidence Interval for the Mean", "\n")
  cat("Mean: ", mean(x), "\n")
  df <- length(x) - 1                               # degrees of freedom for qt()
  # A two-tailed interval splits alpha across both tails
  conflevel <- if (two.tailed) 1 - alpha / 2 else 1 - alpha
  stderr <- sd(x) / sqrt(length(x))                 # standard error of the mean
  tcrit <- qt(conflevel, df)                        # critical t value
  margin <- stderr * tcrit                          # margin of error
  lower <- mean(x) - margin
  upper <- mean(x) + margin
  if(two.tailed == FALSE) {
    cat("You are doing a one-tailed test.", "\n")
    cat("If your test is left-tailed, the lower bound", "\n")
    cat("is negative infinity. If your test is right-tailed", "\n")
    cat("the upper bound is infinity.", "\n")
    cat("Either add the margin", margin, "to or subtract it from", "\n")
    cat("the sample mean as appropriate.", "\n")
    cat("For a left-tailed test, the upper bound is", upper, ".", "\n")
    cat("For a right-tailed test, the lower bound is", lower, ".", "\n")
  }
  cat("Mean: ", mean(x), "Std. Error: ", stderr, "\n")
  cat("Lower Limit: ", lower, "\n")
  cat("Upper Limit: ", upper, "\n")
  invisible(c(lower = lower, upper = upper))
}
confint.mean(exmen)
# Cross-check our hand-rolled interval against base R's t.test()
t.test(exmen)
confint.mean(exmen, two.tailed = FALSE)
##### Chapter 12: Correlation and Regression
# Covariance and Pearson product-moment correlation are cov() and cor(), respectively
# Create a matrix of weights and heights
weights <- c(237.1, 220.6, 214.5, 213.3, 209.4, 204.6, 201.5, 198.0,
193.8, 191.1, 189.1, 186.6, 179.3, 176.7, 175.8, 175.2,
174.8, 173.3, 172.9, 170.1, 169.8, 169.1, 166.8, 166.1,
164.7, 164.2, 162.4, 161.9, 156.3, 152.6, 151.8, 151.3,
151.0, 144.2, 144.1, 139.0, 137.4, 137.1, 135.0, 119.5)
y <- sort(weights)
y
# Simulated heights, sorted so they pair monotonically with the weights
x <- sort(rnorm(40, 70, 6))
x
# NOTE(review): this name shadows base::matrix() for the rest of the session
matrix <- cbind(x, y)
head(matrix)
cov(x, y)
cor(x, y)
plot(x, y, xlab = "height", ylab = "weight", main = "Weights and Heights")
abline(lm(y ~ x))
##### Three or more variables in a correlation analysis
# Let's add resting heart rate
z <- rnorm(40, 80, 10)
z <- sort(z)
z
matrix <- cbind(x, y, z)
# cov()/cor() on a matrix return the full variance-covariance / correlation matrix
cov(matrix)
cor(matrix)
var(x)
cov(x, y)
cov(x, z)
cor(x, z)
# Computations to calculate effect size
# Suppose the primary study reported a t-test
# value for differences between 2 groups. Then,
# where tes = T Effect Size where
# t = t-value, n.1 = first group size (n), and
# n.2 = second group size (n)
# NOTE(review): tes() is not base R -- presumably from the compute.es
# package; confirm it is loaded before this point.
tes(t = 1.74, n.1 = 30, n.2 = 31)
# Or, more simply,
tes(1.74, 30, 31) # Produces effect sizes for MeanDifference, Correlation, LogOdds, Fishers z, and gives the total sample size
# Predicting milk prices (Windows-specific path; adjust as needed)
milk.prices <- read.csv("C:\\R\\Data\\RBook\\milkprices.csv", header = TRUE, stringsAsFactors = FALSE)
plot(milk.prices$Index, milk.prices$Average)
abline(lm(milk.prices$Average ~ milk.prices$Index)) # Doesn't look very linear
results <- lm(milk.prices$Average ~ milk.prices$Index)
summary(results)
# Would a quadratic formula be a better fit if we square the index?
milk.prices$indexsq <- milk.prices$Index ^ 2
results <- lm(milk.prices$Average ~ milk.prices$Index + milk.prices$indexsq)
summary(results)
milk.prices$predicted <- predict(results)
plot(milk.prices$Index, milk.prices$Average); lines(milk.prices$predicted)
# Chapter 14: Logistic Regression p.201
# Univariate and multiple regression is great for continuous dependent variable prediction. Sometimes we want to determine
# the outcome of a binary (dichotomous) outcome, where 1 = success and 0 = failure. Fisher developed discriminant
# analysis to determine group membership, but the technique works ONLY WITH CONTINUOUS VARIABLES. Logistic regression
# uses both continuous and binary predictors, a major advantage.
#
# Examples of dichotomous outcomes include the presence or absence of defects, attendance and absenteeism, and student retention
# versus dropout. Logistic regression seeks out the dichotomous outcome answer, where p = the proportion of 1s (successes) and q is
# the proportion of 0s (failures), in this case q = 1 - p. Correlation and regression is problematic with dichotomous outcomes because
# they may return values < 0 and > 1. The logistic curve, on the other hand, has a very nice property of being asymptotic to 0 and 1
# and always lying between 0 and 1.
#
# Probability can be converted to odds, where if p is the probability of success, the odds in favor of success are: p / (1 - p).
#
# Odds can be converted to probabilities, too. If the odds are 5:1, then the probability of success is 5/6, or 83%. Odds can be greater
# than 1; for example, if the probability of rain is .25 then the odds of rain are .25 / .75 = .33, but the odds against rain are
# .75 / .25 = 3, or 3:1.
# Logistic regression works with LOGIT, the natural logarithm of the odds. Logistic regression allows us to model the probability of
# "success" as a function of the logistic curve, which is never less than 0 and never greater than 1.
# Logistic Regression with One Dichotomous Predictor
# 50 pairs of observations, which represent binge drinking amongst 25 men and 25 college students.
# gender: 1 = male (first 25), 0 = female (last 25); binge: 1 = binge drinker
gender <- c(rep(1, 25), rep(0, 25))
binge <- c(rep(1, 12), rep(0, 13), rep(1, 5), rep(0, 20))
bingedata <- data.frame(gender, binge)
binge.table <- table(bingedata)
chisq.test(table(bingedata)) # p = .07325, approaching significance; could be better with larger sample
# What are the proportions of men and women binge drinking?
men.binge.prop <- binge.table["1", "1"] / sum(binge.table["1", "0"], binge.table["1", "1"]) # .48 or 48%
women.binge.prop <- binge.table["0", "1"] / sum(binge.table["0", "0"], binge.table["0", "1"]) # .2 or 20%
# What are the odds of men and women being binge drinkers? odds = p / (1 - p)
odds.men <- men.binge.prop / (1 - men.binge.prop) # .9231
odds.women <- women.binge.prop / (1 - women.binge.prop) # .25 or 1:4
# The INTERCEPT term for women is the log(odds)
log(odds.women) # -1.386294
# The INTERCEPT term for men
log(odds.men) # -.08001771
# The slope will the be the difference between the two log(odds) INTERCEPT values
b1 <- log(odds.men) - log(odds.women)
b1 # 1.306252
# Let R do the logistic regression now
results <- glm(binge ~ gender, family = "binomial")
summary(results) # Note the (Intercept) is the WOMEN's INTERCEPT we calculated and the gender ESTIMATE
# is the B1 slope we calculated as the difference between MEN and WOMEN. Unlike the CHI-SQUARE test the
# GLM summary notes that GENDER is significant to the model.
# Logistic Regression with a Continuous Predictor p.205
# The book example reviewed a liberal arts college's returning sophomores and their retention rate, which
# was only 62%. The goal was to increase retention by predicting the likelihood of returning when understanding
# what drives students to stay or leave after their freshman year. The single best predictor turned out to be
# the student's interest in the school in the first place; this was marked by whether the student sent SAT scores
# to the school 1st on their list. Another big predictor was, of course, high school GPA; past behavior is a
# good predictor of future behavior in this case.
# A T-TEST between HSGPA ~ Retained found a very significant difference in HSGPA between those who returned for their sophomore
# and those who did not (were not retained). Side note: Would an ANOVA do this for all of the variables, including any
# potential interactions?
#
# The results were calculated with results <- glm(Retained ~ HSGPA, family = "binomial"); summary(results).
# The results$fitted.values were plotted against the HSGPA with:
# predicted <- results$fitted.values
# plot(HSGPA, predicted)
# The plot found that students with HSGPAs < 3.0 had less than a .50 chance of staying on, while those with HSGPAs > 4.0
# had about .70 probability of returning for their sophomore year.
# Logistic regression with multiple predictors
# Working with the most recent Lionshare data; can logistic regression tell us the probability of a participant being at 70% IRR?
lion.orig <- read.csv("c:\\r\\data\\Lionshare JPM Data 03282014.csv", stringsAsFactors = TRUE)
# Parse the two date columns from m/d/Y strings into Date objects
lion.orig$begin_date <- as.Date(as.character(lion.orig$begin_date), format = "%m/%d/%Y")
lion.orig$Eligibility_Date <- as.Date(as.character(lion.orig$Eligibility_Date), format = "%m/%d/%Y")
# Binary response: above/below 70% income-replacement ratio at period end
lion.orig$end.IRR.70 <- factor(ifelse(lion.orig$End_IRR < .7, "Less than 70%", "Greater than 70%"))
# Drop unnecessary columns
lion.orig$Change_To_IRR <- NULL
lion.orig$Chg_IRR <- NULL
lion.orig$Begin_IRR <- NULL
lion.orig$End_IRR <- NULL
lion.orig$part <- NULL
lion.orig$snp500_3Yr <- NULL
lion.orig$snp500_5Yr <- NULL
lion.orig$Three_Year_PROR <- NULL
lion.orig$Five_Year_PROR <- NULL
lion.orig$OTTR_2009 <- NULL
lion.orig$OTTR_2011 <- NULL
summary(lion.orig)
# Lots of NA values in the set; let's only use complete rows
lion.samp <- na.omit(lion.orig)
# Get 1000 records to work with; first randomize the data
# Don't need to do this with only 1,044 rows of complete data
# set.seed(12345)
# lion.rand <- lion.orig[order(runif(nrow(lion.orig))), ]
#
# # Pull 1000 records for use
# lion.samp <- lion.rand[1000:2000, ]
# Dump the original files because we don't need them now
# rm(lion.orig, lion.rand)
# str(lion.samp)
# CUT? begin_date (col 3), snp500_3yr (5), snp500_5yr (6), Eligibility_Date (34)
# GROUP? Number_of_Investments (14), AGE (30)
# Add a descriptor column for Number of Investments (quartile bucket labels)
inv.summary <- summary(lion.samp$Number_of_Investments)
lion.samp$inv.group <- ifelse(lion.samp$Number_of_Investments <= inv.summary["1st Qu."], "1Q",
ifelse(lion.samp$Number_of_Investments <= inv.summary["Median"], "Med",
ifelse(lion.samp$Number_of_Investments <= inv.summary["3rd Qu."], "3Q","4Q")))
lion.samp$inv.group <- factor(lion.samp$inv.group, levels = c("1Q", "Med", "3Q", "4Q"))
# Add age groupings by GENERATION and BY 10s
lion.samp$generation <- factor(ifelse(lion.samp$AGE <= 33, "Millennial",
ifelse(lion.samp$AGE <= 46, "Gen X",
ifelse(lion.samp$AGE <= 57, "Early Boomer", "Late Boomer"))), levels = c("Millennial", "Gen X", "Late Boomer", "Early Boomer"))
lion.samp$age.cohort <- factor(ifelse(lion.samp$AGE < 30, "< 30",
ifelse(lion.samp$AGE < 40, "30-39",
ifelse(lion.samp$AGE < 50, "40-49",
ifelse(lion.samp$AGE < 60, "50-59", "60+")))), levels = c("< 30", "30-39", "40-49", "50-59", "60+"))
# Add a Y/N variable for participants who are CONTRIBUTING, not how much
lion.samp$Pretax_Contrib <- factor(ifelse(lion.samp$PRETAX_CONTRB_PCT == 0, "Zero",
ifelse(lion.samp$PRETAX_CONTRB_PCT <= 4,"<= 4.0",
ifelse(lion.samp$PRETAX_CONTRB_PCT <= 7,"<= 7.0", "> 7.0"))), levels = c("Zero", "<= 4.0", "<= 7.0", "> 7.0"))
lion.samp$comp.group <- factor(ifelse(lion.samp$ANNUAL_COMP_AMT <= 45000, "Low",
ifelse(lion.samp$ANNUAL_COMP_AMT <= 115000, "Moderate", "High")), levels = c("Low", "Moderate", "High"))
# Full logistic model run of available variables
lion.irr.mdl <- glm(end.IRR.70 ~ begin_date + segment + Ao1_Managed_Accounts + Personal_Asset_Manager + Ao1_S_and_I_Experience + Personalized_Messaging
+ After_Tax_Source + Catch_Up_Contributions + Roth_Source + QDIA + Number_of_Investments + Auto_Rebalancing + Auto_Enrollment
+ Auto_Increase + db_available + NQ_Available + Brokerage + Target_Date_Funds + Re_Enrollment + Company_Stock + Auto_Enrolled +
EE_Match + ER_Match1 + Dream_Machine_Interactive + ANNUAL_COMP_AMT + AGE + GENDER_CD + PRETAX_CONTRB_PCT +
Eligibility_Date + Strategy + Hardship_Count + Loan_Count +
generation + age.cohort + Pretax_Contrib + comp.group, data = lion.samp, family = "binomial")
# Reduced model: keeps only the predictors of interest from the full run
lion.irr.mdl <- glm(end.IRR.70 ~ begin_date + segment + Ao1_Managed_Accounts + Ao1_S_and_I_Experience + After_Tax_Source
+ Auto_Rebalancing + Auto_Enrollment + Auto_Increase + NQ_Available + EE_Match + generation +
GENDER_CD + Eligibility_Date + Strategy + Hardship_Count + Loan_Count + Pretax_Contrib + comp.group
, data = lion.samp, family = "binomial")
summary(lion.irr.mdl)
|
# This method 'read.nii' reads a single-file NIFTI-1 image (.nii) into a
# 'nifti_one' S4 object: the 348-byte header, any header extensions up to
# vox_offset, then the voxel data as an array.
setGeneric("read.nii",function(object, filename) standardGeneric("read.nii"));
setMethod("read.nii", signature(object = "nifti_one"), function(object, filename)
{
# NOTE(review): the passed-in object is discarded and replaced with a fresh
# instance, so the 'object' argument serves only for dispatch.
object <- new('nifti_one')
con <- file(filename,'rb');
# The first 4 bytes must decode to 348 (the fixed header size); if they do
# under little-endian, the file is little-endian, otherwise assume big.
endian <- if ((sizeof_hdr <- readBin(con,"int",1,4,endian="little")) == 348) "little" else "big"
# The slot is hard-set to 348 regardless of the raw value read above.
object@sizeof_hdr <- 348
# Header fields, read in the exact on-disk order of the NIFTI-1 layout.
object@data_type <- readChar(con,10,TRUE)
object@db_name <- readChar(con,18,TRUE)
object@extents <- readBin(con,'int',1,4,endian = endian)
object@session_error <- readBin(con,'int',1,2,endian=endian)
object@regular <- readChar(con,1,TRUE)
object@dim_info <- readChar(con,1,TRUE)
# dim_[1] = number of dimensions; dim_[2..8] = extents per dimension
object@dim_ = readBin(con,'int',8,2,endian=endian)
object@intent_p1 <- readBin(con,'double',1,4,endian=endian)
object@intent_p2 <- readBin(con,'double',1,4,endian=endian)
object@intent_p3 <- readBin(con,'double',1,4,endian=endian)
object@intent_code <- readBin(con,'int',1,2,endian=endian)
object@datatype <- readBin(con,'int',1,2,endian=endian)
object@bitpix <- readBin(con,'int',1,2,endian=endian)
object@slice_start <- readBin(con,'int',1,2,endian=endian)
object@pixdim <- readBin(con,'double',8,4,endian=endian)
# vox_offset: byte offset where the voxel data begins (352 when no extensions)
object@vox_offset <- readBin(con,'double',1,4,endian=endian)
object@scl_slope <- readBin(con,'double',1,4,endian=endian)
object@scl_inter <- readBin(con,'double',1,4,endian=endian)
object@slice_end <- readBin(con,'int',1,2,endian=endian)
# NOTE(review): slice_code/xyzt_units are read as single chars here;
# NIFTI-1 declares them as byte codes -- confirm downstream consumers agree.
object@slice_code <- readChar(con,1,TRUE)
object@xyzt_units <- readChar(con,1,TRUE)
object@cal_min <- readBin(con,'double',1,4,endian=endian)
object@cal_max <- readBin(con,'double',1,4,endian=endian)
object@slice_duration <- readBin(con,'double',1,4,endian=endian)
object@toffset <- readBin(con,'double',1,4,endian=endian)
object@glmax <- readBin(con,'int',1,4,endian=endian)
object@glmin <- readBin(con,'int',1,4,endian=endian)
object@descrip <- readChar(con,80,TRUE)
object@aux_file <- readChar(con,24,TRUE)
object@qform_code <- readBin(con,'int',1,2,endian=endian)
object@sform_code <- readBin(con,'int',1,2,endian=endian)
object@quatern_b <- readBin(con,'double',1,4,endian=endian)
object@quatern_c <- readBin(con,'double',1,4,endian=endian)
object@quatern_d <- readBin(con,'double',1,4,endian=endian)
object@qoffset_x <- readBin(con,'double',1,4,endian=endian)
object@qoffset_y <- readBin(con,'double',1,4,endian=endian)
object@qoffset_z <- readBin(con,'double',1,4,endian=endian)
object@srow_x <- readBin(con,'double',4,4,endian=endian)
object@srow_y <- readBin(con,'double',4,4,endian=endian)
object@srow_z <- readBin(con,'double',4,4,endian=endian)
object@intent_name <- readChar(con,16,TRUE)
object@magic <- readChar(con,4,TRUE)
# 4-byte extender; a non-empty first byte signals header extensions follow
object@extender <- readChar(con,4,TRUE)
object@file_type <- "NIFTI"
# bp tracks the byte position after the 348-byte header + 4-byte extender
bp = 352;
id = 1;
if (object@extender != "")
{
# Extensions are esize(4) + ecode(4) + edata(esize - 8) records packed
# between the header and vox_offset.
while (bp < object@vox_offset)
{
object@extention$esize[[id]] <- readBin(con,'int',1,4,endian=endian)
object@extention$ecode[[id]] <- readBin(con,'int',1,4,endian=endian)
object@extention$edata[[id]] <- readChar(con,object@extention$esize[[id]] - 8,TRUE)
bp <- bp + object@extention$esize[[id]]
id <- id + 1
}
}
# Image dimensions: x, y, z, time, and a 5th dimension (e.g. vector values)
dx <- object@dim_[2]
dy <- object@dim_[3]
dz <- object@dim_[4]
dt <- object@dim_[5]
dd <- object@dim_[6]
# Map the NIFTI datatype code to readBin()'s type/size/signedness.
# When bitpix is set, element size is derived from it; otherwise the
# code's natural size is used.
if (object@datatype == 1) #binary
{
type <- "raw"
size <- 1 #may be wrong
signed <- TRUE
}
else if (object@datatype == 2) #unsigned char
{
type <- "int"
signed <- FALSE
size <- if (object@bitpix) object@bitpix / dd / 8 else 1
}
else if (object@datatype == 4) #signed short
{
type <- "int"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 2
}
else if (object@datatype == 8) #signed int
{
type <- "int"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 4
}
else if (object@datatype == 16) #float
{
type <- "double"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 4
}
else if (object@datatype == 32) #complex
{
type <- "complex"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 8
}
else if (object@datatype == 64) #double
{
type <- "double"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 8
}
else if (object@datatype == 128) #RGB
{
type <- "int"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 3
}
else
{
# Unknown datatype code: fall back to 1-byte signed ints
type <- 'int'
signed <- TRUE
size <- 1
}
# Read the full voxel block and shape it according to dim_[1]
object@image <- array(readBin(con,type,dx * dy * dz * dt * dd, size, signed = signed, endian = endian) ,dim =
if(object@dim_[1] == 5) c(dx,dy,dz,dt,dd)
else if (object@dim_[1] == 4) c(dx,dy,dz,dt)
else if (object@dim_[1] == 3) c(dx,dy,dz)
else if (object@dim_[1] == 2) c(dx,dy)
)
close(con)
object
}
)
# Convenience wrapper: build an empty nifti_one and populate it from a
# single-file NIFTI (.nii) image via the read.nii S4 method.
read.NIFTI.nii <- function(filename)
{
  read.nii(new("nifti_one"), filename)
}
setGeneric("read.hdr_img",function(object, filename ) standardGeneric("read.hdr_img"))
# Read a two-file NIFTI/ANALYZE pair (<filename>.hdr + <filename>.img) into a
# 'nifti_one' object. 'filename' is the base path WITHOUT extension.
# Fixes vs. the original extension loop:
#  * esize was read TWICE (once as e_size, once into the slot), which
#    misaligned the byte stream by 4 bytes per record -- the peeked e_size
#    is now used directly;
#  * it accumulated into an uninitialized variable 'bp' (a runtime error
#    whenever extensions were present) -- the loop is EOF-driven, so the
#    counter is dropped.
setMethod("read.hdr_img", signature(object = "nifti_one"), function(object, filename )
{
# The passed-in object is only used for dispatch; start from a fresh instance.
object <- new('nifti_one')
con <- file(paste(filename,'.hdr',sep=""),'rb')
# First 4 bytes must decode to 348 (fixed header size); if they do under
# little-endian, the file is little-endian, otherwise assume big-endian.
endian <- if ((sizeof_hdr <- readBin(con,"int",1,4,endian="little")) == 348) "little" else "big"
object@sizeof_hdr <- 348
# Header fields, read in the exact on-disk order of the NIFTI-1 layout.
object@data_type <- readChar(con,10,TRUE)
object@db_name <- readChar(con,18,TRUE)
object@extents <- readBin(con,'int',1,4,endian = endian)
object@session_error <- readBin(con,'int',1,2,endian=endian)
object@regular <- readChar(con,1,TRUE)
object@dim_info <- readChar(con,1,TRUE)
# dim_[1] = number of dimensions; dim_[2..8] = extents per dimension
object@dim_ <- readBin(con,'int',8,2,endian=endian)
object@intent_p1 <- readBin(con,'double',1,4,endian=endian)
object@intent_p2 <- readBin(con,'double',1,4,endian=endian)
object@intent_p3 <- readBin(con,'double',1,4,endian=endian)
object@intent_code <- readBin(con,'int',1,2,endian=endian)
object@datatype <- readBin(con,'int',1,2,endian=endian)
object@bitpix <- readBin(con,'int',1,2,endian=endian)
object@slice_start <- readBin(con,'int',1,2,endian=endian)
object@pixdim <- readBin(con,'double',8,4,endian=endian)
object@vox_offset <- readBin(con,'double',1,4,endian=endian)
object@scl_slope <- readBin(con,'double',1,4,endian=endian)
object@scl_inter <- readBin(con,'double',1,4,endian=endian)
object@slice_end <- readBin(con,'int',1,2,endian=endian)
object@slice_code <- readChar(con,1,TRUE)
object@xyzt_units <- readChar(con,1,TRUE)
object@cal_min <- readBin(con,'double',1,4,endian=endian)
object@cal_max <- readBin(con,'double',1,4,endian=endian)
object@slice_duration <- readBin(con,'double',1,4,endian=endian)
object@toffset <- readBin(con,'double',1,4,endian=endian)
object@glmax <- readBin(con,'int',1,4,endian=endian)
object@glmin <- readBin(con,'int',1,4,endian=endian)
object@descrip <- readChar(con,80,TRUE)
object@aux_file <- readChar(con,24,TRUE)
object@qform_code <- readBin(con,'int',1,2,endian=endian)
object@sform_code <- readBin(con,'int',1,2,endian=endian)
object@quatern_b <- readBin(con,'double',1,4,endian=endian)
object@quatern_c <- readBin(con,'double',1,4,endian=endian)
object@quatern_d <- readBin(con,'double',1,4,endian=endian)
object@qoffset_x <- readBin(con,'double',1,4,endian=endian)
object@qoffset_y <- readBin(con,'double',1,4,endian=endian)
object@qoffset_z <- readBin(con,'double',1,4,endian=endian)
object@srow_x <- readBin(con,'double',4,4,endian=endian)
object@srow_y <- readBin(con,'double',4,4,endian=endian)
object@srow_z <- readBin(con,'double',4,4,endian=endian)
object@intent_name <- readChar(con,16,TRUE)
object@magic <- readChar(con,4,TRUE)
object@extender <- readChar(con,4,TRUE)
object@file_type <- "NIFTI"
id <- 1
if (object@extender != "")
{
# Extensions are esize(4) + ecode(4) + edata(esize - 8) records; in a
# separate .hdr file there is no vox_offset bound, so read until EOF.
while (1)
{
e_size <- readBin(con,'int',1,4,endian=endian)
if (length(e_size) == 0)
{
break # EOF: no more extension records
}
object@extention$esize[[id]] <- e_size
object@extention$ecode[[id]] <- readBin(con,'int',1,4,endian=endian)
object@extention$edata[[id]] <- readChar(con,e_size - 8,TRUE)
id <- id + 1
}
}
# Image dimensions: x, y, z, time, and a 5th dimension (e.g. vector values)
dx <- object@dim_[2]
dy <- object@dim_[3]
dz <- object@dim_[4]
dt <- object@dim_[5]
dd <- object@dim_[6]
# Map the NIFTI datatype code to readBin()'s type/size/signedness
# (mirrors the mapping in read.nii).
if (object@datatype == 1) #binary
{
type <- "raw"
size <- 1 #may be wrong
signed <- TRUE
}
else if (object@datatype == 2) #unsigned char
{
type <- "int"
signed <- FALSE
size <- if (object@bitpix) object@bitpix / dd / 8 else 1
}
else if (object@datatype == 4) #signed short
{
type <- "int"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 2
}
else if (object@datatype == 8) #signed int
{
type <- "int"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 4
}
else if (object@datatype == 16) #float
{
type <- "double"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 4
}
else if (object@datatype == 32) #complex
{
type <- "complex"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 8
}
else if (object@datatype == 64) #double
{
type <- "double"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 8
}
else if (object@datatype == 128) #RGB
{
type <- "int"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 3
}
else
{
# Unknown datatype code: fall back to 1-byte signed ints
type <- 'int'
signed <- TRUE
size <- 1
}
close(con)
# Voxel data lives in the companion .img file
con <- file(paste(filename,'.img',sep=""),'rb')
object@image <- array(readBin(con,type,dx * dy * dz * dt * dd, size, signed = signed, endian = endian) ,dim =
if(object@dim_[1] == 5) c(dx,dy,dz,dt,dd)
else if (object@dim_[1] == 4) c(dx,dy,dz,dt)
else if (object@dim_[1] == 3) c(dx,dy,dz)
else if (object@dim_[1] == 2) c(dx,dy)
)
close(con)
object
}
)
# Convenience wrapper around the S4 reader for paired NIfTI files:
# 'filename' is the base path shared by the ".hdr"/".img" pair
# (do not include either extension). Returns the populated nifti_one.
read.NIFTI.hdr_img <- function(filename)
{
read.hdr_img(new("nifti_one"), filename)
}
|
/Brainbase/R/readNIFTI.R
|
no_license
|
haixiao990/BrainConductor
|
R
| false
| false
| 10,394
|
r
|
#This Method 'read.nii' can read NIFTI S4 class from .nii files
# Reads a single-file NIfTI-1 image (.nii) into a 'nifti_one' object:
# the 348-byte header, any header extensions, then the voxel data, all
# from one connection.  Header fields are read strictly in on-disk
# order, so the sequence of readBin()/readChar() calls below must not
# be reordered.
setGeneric("read.nii",function(object, filename) standardGeneric("read.nii"));
setMethod("read.nii", signature(object = "nifti_one"), function(object, filename)
{
# A fresh instance is built here, so the 'object' argument only drives
# S4 dispatch; its incoming slot values are ignored.
object <- new('nifti_one')
con <- file(filename,'rb');
# sizeof_hdr is always 348 in NIfTI-1; if the first int32 read as
# little-endian is not 348, the file must be big-endian.
endian <- if ((sizeof_hdr <- readBin(con,"int",1,4,endian="little")) == 348) "little" else "big"
object@sizeof_hdr <- 348
object@data_type <- readChar(con,10,TRUE)
object@db_name <- readChar(con,18,TRUE)
object@extents <- readBin(con,'int',1,4,endian = endian)
object@session_error <- readBin(con,'int',1,2,endian=endian)
object@regular <- readChar(con,1,TRUE)
object@dim_info <- readChar(con,1,TRUE)
# dim_[1] = number of dimensions used; dim_[2..6] = size per dimension.
object@dim_ = readBin(con,'int',8,2,endian=endian)
object@intent_p1 <- readBin(con,'double',1,4,endian=endian)
object@intent_p2 <- readBin(con,'double',1,4,endian=endian)
object@intent_p3 <- readBin(con,'double',1,4,endian=endian)
object@intent_code <- readBin(con,'int',1,2,endian=endian)
object@datatype <- readBin(con,'int',1,2,endian=endian)
object@bitpix <- readBin(con,'int',1,2,endian=endian)
object@slice_start <- readBin(con,'int',1,2,endian=endian)
object@pixdim <- readBin(con,'double',8,4,endian=endian)
# vox_offset: byte offset where the voxel data begins (stored as float32).
object@vox_offset <- readBin(con,'double',1,4,endian=endian)
object@scl_slope <- readBin(con,'double',1,4,endian=endian)
object@scl_inter <- readBin(con,'double',1,4,endian=endian)
object@slice_end <- readBin(con,'int',1,2,endian=endian)
object@slice_code <- readChar(con,1,TRUE)
object@xyzt_units <- readChar(con,1,TRUE)
# NOTE(review): cal_min is read before cal_max here; the nifti1.h layout
# orders cal_max before cal_min -- verify against the spec.
object@cal_min <- readBin(con,'double',1,4,endian=endian)
object@cal_max <- readBin(con,'double',1,4,endian=endian)
object@slice_duration <- readBin(con,'double',1,4,endian=endian)
object@toffset <- readBin(con,'double',1,4,endian=endian)
object@glmax <- readBin(con,'int',1,4,endian=endian)
object@glmin <- readBin(con,'int',1,4,endian=endian)
object@descrip <- readChar(con,80,TRUE)
object@aux_file <- readChar(con,24,TRUE)
object@qform_code <- readBin(con,'int',1,2,endian=endian)
object@sform_code <- readBin(con,'int',1,2,endian=endian)
object@quatern_b <- readBin(con,'double',1,4,endian=endian)
object@quatern_c <- readBin(con,'double',1,4,endian=endian)
object@quatern_d <- readBin(con,'double',1,4,endian=endian)
object@qoffset_x <- readBin(con,'double',1,4,endian=endian)
object@qoffset_y <- readBin(con,'double',1,4,endian=endian)
object@qoffset_z <- readBin(con,'double',1,4,endian=endian)
object@srow_x <- readBin(con,'double',4,4,endian=endian)
object@srow_y <- readBin(con,'double',4,4,endian=endian)
object@srow_z <- readBin(con,'double',4,4,endian=endian)
object@intent_name <- readChar(con,16,TRUE)
object@magic <- readChar(con,4,TRUE)
object@extender <- readChar(con,4,TRUE)
object@file_type <- "NIFTI"
# Header extensions occupy bytes [352, vox_offset); each record is
# esize (int32), ecode (int32), then esize - 8 bytes of payload.
bp = 352;
id = 1;
if (object@extender != "")
{
while (bp < object@vox_offset)
{
object@extention$esize[[id]] <- readBin(con,'int',1,4,endian=endian)
object@extention$ecode[[id]] <- readBin(con,'int',1,4,endian=endian)
object@extention$edata[[id]] <- readChar(con,object@extention$esize[[id]] - 8,TRUE)
bp <- bp + object@extention$esize[[id]]
id <- id + 1
}
}
# Dimension sizes: x, y, z, time, and the 5th ("vector") dimension.
dx <- object@dim_[2]
dy <- object@dim_[3]
dz <- object@dim_[4]
dt <- object@dim_[5]
dd <- object@dim_[6]
# Map the NIfTI datatype code onto readBin() arguments (type/size/signed).
# NOTE(review): size is computed as bitpix / dd / 8, i.e. divided by the
# 5th-dimension size; a per-element byte count would normally be just
# bitpix / 8 -- confirm this is intentional.
if (object@datatype == 1) #binary
{
type <- "raw"
size <- 1 #may be wrong
signed <- TRUE
}
else if (object@datatype == 2) #unsigned char
{
type <- "int"
signed <- FALSE
size <- if (object@bitpix) object@bitpix / dd / 8 else 1
}
else if (object@datatype == 4) #signed short
{
type <- "int"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 2
}
else if (object@datatype == 8) #signed int
{
type <- "int"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 4
}
else if (object@datatype == 16) #float
{
type <- "double"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 4
}
else if (object@datatype == 32) #complex
{
type <- "complex"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 8
}
else if (object@datatype == 64) #double
{
type <- "double"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 8
}
else if (object@datatype == 128) #RGB
{
type <- "int"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 3
}
else #unknown datatype code: fall back to 1-byte ints
{
type <- 'int'
signed <- TRUE
size <- 1
}
# Read the voxel block and shape it according to the declared number of
# dimensions (dim_[1]).
object@image <- array(readBin(con,type,dx * dy * dz * dt * dd, size, signed = signed, endian = endian) ,dim =
if(object@dim_[1] == 5) c(dx,dy,dz,dt,dd)
else if (object@dim_[1] == 4) c(dx,dy,dz,dt)
else if (object@dim_[1] == 3) c(dx,dy,dz)
else if (object@dim_[1] == 2) c(dx,dy)
)
close(con)
object
}
)
# Read a single-file NIfTI image (.nii) and return the populated
# nifti_one object.  The empty instance passed in only selects the S4
# method.
read.NIFTI.nii <- function(filename)
{
read.nii(new("nifti_one"), filename)
}
# Reads a NIfTI-1 image stored as a two-file .hdr/.img pair.  'filename'
# is the base path WITHOUT the ".hdr"/".img" extension: the header (and
# any header extensions) come from <filename>.hdr, the voxel data from
# <filename>.img.  Returns the populated 'nifti_one' object.
#
# Fixes relative to the previous version of the extension loop:
#  * it incremented 'bp', which was never initialized in this method, so
#    any file with extensions failed with "object 'bp' not found";
#  * it read an int32 ('e_size') to detect end-of-file, discarded it,
#    then read 'esize' again -- misaligning every esize/ecode/edata
#    record by 4 bytes.  The peeked value IS esize and is now kept.
setGeneric("read.hdr_img",function(object, filename ) standardGeneric("read.hdr_img"))
setMethod("read.hdr_img", signature(object = "nifti_one"), function(object, filename )
{
# 'object' only drives S4 dispatch; a fresh instance is populated.
object <- new('nifti_one')
con <- file(paste(filename,'.hdr',sep=""),'rb')
# sizeof_hdr is always 348 in NIfTI-1; if the first int32 read as
# little-endian is not 348, the file must be big-endian.
endian <- if ((sizeof_hdr <- readBin(con,"int",1,4,endian="little")) == 348) "little" else "big"
object@sizeof_hdr <- 348
# Header fields, read strictly in on-disk order -- do not reorder.
object@data_type <- readChar(con,10,TRUE)
object@db_name <- readChar(con,18,TRUE)
object@extents <- readBin(con,'int',1,4,endian = endian)
object@session_error <- readBin(con,'int',1,2,endian=endian)
object@regular <- readChar(con,1,TRUE)
object@dim_info <- readChar(con,1,TRUE)
# dim_[1] = number of dimensions used; dim_[2..6] = size per dimension.
object@dim_ <- readBin(con,'int',8,2,endian=endian)
object@intent_p1 <- readBin(con,'double',1,4,endian=endian)
object@intent_p2 <- readBin(con,'double',1,4,endian=endian)
object@intent_p3 <- readBin(con,'double',1,4,endian=endian)
object@intent_code <- readBin(con,'int',1,2,endian=endian)
object@datatype <- readBin(con,'int',1,2,endian=endian)
object@bitpix <- readBin(con,'int',1,2,endian=endian)
object@slice_start <- readBin(con,'int',1,2,endian=endian)
object@pixdim <- readBin(con,'double',8,4,endian=endian)
object@vox_offset <- readBin(con,'double',1,4,endian=endian)
object@scl_slope <- readBin(con,'double',1,4,endian=endian)
object@scl_inter <- readBin(con,'double',1,4,endian=endian)
object@slice_end <- readBin(con,'int',1,2,endian=endian)
object@slice_code <- readChar(con,1,TRUE)
object@xyzt_units <- readChar(con,1,TRUE)
# NOTE(review): cal_min is read before cal_max here; the nifti1.h layout
# orders cal_max before cal_min -- verify against the spec.
object@cal_min <- readBin(con,'double',1,4,endian=endian)
object@cal_max <- readBin(con,'double',1,4,endian=endian)
object@slice_duration <- readBin(con,'double',1,4,endian=endian)
object@toffset <- readBin(con,'double',1,4,endian=endian)
object@glmax <- readBin(con,'int',1,4,endian=endian)
object@glmin <- readBin(con,'int',1,4,endian=endian)
object@descrip <- readChar(con,80,TRUE)
object@aux_file <- readChar(con,24,TRUE)
object@qform_code <- readBin(con,'int',1,2,endian=endian)
object@sform_code <- readBin(con,'int',1,2,endian=endian)
object@quatern_b <- readBin(con,'double',1,4,endian=endian)
object@quatern_c <- readBin(con,'double',1,4,endian=endian)
object@quatern_d <- readBin(con,'double',1,4,endian=endian)
object@qoffset_x <- readBin(con,'double',1,4,endian=endian)
object@qoffset_y <- readBin(con,'double',1,4,endian=endian)
object@qoffset_z <- readBin(con,'double',1,4,endian=endian)
object@srow_x <- readBin(con,'double',4,4,endian=endian)
object@srow_y <- readBin(con,'double',4,4,endian=endian)
object@srow_z <- readBin(con,'double',4,4,endian=endian)
object@intent_name <- readChar(con,16,TRUE)
object@magic <- readChar(con,4,TRUE)
object@extender <- readChar(con,4,TRUE)
object@file_type <- "NIFTI"
# Header extensions: in a .hdr file they simply run to end-of-file
# (there is no vox_offset bound as in a single-file .nii).  Each record
# is esize (int32), ecode (int32), then esize - 8 bytes of payload.
id <- 1
if (object@extender != "")
{
while (TRUE)
{
e_size <- readBin(con,'int',1,4,endian=endian)
if (length(e_size) == 0)
{
break # end of file: no more extension records
}
object@extention$esize[[id]] <- e_size
object@extention$ecode[[id]] <- readBin(con,'int',1,4,endian=endian)
object@extention$edata[[id]] <- readChar(con,e_size - 8,TRUE)
id <- id + 1
}
}
# Dimension sizes: x, y, z, time, and the 5th ("vector") dimension.
dx <- object@dim_[2]
dy <- object@dim_[3]
dz <- object@dim_[4]
dt <- object@dim_[5]
dd <- object@dim_[6]
# Map the NIfTI datatype code onto readBin() arguments (type/size/signed).
# NOTE(review): size is computed as bitpix / dd / 8, i.e. divided by the
# 5th-dimension size; a per-element byte count would normally be just
# bitpix / 8 -- confirm this is intentional.
if (object@datatype == 1) #binary
{
type <- "raw"
size <- 1 #may be wrong
signed <- TRUE
}
else if (object@datatype == 2) #unsigned char
{
type <- "int"
signed <- FALSE
size <- if (object@bitpix) object@bitpix / dd / 8 else 1
}
else if (object@datatype == 4) #signed short
{
type <- "int"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 2
}
else if (object@datatype == 8) #signed int
{
type <- "int"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 4
}
else if (object@datatype == 16) #float
{
type <- "double"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 4
}
else if (object@datatype == 32) #complex
{
type <- "complex"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 8
}
else if (object@datatype == 64) #double
{
type <- "double"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 8
}
else if (object@datatype == 128) #RGB
{
type <- "int"
signed <- TRUE
size <- if (object@bitpix) object@bitpix / dd / 8 else 3
}
else #unknown datatype code: fall back to 1-byte ints
{
type <- 'int'
signed <- TRUE
size <- 1
}
close(con)
# Voxel data lives in the companion .img file.
con <- file(paste(filename,'.img',sep=""),'rb')
object@image <- array(readBin(con,type,dx * dy * dz * dt * dd, size, signed = signed, endian = endian) ,dim =
if(object@dim_[1] == 5) c(dx,dy,dz,dt,dd)
else if (object@dim_[1] == 4) c(dx,dy,dz,dt)
else if (object@dim_[1] == 3) c(dx,dy,dz)
else if (object@dim_[1] == 2) c(dx,dy)
)
close(con)
object
}
)
# Read a paired .hdr/.img NIfTI file; 'filename' must omit both the
# ".hdr" and ".img" suffixes.  Returns the populated nifti_one object.
read.NIFTI.hdr_img <- function(filename)
{
container <- new("nifti_one")
read.hdr_img(container, filename)
}
|
## ---------------------------------------------------------------------------
## 02toXML: export approved road-dig permit cases to the county "DIG_CASE"
## XML exchange format.
## Reads:  01Data_alldig.RData / 01Data_case1.RData / 01Data_case2.RData
##         (must provide `lottery.case2`)
## Writes: 110digcaselist.xml
## ---------------------------------------------------------------------------
library("dplyr")
library("magrittr")
library("data.table")
library("RCurl")
library("XML")
library("httr")
library("jsonlite")
#### Get Dig Data ####
load("01Data_alldig.RData")
load("01Data_case1.RData")
load("01Data_case2.RData")
#### Build the case table ####
appcase <- mutate(lottery.case2, StartDate = as.Date(AllowStart))
appcase <- arrange(appcase, CaseID)
appcase$CurrentStatus <- appcase$CaseStatus
appcase$un_na <- appcase$PPName2                 # pipeline-unit name
appcase$ur_dr <- ""                              # contact address (not available)
appcase$town_name <- substr(appcase$Town, 1, 3)  # first 3 chars = town name
# Manual corrections for cases whose recorded town is wrong (vectorized
# lookup replaces the previous per-row if-chain, which also errored on
# zero rows because 1:nrow() counts down).
town_fix <- c(
  "3407" = "金湖鎮", "3410" = "金湖鎮", "3522" = "金寧鄉", "3545" = "金湖鎮",
  "3627" = "金寧鄉", "3658" = "金寧鄉", "3730" = "金寧鄉", "3806" = "金沙鎮",
  "3808" = "金沙鎮", "3815" = "金城鎮", "3821" = "金寧鄉", "3843" = "金湖鎮",
  "3876" = "金寧鄉", "3895" = "金寧鄉", "3899" = "金寧鄉", "3916" = "金寧鄉",
  "3931" = "金湖鎮", "3961" = "金湖鎮", "3976" = "金寧鄉"
)
fix_idx <- as.character(appcase$CaseID) %in% names(town_fix)
appcase$town_name[fix_idx] <- unname(town_fix[as.character(appcase$CaseID[fix_idx])])
# Town name -> administrative district code; unmapped towns keep "".
district_map <- c(
  "金城鎮" = "0902001", "金沙鎮" = "0902002", "金湖鎮" = "0902003",
  "金寧鄉" = "0902004", "烈嶼鄉" = "0902005", "烏坵鄉" = "0902006"
)
appcase$DistrictNo <- unname(district_map[appcase$town_name])
appcase$DistrictNo[is.na(appcase$DistrictNo)] <- ""
# Derived permit fields (all vectorized; the previous per-row loop did
# element-wise what these do column-wise):
# CL_DA = completion/closure date; CBE_DA/CEN_DA = last approved
# restart/extension start/end; CHG_TYPE = change type (1~3);
# LASTMOD = last-modified date.
appcase$cl_da <- appcase$FiDate
appcase$abe_da <- as.character(as.Date(appcase$AllowStart))  # first approved start
appcase$aen_da <- as.character(as.Date(appcase$AllowStop))   # first approved end
appcase$adg_da <- as.character(as.numeric(difftime(as.Date(appcase$AllowStop),
                                                   as.Date(appcase$AllowStart),
                                                   units = "days")))  # duration [days]
appcase$cbe_da <- as.character(as.Date(appcase$AllowStart))  # last approved start
appcase$cen_da <- as.character(as.Date(appcase$AllowStart))  # last approved end
appcase$ac_no <- as.character(appcase$CaseID)                # permit number
appcase$daco_ti <- "09:00~17:00"                             # daytime work hours
appcase$naco_ti <- "09:00~17:00"                             # nighttime work hours
appcase$area_ta <- appcase$Area                              # work area
#### Strip parentheses from road names ####
info1 <- gsub("[()]", "", appcase$Road)
#### Centre coordinates ####
info2 <- select(appcase, X, Y)
#### Coordinate-extent (polygon) list: empty placeholder ####
info3 <- paste0('<gml:MultiPolygon srsName="EPSG:3825">', "", '</gml:MultiPolygon>') %>% noquote()
#### Assemble output fields ####
case.nor <- data.frame(
  COUNTY_CODE = rep("W", nrow(appcase)),            # county code
  TOWN_CODE = appcase$DistrictNo,                   # district code (DistrictNo)
  TOWN_NAME = appcase$town_name,                    # district name
  AC_NO = appcase$ac_no,                            # permit number (IssuanceNo)
  CASE_ID = appcase$CaseID,                         # case number (AppNo)
  CONST_NAME = appcase$EngUse,                      # project name (Purpose)
  LOCATION = appcase$Road,                          # project location (LocationRoadName)
  ADD_VI = "",                                      # village
  DG_ROAD = info1,                                  # street/avenue name
  ADD_DAN = "",                                     # section
  ADD_SH = "",                                      # lane
  ADD_NA = "",                                      # alley
  ADD_NO = "",                                      # house number
  DG_ROAD2 = rep("", nrow(appcase)),                # provincial/county road name
  DG_ROAD2_BE = rep("00", nrow(appcase)),           # road start kilometre
  DG_ROAD2_EE = rep("00", nrow(appcase)),           # road end kilometre
  A_UN = rep("金門縣政府", nrow(appcase)),          # approving agency
  ABE_DA = appcase$abe_da,                          # approved dig start (ApprovedStartDate)
  AEN_DA = appcase$aen_da,                          # approved dig end (ApprovedEndDate)
  ADG_DA = as.numeric(appcase$adg_da),              # approved duration in days
  DACO_TI = appcase$daco_ti,                        # approved daytime work hours
  NACO_TI = appcase$naco_ti,                        # approved nighttime work hours
  AREA_TA = appcase$area_ta,                        # applied work area
  UN_NA = appcase$un_na,                            # pipeline-unit name
  UR_NA = rep("", nrow(appcase)),                   # pipeline-unit contact name (UndertakerName)
  UR_DR = appcase$ur_dr,                            # contact address
  UR_TI = rep("", nrow(appcase)),                   # contact phone (UndertakerPhone)
  PURP = appcase$EngUse,                            # work description (Purpose)
  PH_URLS = rep("", nrow(appcase)),                 # pre-construction photo URL
  PH_URLA = rep("", nrow(appcase)),                 # post-construction photo URL
  DG_STATUS = as.character(appcase$CurrentStatus),  # work status
  CL_DA = appcase$cl_da,                            # completion/closure date
  CHG_TYPE = rep("", nrow(appcase)),                # change type (1~3)
  CBE_DA = appcase$cbe_da,                          # last approved restart/extension start
  CEN_DA = appcase$cen_da,                          # last approved restart/extension end
  CENTER_COORDS_X = info2$X,                        # work-extent centre x
  CENTER_COORDS_Y = info2$Y,                        # work-extent centre y
  LASTMOD = rep("", nrow(appcase)),                 # last-modified date
  stringsAsFactors = FALSE
)
#### Check and create the export file ####
case.update <- case.nor
case.update$XY <- paste0(case.update$CENTER_COORDS_X, ",", case.update$CENTER_COORDS_Y)
case.update$ADG_DA <- ifelse(is.na(case.update$ADG_DA), 0, case.update$ADG_DA)
case.update <- filter(case.update, AEN_DA != "")   # drop cases with no approved end date
# One <CASE_DETAIL> element per case (sprintf is vectorized over rows).
xmlbuff <- sprintf('<CASE_DETAIL LASTMOD="%s"><TOWN_CODE>%s</TOWN_CODE><TOWN_NAME>%s</TOWN_NAME><AC_NO>%s</AC_NO><CASE_ID>%s</CASE_ID><CONST_NAME>%s</CONST_NAME><LOCATION>%s</LOCATION><A_UN>%s</A_UN><ABE_DA>%s</ABE_DA><AEN_DA>%s</AEN_DA><ADG_DA>%s</ADG_DA><DACO_TI>%s</DACO_TI><NACO_TI>%s</NACO_TI><AREA_TA>%s</AREA_TA><UN_NA>%s</UN_NA><UR_NA>%s</UR_NA><UR_DR>%s</UR_DR><UR_TI>%s</UR_TI><DG_STATUS>%s</DG_STATUS><CENTER_COORDS><gml:Point><gml:coordinates>%s</gml:coordinates></gml:Point></CENTER_COORDS><POLY_LIST>%s</POLY_LIST></CASE_DETAIL>',
                   case.update$LASTMOD, case.update$TOWN_CODE, case.update$TOWN_NAME, case.update$AC_NO, case.update$CASE_ID, case.update$CONST_NAME,
                   case.update$LOCATION, case.update$A_UN, case.update$ABE_DA, case.update$AEN_DA, case.update$ADG_DA,
                   case.update$DACO_TI, case.update$NACO_TI, case.update$AREA_TA, case.update$UN_NA, case.update$UR_NA,
                   case.update$UR_DR, case.update$UR_TI, case.update$DG_STATUS, case.update$XY,
                   info3)
# Collapse all case elements into one string (replaces the previous
# grow-in-a-loop concatenation, which was O(n^2) and seeded its
# accumulator with an empty data.frame).
xmlbuff <- paste0(xmlbuff, collapse = "")
xmlbuff <- paste0('<?xml version="1.0" encoding="UTF-8"?><DIG_CASE xmlns:gml="http://www.opengis.net/gml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><INFO County_Code="W" Count="', nrow(case.update), '"/><CASE_LIST>',
                  xmlbuff, '</CASE_LIST></DIG_CASE>') # %>% base64Encode()
# Passing the file name lets writeLines() open and close the connection
# itself; the previous file() call created an unmanaged connection object.
writeLines(xmlbuff, "110digcaselist.xml")
|
/02toXML.R
|
no_license
|
lzxccc/110DigEvaluation
|
R
| false
| false
| 7,688
|
r
|
## 02toXML: export approved road-dig permit cases to the county
## "DIG_CASE" XML exchange format.
## Reads 01Data_*.RData (must provide `lottery.case2`); writes
## 110digcaselist.xml.
library("dplyr")
library("magrittr")
library("data.table")
library("RCurl")
library("XML")
library("httr")
library("jsonlite")
#### Get Dig Data ####
load("01Data_alldig.RData")
load("01Data_case1.RData")
load("01Data_case2.RData")
#### Build the case table ####
appcase<-mutate(lottery.case2,StartDate=as.Date(AllowStart))
appcase<-arrange(appcase,CaseID)
appcase$town_name<-""
appcase$un_na<-""
appcase$ur_dr<-""
appcase$CurrentStatus = appcase$CaseStatus
appcase$un_na = appcase$PPName2
appcase$town_name = substr(appcase$Town,1,3)
appcase$DistrictNo = ""
# NOTE(review): this per-row loop is vectorizable (named-vector lookup),
# and 1:nrow() misbehaves on zero rows -- prefer seq_len(nrow(appcase)).
for (i in 1:nrow(appcase)) { # fix incorrect town names for specific cases
if (appcase$CaseID[i]=="3407") {appcase$town_name[i]="金湖鎮"}
if (appcase$CaseID[i]=="3410") {appcase$town_name[i]="金湖鎮"}
if (appcase$CaseID[i]=="3522") {appcase$town_name[i]="金寧鄉"}
if (appcase$CaseID[i]=="3545") {appcase$town_name[i]="金湖鎮"}
if (appcase$CaseID[i]=="3627") {appcase$town_name[i]="金寧鄉"}
if (appcase$CaseID[i]=="3658") {appcase$town_name[i]="金寧鄉"}
if (appcase$CaseID[i]=="3730") {appcase$town_name[i]="金寧鄉"}
if (appcase$CaseID[i]=="3806") {appcase$town_name[i]="金沙鎮"}
if (appcase$CaseID[i]=="3808") {appcase$town_name[i]="金沙鎮"}
if (appcase$CaseID[i]=="3815") {appcase$town_name[i]="金城鎮"}
if (appcase$CaseID[i]=="3821") {appcase$town_name[i]="金寧鄉"}
if (appcase$CaseID[i]=="3843") {appcase$town_name[i]="金湖鎮"}
if (appcase$CaseID[i]=="3876") {appcase$town_name[i]="金寧鄉"}
if (appcase$CaseID[i]=="3895") {appcase$town_name[i]="金寧鄉"}
if (appcase$CaseID[i]=="3899") {appcase$town_name[i]="金寧鄉"}
if (appcase$CaseID[i]=="3916") {appcase$town_name[i]="金寧鄉"}
if (appcase$CaseID[i]=="3931") {appcase$town_name[i]="金湖鎮"}
if (appcase$CaseID[i]=="3961") {appcase$town_name[i]="金湖鎮"}
if (appcase$CaseID[i]=="3976") {appcase$town_name[i]="金寧鄉"}
# map town name -> administrative district code
if (appcase$town_name[i]=="金城鎮") {appcase$DistrictNo[i]="0902001"}
if (appcase$town_name[i]=="金湖鎮") {appcase$DistrictNo[i]="0902003"}
if (appcase$town_name[i]=="金沙鎮") {appcase$DistrictNo[i]="0902002"}
if (appcase$town_name[i]=="金寧鄉") {appcase$DistrictNo[i]="0902004"}
if (appcase$town_name[i]=="烈嶼鄉") {appcase$DistrictNo[i]="0902005"}
if (appcase$town_name[i]=="烏坵鄉") {appcase$DistrictNo[i]="0902006"}
}
# Build derived permit fields:
# completion/closure date CL_DA > done; last approved restart/extension
# start date CBE_DA (#1041100007); last approved restart/extension end
# date CEN_DA; change type (1~3) CHG_TYPE; last-modified date LASTMOD
appcase$cl_da<-appcase$FiDate
appcase$abe_da<-"" # initial approved start date
appcase$aen_da<-"" # initial approved end date
appcase$adg_da<-"" # approved duration in days
appcase$cbe_da<-"" # last approved start date
appcase$cen_da<-"" # last approved end date
appcase$ac_no<-"" # permit number
appcase$daco_ti<-"" # daytime work hours
appcase$naco_ti<-"" # nighttime work hours
appcase$area_ta<-0
# NOTE(review): every assignment below is vectorizable; the loop does
# element-wise what whole-column assignments would do in one step.
for (i in 1:nrow(appcase)) {
appcase$abe_da[i] = as.character(as.Date(appcase$AllowStart[i]))
appcase$aen_da[i] = as.character(as.Date(appcase$AllowStop[i]))
appcase$adg_da[i] = difftime(as.Date(appcase$AllowStop[i]),as.Date(appcase$AllowStart[i]),units = "days")
appcase$cbe_da[i] = as.character(as.Date(appcase$AllowStart[i]))
appcase$cen_da[i] = as.character(as.Date(appcase$AllowStart[i]))
appcase$ac_no[i] = appcase$CaseID[i]
appcase$daco_ti[i] = "09:00~17:00"
appcase$naco_ti[i] = "09:00~17:00"
appcase$area_ta[i] = appcase$Area[i]
}
#### Parse lane/alley info: strip parentheses from road names ####
info1<-appcase$Road
info1 = gsub("\\(","",info1)
info1 = gsub("\\)","",info1)
#### Clean centre coordinates ####
info2<-select(appcase,X,Y)
#### Clean coordinate-extent (polygon) list: empty placeholder ####
info3<-paste0('<gml:MultiPolygon srsName="EPSG:3825">',"",'</gml:MultiPolygon>') %>% noquote()
#### Assemble output fields ####
case.nor<-data.frame(
COUNTY_CODE=rep("W",nrow(appcase)),# county code
TOWN_CODE=appcase$DistrictNo,# district code DistrictNo
TOWN_NAME=appcase$town_name,# district name
AC_NO=appcase$ac_no,# permit number IssuanceNo
CASE_ID=appcase$CaseID,# case number AppNo
CONST_NAME=appcase$EngUse,# project name Purpose
LOCATION=appcase$Road,# project location LocationRoadName
ADD_VI="",# village
DG_ROAD=info1,# street/avenue name
ADD_DAN="",# section
ADD_SH="",# lane
ADD_NA="",# alley
ADD_NO="",# house number
DG_ROAD2=rep("",nrow(appcase)),# provincial/county road name
DG_ROAD2_BE=rep("00",nrow(appcase)),# road start kilometre
DG_ROAD2_EE=rep("00",nrow(appcase)),# road end kilometre
A_UN=rep("金門縣政府",nrow(appcase)),# approving agency
ABE_DA=appcase$abe_da,# approved dig start date ApprovedStartDate
AEN_DA=appcase$aen_da,# approved dig end date ApprovedEndDate
ADG_DA=as.numeric(appcase$adg_da),# approved duration in days IssuanceDate
DACO_TI=appcase$daco_ti,# approved daytime work hours ApprovedStartTime
NACO_TI=appcase$naco_ti,# approved nighttime work hours ApprovedEndTime
AREA_TA=appcase$area_ta,# applied work area TarArea+CementArea+PedestrianArea
UN_NA=appcase$un_na,# pipeline unit name un_na
UR_NA=rep("",nrow(appcase)),# pipeline unit contact name UndertakerName
UR_DR=appcase$ur_dr,# contact address ur_dr
UR_TI=rep("",nrow(appcase)),# contact phone UndertakerPhone
PURP=appcase$EngUse,# work description Purpose
PH_URLS=rep("",nrow(appcase)),# pre-construction photo hyperlink
PH_URLA=rep("",nrow(appcase)),# post-construction photo hyperlink
DG_STATUS=as.character(appcase$CurrentStatus),# work status CurrentStatus
CL_DA=appcase$cl_da,# completion/closure date done
CHG_TYPE=rep("",nrow(appcase)),# change type (1~3)
CBE_DA=appcase$cbe_da,# last approved restart/extension start date
CEN_DA=appcase$cen_da,# last approved restart/extension end date
CENTER_COORDS_X=info2$X,# work-extent centre x coordinate info2$X
CENTER_COORDS_Y=info2$Y,# work-extent centre y coordinate info2$Y
LASTMOD=rep("",nrow(appcase))# last-modified date
,stringsAsFactors=F)
#### Check and create an export file ####
case.update = case.nor
case.update$XY<-paste0(case.update$CENTER_COORDS_X,",",case.update$CENTER_COORDS_Y)
case.update$ADG_DA<-ifelse(is.na(case.update$ADG_DA),0,case.update$ADG_DA)
case.update<-filter(case.update,AEN_DA!="")
# One <CASE_DETAIL> element per case; sprintf() is vectorized over rows.
xmlbuff<-sprintf('<CASE_DETAIL LASTMOD="%s"><TOWN_CODE>%s</TOWN_CODE><TOWN_NAME>%s</TOWN_NAME><AC_NO>%s</AC_NO><CASE_ID>%s</CASE_ID><CONST_NAME>%s</CONST_NAME><LOCATION>%s</LOCATION><A_UN>%s</A_UN><ABE_DA>%s</ABE_DA><AEN_DA>%s</AEN_DA><ADG_DA>%s</ADG_DA><DACO_TI>%s</DACO_TI><NACO_TI>%s</NACO_TI><AREA_TA>%s</AREA_TA><UN_NA>%s</UN_NA><UR_NA>%s</UR_NA><UR_DR>%s</UR_DR><UR_TI>%s</UR_TI><DG_STATUS>%s</DG_STATUS><CENTER_COORDS><gml:Point><gml:coordinates>%s</gml:coordinates></gml:Point></CENTER_COORDS><POLY_LIST>%s</POLY_LIST></CASE_DETAIL>',
case.update$LASTMOD,case.update$TOWN_CODE,case.update$TOWN_NAME,case.update$AC_NO,case.update$CASE_ID,case.update$CONST_NAME,
case.update$LOCATION,case.update$A_UN,case.update$ABE_DA,case.update$AEN_DA,case.update$ADG_DA,
case.update$DACO_TI,case.update$NACO_TI,case.update$AREA_TA,case.update$UN_NA,case.update$UR_NA,
case.update$UR_DR,case.update$UR_TI,case.update$DG_STATUS,case.update$XY,
info3) %>% noquote()
# Concatenate all case elements into a single string.
# NOTE(review): this grows a string in a loop (O(n^2)) and seeds the
# accumulator with an empty data.frame; `paste0(xmlbuff, collapse = "")`
# is the equivalent one-liner.
for (i in 1:length(xmlbuff)) {
if (i==1) {temp<-data.frame()}
temp<-paste0(temp,xmlbuff[i])
if (i==length(xmlbuff)) {
xmlbuff<-temp %>% noquote()
rm(temp)
}
}
# Wrap the case list in the DIG_CASE envelope with the case count.
xmlbuff<-paste0('<?xml version="1.0" encoding="UTF-8"?><DIG_CASE xmlns:gml="http://www.opengis.net/gml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><INFO County_Code="W" Count="',nrow(case.update),'"/><CASE_LIST>',
xmlbuff,'</CASE_LIST></DIG_CASE>') # %>% base64Encode()
writeLines(xmlbuff,con = file("110digcaselist.xml"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estP.r
\name{estP}
\alias{estP}
\title{Estimate atmospheric pressure (P)}
\usage{
estP(elev, control = list(Tko = 20))
}
\arguments{
\item{elev}{elevation [m]}
\item{control}{list for control parameters and empirical factors defined in
\code{\link{controlDefaults}} and \code{\link{constDefaults}} (see Details)}
}
\value{
atmospheric pressure [kPa]
}
\description{
Values for atmospheric pressure as a function of altitude.
}
\details{
\describe{\item{Control variables:}{
Tko: reference temperature [degreeC] at elevation z0. Often assumed to be 20 degreeC \cr \cr
z0: elevation at reference level [m] \cr \cr
a1: constant lapse rate moist air (0.0065 [K/m]) \cr \cr
g: gravitational acceleration (9.807 [m/s2]) \cr \cr
R: specific gas constant (287 [J/(kg K)])}}
}
\note{
eq. 3-2 of Reference
}
\examples{
estP(elev = 25, control = list(Tko = 20))
}
\references{
Allen, R. G., Pereira, L. S., Raes, D., & Smith, M. (1998). Crop evapotranspiration-Guidelines for computing crop water requirements-FAO Irrigation and drainage paper 56. FAO, Rome, 300(9).
}
|
/man/estP.Rd
|
no_license
|
cran/MeTo
|
R
| false
| true
| 1,137
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estP.r
\name{estP}
\alias{estP}
\title{Estimate atmospheric pressure (P)}
\usage{
estP(elev, control = list(Tko = 20))
}
\arguments{
\item{elev}{elevation [m]}
\item{control}{list for control parameters and empirical factors defined in
\code{\link{controlDefaults}} and \code{\link{constDefaults}} (see Details)}
}
\value{
atmospheric pressure [kPa]
}
\description{
Values for atmospheric pressure as a function of altitude.
}
\details{
\describe{\item{Control variables:}{
Tko: reference temperature [degreeC] at elevation z0. Often assumed to be 20 degreeC \cr \cr
z0: elevation at reference level [m] \cr \cr
a1: constant lapse rate moist air (0.0065 [K/m]) \cr \cr
g: gravitational acceleration (9.807 [m/s2]) \cr \cr
R: specific gas constant (287 [J/(kg K)])}}
}
\note{
eq. 3-2 of Reference
}
\examples{
estP(elev = 25, control = list(Tko = 20))
}
\references{
Allen, R. G., Pereira, L. S., Raes, D., & Smith, M. (1998). Crop evapotranspiration-Guidelines for computing crop water requirements-FAO Irrigation and drainage paper 56. FAO, Rome, 300(9).
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/est_gmm.R
\name{sargan}
\alias{sargan}
\title{Hansen--Sargan Test of Overidentifying Restrictions}
\usage{
sargan(object, weights = c("twosteps", "onestep"))
}
\arguments{
\item{object}{an object of class \code{"pgmm"},}
\item{weights}{the weighting matrix to be used for the computation of the
test.}
}
\value{
An object of class \code{"htest"}.
}
\description{
A test of overidentifying restrictions for models estimated by GMM.
}
\details{
The Hansen--Sargan test ("J test") calculates the quadratic form of the moment
restrictions that is minimized while computing the GMM estimator. It follows
asymptotically a chi-square distribution with number of degrees of freedom
equal to the difference between the number of moment conditions and the
number of coefficients.
}
\examples{
data("EmplUK", package = "plm")
ar <- pgmm(log(emp) ~ lag(log(emp), 1:2) + lag(log(wage), 0:1) +
lag(log(capital), 0:2) + lag(log(output), 0:2) | lag(log(emp), 2:99),
data = EmplUK, effect = "twoways", model = "twosteps")
sargan(ar)
}
\references{
\insertCite{HANS:82}{plm}
\insertCite{SARG:58}{plm}
}
\seealso{
\code{\link[=pgmm]{pgmm()}}
}
\author{
Yves Croissant
}
\keyword{htest}
|
/man/sargan.Rd
|
no_license
|
cran/plm
|
R
| false
| true
| 1,271
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/est_gmm.R
\name{sargan}
\alias{sargan}
\title{Hansen--Sargan Test of Overidentifying Restrictions}
\usage{
sargan(object, weights = c("twosteps", "onestep"))
}
\arguments{
\item{object}{an object of class \code{"pgmm"},}
\item{weights}{the weighting matrix to be used for the computation of the
test.}
}
\value{
An object of class \code{"htest"}.
}
\description{
A test of overidentifying restrictions for models estimated by GMM.
}
\details{
The Hansen--Sargan test ("J test") calculates the quadratic form of the moment
restrictions that is minimized while computing the GMM estimator. It follows
asymptotically a chi-square distribution with number of degrees of freedom
equal to the difference between the number of moment conditions and the
number of coefficients.
}
\examples{
data("EmplUK", package = "plm")
ar <- pgmm(log(emp) ~ lag(log(emp), 1:2) + lag(log(wage), 0:1) +
lag(log(capital), 0:2) + lag(log(output), 0:2) | lag(log(emp), 2:99),
data = EmplUK, effect = "twoways", model = "twosteps")
sargan(ar)
}
\references{
\insertCite{HANS:82}{plm}
\insertCite{SARG:58}{plm}
}
\seealso{
\code{\link[=pgmm]{pgmm()}}
}
\author{
Yves Croissant
}
\keyword{htest}
|
##### info ####
# file: elymus_seedling_seed_production_model_2019_density_exp
# author: Amy Kendig
# date last edited: 11/12/20
# goal: estimate Elymus seedling seed production based on density and fungicide treatments
#### set up ####
# clear all existing data
# NOTE(review): rm(list = ls()) clears the caller's global environment;
# prefer running the script in a fresh session instead.
rm(list=ls())
# load packages
library(tidyverse)
library(brms)
# import data
seedD2Dat <- read_csv("intermediate-data/ev_processed_seeds_both_year_conversion_2019_density_exp.csv")
bioD2Dat <- read_csv("data/ev_biomass_seeds_oct_2019_density_exp.csv")
plotsD <- read_csv("data/plot_treatments_2018_2019_density_exp.csv")
plotsD2 <- read_csv("data/plot_treatments_for_analyses_2018_2019_density_exp.csv")
#### edit data ####
# plant group densities
# Split the single background/background_density pair into one density
# column per plant group (zero when that group is not the background).
plotDens <- plotsD %>%
mutate(mv_seedling_density = case_when(background == "Mv seedling" ~ background_density, TRUE ~ 0),
ev_seedling_density = case_when(background == "Ev seedling" ~ background_density, TRUE ~ 0),
ev_adult_density = case_when(background == "Ev adult" ~ background_density, TRUE ~ 0))
# check for notes about seeds
unique(seedD2Dat$spikelet_notes)
filter(seedD2Dat, is.na(spikelet_weight.g))
# no missing data
# add columns
# remove missing data
# Per-plant seed totals joined to the biomass sample list and the plot
# densities; missing seed counts become 0; analysis keeps seedlings only
# (ID == "A" marks adults).
evSSeedD2Dat <- seedD2Dat %>%
group_by(site, plot, treatment, sp, ID) %>%
summarise(seeds = sum(seeds)) %>%
ungroup() %>%
full_join(bioD2Dat %>%
select(site, plot, treatment, sp, ID)) %>%
left_join(plotDens) %>%
mutate(seeds = replace_na(seeds, 0),
log_seeds = log(seeds + 1),
fungicide = ifelse(treatment == "fungicide", 1, 0),
plotr = ifelse(treatment == "fungicide", plot + 10, plot),
age = ifelse(ID == "A", "adult", "seedling"),
treatment = ifelse(treatment == "water", "control", treatment)) %>%
filter(!is.na(seeds) & age == "seedling")
#### initial visualizations ####
# modify dataset so that zero plots are repeated
vizDat <- evSSeedD2Dat %>%
mutate(treatment = ifelse(treatment == "control", "water", treatment)) %>%
select(-c(background, background_sp)) %>%
left_join(plotsD2)
# non-transformed biomass
ggplot(vizDat, aes(background_density, seeds, color = treatment)) +
stat_summary(geom = "point", fun = "mean", size = 2) +
stat_summary(geom = "errorbar", fun.data = "mean_cl_boot", width = 0) +
facet_wrap(~background, scales = "free_x") +
theme_bw()
# log-transformed biomass
ggplot(vizDat, aes(background_density, log_seeds, color = treatment)) +
stat_summary(geom = "point", fun = "mean", size = 2) +
stat_summary(geom = "errorbar", fun.data = "mean_cl_boot", width = 0) +
facet_wrap(~background, scales = "free_x") +
theme_bw()
#### fit regression ####
# remove plots with no background (negative effect on growth)
evSSeedD2Dat2 <- evSSeedD2Dat %>%
filter(background != "none")
# initial fit
# Nonlinear model: log seed production = logS (treatment + site effects)
# minus log(1 + density-weighted competition); alphaA/alphaS/alphaP are
# per-capita competition coefficients for Mv seedlings, Ev seedlings,
# and Ev adults, each varying by treatment.
# NOTE(review): `prior <- c(...)` passes the priors positionally AND
# assigns a global `prior` object as a side effect; `prior = c(...)` is
# the intended idiom.
evSSeedD2Mod1 <- brm(bf(log_seeds ~ logS - log(1 + alphaA * mv_seedling_density + alphaS * ev_seedling_density + alphaP * ev_adult_density),
logS ~ treatment + (1|site),
alphaA ~ 0 + treatment,
alphaS ~ 0 + treatment,
alphaP ~ 0 + treatment,
nl = T),
data = evSSeedD2Dat2, family = gaussian,
prior <- c(prior(normal(3, 10), nlpar = "logS", class = "b", coef = "Intercept"),
prior(normal(0, 10), nlpar = "logS", class = "b"),
prior(exponential(0.5), nlpar = "alphaA", lb = 0),
prior(exponential(0.5), nlpar = "alphaS", lb = 0),
prior(exponential(0.5), nlpar = "alphaP", lb = 0),
prior(cauchy(0, 1), nlpar = "logS", class = "sd"),
prior(cauchy(0, 1), class = "sigma")),
iter = 6000, warmup = 1000, chains = 1)
# 3 divergent transitions
summary(evSSeedD2Mod1)
# increase chains and adapt delta
evSSeedD2Mod2 <- update(evSSeedD2Mod1, chains = 3,
control = list(adapt_delta = 0.999))
summary(evSSeedD2Mod2)
plot(evSSeedD2Mod2)
# NOTE(review): `nsamples` is deprecated in newer brms (use `ndraws`);
# kept here presumably for the brms version in use -- confirm.
pp_check(evSSeedD2Mod2, nsamples = 50)
#### visualize ####
# simulation data
# Density gradient for each background type in turn, with the other two
# groups held at zero, crossed with both fungicide levels.
simDat <- tibble(mv_seedling_density = c(seq(0, 64, length.out = 100), rep(0, 200)),
ev_seedling_density = c(rep(0, 100), seq(0, 16, length.out = 100), rep(0, 100)),
ev_adult_density = c(rep(0, 200), seq(0, 8, length.out = 100)),
background = rep(c("Mv seedling", "Ev seedling", "Ev adult"), each = 100)) %>%
expand_grid(fungicide = c(0, 1)) %>%
mutate(treatment = ifelse(fungicide == 1, "fungicide", "control"),
site = NA,
background_density = case_when(background == "Mv seedling" ~ mv_seedling_density,
background == "Ev seedling" ~ ev_seedling_density,
TRUE ~ ev_adult_density))
# simulate fit
# Posterior mean and 95% interval of fitted log seeds, excluding the
# site random effect (re_formula = NA).
fitDat <- simDat %>%
mutate(log_seeds = fitted(evSSeedD2Mod2, newdata = ., re_formula = NA)[, "Estimate"],
log_seeds_lower = fitted(evSSeedD2Mod2, newdata = ., re_formula = NA)[, "Q2.5"],
log_seeds_upper = fitted(evSSeedD2Mod2, newdata = ., re_formula = NA)[, "Q97.5"],
treatment = ifelse(treatment == "control", "water", treatment))
# change levels on raw data
evSSeedD2Dat3 <- evSSeedD2Dat2 %>%
mutate(treatment = ifelse(treatment == "control", "water", treatment))
# fit figure
(evSSeedD2Plot <- ggplot(fitDat, aes(background_density, log_seeds, color = treatment, fill = treatment)) +
geom_ribbon(aes(ymin = log_seeds_lower, ymax = log_seeds_upper), alpha = 0.5, color = NA) +
geom_line(size = 1.5) +
stat_summary(data = evSSeedD2Dat3, geom = "errorbar", width = 0, fun.data = "mean_cl_boot") +
stat_summary(data = evSSeedD2Dat3, geom = "point", size = 2, fun = "mean") +
facet_wrap(~ background, scales = "free_x") +
theme_bw())
#### output ####
save(evSSeedD2Mod2, file = "output/elymus_seedling_seed_model_2019_density_exp.rda")
save(evSSeedD2Plot, file = "output/elymus_seedling_seed_figure_2019_density_exp.rda")
|
/code/elymus_seedling_seed_production_model_2019_density_exp.R
|
no_license
|
aekendig/microstegium-bipolaris
|
R
| false
| false
| 6,113
|
r
|
##### info ####
# file: elymus_seedling_seed_production_model_2019_density_exp
# author: Amy Kendig
# date last edited: 11/12/20
# goal: estimate Elymus seedling seed production based on density and fungicide treatments
#### set up ####
# clear all existing data
rm(list=ls())
# load packages
library(tidyverse)
library(brms)
# import data
seedD2Dat <- read_csv("intermediate-data/ev_processed_seeds_both_year_conversion_2019_density_exp.csv")
bioD2Dat <- read_csv("data/ev_biomass_seeds_oct_2019_density_exp.csv")
plotsD <- read_csv("data/plot_treatments_2018_2019_density_exp.csv")
plotsD2 <- read_csv("data/plot_treatments_for_analyses_2018_2019_density_exp.csv")
#### edit data ####
# plant group densities
plotDens <- plotsD %>%
mutate(mv_seedling_density = case_when(background == "Mv seedling" ~ background_density, TRUE ~ 0),
ev_seedling_density = case_when(background == "Ev seedling" ~ background_density, TRUE ~ 0),
ev_adult_density = case_when(background == "Ev adult" ~ background_density, TRUE ~ 0))
# check for notes about seeds
unique(seedD2Dat$spikelet_notes)
filter(seedD2Dat, is.na(spikelet_weight.g))
# no missing data
# add columns
# remove missing data
evSSeedD2Dat <- seedD2Dat %>%
group_by(site, plot, treatment, sp, ID) %>%
summarise(seeds = sum(seeds)) %>%
ungroup() %>%
full_join(bioD2Dat %>%
select(site, plot, treatment, sp, ID)) %>%
left_join(plotDens) %>%
mutate(seeds = replace_na(seeds, 0),
log_seeds = log(seeds + 1),
fungicide = ifelse(treatment == "fungicide", 1, 0),
plotr = ifelse(treatment == "fungicide", plot + 10, plot),
age = ifelse(ID == "A", "adult", "seedling"),
treatment = ifelse(treatment == "water", "control", treatment)) %>%
filter(!is.na(seeds) & age == "seedling")
#### initial visualizations ####
# modify dataset so that zero plots are repeated
vizDat <- evSSeedD2Dat %>%
mutate(treatment = ifelse(treatment == "control", "water", treatment)) %>%
select(-c(background, background_sp)) %>%
left_join(plotsD2)
# non-transformed biomass
ggplot(vizDat, aes(background_density, seeds, color = treatment)) +
stat_summary(geom = "point", fun = "mean", size = 2) +
stat_summary(geom = "errorbar", fun.data = "mean_cl_boot", width = 0) +
facet_wrap(~background, scales = "free_x") +
theme_bw()
# log-transformed biomass
ggplot(vizDat, aes(background_density, log_seeds, color = treatment)) +
stat_summary(geom = "point", fun = "mean", size = 2) +
stat_summary(geom = "errorbar", fun.data = "mean_cl_boot", width = 0) +
facet_wrap(~background, scales = "free_x") +
theme_bw()
#### fit regression ####
# remove plots with no background (negative effect on growth)
evSSeedD2Dat2 <- evSSeedD2Dat %>%
filter(background != "none")
# initial fit: nonlinear (Beverton-Holt-type) competition model for log seed production
# logS: log seed production with no competitors (fixed treatment effect + random site intercept)
# alphaA, alphaS, alphaP: per-capita competitive effects of Mv seedlings, Ev seedlings, Ev adults
# NOTE: the original passed `prior <- c(...)`, which creates a stray variable in the
# calling environment and only reaches brm's `prior` argument by positional accident;
# use the named argument `prior =` instead.
evSSeedD2Mod1 <- brm(bf(log_seeds ~ logS - log(1 + alphaA * mv_seedling_density + alphaS * ev_seedling_density + alphaP * ev_adult_density),
                        logS ~ treatment + (1|site),
                        alphaA ~ 0 + treatment,
                        alphaS ~ 0 + treatment,
                        alphaP ~ 0 + treatment,
                        nl = TRUE),
                     data = evSSeedD2Dat2, family = gaussian,
                     prior = c(prior(normal(3, 10), nlpar = "logS", class = "b", coef = "Intercept"),
                               prior(normal(0, 10), nlpar = "logS", class = "b"),
                               prior(exponential(0.5), nlpar = "alphaA", lb = 0),
                               prior(exponential(0.5), nlpar = "alphaS", lb = 0),
                               prior(exponential(0.5), nlpar = "alphaP", lb = 0),
                               prior(cauchy(0, 1), nlpar = "logS", class = "sd"),
                               prior(cauchy(0, 1), class = "sigma")),
                     iter = 6000, warmup = 1000, chains = 1)
# 3 divergent transitions
summary(evSSeedD2Mod1)
# increase chains and adapt delta
evSSeedD2Mod2 <- update(evSSeedD2Mod1, chains = 3,
control = list(adapt_delta = 0.999))
summary(evSSeedD2Mod2)
plot(evSSeedD2Mod2)
pp_check(evSSeedD2Mod2, nsamples = 50)
#### visualize ####
# simulation data
simDat <- tibble(mv_seedling_density = c(seq(0, 64, length.out = 100), rep(0, 200)),
ev_seedling_density = c(rep(0, 100), seq(0, 16, length.out = 100), rep(0, 100)),
ev_adult_density = c(rep(0, 200), seq(0, 8, length.out = 100)),
background = rep(c("Mv seedling", "Ev seedling", "Ev adult"), each = 100)) %>%
expand_grid(fungicide = c(0, 1)) %>%
mutate(treatment = ifelse(fungicide == 1, "fungicide", "control"),
site = NA,
background_density = case_when(background == "Mv seedling" ~ mv_seedling_density,
background == "Ev seedling" ~ ev_seedling_density,
TRUE ~ ev_adult_density))
# simulate fit
fitDat <- simDat %>%
mutate(log_seeds = fitted(evSSeedD2Mod2, newdata = ., re_formula = NA)[, "Estimate"],
log_seeds_lower = fitted(evSSeedD2Mod2, newdata = ., re_formula = NA)[, "Q2.5"],
log_seeds_upper = fitted(evSSeedD2Mod2, newdata = ., re_formula = NA)[, "Q97.5"],
treatment = ifelse(treatment == "control", "water", treatment))
# change levels on raw data
evSSeedD2Dat3 <- evSSeedD2Dat2 %>%
mutate(treatment = ifelse(treatment == "control", "water", treatment))
# fit figure
(evSSeedD2Plot <- ggplot(fitDat, aes(background_density, log_seeds, color = treatment, fill = treatment)) +
geom_ribbon(aes(ymin = log_seeds_lower, ymax = log_seeds_upper), alpha = 0.5, color = NA) +
geom_line(size = 1.5) +
stat_summary(data = evSSeedD2Dat3, geom = "errorbar", width = 0, fun.data = "mean_cl_boot") +
stat_summary(data = evSSeedD2Dat3, geom = "point", size = 2, fun = "mean") +
facet_wrap(~ background, scales = "free_x") +
theme_bw())
#### output ####
save(evSSeedD2Mod2, file = "output/elymus_seedling_seed_model_2019_density_exp.rda")
save(evSSeedD2Plot, file = "output/elymus_seedling_seed_figure_2019_density_exp.rda")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{gather_nrg}
\alias{gather_nrg}
\title{A function}
\usage{
gather_nrg(df)
}
\description{
This function allows you to
}
\examples{
gather_nrg()
}
|
/man/gather_nrg.Rd
|
no_license
|
srhoads/srhoads
|
R
| false
| true
| 241
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{gather_nrg}
\alias{gather_nrg}
\title{A function}
\usage{
gather_nrg(df)
}
\description{
This function allows you to
}
\examples{
gather_nrg()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllClasses.R
\docType{class}
\name{asinht-class}
\alias{asinht-class}
\alias{asinht}
\alias{eval,asinht,missing-method}
\title{Class "asinht"}
\description{
Inverse hyperbolic sine transform class, which represents a transformation
defined by the function:
\deqn{f(parameter,a,b)=sinh^{-1}(a*parameter)*b}
This definition is such that it can function as an inverse of
\code{\linkS4class{sinht}} using the same definitions of the constants a
and b.
}
\section{Slots}{
\describe{
\item{\code{.Data}}{Object of class \code{"function"}.}
\item{\code{a}}{Object of class \code{"numeric"} -- non-zero constant.}
\item{\code{b}}{Object of class \code{"numeric"} -- non-zero constant.}
\item{\code{parameters}}{Object of class \code{"transformation"} -- flow parameter
to be transformed}
\item{\code{transformationId}}{Object of class \code{"character"} -- unique ID to
reference the transformation.}
}}
\note{
The inverse hyperbolic sine transformation object can be evaluated
using the eval method by passing the data frame as an argument. The
transformed parameters are returned as a matrix with a single column. (See
example below.)
}
\section{Objects from the Class}{
Objects can be created by calls to the
constructor \code{asinht(parameter,a,b,transformationId)}
}
\section{Extends}{
Class \code{"\linkS4class{singleParameterTransform}"}, directly.
Class \code{"\linkS4class{transform}"}, by class "singleParameterTransform", distance 2.
Class \code{"\linkS4class{transformation}"}, by class "singleParameterTransform", distance 3.
Class \code{"\linkS4class{characterOrTransformation}"}, by class "singleParameterTransform", distance 4.
}
\examples{
dat <- read.FCS(system.file("extdata","0877408774.B08", package="flowCore"))
asinh1<-asinht(parameters="FSC-H",a=2,b=1,transformationId="asinH1")
transOut<-eval(asinh1)(exprs(dat))
}
\references{
Gating-ML Candidate Recommendation for Gating Description in
Flow Cytometry V 1.5
}
\seealso{
sinht
Other mathematical transform classes:
\code{\link{EHtrans-class}},
\code{\link{asinhtGml2-class}},
\code{\link{dg1polynomial-class}},
\code{\link{exponential-class}},
\code{\link{hyperlog-class}},
\code{\link{hyperlogtGml2-class}},
\code{\link{invsplitscale-class}},
\code{\link{lintGml2-class}},
\code{\link{logarithm-class}},
\code{\link{logicletGml2-class}},
\code{\link{logtGml2-class}},
\code{\link{quadratic-class}},
\code{\link{ratio-class}},
\code{\link{ratiotGml2-class}},
\code{\link{sinht-class}},
\code{\link{splitscale-class}},
\code{\link{squareroot-class}},
\code{\link{unitytransform-class}}
}
\author{
Gopalakrishnan N, F.Hahne
}
\concept{mathematical transform classes}
\keyword{classes}
|
/man/asinht-class.Rd
|
no_license
|
RGLab/flowCore
|
R
| false
| true
| 2,753
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllClasses.R
\docType{class}
\name{asinht-class}
\alias{asinht-class}
\alias{asinht}
\alias{eval,asinht,missing-method}
\title{Class "asinht"}
\description{
Inverse hyperbolic sine transform class, which represents a transformation
defined by the function:
\deqn{f(parameter,a,b)=sinh^{-1}(a*parameter)*b}
This definition is such that it can function as an inverse of
\code{\linkS4class{sinht}} using the same definitions of the constants a
and b.
}
\section{Slots}{
\describe{
\item{\code{.Data}}{Object of class \code{"function"}.}
\item{\code{a}}{Object of class \code{"numeric"} -- non-zero constant.}
\item{\code{b}}{Object of class \code{"numeric"} -- non-zero constant.}
\item{\code{parameters}}{Object of class \code{"transformation"} -- flow parameter
to be transformed}
\item{\code{transformationId}}{Object of class \code{"character"} -- unique ID to
reference the transformation.}
}}
\note{
The inverse hyperbolic sine transformation object can be evaluated
using the eval method by passing the data frame as an argument. The
transformed parameters are returned as a matrix with a single column. (See
example below.)
}
\section{Objects from the Class}{
Objects can be created by calls to the
constructor \code{asinht(parameter,a,b,transformationId)}
}
\section{Extends}{
Class \code{"\linkS4class{singleParameterTransform}"}, directly.
Class \code{"\linkS4class{transform}"}, by class "singleParameterTransform", distance 2.
Class \code{"\linkS4class{transformation}"}, by class "singleParameterTransform", distance 3.
Class \code{"\linkS4class{characterOrTransformation}"}, by class "singleParameterTransform", distance 4.
}
\examples{
dat <- read.FCS(system.file("extdata","0877408774.B08", package="flowCore"))
asinh1<-asinht(parameters="FSC-H",a=2,b=1,transformationId="asinH1")
transOut<-eval(asinh1)(exprs(dat))
}
\references{
Gating-ML Candidate Recommendation for Gating Description in
Flow Cytometry V 1.5
}
\seealso{
sinht
Other mathematical transform classes:
\code{\link{EHtrans-class}},
\code{\link{asinhtGml2-class}},
\code{\link{dg1polynomial-class}},
\code{\link{exponential-class}},
\code{\link{hyperlog-class}},
\code{\link{hyperlogtGml2-class}},
\code{\link{invsplitscale-class}},
\code{\link{lintGml2-class}},
\code{\link{logarithm-class}},
\code{\link{logicletGml2-class}},
\code{\link{logtGml2-class}},
\code{\link{quadratic-class}},
\code{\link{ratio-class}},
\code{\link{ratiotGml2-class}},
\code{\link{sinht-class}},
\code{\link{splitscale-class}},
\code{\link{squareroot-class}},
\code{\link{unitytransform-class}}
}
\author{
Gopalakrishnan N, F.Hahne
}
\concept{mathematical transform classes}
\keyword{classes}
|
# Regression test for preprocessors in the shogun static interface: applies
# the configured preprocessor, rebuilds the train/test kernel matrices, and
# checks them against the reference matrices loaded from the test fixture.
# Returns TRUE on success (or when setup is skipped), via check_accuracy().
preproc <- function(filename) {
  # Shared helpers used across the static-interface test suite.
  source('util/set_features.R')
  source('util/set_kernel.R')
  source('util/check_accuracy.R')
  source('util/tobool.R')
  source('util/fix_preproc_name_inconsistency.R')

  # Feature setup failing is treated as "nothing to test" -> success.
  if (!set_features('kernel_')) {
    return(TRUE)
  }

  name <- fix_preproc_name_inconsistency(preproc_name)
  # PRUNEVARSUBMEAN takes an extra boolean "divide" argument.
  if (regexpr('PRUNEVARSUBMEAN', name) > 0) {
    sg('add_preproc', name, tobool(preproc_arg0_divide))
  } else {
    sg('add_preproc', name)
  }
  sg('attach_preproc', 'TRAIN')
  sg('attach_preproc', 'TEST')

  if (!set_kernel()) {
    return(TRUE)
  }

  # Largest absolute deviation from the reference kernel matrices.
  train_diff <- max(abs(kernel_matrix_train - sg('get_kernel_matrix', 'TRAIN')))
  test_diff <- max(abs(kernel_matrix_test - sg('get_kernel_matrix', 'TEST')))

  return(check_accuracy(kernel_accuracy, 'kernel', list(train_diff, test_diff)))
}
|
/JS-CS-Detection-byExample/Dataset (ALERT 5 GB)/362764/shogun-1.1.0/shogun-1.1.0/testsuite/r_static/preproc.R
|
permissive
|
mkaouer/Code-Smells-Detection-in-JavaScript
|
R
| false
| false
| 846
|
r
|
# Regression test for preprocessors in the shogun static interface: applies
# the configured preprocessor, rebuilds the train/test kernel matrices, and
# checks them against the reference matrices loaded from the test fixture.
# Returns TRUE on success (or when setup is skipped), via check_accuracy().
preproc <- function(filename) {
  # Shared helpers used across the static-interface test suite.
  source('util/set_features.R')
  source('util/set_kernel.R')
  source('util/check_accuracy.R')
  source('util/tobool.R')
  source('util/fix_preproc_name_inconsistency.R')

  # Feature setup failing is treated as "nothing to test" -> success.
  if (!set_features('kernel_')) {
    return(TRUE)
  }

  name <- fix_preproc_name_inconsistency(preproc_name)
  # PRUNEVARSUBMEAN takes an extra boolean "divide" argument.
  if (regexpr('PRUNEVARSUBMEAN', name) > 0) {
    sg('add_preproc', name, tobool(preproc_arg0_divide))
  } else {
    sg('add_preproc', name)
  }
  sg('attach_preproc', 'TRAIN')
  sg('attach_preproc', 'TEST')

  if (!set_kernel()) {
    return(TRUE)
  }

  # Largest absolute deviation from the reference kernel matrices.
  train_diff <- max(abs(kernel_matrix_train - sg('get_kernel_matrix', 'TRAIN')))
  test_diff <- max(abs(kernel_matrix_test - sg('get_kernel_matrix', 'TEST')))

  return(check_accuracy(kernel_accuracy, 'kernel', list(train_diff, test_diff)))
}
|
#Read the "Human development" and "Gender inequality" data into R.
# dplyr is used throughout this script (mutate, inner_join, rename, select)
# but was never attached -- load it explicitly so the script runs standalone
library(dplyr)
hd <- read.csv("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/human_development.csv", stringsAsFactors = FALSE)
gii <- read.csv("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/gender_inequality.csv", stringsAsFactors = FALSE, na.strings = "..")
#names of human developement
names(hd)
#names of gender inequlity
names(gii)
# look at the structure of human and gender inequality
str(hd)
str(gii)
# print out summaries of the human developemnt
summary(hd)
# print out summaries of the gender inequality
summary(gii)
#Look at the meta files and rename the variables with (shorter) descriptive names. (1 point)
colnames(hd) <- c("HDI_rank", "Country", "HDI", "Life_expectancy", "Exp_education", "Mean_education",
"GNI_per_Capita","GNI_minus_HDI_rank" )
colnames(gii) <- c("GII_rank", "Country", "GII_index", "MMR", "ABR",
"PR", "SE_females", "SE_males",
"LFPR_females", "LFPR_males")
##Abbreviations used:
#Maternal mortality ratio (MMR), Adolescent birth rate (ABR)
#Share of parliamentary seats held by each sex (PR)
#Population with at least some secondary education (SE)
# Labour force participation rate (LFPR):
#Mutate the "Gender inequality" data and create two new variables.
#First: ratio of Female and Male populations with secondary education
?mutate()
gii <- mutate(gii, SE_ratio_FM = SE_females/SE_males)
#Second: ratio of labour force participation (LFPR) of females and males in each country
gii <- mutate(gii, LFPR_ratio_FM = LFPR_females/LFPR_males)
#check data
head(gii)
#Join together dataset, Country is id
human <- inner_join(hd, gii, by = "Country")
head(human)
# inspect the joined data before saving
dim(human); str(human)
#'data.frame': 195 obs. of 19 variables:looks good!
# (removed a stray "." statement that stood here; it would error at run time
# with "object '.' not found")
#save data
write.table(human, "~/GitHub/IODS-project/data/human_2018.txt", quote = FALSE, sep = "\t", row.names = FALSE)
#Exercise 5
#read in data:
human <- read.delim("~/GitHub/IODS-project/data/human_2018.txt")
str(human)
head(human)
#rename according file in datacamp exercise
?rename()
library(stringr)
human <- rename(human,
GNI = GNI_per_Capita,
Life.Exp = Life_expectancy,
Edu.Exp = Exp_education,
Edu.Mean =Mean_education,
GII.Rank=GII_rank,
GII=GII_index,
Mat.Mor =MMR,
Ado.Birth=ABR,
Parli.F=PR,
Edu2.F=SE_females,
Edu2.M=SE_males,
Labo.F=LFPR_females,
Labo.M=LFPR_males,
Edu2.FM=SE_ratio_FM,
Labo.FM=LFPR_ratio_FM)
head(human)
#GNI colum values are not okay, look at its structure
str(human$GNI)
#Factor w/ 194 levels "1,096","1,123",..: 166 135 156 139 140 137 127 154 134 117
# remove the commas from GNI and print out a numeric version of it
library(tidyr);library(stringr)
human$GNI <- str_replace(human$GNI, pattern=",", replace ="") %>% as.numeric
?mutate()
head(human);str(human$GNI)
#looks good!
#select a subset of the variables in human and drop the others from the dataset
keep <- c("Country", "Edu2.FM", "Labo.FM", "Edu.Exp", "Life.Exp",
          "GNI", "Mat.Mor", "Ado.Birth", "Parli.F")
# all_of() replaces the superseded one_of(): it errors on a missing column
# instead of silently warning, which is what we want for a fixed keep list
human <- select(human, all_of(keep))
# print out a completeness indicator of the 'human' data
complete.cases(human)
# print out the data along with a completeness indicator as the last column
data.frame(human[-1], comp = complete.cases(human))
# filter out all rows with NA values
#human_ <- filter(human, complete.cases(human))
human_ <- human[complete.cases(human), ]
# look at the last 10 observations
tail(human_, 10)
# last indice we want to keep
last <- nrow(human_) - 7
# choose everything until the last 7 observations
human_ <- human_[1:last, ]
# add countries as rownames
rownames(human_) <- human_$Country
#remove country column
human_ <- select(human_, -Country)
str(human_);head(human_)
complete.cases(human_)
write.table(human_, "~/GitHub/IODS-project/data/human_new_2018.txt", quote=F, sep="\t", row.names=T)
|
/data/create_human.R
|
no_license
|
EmmDah/IODS-project
|
R
| false
| false
| 4,160
|
r
|
#Read the "Human development" and "Gender inequality" data into R.
# dplyr is used throughout this script (mutate, inner_join, rename, select)
# but was never attached -- load it explicitly so the script runs standalone
library(dplyr)
hd <- read.csv("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/human_development.csv", stringsAsFactors = FALSE)
gii <- read.csv("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/gender_inequality.csv", stringsAsFactors = FALSE, na.strings = "..")
#names of human developement
names(hd)
#names of gender inequlity
names(gii)
# look at the structure of human and gender inequality
str(hd)
str(gii)
# print out summaries of the human developemnt
summary(hd)
# print out summaries of the gender inequality
summary(gii)
#Look at the meta files and rename the variables with (shorter) descriptive names. (1 point)
colnames(hd) <- c("HDI_rank", "Country", "HDI", "Life_expectancy", "Exp_education", "Mean_education",
"GNI_per_Capita","GNI_minus_HDI_rank" )
colnames(gii) <- c("GII_rank", "Country", "GII_index", "MMR", "ABR",
"PR", "SE_females", "SE_males",
"LFPR_females", "LFPR_males")
##Abbreviations used:
#Maternal mortality ratio (MMR), Adolescent birth rate (ABR)
#Share of parliamentary seats held by each sex (PR)
#Population with at least some secondary education (SE)
# Labour force participation rate (LFPR):
#Mutate the "Gender inequality" data and create two new variables.
#First: ratio of Female and Male populations with secondary education
?mutate()
gii <- mutate(gii, SE_ratio_FM = SE_females/SE_males)
#Second: ratio of labour force participation (LFPR) of females and males in each country
gii <- mutate(gii, LFPR_ratio_FM = LFPR_females/LFPR_males)
#check data
head(gii)
#Join together dataset, Country is id
human <- inner_join(hd, gii, by = "Country")
head(human)
# inspect the joined data before saving
dim(human); str(human)
#'data.frame': 195 obs. of 19 variables:looks good!
# (removed a stray "." statement that stood here; it would error at run time
# with "object '.' not found")
#save data
write.table(human, "~/GitHub/IODS-project/data/human_2018.txt", quote = FALSE, sep = "\t", row.names = FALSE)
#Exercise 5
#read in data:
human <- read.delim("~/GitHub/IODS-project/data/human_2018.txt")
str(human)
head(human)
#rename according file in datacamp exercise
?rename()
library(stringr)
human <- rename(human,
GNI = GNI_per_Capita,
Life.Exp = Life_expectancy,
Edu.Exp = Exp_education,
Edu.Mean =Mean_education,
GII.Rank=GII_rank,
GII=GII_index,
Mat.Mor =MMR,
Ado.Birth=ABR,
Parli.F=PR,
Edu2.F=SE_females,
Edu2.M=SE_males,
Labo.F=LFPR_females,
Labo.M=LFPR_males,
Edu2.FM=SE_ratio_FM,
Labo.FM=LFPR_ratio_FM)
head(human)
#GNI colum values are not okay, look at its structure
str(human$GNI)
#Factor w/ 194 levels "1,096","1,123",..: 166 135 156 139 140 137 127 154 134 117
# remove the commas from GNI and print out a numeric version of it
library(tidyr);library(stringr)
human$GNI <- str_replace(human$GNI, pattern=",", replace ="") %>% as.numeric
?mutate()
head(human);str(human$GNI)
#looks good!
#select a subset of the variables in human and drop the others from the dataset
keep <- c("Country", "Edu2.FM", "Labo.FM", "Edu.Exp", "Life.Exp",
          "GNI", "Mat.Mor", "Ado.Birth", "Parli.F")
# all_of() replaces the superseded one_of(): it errors on a missing column
# instead of silently warning, which is what we want for a fixed keep list
human <- select(human, all_of(keep))
# print out a completeness indicator of the 'human' data
complete.cases(human)
# print out the data along with a completeness indicator as the last column
data.frame(human[-1], comp = complete.cases(human))
# filter out all rows with NA values
#human_ <- filter(human, complete.cases(human))
human_ <- human[complete.cases(human), ]
# look at the last 10 observations
tail(human_, 10)
# last indice we want to keep
last <- nrow(human_) - 7
# choose everything until the last 7 observations
human_ <- human_[1:last, ]
# add countries as rownames
rownames(human_) <- human_$Country
#remove country column
human_ <- select(human_, -Country)
str(human_);head(human_)
complete.cases(human_)
write.table(human_, "~/GitHub/IODS-project/data/human_new_2018.txt", quote=F, sep="\t", row.names=T)
|
## Run analysis, write model results
## Before: VME_scores.csv (data)
## VME_survey_method.csv (data)
## VMEdb_Extraction_formatted.csv (data)
## After: Score_confidence.csv (model)
library(icesTAF)
library(data.table)
library(dplyr)
mkdir("model")
# Read VME indicator list with score; as agreed in the ICES WGDEC 2016
VMEscores <- read.taf("data/VME_Scores.csv")
# Read VME survey method; as agreed in the ICES WGDEC 2016
VMEsurveymeth <- read.taf("data/VME_survey_method.csv")
# Read VME database file
VMEdb <- read.taf("data/VMEdb_Extraction_formatted.csv")
# ----------------- model settings -------------------
# Determine VME index weighting values for VME scores and Abundance threshold
# (abundance will only accounts for 10% of the final scores, 90% to taxonomy)
wVMEscore <- 0.90
wABUNDthr <- 0.10
# Determine weight (kg) thresholds to be used in score calculation
SpongeThr1 <- 200
SpongeThr2 <- 60
CoralThr1 <- 30
CoralThr2 <- 1
# Determine abdundance score based on weight values
# (scores related to above thresholds, note that abundance only accounts for 10% of the final scores)
Score_below_thr <- 1 * wABUNDthr
Score_betw_thr <- 3 * wABUNDthr
Score_abov_thr <- 5 * wABUNDthr
# Define threshold on how old the last survey was
LowMyears <- 10
HighMyears <- 30
# Define confidence score for how old the last survey was
LowMaxYScore <- 1
MidMaxYScore <- 0.5
HighMaxYScore <- 0
# Define threshold for the range of years covered in the cell
LowRange <- 5
HighRange <- 10
# Define confidence score for the range of years
LowRangeScore <- 0
MidRangeScore <- 0.5
HighRangeScore <- 1
# Define the thresholds for number of surveys per CSquare
LowNsurveythr <- 3
HighNsurveythr <- 5
# Define uncertainty score for number of surveys per CSquare
LowNsurveyScore <- 0
MidNsurveyScore <- 0.5
HighNsurveyScore <- 1
# Confidence Score for type of Survey Method (K=known, Infer=infered, WK = well-known)
WK <- 1
Infer <- 0
K <- 0.5
# calculating the score of each the VME indicator group in the VMEscores data frame
VMEscores$indicator_score <-
sqrt(rowMeans(
VMEscores[,c("Rarity", "Functionality", "Fragility", "Life history", "Structural complexity")]^2
))
VMEscores <-
within(VMEscores, {
# calculating the score of each VME indicator when no abundance data is available in the VMEscores data frame
indicator_score_noabund <- indicator_score * wVMEscore
# calculating the score of each VME indicator when below 60kg of sponges or 1kg for corals in the VMEscores data frame
indicator_score_below <- indicator_score_noabund + Score_below_thr
# calculating the score of each VME indicator when between 60kg and 200 kg of sponges (1kg and 30kg of corals) in the VMEscores data frame
indicator_score_between <- indicator_score_noabund + Score_betw_thr
# calculating the score of each VME indicator when above 200 kg of sponges and 30kg of corals in the VMEscores data frame
indicator_score_above <- indicator_score_noabund + Score_abov_thr
})
# ADDING the VME indicator score when no abundance data exists in the VMEdb(i.e. vlookup)
VMEdb$noabundscore <- left_join(VMEdb, VMEscores, by = c("VME_Indicator" = "VME indicator"))$indicator_score_noabund
# ADDING the SurveyMethod score to each Method in the VMEdb
VMEdb$SurveyMethodcode <- left_join(VMEdb, VMEsurveymeth, by = "SurveyMethod")$description
VMEdb$SurveyMethodscore <-
ifelse(VMEdb$SurveyMethodcode == "WK" | !is.na(VMEdb$HabitatType),
WK,
ifelse(is.na(VMEdb$SurveyMethodcode),
Infer,
ifelse(VMEdb$SurveyMethodcode == "Infer", Infer, K)))
# VMEScores not used below this line
# ----------------- VME SCORE CALCULATION -------------------
VMEdb$Score <-
ifelse(is.na(VMEdb$VME_Indicator) & is.na(VMEdb$HabitatType),
1,
ifelse(!is.na(VMEdb$HabitatType),
5,
VMEdb$noabundscore +
ifelse(is.na(VMEdb$Weight_kg),
0,
ifelse(VMEdb$VME_Indicator == "Large Sponge" | VMEdb$VME_Indicator == "Generic Sponge",
ifelse(VMEdb$Weight_kg < SpongeThr2, Score_below_thr,
ifelse(VMEdb$Weight_kg < SpongeThr1, Score_betw_thr, Score_abov_thr)
),
ifelse(VMEdb$Weight_kg < CoralThr2, Score_below_thr,
ifelse(VMEdb$Weight_kg < CoralThr1, Score_betw_thr, Score_abov_thr))
)
)
)
)
# ---------------------- CONFIDENCE (based on MAX scores of each cell) ----------------
# Subset of VMEdb having all the records with max scores for each CSquares
VMEdb1 <- VMEdb[!is.na(VMEdb$Score),]
dt <- data.table(VMEdb1)
dt.max <- dt[,.SD[which(Score == max(Score))],by = CSquare]
df.max <- data.frame(dt.max)
# Build a "surveykey---method" composite key so survey/method pairs can be
# made unique per c-square and split apart again afterwards.
VMEdb$SkeyMeth <- paste(VMEdb$SurveyKey, VMEdb$SurveyMethod, sep = "---")
dt_meth <- data.table(VMEdb)
# EXTRACTING average score per CSquare of SurveyMethod in the VMEdb
## new way where take average value of survey methods associated with unique surveys within that cell
# one row per unique survey/method combination within each c-square;
# unique() takes no na.action/na.rm arguments -- the ones originally passed
# here were silently swallowed by `...` and have been dropped
# (also removed dt_meth1 <- data.table(df.max), which was never used)
dt.methmax1 <- dt_meth[, unique(SkeyMeth), by = CSquare]
df.methmax1 <- data.frame(dt.methmax1)
# split the composite key back into surveykey and method columns
df.methmax1 <- transform(df.methmax1, test = do.call(rbind, strsplit(V1, "---", fixed = TRUE)), stringsAsFactors = FALSE)
colnames(df.methmax1) <- c("CSquare", "MethSurv", "surveykey", "method")
# paste() turned missing methods into the literal string "NA"; restore real NAs
df.methmax1$method[which(df.methmax1$method == "NA")] <- NA
df.methmax1$SurveyMethodcode <- left_join(df.methmax1, VMEsurveymeth, by = c("method" = "SurveyMethod"))$description
df.methmax1$SurveyMethodscore <-
ifelse(df.methmax1$SurveyMethodcode == "WK" ,
WK,
ifelse(is.na(df.methmax1$SurveyMethodcode),
Infer,
ifelse(df.methmax1$SurveyMethodcode == "Infer",
Infer,
K)))
# Calcuylate the average “survey method” score for each individual csquares
# (of all the individual records within a csquare).
MeanMethScore_csq2_new <- unique(df.methmax1["CSquare"])
rownames(MeanMethScore_csq2_new) <- MeanMethScore_csq2_new$CSquare
tbl.methodScore <- tapply(df.methmax1$SurveyMethodscore, df.methmax1$CSquare, mean)
MeanMethScore_csq2_new$MeanMethErr_new <- tbl.methodScore[MeanMethScore_csq2_new$CSquare]
# EXTRACTING the highest year in each CSquare in the df.max
# WARNINGS
MaxYear_csq2 <- aggregate(df.max$Year ~ df.max$CSquare, df.max, max, na.action = NULL, na.rm = TRUE)
colnames(MaxYear_csq2) <- c("CSquare", "MaxYear")
# EXTRACTING the lowest year in each CSquare in the df.max
## WARNINGS
MinYear_csq2 <- aggregate(df.max$Year ~ df.max$CSquare, df.max, min, na.action = NULL, na.rm = TRUE)
colnames(MinYear_csq2) <- c("CSquare", "MinYear")
# MERGING MIN AND MAX YEARS IN ONE DATAFRAME
Years_csq2 <- merge(MaxYear_csq2,MinYear_csq2, by = "CSquare")
# CALCULATING the range of years in each CSquare
Years_csq2$Range <- (Years_csq2$MaxYear - Years_csq2$MinYear) + 1
# CALCULATING the number of years since 2016 in each CSquare
Years_csq2$Myears <- (2016 - Years_csq2$MaxYear)
# SUBSTITUTING THE -INF BY NA
Years_csq2[Years_csq2 == "-Inf" ] <- NA
Years_csq2[Years_csq2 == "Inf" ] <- NA
# EXTRACTING the max VME score in each CSquare in the df.max
MaxScore_csq2 <- aggregate(df.max$Score ~ df.max$CSquare, df.max, max)
colnames(MaxScore_csq2) <- c("CSquare", "MaxScore")
# EXTRACTING the number of different surveys in each CSquare
# 1 define the function then pass it into aggregate
# Count the number of distinct values in x (an NA among non-NA values counts
# as one distinct value, matching unique()). Returns NA_integer_ when every
# value is NA -- and for empty input, since all(is.na(logical(0))) is TRUE.
count_unq <- function(x) {
  # `== TRUE` was redundant; also return a typed NA so the result is always integer
  if (all(is.na(x))) {
    return(NA_integer_)
  }
  length(unique(x))
}
# 2 pass the function into aggregate
Nsurvey_csq2 <- aggregate(SurveyKey ~ CSquare, data = df.max, na.action= NULL, count_unq)
colnames(Nsurvey_csq2) <- c("CSquare", "Nsurvey")
# --------- CALCULATING CONFIDENCE ASSOCIATED WITH NSURVEYS --------
# few surveys (or unknown) -> low confidence; many surveys -> high confidence
Nsurvey_csq2$NsurveyErr <-
ifelse(Nsurvey_csq2$Nsurvey < LowNsurveythr | is.na(Nsurvey_csq2$Nsurvey),
LowNsurveyScore,
ifelse(Nsurvey_csq2$Nsurvey > HighNsurveythr,
HighNsurveyScore,
MidNsurveyScore))
# --------- CALCULATING CONFIDENCE ASSOCIATED WITH TIMES SPAN (RANGE) --------
# short span of sampled years -> low confidence; long span -> high confidence
Years_csq2$timespanErr <-
ifelse(Years_csq2$Range < LowRange | is.na(Years_csq2$Range),
LowRangeScore,
ifelse(Years_csq2$Range > HighRange,
HighRangeScore,
MidRangeScore))
# --------- CALCULATING CONFIDENCE ASSOCIATED WITH AGE OF YOUNGEST RECORDS --------
# old records -> low confidence (HighMaxYScore = 0); recent -> high (LowMaxYScore = 1)
Years_csq2$MyearsErr <-
ifelse(Years_csq2$Myears > HighMyears| is.na(Years_csq2$Myears),
HighMaxYScore,
ifelse(Years_csq2$Myears < LowMyears,
LowMaxYScore,
MidMaxYScore))
# CREATING NEW DATAFRAME FOR calculating final uncertainty score
# NOTE(review): cbind() assumes Years_csq2, MaxScore_csq2,
# MeanMethScore_csq2_new and Nsurvey_csq2 all share the same CSquare row
# order; merging by CSquare would be safer — verify the orders match.
Final2 <- cbind(Years_csq2[, c("timespanErr", "MyearsErr")],
MaxScore_csq2[, c("CSquare", "MaxScore")],
MeanMethErr_new = MeanMethScore_csq2_new$MeanMethErr_new,
NsurveysErr = Nsurvey_csq2$NsurveyErr)
# confidence is forced to 1 for confirmed habitats (MaxScore == 5)
Final2$Confidence_mean <-
ifelse(Final2$MaxScore == 5,
1,
rowMeans(subset(Final2, select = c(timespanErr, MyearsErr, MeanMethErr_new, NsurveysErr)), na.rm = TRUE))
# NOTE(review): the RMS below combines three component scores with their own
# mean (Confidence_mean) and omits MeanMethErr_new — confirm this is intended.
Final2$Uncertainty_score <-
ifelse(Final2$MaxScore == 5,
1,
sqrt(rowMeans(Final2[,c("timespanErr","MyearsErr","NsurveysErr","Confidence_mean")]^2))
)
# rename columns
Final2 <- dplyr::rename(Final2, VME_index = MaxScore)
# select only the columns we need
Final_score <- Final2[,c("CSquare","VME_index", "Uncertainty_score")]
# write output
write.taf(Final_score, file = "model/final_vme_score.csv")
|
/vme_weighting_algorithm/model.R
|
no_license
|
ices-eg/wg_WGDEC
|
R
| false
| false
| 9,635
|
r
|
## Run analysis, write model results
## Before: VME_scores.csv (data)
## VME_survey_method.csv (data)
## VMEdb_Extraction_formatted.csv (data)
## After: Score_confidence.csv (model)
## NOTE(review): the script actually writes model/final_vme_score.csv —
## update this header or the output filename so they agree.
library(icesTAF)
library(data.table)
library(dplyr)
mkdir("model")
# Read VME indicator list with score; as agreed in the ICES WGDEC 2016
VMEscores <- read.taf("data/VME_Scores.csv")
# Read VME survey method; as agreed in the ICES WGDEC 2016
VMEsurveymeth <- read.taf("data/VME_survey_method.csv")
# Read VME database file
VMEdb <- read.taf("data/VMEdb_Extraction_formatted.csv")
# ----------------- model settings -------------------
# Determine VME index weighting values for VME scores and Abundance threshold
# (abundance will only accounts for 10% of the final scores, 90% to taxonomy)
wVMEscore <- 0.90
wABUNDthr <- 0.10
# Determine weight (kg) thresholds to be used in score calculation
SpongeThr1 <- 200
SpongeThr2 <- 60
CoralThr1 <- 30
CoralThr2 <- 1
# Determine abundance score based on weight values
# (scores related to above thresholds, note that abundance only accounts for 10% of the final scores)
Score_below_thr <- 1 * wABUNDthr
Score_betw_thr <- 3 * wABUNDthr
Score_abov_thr <- 5 * wABUNDthr
# Define threshold on how old the last survey was
LowMyears <- 10
HighMyears <- 30
# Define confidence score for how old the last survey was
LowMaxYScore <- 1
MidMaxYScore <- 0.5
HighMaxYScore <- 0
# Define threshold for the range of years covered in the cell
LowRange <- 5
HighRange <- 10
# Define confidence score for the range of years
LowRangeScore <- 0
MidRangeScore <- 0.5
HighRangeScore <- 1
# Define the thresholds for number of surveys per CSquare
LowNsurveythr <- 3
HighNsurveythr <- 5
# Define uncertainty score for number of surveys per CSquare
LowNsurveyScore <- 0
MidNsurveyScore <- 0.5
HighNsurveyScore <- 1
# Confidence Score for type of Survey Method (K=known, Infer=infered, WK = well-known)
# NOTE(review): single-letter globals K/WK shadow nothing here but are easy to
# clobber; consider more descriptive names in a future refactor.
WK <- 1
Infer <- 0
K <- 0.5
# ----- derive per-indicator taxonomy scores and abundance-adjusted variants -----
# calculating the score of each the VME indicator group in the VMEscores data frame
# (root-mean-square of the five expert-scored attributes)
VMEscores$indicator_score <-
sqrt(rowMeans(
VMEscores[,c("Rarity", "Functionality", "Fragility", "Life history", "Structural complexity")]^2
))
VMEscores <-
within(VMEscores, {
# calculating the score of each VME indicator when no abundance data is available in the VMEscores data frame
indicator_score_noabund <- indicator_score * wVMEscore
# calculating the score of each VME indicator when below 60kg of sponges or 1kg for corals in the VMEscores data frame
indicator_score_below <- indicator_score_noabund + Score_below_thr
# calculating the score of each VME indicator when between 60kg and 200 kg of sponges (1kg and 30kg of corals) in the VMEscores data frame
indicator_score_between <- indicator_score_noabund + Score_betw_thr
# calculating the score of each VME indicator when above 200 kg of sponges and 30kg of corals in the VMEscores data frame
indicator_score_above <- indicator_score_noabund + Score_abov_thr
})
# ADDING the VME indicator score when no abundance data exists in the VMEdb(i.e. vlookup)
VMEdb$noabundscore <- left_join(VMEdb, VMEscores, by = c("VME_Indicator" = "VME indicator"))$indicator_score_noabund
# ADDING the SurveyMethod score to each Method in the VMEdb
VMEdb$SurveyMethodcode <- left_join(VMEdb, VMEsurveymeth, by = "SurveyMethod")$description
# NOTE(review): when SurveyMethodcode is NA and HabitatType is also NA, the
# outer condition evaluates to NA and ifelse() returns NA instead of reaching
# the is.na() branch that assigns Infer — confirm whether that is intended.
VMEdb$SurveyMethodscore <-
ifelse(VMEdb$SurveyMethodcode == "WK" | !is.na(VMEdb$HabitatType),
WK,
ifelse(is.na(VMEdb$SurveyMethodcode),
Infer,
ifelse(VMEdb$SurveyMethodcode == "Infer", Infer, K)))
# VMEScores not used below this line
# ----------------- VME SCORE CALCULATION -------------------
# Per-record VME score:
#   * no indicator and no habitat -> 1 (minimum evidence)
#   * a recorded VME habitat      -> 5 (maximum, regardless of abundance)
#   * otherwise: taxonomy score (noabundscore) plus an abundance bonus chosen
#     from sponge or coral weight thresholds (0 when weight is missing)
VMEdb$Score <-
ifelse(is.na(VMEdb$VME_Indicator) & is.na(VMEdb$HabitatType),
1,
ifelse(!is.na(VMEdb$HabitatType),
5,
VMEdb$noabundscore +
ifelse(is.na(VMEdb$Weight_kg),
0,
ifelse(VMEdb$VME_Indicator == "Large Sponge" | VMEdb$VME_Indicator == "Generic Sponge",
ifelse(VMEdb$Weight_kg < SpongeThr2, Score_below_thr,
ifelse(VMEdb$Weight_kg < SpongeThr1, Score_betw_thr, Score_abov_thr)
),
ifelse(VMEdb$Weight_kg < CoralThr2, Score_below_thr,
ifelse(VMEdb$Weight_kg < CoralThr1, Score_betw_thr, Score_abov_thr))
)
)
)
)
# ---------------------- CONFIDENCE (based on MAX scores of each cell) ----------------
# Subset of VMEdb having all the records with max scores for each CSquares
VMEdb1 <- VMEdb[!is.na(VMEdb$Score),]
dt <- data.table(VMEdb1)
# keep, per CSquare, every record that attains the cell's maximum score
dt.max <- dt[,.SD[which(Score == max(Score))],by = CSquare]
df.max <- data.frame(dt.max)
# combined key "SurveyKey---SurveyMethod" so unique survey/method pairs can be
# recovered per cell and split apart again below
VMEdb$SkeyMeth <- paste (VMEdb$SurveyKey,VMEdb$SurveyMethod, sep = "---")
dt_meth <- data.table(VMEdb)
# EXTRACTING average score per CSquare of SurveyMethod in the VMEdb
## new way where take average value of survey methods associated with unique surveys within that cell
dt_meth1 <- data.table(df.max)
dt.methmax1 <- dt_meth[,unique(SkeyMeth, na.action = NULL, na.rm = TRUE), by = CSquare]
df.methmax1 <- data.frame(dt.methmax1)
df.methmax1 <- transform(df.methmax1, test = do.call(rbind, strsplit(V1, "---", fixed = TRUE)), stringsAsFactors = FALSE)
colnames(df.methmax1) <- c("CSquare", "MethSurv", "surveykey", "method")
# paste() turned missing methods into the literal string "NA"; restore real NA
df.methmax1$method[which(df.methmax1$method == "NA")] <- NA
df.methmax1$SurveyMethodcode <- left_join(df.methmax1, VMEsurveymeth, by = c("method" = "SurveyMethod"))$description
df.methmax1$SurveyMethodscore <-
ifelse(df.methmax1$SurveyMethodcode == "WK" ,
WK,
ifelse(is.na(df.methmax1$SurveyMethodcode),
Infer,
ifelse(df.methmax1$SurveyMethodcode == "Infer",
Infer,
K)))
# Calculate the average "survey method" score for each individual csquares
# (of all the individual records within a csquare).
MeanMethScore_csq2_new <- unique(df.methmax1["CSquare"])
rownames(MeanMethScore_csq2_new) <- MeanMethScore_csq2_new$CSquare
tbl.methodScore <- tapply(df.methmax1$SurveyMethodscore, df.methmax1$CSquare, mean)
MeanMethScore_csq2_new$MeanMethErr_new <- tbl.methodScore[MeanMethScore_csq2_new$CSquare]
# EXTRACTING the highest year in each CSquare in the df.max
# WARNINGS (max over an all-NA group with na.rm = TRUE emits a warning and
# returns -Inf; the Inf values are replaced with NA further below)
MaxYear_csq2 <- aggregate(df.max$Year ~ df.max$CSquare, df.max, max, na.action = NULL, na.rm = TRUE)
colnames(MaxYear_csq2) <- c("CSquare", "MaxYear")
# EXTRACTING the lowest year in each CSquare in the df.max
## WARNINGS (same caveat as above, min returns +Inf for all-NA groups)
MinYear_csq2 <- aggregate(df.max$Year ~ df.max$CSquare, df.max, min, na.action = NULL, na.rm = TRUE)
colnames(MinYear_csq2) <- c("CSquare", "MinYear")
# MERGING MIN AND MAX YEARS IN ONE DATAFRAME
Years_csq2 <- merge(MaxYear_csq2,MinYear_csq2, by = "CSquare")
# CALCULATING the range of years in each CSquare
Years_csq2$Range <- (Years_csq2$MaxYear - Years_csq2$MinYear) + 1
# CALCULATING the number of years since 2016 in each CSquare
# NOTE(review): the reference year 2016 is hard-coded; consider a model setting.
Years_csq2$Myears <- (2016 - Years_csq2$MaxYear)
# SUBSTITUTING THE -INF BY NA
Years_csq2[Years_csq2 == "-Inf" ] <- NA
Years_csq2[Years_csq2 == "Inf" ] <- NA
# EXTRACTING the max VME score in each CSquare in the df.max
MaxScore_csq2 <- aggregate(df.max$Score ~ df.max$CSquare, df.max, max)
colnames(MaxScore_csq2) <- c("CSquare", "MaxScore")
# EXTRACTING the number of different surveys in each CSquare
# 1 define the function then pass it into aggregate
# Count distinct values in x; used as the aggregate() summary function when
# tallying the number of different surveys per CSquare.
#
# Returns NA when x is entirely NA so that CSquares with no survey
# information propagate NA instead of counting the NA itself as one survey.
# NOTE(review): when x mixes real values and NA, the NA still counts as one
# distinct value (length(unique(x)) keeps it) — confirm that is intended.
count_unq <- function(x) {
  # all() already returns a logical; comparing it to TRUE was redundant
  if (all(is.na(x))) return(NA)
  length(unique(x))
}
# 2 pass the function into aggregate
Nsurvey_csq2 <- aggregate(SurveyKey ~ CSquare, data = df.max, na.action= NULL, count_unq)
colnames(Nsurvey_csq2) <- c("CSquare", "Nsurvey")
# --------- CALCULATING CONFIDENCE ASSOCIATED WITH NSURVEYS --------
# few surveys (or unknown) -> low confidence; many surveys -> high confidence
Nsurvey_csq2$NsurveyErr <-
ifelse(Nsurvey_csq2$Nsurvey < LowNsurveythr | is.na(Nsurvey_csq2$Nsurvey),
LowNsurveyScore,
ifelse(Nsurvey_csq2$Nsurvey > HighNsurveythr,
HighNsurveyScore,
MidNsurveyScore))
# --------- CALCULATING CONFIDENCE ASSOCIATED WITH TIMES SPAN (RANGE) --------
# short span of sampled years -> low confidence; long span -> high confidence
Years_csq2$timespanErr <-
ifelse(Years_csq2$Range < LowRange | is.na(Years_csq2$Range),
LowRangeScore,
ifelse(Years_csq2$Range > HighRange,
HighRangeScore,
MidRangeScore))
# --------- CALCULATING CONFIDENCE ASSOCIATED WITH AGE OF YOUNGEST RECORDS --------
# old records -> low confidence (HighMaxYScore = 0); recent -> high (LowMaxYScore = 1)
Years_csq2$MyearsErr <-
ifelse(Years_csq2$Myears > HighMyears| is.na(Years_csq2$Myears),
HighMaxYScore,
ifelse(Years_csq2$Myears < LowMyears,
LowMaxYScore,
MidMaxYScore))
# CREATING NEW DATAFRAME FOR calculating final uncertainty score
# NOTE(review): cbind() assumes Years_csq2, MaxScore_csq2,
# MeanMethScore_csq2_new and Nsurvey_csq2 all share the same CSquare row
# order; merging by CSquare would be safer — verify the orders match.
Final2 <- cbind(Years_csq2[, c("timespanErr", "MyearsErr")],
MaxScore_csq2[, c("CSquare", "MaxScore")],
MeanMethErr_new = MeanMethScore_csq2_new$MeanMethErr_new,
NsurveysErr = Nsurvey_csq2$NsurveyErr)
# confidence is forced to 1 for confirmed habitats (MaxScore == 5)
Final2$Confidence_mean <-
ifelse(Final2$MaxScore == 5,
1,
rowMeans(subset(Final2, select = c(timespanErr, MyearsErr, MeanMethErr_new, NsurveysErr)), na.rm = TRUE))
# NOTE(review): the RMS below combines three component scores with their own
# mean (Confidence_mean) and omits MeanMethErr_new — confirm this is intended.
Final2$Uncertainty_score <-
ifelse(Final2$MaxScore == 5,
1,
sqrt(rowMeans(Final2[,c("timespanErr","MyearsErr","NsurveysErr","Confidence_mean")]^2))
)
# rename columns
Final2 <- dplyr::rename(Final2, VME_index = MaxScore)
# select only the columns we need
Final_score <- Final2[,c("CSquare","VME_index", "Uncertainty_score")]
# write output
write.taf(Final_score, file = "model/final_vme_score.csv")
|
library(stats)
library(gplots)
library(cluster)
# Cosine distance (1 - cosine similarity) between the rows of the numeric
# matrix x, returned as a "dist" object.
cosineDist <- function(x) {
  dot_products <- tcrossprod(x)          # x %*% t(x)
  row_norms <- sqrt(rowSums(x^2))
  similarity <- dot_products / outer(row_norms, row_norms)
  as.dist(1 - similarity)
}
# Pearson-correlation distance (1 - r) between the rows of x, returned as a
# "dist" object. Extra arguments are accepted for interface compatibility
# but are not used.
dist2 <- function(x, ...) {
  row_correlations <- cor(t(x), method = "pearson")
  as.dist(1 - row_correlations)
}
# Hierarchical (Ward) clustering wrapper in the interface expected by
# cluster::clusGap: takes a data matrix `x` and a cluster count `k`, returns
# list(cluster = <integer assignments, one per row of x>).
#
# Bug fix: the original clustered the global `data` instead of the `x`
# argument, so clusGap's bootstrap reference samples were silently ignored
# and the gap statistic was computed on the same clustering every time.
hierarchical <- function(x, k) {
  d <- dist(x, method = "euclidean")
  #d <- cosineDist(as.matrix(x))
  # "ward" is a deprecated alias of "ward.D" in hclust(); name it explicitly
  # to keep the same linkage without the deprecation warning.
  h_com <- hclust(d, method = "ward.D")
  clusters <- cutree(h_com, k = k)
  list(cluster = clusters)
}
# ---- full clustering analysis of the yeast expression matrix; all plots go
# ---- to one PDF device opened here and closed by dev.off() at the end
pdf("hierarchical_yeast1.pdf")
#load data
data <- read.table("yeast.txt",sep="\t",header=T)
#compute distance metric
d <- dist(data,method ="euclidean")
#d <- cosineDist(as.matrix(data))
#hierarchical clustering
h_com <- hclust(d, method = "complete")
h_avg <- hclust(d, method = "average")
h_single <- hclust(d, method = "single")
h_ward <- hclust(d,method="ward.D2")
#plot the hierarchical tree
plot(h_com)
plot(h_avg)
plot(h_single)
#cut tree at a particular height to clusters
clusters1 <- data.frame(cutree(h_com, h = 250))
#cut tree by providing the desired number of clusters
clusters2 <- data.frame(cutree(h_com, k = 2))
# visualize the dissimilarity matrix
#visualize as a heatmap
#orginal distance matrix before clustering
heatmap.2(as.matrix(d),scale="none",dendrogram="none",trace="none",Rowv=FALSE,Colv=FALSE)
#distance matrix after clustering
heatmap.2(as.matrix(d),scale="none",dendrogram="both",trace="none",Rowv=as.dendrogram(h_com),Colv=as.dendrogram(h_com))
#OR
# reorder the distance matrix by the complete-linkage dendrogram leaf order
dst <- data.matrix(d)
dst <- dst[h_com$order,h_com$order]
heatmap.2(as.matrix(dst),scale="none",dendrogram="none",trace="none",Rowv=FALSE,Colv=FALSE)
#visualizing the actual data as a heatmap
#heatmap.2(as.matrix(data),scale="col",col=bluered,dendrogram="row",trace="none",Rowv=as.dendrogram(h_com),Colv=FALSE)
#getting average silhouette and sse for different numbers of clusters
#needs cluster library
# sil[k]: mean silhouette width for the k-cluster Ward cut; see[k]: sum of
# within-cluster pairwise distances (each pair once); index 1 stays 0
kmax=15
sil <- rep(0, kmax)
see <- rep(0, kmax)
dst <- data.matrix(d)
for(k in 2:kmax)
{
clusters <- cutree(h_ward, k = k)
si <- silhouette(clusters,dmatrix=dst)
sil[k]= mean(si[,3])
for(i in 1:k)
{
# sub-matrix of distances within cluster i; divided by 2 because each pair
# appears twice in the symmetric matrix
# NOTE(review): if a cluster ever has a single member, dst[clusters==i,]
# drops to a vector and the second subscript will fail; add drop = FALSE
# if that can occur.
dd <- dst[clusters==i,]
dd <- dd[,clusters==i]
see[k] = see[k] + sum(sum(dd))/2
}
}
# Plot the average silhouette width
plot(1:kmax, sil[1:kmax], type = "b", pch = 19, frame = FALSE, xlab = "Number of clusters k",ylab="Avg silhouette")
abline(v = which.max(sil[1:kmax]), lty = 2)
plot(1:kmax, see[1:kmax], type = "b", pch = 19, frame = FALSE, xlab = "Number of clusters k",ylab="SSE")
#abline(v = which.min(see[1:kmax]), lty = 2)
#gap statistic (requires custom R function )
gap <- rep(0,kmax)
gap_stat <- clusGap(data,FUNcluster = hierarchical,K.max=kmax)
ELogW <- gap_stat$Tab[,2]
logW <- gap_stat$Tab[,1]
gap <- ELogW - logW
error <- gap_stat$Tab[,4]
plot(1:kmax, gap, type = "b", pch = 19, frame = FALSE, xlab = "Number of clusters k",ylab="Gap stat")
arrows(1:kmax, gap-error, 1:kmax, gap+error,length=0.05, angle=90, code=3)
#NbClust to determine the number of clusters (limited only to kmeans and hierarchical)
dev.off()
|
/hierarchical_clustering.R
|
no_license
|
amorrowunm7/Predictive
|
R
| false
| false
| 3,159
|
r
|
library(stats)
library(gplots)
library(cluster)
# Cosine distance (1 - cosine similarity) between the rows of the numeric
# matrix x, returned as a "dist" object.
cosineDist <- function(x) {
  dot_products <- tcrossprod(x)          # x %*% t(x)
  row_norms <- sqrt(rowSums(x^2))
  similarity <- dot_products / outer(row_norms, row_norms)
  as.dist(1 - similarity)
}
# Pearson-correlation distance (1 - r) between the rows of x, returned as a
# "dist" object. Extra arguments are accepted for interface compatibility
# but are not used.
dist2 <- function(x, ...) {
  row_correlations <- cor(t(x), method = "pearson")
  as.dist(1 - row_correlations)
}
# Hierarchical (Ward) clustering wrapper in the interface expected by
# cluster::clusGap: takes a data matrix `x` and a cluster count `k`, returns
# list(cluster = <integer assignments, one per row of x>).
#
# Bug fix: the original clustered the global `data` instead of the `x`
# argument, so clusGap's bootstrap reference samples were silently ignored
# and the gap statistic was computed on the same clustering every time.
hierarchical <- function(x, k) {
  d <- dist(x, method = "euclidean")
  #d <- cosineDist(as.matrix(x))
  # "ward" is a deprecated alias of "ward.D" in hclust(); name it explicitly
  # to keep the same linkage without the deprecation warning.
  h_com <- hclust(d, method = "ward.D")
  clusters <- cutree(h_com, k = k)
  list(cluster = clusters)
}
# ---- full clustering analysis of the yeast expression matrix; all plots go
# ---- to one PDF device opened here and closed by dev.off() at the end
pdf("hierarchical_yeast1.pdf")
#load data
data <- read.table("yeast.txt",sep="\t",header=T)
#compute distance metric
d <- dist(data,method ="euclidean")
#d <- cosineDist(as.matrix(data))
#hierarchical clustering
h_com <- hclust(d, method = "complete")
h_avg <- hclust(d, method = "average")
h_single <- hclust(d, method = "single")
h_ward <- hclust(d,method="ward.D2")
#plot the hierarchical tree
plot(h_com)
plot(h_avg)
plot(h_single)
#cut tree at a particular height to clusters
clusters1 <- data.frame(cutree(h_com, h = 250))
#cut tree by providing the desired number of clusters
clusters2 <- data.frame(cutree(h_com, k = 2))
# visualize the dissimilarity matrix
#visualize as a heatmap
#orginal distance matrix before clustering
heatmap.2(as.matrix(d),scale="none",dendrogram="none",trace="none",Rowv=FALSE,Colv=FALSE)
#distance matrix after clustering
heatmap.2(as.matrix(d),scale="none",dendrogram="both",trace="none",Rowv=as.dendrogram(h_com),Colv=as.dendrogram(h_com))
#OR
# reorder the distance matrix by the complete-linkage dendrogram leaf order
dst <- data.matrix(d)
dst <- dst[h_com$order,h_com$order]
heatmap.2(as.matrix(dst),scale="none",dendrogram="none",trace="none",Rowv=FALSE,Colv=FALSE)
#visualizing the actual data as a heatmap
#heatmap.2(as.matrix(data),scale="col",col=bluered,dendrogram="row",trace="none",Rowv=as.dendrogram(h_com),Colv=FALSE)
#getting average silhouette and sse for different numbers of clusters
#needs cluster library
# sil[k]: mean silhouette width for the k-cluster Ward cut; see[k]: sum of
# within-cluster pairwise distances (each pair once); index 1 stays 0
kmax=15
sil <- rep(0, kmax)
see <- rep(0, kmax)
dst <- data.matrix(d)
for(k in 2:kmax)
{
clusters <- cutree(h_ward, k = k)
si <- silhouette(clusters,dmatrix=dst)
sil[k]= mean(si[,3])
for(i in 1:k)
{
# sub-matrix of distances within cluster i; divided by 2 because each pair
# appears twice in the symmetric matrix
# NOTE(review): if a cluster ever has a single member, dst[clusters==i,]
# drops to a vector and the second subscript will fail; add drop = FALSE
# if that can occur.
dd <- dst[clusters==i,]
dd <- dd[,clusters==i]
see[k] = see[k] + sum(sum(dd))/2
}
}
# Plot the average silhouette width
plot(1:kmax, sil[1:kmax], type = "b", pch = 19, frame = FALSE, xlab = "Number of clusters k",ylab="Avg silhouette")
abline(v = which.max(sil[1:kmax]), lty = 2)
plot(1:kmax, see[1:kmax], type = "b", pch = 19, frame = FALSE, xlab = "Number of clusters k",ylab="SSE")
#abline(v = which.min(see[1:kmax]), lty = 2)
#gap statistic (requires custom R function )
gap <- rep(0,kmax)
gap_stat <- clusGap(data,FUNcluster = hierarchical,K.max=kmax)
ELogW <- gap_stat$Tab[,2]
logW <- gap_stat$Tab[,1]
gap <- ELogW - logW
error <- gap_stat$Tab[,4]
plot(1:kmax, gap, type = "b", pch = 19, frame = FALSE, xlab = "Number of clusters k",ylab="Gap stat")
arrows(1:kmax, gap-error, 1:kmax, gap+error,length=0.05, angle=90, code=3)
#NbClust to determine the number of clusters (limited only to kmeans and hierarchical)
dev.off()
|
#########################################
# Load libraries
#########################################
library(dplyr)
library(ggplot2)
library(ggExtra) # for boxplots on sides of scatterplot
library(scales) # for better ticks spacing on axis
#########################################
# settings manipulation for fig
#########################################
# safety_histogram(): build a baseline-vs-comparison "shift plot" for one lab
# measure — a scatter of each subject's mean baseline value against their
# mean comparison value, with marginal boxplots and an identity reference
# line.
# NOTE(review): despite the name this draws a shift plot, not a histogram
# (the file is safety_shift_plot.R) — consider renaming.
#
# Arguments:
#   data        long-format lab data, one row per subject/visit/measure
#   settings    named list mapping logical roles (id_col, value_col,
#               measure_col, visit_col, time_col, unit_col) to column names
#               in `data`, plus optional start_value / visits_base /
#               visits_comp selections
#   description used as the plot title
# Returns: the ggExtra::ggMarginal plot object (value of last expression).
safety_histogram <- function(data, settings, description){
# unpack the column-name settings
id_col <- settings[["id_col"]]
value_col <- settings[["value_col"]]
measure_col <- settings[["measure_col"]]
visit_col <- settings[["visit_col"]]
visitn_col <- settings[["time_col"]]
unit_col <- settings[["unit_col"]]
# measure to plot: explicit start_value if given, else first measure
# alphabetically
measure_selected <- ifelse(!is.null(settings[["start_value"]]),
settings[["start_value"]],
sort(unique(data[[measure_col]]))[1])
#########################################
# Prep data
#########################################
# NOTE(review): fct_reorder() is from forcats, which is not attached in this
# file — this assumes the caller loads forcats/tidyverse; verify.
dd <- data %>%
select(one_of(c(id_col, value_col, measure_col, unit_col,
visit_col, visitn_col))) %>%
setNames(., c("id_col","value_col","measure_col", "unit_col",
"visit_col","visitn_col")) %>%
filter(!is.na(value_col)) %>%
mutate(visit_col = fct_reorder(visit_col, visitn_col)) %>%
filter(measure_col==measure_selected)
### transform based on visits selected
# defaults: first visit (in time order) is baseline, all later visits are
# the comparison set
if (is.null(settings[["visits_base"]])){
visits_base <- levels(dd$visit_col)[1]
} else {
visits_base <- settings[["visits_base"]]}
if (is.null(settings[["visits_comp"]])){
visits_comp <- levels(dd$visit_col)[-1]
} else {
visits_comp <- settings[["visits_comp"]]
}
# per-subject mean over the baseline visits
dd_base <- dd %>%
filter(visit_col %in% visits_base) %>%
group_by(id_col) %>%
summarise(mean_base = mean(value_col))
# per-subject mean over the comparison visits
dd_comp <- dd %>%
filter(visit_col %in% visits_comp) %>%
group_by(id_col) %>%
summarise(mean_comp = mean(value_col))
# join on the implicit shared key (id_col); subjects missing either side are
# dropped by na.omit
dd_all <- left_join(dd_base, dd_comp) %>%
na.omit
#########################################
# Create figure
#########################################
# get labels for fig
ylab <- "Comparison Value"
xlab <- "Baseline Value"
plot_title <- description
plot_subtitle <- paste0("Measure: ", measure_selected, " (", dd$unit_col[1],")")
# color for points
col <- RColorBrewer::brewer.pal(3, "Set2")[1]
# scatterplot with y = x identity line for reference
p1 <- ggplot(data=dd_all, aes(x=mean_base, y=mean_comp)) +
geom_point(alpha=0.6, shape=21, color=col, fill=col) +
geom_abline(slope=1, intercept=0) +
theme_bw() +
labs(title=plot_title,
subtitle=plot_subtitle)+
theme(panel.border = element_blank(),
axis.line = element_line(color = 'black'))+
scale_y_continuous(ylab, breaks=pretty_breaks(n=6)) +
scale_x_continuous(xlab, breaks=pretty_breaks(n=6))
# boxplots on sides of scatterplot
ggMarginal(p1, type="boxplot", size=30, xparams = list(fill="grey90", outlier.shape=NA), yparams = list(fill="grey90", outlier.shape=NA))
}
##### testing ####
# highlight, cmd + shift + c to uncomment chunk below
# config <- list()
# config[["description"]] <- "Test page"
# config[["data"]] <- "https://raw.githubusercontent.com/RhoInc/data-library/master/data/clinical-trials/renderer-specific/adbds.csv"
# config[["settings"]] <- safetyGraphics::generateSettings("sdtm", charts="safetyshiftplot")
#
#
# data <- read.csv(config[["data"]], stringsAsFactors = FALSE, na.strings = c("NA",""))
# settings <- config[["settings"]]
# settings[["unit_col"]] <- "STRESU"
# settings[["time_col"]] <- "VISITN"
#
# # selections within the graphic
# settings[["axis"]] <- "log"
# settings[["visits_base"]] <- "Screening" # can also be NULL
# settings[["visits_comp"]] <- c("Visit 2", "Visit 3") # can also be NULL
# settings[["start_value"]] = "Bicarbonate" # if no parameter selected, defaults to first (albumin)
# description <- config$description
# safety_histogram(data=data, settings=settings, description=description)
|
/R/safety_shift_plot.R
|
permissive
|
zsigmas/safetyCharts
|
R
| false
| false
| 4,009
|
r
|
#########################################
# Load libraries
#########################################
library(dplyr)
library(ggplot2)
library(ggExtra) # for boxplots on sides of scatterplot
library(scales) # for better ticks spacing on axis
#########################################
# settings manipulation for fig
#########################################
# safety_histogram(): build a baseline-vs-comparison "shift plot" for one lab
# measure — a scatter of each subject's mean baseline value against their
# mean comparison value, with marginal boxplots and an identity reference
# line.
# NOTE(review): despite the name this draws a shift plot, not a histogram
# (the file is safety_shift_plot.R) — consider renaming.
#
# Arguments:
#   data        long-format lab data, one row per subject/visit/measure
#   settings    named list mapping logical roles (id_col, value_col,
#               measure_col, visit_col, time_col, unit_col) to column names
#               in `data`, plus optional start_value / visits_base /
#               visits_comp selections
#   description used as the plot title
# Returns: the ggExtra::ggMarginal plot object (value of last expression).
safety_histogram <- function(data, settings, description){
# unpack the column-name settings
id_col <- settings[["id_col"]]
value_col <- settings[["value_col"]]
measure_col <- settings[["measure_col"]]
visit_col <- settings[["visit_col"]]
visitn_col <- settings[["time_col"]]
unit_col <- settings[["unit_col"]]
# measure to plot: explicit start_value if given, else first measure
# alphabetically
measure_selected <- ifelse(!is.null(settings[["start_value"]]),
settings[["start_value"]],
sort(unique(data[[measure_col]]))[1])
#########################################
# Prep data
#########################################
# NOTE(review): fct_reorder() is from forcats, which is not attached in this
# file — this assumes the caller loads forcats/tidyverse; verify.
dd <- data %>%
select(one_of(c(id_col, value_col, measure_col, unit_col,
visit_col, visitn_col))) %>%
setNames(., c("id_col","value_col","measure_col", "unit_col",
"visit_col","visitn_col")) %>%
filter(!is.na(value_col)) %>%
mutate(visit_col = fct_reorder(visit_col, visitn_col)) %>%
filter(measure_col==measure_selected)
### transform based on visits selected
# defaults: first visit (in time order) is baseline, all later visits are
# the comparison set
if (is.null(settings[["visits_base"]])){
visits_base <- levels(dd$visit_col)[1]
} else {
visits_base <- settings[["visits_base"]]}
if (is.null(settings[["visits_comp"]])){
visits_comp <- levels(dd$visit_col)[-1]
} else {
visits_comp <- settings[["visits_comp"]]
}
# per-subject mean over the baseline visits
dd_base <- dd %>%
filter(visit_col %in% visits_base) %>%
group_by(id_col) %>%
summarise(mean_base = mean(value_col))
# per-subject mean over the comparison visits
dd_comp <- dd %>%
filter(visit_col %in% visits_comp) %>%
group_by(id_col) %>%
summarise(mean_comp = mean(value_col))
# join on the implicit shared key (id_col); subjects missing either side are
# dropped by na.omit
dd_all <- left_join(dd_base, dd_comp) %>%
na.omit
#########################################
# Create figure
#########################################
# get labels for fig
ylab <- "Comparison Value"
xlab <- "Baseline Value"
plot_title <- description
plot_subtitle <- paste0("Measure: ", measure_selected, " (", dd$unit_col[1],")")
# color for points
col <- RColorBrewer::brewer.pal(3, "Set2")[1]
# scatterplot with y = x identity line for reference
p1 <- ggplot(data=dd_all, aes(x=mean_base, y=mean_comp)) +
geom_point(alpha=0.6, shape=21, color=col, fill=col) +
geom_abline(slope=1, intercept=0) +
theme_bw() +
labs(title=plot_title,
subtitle=plot_subtitle)+
theme(panel.border = element_blank(),
axis.line = element_line(color = 'black'))+
scale_y_continuous(ylab, breaks=pretty_breaks(n=6)) +
scale_x_continuous(xlab, breaks=pretty_breaks(n=6))
# boxplots on sides of scatterplot
ggMarginal(p1, type="boxplot", size=30, xparams = list(fill="grey90", outlier.shape=NA), yparams = list(fill="grey90", outlier.shape=NA))
}
##### testing ####
# highlight, cmd + shift + c to uncomment chunk below
# config <- list()
# config[["description"]] <- "Test page"
# config[["data"]] <- "https://raw.githubusercontent.com/RhoInc/data-library/master/data/clinical-trials/renderer-specific/adbds.csv"
# config[["settings"]] <- safetyGraphics::generateSettings("sdtm", charts="safetyshiftplot")
#
#
# data <- read.csv(config[["data"]], stringsAsFactors = FALSE, na.strings = c("NA",""))
# settings <- config[["settings"]]
# settings[["unit_col"]] <- "STRESU"
# settings[["time_col"]] <- "VISITN"
#
# # selections within the graphic
# settings[["axis"]] <- "log"
# settings[["visits_base"]] <- "Screening" # can also be NULL
# settings[["visits_comp"]] <- c("Visit 2", "Visit 3") # can also be NULL
# settings[["start_value"]] = "Bicarbonate" # if no parameter selected, defaults to first (albumin)
# description <- config$description
# safety_histogram(data=data, settings=settings, description=description)
|
# Column names for the ACS 5-year table B02001 (race) extract.
# NOTE(review): 'other'/'other_err' appear twice (positions 16-17 and 22-23);
# read.csv() silently deduplicates the second pair to 'other.1'/'other_err.1',
# so the later entries were probably meant to be different categories —
# verify against the B02001 table layout.
colNames <- c(
'geoid',
'geoid2',
'geo_display',
'total',
'total_err',
'white',
'white_err',
'black',
'black_err',
'native',
'native_err',
'asian',
'asian_err',
'pacific',
'pacific_err',
'other',
'other_err',
'two',
'two_err',
'three',
'three_err',
'other',
'other_err')
census <- read.csv('ACS_17_5YR_B02001_with_ann.csv', header=TRUE, col.names=colNames)
# percent Black for the first row
# NOTE(review): ACS downloads often carry an annotation row as the first data
# row — confirm row 1 is real data before trusting this number.
pct_black <- census$black / census$total
print(pct_black[1]*100)
# walk the ZCTA labels with a manual row index and print the race counts for
# ZIP code 83702 (Boise)
i <- 1
for(z in census$geo_display) {
if(z == "ZCTA5 83702") {
print(c(z, census$white[i], census$black[i], census$total[i]))
}
i <- i + 1
}
|
/Census.R
|
no_license
|
MeredithConroy/Census-race-data
|
R
| false
| false
| 623
|
r
|
# Column names for the ACS 5-year table B02001 (race) extract.
# NOTE(review): 'other'/'other_err' appear twice (positions 16-17 and 22-23);
# read.csv() silently deduplicates the second pair to 'other.1'/'other_err.1',
# so the later entries were probably meant to be different categories —
# verify against the B02001 table layout.
colNames <- c(
'geoid',
'geoid2',
'geo_display',
'total',
'total_err',
'white',
'white_err',
'black',
'black_err',
'native',
'native_err',
'asian',
'asian_err',
'pacific',
'pacific_err',
'other',
'other_err',
'two',
'two_err',
'three',
'three_err',
'other',
'other_err')
census <- read.csv('ACS_17_5YR_B02001_with_ann.csv', header=TRUE, col.names=colNames)
# percent Black for the first row
# NOTE(review): ACS downloads often carry an annotation row as the first data
# row — confirm row 1 is real data before trusting this number.
pct_black <- census$black / census$total
print(pct_black[1]*100)
# walk the ZCTA labels with a manual row index and print the race counts for
# ZIP code 83702 (Boise)
i <- 1
for(z in census$geo_display) {
if(z == "ZCTA5 83702") {
print(c(z, census$white[i], census$black[i], census$total[i]))
}
i <- i + 1
}
|
library(splatter)
source("LAK.R")
library(SC3)
library(mclust)
# Fraction of zero entries (the dropout rate) in the count matrix x.
dropout <- function(x) {
  zero_count <- sum(x == 0, na.rm = TRUE)  # same count as length(which(x == 0))
  zero_count / (nrow(x) * ncol(x))
}
# ---- Simulation 1: no dropout ----
# The splatter simulation is pre-computed and cached as an .rds file; the
# commented code below records how it was generated.
#params.groups1 <- newSplatParams(batchCells = 1000, nGenes = 20000,dropout.type="none")
# One small group, one big group
#sim1 <- splatSimulateGroups(params.groups1, group.prob = c(0.3,0.3,0.3, 0.1),
# verbose = FALSE)
sim1 <- readRDS("simdata1_1000.rds")
# raw count matrix and the log2(count + 1) layer plus feature_symbol column
# required by SC3
simdata1 <- assays(sim1)$counts
assays(sim1)$logcounts <- log2(as.matrix(simdata1) + 1)
colnames(rowData(sim1))[1]<-"feature_symbol"
dropout(simdata1)
# ground-truth group labels from the simulation
simdata1_ann <- colData(sim1)$Group
# NOTE(review): result vectors are grown with c() in the loops below; with
# only 100 iterations this is harmless, but preallocating would be cleaner.
ari.lak.simdata1 <- c()
for(i in 1:100){
simdata1_LAK <- LAK(simdata1, 5,s_error = 0.05, s_value = 17.39473)
simdata1_LAK_ann <- simdata1_LAK[[1]]$Cs
simdata1_LAK_ARI <- adjustedRandIndex(as.integer(as.factor(simdata1_ann)),simdata1_LAK_ann)
ari.lak.simdata1 <- c(ari.lak.simdata1,simdata1_LAK_ARI)
cat(i,simdata1_LAK_ARI,"\n")
}
ari.lak.simdata1
ari.sc3.simdata1 <- c()
for(i in 1:100){
#simdata1_SCE <- readRDS("simdata1.rds")#14878*1886
simdata1_sc3 <- sc3(sim1,5, gene_filter = T)
simdata1_sc3_ann <- colData(simdata1_sc3)$sc3_5_clusters
simdata1_sc3_ARI <- adjustedRandIndex(as.vector(simdata1_sc3_ann),as.integer(as.factor(simdata1_ann)))
ari.sc3.simdata1 <- c(ari.sc3.simdata1, simdata1_sc3_ARI)
cat(i, simdata1_sc3_ARI, "\n")
}
ari.sc3.simdata1
# save the paired ARI series for later comparison/plotting
ari_data1 <- data.frame(ari.lak.simdata1,ari.sc3.simdata1)
write.table(ari_data1,file="ari_data1_1000.csv",quote=FALSE,sep=",",col.names=TRUE,row.names=F)
#saveRDS(sim1,file="simdata1_1000.rds")
# ---- Simulation 2: experiment-level dropout, dropout.mid = 0 ----
#params.groups2 <- newSplatParams(batchCells = 1000, nGenes = 20000,dropout.type="experiment",
# dropout.mid=0, dropout.shape = -1)
#sim2 <- splatSimulateGroups(params.groups2, group.prob = c(0.3,0.3,0.3, 0.1),
# verbose = FALSE)
sim2 <- readRDS("simdata2_1000.rds")
simdata2 <- assays(sim2)$counts
assays(sim2)$logcounts <- log2(as.matrix(simdata2) + 1)
colnames(rowData(sim2))[1]<-"feature_symbol"
simdata2_ann <- colData(sim2)$Group
# 100 runs of LAK clustering scored by adjusted Rand index against the truth
ari.lak.simdata2 <- c()
for(i in 1:100){
simdata2_LAK <- LAK(simdata2, 5,s_error = 0.05, s_value = 16.09453)
simdata2_LAK_ann <- simdata2_LAK[[1]]$Cs
simdata2_LAK_ARI <- adjustedRandIndex(as.integer(as.factor(simdata2_ann)),as.vector(simdata2_LAK_ann))
ari.lak.simdata2 <- c(ari.lak.simdata2,simdata2_LAK_ARI)
cat(i, simdata2_LAK_ARI, "\n")
}
ari.lak.simdata2
# 100 runs of SC3 for the same comparison
ari.sc3.simdata2 <- c()
for(i in 1:100){
#simdata2_SCE <- readRDS("simdata2.rds")#14878*1886
simdata2_sc3 <- sc3(sim2,5, gene_filter = T)
simdata2_sc3_ann <- colData(simdata2_sc3)$sc3_5_clusters
simdata2_sc3_ARI <- adjustedRandIndex(as.vector(simdata2_sc3_ann),as.integer(as.factor(simdata2_ann)))
ari.sc3.simdata2 <- c(ari.sc3.simdata2, simdata2_sc3_ARI)
cat(i, simdata2_sc3_ARI, "\n")
}
ari.sc3.simdata2
ari_data2 <- data.frame(ari.lak.simdata2,ari.sc3.simdata2)
write.table(ari_data2,file="ari_data2_1000.csv",quote=FALSE,sep=",",col.names=TRUE,row.names=F)
#saveRDS(sim2,file="simdata2_1000.rds")
# ---- Simulation 3: experiment-level dropout, dropout.mid = 1 ----
#params.groups3 <- newSplatParams(batchCells = 1000, nGenes = 20000,dropout.type="experiment",
# dropout.mid=1, dropout.shape = -1)
#sim3 <- splatSimulateGroups(params.groups3, group.prob = c(0.3,0.3,0.3, 0.1),
# verbose = FALSE)
sim3 <- readRDS("simdata3_1000.rds")
simdata3 <- assays(sim3)$counts
assays(sim3)$logcounts <- log2(as.matrix(simdata3) + 1)
colnames(rowData(sim3))[1]<-"feature_symbol"
simdata3_ann <- colData(sim3)$Group
# 100 runs of LAK clustering scored by adjusted Rand index against the truth
ari.lak.simdata3 <- c()
for(i in 1:100){
simdata3_LAK <- LAK(simdata3, 5, s_error = 0.05, s_value = 13.3084)
simdata3_LAK_ann <- simdata3_LAK[[1]]$Cs
simdata3_LAK_ARI <- adjustedRandIndex(as.integer(as.factor(simdata3_ann)),as.vector(simdata3_LAK_ann))
ari.lak.simdata3 <- c(ari.lak.simdata3,simdata3_LAK_ARI)
cat(i, simdata3_LAK_ARI, "\n")
}
ari.lak.simdata3
# 100 runs of SC3 for the same comparison
ari.sc3.simdata3 <- c()
for(i in 1:100){
#simdata3_SCE <- readRDS("simdata3.rds")#14878*1886
simdata3_sc3 <- sc3(sim3,5, gene_filter = T)
simdata3_sc3_ann <- colData(simdata3_sc3)$sc3_5_clusters
simdata3_sc3_ARI <- adjustedRandIndex(as.vector(simdata3_sc3_ann),as.integer(as.factor(simdata3_ann)))
ari.sc3.simdata3 <- c(ari.sc3.simdata3, simdata3_sc3_ARI)
cat(i, simdata3_sc3_ARI, "\n")
}
ari.sc3.simdata3
ari_data3 <- data.frame(ari.lak.simdata3,ari.sc3.simdata3)
write.table(ari_data3,file="ari_data3_1000.csv",quote=FALSE,sep=",",col.names=TRUE,row.names=F)
#saveRDS(sim3,file="simdata3_1000.rds")
# ---- Simulation 4: experiment-level dropout, dropout.mid = 2 ----
#params.groups4 <- newSplatParams(batchCells = 1000, nGenes = 20000,dropout.type="experiment",
# dropout.mid=2, dropout.shape = -1)
#sim4 <- splatSimulateGroups(params.groups4, group.prob = c(0.3,0.3,0.3, 0.1),
# verbose = FALSE)
sim4 <- readRDS("simdata4_1000.rds")
simdata4 <- assays(sim4)$counts
assays(sim4)$logcounts <- log2(as.matrix(simdata4) + 1)
colnames(rowData(sim4))[1]<-"feature_symbol"
simdata4_ann <- colData(sim4)$Group
# 100 runs of LAK clustering scored by adjusted Rand index against the truth
ari.lak.simdata4 <- c()
for(i in 1:100){
simdata4_LAK <- LAK(simdata4, 5,s_error = 0.05, s_value = 12.47256)
simdata4_LAK_ann <- simdata4_LAK[[1]]$Cs
simdata4_LAK_ARI <- adjustedRandIndex(as.integer(as.factor(simdata4_ann)),as.vector(simdata4_LAK_ann))
ari.lak.simdata4 <- c(ari.lak.simdata4,simdata4_LAK_ARI)
# NOTE(review): unlike the other sections this progress line omits the ARI
# value and newline — probably an oversight.
cat(i)
}
ari.lak.simdata4
ari.sc3.simdata4 <- c()
for(i in 1:100){
#simdata2_SCE <- readRDS("simdata2.rds")#14878*1886
simdata4_sc3 <- sc3(sim4,5, gene_filter = T)
simdata4_sc3_ann <- colData(simdata4_sc3)$sc3_5_clusters
simdata4_sc3_ARI <- adjustedRandIndex(as.vector(simdata4_sc3_ann),as.integer(as.factor(simdata4_ann)))
ari.sc3.simdata4 <- c(ari.sc3.simdata4, simdata4_sc3_ARI)
cat(i, simdata4_sc3_ARI, "\n")
}
ari.sc3.simdata4
ari_data4 <- data.frame(ari.lak.simdata4,ari.sc3.simdata4)
write.table(ari_data4,file="ari_data4_1000.csv",quote=FALSE,sep=",",col.names=TRUE,row.names=F)
#saveRDS(sim4,file="simdata4_1000.rds")
# report the observed zero (dropout) fraction for each simulated data set
dropout(simdata1)
dropout(simdata2)
dropout(simdata3)
dropout(simdata4)
|
/R Scripts/simulate_1000.R
|
no_license
|
HIT-biostatistical/LAK
|
R
| false
| false
| 6,239
|
r
|
library(splatter)
source("LAK.R")
library(SC3)
library(mclust)
# Fraction of zero entries (the dropout rate) in the count matrix x.
dropout <- function(x) {
  zero_count <- sum(x == 0, na.rm = TRUE)  # same count as length(which(x == 0))
  zero_count / (nrow(x) * ncol(x))
}
# ---- Simulation 1: no dropout ----
# The splatter simulation is pre-computed and cached as an .rds file; the
# commented code below records how it was generated.
#params.groups1 <- newSplatParams(batchCells = 1000, nGenes = 20000,dropout.type="none")
# One small group, one big group
#sim1 <- splatSimulateGroups(params.groups1, group.prob = c(0.3,0.3,0.3, 0.1),
# verbose = FALSE)
sim1 <- readRDS("simdata1_1000.rds")
# raw count matrix and the log2(count + 1) layer plus feature_symbol column
# required by SC3
simdata1 <- assays(sim1)$counts
assays(sim1)$logcounts <- log2(as.matrix(simdata1) + 1)
colnames(rowData(sim1))[1]<-"feature_symbol"
dropout(simdata1)
# ground-truth group labels from the simulation
simdata1_ann <- colData(sim1)$Group
# NOTE(review): result vectors are grown with c() in the loops below; with
# only 100 iterations this is harmless, but preallocating would be cleaner.
ari.lak.simdata1 <- c()
for(i in 1:100){
simdata1_LAK <- LAK(simdata1, 5,s_error = 0.05, s_value = 17.39473)
simdata1_LAK_ann <- simdata1_LAK[[1]]$Cs
simdata1_LAK_ARI <- adjustedRandIndex(as.integer(as.factor(simdata1_ann)),simdata1_LAK_ann)
ari.lak.simdata1 <- c(ari.lak.simdata1,simdata1_LAK_ARI)
cat(i,simdata1_LAK_ARI,"\n")
}
ari.lak.simdata1
ari.sc3.simdata1 <- c()
for(i in 1:100){
#simdata1_SCE <- readRDS("simdata1.rds")#14878*1886
simdata1_sc3 <- sc3(sim1,5, gene_filter = T)
simdata1_sc3_ann <- colData(simdata1_sc3)$sc3_5_clusters
simdata1_sc3_ARI <- adjustedRandIndex(as.vector(simdata1_sc3_ann),as.integer(as.factor(simdata1_ann)))
ari.sc3.simdata1 <- c(ari.sc3.simdata1, simdata1_sc3_ARI)
cat(i, simdata1_sc3_ARI, "\n")
}
ari.sc3.simdata1
# save the paired ARI series for later comparison/plotting
ari_data1 <- data.frame(ari.lak.simdata1,ari.sc3.simdata1)
write.table(ari_data1,file="ari_data1_1000.csv",quote=FALSE,sep=",",col.names=TRUE,row.names=F)
#saveRDS(sim1,file="simdata1_1000.rds")
# ---- Simulation 2: experiment-level dropout, dropout.mid = 0 ----
#params.groups2 <- newSplatParams(batchCells = 1000, nGenes = 20000,dropout.type="experiment",
# dropout.mid=0, dropout.shape = -1)
#sim2 <- splatSimulateGroups(params.groups2, group.prob = c(0.3,0.3,0.3, 0.1),
# verbose = FALSE)
sim2 <- readRDS("simdata2_1000.rds")
simdata2 <- assays(sim2)$counts
assays(sim2)$logcounts <- log2(as.matrix(simdata2) + 1)
colnames(rowData(sim2))[1]<-"feature_symbol"
simdata2_ann <- colData(sim2)$Group
# 100 runs of LAK clustering scored by adjusted Rand index against the truth
ari.lak.simdata2 <- c()
for(i in 1:100){
simdata2_LAK <- LAK(simdata2, 5,s_error = 0.05, s_value = 16.09453)
simdata2_LAK_ann <- simdata2_LAK[[1]]$Cs
simdata2_LAK_ARI <- adjustedRandIndex(as.integer(as.factor(simdata2_ann)),as.vector(simdata2_LAK_ann))
ari.lak.simdata2 <- c(ari.lak.simdata2,simdata2_LAK_ARI)
cat(i, simdata2_LAK_ARI, "\n")
}
ari.lak.simdata2
# 100 runs of SC3 for the same comparison
ari.sc3.simdata2 <- c()
for(i in 1:100){
#simdata2_SCE <- readRDS("simdata2.rds")#14878*1886
simdata2_sc3 <- sc3(sim2,5, gene_filter = T)
simdata2_sc3_ann <- colData(simdata2_sc3)$sc3_5_clusters
simdata2_sc3_ARI <- adjustedRandIndex(as.vector(simdata2_sc3_ann),as.integer(as.factor(simdata2_ann)))
ari.sc3.simdata2 <- c(ari.sc3.simdata2, simdata2_sc3_ARI)
cat(i, simdata2_sc3_ARI, "\n")
}
ari.sc3.simdata2
ari_data2 <- data.frame(ari.lak.simdata2,ari.sc3.simdata2)
write.table(ari_data2,file="ari_data2_1000.csv",quote=FALSE,sep=",",col.names=TRUE,row.names=F)
#saveRDS(sim2,file="simdata2_1000.rds")
#params.groups3 <- newSplatParams(batchCells = 1000, nGenes = 20000,dropout.type="experiment",
# dropout.mid=1, dropout.shape = -1)
#sim3 <- splatSimulateGroups(params.groups3, group.prob = c(0.3,0.3,0.3, 0.1),
# verbose = FALSE)
sim3 <- readRDS("simdata3_1000.rds")
simdata3 <- assays(sim3)$counts
assays(sim3)$logcounts <- log2(as.matrix(simdata3) + 1)
colnames(rowData(sim3))[1]<-"feature_symbol"
simdata3_ann <- colData(sim3)$Group
ari.lak.simdata3 <- c()
for(i in 1:100){
simdata3_LAK <- LAK(simdata3, 5, s_error = 0.05, s_value = 13.3084)
simdata3_LAK_ann <- simdata3_LAK[[1]]$Cs
simdata3_LAK_ARI <- adjustedRandIndex(as.integer(as.factor(simdata3_ann)),as.vector(simdata3_LAK_ann))
ari.lak.simdata3 <- c(ari.lak.simdata3,simdata3_LAK_ARI)
cat(i, simdata3_LAK_ARI, "\n")
}
ari.lak.simdata3
ari.sc3.simdata3 <- c()
for(i in 1:100){
#simdata3_SCE <- readRDS("simdata3.rds")#14878*1886
simdata3_sc3 <- sc3(sim3,5, gene_filter = T)
simdata3_sc3_ann <- colData(simdata3_sc3)$sc3_5_clusters
simdata3_sc3_ARI <- adjustedRandIndex(as.vector(simdata3_sc3_ann),as.integer(as.factor(simdata3_ann)))
ari.sc3.simdata3 <- c(ari.sc3.simdata3, simdata3_sc3_ARI)
cat(i, simdata3_sc3_ARI, "\n")
}
ari.sc3.simdata3
ari_data3 <- data.frame(ari.lak.simdata3,ari.sc3.simdata3)
write.table(ari_data3,file="ari_data3_1000.csv",quote=FALSE,sep=",",col.names=TRUE,row.names=F)
#saveRDS(sim3,file="simdata3_1000.rds")
#params.groups4 <- newSplatParams(batchCells = 1000, nGenes = 20000,dropout.type="experiment",
# dropout.mid=2, dropout.shape = -1)
#sim4 <- splatSimulateGroups(params.groups4, group.prob = c(0.3,0.3,0.3, 0.1),
# verbose = FALSE)
sim4 <- readRDS("simdata4_1000.rds")
simdata4 <- assays(sim4)$counts
assays(sim4)$logcounts <- log2(as.matrix(simdata4) + 1)
colnames(rowData(sim4))[1]<-"feature_symbol"
simdata4_ann <- colData(sim4)$Group
# LAK clustering on simulation 4: 100 replicates, recording the Adjusted
# Rand Index (ARI) of each run against the true group labels.
ari.lak.simdata4 <- c()
for (i in 1:100) {
  simdata4_LAK <- LAK(simdata4, 5, s_error = 0.05, s_value = 12.47256)
  simdata4_LAK_ann <- simdata4_LAK[[1]]$Cs
  simdata4_LAK_ARI <- adjustedRandIndex(as.integer(as.factor(simdata4_ann)),
                                        as.vector(simdata4_LAK_ann))
  ari.lak.simdata4 <- c(ari.lak.simdata4, simdata4_LAK_ARI)
  # Bug fix: print the replicate's ARI as well, consistent with the
  # simdata1/2/3 loops (previously only `cat(i)` was printed here).
  cat(i, simdata4_LAK_ARI, "\n")
}
ari.lak.simdata4
ari.sc3.simdata4 <- c()
for(i in 1:100){
#simdata2_SCE <- readRDS("simdata2.rds")#14878*1886
simdata4_sc3 <- sc3(sim4,5, gene_filter = T)
simdata4_sc3_ann <- colData(simdata4_sc3)$sc3_5_clusters
simdata4_sc3_ARI <- adjustedRandIndex(as.vector(simdata4_sc3_ann),as.integer(as.factor(simdata4_ann)))
ari.sc3.simdata4 <- c(ari.sc3.simdata4, simdata4_sc3_ARI)
cat(i, simdata4_sc3_ARI, "\n")
}
ari.sc3.simdata4
ari_data4 <- data.frame(ari.lak.simdata4,ari.sc3.simdata4)
write.table(ari_data4,file="ari_data4_1000.csv",quote=FALSE,sep=",",col.names=TRUE,row.names=F)
#saveRDS(sim4,file="simdata4_1000.rds")
dropout(simdata1)
dropout(simdata2)
dropout(simdata3)
dropout(simdata4)
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# (duplicate `library(shiny)` call removed)
# Load the ggplot2 package, which provides the 'mpg' dataset used to
# populate the filter choices below.
library(ggplot2)

# UI definition: a data table of the mpg dataset with three column filters
# (manufacturer, transmission, cylinders), each offering an "All" option.
shinyUI(
  fluidPage(
    titlePanel("Basic Data Table"),

    # One row of three selectInput filters.
    fluidRow(
      column(4,
             selectInput("man",
                         "Manufacturer:",
                         c("All",
                           unique(as.character(mpg$manufacturer))))
      ),
      column(4,
             selectInput("trans",
                         "Transmission:",
                         c("All",
                           unique(as.character(mpg$trans))))
      ),
      column(4,
             selectInput("cyl",
                         "Cylinders:",
                         c("All",
                           unique(as.character(mpg$cyl))))
      )
    ),

    # The filtered table itself (rendered server-side via DT).
    fluidRow(
      DT::dataTableOutput("table")
    )
  )
)
|
/ui.R
|
no_license
|
akohli70/Developing_Data_Products
|
R
| false
| false
| 1,183
|
r
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shiny)
# Load the ggplot2 package which provides
# the 'mpg' dataset.
library(ggplot2)
# Define the overall UI
shinyUI(
fluidPage(
titlePanel("Basic Data Table"),
# Create a new Row in the UI for selectInputs
fluidRow(
column(4,
selectInput("man",
"Manufacturer:",
c("All",
unique(as.character(mpg$manufacturer))))
),
column(4,
selectInput("trans",
"Transmission:",
c("All",
unique(as.character(mpg$trans))))
),
column(4,
selectInput("cyl",
"Cylinders:",
c("All",
unique(as.character(mpg$cyl))))
)
),
# Create a new row for the table.
fluidRow(
DT::dataTableOutput("table")
)
)
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/weirds.R
\name{weirds_list}
\alias{weirds_list}
\title{Provides the list of available methods}
\usage{
weirds_list(onlynames = FALSE)
}
\arguments{
\item{onlynames}{logical: if \code{TRUE}, return only the nicknames of the
methods to be used in \code{\link{strange}}, \code{\link{stranger}},
\code{\link{weird}} or \code{\link{lucky_odds}}; otherwise return some details.}
}
\description{
Provides the list of available methods
}
|
/man/weirds_list.Rd
|
no_license
|
welovedatascience/stranger
|
R
| false
| true
| 484
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/weirds.R
\name{weirds_list}
\alias{weirds_list}
\title{Provides the list of available methods}
\usage{
weirds_list(onlynames = FALSE)
}
\arguments{
\item{onlynames}{- logical: should be return some details or only the nickname
of methods to be used in \code{\link{strange}}, \code{\link{stranger}},
\code{\link{weird}} or \code{\link{lucky_odds}}.}
}
\description{
Provides the list of available methods
}
|
#' Prior predictive checks
#'
#' @param n_draws An integer that indicates how many time-series will be
#' returned.
#' @inheritParams read_xmile
#' @inheritParams sd_Bayes
#' @inheritParams sd_simulate
#'
#' @return A list of two data frames.
#' @export
#'
#' @examples
#' filepath <- system.file("models/", "SEIR.stmx", package = "readsdr")
#' meas_mdl <- list("y ~ neg_binomial_2(net_flow(C), phi)")
#' estimated_params <- list(
#' sd_prior("par_beta", "lognormal", c(0, 1)),
#' sd_prior("par_rho", "beta", c(2, 2)),
#' sd_prior("I0", "lognormal", c(0, 1), "init"))
#' sd_prior_checks(filepath, meas_mdl, estimated_params, n_draws = 2,
#' start_time = 0, stop_time = 5,
#' integ_method = "rk4", timestep = 1/32)
sd_prior_checks <- function(filepath, meas_mdl, estimated_params, n_draws,
start_time = NULL,
stop_time = NULL,
timestep = NULL,
integ_method = "euler") {
# Names of the structural (model) parameters to be sampled from their priors.
pars_names <- get_names(estimated_params, "par_name")
# Augment the estimated parameters with measurement-model parameters
# extracted from meas_mdl (e.g. a dispersion parameter phi).
estimated_params <- get_meas_params(meas_mdl, estimated_params)
unk_types <- sapply(estimated_params,
function(prior_obj) prior_obj$type)
idx_meas <- which(unk_types == "meas_par")
n_meas_par <- length(idx_meas)
# One sampling closure per parameter; each call yields n_draws values.
prior_fun_list <- prior_fun_factory(estimated_params, n_draws)
prior_vals <- lapply(prior_fun_list,
function(prior_fun) prior_fun())
# df1: one row per draw, one column per sampled parameter.
df1 <- cbind(data.frame(iter = 1:n_draws), as.data.frame(prior_vals))
mdl_structure <- extract_structure_from_XMILE(filepath, pars_names)
ds_inputs <- get_deSolve_elems(mdl_structure)
if(!(integ_method %in% c("euler", "rk4"))) stop("Invalid integration method")
ds_inputs <- update_sim_params(ds_inputs, start_time, stop_time, timestep)
if(n_meas_par > 0) {
meas_params <- estimated_params[idx_meas]
# List of configured measurement models.
meas_mdl_conf <- configure_meas_models(meas_mdl, meas_params, prior_vals)
}
# NOTE(review): meas_mdl_conf is referenced below even when n_meas_par == 0;
# presumably get_meas_params() guarantees at least one measurement parameter
# -- confirm, otherwise this errors with "object 'meas_mdl_conf' not found".
df2 <- purrr::map_dfr(1:n_draws, function(i) {
# Overwrite the model constants with the i-th prior draw.
for(param in pars_names) ds_inputs$consts[[param]] <- prior_vals[[param]][[i]]
par_list <- unlist(purrr::transpose(prior_vals)[i], recursive = FALSE)
readsdr_env <- list2env(par_list)
# Stock initial values may be expressions in terms of estimated parameters
# (e.g. "I0"), so each is evaluated against the i-th draw's environment.
ds_inputs$stocks <- purrr::map_dbl(ds_inputs$stocks, function(x) {
eval(parse(text = x), envir = readsdr_env)
})
# Simulate once and draw one synthetic measurement time-series.
measurement_df <- sd_measurements(1, meas_mdl_conf[[i]],
ds_inputs,
integ_method = integ_method)
measurement_df$iter <- i
measurement_df
})
list(parameters = df1,
measurements = df2)
}
|
/R/prior_checks.R
|
permissive
|
jandraor/readsdr
|
R
| false
| false
| 2,741
|
r
|
#' Prior predictive checks
#'
#' @param n_draws An integer that indicates how many time-series will be
#' returned.
#' @inheritParams read_xmile
#' @inheritParams sd_Bayes
#' @inheritParams sd_simulate
#'
#' @return A list of two data frames.
#' @export
#'
#' @examples
#' filepath <- system.file("models/", "SEIR.stmx", package = "readsdr")
#' meas_mdl <- list("y ~ neg_binomial_2(net_flow(C), phi)")
#' estimated_params <- list(
#' sd_prior("par_beta", "lognormal", c(0, 1)),
#' sd_prior("par_rho", "beta", c(2, 2)),
#' sd_prior("I0", "lognormal", c(0, 1), "init"))
#' sd_prior_checks(filepath, meas_mdl, estimated_params, n_draws = 2,
#' start_time = 0, stop_time = 5,
#' integ_method = "rk4", timestep = 1/32)
sd_prior_checks <- function(filepath, meas_mdl, estimated_params, n_draws,
start_time = NULL,
stop_time = NULL,
timestep = NULL,
integ_method = "euler") {
pars_names <- get_names(estimated_params, "par_name")
estimated_params <- get_meas_params(meas_mdl, estimated_params)
unk_types <- sapply(estimated_params,
function(prior_obj) prior_obj$type)
idx_meas <- which(unk_types == "meas_par")
n_meas_par <- length(idx_meas)
prior_fun_list <- prior_fun_factory(estimated_params, n_draws)
prior_vals <- lapply(prior_fun_list,
function(prior_fun) prior_fun())
df1 <- cbind(data.frame(iter = 1:n_draws), as.data.frame(prior_vals))
mdl_structure <- extract_structure_from_XMILE(filepath, pars_names)
ds_inputs <- get_deSolve_elems(mdl_structure)
if(!(integ_method %in% c("euler", "rk4"))) stop("Invalid integration method")
ds_inputs <- update_sim_params(ds_inputs, start_time, stop_time, timestep)
if(n_meas_par > 0) {
meas_params <- estimated_params[idx_meas]
# List of configured measurement models.
meas_mdl_conf <- configure_meas_models(meas_mdl, meas_params, prior_vals)
}
df2 <- purrr::map_dfr(1:n_draws, function(i) {
for(param in pars_names) ds_inputs$consts[[param]] <- prior_vals[[param]][[i]]
par_list <- unlist(purrr::transpose(prior_vals)[i], recursive = FALSE)
readsdr_env <- list2env(par_list)
ds_inputs$stocks <- purrr::map_dbl(ds_inputs$stocks, function(x) {
eval(parse(text = x), envir = readsdr_env)
})
measurement_df <- sd_measurements(1, meas_mdl_conf[[i]],
ds_inputs,
integ_method = integ_method)
measurement_df$iter <- i
measurement_df
})
list(parameters = df1,
measurements = df2)
}
|
# Graph 2: Violin plot of Album Ranks by Decade
library(dplyr)
library(ggplot2)
#' Violin plot of album ranks by decade.
#'
#' @param dataset A data frame with numeric `Year` and `Number` (rank) columns.
#' @return A plotly object: one violin (with box and mean line) per decade.
plot2 <- function(dataset) {
  artist_data <- dataset %>%
    mutate("decade" = floor(Year / 10) * 10)
  artist_data$decade <- as.factor(artist_data$decade)
  # Namespace plotly explicitly: this script attaches dplyr/ggplot2 but never
  # plotly, so a bare plot_ly()/layout() call would fail when run standalone.
  plotly::plot_ly(x = ~artist_data$decade, y = ~artist_data$Number,
                  split = ~artist_data$decade, type = 'violin',
                  box = list(visible = TRUE), meanline = list(visible = TRUE)) %>%
    plotly::layout(title = "Album Ranks by Decade",
                   xaxis = list(title = "Decade"),
                   yaxis = list(title = "Rank", zeroline = FALSE))
}
|
/scripts/chart2.R
|
no_license
|
sjain1224/purple_wranglers
|
R
| false
| false
| 548
|
r
|
# Graph 2: Violin plot of Album Ranks by Decade
library(dplyr)
library(ggplot2)
plot2 <- function(dataset) {
artist_data <- dataset %>%
mutate("decade" = floor(Year / 10) * 10)
artist_data$decade <- as.factor(artist_data$decade)
plot_ly(x = ~artist_data$decade, y = ~artist_data$Number,
split = ~artist_data$decade, type = 'violin',
box = list(visible = T), meanline = list(visible = T)) %>%
layout(title = "Album Ranks by Decade",
xaxis = list(title = "Decade"),
yaxis = list(title = "Rank", zeroline = F))
}
|
#' @title DataReviewFile
#'
#' @import openxlsx
#'
#' @description Exports the data review of all tables in lists to an excel file
#'
#' @param pathfile the path of the xlsx file where you want your output
#' @param listTables character vector with the names of all the tables you
#'   want to include in your review (looked up via \code{get()})
#'
#' @return NULL
#'
#' @examples
#'
#' @export DataReviewFile
DataReviewFile <- function(pathfile, listTables){
  workbook <- createWorkbook()
  addWorksheet(workbook, sheetName = "General")
  addWorksheet(workbook, sheetName = "Category")
  addWorksheet(workbook, sheetName = "Numeric")
  # Next free row on each sheet; tables are stacked with a 4-row gap.
  rowgen = 2
  rowcat = 2
  rownum = 2
  for (elem in listTables){
    # NOTE(review): diagnose()/diagnose_category()/diagnose_numeric() come
    # from the 'dlookr' package, which is not declared via @import -- confirm.
    DRgeneral <- data.frame(diagnose(get(elem)))
    DRcategory <- data.frame(diagnose_category(get(elem)))
    DRnumeric <- data.frame(diagnose_numeric(get(elem)))
    writeData(workbook, sheet = "General", startCol = 2, startRow = rowgen, x = elem)
    writeDataTable(workbook, sheet = "General", x = DRgeneral, startCol = 2, startRow = (rowgen + 2))
    if (nrow(DRcategory) > 0){
      # Bug fix: the table name must use this sheet's own cursor (rowcat),
      # not the "General" sheet's cursor (rowgen).
      writeData(workbook, sheet = "Category", startCol = 2, startRow = rowcat, x = elem)
      writeDataTable(workbook, sheet = "Category", x = DRcategory, startCol = 2, startRow = (rowcat + 2))
    }
    if (nrow(DRnumeric) > 0){
      # Bug fix: this header previously went to the "Category" sheet at
      # rowgen; it belongs on "Numeric" at rownum, just above its own table.
      writeData(workbook, sheet = "Numeric", startCol = 2, startRow = rownum, x = elem)
      writeDataTable(workbook, sheet = "Numeric", x = DRnumeric, startCol = 2, startRow = (rownum + 2))
    }
    rowgen = rowgen + nrow(DRgeneral) + 4
    rowcat = rowcat + nrow(DRcategory) + 4
    rownum = rownum + nrow(DRnumeric) + 4
  }
  saveWorkbook(workbook, file = pathfile)
}
|
/R/DataReview.R
|
no_license
|
elecmc03/DataReview
|
R
| false
| false
| 1,639
|
r
|
#' @title DataReviewFile
#'
#' @import openxlsx
#'
#' @description Exports the data review of all tables in lists to an excel file
#'
#' @param pathfile: the path of the xlsx file where you want your output
#' listTables: list of the names of all the tables you want to include in your review
#'
#' @return NULL
#'
#' @examples
#'
#' @export DataReviewFile
DataReviewFile <- function(pathfile, listTables){
workbook <- createWorkbook()
addWorksheet(workbook, sheetName = "General")
addWorksheet(workbook, sheetName = "Category")
addWorksheet(workbook, sheetName = "Numeric")
rowgen = 2
rowcat = 2
rownum = 2
for (elem in listTables){
DRgeneral <- data.frame(diagnose(get(elem)))
DRcategory <- data.frame(diagnose_category(get(elem)))
DRnumeric <- data.frame(diagnose_numeric(get(elem)))
writeData(workbook, sheet = "General", startCol = 2, startRow = rowgen , x = elem)
writeDataTable(workbook, sheet = "General", x = DRgeneral, startCol = 2, startRow = (rowgen + 2))
if (nrow(DRcategory)>0){
writeData(workbook, sheet = "Category", startCol = 2, startRow = rowgen , x = elem)
writeDataTable(workbook, sheet = "Category", x = DRcategory, startCol = 2, startRow = (rowcat + 2))
}
if (nrow(DRnumeric)>0){
writeData(workbook, sheet = "Category", startCol = 2, startRow = rowgen , x = elem)
writeDataTable(workbook, sheet = "Numeric", x = DRnumeric, startCol = 2, startRow = (rownum + 2))
}
rowgen = rowgen + nrow(DRgeneral) + 4
rowcat = rowcat + nrow(DRcategory) + 4
rownum = rownum + nrow(DRnumeric) + 4
}
saveWorkbook(workbook, file = pathfile)
}
|
#' Bruno is an R package of Bruno's random packages.
#'
#' @docType package
#' @name bruno
#' @import yaml RJDBC RPostgreSQL
NULL
|
/R/bruno-package.R
|
permissive
|
bruno615/bruno
|
R
| false
| false
| 130
|
r
|
#' Bruno is an R package of Bruno's random packages.
#'
#' @docType package
#' @name bruno
#' @import yaml RJDBC RPostgreSQL
NULL
|
# Coursera - Developing Data Products- Course Project
# Author: Velladurai of Malaysia
# server.R file for the shiny app
# This app was developed to help people choose the best car for their trip,
# using mtcars dataset, from [R]
library(shiny)
library(datasets)
library(dplyr)
# Server: filters mtcars by the UI controls and renders the result as a table.
shinyServer(function(input, output) {
# Show the cars that correspond to the filters
output$table <- renderDataTable({
# Candidate grids for the displacement and horsepower range sliders.
disp_seq <- seq(from = input$disp[1], to = input$disp[2], by = 0.1)
hp_seq <- seq(from = input$hp[1], to = input$hp[2], by = 1)
# NOTE(review): 'input$dis' (presumably trip distance) and 'input$disp'
# (engine displacement) are distinct inputs -- confirm against ui.R that
# 'dis' is not a typo for 'disp'.
data <- transmute(mtcars, Car = rownames(mtcars), MilesPerGallon = mpg,
GasolineExpenditure = input$dis/mpg*input$cost,
Cylinders = cyl, Displacement = disp, Horsepower = hp,
Transmission = am)
# Keep only rows matching every filter (budget, cylinders, ranges, gearbox).
data <- filter(data, GasolineExpenditure <= input$gas, Cylinders %in% input$cyl,
Displacement %in% disp_seq, Horsepower %in% hp_seq, Transmission %in% input$am)
# Recode the 0/1 transmission flag to readable labels.
data <- mutate(data, Transmission = ifelse(Transmission==0, "Automatic", "Manual"))
data <- arrange(data, GasolineExpenditure)
data
}, options = list(lengthMenu = c(5, 15, 30), pageLength = 30))
})
|
/Server.R
|
no_license
|
MSCDataScience/Developing_Data_Products
|
R
| false
| false
| 1,208
|
r
|
# Coursera - Developing Data Products- Course Project
# Authour: Velladurai of Malaysia
# server.R file for the shiny app
# This app was developed to help people choose the best car for their trip,
# using mtcars dataset, from [R]
library(shiny)
library(datasets)
library(dplyr)
shinyServer(function(input, output) {
# Show the cars that correspond to the filters
output$table <- renderDataTable({
disp_seq <- seq(from = input$disp[1], to = input$disp[2], by = 0.1)
hp_seq <- seq(from = input$hp[1], to = input$hp[2], by = 1)
data <- transmute(mtcars, Car = rownames(mtcars), MilesPerGallon = mpg,
GasolineExpenditure = input$dis/mpg*input$cost,
Cylinders = cyl, Displacement = disp, Horsepower = hp,
Transmission = am)
data <- filter(data, GasolineExpenditure <= input$gas, Cylinders %in% input$cyl,
Displacement %in% disp_seq, Horsepower %in% hp_seq, Transmission %in% input$am)
data <- mutate(data, Transmission = ifelse(Transmission==0, "Automatic", "Manual"))
data <- arrange(data, GasolineExpenditure)
data
}, options = list(lengthMenu = c(5, 15, 30), pageLength = 30))
})
|
# Load necessary packages ("ggrepel" was previously listed twice; deduplicated).
libs <- c("tidyverse", "stringr", "readr", "dplyr", "ggplot2", "readstata13", "foreign",
          "magrittr", "lubridate", "here", "ggrepel", "treemapify", "packcircles",
          "ggalluvial", "extrafont", "ggfittext", "cowplot", "googleway", "ggspatial",
          "sf", "rnaturalearth", "rnaturalearthdata", "rgeos", "ggridges", "jsonlite")
# invisible() suppresses lapply's printed list of attached-package metadata.
invisible(lapply(libs, library, character.only = TRUE))
# Read in the data
intake <- read.csv(here('CC Dashboard','SAO Data','Intake.csv'),stringsAsFactors = FALSE)
initiation <- read.csv(here('CC Dashboard',"SAO Data","Initiation.csv"),stringsAsFactors = FALSE)
sentence <- read.csv(here('CC Dashboard',"SAO Data","Sentencing.csv"),stringsAsFactors = FALSE)
disposition <- read.csv(here('CC Dashboard',"SAO Data","Dispositions.csv"),stringsAsFactors = FALSE)
################################################################################################################
###################################################################################################
############################################################################################################
# Steps:
# 1) Clean each data set individually
# 2) Make dataset unique on CASE PARTICIPANT ID level
# 3) Merge intake, dispositions, and sentencing data
# 4) make adjustments for "Pending data"
# 5) Collapse in different ways - for sankey, by race, by gender
# Clean Intake Data
# There are 2 relevant variables - participant status and Felony Review result. participant status is the final
# status of a case brought against a person but it doesn't necessarily come from Felony Review since Narcotics cases
# are filed directly by Law Enforcement agencies. Accordingly, will make "clean" variable:
# case status detailed using FR Result variable and assuming all narcotics are filed by LEA -
# approved by felony review, filed by LEA (narcotics), rejected, continuing investigation, other
# will use variable 2 for D3 for now
# Numbers match CCSAO Dashboard if I dont filter for missing race/gender
# Intake
# Clean intake: keep rows with known gender/race, parse dates, derive
# year/month fields, and recode offense category, race, and the
# felony-review (initiation) outcome.
intake %<>%
  filter(GENDER %in% c("Male","Female")) %>%
  filter(RACE %in% c("Albino", "American Indian", "Asian", "ASIAN", "Biracial", "Black", "CAUCASIAN", "HISPANIC",
                     "Unknown", "White", "White [Hispanic or Latino]", "White/Black [Hispanic or Latino]")) %>%
  mutate(RECEIVED_DATE = as.Date(RECEIVED_DATE,'%m/%d/%Y')) %>%
  mutate(receive_month = round_date(RECEIVED_DATE, "month")) %>%
  mutate(FR_DATE = as.Date(FR_Date,'%m/%d/%Y')) %>%
  # (duplicate mutate(FR_year = year(FR_DATE)) removed -- it was computed twice)
  mutate(FR_year = year(FR_DATE)) %>%
  mutate(receive_year = year(RECEIVED_DATE)) %>%
  mutate(ARREST_DATE = as.Date(ARREST_DATE,'%m/%d/%Y')) %>%
  mutate(arrest_year = year(ARREST_DATE)) %>%
  # Collapse detailed offense categories into broad display groups.
  mutate(Offense_Category_broad = case_when(
    grepl("Retail Theft", Offense_Category) ~ "Retail Theft",
    grepl("Burglary", Offense_Category) ~ "Burglary",
    grepl("Homicide", Offense_Category) ~ "Homicide",
    grepl("Robbery", Offense_Category) ~ "Robbery",
    grepl("Battery", Offense_Category) ~ "Battery",
    grepl("Assault", Offense_Category) ~ "Battery",
    grepl("DUI", Offense_Category) ~ "DUI",
    grepl("UUW", Offense_Category) ~ "Unlawful Use \n of Weapon",
    Offense_Category == "Theft" ~ "Theft\n (Non-retail)",
    Offense_Category == "Narcotics" ~ "Narcotics",
    Offense_Category == "Sex Crimes" ~ "Sex Crimes",
    Offense_Category == "Possession of Stolen Motor Vehicle" ~ "MVT",
    Offense_Category == "Driving With Suspended Or Revoked License" ~ "Driving With \n Revoked License",
    TRUE ~ "Other (e.g. Forgery, \n Identity Theft)"
  )) %>%
  # Collapse the raw RACE codes to four display categories.
  mutate(Race_short = case_when(
    RACE == "Black" ~ "Black",
    RACE == "White" ~ "White",
    RACE == "CAUCASIAN" ~ "White",
    RACE == "HISPANIC" ~ "Latinx",
    RACE == "White [Hispanic or Latino]" ~ "Latinx",
    RACE == "White/Black [Hispanic or Latino]" ~ "Latinx",
    RACE == "Unknown" ~ "Other",
    TRUE ~ "Other"
  )) %>%
  # Felony-review result; blank FR_RESULT on Narcotics cases is assumed to
  # mean the case was filed directly by a law-enforcement agency.
  mutate(initiation_result = case_when(
    grepl("Approved", FR_RESULT) ~ "Approved",
    grepl("Rejected", FR_RESULT) ~ "Rejected",
    grepl("Continued Investigation", FR_RESULT) ~ "Continued Investigation",
    FR_RESULT == "" & Offense_Category == "Narcotics" ~ "Filed by LEA",
    FR_RESULT == "" & Offense_Category != "Narcotics" ~ "Other",
    TRUE ~ "Other"))
intake_for_join <- intake %>%
select(CASE_PARTICIPANT_ID,receive_year,initiation_result,Race_short,GENDER)
# Disposition
disposition %<>%
#filter(PRIMARY_CHARGE == "true") %>%
filter(GENDER %in% c("Male","Female")) %>%
filter(RACE %in% c("Albino", "American Indian", "Asian", "ASIAN", "Biracial", "Black", "CAUCASIAN", "HISPANIC",
"Unknown", "White", "White [Hispanic or Latino]", "White/Black [Hispanic or Latino]")) %>%
mutate(CHARGE_DISPOSITION = trimws(CHARGE_DISPOSITION)) %>%
mutate(DISPO_DATE = as.Date(DISPO_DATE,'%m/%d/%Y')) %>%
mutate(dispo_month = round_date(DISPO_DATE, "month")) %>%
mutate(dispo_year = year(DISPO_DATE)) %>%
mutate(RECEIVED_DATE = as.Date(RECEIVED_DATE,'%m/%d/%Y')) %>%
mutate(receive_month = round_date(RECEIVED_DATE, "month")) %>%
mutate(receive_year = year(RECEIVED_DATE)) %>%
mutate(conviction = case_when(
CHARGE_DISPOSITION == "Plea Of Guilty" ~ "Conviction",
CHARGE_DISPOSITION == "Finding Guilty" ~ "Conviction",
CHARGE_DISPOSITION == "Verdict Guilty" ~ "Conviction",
CHARGE_DISPOSITION == "Plea of Guilty - Amended Charge" ~ "Conviction",
CHARGE_DISPOSITION == "Nolle Prosecution" ~ "No Conviction",
CHARGE_DISPOSITION == "FNPC" ~ "No Conviction",
CHARGE_DISPOSITION == "FNG" ~ "No Conviction",
CHARGE_DISPOSITION == "Verdict-Not Guilty" ~ "No Conviction",
CHARGE_DISPOSITION == "Case Dismissed" ~ "No Conviction",
CHARGE_DISPOSITION == "Finding Not Not Guilty" ~ "No Conviction",
CHARGE_DISPOSITION == "FNG Reason Insanity" ~ "No Conviction",
CHARGE_DISPOSITION == "Death Suggested-Cause Abated" ~ "No Conviction",
CHARGE_DISPOSITION == "BFW" ~ "No Conviction",
CHARGE_DISPOSITION == "Transferred - Misd Crt" ~ "No Conviction",
TRUE ~ ""
)) %>%
mutate(conviction_d = case_when(
CHARGE_DISPOSITION == "Plea Of Guilty" ~ "Plea Of Guilty",
CHARGE_DISPOSITION == "Finding Guilty" ~ "Finding Guilty",
CHARGE_DISPOSITION == "Verdict Guilty" ~ "Verdict Guilty",
CHARGE_DISPOSITION == "Plea of Guilty - Amended Charge" ~ "Plea Of Guilty",
CHARGE_DISPOSITION == "Nolle Prosecution" ~ "Nolle Prosecution",
CHARGE_DISPOSITION == "FNPC" ~ "Finding No Probably Cause",
CHARGE_DISPOSITION == "FNG" ~ "Finding - Not Guilty",
CHARGE_DISPOSITION == "Verdict-Not Guilty" ~ "Verdict - Not Guilty",
CHARGE_DISPOSITION == "Case Dismissed" ~ "No Conviction - Other",
CHARGE_DISPOSITION == "Finding Not Not Guilty" ~ "No Conviction - Other",
CHARGE_DISPOSITION == "FNG Reason Insanity" ~ "No Conviction - Other",
CHARGE_DISPOSITION == "Death Suggested-Cause Abated" ~ "No Conviction - Other",
CHARGE_DISPOSITION == "BFW" ~ "No Conviction - Other",
CHARGE_DISPOSITION == "Transferred - Misd Crt" ~ "No Conviction - Other",
TRUE ~ ""
)) %>%
mutate(conviction = ifelse(conviction=="",NA,conviction)) %>%
mutate(conviction_d = ifelse(conviction_d=="",NA,conviction_d)) %>%
filter(!is.na(conviction)) %>%
mutate(Race_short = case_when(
RACE == "Black" ~ "Black",
RACE == "White" ~ "White",
RACE == "CAUCASIAN" ~ "White",
RACE == "HISPANIC" ~ "Latinx",
RACE == "White [Hispanic or Latino]" ~ "Latinx",
RACE == "White/Black [Hispanic or Latino]" ~ "Latinx",
RACE == "Unknown" ~ "Other",
TRUE ~ "Other"
))
# Deal with joining intake with disposition
# First filter cases missing receive year i.e. initiated before 2011
# Then categorize cases missing disposition as Pending IF they have been approved or continued investigation or
# Filed By LEA. This is an assumption we're making - filed by LEA or continued investigation may not have reached
# disposition stage at all and may have been rejected, but since we don't know, PENDING seems like a safe guess
# Next I filter out those that are still missing conviction (i.e. Other or Rejected that don't show up in disposition)
# The assumption here being that those cases are not PENDING, but didn't proceed further in the system
# Summarizing this gives us the flow of cases from intake into conviction, removing cases that should have
# "stopped" after intake
# NOTE - i'm joining intake with disposition_max_conviction. In the CC SAO data / reports, they discuss only
# looking at the topline charge in the dispositions file. While this makes sense in some cases, it really
# doesn't make sense when combining dispositions with sentences data. This is because while the topline
# charge may not receive a conviction for a given case_participant, another charge for the same case_participant
# might, and that conviction will receive a sentence. For the purposes of tracking case-participants from
# intake to sentencing, it is therefore confusing to see many cases get "no convictions" and then many of the
# "no convictions" receive sentences.
# Instead, I think it makes more sense to count a conviction if ANY charges for a given case_participant
# result in a conviction, because then it follows that that case_participant would receive a sentence
disposition_max_conviction_receive <- disposition %>%
mutate(conviction_number = ifelse(conviction=="Conviction",1,0)) %>%
select(CASE_PARTICIPANT_ID,PRIMARY_CHARGE,receive_year,conviction,conviction_number) %>%
# filtering to the "max" of conviction vs no conviction, rather than topline charge
group_by(CASE_PARTICIPANT_ID) %>%
filter(conviction_number==max(conviction_number)) %>%
select(CASE_PARTICIPANT_ID,receive_year,conviction,conviction_number) %>%
distinct() %>%
select(CASE_PARTICIPANT_ID,conviction,conviction_number)
# Clean up sentencing data
# As per the CC SAO Data report, Sentences should NOT be filtered only on the primary charge (one per case participant ID)
# For many case participants, the primary charge may not receive a conviction (e.g. might be nolle'd) but
# that doesnt mean that case participant won't receive a sentence because some of the lower charges
# may receive a conviction and then therefore a sentence. So to summarize this data, we use n_distinct(Case participant ID)
# Sentences
sentence %<>%
#filter(PRIMARY_CHARGE == "true") %>%
filter(GENDER %in% c("Male","Female")) %>%
filter(RACE %in% c("Albino", "American Indian", "Asian", "ASIAN", "Biracial", "Black", "CAUCASIAN", "HISPANIC",
"Unknown", "White", "White [Hispanic or Latino]", "White/Black [Hispanic or Latino]")) %>%
mutate(RECEIVED_DATE = as.Date(RECEIVED_DATE,'%m/%d/%Y')) %>%
mutate(receive_month = round_date(RECEIVED_DATE, "month")) %>%
mutate(receive_year = year(RECEIVED_DATE)) %>%
mutate(SENTENCE_DATE = as.Date(SENTENCE_DATE,'%m/%d/%Y')) %>%
mutate(sentence_month = round_date(SENTENCE_DATE, "month")) %>%
mutate(sentence_year = year(SENTENCE_DATE)) %>%
mutate(Race_short = case_when(
RACE == "Black" ~ "Black",
RACE == "White" ~ "White",
RACE == "CAUCASIAN" ~ "White",
RACE == "HISPANIC" ~ "Latinx",
RACE == "White [Hispanic or Latino]" ~ "Latinx",
RACE == "White/Black [Hispanic or Latino]" ~ "Latinx",
RACE == "Unknown" ~ "Other",
TRUE ~ "Other"
)) %>%
mutate(sentence_type_short = case_when(
SENTENCE_TYPE=="Prison" ~ "Prison",
SENTENCE_TYPE=="Probation" ~ "Probation",
SENTENCE_TYPE=="Jail" ~ "Jail",
TRUE ~ "Other Sentence"
))
# In the sentences dataset, there are about 6,900 duplicates on CASE PARTICIPANT IDS when we take "distinct" on CPI, receive year
# and sentence type short. This appears to be because in some cases, a case participant DOES receive 2 different sentences - one
# original and one at resentencing or during probation violation. So instead i think I should do the following:
# group by case participant ID, select the LATEST sentence date, THEN make it distinct - even if there are still multiple charges
# at the latest date they should be receivign the same sentence
sent_for_join_receive <- sentence %>%
group_by(CASE_PARTICIPANT_ID) %>%
filter(SENTENCE_DATE == max(SENTENCE_DATE)) %>%
select(CASE_PARTICIPANT_ID,receive_year,sentence_type_short) %>%
distinct() %>%
select(CASE_PARTICIPANT_ID,sentence_type_short)
# After doing this, still 200 duplicates left which don't seem to make sense (no reason for same case, same day to receive)
# 2 sentences. The number is so small, I'm going to drop these observations
sent_for_join_receive <- sent_for_join_receive[!duplicated(sent_for_join_receive$CASE_PARTICIPANT_ID),]
#
# duplicates <- sent_for_join_receive[duplicated(sent_for_join_receive$CASE_PARTICIPANT_ID),]
#
# sent_for_join_receive2 <- sentence %>%
# select(CASE_PARTICIPANT_ID,receive_year) %>%
# distinct()
#
# sent_for_join_receive <- sentence %>%
# group_by(CASE_PARTICIPANT_ID) %>%
# filter(SENTENCE_DATE == max(SENTENCE_DATE)) %>%
# filter(CASE_PARTICIPANT_ID==173015444916)
#
# select(CASE_PARTICIPANT_ID, sentence_type_short)
# Combine intake, disposition, sentences
intake_for_join_years <- intake_for_join %>%
count(receive_year)
intake_dispo_join <- full_join(intake_for_join, disposition_max_conviction_receive,by="CASE_PARTICIPANT_ID")
intake_dispo_join_NA <- intake_dispo_join %>% filter(is.na(conviction)) %>% group_by(receive_year,initiation_result) %>% summarise(cases=n())
sent_intake_dispo_join <- full_join(intake_dispo_join, sent_for_join_receive,by="CASE_PARTICIPANT_ID")
# Make adjustments on full dataset to deal with "pending" cases
# Difference between clean1 and clean2 is coming from the fact that some CASE PARTICIPANT IDS become duplicate
# when intake_dispo_join is merged with sentence, because the sentence dataset is not unique on case
# participant ID (in theory it should be, but about 1.7% of observations get duplicated since they might have
# 2 different sentence types for the same year and case participant)
# I think the cleanest thing to do is make sentence type dataset unique on CPI (select in the case of duplicates)
# and then merge
# intake_dispo_join_clean3 <- anti_join(intake_dispo_join_clean2,intake_dispo_join_clean1,by="CASE_PARTICIPANT_ID")
#
# intake_dispo_join_clean3_yearsX <- intake_dispo_join_clean3 %>%
# count(receive_year.x)
#
# intake_dispo_join_clean3_yearsY <- intake_dispo_join_clean3 %>%
# count(receive_year.y)
# Clean1 and clean2 match!!! as they should!!!
# Flag still-open cases as "Pending Conviction" and drop case participants whose
# cases ended at intake (rejected/other), so the sankey tracks only cases that
# could plausibly reach a disposition.
sent_intake_dispo_join %<>%
# Remove cases missing receive_year i.e. cases that exist in dispositions/sentences but not in
# intake because they were initiated mostly before 2011 (before data was clean)
filter(!is.na(receive_year)) %>%
mutate(case_participant = "Case Participants") %>%
# NOTE(review): case_when yields NA when conviction is NA and initiation_result
# is not one of the three "still live" statuses; those rows are dropped below.
mutate(conviction2 = case_when(
is.na(conviction) & initiation_result %in% c("Approved","Continued Investigation", "Filed by LEA") ~ "Pending Conviction",
TRUE ~ conviction
)) %>%
# Remove those missing conviction2 i.e. not in dispositions dataset and not pending
# About 21,000 obs over 8 years, ~15k of which are Rejected and 6k are other
filter(!is.na(conviction2))
# Dataset 1 for sankey: intake result -> conviction outcome, counted per receive year
intake_dispo_join_clean <- sent_intake_dispo_join %>%
group_by(receive_year,initiation_result,conviction2) %>%
summarise(cases = n()) %>%
rename(source=initiation_result,target=conviction2,value=cases)
# Dataset 2 for sankey: all case participants -> intake result (built from the
# pre-filtered join so rejected/other cases still appear at this first stage)
intake_all <- intake_dispo_join %>%
mutate(case_participant = "Case Participants") %>%
group_by(receive_year,initiation_result,case_participant) %>%
filter(!is.na(receive_year)) %>%
summarise(cases = n()) %>%
rename(source=case_participant,target=initiation_result,value=cases)
# Dataset 3 for sankey: conviction outcome -> sentence type
sentence_dispo_join_clean <- sent_intake_dispo_join %>%
# Convicted (or pending) cases with no sentence yet are labelled "Pending Sentence"
mutate(sentence2 = case_when(
is.na(sentence_type_short) & conviction2 %in% c("Conviction","Pending Conviction") ~ "Pending Sentence",
TRUE ~ sentence_type_short
)) %>%
# remove those missing sentence2 i.e. not in sentences dataset and not pending a sentence (i.e. they dont
# belong in this dataset to begin with)
filter(!is.na(sentence2)) %>%
group_by(receive_year,conviction2,sentence2) %>%
summarise(cases = n()) %>%
#filter(receive_year==2017) %>%
# ungroup() %>%
select(receive_year,conviction2,sentence2,cases) %>%
rename(source=conviction2,target=sentence2,value=cases)
# Stack the three sankey stages (they share source/target/value column names
# after the renames above) and write the combined links table for the D3 dashboard.
sent_intake_dispo_join_final <- rbind(intake_dispo_join_clean,intake_all,sentence_dispo_join_clean)
write_json(sent_intake_dispo_join_final,here("CC Dashboard", "processed_data","sent_intake_dispo_join_final.json"))
## Now create intake race/gender, disposition race/gender, and sentence race/gender from the same
# Intake: cases by year x intake result, one column per gender / per race group
intake_year_status_gender <- sent_intake_dispo_join %>%
group_by(receive_year,initiation_result,GENDER) %>%
summarise(cases = n()) %>%
spread(GENDER,cases) %>%
rename(Year=receive_year)
write_json(intake_year_status_gender,here("CC Dashboard","processed_data","intake_year_status_gender.json"))
intake_year_status_race <- sent_intake_dispo_join %>%
group_by(receive_year,initiation_result,Race_short) %>%
summarise(cases = n()) %>%
spread(Race_short,cases) %>%
rename(Year=receive_year)
write_json(intake_year_status_race,here("CC Dashboard", "processed_data","intake_year_status_race.json"))
# Disposition: cases by year x conviction outcome, split by gender / race
disp_year_status_gender <- sent_intake_dispo_join %>%
group_by(receive_year,conviction2,GENDER) %>%
summarise(cases = n()) %>%
spread(GENDER,cases) %>%
rename(Year=receive_year,conviction=conviction2)
write_json(disp_year_status_gender,here("CC Dashboard","processed_data","disp_year_status_gender.json"))
disp_year_status_race <- sent_intake_dispo_join %>%
group_by(receive_year,conviction2,Race_short) %>%
summarise(cases = n()) %>%
spread(Race_short,cases) %>%
rename(Year=receive_year,conviction=conviction2)
write_json(disp_year_status_race,here("CC Dashboard","processed_data","disp_year_status_race.json"))
# Sentence: cases by year x sentence type, split by gender / race. The
# sentence2 recode mirrors the one used for the sankey above.
sent_year_status_gender <- sent_intake_dispo_join %>%
mutate(sentence2 = case_when(
is.na(sentence_type_short) & conviction2 %in% c("Conviction","Pending Conviction") ~ "Pending Sentence",
TRUE ~ sentence_type_short
)) %>%
# remove those missing sentence2 i.e. not in sentences dataset and not pending a sentence (i.e. they dont
# belong in this dataset to begin with)
filter(!is.na(sentence2)) %>%
group_by(receive_year,sentence2,GENDER) %>%
summarise(cases = n()) %>%
spread(GENDER,cases) %>%
rename(Year=receive_year,sentence_type_short=sentence2)
write_json(sent_year_status_gender,here("CC Dashboard","processed_data","sent_year_status_gender.json"))
sent_year_status_race <- sent_intake_dispo_join %>%
mutate(sentence2 = case_when(
is.na(sentence_type_short) & conviction2 %in% c("Conviction","Pending Conviction") ~ "Pending Sentence",
TRUE ~ sentence_type_short
)) %>%
# remove those missing sentence2 i.e. not in sentences dataset and not pending a sentence (i.e. they dont
# belong in this dataset to begin with)
filter(!is.na(sentence2)) %>%
group_by(receive_year,sentence2,Race_short) %>%
summarise(cases = n()) %>%
spread(Race_short,cases) %>%
rename(Year=receive_year,sentence_type_short=sentence2)
write_json(sent_year_status_race,here("CC Dashboard","processed_data","sent_year_status_race.json"))
# Combine the gender and race tables side-by-side for each stage and export
intake_year_race_gender <- inner_join(intake_year_status_gender,intake_year_status_race,by=c("Year","initiation_result"))
write_json(intake_year_race_gender,here("CC Dashboard","processed_data","intake_year_race_gender.json"))
dispo_year_race_gender <- inner_join(disp_year_status_gender,disp_year_status_race,by=c("Year","conviction"))
write_json(dispo_year_race_gender,here("CC Dashboard","processed_data","dispo_year_race_gender.json"))
sent_year_race_gender <- inner_join(sent_year_status_gender,sent_year_status_race,by=c("Year","sentence_type_short"))
write_json(sent_year_race_gender,here("CC Dashboard","processed_data","sent_year_race_gender.json"))
|
/d3_data_assembly.R
|
no_license
|
DSharm/cook-county-SAO-d3
|
R
| false
| false
| 20,295
|
r
|
# load necessary packages
libs <- c("tidyverse", "stringr", "readr", "dplyr", "ggplot2", "readstata13","foreign",
"magrittr","lubridate","here","ggrepel","treemapify","packcircles", "ggalluvial","ggrepel",
"extrafont","ggfittext","cowplot","googleway","ggspatial","sf","rnaturalearth",
"rnaturalearthdata","rgeos","ggridges","jsonlite")
lapply(libs, library, character.only=TRUE)
# Read in the raw CCSAO extracts (intake, initiation, sentencing, dispositions).
# stringsAsFactors = FALSE keeps all text columns as character for the recodes below.
intake <- read.csv(here('CC Dashboard','SAO Data','Intake.csv'),stringsAsFactors = FALSE)
initiation <- read.csv(here('CC Dashboard',"SAO Data","Initiation.csv"),stringsAsFactors = FALSE)
sentence <- read.csv(here('CC Dashboard',"SAO Data","Sentencing.csv"),stringsAsFactors = FALSE)
disposition <- read.csv(here('CC Dashboard',"SAO Data","Dispositions.csv"),stringsAsFactors = FALSE)
################################################################################################################
###################################################################################################
############################################################################################################
# Steps:
# 1) Clean each data set individually
# 2) Make dataset unique on CASE PARTICIPANT ID level
# 3) Merge intake, dispositions, and sentencing data
# 4) make adjustments for "Pending data"
# 5) Collapse in different ways - for sankey, by race, by gender
# Clean Intake Data
# There are 2 relevant variables - participant status and Felony Review result. participant status is the final
# status of a case brought against a person but it doesn't necessarily come from Felony Review since Narcotics cases
# are filed directly by Law Enforcement agencies. Accordingly, will make "clean" variable:
# case status detailed using FR Result variable and assuming all narcotics are filed by LEA -
# approved by felony review, filed by LEA (narcotics), rejected, continuing investigation, other
# will use variable 2 for D3 for now
# Numbers match CCSAO Dashboard if I dont filter for missing race/gender
# Intake
# Clean the intake extract in place:
#  - keep only rows with usable gender and race values (matches CCSAO dashboard counts)
#  - parse date columns and derive month/year helper fields
#  - collapse offense category, race, and Felony Review result into the short
#    groupings used by the dashboard
intake %<>%
  filter(GENDER %in% c("Male","Female")) %>%
  filter(RACE %in% c("Albino", "American Indian", "Asian", "ASIAN", "Biracial", "Black", "CAUCASIAN", "HISPANIC",
                     "Unknown", "White", "White [Hispanic or Latino]", "White/Black [Hispanic or Latino]")) %>%
  mutate(RECEIVED_DATE = as.Date(RECEIVED_DATE,'%m/%d/%Y')) %>%
  mutate(receive_month = round_date(RECEIVED_DATE, "month")) %>%
  mutate(FR_DATE = as.Date(FR_Date,'%m/%d/%Y')) %>%
  # (a second, identical mutate(FR_year = ...) call was removed here - it was a no-op duplicate)
  mutate(FR_year = year(FR_DATE)) %>%
  mutate(receive_year = year(RECEIVED_DATE)) %>%
  mutate(ARREST_DATE = as.Date(ARREST_DATE,'%m/%d/%Y')) %>%
  mutate(arrest_year = year(ARREST_DATE)) %>%
  # Broad offense buckets for display (labels include \n for chart label wrapping)
  mutate(Offense_Category_broad = case_when(
    grepl("Retail Theft", Offense_Category) ~ "Retail Theft",
    grepl("Burglary", Offense_Category) ~ "Burglary",
    grepl("Homicide", Offense_Category) ~ "Homicide",
    grepl("Robbery", Offense_Category) ~ "Robbery",
    grepl("Battery", Offense_Category) ~ "Battery",
    grepl("Assault", Offense_Category) ~ "Battery",
    grepl("DUI", Offense_Category) ~ "DUI",
    grepl("UUW", Offense_Category) ~ "Unlawful Use \n of Weapon",
    Offense_Category == "Theft" ~ "Theft\n (Non-retail)",
    Offense_Category == "Narcotics" ~ "Narcotics",
    Offense_Category == "Sex Crimes" ~ "Sex Crimes",
    Offense_Category == "Possession of Stolen Motor Vehicle" ~ "MVT",
    Offense_Category == "Driving With Suspended Or Revoked License" ~ "Driving With \n Revoked License",
    TRUE ~ "Other (e.g. Forgery, \n Identity Theft)"
  )) %>%
  # Four-level race grouping used throughout the dashboard
  mutate(Race_short = case_when(
    RACE == "Black" ~ "Black",
    RACE == "White" ~ "White",
    RACE == "CAUCASIAN" ~ "White",
    RACE == "HISPANIC" ~ "Latinx",
    RACE == "White [Hispanic or Latino]" ~ "Latinx",
    RACE == "White/Black [Hispanic or Latino]" ~ "Latinx",
    RACE == "Unknown" ~ "Other",
    TRUE ~ "Other"
  )) %>%
  # Intake result: a missing FR_RESULT on a narcotics case is assumed to be a
  # direct filing by the law enforcement agency (see notes above)
  mutate(initiation_result = case_when(
    grepl("Approved", FR_RESULT) ~ "Approved",
    grepl("Rejected", FR_RESULT) ~ "Rejected",
    grepl("Continued Investigation", FR_RESULT) ~ "Continued Investigation",
    FR_RESULT == "" & Offense_Category == "Narcotics" ~ "Filed by LEA",
    FR_RESULT == "" & Offense_Category != "Narcotics" ~ "Other",
    TRUE ~ "Other"))
# Slim intake table used for the joins below (one row per case participant)
intake_for_join <- intake %>%
select(CASE_PARTICIPANT_ID,receive_year,initiation_result,Race_short,GENDER)
# Disposition
# Clean the disposition extract in place: filter to usable gender/race values,
# parse dates, and collapse each charge disposition into (a) a binary
# Conviction / No Conviction flag and (b) a more detailed label (conviction_d).
disposition %<>%
  #filter(PRIMARY_CHARGE == "true") %>%
  filter(GENDER %in% c("Male","Female")) %>%
  filter(RACE %in% c("Albino", "American Indian", "Asian", "ASIAN", "Biracial", "Black", "CAUCASIAN", "HISPANIC",
                     "Unknown", "White", "White [Hispanic or Latino]", "White/Black [Hispanic or Latino]")) %>%
  # Disposition values can carry stray whitespace; trim before exact matching below
  mutate(CHARGE_DISPOSITION = trimws(CHARGE_DISPOSITION)) %>%
  mutate(DISPO_DATE = as.Date(DISPO_DATE,'%m/%d/%Y')) %>%
  mutate(dispo_month = round_date(DISPO_DATE, "month")) %>%
  mutate(dispo_year = year(DISPO_DATE)) %>%
  mutate(RECEIVED_DATE = as.Date(RECEIVED_DATE,'%m/%d/%Y')) %>%
  mutate(receive_month = round_date(RECEIVED_DATE, "month")) %>%
  mutate(receive_year = year(RECEIVED_DATE)) %>%
  mutate(conviction = case_when(
    CHARGE_DISPOSITION == "Plea Of Guilty" ~ "Conviction",
    CHARGE_DISPOSITION == "Finding Guilty" ~ "Conviction",
    CHARGE_DISPOSITION == "Verdict Guilty" ~ "Conviction",
    CHARGE_DISPOSITION == "Plea of Guilty - Amended Charge" ~ "Conviction",
    CHARGE_DISPOSITION == "Nolle Prosecution" ~ "No Conviction",
    CHARGE_DISPOSITION == "FNPC" ~ "No Conviction",
    CHARGE_DISPOSITION == "FNG" ~ "No Conviction",
    CHARGE_DISPOSITION == "Verdict-Not Guilty" ~ "No Conviction",
    CHARGE_DISPOSITION == "Case Dismissed" ~ "No Conviction",
    # NOTE(review): "Finding Not Not Guilty" mirrors the raw data value here -
    # confirm against the source extract that it is not a typo
    CHARGE_DISPOSITION == "Finding Not Not Guilty" ~ "No Conviction",
    CHARGE_DISPOSITION == "FNG Reason Insanity" ~ "No Conviction",
    CHARGE_DISPOSITION == "Death Suggested-Cause Abated" ~ "No Conviction",
    CHARGE_DISPOSITION == "BFW" ~ "No Conviction",
    CHARGE_DISPOSITION == "Transferred - Misd Crt" ~ "No Conviction",
    TRUE ~ ""
  )) %>%
  mutate(conviction_d = case_when(
    CHARGE_DISPOSITION == "Plea Of Guilty" ~ "Plea Of Guilty",
    CHARGE_DISPOSITION == "Finding Guilty" ~ "Finding Guilty",
    CHARGE_DISPOSITION == "Verdict Guilty" ~ "Verdict Guilty",
    CHARGE_DISPOSITION == "Plea of Guilty - Amended Charge" ~ "Plea Of Guilty",
    CHARGE_DISPOSITION == "Nolle Prosecution" ~ "Nolle Prosecution",
    # output-label typo fixed: "Finding No Probably Cause" -> "Finding No Probable Cause"
    CHARGE_DISPOSITION == "FNPC" ~ "Finding No Probable Cause",
    CHARGE_DISPOSITION == "FNG" ~ "Finding - Not Guilty",
    CHARGE_DISPOSITION == "Verdict-Not Guilty" ~ "Verdict - Not Guilty",
    CHARGE_DISPOSITION == "Case Dismissed" ~ "No Conviction - Other",
    CHARGE_DISPOSITION == "Finding Not Not Guilty" ~ "No Conviction - Other",
    CHARGE_DISPOSITION == "FNG Reason Insanity" ~ "No Conviction - Other",
    CHARGE_DISPOSITION == "Death Suggested-Cause Abated" ~ "No Conviction - Other",
    CHARGE_DISPOSITION == "BFW" ~ "No Conviction - Other",
    CHARGE_DISPOSITION == "Transferred - Misd Crt" ~ "No Conviction - Other",
    TRUE ~ ""
  )) %>%
  # case_when branches must share a type, so unmatched dispositions fall through
  # to "" above, get converted to NA here, and are then dropped
  mutate(conviction = ifelse(conviction=="",NA,conviction)) %>%
  mutate(conviction_d = ifelse(conviction_d=="",NA,conviction_d)) %>%
  filter(!is.na(conviction)) %>%
  # Same four-level race grouping as used for intake
  mutate(Race_short = case_when(
    RACE == "Black" ~ "Black",
    RACE == "White" ~ "White",
    RACE == "CAUCASIAN" ~ "White",
    RACE == "HISPANIC" ~ "Latinx",
    RACE == "White [Hispanic or Latino]" ~ "Latinx",
    RACE == "White/Black [Hispanic or Latino]" ~ "Latinx",
    RACE == "Unknown" ~ "Other",
    TRUE ~ "Other"
  ))
# Deal with joining intake with disposition
# First filter cases missing receive year i.e. initiated before 2011
# Then categorize cases missing disposition as Pending IF they have been approved or continued investigation or
# Filed By LEA. This is an assumption we're making - filed by LEA or continued investigation may not have reached
# disposition stage at all and may have been rejected, but since we don't know, PENDING seems like a safe guess
# Next I filter out those that are still missing conviction (i.e. Other or Rejected that don't show up in disposition)
# The assumption here being that those cases are not PENDING, but didn't proceed further in the system
# Summarizing this gives us the flow of cases from intake into conviction, removing cases that should have
# "stopped" after intake
# NOTE - i'm joining intake with disposition_max_conviction. In the CC SAO data / reports, they discuss only
# looking at the topline charge in the dispositions file. While this makes sense in some cases, it really
# doesn't make sense when combining dispositions with sentences data. This is because while the topline
# charge may not receive a conviction for a given case_participant, another charge for the same case_participant
# might, and that conviction will receive a sentence. For the purposes of tracking case-participants from
# intake to sentencing, it is therefore confusing to see many cases get "no convictions" and then many of the
# "no convictions" receive sentences.
# Instead, I think it makes more sense to count a conviction if ANY charges for a given case_participant
# result in a conviction, because then it follows that that case_participant would receive a sentence
# Collapse dispositions to one row per case participant, counting a participant
# as convicted if ANY of their charges resulted in a conviction (rationale in
# the comment block above).
disposition_max_conviction_receive <- disposition %>%
mutate(conviction_number = ifelse(conviction=="Conviction",1,0)) %>%
select(CASE_PARTICIPANT_ID,PRIMARY_CHARGE,receive_year,conviction,conviction_number) %>%
# filtering to the "max" of conviction vs no conviction, rather than topline charge
group_by(CASE_PARTICIPANT_ID) %>%
filter(conviction_number==max(conviction_number)) %>%
select(CASE_PARTICIPANT_ID,receive_year,conviction,conviction_number) %>%
distinct() %>%
select(CASE_PARTICIPANT_ID,conviction,conviction_number)
# Clean up sentencing data
# As per the CC SAO Data report, Sentences should NOT be filtered only on the primary charge (one per case participant ID)
# For many case participants, the primary charge may not receive a conviction (e.g. might be nolle'd) but
# that doesnt mean that case participant won't receive a sentence because some of the lower charges
# may receive a conviction and then therefore a sentence. So to summarize this data, we use n_distinct(Case participant ID)
# Sentences
# Clean the sentencing extract in place: filter to usable gender/race values,
# parse dates, and collapse race and sentence type into short groupings.
sentence %<>%
#filter(PRIMARY_CHARGE == "true") %>%
filter(GENDER %in% c("Male","Female")) %>%
filter(RACE %in% c("Albino", "American Indian", "Asian", "ASIAN", "Biracial", "Black", "CAUCASIAN", "HISPANIC",
"Unknown", "White", "White [Hispanic or Latino]", "White/Black [Hispanic or Latino]")) %>%
mutate(RECEIVED_DATE = as.Date(RECEIVED_DATE,'%m/%d/%Y')) %>%
mutate(receive_month = round_date(RECEIVED_DATE, "month")) %>%
mutate(receive_year = year(RECEIVED_DATE)) %>%
mutate(SENTENCE_DATE = as.Date(SENTENCE_DATE,'%m/%d/%Y')) %>%
mutate(sentence_month = round_date(SENTENCE_DATE, "month")) %>%
mutate(sentence_year = year(SENTENCE_DATE)) %>%
# Same four-level race grouping as used for intake and dispositions
mutate(Race_short = case_when(
RACE == "Black" ~ "Black",
RACE == "White" ~ "White",
RACE == "CAUCASIAN" ~ "White",
RACE == "HISPANIC" ~ "Latinx",
RACE == "White [Hispanic or Latino]" ~ "Latinx",
RACE == "White/Black [Hispanic or Latino]" ~ "Latinx",
RACE == "Unknown" ~ "Other",
TRUE ~ "Other"
)) %>%
# Four-level sentence grouping used by the dashboard
mutate(sentence_type_short = case_when(
SENTENCE_TYPE=="Prison" ~ "Prison",
SENTENCE_TYPE=="Probation" ~ "Probation",
SENTENCE_TYPE=="Jail" ~ "Jail",
TRUE ~ "Other Sentence"
))
# In the sentences dataset, there are about 6,900 duplicates on CASE PARTICIPANT IDS when we take "distinct" on CPI, receive year
# and sentence type short. This appears to be because in some cases, a case participant DOES receive 2 different sentences - one
# original and one at resentencing or during probation violation. So instead i think I should do the following:
# group by case participant ID, select the LATEST sentence date, THEN make it distinct - even if there are still multiple charges
# at the latest date they should be receivign the same sentence
sent_for_join_receive <- sentence %>%
group_by(CASE_PARTICIPANT_ID) %>%
filter(SENTENCE_DATE == max(SENTENCE_DATE)) %>%
select(CASE_PARTICIPANT_ID,receive_year,sentence_type_short) %>%
distinct() %>%
select(CASE_PARTICIPANT_ID,sentence_type_short)
# After doing this, still 200 duplicates left which don't seem to make sense (no reason for same case, same day to receive)
# 2 sentences. The number is so small, I'm going to drop these observations
sent_for_join_receive <- sent_for_join_receive[!duplicated(sent_for_join_receive$CASE_PARTICIPANT_ID),]
#
# duplicates <- sent_for_join_receive[duplicated(sent_for_join_receive$CASE_PARTICIPANT_ID),]
#
# sent_for_join_receive2 <- sentence %>%
# select(CASE_PARTICIPANT_ID,receive_year) %>%
# distinct()
#
# sent_for_join_receive <- sentence %>%
# group_by(CASE_PARTICIPANT_ID) %>%
# filter(SENTENCE_DATE == max(SENTENCE_DATE)) %>%
# filter(CASE_PARTICIPANT_ID==173015444916)
#
# select(CASE_PARTICIPANT_ID, sentence_type_short)
# Combine intake, disposition, sentences
# Year counts for sanity-checking the joins below
intake_for_join_years <- intake_for_join %>%
count(receive_year)
# Full joins keep case participants that appear in any of the three stages
intake_dispo_join <- full_join(intake_for_join, disposition_max_conviction_receive,by="CASE_PARTICIPANT_ID")
# Diagnostic: cases with no disposition row, tabulated by year and intake result
intake_dispo_join_NA <- intake_dispo_join %>% filter(is.na(conviction)) %>% group_by(receive_year,initiation_result) %>% summarise(cases=n())
sent_intake_dispo_join <- full_join(intake_dispo_join, sent_for_join_receive,by="CASE_PARTICIPANT_ID")
# Make adjustments on full dataset to deal with "pending" cases
# Difference between clean1 and clean2 is coming from the fact that some CASE PARTICIPANT IDS become duplicate
# when intake_dispo_join is merged with sentence, because the sentence dataset is not unique on case
# participant ID (in theory it should be, but about 1.7% of observations get duplicated since they might have
# 2 different sentence types for the same year and case participant)
# I think the cleanest thing to do is make sentence type dataset unique on CPI (select in the case of duplicates)
# and then merge
# intake_dispo_join_clean3 <- anti_join(intake_dispo_join_clean2,intake_dispo_join_clean1,by="CASE_PARTICIPANT_ID")
#
# intake_dispo_join_clean3_yearsX <- intake_dispo_join_clean3 %>%
# count(receive_year.x)
#
# intake_dispo_join_clean3_yearsY <- intake_dispo_join_clean3 %>%
# count(receive_year.y)
# Clean1 and clean2 match!!! as they should!!!
# Flag still-open cases as "Pending Conviction" and drop case participants whose
# cases ended at intake (rejected/other), so the sankey tracks only cases that
# could plausibly reach a disposition.
sent_intake_dispo_join %<>%
# Remove cases missing receive_year i.e. cases that exist in dispositions/sentences but not in
# intake because they were initiated mostly before 2011 (before data was clean)
filter(!is.na(receive_year)) %>%
mutate(case_participant = "Case Participants") %>%
# NOTE(review): case_when yields NA when conviction is NA and initiation_result
# is not one of the three "still live" statuses; those rows are dropped below.
mutate(conviction2 = case_when(
is.na(conviction) & initiation_result %in% c("Approved","Continued Investigation", "Filed by LEA") ~ "Pending Conviction",
TRUE ~ conviction
)) %>%
# Remove those missing conviction2 i.e. not in dispositions dataset and not pending
# About 21,000 obs over 8 years, ~15k of which are Rejected and 6k are other
filter(!is.na(conviction2))
# Dataset 1 for sankey: intake result -> conviction outcome, counted per receive year
intake_dispo_join_clean <- sent_intake_dispo_join %>%
group_by(receive_year,initiation_result,conviction2) %>%
summarise(cases = n()) %>%
rename(source=initiation_result,target=conviction2,value=cases)
# Dataset 2 for sankey: all case participants -> intake result (built from the
# pre-filtered join so rejected/other cases still appear at this first stage)
intake_all <- intake_dispo_join %>%
mutate(case_participant = "Case Participants") %>%
group_by(receive_year,initiation_result,case_participant) %>%
filter(!is.na(receive_year)) %>%
summarise(cases = n()) %>%
rename(source=case_participant,target=initiation_result,value=cases)
# Dataset 3 for sankey: conviction outcome -> sentence type
sentence_dispo_join_clean <- sent_intake_dispo_join %>%
# Convicted (or pending) cases with no sentence yet are labelled "Pending Sentence"
mutate(sentence2 = case_when(
is.na(sentence_type_short) & conviction2 %in% c("Conviction","Pending Conviction") ~ "Pending Sentence",
TRUE ~ sentence_type_short
)) %>%
# remove those missing sentence2 i.e. not in sentences dataset and not pending a sentence (i.e. they dont
# belong in this dataset to begin with)
filter(!is.na(sentence2)) %>%
group_by(receive_year,conviction2,sentence2) %>%
summarise(cases = n()) %>%
#filter(receive_year==2017) %>%
# ungroup() %>%
select(receive_year,conviction2,sentence2,cases) %>%
rename(source=conviction2,target=sentence2,value=cases)
# Stack the three sankey stages (they share source/target/value column names
# after the renames above) and write the combined links table for the D3 dashboard.
sent_intake_dispo_join_final <- rbind(intake_dispo_join_clean,intake_all,sentence_dispo_join_clean)
write_json(sent_intake_dispo_join_final,here("CC Dashboard", "processed_data","sent_intake_dispo_join_final.json"))
## Now create intake race/gender, disposition race/gender, and sentence race/gender from the same
# Intake: cases by year x intake result, one column per gender / per race group
intake_year_status_gender <- sent_intake_dispo_join %>%
group_by(receive_year,initiation_result,GENDER) %>%
summarise(cases = n()) %>%
spread(GENDER,cases) %>%
rename(Year=receive_year)
write_json(intake_year_status_gender,here("CC Dashboard","processed_data","intake_year_status_gender.json"))
intake_year_status_race <- sent_intake_dispo_join %>%
group_by(receive_year,initiation_result,Race_short) %>%
summarise(cases = n()) %>%
spread(Race_short,cases) %>%
rename(Year=receive_year)
write_json(intake_year_status_race,here("CC Dashboard", "processed_data","intake_year_status_race.json"))
# Disposition: cases by year x conviction outcome, split by gender / race
disp_year_status_gender <- sent_intake_dispo_join %>%
group_by(receive_year,conviction2,GENDER) %>%
summarise(cases = n()) %>%
spread(GENDER,cases) %>%
rename(Year=receive_year,conviction=conviction2)
write_json(disp_year_status_gender,here("CC Dashboard","processed_data","disp_year_status_gender.json"))
disp_year_status_race <- sent_intake_dispo_join %>%
group_by(receive_year,conviction2,Race_short) %>%
summarise(cases = n()) %>%
spread(Race_short,cases) %>%
rename(Year=receive_year,conviction=conviction2)
write_json(disp_year_status_race,here("CC Dashboard","processed_data","disp_year_status_race.json"))
# Sentence: cases by year x sentence type, split by gender / race. The
# sentence2 recode mirrors the one used for the sankey above.
sent_year_status_gender <- sent_intake_dispo_join %>%
mutate(sentence2 = case_when(
is.na(sentence_type_short) & conviction2 %in% c("Conviction","Pending Conviction") ~ "Pending Sentence",
TRUE ~ sentence_type_short
)) %>%
# remove those missing sentence2 i.e. not in sentences dataset and not pending a sentence (i.e. they dont
# belong in this dataset to begin with)
filter(!is.na(sentence2)) %>%
group_by(receive_year,sentence2,GENDER) %>%
summarise(cases = n()) %>%
spread(GENDER,cases) %>%
rename(Year=receive_year,sentence_type_short=sentence2)
write_json(sent_year_status_gender,here("CC Dashboard","processed_data","sent_year_status_gender.json"))
sent_year_status_race <- sent_intake_dispo_join %>%
mutate(sentence2 = case_when(
is.na(sentence_type_short) & conviction2 %in% c("Conviction","Pending Conviction") ~ "Pending Sentence",
TRUE ~ sentence_type_short
)) %>%
# remove those missing sentence2 i.e. not in sentences dataset and not pending a sentence (i.e. they dont
# belong in this dataset to begin with)
filter(!is.na(sentence2)) %>%
group_by(receive_year,sentence2,Race_short) %>%
summarise(cases = n()) %>%
spread(Race_short,cases) %>%
rename(Year=receive_year,sentence_type_short=sentence2)
write_json(sent_year_status_race,here("CC Dashboard","processed_data","sent_year_status_race.json"))
# Combine the gender and race tables side-by-side for each stage and export
intake_year_race_gender <- inner_join(intake_year_status_gender,intake_year_status_race,by=c("Year","initiation_result"))
write_json(intake_year_race_gender,here("CC Dashboard","processed_data","intake_year_race_gender.json"))
dispo_year_race_gender <- inner_join(disp_year_status_gender,disp_year_status_race,by=c("Year","conviction"))
write_json(dispo_year_race_gender,here("CC Dashboard","processed_data","dispo_year_race_gender.json"))
sent_year_race_gender <- inner_join(sent_year_status_gender,sent_year_status_race,by=c("Year","sentence_type_short"))
write_json(sent_year_race_gender,here("CC Dashboard","processed_data","sent_year_race_gender.json"))
|
#' Clip CHM Data Based on extent of RGB file
#'
#' Finds the NEON canopy height model (CHM) tile whose 1 km geoindex matches the
#' extent of the supplied RGB tile, crops it to that extent, and optionally
#' saves it to disk.
#'
#' @param siteID NEON site code used to locate the CHM directory (e.g. "TEAK")
#' @param rgb_filename path to a projected file to extract coordinates
#' @param year year to match in CHM files
#' @param tif_base_dir where to search for CHM files
#' @param save_base_dir base path to save cropped files
#' @param save logical; write the cropped raster to disk (TRUE) or return it (FALSE)
#' @return If \code{save = TRUE}, the path of the saved tif; otherwise the cropped raster
#' @importFrom magrittr "%>%"
#' @export
#'
crop_target_CHM<-function(siteID="TEAK",rgb_filename,year="2019",tif_base_dir="/orange/ewhite/NeonData",save_base_dir="/orange/ewhite/b.weinstein/NEON", save=TRUE){
  # CHM dir for this site/year (NEON data product DP3.30015.001)
  tif_dir<-paste(tif_base_dir,siteID,"DP3.30015.001",year,sep="/")
  # list.files() expects a regex, not a glob: anchor on a literal "CHM.tif" suffix
  # (the original "*CHM.tif" was glob-style and not a valid regular expression)
  chm_files<-list.files(tif_dir,recursive = TRUE,full.names = TRUE, pattern="CHM\\.tif$")
  # Find extent and geoindex: NEON tiles are indexed by the SW corner rounded
  # down to the nearest 1000 m
  ext <- raster::extent(raster::raster(rgb_filename))
  easting <- as.integer(ext@xmin/1000)*1000
  northing <- as.integer(ext@ymin/1000)*1000
  geo_index <- paste(easting,northing,sep="_")
  # Find the corresponding CHM tile; fail loudly instead of letting
  # raster::raster() error cryptically on a zero- or multi-element path
  tif_path<-chm_files[stringr::str_detect(chm_files,geo_index)]
  if (length(tif_path) != 1) {
    stop("Expected exactly one CHM tile matching geoindex ", geo_index,
         " under ", tif_dir, "; found ", length(tif_path), call. = FALSE)
  }
  save_dir<-paste(save_base_dir,siteID,year,"NEONPlots/CHM/target/",sep="/")
  if(!dir.exists(save_dir)){
    dir.create(save_dir,recursive=TRUE)
  }
  # Clip the CHM to the RGB tile's extent
  CHM<-raster::raster(tif_path)
  cropped_CHM<-raster::crop(CHM,ext)
  if(save){
    # Output name derived from the RGB basename (dot escaped so ".tif" is literal)
    basename <- stringr::str_match(rgb_filename,"/(\\w+)\\.tif")[,2]
    fname <- paste(basename,"_CHM.tif",sep="")
    full_fname<-paste(save_dir,fname,sep="/")
    raster::writeRaster(cropped_CHM,full_fname,overwrite=TRUE)
    return(full_fname)
  } else{
    return(cropped_CHM)
  }
}
|
/R/crop_target_CHM.R
|
no_license
|
weecology/TreeSegmentation
|
R
| false
| false
| 1,525
|
r
|
#' Clip CHM Data Based on extent of RGB file
#'
#' Finds the NEON canopy height model (CHM) tile whose 1 km geoindex matches
#' the extent of the supplied RGB tile, crops it, and optionally saves it.
#'
#' @param siteID NEON site code used to locate the CHM directory (e.g. "TEAK")
#' @param rgb_filename path to a projected file to extract coordinates
#' @param year year to match in CHM files
#' @param tif_base_dir where to search for CHM files
#' @param save_base_dir base path to save cropped files
#' @param save logical; write the cropped raster to disk (TRUE) or return it (FALSE)
#' @return If save is TRUE, the path of the saved tif; otherwise the cropped raster
#' @importFrom magrittr "%>%"
#' @export
#'
crop_target_CHM<-function(siteID="TEAK",rgb_filename,year="2019",tif_base_dir="/orange/ewhite/NeonData",save_base_dir="/orange/ewhite/b.weinstein/NEON", save=T){
#CHM dir for this site/year (NEON data product DP3.30015.001)
tif_dir<-paste(tif_base_dir,siteID,"DP3.30015.001",year,sep="/")
# NOTE(review): pattern is glob-style; list.files() expects a regex, so
# "CHM\\.tif$" would be the precise pattern - confirm before changing
chm_files<-list.files(tif_dir,recursive = TRUE,full.names = T, pattern="*CHM.tif")
#find extent and geoindex: NEON tiles are indexed by the SW corner rounded down to 1000 m
ext <- raster::extent(raster::raster(rgb_filename))
easting <- as.integer(ext@xmin/1000)*1000
northing <- as.integer(ext@ymin/1000)*1000
geo_index <- paste(easting,northing,sep="_")
#Find the corresponding CHM tile by its geoindex
tif_path<-chm_files[stringr::str_detect(chm_files,geo_index)]
save_dir<-paste(save_base_dir,siteID,year,"NEONPlots/CHM/target/",sep="/")
if(!dir.exists(save_dir)){
dir.create(save_dir,recursive=T)
}
#Clip the CHM to the RGB tile's extent
CHM<-raster::raster(tif_path)
cropped_CHM<-raster::crop(CHM,ext)
#filename derived from the RGB basename
if(save){
basename <- stringr::str_match(rgb_filename,"/(\\w+).tif")[,2]
fname <- paste(basename,"_CHM.tif",sep="")
full_fname<-paste(save_dir,fname,sep="/")
raster::writeRaster(cropped_CHM,full_fname,overwrite=T)
return(full_fname)
} else{
return(cropped_CHM)
}
}
|
/4_capitalizacion_de_mercado.r
|
no_license
|
alejandroguipe/mercadobvc
|
R
| false
| false
| 10,115
|
r
| ||
# Calculate local and upstream TNC dam barrier statistics for each catchment
# zone and write one CSV per barrier variable (local_* and upstream_*).
# NOTE(review): clearing the global environment in a script is discouraged, but
# it is kept here for backwards compatibility with the existing workflow.
rm(list=ls())
# ==============
# Load libraries
# ==============
library(reshape2)
library(foreign)
library(tcltk)
library(dplyr)
library(lazyeval)
#===============
# Specify Inputs
#===============
inputsFilePath <- "C:/KPONEIL/GitHub/projects/shedsData/basinCharacteristics/zonalStatistics/INPUTS_NHDHRDV2.txt"
barrierStatsFilePath <- 'C:/KPONEIL/GitHub/projects/basinCharacteristics/tncDams/outputTables/barrierStats_NHDHRDV2.dbf'
missingDataFilePath <- 'C:/KPONEIL/GitHub/projects/basinCharacteristics/tncDams/outputTables/barrierStatsNAs_NHDHRDV2.dbf'
# ==========
# Load files
# ==========
# User inputs (assumed to define zoneField, catchmentsFileNames, baseDirectory
# and outputName used below - confirm against the INPUTS file)
source(inputsFilePath)
# Barrier statistics
barrierStatsMaster <- read.dbf(barrierStatsFilePath)
# ==================
# Edit Barrier Stats
# ==================
# Remove the extra FREQUENCY column produced by the GIS zonal-statistics step
barrierStatsMaster <- barrierStatsMaster[,- which(names(barrierStatsMaster) %in% "FREQUENCY")]
# Index of the zone ID column
zoneCol <- which(names(barrierStatsMaster) %in% zoneField)
# Rename the barrier columns (everything except the zone ID) to lowercase
names(barrierStatsMaster)[-zoneCol] <- tolower(names(barrierStatsMaster)[-zoneCol])
# Sum all barrier types into a combined column.
# NOTE(review): [,-1] assumes the zone ID is the first column - TODO confirm.
barrierStatsMaster$deg_barr_all <- rowSums(barrierStatsMaster[,-1], na.rm = TRUE, dims = 1)
# Number of barrier variables (all columns except the zone ID)
numBarriers <- ncol(barrierStatsMaster) - 1
# Missing Data: zones listed in the NA table get all barrier values set to NA
barrierStatsNAs <- read.dbf(missingDataFilePath)
missingDataZones <- barrierStatsNAs[,zoneField]
barrierStatsMaster[which(barrierStatsMaster[,zoneField] %in% missingDataZones), - zoneCol] <- NA
# Loop through all catchments files
for (catchmentsFileName in catchmentsFileNames) {
  # Load catchment networks (provides delineatedCatchments)
  load(file.path(baseDirectory, "versions", outputName, "delineatedCatchments", paste0("Delineation_", catchmentsFileName,".RData")))
  # Local catchment areas, used only for the list of zone IDs in this file
  vectorArea <- read.csv(file.path(baseDirectory, "versions", outputName, "rTables", catchmentsFileName, paste0("local_AreaSqKM.csv")))
  # Zone IDs present in this catchments file
  zoneIDs <- unique(vectorArea[,c(zoneField)])
  barrierStats <- barrierStatsMaster[which(barrierStatsMaster[,c(zoneField)] %in% zoneIDs),]
  # ========================
  # Process local statistics
  # ========================
  # One output CSV per barrier variable.
  # seq_len() is safe when numBarriers is 0 (1:numBarriers would iterate c(1, 0)).
  for (b in seq_len(numBarriers)) {
    # Zone ID plus a single barrier column
    gisStat <- barrierStats[,c(1, b + 1)]
    # Specify the output
    outputTable <- file.path(baseDirectory, "versions", outputName, "rTables", catchmentsFileName, paste0("local_", names(gisStat)[2], ".csv"))
    # Save the local stat file
    write.csv(gisStat, file = outputTable, row.names = FALSE)
  }
  # ===========================
  # Process upstream statistics
  # ===========================
  # Preallocated storage: one row per zone, zone ID + one column per barrier type
  upstreamStats <- data.frame(matrix(NA, nrow = length(zoneIDs), ncol = numBarriers + 1))
  names(upstreamStats) <- names(barrierStats)
  # Catchments loop
  progressBar <- tkProgressBar(title = "progress bar", min = 0, max = length(zoneIDs), width = 300)
  for (m in seq_along(zoneIDs)) {
    # All catchments in the network of the current zone
    features <- delineatedCatchments[[which(names(delineatedCatchments) == zoneIDs[m])]]
    # Individual catchment stats for the current basin
    catchStats <- filter_(barrierStats, interp(~col %in% features, col = as.name(zoneField)))
    # Sum over the upstream network to get the final values
    outStats <- colSums(catchStats, na.rm = TRUE)
    # Upstream stats
    upstreamStats[m,1] <- zoneIDs[m]
    upstreamStats[m,2:ncol(upstreamStats)] <- outStats[-1]
    # Progress bar update
    setTkProgressBar(progressBar, m, label=paste( round(m/length(zoneIDs)*100, 2), "% done"))
  }
  close(progressBar)
  # Output upstream statistics tables: one CSV per barrier variable with the
  # total number of dams upstream. upstreamStats always has >= 2 columns
  # (zone ID + deg_barr_all), so 2:ncol() is safe here.
  for (n in 2:ncol(upstreamStats)) {
    colName <- names(upstreamStats)[n]
    upStat <- upstreamStats[,c(zoneField, colName)]
    write.csv(upStat,
              file = file.path(baseDirectory, "versions", outputName, "rTables", catchmentsFileName, paste0("upstream_", colName, ".csv")),
              row.names = FALSE)
  }
} # End catchments file loop
|
/basinCharacteristics/zonalStatistics/NHDHRDV2_3a_calculateUpstreamStatistics (TNC Dams).R
|
no_license
|
tarsacha/shedsGisData
|
R
| false
| false
| 4,427
|
r
|
# Calculate local and upstream TNC dam barrier statistics for each catchment
# zone and write one CSV per barrier variable (local_* and upstream_*).
rm(list=ls())
# ==============
# Load libraries
# ==============
library(reshape2)
library(foreign)
library(tcltk)
library(dplyr)
library(lazyeval)
#===============
# Specify Inputs
#===============
inputsFilePath <- "C:/KPONEIL/GitHub/projects/shedsData/basinCharacteristics/zonalStatistics/INPUTS_NHDHRDV2.txt"
barrierStatsFilePath <- 'C:/KPONEIL/GitHub/projects/basinCharacteristics/tncDams/outputTables/barrierStats_NHDHRDV2.dbf'
missingDataFilePath <- 'C:/KPONEIL/GitHub/projects/basinCharacteristics/tncDams/outputTables/barrierStatsNAs_NHDHRDV2.dbf'
# ==========
# Load files
# ==========
# User inputs
# -----------
# (assumed to define zoneField, catchmentsFileNames, baseDirectory, outputName - confirm)
source(inputsFilePath)
# Barrier statistics
# ------------------
barrierStatsMaster <- read.dbf(barrierStatsFilePath)
# ==================
# Edit Barrier Stats
# ==================
# Remove extra column
barrierStatsMaster <- barrierStatsMaster[,- which(names(barrierStatsMaster) %in% "FREQUENCY")]
# Index zone column
zoneCol <- which(names(barrierStatsMaster) %in% zoneField )
# Rename columns to lowercase
names(barrierStatsMaster)[-zoneCol] <- tolower(names(barrierStatsMaster)[-zoneCol])
# Summ all barrier types
# NOTE(review): [,-1] assumes the zone ID is the first column - TODO confirm
barrierStatsMaster$deg_barr_all <- rowSums (barrierStatsMaster[,-1], na.rm = TRUE, dims = 1)
# Barrier type count (all columns except the zone ID)
numBarriers <- ncol(barrierStatsMaster) - 1
# Missing Data: zones listed in the NA table get all barrier values set to NA
# ------------
barrierStatsNAs <- read.dbf(missingDataFilePath)
missingDataZones <- barrierStatsNAs[,zoneField]
barrierStatsMaster[which(barrierStatsMaster[,zoneField] %in% missingDataZones), - zoneCol] <- NA
# Loop through all catchments files
for (catchmentsFileName in catchmentsFileNames) {
# Load catchments networks (provides delineatedCatchments)
# ------------------------
load(file.path(baseDirectory, "versions", outputName, "delineatedCatchments", paste0("Delineation_", catchmentsFileName,".RData")))
# Catchment Areas
# ---------------
# Local areas, used only for the list of zone IDs in this file
vectorArea <- read.csv(file.path(baseDirectory, "versions", outputName, "rTables", catchmentsFileName, paste0("local_AreaSqKM.csv")))
# List the zone IDs for splitting dataset
zoneIDs <- unique(vectorArea[,c(zoneField)])
barrierStats <- barrierStatsMaster[which(barrierStatsMaster[,c(zoneField)] %in% zoneIDs),]
# ========================
# Process local statistics
# ========================
# Loop through layers, reading files. One output CSV per barrier variable.
for (b in 1:numBarriers) {
# Separate individual barrier types (zone ID plus a single barrier column)
gisStat <- barrierStats[,c(1, b + 1)]
# Specify the output
outputTable <- file.path(baseDirectory, "versions", outputName, "rTables", catchmentsFileName, paste0("local_", names(gisStat)[2], ".csv"))
# Save the local stat file
write.csv(gisStat, file = outputTable, row.names = F)
}
# ===========================
# Process upstream statistics
# ===========================
# Define storage dataframe (preallocated: one row per zone)
upstreamStats <- data.frame(matrix(NA, nrow = length(zoneIDs), ncol = numBarriers + 1))
names(upstreamStats) <- names(barrierStats)
# Catchments loop
# ---------------
progressBar <- tkProgressBar(title = "progress bar", min = 0, max = length(zoneIDs), width = 300)
for ( m in seq_along(zoneIDs)){
# Get features in current basin
features <- delineatedCatchments[[which(names(delineatedCatchments) == zoneIDs[m])]]
# Get individual catchment stats for current basin
catchStats <- filter_(barrierStats, interp(~col %in% features, col = as.name(zoneField)))
# Sum the weighted stats to get final values
outStats <- colSums(catchStats, na.rm = T)
# Upstream stats
upstreamStats[m,1] <- zoneIDs[m]
upstreamStats[m,2:ncol(upstreamStats)] <- outStats[-1]
# Progress bar update
setTkProgressBar(progressBar, m, label=paste( round(m/length(zoneIDs)*100, 2), "% done"))
}
close(progressBar)
# Output upstream statistics tables
# ---------------------------------
# Loop through variables writing tables with total number of dams upstream
for ( n in 2:(ncol(upstreamStats))){
# Name
colName <- names(upstreamStats)[n]
# Output dataframe
upStat <- upstreamStats[,c(zoneField, colName)]
# Write out file
write.csv(upStat,
file = file.path(baseDirectory, "versions", outputName, "rTables", catchmentsFileName, paste0("upstream_", colName, ".csv")),
row.names = F)
}
} # End catchments file loop
|
#rm(list =ls())
#wwd <- setwd("C:/Users/Dominic Cyr/Dropbox/NorthShore")
# Launch one LANDIS-II simulation per directory listed in simInfo.csv,
# running them in parallel on ~90% of the available cores.
wwd <- getwd()
# colClasses keeps simDir as character so names like "001" are not
# coerced to numbers.
simInfo <- read.csv("simInfo.csv", colClasses = c(simDir = "character"))
simDir <- simInfo$simDir
library(parallel)
library(doSNOW)
# Use 90% of the cores, leaving headroom for the OS.
n <- floor(detectCores() * 0.90)
cl <- makeCluster(n, outfile = "")  # outfile = "" echoes worker output to console
registerDoSNOW(cl)
foreach(i = seq_along(simDir)) %dopar% {
  # Stagger the first wave of workers so several processes are less
  # likely to try to access the same file at the same time.
  if (i <= n) {
    Sys.sleep(runif(1) * 2)
  }
  # Each worker is a separate process, so setwd() only affects that worker.
  setwd(file.path(wwd, simDir[i]))
  # Windows-only: run the LANDIS-II scenario and block until it finishes.
  shell("landis scenario.txt", wait = TRUE)
}
stopCluster(cl)
|
/biasCorrection/SudStlBlank/simPilot.R
|
no_license
|
onetreetwotrees/PicusToLandisIIBiomassSuccession
|
R
| false
| false
| 656
|
r
|
#rm(list =ls())
#wwd <- setwd("C:/Users/Dominic Cyr/Dropbox/NorthShore")
# Launch one LANDIS-II simulation per directory listed in simInfo.csv,
# running them in parallel on ~90% of the available cores.
wwd <- getwd()
# colClasses keeps simDir as character so names like "001" are not
# coerced to numbers.
simInfo <- read.csv("simInfo.csv", colClasses = c(simDir = "character"))
simDir <- simInfo$simDir
library(parallel)
library(doSNOW)
# Use 90% of the cores, leaving headroom for the OS.
n <- floor(detectCores() * 0.90)
cl <- makeCluster(n, outfile = "")  # outfile = "" echoes worker output to console
registerDoSNOW(cl)
foreach(i = seq_along(simDir)) %dopar% {
  # Stagger the first wave of workers so several processes are less
  # likely to try to access the same file at the same time.
  if (i <= n) {
    Sys.sleep(runif(1) * 2)
  }
  # Each worker is a separate process, so setwd() only affects that worker.
  setwd(file.path(wwd, simDir[i]))
  # Windows-only: run the LANDIS-II scenario and block until it finishes.
  shell("landis scenario.txt", wait = TRUE)
}
stopCluster(cl)
|
#plot
percent_plot <- function(well_data,test_compound)
{
percent_data <- well_data %>% filter(var_compound == test_compound | !is.na(control))
compound_percent_data <- percent_data %>% mutate( concentration = ifelse( is.na(control),concentration,0) ) %>%
group_by(content) %>% mutate( x_label = ifelse( is.na(units), "Controls", paste(concentration,units) )) %>%
mutate( cat = ifelse( is.na(units), "green","red")) %>%
mutate(plotorder = ifelse(is.na(control), (as.numeric(concentration,rm.na=TRUE)*10000 +10), as.numeric(control,rm.na=TRUE)))
range <- compound_percent_data %>% summarise( maxp = max(percent,na.rm = TRUE) , minp = min(percent,na.rm = TRUE),
concentration = max(concentration))
title <- paste0(percent_data$var_compound[ is.na(percent_data$control) ][1], " CRC with ",
stringr::str_split(percent_data$content[ is.na(percent_data$control) ][1],":")[[1]][1] )
colors <- c("red","blue","orange","purple")
pp<- ggplot(compound_percent_data,aes(x=reorder(x_label,plotorder),y=as.numeric(percent),color=var_compound)) +
geom_point(aes(fill=var_compound),shape=1,size=3) + # geom_line() +
labs(y= "% GFP(+) Cell Area", x="Concentration") +
theme_minimal()+
theme(legend.position = "right",legend.title=element_blank()) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))+
ggtitle(title) + expand_limits(y = 0)+
# scale_color_manual(values=c("green",colors[1:length(unique(compound_percent_data$compound))-1]))
scale_color_manual(values=c("green",colors[1:length(unique(compound_percent_data$compound))]))
pp
}
|
/R_Code/percent_plot.R
|
no_license
|
ceparman/Imaging-App
|
R
| false
| false
| 1,752
|
r
|
#plot
# percent_plot: build a concentration-response curve (CRC) scatter plot of
# % GFP(+) cell area for one test compound together with all control wells.
#
# Arguments:
#   well_data     - per-well data frame; this code reads columns var_compound,
#                   control, concentration, units, content, and percent
#                   (schema inferred from usage here -- confirm with callers).
#   test_compound - value matched against var_compound to pick the compound.
#
# Returns: a ggplot object.
percent_plot <- function(well_data,test_compound)
{
# Keep rows for the requested compound plus every control well (control not NA).
percent_data <- well_data %>% filter(var_compound == test_compound | !is.na(control))
# Controls get concentration 0; x_label shows "Controls" or "<conc> <units>";
# plotorder places controls (raw control code) ahead of dosed wells
# (concentration * 10000 + 10) so reorder() sorts the x axis sensibly.
# NOTE(review): rm.na is not an argument of as.numeric() and is silently
# ignored -- presumably harmless, but confirm na.rm was not intended elsewhere.
compound_percent_data <- percent_data %>% mutate( concentration = ifelse( is.na(control),concentration,0) ) %>%
group_by(content) %>% mutate( x_label = ifelse( is.na(units), "Controls", paste(concentration,units) )) %>%
mutate( cat = ifelse( is.na(units), "green","red")) %>%
mutate(plotorder = ifelse(is.na(control), (as.numeric(concentration,rm.na=TRUE)*10000 +10), as.numeric(control,rm.na=TRUE)))
# Per-group summary of percent extremes; not used for the plot below.
# NOTE(review): the name shadows base::range within this function.
range <- compound_percent_data %>% summarise( maxp = max(percent,na.rm = TRUE) , minp = min(percent,na.rm = TRUE),
concentration = max(concentration))
# Title: "<compound> CRC with <content prefix before the first ':'>",
# taken from the first non-control row.
title <- paste0(percent_data$var_compound[ is.na(percent_data$control) ][1], " CRC with ",
stringr::str_split(percent_data$content[ is.na(percent_data$control) ][1],":")[[1]][1] )
colors <- c("red","blue","orange","purple")
# Scatter of percent vs. ordered concentration label, colored by compound.
# NOTE(review): $compound is referenced below but the column used throughout
# is var_compound -- if compound does not exist, $compound is NULL and the
# palette collapses to c("green","red"); verify against the input schema.
pp<- ggplot(compound_percent_data,aes(x=reorder(x_label,plotorder),y=as.numeric(percent),color=var_compound)) +
geom_point(aes(fill=var_compound),shape=1,size=3) + # geom_line() +
labs(y= "% GFP(+) Cell Area", x="Concentration") +
theme_minimal()+
theme(legend.position = "right",legend.title=element_blank()) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))+
ggtitle(title) + expand_limits(y = 0)+
# scale_color_manual(values=c("green",colors[1:length(unique(compound_percent_data$compound))-1]))
scale_color_manual(values=c("green",colors[1:length(unique(compound_percent_data$compound))]))
# Return the plot object (last expression).
pp
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tar_option_set.R
\name{tar_option_set}
\alias{tar_option_set}
\title{Set target options.}
\usage{
tar_option_set(
tidy_eval = NULL,
packages = NULL,
imports = NULL,
library = NULL,
envir = NULL,
format = NULL,
iteration = NULL,
error = NULL,
memory = NULL,
garbage_collection = NULL,
deployment = NULL,
priority = NULL,
backoff = NULL,
resources = NULL,
storage = NULL,
retrieval = NULL,
cue = NULL,
debug = NULL,
workspaces = NULL
)
}
\arguments{
\item{tidy_eval}{Logical, whether to enable tidy evaluation
when interpreting \code{command} and \code{pattern}. If \code{TRUE}, you can use the
"bang-bang" operator \verb{!!} to programmatically insert
the values of global objects.}
\item{packages}{Character vector of packages to load right before
the target builds. Use \code{tar_option_set()} to set packages
globally for all subsequent targets you define.}
\item{imports}{Character vector of package names to track
global dependencies. For example, if you write
\code{tar_option_set(imports = "yourAnalysisPackage")} early in \verb{_targets.R},
then \code{tar_make()} will automatically rerun or skip targets
in response to changes to the R functions and objects defined in
\code{yourAnalysisPackage}. Does not account for low-level compiled code
such as C/C++ or Fortran. If you supply multiple packages,
e.g. \code{tar_option_set(imports = c("p1", "p2"))}, then the objects in
\code{p1} override the objects in \code{p2} if there are name conflicts.
Similarly, objects in \code{tar_option_get("envir")} override
everything in \code{tar_option_get("imports")}.}
\item{library}{Character vector of library paths to try
when loading \code{packages}.}
\item{envir}{Environment containing functions and global objects
used in the R commands to run targets.}
\item{format}{Optional storage format for the target's return value.
With the exception of \code{format = "file"}, each target
gets a file in \verb{_targets/objects}, and each format is a different
way to save and load this file.
Possible formats:
\itemize{
\item \code{"rds"}: Default, uses \code{saveRDS()} and \code{readRDS()}. Should work for
most objects, but slow.
\item \code{"qs"}: Uses \code{qs::qsave()} and \code{qs::qread()}. Should work for
most objects, much faster than \code{"rds"}. Optionally set the
preset for \code{qsave()} through the \code{resources} argument, e.g.
\code{tar_target(..., resources = list(preset = "archive"))}.
Requires the \code{qs} package (not installed by default).
\item \code{"feather"}: Uses \code{arrow::write_feather()} and
\code{arrow::read_feather()} (version 2.0). Much faster than \code{"rds"},
but the value must be a data frame. Optionally set
\code{compression} and \code{compression_level} in \code{arrow::write_feather()}
through the \code{resources} argument, e.g.
\code{tar_target(..., resources = list(compression = ...))}.
Requires the \code{arrow} package (not installed by default).
\item \code{"parquet"}: Uses \code{arrow::write_parquet()} and
\code{arrow::read_parquet()} (version 2.0). Much faster than \code{"rds"},
but the value must be a data frame. Optionally set
\code{compression} and \code{compression_level} in \code{arrow::write_parquet()}
through the \code{resources} argument, e.g.
\code{tar_target(..., resources = list(compression = ...))}.
Requires the \code{arrow} package (not installed by default).
\item \code{"fst"}: Uses \code{fst::write_fst()} and \code{fst::read_fst()}.
Much faster than \code{"rds"}, but the value must be
a data frame. Optionally set the compression level for
\code{fst::write_fst()} through the \code{resources} argument, e.g.
\code{tar_target(..., resources = list(compress = 100))}.
Requires the \code{fst} package (not installed by default).
\item \code{"fst_dt"}: Same as \code{"fst"}, but the value is a \code{data.table}.
Optionally set the compression level the same way as for \code{"fst"}.
\item \code{"fst_tbl"}: Same as \code{"fst"}, but the value is a \code{tibble}.
Optionally set the compression level the same way as for \code{"fst"}.
\item \code{"keras"}: Uses \code{keras::save_model_hdf5()} and
\code{keras::load_model_hdf5()}. The value must be a Keras model.
Requires the \code{keras} package (not installed by default).
\item \code{"torch"}: Uses \code{torch::torch_save()} and \code{torch::torch_load()}.
The value must be an object from the \code{torch} package
such as a tensor or neural network module.
Requires the \code{torch} package (not installed by default).
\item \code{"file"}: A dynamic file. To use this format,
the target needs to manually identify or save some data
and return a character vector of paths
to the data. (These paths must be existing files
and nonempty directories.)
Then, \code{targets} automatically checks those files and cues
the appropriate build decisions if those files are out of date.
Those paths must point to files or directories,
and they must not contain characters \code{|} or \code{*}.
All the files and directories you return must actually exist,
or else \code{targets} will throw an error. (And if \code{storage} is \code{"worker"},
\code{targets} will first stall out trying to wait for the file
to arrive over a network file system.)
\item \code{"url"}: A dynamic input URL. It works like \code{format = "file"}
except the return value of the target is a URL that already exists
and serves as input data for downstream targets. Optionally
supply a custom \code{curl} handle through the \code{resources} argument, e.g.
\code{tar_target(..., resources = list(handle = curl::new_handle()))}.
The data file at the URL needs to have an ETag or a Last-Modified
time stamp, or else the target will throw an error because
it cannot track the data. Also, use extreme caution when
trying to use \code{format = "url"} to track uploads. You must be absolutely
certain the ETag and Last-Modified time stamp are fully updated
and available by the time the target's command finishes running.
\code{targets} makes no attempt to wait for the web server.
\item \code{"aws_rds"}, \code{"aws_qs"}, \code{"aws_parquet"}, \code{"aws_fst"}, \code{"aws_fst_dt"},
\code{"aws_fst_tbl"}, \code{"aws_keras"}: AWS-powered versions of the
respective formats \code{"rds"}, \code{"qs"}, etc. The only difference
is that the data file is uploaded to the AWS S3 bucket
you supply to \code{resources}. See the cloud computing chapter
of the manual for details.
\item \code{"aws_file"}: arbitrary dynamic files on AWS S3. The target
should return a path to a temporary local file, then
\code{targets} will automatically upload this file to an S3
bucket and track it for you. Unlike \code{format = "file"},
\code{format = "aws_file"} can only handle one single file,
and that file must not be a directory.
\code{\link[=tar_read]{tar_read()}} and downstream targets
download the file to \verb{_targets/scratch/} locally and return the path.
\verb{_targets/scratch/} gets deleted at the end of \code{\link[=tar_make]{tar_make()}}.
Requires the same \code{resources} and other configuration details
as the other AWS-powered formats. See the cloud computing
chapter of the manual for details.
}}
\item{iteration}{Character of length 1, name of the iteration mode
of the target. Choices:
\itemize{
\item \code{"vector"}: branching happens with \code{vctrs::vec_slice()} and
aggregation happens with \code{vctrs::vec_c()}.
\item \code{"list"}, branching happens with \verb{[[]]} and aggregation happens with
\code{list()}.
\item \code{"group"}: \code{dplyr::group_by()}-like functionality to branch over
subsets of a data frame. The target's return value must be a data
frame with a special \code{tar_group} column of consecutive integers
from 1 through the number of groups. Each integer designates a group,
and a branch is created for each collection of rows in a group.
See the \code{\link[=tar_group]{tar_group()}} function to see how you can
create the special \code{tar_group} column with \code{dplyr::group_by()}.
}}
\item{error}{Character of length 1, what to do if the target
runs into an error. If \code{"stop"}, the whole pipeline stops
and throws an error. If \code{"continue"}, the error is recorded,
but the pipeline keeps going.}
\item{memory}{Character of length 1, memory strategy.
If \code{"persistent"}, the target stays in memory
until the end of the pipeline (unless \code{storage} is \code{"worker"},
in which case \code{targets} unloads the value from memory
right after storing it in order to avoid sending
copious data over a network).
If \code{"transient"}, the target gets unloaded
after every new target completes.
Either way, the target gets automatically loaded into memory
whenever another target needs the value.
For cloud-based dynamic files such as \code{format = "aws_file"},
this memory policy applies to
temporary local copies of the file in \verb{_targets/scratch/}:
\code{"persistent"} means they remain until the end of the pipeline,
and \code{"transient"} means they get deleted from the file system
as soon as possible. The former conserves bandwidth,
and the latter conserves local storage.}
\item{garbage_collection}{Logical, whether to run \code{base::gc()}
just before the target runs.}
\item{deployment}{Character of length 1, only relevant to
\code{\link[=tar_make_clustermq]{tar_make_clustermq()}} and \code{\link[=tar_make_future]{tar_make_future()}}. If \code{"worker"},
the target builds on a parallel worker. If \code{"main"},
the target builds on the host machine / process managing the pipeline.}
\item{priority}{Numeric of length 1 between 0 and 1. Controls which
targets get deployed first when multiple competing targets are ready
simultaneously. Targets with priorities closer to 1 get built earlier
(and polled earlier in \code{\link[=tar_make_future]{tar_make_future()}}).
Only applies to \code{\link[=tar_make_future]{tar_make_future()}} and \code{\link[=tar_make_clustermq]{tar_make_clustermq()}}
(not \code{\link[=tar_make]{tar_make()}}). \code{\link[=tar_make_future]{tar_make_future()}} with no extra settings is
a drop-in replacement for \code{\link[=tar_make]{tar_make()}} in this case.}
\item{backoff}{Numeric of length 1, must be greater than or equal to 0.01.
Maximum upper bound of the random polling interval
for the priority queue (seconds).
In high-performance computing (e.g. \code{\link[=tar_make_clustermq]{tar_make_clustermq()}}
and \code{\link[=tar_make_future]{tar_make_future()}}) it can be expensive to repeatedly poll the
priority queue if no targets are ready to process. The number of seconds
between polls is \code{runif(1, 0.01, max(backoff, 0.01 * 1.5 ^ index))},
where \code{index} is the number of consecutive polls so far that found
no targets ready to skip or run.
(If no target is ready, \code{index} goes up by 1. If a target is ready,
\code{index} resets to 0. For more information on exponential
backoff, visit \url{https://en.wikipedia.org/wiki/Exponential_backoff}).
Raising \code{backoff} is kinder to the CPU etc. but may incur delays
in some instances.}
\item{resources}{A named list of computing resources. Uses:
\itemize{
\item Template file wildcards for \code{future::future()} in \code{\link[=tar_make_future]{tar_make_future()}}.
\item Template file wildcards \code{clustermq::workers()} in \code{\link[=tar_make_clustermq]{tar_make_clustermq()}}.
\item Custom target-level \code{future::plan()}, e.g.
\code{resources = list(plan = future.callr::callr)}.
\item Custom \code{curl} handle if \code{format = "url"},
e.g. \code{resources = list(handle = curl::new_handle(nobody = TRUE))}.
In custom handles, most users should manually set \code{nobody = TRUE}
so \code{targets} does not download the entire file when it
only needs to check the time stamp and ETag.
\item Custom preset for \code{qs::qsave()} if \code{format = "qs"}, e.g.
\code{resources = list(handle = "archive")}.
\item Arguments \code{compression} and \code{compression_level} to
\code{arrow::write_feather()} and \code{arrow::write_parquet()} if \code{format} is
\code{"feather"}, \code{"parquet"}, \code{"aws_feather"}, or \code{"aws_parquet"}.
\item Custom compression level for \code{fst::write_fst()} if
\code{format} is \code{"fst"}, \code{"fst_dt"}, or \code{"fst_tbl"}, e.g.
\code{resources = list(compress = 100)}.
\item AWS bucket and prefix for the \code{"aws_"} formats, e.g.
\code{resources = list(bucket = "your-bucket", prefix = "folder/name")}.
\code{bucket} is required for AWS formats. See the cloud computing chapter
of the manual for details.
}}
\item{storage}{Character of length 1, only relevant to
\code{\link[=tar_make_clustermq]{tar_make_clustermq()}} and \code{\link[=tar_make_future]{tar_make_future()}}.
If \code{"main"}, the target's return value is sent back to the
host machine and saved locally. If \code{"worker"}, the worker
saves the value.}
\item{retrieval}{Character of length 1, only relevant to
\code{\link[=tar_make_clustermq]{tar_make_clustermq()}} and \code{\link[=tar_make_future]{tar_make_future()}}.
If \code{"main"}, the target's dependencies are loaded on the host machine
and sent to the worker before the target builds.
If \code{"worker"}, the worker loads the targets dependencies.}
\item{cue}{An optional object from \code{tar_cue()} to customize the
rules that decide whether the target is up to date.}
\item{debug}{Character vector of names of targets to run in debug mode.
To use effectively, you must set \code{callr_function = NULL} and
restart your R session just before running. You should also run
\code{\link[=tar_make]{tar_make()}}, \code{\link[=tar_make_clustermq]{tar_make_clustermq()}}, or \code{\link[=tar_make_future]{tar_make_future()}}.
For any target mentioned in \code{debug}, \code{targets} will force the target to
build locally (with \code{tar_cue(mode = "always")} and \code{deployment = "main"}
in the settings) and pause in an interactive debugger to help you diagnose
problems. This is like inserting a \code{browser()} statement at the
beginning of the target's expression, but without invalidating any
targets.}
\item{workspaces}{Character vector of names of targets to save workspace
files. Workspace files let you re-create a target's runtime environment
in an interactive R session using \code{\link[=tar_workspace]{tar_workspace()}}. \code{\link[=tar_workspace]{tar_workspace()}}
loads a target's random number generator seed and dependency objects
as long as those target objects are still in the data store
(usually \verb{_targets/objects/}).}
}
\value{
Nothing.
}
\description{
Set target options, including default arguments to
\code{\link[=tar_target]{tar_target()}} such as packages, storage format,
iteration type, and cue. See default options with \code{\link[=tar_option_get]{tar_option_get()}}.
To use \code{tar_option_set()} effectively, put it in your workflow's
\verb{_targets.R} script before calls to \code{\link[=tar_target]{tar_target()}} or \code{\link[=tar_target_raw]{tar_target_raw()}}.
}
\examples{
tar_option_get("format") # default format before we set anything
tar_target(x, 1)$settings$format
tar_option_set(format = "fst_tbl") # new default format
tar_option_get("format")
tar_target(x, 1)$settings$format
tar_option_reset() # reset the format
tar_target(x, 1)$settings$format
if (identical(Sys.getenv("TAR_LONG_EXAMPLES"), "true")) {
tar_dir({ # tar_dir() runs code from a temporary directory.
tar_script({
tar_option_set(cue = tar_cue(mode = "always")) # All targets always run.
list(tar_target(x, 1), tar_target(y, 2))
})
tar_make()
tar_make()
})
}
}
|
/man/tar_option_set.Rd
|
permissive
|
guhjy/targets
|
R
| false
| true
| 15,567
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tar_option_set.R
\name{tar_option_set}
\alias{tar_option_set}
\title{Set target options.}
\usage{
tar_option_set(
tidy_eval = NULL,
packages = NULL,
imports = NULL,
library = NULL,
envir = NULL,
format = NULL,
iteration = NULL,
error = NULL,
memory = NULL,
garbage_collection = NULL,
deployment = NULL,
priority = NULL,
backoff = NULL,
resources = NULL,
storage = NULL,
retrieval = NULL,
cue = NULL,
debug = NULL,
workspaces = NULL
)
}
\arguments{
\item{tidy_eval}{Logical, whether to enable tidy evaluation
when interpreting \code{command} and \code{pattern}. If \code{TRUE}, you can use the
"bang-bang" operator \verb{!!} to programmatically insert
the values of global objects.}
\item{packages}{Character vector of packages to load right before
the target builds. Use \code{tar_option_set()} to set packages
globally for all subsequent targets you define.}
\item{imports}{Character vector of package names to track
global dependencies. For example, if you write
\code{tar_option_set(imports = "yourAnalysisPackage")} early in \verb{_targets.R},
then \code{tar_make()} will automatically rerun or skip targets
in response to changes to the R functions and objects defined in
\code{yourAnalysisPackage}. Does not account for low-level compiled code
such as C/C++ or Fortran. If you supply multiple packages,
e.g. \code{tar_option_set(imports = c("p1", "p2"))}, then the objects in
\code{p1} override the objects in \code{p2} if there are name conflicts.
Similarly, objects in \code{tar_option_get("envir")} override
everything in \code{tar_option_get("imports")}.}
\item{library}{Character vector of library paths to try
when loading \code{packages}.}
\item{envir}{Environment containing functions and global objects
used in the R commands to run targets.}
\item{format}{Optional storage format for the target's return value.
With the exception of \code{format = "file"}, each target
gets a file in \verb{_targets/objects}, and each format is a different
way to save and load this file.
Possible formats:
\itemize{
\item \code{"rds"}: Default, uses \code{saveRDS()} and \code{readRDS()}. Should work for
most objects, but slow.
\item \code{"qs"}: Uses \code{qs::qsave()} and \code{qs::qread()}. Should work for
most objects, much faster than \code{"rds"}. Optionally set the
preset for \code{qsave()} through the \code{resources} argument, e.g.
\code{tar_target(..., resources = list(preset = "archive"))}.
Requires the \code{qs} package (not installed by default).
\item \code{"feather"}: Uses \code{arrow::write_feather()} and
\code{arrow::read_feather()} (version 2.0). Much faster than \code{"rds"},
but the value must be a data frame. Optionally set
\code{compression} and \code{compression_level} in \code{arrow::write_feather()}
through the \code{resources} argument, e.g.
\code{tar_target(..., resources = list(compression = ...))}.
Requires the \code{arrow} package (not installed by default).
\item \code{"parquet"}: Uses \code{arrow::write_parquet()} and
\code{arrow::read_parquet()} (version 2.0). Much faster than \code{"rds"},
but the value must be a data frame. Optionally set
\code{compression} and \code{compression_level} in \code{arrow::write_parquet()}
through the \code{resources} argument, e.g.
\code{tar_target(..., resources = list(compression = ...))}.
Requires the \code{arrow} package (not installed by default).
\item \code{"fst"}: Uses \code{fst::write_fst()} and \code{fst::read_fst()}.
Much faster than \code{"rds"}, but the value must be
a data frame. Optionally set the compression level for
\code{fst::write_fst()} through the \code{resources} argument, e.g.
\code{tar_target(..., resources = list(compress = 100))}.
Requires the \code{fst} package (not installed by default).
\item \code{"fst_dt"}: Same as \code{"fst"}, but the value is a \code{data.table}.
Optionally set the compression level the same way as for \code{"fst"}.
\item \code{"fst_tbl"}: Same as \code{"fst"}, but the value is a \code{tibble}.
Optionally set the compression level the same way as for \code{"fst"}.
\item \code{"keras"}: Uses \code{keras::save_model_hdf5()} and
\code{keras::load_model_hdf5()}. The value must be a Keras model.
Requires the \code{keras} package (not installed by default).
\item \code{"torch"}: Uses \code{torch::torch_save()} and \code{torch::torch_load()}.
The value must be an object from the \code{torch} package
such as a tensor or neural network module.
Requires the \code{torch} package (not installed by default).
\item \code{"file"}: A dynamic file. To use this format,
the target needs to manually identify or save some data
and return a character vector of paths
to the data. (These paths must be existing files
and nonempty directories.)
Then, \code{targets} automatically checks those files and cues
the appropriate build decisions if those files are out of date.
Those paths must point to files or directories,
and they must not contain characters \code{|} or \code{*}.
All the files and directories you return must actually exist,
or else \code{targets} will throw an error. (And if \code{storage} is \code{"worker"},
\code{targets} will first stall out trying to wait for the file
to arrive over a network file system.)
\item \code{"url"}: A dynamic input URL. It works like \code{format = "file"}
except the return value of the target is a URL that already exists
and serves as input data for downstream targets. Optionally
supply a custom \code{curl} handle through the \code{resources} argument, e.g.
\code{tar_target(..., resources = list(handle = curl::new_handle()))}.
The data file at the URL needs to have an ETag or a Last-Modified
time stamp, or else the target will throw an error because
it cannot track the data. Also, use extreme caution when
trying to use \code{format = "url"} to track uploads. You must be absolutely
certain the ETag and Last-Modified time stamp are fully updated
and available by the time the target's command finishes running.
\code{targets} makes no attempt to wait for the web server.
\item \code{"aws_rds"}, \code{"aws_qs"}, \code{"aws_parquet"}, \code{"aws_fst"}, \code{"aws_fst_dt"},
\code{"aws_fst_tbl"}, \code{"aws_keras"}: AWS-powered versions of the
respective formats \code{"rds"}, \code{"qs"}, etc. The only difference
is that the data file is uploaded to the AWS S3 bucket
you supply to \code{resources}. See the cloud computing chapter
of the manual for details.
\item \code{"aws_file"}: arbitrary dynamic files on AWS S3. The target
should return a path to a temporary local file, then
\code{targets} will automatically upload this file to an S3
bucket and track it for you. Unlike \code{format = "file"},
\code{format = "aws_file"} can only handle one single file,
and that file must not be a directory.
\code{\link[=tar_read]{tar_read()}} and downstream targets
download the file to \verb{_targets/scratch/} locally and return the path.
\verb{_targets/scratch/} gets deleted at the end of \code{\link[=tar_make]{tar_make()}}.
Requires the same \code{resources} and other configuration details
as the other AWS-powered formats. See the cloud computing
chapter of the manual for details.
}}
\item{iteration}{Character of length 1, name of the iteration mode
of the target. Choices:
\itemize{
\item \code{"vector"}: branching happens with \code{vctrs::vec_slice()} and
aggregation happens with \code{vctrs::vec_c()}.
\item \code{"list"}, branching happens with \verb{[[]]} and aggregation happens with
\code{list()}.
\item \code{"group"}: \code{dplyr::group_by()}-like functionality to branch over
subsets of a data frame. The target's return value must be a data
frame with a special \code{tar_group} column of consecutive integers
from 1 through the number of groups. Each integer designates a group,
and a branch is created for each collection of rows in a group.
See the \code{\link[=tar_group]{tar_group()}} function to see how you can
create the special \code{tar_group} column with \code{dplyr::group_by()}.
}}
\item{error}{Character of length 1, what to do if the target
runs into an error. If \code{"stop"}, the whole pipeline stops
and throws an error. If \code{"continue"}, the error is recorded,
but the pipeline keeps going.}
\item{memory}{Character of length 1, memory strategy.
If \code{"persistent"}, the target stays in memory
until the end of the pipeline (unless \code{storage} is \code{"worker"},
in which case \code{targets} unloads the value from memory
right after storing it in order to avoid sending
copious data over a network).
If \code{"transient"}, the target gets unloaded
after every new target completes.
Either way, the target gets automatically loaded into memory
whenever another target needs the value.
For cloud-based dynamic files such as \code{format = "aws_file"},
this memory policy applies to
temporary local copies of the file in \verb{_targets/scratch/}:
\code{"persistent"} means they remain until the end of the pipeline,
and \code{"transient"} means they get deleted from the file system
as soon as possible. The former conserves bandwidth,
and the latter conserves local storage.}
\item{garbage_collection}{Logical, whether to run \code{base::gc()}
just before the target runs.}
\item{deployment}{Character of length 1, only relevant to
\code{\link[=tar_make_clustermq]{tar_make_clustermq()}} and \code{\link[=tar_make_future]{tar_make_future()}}. If \code{"worker"},
the target builds on a parallel worker. If \code{"main"},
the target builds on the host machine / process managing the pipeline.}
\item{priority}{Numeric of length 1 between 0 and 1. Controls which
targets get deployed first when multiple competing targets are ready
simultaneously. Targets with priorities closer to 1 get built earlier
(and polled earlier in \code{\link[=tar_make_future]{tar_make_future()}}).
Only applies to \code{\link[=tar_make_future]{tar_make_future()}} and \code{\link[=tar_make_clustermq]{tar_make_clustermq()}}
(not \code{\link[=tar_make]{tar_make()}}). \code{\link[=tar_make_future]{tar_make_future()}} with no extra settings is
a drop-in replacement for \code{\link[=tar_make]{tar_make()}} in this case.}
\item{backoff}{Numeric of length 1, must be greater than or equal to 0.01.
Maximum upper bound of the random polling interval
for the priority queue (seconds).
In high-performance computing (e.g. \code{\link[=tar_make_clustermq]{tar_make_clustermq()}}
and \code{\link[=tar_make_future]{tar_make_future()}}) it can be expensive to repeatedly poll the
priority queue if no targets are ready to process. The number of seconds
between polls is \code{runif(1, 0.01, max(backoff, 0.01 * 1.5 ^ index))},
where \code{index} is the number of consecutive polls so far that found
no targets ready to skip or run.
(If no target is ready, \code{index} goes up by 1. If a target is ready,
\code{index} resets to 0. For more information on exponential
backoff, visit \url{https://en.wikipedia.org/wiki/Exponential_backoff}).
Raising \code{backoff} is kinder to the CPU etc. but may incur delays
in some instances.}
\item{resources}{A named list of computing resources. Uses:
\itemize{
\item Template file wildcards for \code{future::future()} in \code{\link[=tar_make_future]{tar_make_future()}}.
\item Template file wildcards \code{clustermq::workers()} in \code{\link[=tar_make_clustermq]{tar_make_clustermq()}}.
\item Custom target-level \code{future::plan()}, e.g.
\code{resources = list(plan = future.callr::callr)}.
\item Custom \code{curl} handle if \code{format = "url"},
e.g. \code{resources = list(handle = curl::new_handle(nobody = TRUE))}.
In custom handles, most users should manually set \code{nobody = TRUE}
so \code{targets} does not download the entire file when it
only needs to check the time stamp and ETag.
\item Custom preset for \code{qs::qsave()} if \code{format = "qs"}, e.g.
\code{resources = list(handle = "archive")}.
\item Arguments \code{compression} and \code{compression_level} to
\code{arrow::write_feather()} and \code{arrow::write_parquet()} if \code{format} is
\code{"feather"}, \code{"parquet"}, \code{"aws_feather"}, or \code{"aws_parquet"}.
\item Custom compression level for \code{fst::write_fst()} if
\code{format} is \code{"fst"}, \code{"fst_dt"}, or \code{"fst_tbl"}, e.g.
\code{resources = list(compress = 100)}.
\item AWS bucket and prefix for the \code{"aws_"} formats, e.g.
\code{resources = list(bucket = "your-bucket", prefix = "folder/name")}.
\code{bucket} is required for AWS formats. See the cloud computing chapter
of the manual for details.
}}
\item{storage}{Character of length 1, only relevant to
\code{\link[=tar_make_clustermq]{tar_make_clustermq()}} and \code{\link[=tar_make_future]{tar_make_future()}}.
If \code{"main"}, the target's return value is sent back to the
host machine and saved locally. If \code{"worker"}, the worker
saves the value.}
\item{retrieval}{Character of length 1, only relevant to
\code{\link[=tar_make_clustermq]{tar_make_clustermq()}} and \code{\link[=tar_make_future]{tar_make_future()}}.
If \code{"main"}, the target's dependencies are loaded on the host machine
and sent to the worker before the target builds.
If \code{"worker"}, the worker loads the target's dependencies.}
\item{cue}{An optional object from \code{tar_cue()} to customize the
rules that decide whether the target is up to date.}
\item{debug}{Character vector of names of targets to run in debug mode.
To use effectively, you must set \code{callr_function = NULL} and
restart your R session just before running. You should also run
\code{\link[=tar_make]{tar_make()}}, \code{\link[=tar_make_clustermq]{tar_make_clustermq()}}, or \code{\link[=tar_make_future]{tar_make_future()}}.
For any target mentioned in \code{debug}, \code{targets} will force the target to
build locally (with \code{tar_cue(mode = "always")} and \code{deployment = "main"}
in the settings) and pause in an interactive debugger to help you diagnose
problems. This is like inserting a \code{browser()} statement at the
beginning of the target's expression, but without invalidating any
targets.}
\item{workspaces}{Character vector of names of targets to save workspace
files. Workspace files let you re-create a target's runtime environment
in an interactive R session using \code{\link[=tar_workspace]{tar_workspace()}}. \code{\link[=tar_workspace]{tar_workspace()}}
loads a target's random number generator seed and dependency objects
as long as those target objects are still in the data store
(usually \verb{_targets/objects/}).}
}
\value{
Nothing.
}
\description{
Set target options, including default arguments to
\code{\link[=tar_target]{tar_target()}} such as packages, storage format,
iteration type, and cue. See default options with \code{\link[=tar_option_get]{tar_option_get()}}.
To use \code{tar_option_set()} effectively, put it in your workflow's
\verb{_targets.R} script before calls to \code{\link[=tar_target]{tar_target()}} or \code{\link[=tar_target_raw]{tar_target_raw()}}.
}
\examples{
tar_option_get("format") # default format before we set anything
tar_target(x, 1)$settings$format
tar_option_set(format = "fst_tbl") # new default format
tar_option_get("format")
tar_target(x, 1)$settings$format
tar_option_reset() # reset the format
tar_target(x, 1)$settings$format
if (identical(Sys.getenv("TAR_LONG_EXAMPLES"), "true")) {
tar_dir({ # tar_dir() runs code from a temporary directory.
tar_script({
tar_option_set(cue = tar_cue(mode = "always")) # All targets always run.
list(tar_target(x, 1), tar_target(y, 2))
})
tar_make()
tar_make()
})
}
}
|
# Explore US broadband access: choropleth maps of the percentage of the
# population with broadband access, at state and county level.
# NOTE(review): rgdal was retired from CRAN in 2023; consider sf::st_read()
# as a replacement -- confirm before migrating.
pacman::p_load(leaflet, rgdal, dplyr, htmltools)
# Broadband point data (per state/county) and Census cartographic boundaries.
states <- readOGR("c2hgis_state/c2hgis_statePoint.shp")
states_bound <- readOGR("cb_2018_us_state_500k/cb_2018_us_state_500k.shp")
counties <- readOGR("c2hgis_county/c2hgis_countyPoint.shp")
counties_bound <- readOGR("cb_2018_us_county_500k/cb_2018_us_county_500k.shp")
# Remove unnecessary columns
# Keep only geography IDs/names and with/without-broadband percentages.
states@data <- states@data %>% select(c(geography0, geography1, pctpopwbba, pctpopwobb))
counties@data <- counties@data %>% select(c(geography0, geography1, pctpopwbba, pctpopwobb))
# Remove US Territories
# (FIPS codes above 56 are territories; 56 = Wyoming is the highest state.)
states_bound <- subset(states_bound, GEOID <= 56)
counties_bound <- subset(counties_bound, STATEFP <= 56)
# Merge broadband data with counties polygon data
# GEOID (boundary) matches geography0 (broadband data); drop unmatched rows.
states_merge <- sp::merge(states_bound, states@data, by.x = "GEOID", by.y = "geography0")
states_merge <- subset(states_merge, !is.na(pctpopwbba))
counties_merge <- sp::merge(counties_bound, counties@data, by.x = "GEOID", by.y = "geography0")
counties_merge <- subset(counties_merge, !is.na(pctpopwbba))
# Color palettes
# Continuous palettes mapped over the observed broadband-percentage range.
pal_state <- colorNumeric(
  palette = "Purples",
  domain = states_merge$pctpopwbba
)
pal_county <- colorNumeric(
  palette = "Greens",
  domain = counties_merge$pctpopwbba
)
# Labels
# Hover labels: bold region name plus its broadband percentage (as HTML).
labels_state <- sprintf(
  "<strong>%s</strong><br/>%s",
  states_merge$NAME, states_merge$pctpopwbba
) %>%
  lapply(HTML)
labels_county <- sprintf(
  "<strong>%s</strong><br/>%s",
  counties_merge$NAME, counties_merge$pctpopwbba
) %>%
  lapply(HTML)
# Map state boundaries
map_state_bound <- leaflet(data = states_merge) %>%
  addPolygons(weight = .5, smoothFactor = 0.2, fillOpacity = 1,
              color = ~pal_state(states_merge$pctpopwbba),
              highlight = highlightOptions(
                weight = 2, color= "White", fillOpacity = 1,
                bringToFront = TRUE),
              label = labels_state) %>%
  setView(lng = -93,
          lat = 38,
          zoom = 3) %>%
  addLegend(pal = pal_state, values = states_merge$pctpopwbba, title = "Broadband Access %", opacity = 1)
map_state_bound
# Map county boundaries
map_county_bound <- leaflet(data = counties_merge) %>%
  addPolygons(weight = .5, smoothFactor = 0.2, fillOpacity = 1,
              color = ~pal_county(counties_merge$pctpopwbba),
              highlight = highlightOptions(
                weight = 2, color= "White", fillOpacity = 1,
                bringToFront = TRUE),
              label = labels_county) %>%
  setView(lng = -93,
          lat = 38,
          zoom = 3) %>%
  addLegend(pal = pal_county, values = counties_merge$pctpopwbba, title = "Broadband Access %", opacity = 1)
map_county_bound
|
/explore_broadband.R
|
no_license
|
augiege/serve
|
R
| false
| false
| 2,630
|
r
|
# Explore US broadband access: choropleth maps of the percentage of the
# population with broadband access, at state and county level.
# NOTE(review): rgdal was retired from CRAN in 2023; consider sf::st_read()
# as a replacement -- confirm before migrating.
pacman::p_load(leaflet, rgdal, dplyr, htmltools)
# Broadband point data (per state/county) and Census cartographic boundaries.
states <- readOGR("c2hgis_state/c2hgis_statePoint.shp")
states_bound <- readOGR("cb_2018_us_state_500k/cb_2018_us_state_500k.shp")
counties <- readOGR("c2hgis_county/c2hgis_countyPoint.shp")
counties_bound <- readOGR("cb_2018_us_county_500k/cb_2018_us_county_500k.shp")
# Remove unnecessary columns
# Keep only geography IDs/names and with/without-broadband percentages.
states@data <- states@data %>% select(c(geography0, geography1, pctpopwbba, pctpopwobb))
counties@data <- counties@data %>% select(c(geography0, geography1, pctpopwbba, pctpopwobb))
# Remove US Territories
# (FIPS codes above 56 are territories; 56 = Wyoming is the highest state.)
states_bound <- subset(states_bound, GEOID <= 56)
counties_bound <- subset(counties_bound, STATEFP <= 56)
# Merge broadband data with counties polygon data
# GEOID (boundary) matches geography0 (broadband data); drop unmatched rows.
states_merge <- sp::merge(states_bound, states@data, by.x = "GEOID", by.y = "geography0")
states_merge <- subset(states_merge, !is.na(pctpopwbba))
counties_merge <- sp::merge(counties_bound, counties@data, by.x = "GEOID", by.y = "geography0")
counties_merge <- subset(counties_merge, !is.na(pctpopwbba))
# Color palettes
# Continuous palettes mapped over the observed broadband-percentage range.
pal_state <- colorNumeric(
  palette = "Purples",
  domain = states_merge$pctpopwbba
)
pal_county <- colorNumeric(
  palette = "Greens",
  domain = counties_merge$pctpopwbba
)
# Labels
# Hover labels: bold region name plus its broadband percentage (as HTML).
labels_state <- sprintf(
  "<strong>%s</strong><br/>%s",
  states_merge$NAME, states_merge$pctpopwbba
) %>%
  lapply(HTML)
labels_county <- sprintf(
  "<strong>%s</strong><br/>%s",
  counties_merge$NAME, counties_merge$pctpopwbba
) %>%
  lapply(HTML)
# Map state boundaries
map_state_bound <- leaflet(data = states_merge) %>%
  addPolygons(weight = .5, smoothFactor = 0.2, fillOpacity = 1,
              color = ~pal_state(states_merge$pctpopwbba),
              highlight = highlightOptions(
                weight = 2, color= "White", fillOpacity = 1,
                bringToFront = TRUE),
              label = labels_state) %>%
  setView(lng = -93,
          lat = 38,
          zoom = 3) %>%
  addLegend(pal = pal_state, values = states_merge$pctpopwbba, title = "Broadband Access %", opacity = 1)
map_state_bound
# Map county boundaries
map_county_bound <- leaflet(data = counties_merge) %>%
  addPolygons(weight = .5, smoothFactor = 0.2, fillOpacity = 1,
              color = ~pal_county(counties_merge$pctpopwbba),
              highlight = highlightOptions(
                weight = 2, color= "White", fillOpacity = 1,
                bringToFront = TRUE),
              label = labels_county) %>%
  setView(lng = -93,
          lat = 38,
          zoom = 3) %>%
  addLegend(pal = pal_county, values = counties_merge$pctpopwbba, title = "Broadband Access %", opacity = 1)
map_county_bound
|
\name{amdp}
\alias{amdp}
\title{
Create an amdp object.
}
\description{
Create amdps
}
\usage{
amdp(object, X, y, predictor, predictfcn, newdata, verbose = TRUE,
frac_to_build = 1, indices_to_build = NULL, num_grid_pts,
logodds = F, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{
A fitted model of any kind.
}
\item{X}{
Matrix or data frame of predictors (observations along rows). To the extent that \code{object} requires
predictors in a certain format, \code{X} must obey that format. Note that \code{X} does not include the response.
}
\item{y}{
Optional vector of responses of the dataset, used to return the range of observed values. Cannot be a factor. If missing, returns the range of all points in the amdp grid.
}
\item{predictor}{
Column number of \code{X} for which to calculate partial dependence. Alternatively \code{predictor} can be a string corresponding to a column name in \code{X}.
}
\item{predictfcn}{
Optional prediction function to use with \code{object}. Must accept arguments \code{object} and \code{newdata}. The software does minimal further checking -- it is up to the user to ensure that \code{predictfcn(object = object, newdata = X)} is supported. If not passed, the function looks for the default \code{predict} function associated with \code{class(object)}.
}
\item{newdata}{Optional data at which to compute the curves, used in place of \code{X} when supplied. (TODO: confirm the intended semantics with the package author.)}
\item{verbose}{
If \code{verbose} is \code{TRUE} we print updates.
}
\item{frac_to_build}{
Fraction of curves to build. If less than 1, the rows chosen correspond to equally distributed quantiles of \code{predictor}.
}
\item{indices_to_build}{
If specified, program builds objects from the rows specified in \code{indices_to_build}
}
\item{num_grid_pts}{
The number of points along \code{X[, predictor]} at which the program will predict. If unspecified, it will predict at all unique points. If the number of unique points is less than the number of grid points specified, the program will warn the user and the number of unique points will be used.
}
\item{logodds}{
If TRUE, the predicted values (which should be probabilities), are converted to logits
before the object is returned.
}
\item{\dots}{
Other parameters to be passed.
}
}
\value{If assigned, returns an object of class \code{amdp}. The object is a list with elements:\cr
\item{apdps}{An \code{nrow(X)} by \code{length(gridpts)} matrix where row i is observation i's marginal dependence curve.}
\item{gridpts}{A vector of the unique grid points at which all marginal curves are evaluated.}
\item{predictor}{The predictor used. If possible, returns the predictor's name; if not, returns column index in \code{X}.}
\item{xj}{The actual value of \code{X[, predictor]} for each observation.}
\item{actual_prediction}{The predicted value for each observation at its actual vector of predictors.}
\item{logodds}{The \code{logodds} specified by the user, either TRUE or FALSE.}
\item{xlab}{The x-axis label used for plotting}
\item{nominal_axis}{Whether or not this variable is treated as a nominal variable for plotting purposes (if it has more than 5 unique values, it's not).}
\item{N}{The number of rows in the inputted matrix or data frame}
\item{range_y}{If \code{y} is passed as an argument, the range of values. Otherwise the range of values in \code{apdps}.}
\item{Xamdp}{A matrix whose i-th row is the true X-vector associated with the observation in the i-th row of \code{apdps}. Used by the \code{plot} function to color by levels of a categorical variable.}
\item{pdp}{Friedman's partial dependence function}
}
\examples{
\dontrun{
require(randomForest)
require(MASS)
library(amdp)
data(Pima.te)
#fit a random forest
pima_rf = randomForest(type~., Pima.te)
#define a predict function that returns Prob(diabetes=Yes)
pima_predict = function(object, newdata){
preds = predict(object, newdata, type="prob")
col_idx = which(colnames(preds)=="Yes")
preds[,col_idx]
}
#get amdp object for the 'skin' predictor, but converted to logit.
pima_amdp = amdp(pima_rf, X = Pima.te[,1:7], predictor = "skin", predictfcn = pima_predict, logodds = T)
#plot only 10% of curves with quantiles, actual pdp, and original points.
plot(pima_amdp, x_quantile = T, plot_pdp = T, frac_to_plot = .1, centered=T)
}
}
|
/amdp/man/amdp.Rd
|
no_license
|
kapelner/advanced_marginal_dependence_plots
|
R
| false
| false
| 4,279
|
rd
|
\name{amdp}
\alias{amdp}
\title{
Create an amdp object.
}
\description{
Create amdps
}
\usage{
amdp(object, X, y, predictor, predictfcn, newdata, verbose = TRUE,
frac_to_build = 1, indices_to_build = NULL, num_grid_pts,
logodds = F, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{
A fitted model of any kind.
}
\item{X}{
Matrix or data frame of predictors (observations along rows). To the extent that \code{object} requires
predictors in a certain format, \code{X} must obey that format. Note that \code{X} does not include the response.
}
\item{y}{
Optional vector of responses of the dataset, used to return the range of observed values. Cannot be a factor. If missing, returns the range of all points in the amdp grid.
}
\item{predictor}{
Column number of \code{X} for which to calculate partial dependence. Alternatively \code{predictor} can be a string corresponding to a column name in \code{X}.
}
\item{predictfcn}{
Optional prediction function to use with \code{object}. Must accept arguments \code{object} and \code{newdata}. The software does minimal further checking -- it is up to the user to ensure that \code{predictfcn(object = object, newdata = X)} is supported. If not passed, the function looks for the default \code{predict} function associated with \code{class(object)}.
}
\item{newdata}{Optional data at which to compute the curves, used in place of \code{X} when supplied. (TODO: confirm the intended semantics with the package author.)}
\item{verbose}{
If \code{verbose} is \code{TRUE} we print updates.
}
\item{frac_to_build}{
Fraction of curves to build. If less than 1, the rows chosen correspond to equally distributed quantiles of \code{predictor}.
}
\item{indices_to_build}{
If specified, program builds objects from the rows specified in \code{indices_to_build}
}
\item{num_grid_pts}{
The number of points along \code{X[, predictor]} at which the program will predict. If unspecified, it will predict at all unique points. If the number of unique points is less than the number of grid points specified, the program will warn the user and the number of unique points will be used.
}
\item{logodds}{
If TRUE, the predicted values (which should be probabilities), are converted to logits
before the object is returned.
}
\item{\dots}{
Other parameters to be passed.
}
}
\value{If assigned, returns an object of class \code{amdp}. The object is a list with elements:\cr
\item{apdps}{An \code{nrow(X)} by \code{length(gridpts)} matrix where row i is observation i's marginal dependence curve.}
\item{gridpts}{A vector of the unique grid points at which all marginal curves are evaluated.}
\item{predictor}{The predictor used. If possible, returns the predictor's name; if not, returns column index in \code{X}.}
\item{xj}{The actual value of \code{X[, predictor]} for each observation.}
\item{actual_prediction}{The predicted value for each observation at its actual vector of predictors.}
\item{logodds}{The \code{logodds} specified by the user, either TRUE or FALSE.}
\item{xlab}{The x-axis label used for plotting}
\item{nominal_axis}{Whether or not this variable is treated as a nominal variable for plotting purposes (if it has more than 5 unique values, it's not).}
\item{N}{The number of rows in the inputted matrix or data frame}
\item{range_y}{If \code{y} is passed as an argument, the range of values. Otherwise the range of values in \code{apdps}.}
\item{Xamdp}{A matrix whose i-th row is the true X-vector associated with the observation in the i-th row of \code{apdps}. Used by the \code{plot} function to color by levels of a categorical variable.}
\item{pdp}{Friedman's partial dependence function}
}
\examples{
\dontrun{
require(randomForest)
require(MASS)
library(amdp)
data(Pima.te)
#fit a random forest
pima_rf = randomForest(type~., Pima.te)
#define a predict function that returns Prob(diabetes=Yes)
pima_predict = function(object, newdata){
preds = predict(object, newdata, type="prob")
col_idx = which(colnames(preds)=="Yes")
preds[,col_idx]
}
#get amdp object for the 'skin' predictor, but converted to logit.
pima_amdp = amdp(pima_rf, X = Pima.te[,1:7], predictor = "skin", predictfcn = pima_predict, logodds = T)
#plot only 10% of curves with quantiles, actual pdp, and original points.
plot(pima_amdp, x_quantile = T, plot_pdp = T, frac_to_plot = .1, centered=T)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidy-utils.R
\name{summarise_with_totals}
\alias{summarise_with_totals}
\title{Summarise a subgroup and create a summary row}
\usage{
summarise_with_totals(
.data,
...,
.groups = NULL,
.total = "Total",
.total_first = FALSE
)
}
\arguments{
\item{.data}{a dataframe}
\item{...}{the summarisation specification}
\item{.groups}{what to do with the grouping after summarisation (same as dplyr::summarise)}
\item{.total}{name of the total row which will be added into a factor list.}
\item{.total_first}{should the total be before or after the groups}
}
\value{
a summarised dataframe with the additional totals or group row
}
\description{
Summarise and include a total row, or a row including the summary for the whole group, into a factor list.
This looks and feels like a natural summarisation step, but applies the summarisation both to the
subgroups and to the data ungrouped by one level. The additional group result is included as a new row,
allowing for a natural grouped and ungrouped summarisation.
}
\examples{
library(tidyverse)
diamonds \%>\%
dplyr::group_by(color,cut) \%>\%
summarise_with_totals(
mpg = sprintf("\%1.1f \u00B1 \%1.1f", mean(price), sd(price)),
.total = "Overall"
)
}
|
/man/summarise_with_totals.Rd
|
permissive
|
terminological/ggrrr
|
R
| false
| true
| 1,294
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidy-utils.R
\name{summarise_with_totals}
\alias{summarise_with_totals}
\title{Summarise a subgroup and create a summary row}
\usage{
summarise_with_totals(
.data,
...,
.groups = NULL,
.total = "Total",
.total_first = FALSE
)
}
\arguments{
\item{.data}{a dataframe}
\item{...}{the summarisation specification}
\item{.groups}{what to do with the grouping after summarisation (same as dplyr::summarise)}
\item{.total}{name of the total row which will be added into a factor list.}
\item{.total_first}{should the total be before or after the groups}
}
\value{
a summarised dataframe with the additional totals or group row
}
\description{
Summarise and include a total row, or a row including the summary for the whole group, into a factor list.
This looks and feels like a natural summarisation step, but applies the summarisation both to the
subgroups and to the data ungrouped by one level. The additional group result is included as a new row,
allowing for a natural grouped and ungrouped summarisation.
}
\examples{
library(tidyverse)
diamonds \%>\%
dplyr::group_by(color,cut) \%>\%
summarise_with_totals(
mpg = sprintf("\%1.1f \u00B1 \%1.1f", mean(price), sd(price)),
.total = "Overall"
)
}
|
#1. Interaction network
##adjacency matrix A and the normalized adjacency matrix M
normalizedMatrix = function(A_Matrix){
  # Column-normalize the adjacency matrix so every column sums to 1.
  # The tiny offset (1e-16) guards against division by zero for
  # isolated nodes whose column total is 0.
  column_totals = apply(A_Matrix, 2, sum)
  t(t(A_Matrix) / (column_totals + 1e-16))
}
##the diffused matrix D
diffusedMatrix = function(M_Matrix, beta){
  # Closed-form random walk with restart on the normalized network:
  #   D = beta * (I - (1 - beta) * M)^-1
  identity_mat = diag(1, dim(M_Matrix)[1], dim(M_Matrix)[2])
  beta * solve(identity_mat - (1 - beta) * M_Matrix)
}
##we choose β to minimize |sum(D_ij [M_ij≠0])-sum(D_ij [M_ij=0])|
##we choose beta to minimize |sum(D_ij [M_ij!=0]) - sum(D_ij [M_ij==0])|
difference = function(beta, A_Matrix = AdjacencyMatrix){
  # Objective used by chooseBeta(): the absolute gap between the diffusion
  # mass landing on edges vs. non-edges of the original adjacency matrix.
  M_Matrix = normalizedMatrix(A_Matrix)
  D_Matrix = diffusedMatrix(M_Matrix, beta)
  restarted = sum(D_Matrix[A_Matrix != 0])
  # BUG FIX: original used `D_Matrix[A_Matrix = 0]` -- single `=` is
  # named-argument syntax inside `[`, which silently selected nothing and
  # made `diffused` 0. `==` is the intended elementwise comparison.
  diffused = sum(D_Matrix[A_Matrix == 0])
  return(abs(restarted - diffused))
}
chooseBeta = function(){
  # One-dimensional search for the restart probability beta in [0.1, 0.9]
  # minimizing difference(); relies on the globally defined `difference`
  # objective (and its AdjacencyMatrix default).
  optimize(difference, interval = c(0.1, 0.9))$minimum
}
#2. Mutation data
##get the mutation score of each gene in each individual sample
Mutation = function(SNP, Network){
  # Map per-sample mutation counts onto the network's gene universe,
  # drop samples with no mapped mutations, and column-normalize so each
  # remaining sample's scores sum to 1.
  network_genes = rownames(Network)
  score = matrix(0, dim(Network)[1], dim(SNP)[2],
                 dimnames = list(network_genes, colnames(SNP)))
  shared_genes = intersect(network_genes, rownames(SNP))
  score[shared_genes, ] = as.matrix(SNP[shared_genes, ])
  # Samples whose (signed) column total is zero carry no usable signal.
  empty_samples = colnames(score)[apply(score, 2, sum) == 0]
  score = score[, setdiff(colnames(score), empty_samples)]
  score = abs(score)
  t(t(score) / apply(score, 2, sum))
}
#3. Expression data
##calculate the Absolute of Log2 Fold-Change (ALFC) of gene expression between the paired tumor and normal samples
TumorNormalExp = function(TumorExpression, NormalExpression){
  # Log2 fold-change of tumor vs. normal expression per gene per sample.
  # Tumors with a paired normal sample are compared to that pair; tumors
  # without one are compared to the mean expression across all normals.
  ## get gene expression for tumors with a paired normal
  matchedSample = intersect(colnames(TumorExpression), colnames(NormalExpression))
  matchedDiffExpression = log2(TumorExpression[, matchedSample] / NormalExpression[, matchedSample])
  ## get gene expression for tumors with no paired normal
  unmatchedSample = setdiff(colnames(TumorExpression), matchedSample)
  generalExpression = apply(NormalExpression, 1, mean)
  unmatchedDiffExpression = log2(TumorExpression[, unmatchedSample] / generalExpression)
  ## combine the expressions
  Expression <- cbind(matchedDiffExpression, unmatchedDiffExpression)
  # Zero out undefined or degenerate ratios: NA/NaN (e.g. 0/0) and +/-Inf
  # (e.g. x/0). FIX: the original compared numerics against the strings
  # "Inf"/"-Inf", relying on silent character coercion; compare against the
  # numeric Inf values instead (same result, no coercion).
  Expression[is.na(Expression)] = 0
  Expression[Expression == Inf] = 0
  Expression[Expression == -Inf] = 0
  return(Expression)
}
##get the expression score of each gene in each individual sample
RnaExpression = function(Expression, Network){
  # Project per-sample expression fold-changes onto the network's gene set,
  # take magnitudes, and normalize each sample (column) to sum to 1.
  genes = rownames(Network)
  score = matrix(0, dim(Network)[1], dim(Expression)[2],
                 dimnames = list(genes, colnames(Expression)))
  common = intersect(genes, rownames(Expression))
  score[common, ] = as.matrix(Expression[common, ])
  score = abs(score)
  t(t(score) / apply(score, 2, sum))
}
#4. the relevance scores
Relevance = function(D_Matrix, NormalizedScore){
  # Relevance of each gene per sample: diffuse the normalized scores
  # (mutation or expression) over the network via one matrix product.
  diffused_scores = D_Matrix %*% NormalizedScore
  diffused_scores
}
#5. the pmin of expression relevance scores and mutation relevance score
# Combine mutation and expression relevance scores per gene per sample
# by taking the elementwise minimum (a gene must score well on BOTH data
# types). Genes that are (near-)zero across samples in either matrix are
# zeroed/removed first.
# NOTE(review): assumes both matrices share the same row (gene) order and
# overlapping sample columns -- confirm with upstream Relevance() callers.
MinRelevance = function(MutationRelevanceScores, ExpressionRelevanceScores){
##the relevance score of gene is 0 in all samples
# Zero out genes whose mutation relevance is 0 in more than 95% of samples.
for (i in 1:dim(MutationRelevanceScores)[1]){
if(sum(MutationRelevanceScores[i, ]==0)/dim(MutationRelevanceScores)[2] >0.95){
MutationRelevanceScores[i, ] = 0
}
}
# Same 95% sparsity filter applied to the expression relevance matrix.
for (i in 1:dim(ExpressionRelevanceScores)[1]){
if(sum(ExpressionRelevanceScores[i, ]==0)/dim(ExpressionRelevanceScores)[2] >0.95){
ExpressionRelevanceScores[i, ] = 0
}
}
# Genes zeroed-out in either matrix are dropped from both (rows kept in
# the order of MutationRelevanceScores' rownames).
GeneZero = union(rownames(MutationRelevanceScores[(apply(MutationRelevanceScores,1,sum) == 0),]), rownames(ExpressionRelevanceScores[(apply(ExpressionRelevanceScores,1,sum) == 0),]))
ExpressionRelevanceScores = ExpressionRelevanceScores[setdiff(rownames(MutationRelevanceScores), GeneZero), ]
MutationRelevanceScores = MutationRelevanceScores[setdiff(rownames(MutationRelevanceScores), GeneZero), ]
##final relevance scores
# Restrict to samples present in both matrices; elementwise (parallel) min.
samesamples = intersect(colnames(MutationRelevanceScores),colnames(ExpressionRelevanceScores))
MinRelevanceScores = pmin(ExpressionRelevanceScores[ ,samesamples], MutationRelevanceScores[ ,samesamples])
return(MinRelevanceScores)
}
#6.aggregate gene ranking in each individual sample to a robust population-level gene ranking
RankSum = function(RelevanceScores){
  # Aggregate per-sample gene rankings into one population-level ranking.
  # Within each sample, the highest relevance score gets rank 1 (hence the
  # (n + 1) - rank() flip); ranks are then summed across samples, so a
  # LOWER total means a more consistently top-ranked gene.
  n_genes = dim(RelevanceScores)[1]
  n_samples = dim(RelevanceScores)[2]
  geneRank = matrix(0, n_genes, n_samples,
                    dimnames = list(rownames(RelevanceScores), colnames(RelevanceScores)))
  for (j in seq_len(n_samples)) {
    geneRank[, j] = (n_genes + 1) - rank(RelevanceScores[, j])
  }
  sort(rowSums(geneRank))
}
#7.Result
# Top-level driver: rank candidate cancer genes by combining network
# diffusion with mutation and/or expression data.
#
# Network: "AdjacencyMatrix" (default) or "DiffusedMatrix" to use the
#   corresponding GLOBAL matrix of that name, or a user-supplied adjacency
#   matrix (beta is then re-estimated via chooseBeta(), which is slow).
# SNP / TumorExpression / NormalExpression: data matrices, or FALSE when
#   that data type is unavailable (at least one of SNP / expression must
#   be provided, otherwise RankResult is never assigned and this errors).
# CGC: two-plus-column table of known cancer genes (gene names in col 1);
#   defaults to the global KnownGenes.
# beta: restart probability used when diffusing the default AdjacencyMatrix.
# Returns a character matrix with one row per gene: name, rank-sum score,
# CGC membership flag, and (when SNP data are given) mutation frequency.
MinNetRank = function(Network = "AdjacencyMatrix", SNP = FALSE, TumorExpression = FALSE, NormalExpression = FALSE, CGC = KnownGenes, beta = 0.4841825){
# Branch 1: use the precomputed global DiffusedMatrix directly.
if (Network == "DiffusedMatrix"){
Network = DiffusedMatrix
print("Using DiffusedMatrix in MinNetWork.")
if (identical(SNP, FALSE) & identical(TumorExpression, FALSE)){
print("You should provide Mutation data or Expression data.")
}
else if(identical(SNP, FALSE)){
print("Mutation data are not provided, Only using Expression data")
RankResult = RankSum(Relevance(Network, RnaExpression(TumorNormalExp(TumorExpression, NormalExpression), Network)))
}
else if(identical(TumorExpression, FALSE)){
print("Expression data are not provided, Only using Mutation data")
RankResult = RankSum(Relevance(Network, Mutation(SNP, Network)))
}
else{
print("Using Mutation data and Expression data.")
RankResult = RankSum(MinRelevance(Relevance(Network, Mutation(SNP, Network)), Relevance(Network, RnaExpression(TumorNormalExp(TumorExpression, NormalExpression), Network))))
}
}
# Branch 2: diffuse the global AdjacencyMatrix with the supplied beta.
else if (Network == "AdjacencyMatrix"){
Network = AdjacencyMatrix
print("Using AdjacencyMatrix in MinNetWork.")
if (identical(SNP, FALSE) & identical(TumorExpression, FALSE)){
print("You should provide Mutation data or Expression data.")
}
else if(identical(SNP, FALSE)){
print("Mutation data are not provided, Only using Expression data")
RankResult = RankSum(Relevance(diffusedMatrix(normalizedMatrix(Network), beta), RnaExpression(TumorNormalExp(TumorExpression, NormalExpression), Network)))
}
else if(identical(TumorExpression, FALSE)){
print("Expression data are not provided, Only using Mutation data")
RankResult = RankSum(Relevance(diffusedMatrix(normalizedMatrix(Network), beta), Mutation(SNP, Network)))
}
else{
print("Using Mutation data and Expression data.")
RankResult = RankSum(MinRelevance(Relevance(diffusedMatrix(normalizedMatrix(Network), beta), Mutation(SNP, Network)), Relevance(diffusedMatrix(normalizedMatrix(Network), beta), RnaExpression(TumorNormalExp(TumorExpression, NormalExpression), Network))))
}
}
# Branch 3: user-supplied network -- beta must first be re-optimized.
else{
print("Using your own network.")
print("Canculate the restart probability of beta and this step needs more time.")
beta = chooseBeta()
print(paste0("the beta is ", beta))
if (identical(SNP, FALSE) & identical(TumorExpression, FALSE)){
print("You should provide Mutation data or Expression data.")
}
else if(identical(SNP, FALSE)){
print("Mutation data are not provided, Only using Expression data")
RankResult = RankSum(Relevance(diffusedMatrix(normalizedMatrix(Network), beta), RnaExpression(TumorNormalExp(TumorExpression, NormalExpression), Network)))
}
else if(identical(TumorExpression, FALSE)){
print("Expression data are not provided, Only using Mutation data")
RankResult = RankSum(Relevance(diffusedMatrix(normalizedMatrix(Network), beta), Mutation(SNP, Network)))
}
else{
print("Using Mutation data and Expression data.")
RankResult = RankSum(MinRelevance(Relevance(diffusedMatrix(normalizedMatrix(Network), beta), Mutation(SNP, Network)), Relevance(diffusedMatrix(normalizedMatrix(Network), beta), RnaExpression(TumorNormalExp(TumorExpression, NormalExpression), Network))))
}
}
# Assemble the output table. Without SNP data there is no mutation
# frequency column; with it, a fourth "Freq" column is added.
if(identical(SNP, FALSE)){
MinNetRankResult = matrix(0, length(RankResult), 3)
rownames(MinNetRankResult) = names(RankResult)
colnames(MinNetRankResult) = c("Gene", "Score", "KnownCancerGenes")
MinNetRankResult[, 1] = names(RankResult)
MinNetRankResult[, 2] = RankResult
#known cancer genes
MinNetRankResult[intersect(rownames(MinNetRankResult), CGC[,1]), 3] = "CGC"
MinNetRankResult[setdiff(rownames(MinNetRankResult), CGC[,1]), 3] = "-"
}
else {
MinNetRankResult = matrix(0, length(RankResult), 4)
rownames(MinNetRankResult) = names(RankResult)
colnames(MinNetRankResult) = c("Gene", "Score", "KnownCancerGenes", "Freq")
MinNetRankResult[, 1] = names(RankResult)
MinNetRankResult[, 2] = RankResult
#known cancer genes
MinNetRankResult[intersect(rownames(MinNetRankResult), CGC[,1]), 3] = "CGC"
MinNetRankResult[setdiff(rownames(MinNetRankResult), CGC[,1]), 3] = "-"
# mutation frequency
# Fraction of samples in which each gene carries a mutation.
MinNetRankResult[intersect(rownames(MinNetRankResult), rownames(SNP)), 4] = apply(SNP,1,sum)[intersect(rownames(MinNetRankResult), rownames(SNP))]
MinNetRankResult[,4] = as.numeric(MinNetRankResult[,4])/dim(SNP)[2]
}
return(MinNetRankResult)
}
|
/R/MinNetRank.R
|
no_license
|
weitinging/MinNetRank
|
R
| false
| false
| 9,856
|
r
|
#1. Interaction network
##adjacency matrix A and the normalized adjacency matrix M
normalizedMatrix = function(A_Matrix) {
  # Column-normalize the adjacency matrix A into the transition matrix M:
  # each column is divided by its sum. The tiny 1e-16 offset keeps
  # all-zero columns from producing 0/0 = NaN.
  col_totals <- colSums(A_Matrix)
  sweep(A_Matrix, 2, col_totals + 1e-16, "/")
}
##the diffused matrix D
diffusedMatrix = function(M_Matrix, beta) {
  # Closed-form random-walk-with-restart diffusion:
  #   D = beta * (I - (1 - beta) * M)^{-1}
  # where beta is the restart probability.
  identity_mat <- diag(1, nrow(M_Matrix), ncol(M_Matrix))
  beta * solve(identity_mat - (1 - beta) * M_Matrix)
}
##we choose β to minimize |sum(D_ij [M_ij≠0])-sum(D_ij [M_ij=0])|
difference = function(beta, A_Matrix = AdjacencyMatrix){
  # Objective used by chooseBeta(): the absolute gap between the diffusion
  # mass landing on network edges and the mass landing on non-edges.
  M_Matrix = normalizedMatrix(A_Matrix)
  D_Matrix = diffusedMatrix(M_Matrix, beta)
  restarted = sum(D_Matrix[A_Matrix != 0])
  # BUG FIX: the original read D_Matrix[A_Matrix = 0]. Inside `[`, a single
  # `=` is parsed as a named argument, which effectively indexes with 0 and
  # returns numeric(0), so `diffused` was always 0 and the optimization only
  # minimized `restarted`. `==` restores the intended logical mask over
  # non-edge entries.
  diffused = sum(D_Matrix[A_Matrix == 0])
  return(abs(restarted - diffused))
}
chooseBeta = function() {
  # One-dimensional search for the restart probability that minimizes
  # difference() over beta in [0.1, 0.9].
  # Note: difference() uses its default A_Matrix (the global AdjacencyMatrix).
  fit <- optimize(difference, interval = c(0.1, 0.9))
  fit$minimum
}
#2. Mutation data
##get the mutation score of each gene in each individual sample
Mutation = function(SNP, Network) {
  # Per-sample mutation scores projected onto the network gene universe.
  # Genes absent from SNP get score 0; samples with no mutations are dropped;
  # each remaining sample's absolute scores are normalized to sum to 1.
  scores <- matrix(0, nrow(Network), ncol(SNP),
                   dimnames = list(rownames(Network), colnames(SNP)))
  shared_genes <- intersect(rownames(scores), rownames(SNP))
  scores[shared_genes, ] <- as.matrix(SNP[shared_genes, ])
  # Drop samples whose column sum is zero (no mutation signal at all).
  empty_samples <- colnames(scores)[colSums(scores) == 0]
  scores <- scores[, setdiff(colnames(scores), empty_samples)]
  scores <- abs(scores)
  sweep(scores, 2, colSums(scores), "/")
}
#3. Expresion data
##calculat the Absolute of Log2 Fold-Change (ALFC) of gene expression between the paired tumor and normal samples
TumorNormalExp = function(TumorExpression, NormalExpression ){
  # Log2 fold change of tumor vs. normal expression, per gene and sample.
  # Tumors with a paired normal (same column name) are compared to that
  # normal; unpaired tumors are compared to the mean normal profile.
  # NaN (0/0) and +/-Inf (division by 0) are mapped to 0.
  ## tumors with a paired normal
  matchedSample = intersect(colnames(TumorExpression), colnames(NormalExpression))
  # BUG FIX: drop = FALSE keeps the subsets as matrices even when exactly one
  # sample matches; the original dropped to an unnamed vector and lost the
  # sample column names in the cbind below.
  matchedDiffExpression = log2(TumorExpression[, matchedSample, drop = FALSE] /
                                 NormalExpression[, matchedSample, drop = FALSE])
  ## tumors with no paired normal: compare against the mean normal profile
  unmatchedSample = setdiff(colnames(TumorExpression), matchedSample)
  generalExpression = apply(NormalExpression, 1, mean)
  # Matrix / vector division recycles column-wise, i.e. per gene (rows).
  unmatchedDiffExpression = log2(TumorExpression[, unmatchedSample, drop = FALSE] / generalExpression)
  ## combine and clean up non-finite values
  Expression <- cbind(matchedDiffExpression, unmatchedDiffExpression)
  Expression[is.na(Expression)] = 0
  # Replaces the original character comparisons (== "Inf" / == "-Inf"),
  # which coerced the whole matrix to character; numeric comparison is
  # equivalent here because all NA/NaN were zeroed above.
  Expression[Expression == Inf | Expression == -Inf] = 0
  return(Expression)
}
##get the expression score of each gene in each individual sample
RnaExpression = function(Expression, Network) {
  # Per-sample expression scores projected onto the network gene universe.
  # Genes absent from Expression get 0; each sample's absolute scores are
  # normalized to sum to 1.
  scores <- matrix(0, nrow(Network), ncol(Expression),
                   dimnames = list(rownames(Network), colnames(Expression)))
  shared_genes <- intersect(rownames(scores), rownames(Expression))
  scores[shared_genes, ] <- as.matrix(Expression[shared_genes, ])
  scores <- abs(scores)
  sweep(scores, 2, colSums(scores), "/")
}
#4. the relevance scores
Relevance = function(D_Matrix, NormalizedScore) {
  ## Relevance = network-diffused scores: each sample's normalized score
  ## vector is smoothed over the network via the diffusion matrix D.
  D_Matrix %*% NormalizedScore
}
#5. the pmin of expression relevance scores and mutation relevance score
MinRelevance = function(MutationRelevanceScores, ExpressionRelevanceScores){
  # Combine mutation and expression relevance by taking, per gene and sample,
  # the minimum of the two scores. Genes that are (almost) all-zero in either
  # matrix are removed first.
  ## zero out rows where more than 95% of samples are 0 (vectorized form of
  ## the original per-row loops; assumes no NA in the score matrices)
  mut_zero_frac <- rowMeans(MutationRelevanceScores == 0)
  MutationRelevanceScores[mut_zero_frac > 0.95, ] <- 0
  exp_zero_frac <- rowMeans(ExpressionRelevanceScores == 0)
  ExpressionRelevanceScores[exp_zero_frac > 0.95, ] <- 0
  # BUG FIX: the original used rownames(x[mask, ]), which returns NULL when
  # exactly one row matches (the subset drops to a vector), silently keeping
  # zero genes. rownames(x)[mask] is drop-proof.
  GeneZero = union(
    rownames(MutationRelevanceScores)[rowSums(MutationRelevanceScores) == 0],
    rownames(ExpressionRelevanceScores)[rowSums(ExpressionRelevanceScores) == 0])
  keep = setdiff(rownames(MutationRelevanceScores), GeneZero)
  # drop = FALSE keeps single-gene / single-sample results as matrices.
  ExpressionRelevanceScores = ExpressionRelevanceScores[keep, , drop = FALSE]
  MutationRelevanceScores = MutationRelevanceScores[keep, , drop = FALSE]
  ## final relevance scores: element-wise minimum over the shared samples
  samesamples = intersect(colnames(MutationRelevanceScores), colnames(ExpressionRelevanceScores))
  pmin(ExpressionRelevanceScores[, samesamples, drop = FALSE],
       MutationRelevanceScores[, samesamples, drop = FALSE])
}
#6.aggregate gene ranking in each individual sample to a robust population-level gene ranking
RankSum = function(RelevanceScores) {
  # Aggregate per-sample gene rankings into one population-level ranking.
  # Within each sample, the highest-scoring gene gets rank 1 (rank() is
  # ascending, so it is flipped); the per-sample ranks are then summed and
  # sorted ascending, so the best overall genes come first.
  n_genes <- nrow(RelevanceScores)
  geneRank <- apply(RelevanceScores, 2, function(scores) (n_genes + 1) - rank(scores))
  rownames(geneRank) <- rownames(RelevanceScores)
  colnames(geneRank) <- colnames(RelevanceScores)
  sort(rowSums(geneRank))
}
#7.Result
# Main entry point: ranks candidate cancer genes by diffusing per-sample
# mutation (SNP) and/or expression scores over an interaction network and
# aggregating the per-sample rankings with RankSum().
#
# Arguments:
#   Network          - "AdjacencyMatrix" or "DiffusedMatrix" to use the
#                      globals of those names, or the user's own network.
#   SNP              - gene x sample mutation matrix, or FALSE if absent.
#   TumorExpression, NormalExpression - gene x sample expression matrices,
#                      or FALSE if absent.
#   CGC              - known cancer gene table; first column = gene symbols.
#   beta             - restart probability for the diffusion.
# Returns: a character matrix with one row per gene: Gene, Score (rank sum),
#   KnownCancerGenes ("CGC"/"-"), and mutation Freq when SNP is provided.
#
# NOTE(review): if both SNP and TumorExpression are FALSE, RankResult is
# never assigned and matrix(0, length(RankResult), 3) below fails with
# "object 'RankResult' not found" — an early stop() would be clearer.
# NOTE(review): passing a matrix as Network makes `Network == "DiffusedMatrix"`
# an element-wise comparison; a length > 1 if() condition is an error on
# R >= 4.2 — confirm a character keyword or rework the dispatch.
MinNetRank = function(Network = "AdjacencyMatrix", SNP = FALSE, TumorExpression = FALSE, NormalExpression = FALSE, CGC = KnownGenes, beta = 0.4841825){
# Branch 1: a pre-computed diffusion matrix (global DiffusedMatrix) is used
# directly, so no normalization or matrix inversion is needed here.
if (Network == "DiffusedMatrix"){
Network = DiffusedMatrix
print("Using DiffusedMatrix in MinNetWork.")
if (identical(SNP, FALSE) & identical(TumorExpression, FALSE)){
print("You should provide Mutation data or Expression data.")
}
else if(identical(SNP, FALSE)){
print("Mutation data are not provided, Only using Expression data")
RankResult = RankSum(Relevance(Network, RnaExpression(TumorNormalExp(TumorExpression, NormalExpression), Network)))
}
else if(identical(TumorExpression, FALSE)){
print("Expression data are not provided, Only using Mutation data")
RankResult = RankSum(Relevance(Network, Mutation(SNP, Network)))
}
else{
print("Using Mutation data and Expression data.")
# Both data types: combine via the per-gene/per-sample minimum relevance.
RankResult = RankSum(MinRelevance(Relevance(Network, Mutation(SNP, Network)), Relevance(Network, RnaExpression(TumorNormalExp(TumorExpression, NormalExpression), Network))))
}
}
# Branch 2: packaged adjacency matrix; the diffusion matrix is computed on
# the fly with the (default) beta.
else if (Network == "AdjacencyMatrix"){
Network = AdjacencyMatrix
print("Using AdjacencyMatrix in MinNetWork.")
if (identical(SNP, FALSE) & identical(TumorExpression, FALSE)){
print("You should provide Mutation data or Expression data.")
}
else if(identical(SNP, FALSE)){
print("Mutation data are not provided, Only using Expression data")
RankResult = RankSum(Relevance(diffusedMatrix(normalizedMatrix(Network), beta), RnaExpression(TumorNormalExp(TumorExpression, NormalExpression), Network)))
}
else if(identical(TumorExpression, FALSE)){
print("Expression data are not provided, Only using Mutation data")
RankResult = RankSum(Relevance(diffusedMatrix(normalizedMatrix(Network), beta), Mutation(SNP, Network)))
}
else{
print("Using Mutation data and Expression data.")
RankResult = RankSum(MinRelevance(Relevance(diffusedMatrix(normalizedMatrix(Network), beta), Mutation(SNP, Network)), Relevance(diffusedMatrix(normalizedMatrix(Network), beta), RnaExpression(TumorNormalExp(TumorExpression, NormalExpression), Network))))
}
}
# Branch 3: user-supplied network; beta is re-estimated by line search.
else{
print("Using your own network.")
print("Canculate the restart probability of beta and this step needs more time.")
# NOTE(review): chooseBeta() optimizes difference(), whose default A_Matrix
# is the *global* AdjacencyMatrix, not the Network passed in here — confirm
# whether beta should instead be fitted on the user's network.
beta = chooseBeta()
print(paste0("the beta is ", beta))
if (identical(SNP, FALSE) & identical(TumorExpression, FALSE)){
print("You should provide Mutation data or Expression data.")
}
else if(identical(SNP, FALSE)){
print("Mutation data are not provided, Only using Expression data")
RankResult = RankSum(Relevance(diffusedMatrix(normalizedMatrix(Network), beta), RnaExpression(TumorNormalExp(TumorExpression, NormalExpression), Network)))
}
else if(identical(TumorExpression, FALSE)){
print("Expression data are not provided, Only using Mutation data")
RankResult = RankSum(Relevance(diffusedMatrix(normalizedMatrix(Network), beta), Mutation(SNP, Network)))
}
else{
print("Using Mutation data and Expression data.")
RankResult = RankSum(MinRelevance(Relevance(diffusedMatrix(normalizedMatrix(Network), beta), Mutation(SNP, Network)), Relevance(diffusedMatrix(normalizedMatrix(Network), beta), RnaExpression(TumorNormalExp(TumorExpression, NormalExpression), Network))))
}
}
# Assemble the result table. A character matrix is used because gene names
# and the "CGC"/"-" flags are mixed in with the numeric scores.
if(identical(SNP, FALSE)){
MinNetRankResult = matrix(0, length(RankResult), 3)
rownames(MinNetRankResult) = names(RankResult)
colnames(MinNetRankResult) = c("Gene", "Score", "KnownCancerGenes")
MinNetRankResult[, 1] = names(RankResult)
MinNetRankResult[, 2] = RankResult
# known cancer genes: flag genes present in the first column of CGC
MinNetRankResult[intersect(rownames(MinNetRankResult), CGC[,1]), 3] = "CGC"
MinNetRankResult[setdiff(rownames(MinNetRankResult), CGC[,1]), 3] = "-"
}
else {
MinNetRankResult = matrix(0, length(RankResult), 4)
rownames(MinNetRankResult) = names(RankResult)
colnames(MinNetRankResult) = c("Gene", "Score", "KnownCancerGenes", "Freq")
MinNetRankResult[, 1] = names(RankResult)
MinNetRankResult[, 2] = RankResult
# known cancer genes: flag genes present in the first column of CGC
MinNetRankResult[intersect(rownames(MinNetRankResult), CGC[,1]), 3] = "CGC"
MinNetRankResult[setdiff(rownames(MinNetRankResult), CGC[,1]), 3] = "-"
# mutation frequency: fraction of samples in which each gene is mutated
MinNetRankResult[intersect(rownames(MinNetRankResult), rownames(SNP)), 4] = apply(SNP,1,sum)[intersect(rownames(MinNetRankResult), rownames(SNP))]
MinNetRankResult[,4] = as.numeric(MinNetRankResult[,4])/dim(SNP)[2]
}
return(MinNetRankResult)
}
|
# NOTE(review): setwd() hard-codes a user-specific path; the script will only
# run on this machine/layout. Prefer project-relative paths.
setwd("~/Acads/SS 2018-2019/Stat 197 DS/specdata")
library(data.table)
### PART 1 ### ----
pollutantmean <- function(directory, pollutant, id= 1:332){
  # Mean of one pollutant column across the monitor files selected by `id`.
  #
  # Args:
  #   directory - folder holding the monitor CSV files ("001.csv", ...).
  #   pollutant - name of the column to average (e.g. "sulfate").
  #   id        - positions of the files (in list.files() order) to include.
  # Returns: the mean of all pooled values with NA removed.
  filenames <- list.files(directory)  # files in the directory, sorted
  # Collect the pollutant column from each requested file. Using
  # lapply + unlist avoids the original O(n^2) pattern of growing a
  # vector with c() inside a loop.
  pollutants <- unlist(lapply(id, function(i) {
    filepath <- paste(directory, "/", filenames[i], sep = "")
    data <- read.csv(filepath, header = TRUE)
    data[, pollutant]
  }))
  # NA values are removed before averaging, as in the original.
  mean(pollutants, na.rm = TRUE)
}
# Driver calls below assume the hard-coded specdata directory exists locally.
#example
pollutantmean("~/Acads/SS 2018-2019/Stat 197 DS/specdata", "sulfate", 1:10)
pollutantmean("~/Acads/SS 2018-2019/Stat 197 DS/specdata", "nitrate", 70:72)
#quiz answers (first two repeat the examples above)
pollutantmean("~/Acads/SS 2018-2019/Stat 197 DS/specdata", "sulfate", 1:10)
pollutantmean("~/Acads/SS 2018-2019/Stat 197 DS/specdata", "nitrate", 70:72)
pollutantmean("~/Acads/SS 2018-2019/Stat 197 DS/specdata", "sulfate", 34)
pollutantmean("~/Acads/SS 2018-2019/Stat 197 DS/specdata", "nitrate")
### PART 2 ### ----
corr <- function(directory, threshold=0){
  # Correlation between nitrate and sulfate, per monitor file, restricted to
  # files with at least `threshold` complete (NA-free) rows.
  #
  # Args:
  #   directory - folder holding the monitor CSV files.
  #   threshold - minimum number of complete cases required per file.
  # Returns: numeric vector of correlations (NULL when no file qualifies,
  #   matching the original's behavior).
  filenames <- list.files(directory)
  # Generalized from the original hard-coded 1:332 to however many files the
  # directory actually contains; identical on the canonical 332-file dataset.
  cors <- lapply(filenames, function(fname) {
    data <- read.csv(paste(directory, "/", fname, sep = ""), header = TRUE)
    completeCases <- data[complete.cases(data), ]
    if (nrow(completeCases) >= threshold) {
      cor(completeCases$nitrate, completeCases$sulfate)
    } else {
      NULL  # skipped files contribute nothing after unlist()
    }
  })
  unlist(cors)
}
# Driver calls below assume the hard-coded specdata directory exists locally.
#example
cr <- corr("~/Acads/SS 2018-2019/Stat 197 DS/specdata")
cr
#quiz: seeded random samples of the sorted correlations
cr <- corr("~/Acads/SS 2018-2019/Stat 197 DS/specdata")
cr <- sort(cr)
set.seed(868)
out <- round(cr[sample(length(cr), 5)], 4)
print(out)
cr <- corr("~/Acads/SS 2018-2019/Stat 197 DS/specdata", 129)
cr <- sort(cr)
n <- length(cr)
set.seed(197)
out <- c(n, round(cr[sample(n, 5)], 4))
print(out)
# count of monitors passing a high threshold, then list for a lower one
cr <- corr("~/Acads/SS 2018-2019/Stat 197 DS/specdata", 2000)
n <- length(cr)
cr <- corr("~/Acads/SS 2018-2019/Stat 197 DS/specdata", 1000)
cr <- sort(cr)
print(c(n, round(cr, 4)))
### PART 3 ### ----
complete <- function(directory, id= 1:332){
  # Number of complete (NA-free) observations in each requested monitor file.
  #
  # Args:
  #   directory - folder holding the monitor CSV files.
  #   id        - positions of the files (in list.files() order) to inspect.
  # Returns: data.frame with columns `id` and `nobs`, one row per id, in the
  #   order given.
  filenames <- list.files(directory)
  # vapply replaces the original pattern of growing two vectors with c()
  # inside the loop; integer(1) pins the expected per-file result type.
  nobss <- vapply(id, function(i) {
    data <- read.csv(paste(directory, "/", filenames[i], sep = ""), header = TRUE)
    nrow(data[complete.cases(data), ])
  }, integer(1))
  data.frame(id = id, nobs = nobss)
}
# Driver calls below assume the hard-coded specdata directory exists locally.
#example
complete("~/Acads/SS 2018-2019/Stat 197 DS/specdata",c(1:15))
complete("~/Acads/SS 2018-2019/Stat 197 DS/specdata",c(1,4,8,5,12))
#quiz
cc <- complete("~/Acads/SS 2018-2019/Stat 197 DS/specdata", c(6, 10, 20, 34, 100, 200, 310))
print(cc$nobs)
cc <- complete("~/Acads/SS 2018-2019/Stat 197 DS/specdata", 54)
print(cc$nobs)
# seeded random sample of complete-case counts over all monitors, reversed
set.seed(42)
cc <- complete("~/Acads/SS 2018-2019/Stat 197 DS/specdata", 332:1)
use <- sample(332, 10)
print(cc[use, "nobs"])
|
/project_2.r
|
no_license
|
dnllmnll/Stat197DataScience
|
R
| false
| false
| 4,414
|
r
|
# NOTE(review): setwd() hard-codes a user-specific path; the script will only
# run on this machine/layout. Prefer project-relative paths.
setwd("~/Acads/SS 2018-2019/Stat 197 DS/specdata")
library(data.table)
### PART 1 ### ----
pollutantmean <- function(directory, pollutant, id= 1:332){
  # Mean of one pollutant column across the monitor files selected by `id`.
  #
  # Args:
  #   directory - folder holding the monitor CSV files ("001.csv", ...).
  #   pollutant - name of the column to average (e.g. "sulfate").
  #   id        - positions of the files (in list.files() order) to include.
  # Returns: the mean of all pooled values with NA removed.
  filenames <- list.files(directory)  # files in the directory, sorted
  # Collect the pollutant column from each requested file. Using
  # lapply + unlist avoids the original O(n^2) pattern of growing a
  # vector with c() inside a loop.
  pollutants <- unlist(lapply(id, function(i) {
    filepath <- paste(directory, "/", filenames[i], sep = "")
    data <- read.csv(filepath, header = TRUE)
    data[, pollutant]
  }))
  # NA values are removed before averaging, as in the original.
  mean(pollutants, na.rm = TRUE)
}
# Driver calls below assume the hard-coded specdata directory exists locally.
#example
pollutantmean("~/Acads/SS 2018-2019/Stat 197 DS/specdata", "sulfate", 1:10)
pollutantmean("~/Acads/SS 2018-2019/Stat 197 DS/specdata", "nitrate", 70:72)
#quiz answers (first two repeat the examples above)
pollutantmean("~/Acads/SS 2018-2019/Stat 197 DS/specdata", "sulfate", 1:10)
pollutantmean("~/Acads/SS 2018-2019/Stat 197 DS/specdata", "nitrate", 70:72)
pollutantmean("~/Acads/SS 2018-2019/Stat 197 DS/specdata", "sulfate", 34)
pollutantmean("~/Acads/SS 2018-2019/Stat 197 DS/specdata", "nitrate")
### PART 2 ### ----
corr <- function(directory, threshold=0){
  # Correlation between nitrate and sulfate, per monitor file, restricted to
  # files with at least `threshold` complete (NA-free) rows.
  #
  # Args:
  #   directory - folder holding the monitor CSV files.
  #   threshold - minimum number of complete cases required per file.
  # Returns: numeric vector of correlations (NULL when no file qualifies,
  #   matching the original's behavior).
  filenames <- list.files(directory)
  # Generalized from the original hard-coded 1:332 to however many files the
  # directory actually contains; identical on the canonical 332-file dataset.
  cors <- lapply(filenames, function(fname) {
    data <- read.csv(paste(directory, "/", fname, sep = ""), header = TRUE)
    completeCases <- data[complete.cases(data), ]
    if (nrow(completeCases) >= threshold) {
      cor(completeCases$nitrate, completeCases$sulfate)
    } else {
      NULL  # skipped files contribute nothing after unlist()
    }
  })
  unlist(cors)
}
# Driver calls below assume the hard-coded specdata directory exists locally.
#example
cr <- corr("~/Acads/SS 2018-2019/Stat 197 DS/specdata")
cr
#quiz: seeded random samples of the sorted correlations
cr <- corr("~/Acads/SS 2018-2019/Stat 197 DS/specdata")
cr <- sort(cr)
set.seed(868)
out <- round(cr[sample(length(cr), 5)], 4)
print(out)
cr <- corr("~/Acads/SS 2018-2019/Stat 197 DS/specdata", 129)
cr <- sort(cr)
n <- length(cr)
set.seed(197)
out <- c(n, round(cr[sample(n, 5)], 4))
print(out)
# count of monitors passing a high threshold, then list for a lower one
cr <- corr("~/Acads/SS 2018-2019/Stat 197 DS/specdata", 2000)
n <- length(cr)
cr <- corr("~/Acads/SS 2018-2019/Stat 197 DS/specdata", 1000)
cr <- sort(cr)
print(c(n, round(cr, 4)))
### PART 3 ### ----
complete <- function(directory, id= 1:332){
  # Number of complete (NA-free) observations in each requested monitor file.
  #
  # Args:
  #   directory - folder holding the monitor CSV files.
  #   id        - positions of the files (in list.files() order) to inspect.
  # Returns: data.frame with columns `id` and `nobs`, one row per id, in the
  #   order given.
  filenames <- list.files(directory)
  # vapply replaces the original pattern of growing two vectors with c()
  # inside the loop; integer(1) pins the expected per-file result type.
  nobss <- vapply(id, function(i) {
    data <- read.csv(paste(directory, "/", filenames[i], sep = ""), header = TRUE)
    nrow(data[complete.cases(data), ])
  }, integer(1))
  data.frame(id = id, nobs = nobss)
}
# Driver calls below assume the hard-coded specdata directory exists locally.
#example
complete("~/Acads/SS 2018-2019/Stat 197 DS/specdata",c(1:15))
complete("~/Acads/SS 2018-2019/Stat 197 DS/specdata",c(1,4,8,5,12))
#quiz
cc <- complete("~/Acads/SS 2018-2019/Stat 197 DS/specdata", c(6, 10, 20, 34, 100, 200, 310))
print(cc$nobs)
cc <- complete("~/Acads/SS 2018-2019/Stat 197 DS/specdata", 54)
print(cc$nobs)
# seeded random sample of complete-case counts over all monitors, reversed
set.seed(42)
cc <- complete("~/Acads/SS 2018-2019/Stat 197 DS/specdata", 332:1)
use <- sample(332, 10)
print(cc[use, "nobs"])
|
# Import data
# Paths are relative to the project root; run from there.
nooutlierdata<-read.csv(file="Analysis/Data/NoOutliers.csv",header=TRUE,
sep=",",stringsAsFactors = TRUE)
# Copy the id column under the key name used by the main data set.
nooutlierdata$TransactionNo<-nooutlierdata$id
data<-read.csv(file="Analysis/Data/HousePriceData.csv",header=TRUE,
sep=",",stringsAsFactors = TRUE)
# Inner join on TransactionNo: keeps only transactions present in both
# files, i.e. drops the outliers.
data<-merge(data,nooutlierdata,by="TransactionNo")
# Add Quantile Information: bin log sale price into its four quartiles.
a<-cut(data$LogSalePrice, breaks=c(quantile(data$LogSalePrice, probs = seq(0, 1, by = 0.25))),
labels=c("0-25","25-50","50-75","75-100"), include.lowest=TRUE)
data$quantile<-a
#create data frame for the ANOVA
data2<-data[,c("AboveGroundFloor",
"RenovationFlag",
"grade",
"condition",
"View",
"WaterfrontView",
"NumberOfFloors",
"NumberOfBedrooms",
"NumberOfBathrooms",
"SaleYear",
"ConstructionYear",
"TotalArea",
"BasementSize",
"LivingSpace",
"quantile")]
#Manova Test
# NOTE(review): BasementSize is selected into data2 above but omitted from
# the response matrix below — confirm whether that is intentional.
test<-manova(cbind(AboveGroundFloor,
RenovationFlag,
grade,
condition,
View,
WaterfrontView,
NumberOfFloors,
NumberOfBedrooms,
NumberOfBathrooms,
SaleYear,
ConstructionYear,
TotalArea,
LivingSpace)
~quantile,data=data2)
# Wilks' lambda statistic for multivariate differences across price quartiles.
summary(test,test="Wilks")
|
/Analysis/Anova.R
|
permissive
|
twoshotamericano/ThesisPublished
|
R
| false
| false
| 1,563
|
r
|
# Import data
# Paths are relative to the project root; run from there.
nooutlierdata<-read.csv(file="Analysis/Data/NoOutliers.csv",header=TRUE,
sep=",",stringsAsFactors = TRUE)
# Copy the id column under the key name used by the main data set.
nooutlierdata$TransactionNo<-nooutlierdata$id
data<-read.csv(file="Analysis/Data/HousePriceData.csv",header=TRUE,
sep=",",stringsAsFactors = TRUE)
# Inner join on TransactionNo: keeps only transactions present in both
# files, i.e. drops the outliers.
data<-merge(data,nooutlierdata,by="TransactionNo")
# Add Quantile Information: bin log sale price into its four quartiles.
a<-cut(data$LogSalePrice, breaks=c(quantile(data$LogSalePrice, probs = seq(0, 1, by = 0.25))),
labels=c("0-25","25-50","50-75","75-100"), include.lowest=TRUE)
data$quantile<-a
#create data frame for the ANOVA
data2<-data[,c("AboveGroundFloor",
"RenovationFlag",
"grade",
"condition",
"View",
"WaterfrontView",
"NumberOfFloors",
"NumberOfBedrooms",
"NumberOfBathrooms",
"SaleYear",
"ConstructionYear",
"TotalArea",
"BasementSize",
"LivingSpace",
"quantile")]
#Manova Test
# NOTE(review): BasementSize is selected into data2 above but omitted from
# the response matrix below — confirm whether that is intentional.
test<-manova(cbind(AboveGroundFloor,
RenovationFlag,
grade,
condition,
View,
WaterfrontView,
NumberOfFloors,
NumberOfBedrooms,
NumberOfBathrooms,
SaleYear,
ConstructionYear,
TotalArea,
LivingSpace)
~quantile,data=data2)
# Wilks' lambda statistic for multivariate differences across price quartiles.
summary(test,test="Wilks")
|
library(plyr)
#Download and unzip data
if(!file.exists("./data")){dir.create("./data")}
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url, destfile="./data/Dataset.zip")
unzip(zipfile="./data/Dataset.zip",exdir="./data")
#Read data: measurements (X), activity codes (y) and subject ids for both splits
x_test <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
x_train <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
features <- read.table('./data/UCI HAR Dataset/features.txt')
labels <- read.table('./data/UCI HAR Dataset/activity_labels.txt')
#Merge all rows: stack test on top of train, in the same order everywhere
x_total <- rbind(x_test, x_train)
y_total <- rbind(y_test, y_train)
subject_total <- rbind(subject_test, subject_train)
# plyr::join matches on the shared column name — presumably V1, the activity
# code; V2 then carries the activity label. TODO confirm against the files.
y_total_labels <- join(y_total, labels)
activity <- data.frame(y_total_labels$V2)
#Name all columns
colnames(x_total) <- features$V2
colnames(activity) <- "activity"
colnames(subject_total) <- "subject"
#Select all columns with mean() or std() in the feature name
# grep(value = TRUE) returns the matching column *names*, used as a selector.
x_subset <- x_total[grep("mean\\(\\)|std\\(\\)", names(x_total), value = TRUE)]
#Merge all columns
set_total <- cbind(subject_total, activity, x_subset)
#Label the dataset with descriptive variable names
names(set_total) <- gsub("^t", "time", names(set_total))
names(set_total) <- gsub("^f", "frequency", names(set_total))
names(set_total) <- gsub("Acc", "Accelerometer", names(set_total))
names(set_total) <- gsub("Gyro", "Gyroscope", names(set_total))
names(set_total) <- gsub("Mag", "Magnitude", names(set_total))
names(set_total) <- gsub("BodyBody", "Body", names(set_total))
names(set_total) <- tolower(names(set_total))
# NOTE(review): View() is for interactive sessions and may fail when this
# script is sourced non-interactively.
View(set_total)
#Create a second, independent tidy data set with the average of each variable for each activity and each subject
set_total2 <- aggregate(. ~subject + activity, set_total, mean)
#Export the set
write.table(set_total2, "tidyset.txt", row.names = FALSE, quote = FALSE)
|
/run_analysis.R
|
no_license
|
hdouwes/Getting-and-Cleaning-Data-Course-Project
|
R
| false
| false
| 2,187
|
r
|
library(plyr)
#Download and unzip data
if(!file.exists("./data")){dir.create("./data")}
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url, destfile="./data/Dataset.zip")
unzip(zipfile="./data/Dataset.zip",exdir="./data")
#Read data: measurements (X), activity codes (y) and subject ids for both splits
x_test <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
x_train <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
features <- read.table('./data/UCI HAR Dataset/features.txt')
labels <- read.table('./data/UCI HAR Dataset/activity_labels.txt')
#Merge all rows: stack test on top of train, in the same order everywhere
x_total <- rbind(x_test, x_train)
y_total <- rbind(y_test, y_train)
subject_total <- rbind(subject_test, subject_train)
# plyr::join matches on the shared column name — presumably V1, the activity
# code; V2 then carries the activity label. TODO confirm against the files.
y_total_labels <- join(y_total, labels)
activity <- data.frame(y_total_labels$V2)
#Name all columns
colnames(x_total) <- features$V2
colnames(activity) <- "activity"
colnames(subject_total) <- "subject"
#Select all columns with mean() or std() in the feature name
# grep(value = TRUE) returns the matching column *names*, used as a selector.
x_subset <- x_total[grep("mean\\(\\)|std\\(\\)", names(x_total), value = TRUE)]
#Merge all columns
set_total <- cbind(subject_total, activity, x_subset)
#Label the dataset with descriptive variable names
names(set_total) <- gsub("^t", "time", names(set_total))
names(set_total) <- gsub("^f", "frequency", names(set_total))
names(set_total) <- gsub("Acc", "Accelerometer", names(set_total))
names(set_total) <- gsub("Gyro", "Gyroscope", names(set_total))
names(set_total) <- gsub("Mag", "Magnitude", names(set_total))
names(set_total) <- gsub("BodyBody", "Body", names(set_total))
names(set_total) <- tolower(names(set_total))
# NOTE(review): View() is for interactive sessions and may fail when this
# script is sourced non-interactively.
View(set_total)
#Create a second, independent tidy data set with the average of each variable for each activity and each subject
set_total2 <- aggregate(. ~subject + activity, set_total, mean)
#Export the set
write.table(set_total2, "tidyset.txt", row.names = FALSE, quote = FALSE)
|
\name{as.listofindex}
\alias{as.listofindex}
\title{
Transform index results in a list of index
}
\description{
Transform various results from functions Tstast, ComIndex or ComIndexMulti in a list of index. Useful to use the functions plot.listofindex (S3 method) and ses.listofindex.
}
\usage{
as.listofindex(x, namesindex = NULL)
}
\arguments{
\item{x}{
A list of objects of class Tstast, ComIndex or ComIndexMulti
}
\item{namesindex}{
Optional, the names of the indices in the same order as in x.
}
}
\value{
A list of observed values and corresponding "null" values (i.e. produced by null models) in the form "list(index1, null model index1, index2, null model index2 ...)"
}
\author{
Adrien Taudiere
}
\seealso{
\code{\link{ses.listofindex}};
\code{\link{plot.listofindex}}
}
\examples{
data(finch.ind)
oldpar <- par(no.readonly = TRUE)
####
#The function ComIndex allow to choose your own function
#(like mean, range, variance...) to calculate customize index.
require(e1071)
funct <- c("mean(x, na.rm = TRUE)", "kurtosis(x, na.rm = TRUE)",
"max(x, na.rm = TRUE) - min(x, na.rm = TRUE)")
res.finch.sp_loc <- ComIndex(traits = traits.finch, index = funct,
sp = sp.finch, nullmodels = "regional.ind", ind.plot = ind.plot.finch,
nperm = 9, print = FALSE)
res.finch.sp_reg <- ComIndex(traits = traits.finch, index = funct,
sp = sp.finch, nullmodels = "regional.pop",
ind.plot = ind.plot.finch, nperm = 9, print = FALSE)
####
#We can represent Standardized Effect Size (ses)
#using the function plot(as.listofindex(list1, list2, list3))
list.ind2 <- list(res.finch.sp_loc, res.finch.sp_reg)
index.list2 <- as.listofindex(list.ind2)
plot(index.list2, type = "bytraits")
}
|
/.Archive man/man/as.listofindex.Rd
|
no_license
|
ben9216/cati
|
R
| false
| false
| 1,710
|
rd
|
\name{as.listofindex}
\alias{as.listofindex}
\title{
Transform index results in a list of index
}
\description{
Transform various results from functions Tstast, ComIndex or ComIndexMulti in a list of index. Useful to use the functions plot.listofindex (S3 method) and ses.listofindex.
}
\usage{
as.listofindex(x, namesindex = NULL)
}
\arguments{
\item{x}{
A list of objects of class Tstast, ComIndex or ComIndexMulti
}
\item{namesindex}{
Optional, the names of the indices in the same order as in x.
}
}
\value{
A list of observed values and corresponding "null" values (i.e. produced by null models) in the form "list(index1, null model index1, index2, null model index2 ...)"
}
\author{
Adrien Taudiere
}
\seealso{
\code{\link{ses.listofindex}};
\code{\link{plot.listofindex}}
}
\examples{
data(finch.ind)
oldpar <- par(no.readonly = TRUE)
####
#The function ComIndex allow to choose your own function
#(like mean, range, variance...) to calculate customize index.
require(e1071)
funct <- c("mean(x, na.rm = TRUE)", "kurtosis(x, na.rm = TRUE)",
"max(x, na.rm = TRUE) - min(x, na.rm = TRUE)")
res.finch.sp_loc <- ComIndex(traits = traits.finch, index = funct,
sp = sp.finch, nullmodels = "regional.ind", ind.plot = ind.plot.finch,
nperm = 9, print = FALSE)
res.finch.sp_reg <- ComIndex(traits = traits.finch, index = funct,
sp = sp.finch, nullmodels = "regional.pop",
ind.plot = ind.plot.finch, nperm = 9, print = FALSE)
####
#We can represent Standardized Effect Size (ses)
#using the function plot(as.listofindex(list1, list2, list3))
list.ind2 <- list(res.finch.sp_loc, res.finch.sp_reg)
index.list2 <- as.listofindex(list.ind2)
plot(index.list2, type = "bytraits")
}
|
###Class 04 - Visualization & Intro to Mapping ###
## Author: Esteban Lopez
## Course: Spatial Analytics
## Program: Master in Business Analytics
## Institution: Universidad Adolfo Ibáñez....
#---- Part 1: Data Management -------------------
# Reading and exporting data
library(readxl)
library(data.table)
casos<-data.table(read_excel("Class_02/2020-03-17-Casos-confirmados.xlsx",na = "—",trim_ws = TRUE,col_names = TRUE),stringsAsFactors = FALSE)
names(casos)
# Keep only the Metropolitan region
casos<-casos[Región=="Metropolitana",]
# Export the filtered data in three formats (RDS, CSV, Excel)
saveRDS(casos,"Class_03/casosRM.rds")
write.csv(casos,file = 'Class_03/CasosCovid_RM.csv',fileEncoding = 'UTF-8')
writexl::write_xlsx(casos,path = "Class_03/CasosenExcel.xlsx")
library(foreign)
#write.dta
casosRM<-fread("Class_03/CasosCovid_RM.csv",header = T, showProgress = T,data.table = T)
casosRM[,table(Sexo)]
# Patch two data-entry typos: the misspelled "Fememino" level and an
# accented health-center name.
casosRM[Sexo=="Fememino",Sexo:="Femenino"]
casosRM[`Centro de salud`=="Clínica Alemana",`Centro de salud`:="Clinica Alemana"]
casosRM[,.N,by=.(`Centro de salud`)]
# Creating (factor) variables
class(casosRM$Sexo)
casosRM[,Sexo:=factor(Sexo)]
head(casosRM$Sexo)
head(as.numeric(casosRM$Sexo))
table(casosRM$Sexo)
casosRM[,.N,by=.(Sexo)]
casosRM[,.N,by=.(Sexo,`Centro de salud`)]
#Collapsing by Centro de Salud (health center): counts and shares
names(casosRM)
obj1<-casosRM[,.N,by=.(`Centro de salud`)]
obj1[,sum(N,na.rm = T)]
obj1[,porc:=N/sum(N,na.rm = T)]
# Collapsing by average age and per-sex counts, one table each.
# NOTE(review): single-letter names C and D shadow base R functions C() and D().
A<-casosRM[,.(AvAge=mean(Edad,na.rm = T)),by=.(`Centro de salud`)]
B<-casosRM[,.(Total_centro=.N),by=.(`Centro de salud`)]
C<-casosRM[Sexo=="Femenino",.(Total_Centro_Mujeres=.N),by=.(`Centro de salud`)]
D<-casosRM[Sexo=="Masculino",.(Total_Centro_Hombres=.N),by=.(`Centro de salud`)]
dim(A)
dim(B)
dim(C)
dim(D)
#merging data sets (full outer joins on the health-center key)
AB<-merge(A,B,by = "Centro de salud",all = T,sort = F)
ABC<-merge(AB,C,by = "Centro de salud",all = T,sort = F)
ABCD<-merge(ABC,D,by = "Centro de salud",all = T,sort = F)
# Share of female cases per health center
ABCD[,porc_mujeres:=Total_Centro_Mujeres/Total_centro]
# reshaping: long (one row per center x sex) to wide (one row per center)
E<-casosRM[,.(AvAge=mean(Edad,na.rm = T),`Casos confirmados`=.N),by=.(`Centro de salud`,Sexo)]
G<-reshape(E,direction = 'wide',timevar = 'Sexo',v.names = c('AvAge','Casos confirmados'),idvar = 'Centro de salud')
#---- Part 2: Visualization -------------------
#Scatter plot # chart with two variables
#Base R
plot(G$`Casos confirmados.Femenino`,G$`Casos confirmados.Masculino`) # first argument is x, second is y
text(x =G$`Casos confirmados.Femenino`,y=G$`Casos confirmados.Masculino`, G$`Centro de salud`,cex=0.5) # label each point with the health-center name
#ggplot2
library(ggplot2)
names (E)
ggplot(data = E, mapping = aes(x = AvAge, y = `Casos confirmados`)) + geom_point()
# geom_point draws the points on the chart
ggplot(data = G,mapping = aes(x=`Casos confirmados.Femenino`,y=`Casos confirmados.Masculino`)) + geom_point()
ggplot(G,aes(x=`Casos confirmados.Femenino`,y=`Casos confirmados.Masculino`))+geom_point(aes(size=AvAge.Femenino,colour=AvAge.Masculino))+geom_text(aes(label=`Centro de salud`),size=2,check_overlap = T)
ggplot(data = E,mapping = aes(x=AvAge,y=`Casos confirmados`))+geom_point()+facet_wrap(~Sexo)+geom_smooth(method = 'lm',se=F) + geom_smooth(method = 'loess',col='red',se=F)
# geom_smooth adds a trend line
#plotly
#install.packages('plotly')
library(plotly)
# NOTE(review): p1 is never assigned above, so ggplotly(p1) fails with
# "object 'p1' not found" — store one of the ggplot objects in p1 first.
ggplotly(p1)
#histograms
ggplot(casos,aes(x=Edad))+geom_histogram()
ggplot(E,aes(x=AvAge))+geom_histogram()
# Kernel Densities
# drawing a smooth density curve instead of a histogram
ggplot(E,aes(x=AvAge))+geom_density()
ggplot(E,aes(x=AvAge,group=Sexo))+geom_density()
ggplot(E,aes(x=AvAge,group=Sexo,colour=Sexo))+geom_density()
ggplot(E,aes(x=AvAge,group=Sexo,colour=Sexo))+geom_density()+facet_wrap(~Sexo)
#looking at the whole country (reload the unfiltered data)
casos<-data.table(read_excel("Class_02/2020-03-17-Casos-confirmados.xlsx",na = "—",trim_ws = TRUE,col_names = TRUE),stringsAsFactors = FALSE)
ggplot(casos,aes(x=Edad,group=Sexo,fill=Sexo))+geom_histogram()+facet_wrap(~factor(Región))
# how do we drop the "Fememino" typo level here?
ggplot(casos,aes(x=Edad,group=Sexo,fill=Sexo))+geom_histogram()
#high charter
# http://jkunst.com/highcharter/index.html
#https://chilecracia.org
#---- Part 3: Intro to Mapping -------------------
#install.packages("chilemapas")
#install.packages("rgdal")
#install.packages("sf")
library(sf)
library(rgdal)
library(sp)
library(chilemapas)
library(data.table)
# 3.1 Shapefiles as in the `sp` package
View(ogrDrivers())
comunas_rm<-readOGR("Class_04/ComunasR13/COMUNA_C17.shp")
class(comunas_rm)
View(comunas_rm@data)
plot(comunas_rm) # me crea un mapa
coordinates(comunas_rm) # si le pongo un mapa poligono, me da el centro medio de cada poligono
centroids_rm<-SpatialPoints(coordinates(comunas_rm),proj4string = comunas_rm@proj4string)
plot(comunas_rm)
plot(centroids_rm,add=T,col='red',lty=1,pch=21,cex=0.1)
lines(coordinates(comunas_rm),col='blue')
str(comunas_rm@data)
# 3.2 Shapefiles as in the `sf` package
zonas_censo<-data.table(censo_2017_zonas,stringsAsFactors = F)
poblacion_adulto_mayor_zonas<-zonas_censo[edad=="65 y mas",.(AdultosMayores=sum(poblacion)),by=.(geocodigo)]
zonas_valparaiso<-mapa_zonas[mapa_zonas$codigo_region=="05",]
zonas_valparaiso<-merge(zonas_valparaiso,codigos_territoriales[,c("codigo_comuna","nombre_comuna")],by="codigo_comuna",all.x=TRUE,sort=F)
zonas_valparaiso<-zonas_valparaiso[zonas_valparaiso$codigo_comuna%in%c("05101","05109"),]
zonas_valparaiso<-merge(zonas_valparaiso,poblacion_adulto_mayor_zonas,by="geocodigo",all.x=TRUE,sort=F)
#plotting
library(RColorBrewer)
paleta <- rev(brewer.pal(n = 9,name = "Reds"))
ggplot(zonas_valparaiso) +
geom_sf(aes(fill = AdultosMayores, geometry = geometry)) +
scale_fill_gradientn(colours = rev(paleta), name = "Poblacion\nadulto mayor") +
labs(title = "Poblacion de 65 años y más", subtitle = "Valparaíso y Viña del Mar") +
theme_minimal(base_size = 11)
# creating a fake spatial distribution of adult population in space
zonas_valparaiso2<-cbind(zonas_valparaiso[,c("geocodigo","codigo_comuna","codigo_provincia","codigo_region","geometry")],"AdultosMayores"=sample(zonas_valparaiso$AdultosMayores,size = length(zonas_valparaiso$AdultosMayores)))
ggplot(zonas_valparaiso2) +
geom_sf(aes(fill = AdultosMayores, geometry = geometry)) +
scale_fill_gradientn(colours = rev(paleta), name = "Poblacion\nadulto mayor") +
labs(title = "Poblacion de 65 años y más", subtitle = "Valparaíso y Viña del Mar") +
theme_minimal(base_size = 13)
#comparing histograms of the same variable
hist(zonas_valparaiso$AdultosMayores,main = "Histograma Adultos Mayores Viña-Valpo")
hist(zonas_valparaiso2$AdultosMayores,main = "Histograma Adultos Mayores Viña-Valpo")
|
/Class_04/Script_Clase04.R
|
permissive
|
mariajosevaldivieso/Clases2
|
R
| false
| false
| 6,749
|
r
|
###Class 04 - Visualization & Intro to Mapping ###
## Author: Esteban Lopez
## Course: Spatial Analytics
## Program: Master in Business Analytics
## Institution: Universidad Adolfo Ibáñez....
#---- Part 1: Data Management -------------------
# Reading and exporting data.
library(readxl)
library(data.table)
casos <- data.table(read_excel("Class_02/2020-03-17-Casos-confirmados.xlsx",
                               na = "—", trim_ws = TRUE, col_names = TRUE),
                    stringsAsFactors = FALSE)
names(casos)
# Keep only the Metropolitan region.
casos <- casos[Región == "Metropolitana", ]
# Export in several formats: RDS, CSV (UTF-8), Excel.
saveRDS(casos, "Class_03/casosRM.rds")
write.csv(casos, file = "Class_03/CasosCovid_RM.csv", fileEncoding = "UTF-8")
writexl::write_xlsx(casos, path = "Class_03/CasosenExcel.xlsx")
library(foreign)
# write.dta would export to Stata format.
casosRM <- fread("Class_03/CasosCovid_RM.csv", header = TRUE,
                 showProgress = TRUE, data.table = TRUE)
casosRM[, table(Sexo)]
# Clean data-entry typos present in the raw file.
casosRM[Sexo == "Fememino", Sexo := "Femenino"]
casosRM[`Centro de salud` == "Clínica Alemana", `Centro de salud` := "Clinica Alemana"]
casosRM[, .N, by = .(`Centro de salud`)]
# Creating (factor) variables.
class(casosRM$Sexo)
casosRM[, Sexo := factor(Sexo)]
head(casosRM$Sexo)
head(as.numeric(casosRM$Sexo))
table(casosRM$Sexo)
casosRM[, .N, by = .(Sexo)]
casosRM[, .N, by = .(Sexo, `Centro de salud`)]
# Collapsing by health centre.
names(casosRM)
obj1 <- casosRM[, .N, by = .(`Centro de salud`)]
obj1[, sum(N, na.rm = TRUE)]
obj1[, porc := N / sum(N, na.rm = TRUE)]
# Collapsing: average age and counts, overall and by sex.
A <- casosRM[, .(AvAge = mean(Edad, na.rm = TRUE)), by = .(`Centro de salud`)]
B <- casosRM[, .(Total_centro = .N), by = .(`Centro de salud`)]
C <- casosRM[Sexo == "Femenino", .(Total_Centro_Mujeres = .N), by = .(`Centro de salud`)]
D <- casosRM[Sexo == "Masculino", .(Total_Centro_Hombres = .N), by = .(`Centro de salud`)]
dim(A)
dim(B)
dim(C)
dim(D)
# Merging data sets (full outer joins, preserving original order).
AB <- merge(A, B, by = "Centro de salud", all = TRUE, sort = FALSE)
ABC <- merge(AB, C, by = "Centro de salud", all = TRUE, sort = FALSE)
ABCD <- merge(ABC, D, by = "Centro de salud", all = TRUE, sort = FALSE)
ABCD[, porc_mujeres := Total_Centro_Mujeres / Total_centro]
# Reshaping: one row per (centre, sex), then wide with one row per centre.
E <- casosRM[, .(AvAge = mean(Edad, na.rm = TRUE), `Casos confirmados` = .N),
             by = .(`Centro de salud`, Sexo)]
G <- reshape(E, direction = "wide", timevar = "Sexo",
             v.names = c("AvAge", "Casos confirmados"), idvar = "Centro de salud")
#---- Part 2: Visualization -------------------
# Scatter plot of two variables (base R): x = female cases, y = male cases.
plot(G$`Casos confirmados.Femenino`, G$`Casos confirmados.Masculino`)
text(x = G$`Casos confirmados.Femenino`, y = G$`Casos confirmados.Masculino`,
     G$`Centro de salud`, cex = 0.5)
# ggplot2 versions of the same plots.
library(ggplot2)
names(E)
ggplot(data = E, mapping = aes(x = AvAge, y = `Casos confirmados`)) + geom_point()
# geom_point draws the individual observations.
ggplot(data = G, mapping = aes(x = `Casos confirmados.Femenino`,
                               y = `Casos confirmados.Masculino`)) + geom_point()
ggplot(G, aes(x = `Casos confirmados.Femenino`, y = `Casos confirmados.Masculino`)) +
  geom_point(aes(size = AvAge.Femenino, colour = AvAge.Masculino)) +
  geom_text(aes(label = `Centro de salud`), size = 2, check_overlap = TRUE)
# geom_smooth adds trend lines: linear fit (blue) and loess fit (red).
ggplot(data = E, mapping = aes(x = AvAge, y = `Casos confirmados`)) +
  geom_point() + facet_wrap(~Sexo) +
  geom_smooth(method = "lm", se = FALSE) +
  geom_smooth(method = "loess", col = "red", se = FALSE)
# plotly: interactive version of a ggplot.
# install.packages('plotly')
library(plotly)
# FIX: `p1` was never defined in the original script, so ggplotly(p1) errored.
# Store a ggplot object first so there is something to convert.
p1 <- ggplot(E, aes(x = AvAge, y = `Casos confirmados`)) + geom_point()
ggplotly(p1)
# Histograms.
ggplot(casos, aes(x = Edad)) + geom_histogram()
ggplot(E, aes(x = AvAge)) + geom_histogram()
# Kernel densities (smooth curve instead of binned counts).
ggplot(E, aes(x = AvAge)) + geom_density()
ggplot(E, aes(x = AvAge, group = Sexo)) + geom_density()
ggplot(E, aes(x = AvAge, group = Sexo, colour = Sexo)) + geom_density()
ggplot(E, aes(x = AvAge, group = Sexo, colour = Sexo)) + geom_density() + facet_wrap(~Sexo)
# Looking at the whole country: reload the unfiltered spreadsheet.
casos <- data.table(read_excel("Class_02/2020-03-17-Casos-confirmados.xlsx",
                               na = "—", trim_ws = TRUE, col_names = TRUE),
                    stringsAsFactors = FALSE)
ggplot(casos, aes(x = Edad, group = Sexo, fill = Sexo)) + geom_histogram() +
  facet_wrap(~factor(Región))
# Question from class: how do we remove the misspelled "fememino" level?
ggplot(casos, aes(x = Edad, group = Sexo, fill = Sexo)) + geom_histogram()
# highcharter examples:
# http://jkunst.com/highcharter/index.html
# https://chilecracia.org
#---- Part 3: Intro to Mapping -------------------
# install.packages("chilemapas"); install.packages("rgdal"); install.packages("sf")
library(sf)
library(rgdal)
library(sp)
library(chilemapas)
library(data.table)
# 3.1 Shapefiles as in the `sp` package
View(ogrDrivers())
comunas_rm <- readOGR("Class_04/ComunasR13/COMUNA_C17.shp")
class(comunas_rm)
View(comunas_rm@data)
plot(comunas_rm)          # draws the polygon map
coordinates(comunas_rm)   # for a polygon layer, returns each polygon's centroid
centroids_rm <- SpatialPoints(coordinates(comunas_rm),
                              proj4string = comunas_rm@proj4string)
plot(comunas_rm)
plot(centroids_rm, add = TRUE, col = "red", lty = 1, pch = 21, cex = 0.1)
lines(coordinates(comunas_rm), col = "blue")
str(comunas_rm@data)
# 3.2 Shapefiles as in the `sf` package
zonas_censo <- data.table(censo_2017_zonas, stringsAsFactors = FALSE)
# Population aged 65+ per census zone.
poblacion_adulto_mayor_zonas <- zonas_censo[edad == "65 y mas",
                                            .(AdultosMayores = sum(poblacion)),
                                            by = .(geocodigo)]
zonas_valparaiso <- mapa_zonas[mapa_zonas$codigo_region == "05", ]
zonas_valparaiso <- merge(zonas_valparaiso,
                          codigos_territoriales[, c("codigo_comuna", "nombre_comuna")],
                          by = "codigo_comuna", all.x = TRUE, sort = FALSE)
# Keep only the comunas 05101 and 05109 (Valparaíso / Viña del Mar).
zonas_valparaiso <- zonas_valparaiso[zonas_valparaiso$codigo_comuna %in% c("05101", "05109"), ]
zonas_valparaiso <- merge(zonas_valparaiso, poblacion_adulto_mayor_zonas,
                          by = "geocodigo", all.x = TRUE, sort = FALSE)
# Plot the choropleth.
library(RColorBrewer)
paleta <- rev(brewer.pal(n = 9, name = "Reds"))
ggplot(zonas_valparaiso) +
  geom_sf(aes(fill = AdultosMayores, geometry = geometry)) +
  scale_fill_gradientn(colours = rev(paleta), name = "Poblacion\nadulto mayor") +
  labs(title = "Poblacion de 65 años y más", subtitle = "Valparaíso y Viña del Mar") +
  theme_minimal(base_size = 11)
# Create a fake (randomly shuffled) spatial distribution of the 65+ population.
zonas_valparaiso2 <- cbind(
  zonas_valparaiso[, c("geocodigo", "codigo_comuna", "codigo_provincia",
                       "codigo_region", "geometry")],
  "AdultosMayores" = sample(zonas_valparaiso$AdultosMayores,
                            size = length(zonas_valparaiso$AdultosMayores)))
ggplot(zonas_valparaiso2) +
  geom_sf(aes(fill = AdultosMayores, geometry = geometry)) +
  scale_fill_gradientn(colours = rev(paleta), name = "Poblacion\nadulto mayor") +
  labs(title = "Poblacion de 65 años y más", subtitle = "Valparaíso y Viña del Mar") +
  theme_minimal(base_size = 13)
# Compare histograms of the real vs. shuffled variable (same values, same histogram).
hist(zonas_valparaiso$AdultosMayores, main = "Histograma Adultos Mayores Viña-Valpo")
hist(zonas_valparaiso2$AdultosMayores, main = "Histograma Adultos Mayores Viña-Valpo")
|
## Step 1 - Load the data
# Read only the rows covering 2007-02-01 and 2007-02-02: skip jumps past the
# header and all earlier dates, and 2880 rows = 2 days x 1440 minute-level
# records.  The header is skipped, so supply column names explicitly.
power_cols <- c("Date", "Time", "Global_active_power", "Global_reactive_power",
                "Voltage", "Global_intensity", "Sub_metering_1",
                "Sub_metering_2", "Sub_metering_3")
househ <- read.table("household_power_consumption.txt", header = FALSE,
                     sep = ";", dec = ".", skip = 66637, nrows = 2880,
                     col.names = power_cols)
# Combine the Date and Time columns into a single POSIXct timestamp.
househ$datetime <- as.POSIXct(paste(househ$Date, househ$Time),
                              format = "%d/%m/%Y %H:%M")
## Plot 2
# Line plot of global active power over time, written to plot2.png.
png('plot2.png')
plot(househ$datetime, househ$Global_active_power, type = "l",
     xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
|
/Course 4 - Exploratory Data Analysis/Week1_Assignment/plot2.R
|
no_license
|
Leijtenss/Coursera-JHU-Data-Science-Specialization
|
R
| false
| false
| 1,024
|
r
|
## Step 1 - Load the data
# Load only records for 2007-02-01 and 2007-02-02
# skip = 66637 jumps past the header row and all earlier dates; nrows = 2880
# is exactly two days of minute-level readings (2 * 1440).  The header line is
# skipped, so column names are supplied explicitly below.
househ <- read.table("household_power_consumption.txt", header = FALSE,
sep = ";", dec = ".", skip = 66637, nrows = 2880,
col.names=c("Date",
"Time",
"Global_active_power",
"Global_reactive_power",
"Voltage",
"Global_intensity",
"Sub_metering_1",
"Sub_metering_2",
"Sub_metering_3"))
# Create datetime by pasting together Date and Time
# (raw file stores dates as day/month/year, hence the "%d/%m/%Y" format)
househ$datetime <- with(househ, as.POSIXct(paste(Date, Time), format="%d/%m/%Y %H:%M"))
## Plot 2
# Plot and save to png: line plot of global active power over the two days.
png('plot2.png')
with(househ, plot(datetime, Global_active_power, type = "l",
xlab = "",
ylab = "Global Active Power (kilowatts)"))
dev.off()
|
## Render an orthographic view of `nim` to a grayscale PNG and embed it,
## together with an optional caption, in the HTML report `htmlFile`.
## The display window (zlim) is modality dependent: a fixed window shifted by
## the image's scale intercept for CT, robust quantiles of the positive
## voxels for PET, and the full data range (NULL) otherwise.
graphOrthographic <- function(nim, uid, modality, imgPath, htmlFile,
                              width=750, height=750, cornerText=NULL,
                              captionText=NULL) {
  displayLimits <- switch(modality,
                          CT = c(-125, 225) - nim@"scl_inter",
                          PT = quantile(nim[nim > 0], c(.005, .995)),
                          NULL)
  pngFile <- file.path(imgPath, paste(uid, "ortho.png", sep="_"))
  # Draw on a black, margin-less canvas.
  bitmap(pngFile, type="pnggray", taa=2)
  par(mfrow=c(1,1), mar=rep(0,4), bg="black")
  orthographic(nim, zlim=displayLimits, crosshairs=FALSE, text=cornerText)
  dev.off()
  # Append the image and its caption to the HTML report as a centered column.
  embeddedImage <- hwriteImage(pngFile, width=width, height=height, center=TRUE)
  hwrite("", htmlFile, br=TRUE)
  hwrite(c(embeddedImage, captionText), htmlFile, br=TRUE, dim=c(2,1),
         center=TRUE, border=0, style="text-align:center")
  invisible()
}
## Plot the absolute difference between consecutive slice positions (along the
## moving dimension(s) of ImagePositionPatient) and embed the plot in the HTML
## report.  A scan with uniform slice thickness appears as a flat series.
graphSliceThickness <- function(nim, uid, imagePositionPatient,
                                movingDimensions, imgPath, htmlFile,
                                width=500, height=500) {
  pngName <- paste(uid, "sloc.png", sep="_")
  bitmap(file.path(imgPath, pngName), taa=4, gaa=4)
  par(mfrow=c(1,1), mar=c(5,4,4,2)+.5, bg="white")
  # Gap between consecutive slices, in mm.
  sliceGaps <- abs(diff(imagePositionPatient[,movingDimensions]))
  plot(sliceGaps, ylim=range(range(sliceGaps) * 1.5, 0, 10),
       xlab="Index", ylab="mm", main="Difference in Slice Location")
  dev.off()
  embeddedImage <- hwriteImage(pngName, width=width, height=height, center=TRUE)
  caption <- "Use this graph to detect inconsistent slice thickness. All points should be equal and constant."
  hwrite(c(embeddedImage, caption), htmlFile, br=TRUE, dim=c(2,1),
         center=TRUE, border=0, style="text-align:center")
  invisible()
}
## Save a single-slice image of `nim` as a grayscale PNG and embed it, with a
## short caption, in the HTML report.  Uses the same modality-dependent
## display window as graphOrthographic().
graphSingleSlice <- function(nim, uid, modality, imgPath, htmlFile,
                             width=750, height=750) {
  displayLimits <- switch(modality,
                          CT = c(-125, 225) - nim@"scl_inter",
                          PT = quantile(nim[nim > 0], c(.005, .995)),
                          NULL)
  pngName <- paste(uid, "slice.png", sep="_")
  bitmap(file.path(imgPath, pngName), type="pnggray")
  par(mfrow=c(1,1), mar=rep(0,4), bg="black")
  image(nim, plot.type="single", zlim=displayLimits)
  dev.off()
  embeddedImage <- hwriteImage(pngName, width=width, height=height, center=TRUE)
  # CT intensities are displayed in Hounsfield units; say so in the caption.
  if (modality == "CT") {
    caption <- paste("Single slice", " in Hounsfield units.", sep="")
  } else {
    caption <- paste("Single slice", ".", sep="")
  }
  hwrite(c(embeddedImage, caption), htmlFile, br=TRUE, dim=c(2,1),
         center=TRUE, border=0, style="text-align:center")
  invisible()
}
## Diagnostic plots of the three DICOM ImagePositionPatient components and of
## SliceLocation, one panel each (2x2), written to <imgPath>/<uid>_ipp.png and
## embedded in the HTML report `htmlFile`.
##
## FIX: the original body referenced free variables defined nowhere in this
## file (`contentTime`, `ipp`, `fname`, `uid.dcm`, `movingDimensions`),
## ignored the `imagePositionPatient` and `sliceLocation` arguments (and even
## overwrote `sliceLocation`), and duplicated the slice-gap plot that
## graphSliceThickness() already produces — so the function could not run.
## This rewrite uses the arguments actually passed in and plots against slice
## index, since acquisition time is not available from the parameters.
graphImagePatientPosition <- function(nim, uid, imagePositionPatient,
                                      sliceLocation, imgPath, htmlFile,
                                      width=512, height=512, ...) {
  graph <- paste(uid, "ipp.png", sep="_")
  png(file.path(imgPath, graph)) # bitmap(file.path(imgPath, graph), taa=4, gaa=4)
  par(mfrow=c(2,2), oma=c(0,0,2,0), mex=0.85)
  idx <- seq_len(nrow(imagePositionPatient))
  plot(idx, imagePositionPatient[,1], xlab="Slice index",
       ylab="ImagePositionPatient[, 1]", cex=0.5)
  plot(idx, imagePositionPatient[,2], xlab="Slice index",
       ylab="ImagePositionPatient[, 2]", cex=0.5)
  plot(idx, imagePositionPatient[,3], xlab="Slice index",
       ylab="ImagePositionPatient[, 3]", cex=0.5)
  plot(idx, sliceLocation, xlab="Slice index", ylab="SliceLocation", cex=0.5)
  mtext(uid, side=3, outer=TRUE, cex=1.2)  # overall title (original used undefined `fname`)
  dev.off()
  graphImage <- hwriteImage(graph, width=width, height=height, center=TRUE)
  # Caption updated: original copy-pasted the slice-thickness caption.
  graphCaption <- "Use these graphs to check ImagePositionPatient and SliceLocation for inconsistencies across slices."
  hwrite(c(graphImage, graphCaption), htmlFile, br=TRUE, dim=c(2,1),
         center=TRUE, border=0, style="text-align:center")
  invisible()
}
|
/R/graph.R
|
no_license
|
bjw34032/qaqc
|
R
| false
| false
| 4,104
|
r
|
## Write an orthographic snapshot of `nim` to <imgPath>/<uid>_ortho.png and
## embed it, with `captionText`, in the HTML report `htmlFile` (hwriter).
## `nim` looks like an oro.nifti image (it has an "scl_inter" slot) -- confirm.
graphOrthographic <- function(nim, uid, modality, imgPath, htmlFile,
width=750, height=750, cornerText=NULL,
captionText=NULL) {
# Display window depends on modality: fixed window shifted by the scale
# intercept for CT, robust quantiles of positive voxels for PET, else NULL
# (full range).
zrange <- switch(modality,
CT = c(-125, 225) - nim@"scl_inter",
PT = quantile(nim[nim > 0], c(.005, .995)),
NULL)
graph <- file.path(imgPath, paste(uid, "ortho.png", sep="_"))
# Grayscale PNG via the bitmap (Ghostscript) device; black margin-less canvas.
bitmap(graph, type="pnggray", taa=2)
par(mfrow=c(1,1), mar=rep(0,4), bg="black")
orthographic(nim, zlim=zrange, crosshairs=FALSE, text=cornerText)
dev.off()
# NOTE(review): unlike the sibling functions, the full path (not just the
# basename) is passed to hwriteImage here -- confirm that is intended.
graphImage <- hwriteImage(graph, width=width, height=height, center=TRUE)
hwrite("", htmlFile, br=TRUE)
hwrite(c(graphImage, captionText), htmlFile, br=TRUE, dim=c(2,1),
center=TRUE, border=0, style="text-align:center")
invisible()
}
## Plot the absolute difference between consecutive entries of
## ImagePositionPatient along `movingDimensions` (slice-to-slice gap, in mm)
## and embed the plot in the HTML report.  `nim` is accepted but unused here.
graphSliceThickness <- function(nim, uid, imagePositionPatient,
movingDimensions, imgPath, htmlFile,
width=500, height=500) {
graph <- paste(uid, "sloc.png", sep="_")
bitmap(file.path(imgPath, graph), taa=4, gaa=4)
par(mfrow=c(1,1), mar=c(5,4,4,2)+.5, bg="white")
# Gap between consecutive slices; constant for uniform slice thickness.
dSL <- abs(diff(imagePositionPatient[,movingDimensions]))
plot(dSL, ylim=range(range(dSL) * 1.5, 0, 10),
xlab="Index", ylab="mm", main="Difference in Slice Location")
dev.off()
# Embed the (basename-relative) image plus caption in the HTML report.
graphImage <- hwriteImage(graph, width=width, height=height, center=TRUE)
graphCaption <- "Use this graph to detect inconsistent slice thickness. All points should be equal and constant."
hwrite(c(graphImage, graphCaption), htmlFile, br=TRUE, dim=c(2,1),
center=TRUE, border=0, style="text-align:center")
invisible()
}
## Save a single-slice view of `nim` as a grayscale PNG and embed it, with a
## short caption, in the HTML report.  Same modality-dependent display window
## as graphOrthographic().
graphSingleSlice <- function(nim, uid, modality, imgPath, htmlFile,
width=750, height=750) {
zrange <- switch(modality,
CT = c(-125, 225) - nim@"scl_inter",
PT = quantile(nim[nim > 0], c(.005, .995)),
NULL)
graph <- paste(uid, "slice.png", sep="_")
bitmap(file.path(imgPath, graph), type="pnggray")
par(mfrow=c(1,1), mar=rep(0,4), bg="black")
image(nim, plot.type="single", zlim=zrange)
dev.off()
graphImage <- hwriteImage(graph, width=width, height=height, center=TRUE)
# CT captions note that intensities are in Hounsfield units.
graphCaption <- paste("Single slice",
ifelse(modality == "CT", " in Hounsfield units.", "."),
sep="")
hwrite(c(graphImage, graphCaption), htmlFile, br=TRUE, dim=c(2,1),
center=TRUE, border=0, style="text-align:center")
invisible()
}
## Diagnostic plots of the three DICOM ImagePositionPatient components and of
## SliceLocation, one panel each (2x2), written to <imgPath>/<uid>_ipp.png and
## embedded in the HTML report `htmlFile`.
##
## FIX: the original body referenced free variables defined nowhere in this
## file (`contentTime`, `ipp`, `fname`, `uid.dcm`, `movingDimensions`),
## ignored the `imagePositionPatient` and `sliceLocation` arguments (and even
## overwrote `sliceLocation`), and duplicated the slice-gap plot that
## graphSliceThickness() already produces — so the function could not run.
## This rewrite uses the arguments actually passed in and plots against slice
## index, since acquisition time is not available from the parameters.
graphImagePatientPosition <- function(nim, uid, imagePositionPatient,
                                      sliceLocation, imgPath, htmlFile,
                                      width=512, height=512, ...) {
  graph <- paste(uid, "ipp.png", sep="_")
  png(file.path(imgPath, graph)) # bitmap(file.path(imgPath, graph), taa=4, gaa=4)
  par(mfrow=c(2,2), oma=c(0,0,2,0), mex=0.85)
  idx <- seq_len(nrow(imagePositionPatient))
  plot(idx, imagePositionPatient[,1], xlab="Slice index",
       ylab="ImagePositionPatient[, 1]", cex=0.5)
  plot(idx, imagePositionPatient[,2], xlab="Slice index",
       ylab="ImagePositionPatient[, 2]", cex=0.5)
  plot(idx, imagePositionPatient[,3], xlab="Slice index",
       ylab="ImagePositionPatient[, 3]", cex=0.5)
  plot(idx, sliceLocation, xlab="Slice index", ylab="SliceLocation", cex=0.5)
  mtext(uid, side=3, outer=TRUE, cex=1.2)  # overall title (original used undefined `fname`)
  dev.off()
  graphImage <- hwriteImage(graph, width=width, height=height, center=TRUE)
  # Caption updated: original copy-pasted the slice-thickness caption.
  graphCaption <- "Use these graphs to check ImagePositionPatient and SliceLocation for inconsistencies across slices."
  hwrite(c(graphImage, graphCaption), htmlFile, br=TRUE, dim=c(2,1),
         center=TRUE, border=0, style="text-align:center")
  invisible()
}
|
## Survival analysis of time to visual loss for diabetic retinopathy patients.
## Workflow:
##   1. Kaplan-Meier curves overall and by laser, age group, eye, treatment
##      and baseline risk group, each with a log-rank test (survdiff).
##   2. A Cox proportional-hazards model with all candidate predictors.
##   3. Diagnostics: martingale residuals, proportional-hazards test
##      (cox.zph), and dfbeta influence plots.
library(survival)
library(readxl)
library(ggplot2)
library(survminer)
# NOTE(review): hard-coded absolute path -- adjust before running elsewhere.
setwd("E:/Summer Semester/Module 1/Advanced Business Analytics/Data/Project Data")
data <- read_excel("DiabeticData.xlsx")
str(data)
# Overall KM curve; status == 1 marks the event (visual loss).
fit.all <- survfit(Surv(time, status == 1) ~ 1, data = data)
plot(fit.all, xlab = "Time", ylab = "Survival Probability", main = "Visual Loss",
     ylim = c(0.40, 1))
### KM curve shows the survival rate to be between (~0.47-1)
# Categorical predictors as factors so survfit/coxph treat them as groups.
data$laser <- as.factor(data$laser)
data$eye <- as.factor(data$eye)
data$trt <- as.factor(data$trt)
# ---- KM by laser type ----
fit.laser <- survfit(Surv(time, status == 1) ~ laser, data = data)
plot(fit.laser, col = 2:5, lty = 1:4, xlab = "Time", ylim = c(0.40, 1),
     ylab = "Survival Probability", main = "Visual Loss by Laser Type")
llabel <- gsub("x=", "", names(fit.laser$strata))
legend("bottomleft", legend = llabel, col = 2:5, lty = 1:4, bty = "n")
survdiff(Surv(time, status == 1) ~ laser, data = data)
# Call:
# survdiff(formula = Surv(time, status == 1) ~ laser, data = data)
#N Observed Expected (O-E)^2/E (O-E)^2/V
#laser=argon 166 68 67.2 0.00988 0.0175
#laser=xenon 228 87 87.8 0.00756 0.0175
#Chisq= 0 on 1 degrees of freedom, p= 0.9
### Survival rate for both types of laser seem to be pretty similar. However, as per
### survdiff analysis laser doesn't seem to be a significant predictor for this problem
# ---- KM by age group: age in (0, 18] = juvenile, (18, Inf) = adult ----
data$agecut <- cut(data$age, breaks = c(0, 18, Inf), labels = c("juvenile", "adult"))
fit.age <- survfit(Surv(time, status == 1) ~ agecut, data = data)
plot(fit.age, col = 2:5, lty = 1:4, xlab = "Time", ylim = c(0.40, 1),
     ylab = "Survival Probability", main = "Visual Loss by Age")
llabel <- gsub("x=", "", names(fit.age$strata))
legend("bottomleft", legend = llabel, col = 2:5, lty = 1:4, bty = "n")
survdiff(Surv(time, status == 1) ~ agecut, data = data)
# NOTE(review): the pasted output below says "child"/"adult" although the code
# labels the levels "juvenile"/"adult"; output predates a relabel -- re-run to refresh.
# Call:
# survdiff(formula = Surv(time, status == 1) ~ agecut, data = data)
#N Observed Expected (O-E)^2/E (O-E)^2/V
#agecut=child 222 84 85.2 0.0165 0.0367
#agecut=adult 172 71 69.8 0.0201 0.0367
#Chisq= 0 on 1 degrees of freedom, p= 0.8
### Again, survival rate for both child & adult seem pretty similar with both becoming
### constant after one point. Also as per survdiff analysis, agecut is an insignificant
### predictor.
# ---- KM by eye ----
fit.eye <- survfit(Surv(time, status == 1) ~ eye, data = data)
plot(fit.eye, col = 2:5, lty = 1:4, xlab = "Time", ylim = c(0.40, 1),
     ylab = "Survival Probability", main = "Visual Loss by Eye")
llabel <- gsub("x=", "", names(fit.eye$strata))
legend("bottomleft", legend = llabel, col = 2:5, lty = 1:4, bty = "n")
survdiff(Surv(time, status == 1) ~ eye, data = data)
# Call:
# survdiff(formula = Surv(time, status == 1) ~ eye, data = data)
#N Observed Expected (O-E)^2/E (O-E)^2/V
#eye=left 197 69 79.8 1.47 3.03
#eye=right 197 86 75.2 1.56 3.03
#Chisq= 3 on 1 degrees of freedom, p= 0.08
### As per KM curve for eye, survival rate for left eye seems to be higher but again
### eye is an insignificant predictor for this problem.
# ---- KM by treatment ----
fit.trt <- survfit(Surv(time, status == 1) ~ trt, data = data)
# FIX: axis-label typo "Survival Probablity" corrected.
plot(fit.trt, col = 2:5, lty = 1:4, xlab = "Time", ylim = c(0.40, 1),
     ylab = "Survival Probability", main = "Visual Loss by Treatment")
llabel <- gsub("x=", "", names(fit.trt$strata))
legend("bottomleft", legend = llabel, col = 2:5, lty = 1:4, bty = "n")
survdiff(Surv(time, status == 1) ~ trt, data = data)
#Call:
# survdiff(formula = Surv(time, status == 1) ~ trt, data = data)
#N Observed Expected (O-E)^2/E (O-E)^2/V
#trt=0 197 101 71.8 11.9 22.2
#trt=1 197 54 83.2 10.3 22.2
#Chisq= 22.2 on 1 degrees of freedom, p= 2e-06
### As per KM curve for treatment, survival rate for treatment is drastically higher
### than no treatment. Also, trt is a significant predictor for this problem.
# ---- KM by baseline risk group: risk in (5, 9] = medium, (9, 12] = high ----
data$riskcat <- cut(data$risk, breaks = c(5, 9, 12), labels = c("medium", "high"))
fit.risk <- survfit(Surv(time, status == 1) ~ riskcat, data = data)
plot(fit.risk, col = 2:5, lty = 1:4, xlab = "Time", ylim = c(0.40, 1),
     ylab = "Survival Probability", main = "Visual Loss by Risk")
llabel <- gsub("x=", "", names(fit.risk$strata))
legend("bottomleft", legend = llabel, col = 2:5, lty = 1:4, bty = "n")
survdiff(Surv(time, status == 1) ~ riskcat, data = data)
#Call:
# survdiff(formula = Surv(time, status == 1) ~ riskcat, data = data)
#N Observed Expected (O-E)^2/E (O-E)^2/V
#riskcat=medium 196 58 85.1 8.62 19.2
#riskcat=high 198 97 69.9 10.50 19.2
#Chisq= 19.2 on 1 degrees of freedom, p= 1e-05
### As per KM curve for risk, survival rate for high risk group is lower vs. medium
### risk group. Also, risk is a significant predictor for this problem.
# ---- Cox proportional-hazards model with all candidate predictors ----
fit.cox <- coxph(formula = Surv(time, status == 1) ~ laser + age + eye + trt
                 + riskcat, data = data)
summary(fit.cox)
# Call:
# coxph(formula = Surv(time, status == 1) ~ laser + age + eye +
# trt + riskcat, data = data)
#n= 394, number of events= 155
#coef exp(coef) se(coef) z Pr(>|z|)
#laserxenon 0.073359 1.076117 0.293240 0.250 0.8025
#age 0.005193 1.005207 0.009690 0.536 0.5920
#eyeright 0.306206 1.358262 0.163292 1.875 0.0608 .
#trt1 -0.837169 0.432935 0.169934 -4.926 8.37e-07 ***
# riskcathigh 0.720188 2.054819 0.166947 4.314 1.60e-05 ***
# ---
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#exp(coef) exp(-coef) lower .95 upper .95
#laserxenon 1.0761 0.9293 0.6057 1.912
#age 1.0052 0.9948 0.9863 1.024
#eyeright 1.3583 0.7362 0.9863 1.871
#trt1 0.4329 2.3098 0.3103 0.604
#riskcathigh 2.0548 0.4867 1.4814 2.850
#Concordance= 0.655 (se = 0.022 )
#Likelihood ratio test= 47.1 on 5 df, p=5e-09
#Wald test = 44.93 on 5 df, p=1e-08
#Score (logrank) test = 46.81 on 5 df, p=6e-09
### The overall model's p-value of 5.946e-09 means that the model is meaningful
### and would accurately portray the visual loss of patients
### trt1 is significant based on p-value (8.17e-07)
### Interpreting exp(coef) we can conclude, compared to the reference variable
### trt0, trt1 is 0.57x less likely to lose vision as those who did not receive treatment
### riskcathigh is significant based on p-value (1.44e-05), riskcathigh is ~1x more likely
### to not lose vision.
### Interpreting exp(coef) we can conclude, compared to the reference of riskcatmedium,
### riskcathigh is 1.0628x more likely to lose vision as those who are in the medium
### risk group.
# Residual plot: martingale residuals vs. linear predictor, with a smooth.
plot(predict(fit.cox), residuals(fit.cox, type = 'martingale'),
     ylab = 'residuals', xlab = 'fittedvalues')
abline(h = 0)
lines(smooth.spline(predict(fit.cox), residuals(fit.cox, type = 'martingale')),
      col = 'red')
### Shows clear linear residual plot
# Proportional-hazards check: Schoenfeld residual test per covariate.
k <- cox.zph(fit.cox)   # FIX: use <- (not =) for assignment
k
plot(k[5, ])
abline(h = 0)
plot(k[4, ])
abline(h = 0)
plot(k[3, ])
abline(h = 0)
plot(k[2, ])
abline(h = 0)
plot(k[1, ])
abline(h = 0)
### All have constant hazard ratio
# Influence diagnostics: dfbeta change per observation and coefficient.
ggcoxdiagnostics(fit.cox, type = "dfbeta", linear.predictions = FALSE,
                 ggtheme = theme_bw())
### No outlier values have significant contribution
|
/Final_Project_ABA.R
|
no_license
|
Usama93-PU/Diabetes-Patients-Survival-Analysis
|
R
| false
| false
| 7,417
|
r
|
## Survival analysis of time to visual loss for diabetic retinopathy patients.
## Workflow:
##   1. Kaplan-Meier curves overall and by laser, age group, eye, treatment
##      and baseline risk group, each with a log-rank test (survdiff).
##   2. A Cox proportional-hazards model with all candidate predictors.
##   3. Diagnostics: martingale residuals, proportional-hazards test
##      (cox.zph), and dfbeta influence plots.
library(survival)
library(readxl)
library(ggplot2)
library(survminer)
# NOTE(review): hard-coded absolute path -- adjust before running elsewhere.
setwd("E:/Summer Semester/Module 1/Advanced Business Analytics/Data/Project Data")
data <- read_excel("DiabeticData.xlsx")
str(data)
# Overall KM curve; status == 1 marks the event (visual loss).
fit.all <- survfit(Surv(time, status == 1) ~ 1, data = data)
plot(fit.all, xlab = "Time", ylab = "Survival Probability", main = "Visual Loss",
     ylim = c(0.40, 1))
### KM curve shows the survival rate to be between (~0.47-1)
# Categorical predictors as factors so survfit/coxph treat them as groups.
data$laser <- as.factor(data$laser)
data$eye <- as.factor(data$eye)
data$trt <- as.factor(data$trt)
# ---- KM by laser type ----
fit.laser <- survfit(Surv(time, status == 1) ~ laser, data = data)
plot(fit.laser, col = 2:5, lty = 1:4, xlab = "Time", ylim = c(0.40, 1),
     ylab = "Survival Probability", main = "Visual Loss by Laser Type")
llabel <- gsub("x=", "", names(fit.laser$strata))
legend("bottomleft", legend = llabel, col = 2:5, lty = 1:4, bty = "n")
survdiff(Surv(time, status == 1) ~ laser, data = data)
# Call:
# survdiff(formula = Surv(time, status == 1) ~ laser, data = data)
#N Observed Expected (O-E)^2/E (O-E)^2/V
#laser=argon 166 68 67.2 0.00988 0.0175
#laser=xenon 228 87 87.8 0.00756 0.0175
#Chisq= 0 on 1 degrees of freedom, p= 0.9
### Survival rate for both types of laser seem to be pretty similar. However, as per
### survdiff analysis laser doesn't seem to be a significant predictor for this problem
# ---- KM by age group: age in (0, 18] = juvenile, (18, Inf) = adult ----
data$agecut <- cut(data$age, breaks = c(0, 18, Inf), labels = c("juvenile", "adult"))
fit.age <- survfit(Surv(time, status == 1) ~ agecut, data = data)
plot(fit.age, col = 2:5, lty = 1:4, xlab = "Time", ylim = c(0.40, 1),
     ylab = "Survival Probability", main = "Visual Loss by Age")
llabel <- gsub("x=", "", names(fit.age$strata))
legend("bottomleft", legend = llabel, col = 2:5, lty = 1:4, bty = "n")
survdiff(Surv(time, status == 1) ~ agecut, data = data)
# NOTE(review): the pasted output below says "child"/"adult" although the code
# labels the levels "juvenile"/"adult"; output predates a relabel -- re-run to refresh.
# Call:
# survdiff(formula = Surv(time, status == 1) ~ agecut, data = data)
#N Observed Expected (O-E)^2/E (O-E)^2/V
#agecut=child 222 84 85.2 0.0165 0.0367
#agecut=adult 172 71 69.8 0.0201 0.0367
#Chisq= 0 on 1 degrees of freedom, p= 0.8
### Again, survival rate for both child & adult seem pretty similar with both becoming
### constant after one point. Also as per survdiff analysis, agecut is an insignificant
### predictor.
# ---- KM by eye ----
fit.eye <- survfit(Surv(time, status == 1) ~ eye, data = data)
plot(fit.eye, col = 2:5, lty = 1:4, xlab = "Time", ylim = c(0.40, 1),
     ylab = "Survival Probability", main = "Visual Loss by Eye")
llabel <- gsub("x=", "", names(fit.eye$strata))
legend("bottomleft", legend = llabel, col = 2:5, lty = 1:4, bty = "n")
survdiff(Surv(time, status == 1) ~ eye, data = data)
# Call:
# survdiff(formula = Surv(time, status == 1) ~ eye, data = data)
#N Observed Expected (O-E)^2/E (O-E)^2/V
#eye=left 197 69 79.8 1.47 3.03
#eye=right 197 86 75.2 1.56 3.03
#Chisq= 3 on 1 degrees of freedom, p= 0.08
### As per KM curve for eye, survival rate for left eye seems to be higher but again
### eye is an insignificant predictor for this problem.
# ---- KM by treatment ----
fit.trt <- survfit(Surv(time, status == 1) ~ trt, data = data)
# FIX: axis-label typo "Survival Probablity" corrected.
plot(fit.trt, col = 2:5, lty = 1:4, xlab = "Time", ylim = c(0.40, 1),
     ylab = "Survival Probability", main = "Visual Loss by Treatment")
llabel <- gsub("x=", "", names(fit.trt$strata))
legend("bottomleft", legend = llabel, col = 2:5, lty = 1:4, bty = "n")
survdiff(Surv(time, status == 1) ~ trt, data = data)
#Call:
# survdiff(formula = Surv(time, status == 1) ~ trt, data = data)
#N Observed Expected (O-E)^2/E (O-E)^2/V
#trt=0 197 101 71.8 11.9 22.2
#trt=1 197 54 83.2 10.3 22.2
#Chisq= 22.2 on 1 degrees of freedom, p= 2e-06
### As per KM curve for treatment, survival rate for treatment is drastically higher
### than no treatment. Also, trt is a significant predictor for this problem.
# ---- KM by baseline risk group: risk in (5, 9] = medium, (9, 12] = high ----
data$riskcat <- cut(data$risk, breaks = c(5, 9, 12), labels = c("medium", "high"))
fit.risk <- survfit(Surv(time, status == 1) ~ riskcat, data = data)
plot(fit.risk, col = 2:5, lty = 1:4, xlab = "Time", ylim = c(0.40, 1),
     ylab = "Survival Probability", main = "Visual Loss by Risk")
llabel <- gsub("x=", "", names(fit.risk$strata))
legend("bottomleft", legend = llabel, col = 2:5, lty = 1:4, bty = "n")
survdiff(Surv(time, status == 1) ~ riskcat, data = data)
#Call:
# survdiff(formula = Surv(time, status == 1) ~ riskcat, data = data)
#N Observed Expected (O-E)^2/E (O-E)^2/V
#riskcat=medium 196 58 85.1 8.62 19.2
#riskcat=high 198 97 69.9 10.50 19.2
#Chisq= 19.2 on 1 degrees of freedom, p= 1e-05
### As per KM curve for risk, survival rate for high risk group is lower vs. medium
### risk group. Also, risk is a significant predictor for this problem.
# ---- Cox proportional-hazards model with all candidate predictors ----
fit.cox <- coxph(formula = Surv(time, status == 1) ~ laser + age + eye + trt
                 + riskcat, data = data)
summary(fit.cox)
# Call:
# coxph(formula = Surv(time, status == 1) ~ laser + age + eye +
# trt + riskcat, data = data)
#n= 394, number of events= 155
#coef exp(coef) se(coef) z Pr(>|z|)
#laserxenon 0.073359 1.076117 0.293240 0.250 0.8025
#age 0.005193 1.005207 0.009690 0.536 0.5920
#eyeright 0.306206 1.358262 0.163292 1.875 0.0608 .
#trt1 -0.837169 0.432935 0.169934 -4.926 8.37e-07 ***
# riskcathigh 0.720188 2.054819 0.166947 4.314 1.60e-05 ***
# ---
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#exp(coef) exp(-coef) lower .95 upper .95
#laserxenon 1.0761 0.9293 0.6057 1.912
#age 1.0052 0.9948 0.9863 1.024
#eyeright 1.3583 0.7362 0.9863 1.871
#trt1 0.4329 2.3098 0.3103 0.604
#riskcathigh 2.0548 0.4867 1.4814 2.850
#Concordance= 0.655 (se = 0.022 )
#Likelihood ratio test= 47.1 on 5 df, p=5e-09
#Wald test = 44.93 on 5 df, p=1e-08
#Score (logrank) test = 46.81 on 5 df, p=6e-09
### The overall model's p-value of 5.946e-09 means that the model is meaningful
### and would accurately portray the visual loss of patients
### trt1 is significant based on p-value (8.17e-07)
### Interpreting exp(coef) we can conclude, compared to the reference variable
### trt0, trt1 is 0.57x less likely to lose vision as those who did not receive treatment
### riskcathigh is significant based on p-value (1.44e-05), riskcathigh is ~1x more likely
### to not lose vision.
### Interpreting exp(coef) we can conclude, compared to the reference of riskcatmedium,
### riskcathigh is 1.0628x more likely to lose vision as those who are in the medium
### risk group.
# Residual plot: martingale residuals vs. linear predictor, with a smooth.
plot(predict(fit.cox), residuals(fit.cox, type = 'martingale'),
     ylab = 'residuals', xlab = 'fittedvalues')
abline(h = 0)
lines(smooth.spline(predict(fit.cox), residuals(fit.cox, type = 'martingale')),
      col = 'red')
### Shows clear linear residual plot
# Proportional-hazards check: Schoenfeld residual test per covariate.
k <- cox.zph(fit.cox)   # FIX: use <- (not =) for assignment
k
plot(k[5, ])
abline(h = 0)
plot(k[4, ])
abline(h = 0)
plot(k[3, ])
abline(h = 0)
plot(k[2, ])
abline(h = 0)
plot(k[1, ])
abline(h = 0)
### All have constant hazard ratio
# Influence diagnostics: dfbeta change per observation and coefficient.
ggcoxdiagnostics(fit.cox, type = "dfbeta", linear.predictions = FALSE,
                 ggtheme = theme_bw())
### No outlier values have significant contribution
|
# Exercise-1
# Implement code from this book chapter: http://r4ds.had.co.nz/many-models.html
# Packages
# install.packages('modelr')
# install.packages('tidyverse')
# install.packages('gapminder')
library(gapminder)
library(modelr)
library(tidyverse)
# Initial view of the data with ggplot
# One life-expectancy line per country, drawn with heavy transparency.
gapminder %>%
ggplot(aes(year, lifeExp, group = country)) +
geom_line(alpha = 1/3)
# Look only at new zealand
nz <- filter(gapminder, country == "New Zealand")
nz %>%
ggplot(aes(year, lifeExp)) +
geom_line() +
ggtitle("Full data = ")
# Fit a simple linear trend of life expectancy over time for New Zealand.
nz_mod <- lm(lifeExp ~ year, data = nz)
nz %>%
add_predictions(nz_mod) %>%
ggplot(aes(year, pred)) +
geom_line() +
ggtitle("Linear trend + ")
# Residuals = what the linear trend fails to explain.
nz %>%
add_residuals(nz_mod) %>%
ggplot(aes(year, resid)) +
geom_hline(yintercept = 0, colour = "white", size = 3) +
geom_line() +
ggtitle("Remaining pattern")
# Better yet, write your own function to accept a country as a parameter,
# and produce the same graphics
# Nest the data by country/continent
# Each row of by_country now holds one country's observations as a nested tibble.
by_country <- gapminder %>%
group_by(country, continent) %>%
nest()
# Define a statistical model, and store it in a function
country_model <- function(df) {
lm(lifeExp ~ year, data = df)
}
# Use the `map` functionality to run the same model for each country separately
by_country <- by_country %>%
mutate(model = map(data, country_model))
# Add additional columns to store your residuals (distance between data and prediction)
by_country <- by_country %>%
mutate(
resids = map2(data, model, add_residuals)
)
# Unnest your residual
# NOTE(review): tidyr >= 1.0 prefers unnest(by_country, cols = resids) — confirm
# against the tidyr version in use.
resids <- unnest(by_country, resids)
# Plot the residuals
resids %>%
ggplot(aes(year, resid)) +
geom_line(aes(group = country), alpha = 1 / 3) +
geom_smooth(se = FALSE)
# Plot residuals by continent
resids %>%
ggplot(aes(year, resid, group = country)) +
geom_line(alpha = 1 / 3) +
facet_wrap(~continent)
# Use `glance` to look at model quality
# Compare model quality to continent
# View country that have an r.squared value of less than .25
|
/exercise-1/exercise.R
|
permissive
|
tdhoxxi/m15-special-topics
|
R
| false
| false
| 2,034
|
r
|
# NOTE(review): this block is an exact byte-for-byte duplicate of the
# Exercise-1 script earlier in this file (dataset-dump artifact); kept verbatim.
# Exercise-1
# Implement code from this book chapter: http://r4ds.had.co.nz/many-models.html
# Packages
# install.packages('modelr')
# install.packages('tidyverse')
# install.packages('gapminder')
library(gapminder)
library(modelr)
library(tidyverse)
# Initial view of the data with ggplot
gapminder %>%
ggplot(aes(year, lifeExp, group = country)) +
geom_line(alpha = 1/3)
# Look only at new zealand
nz <- filter(gapminder, country == "New Zealand")
nz %>%
ggplot(aes(year, lifeExp)) +
geom_line() +
ggtitle("Full data = ")
nz_mod <- lm(lifeExp ~ year, data = nz)
nz %>%
add_predictions(nz_mod) %>%
ggplot(aes(year, pred)) +
geom_line() +
ggtitle("Linear trend + ")
nz %>%
add_residuals(nz_mod) %>%
ggplot(aes(year, resid)) +
geom_hline(yintercept = 0, colour = "white", size = 3) +
geom_line() +
ggtitle("Remaining pattern")
# Better yet, write your own function to accept a country as a parameter,
# and produce the same graphics
# Nest the data by country/continent
by_country <- gapminder %>%
group_by(country, continent) %>%
nest()
# Define a statistical model, and store it in a function
country_model <- function(df) {
lm(lifeExp ~ year, data = df)
}
# Use the `map` functionality to run the same model for each country separately
by_country <- by_country %>%
mutate(model = map(data, country_model))
# Add additional columns to store your residuals (distance between data and prediction)
by_country <- by_country %>%
mutate(
resids = map2(data, model, add_residuals)
)
# Unnest your residual
resids <- unnest(by_country, resids)
# Plot the residuals
resids %>%
ggplot(aes(year, resid)) +
geom_line(aes(group = country), alpha = 1 / 3) +
geom_smooth(se = FALSE)
# Plot residuals by continent
resids %>%
ggplot(aes(year, resid, group = country)) +
geom_line(alpha = 1 / 3) +
facet_wrap(~continent)
# Use `glance` to look at model quality
# Compare model quality to continent
# View country that have an r.squared value of less than .25
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sst_med.R
\docType{data}
\name{sst_Med}
\alias{sst_Med}
\title{Optimally interpolated 0.25 degree SST for the Mediterranean region.}
\format{
A data frame with 13514 rows and 2 variables:
\describe{
\item{t}{date, as.Date() format}
\item{temp}{SST, in degrees Celsius}
...
}
}
\source{
\url{https://www.ncdc.noaa.gov/oisst}
}
\usage{
sst_Med
}
\description{
A dataset containing the sea surface temperature (in degrees Celsius)
and date for the Mediterranean region from 1982-01-01 to 2018-12-31.
}
\details{
lon/lat: 9/43.5
}
\keyword{datasets}
|
/man/sst_Med.Rd
|
permissive
|
ShiQuanOoi/heatwaveR
|
R
| false
| true
| 630
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sst_med.R
\docType{data}
\name{sst_Med}
\alias{sst_Med}
\title{Optimally interpolated 0.25 degree SST for the Mediterranean region.}
\format{
A data frame with 13514 rows and 2 variables:
\describe{
\item{t}{date, as.Date() format}
\item{temp}{SST, in degrees Celsius}
...
}
}
\source{
\url{https://www.ncdc.noaa.gov/oisst}
}
\usage{
sst_Med
}
\description{
A dataset containing the sea surface temperature (in degrees Celsius)
and date for the Mediterranean region from 1982-01-01 to 2018-12-31.
}
\details{
lon/lat: 9/43.5
}
\keyword{datasets}
|
########################
#### H3, sentiments ####
########################
# H3: proportional-odds (ordinal logit) models of article sentiment ("pred",
# levels -1/0/1) against month of publication, fitted separately per party,
# plus probability-over-time plots and proportional-odds diagnostics.
# NOTE(review): setwd() with a machine-specific path makes this script
# non-portable; consider relative paths or here::here().
setwd("~/Egyetemi/survey/Szakdolgozat/thesis_polsenti/src")
corpus <- read.csv2("./corpus_predicted.csv", header = T, sep = ";",
fileEncoding = 'UTF-8', stringsAsFactors = F)
### libraries ###
#library(foreign)
library(ggplot2)
library(MASS)
library(Hmisc)
library(reshape2)
### create variables ###
corpus$pred <- as.factor(corpus$pred)
# Outlets classified as opposition-leaning; all others are treated as
# pro-government.
ellenzeki <- c('24.hu', '444.hu', 'index.hu', 'nepszava.hu', 'alfahir.hu', 'atv.hu')
for (i in 1:nrow(corpus)){
if (corpus$source.y[i] %in% ellenzeki){
corpus$side[i] <- 1 # opposition-leaning outlet
}else{
corpus$side[i] <- 2 # pro-government outlet
}
}
corpus$side <- as.factor(corpus$side)
corpus$date <- as.Date(corpus$date, "%Y-%m-%d")
corpus$months <- format(corpus$date,"%m")
corpus$months <- as.numeric(corpus$months)
# NOTE(review): side is converted to factor above and immediately back to
# numeric here; only the numeric 1/2 coding is used below.
corpus$side <- as.numeric(corpus$side)
########################
######## Fidesz ########
########################
fidesz <- corpus[corpus$fidesz==1,]
### ord. logit ###
f <- polr(pred ~ months, data = fidesz, Hess=TRUE)
summary(f)
(ctable <- coef(summary(f)))
# Two-sided p-values derived from the polr t statistics.
p <- pnorm(abs(ctable[, "t value"]), lower.tail = FALSE) * 2
(ctable <- cbind(ctable, "p value" = p))
(ci <- confint(f))
# Odds ratios with confidence intervals.
exp(cbind(OR = coef(f), ci))
### test odds proportional assumption ###
# Cumulative logits at each sentiment cut-point; roughly parallel profiles
# across predictor values support the proportional-odds assumption.
sf <- function(y) {
c('Y>=1' = qlogis(mean(y >= 1)),
'Y>=2' = qlogis(mean(y >= 2)),
'Y>=3' = qlogis(mean(y >= 3)))
}
(s <- with(fidesz, summary(as.numeric(pred) ~ months, fun=sf)))
# Re-express the cut-point logits relative to the first one.
s[, 4] <- s[, 4] - s[, 3]
s[, 3] <- s[, 3] - s[, 3]
(p_f1 <- plot(s, which=1:3, pch=1:3, xlab='logit', main=' ', xlim=range(s[,3:4])))
# Predicted category probabilities over months 1-6 for plotting.
newdat <- data.frame(
months = rep(seq(from = 1, to = 6, length.out = 100), 4))
newdat <- cbind(newdat, predict(f, newdat, type = "probs"))
lnewdat <- melt(newdat, id.vars = c("months"),
variable.name = "Level", value.name="Probability")
# Label strings are Hungarian UI text: Negative / Neutral / Positive.
p_f2 <- ggplot(lnewdat, aes(x = months, y = Probability, colour = Level)) +
geom_line() +
scale_colour_manual(
values = c("-1" = "#fa8072", "0" = "#999999", "1" = "#8edba3"),
labels = c("Negatív", "Semleges", "Pozitív"))+
labs(title = "Fidesz", fill="Szentiment")+
theme(plot.title = element_text(
size = 14,
face = "bold",
hjust = 0.5))+
theme(legend.position = "top")
######################
######## MSZP ########
######################
MSZP <- corpus[corpus$mszpp == 1 | corpus$MSZP == 1 | corpus$Párbeszéd == 1,]
### ord. logit ###
m <- polr(pred ~ months, data = MSZP, Hess=TRUE)
summary(m)
(ctable <- coef(summary(m)))
p <- pnorm(abs(ctable[, "t value"]), lower.tail = FALSE) * 2
(ctable <- cbind(ctable, "p value" = p))
(ci <- confint(m))
exp(cbind(OR = coef(m), ci))
### test odds proportional assumption ###
sf <- function(y) {
c('Y>=1' = qlogis(mean(y >= 1)),
'Y>=2' = qlogis(mean(y >= 2)),
'Y>=3' = qlogis(mean(y >= 3)))
}
# Unlike the other parties, this check also stratifies by outlet side.
(s <- with(MSZP, summary(as.numeric(pred) ~ months + side, fun=sf)))
s[, 4] <- s[, 4] - s[, 3]
s[, 3] <- s[, 3] - s[, 3]
(p_m1 <- plot(s, which=1:3, pch=1:3, xlab='logit', main=' ', xlim=range(s[,3:4])))
newdat <- data.frame(
months = rep(seq(from = 1, to = 6, length.out = 100), 4))
newdat <- cbind(newdat, predict(m, newdat, type = "probs"))
lnewdat <- melt(newdat, id.vars = c("months"),
variable.name = "Level", value.name="Probability")
p_m2 <- ggplot(lnewdat, aes(x = months, y = Probability, colour = Level)) +
geom_line() +
scale_colour_manual(
values = c("-1" = "#fa8072", "0" = "#999999", "1" = "#8edba3"),
labels = c("Negatív", "Semleges", "Pozitív"))+
labs(title = "MSZP", fill="Szentiment")+
theme(plot.title = element_text(
size = 14,
face = "bold",
hjust = 0.5))+
theme(legend.position = "top")
#######################
######## LMP ##########
#######################
LMP <- corpus[corpus$lmp==1,]
### ord. logit ###
l <- polr(pred ~ months, data = LMP, Hess=TRUE)
summary(l)
(ctable <- coef(summary(l)))
p <- pnorm(abs(ctable[, "t value"]), lower.tail = FALSE) * 2
(ctable <- cbind(ctable, "p value" = p))
(ci <- confint(l))
exp(cbind(OR = coef(l), ci))
### test odds proportional assumption ###
sf <- function(y) {
c('Y>=1' = qlogis(mean(y >= 1)),
'Y>=2' = qlogis(mean(y >= 2)),
'Y>=3' = qlogis(mean(y >= 3)))
}
(s <- with(LMP, summary(as.numeric(pred) ~ months, fun=sf)))
s[, 4] <- s[, 4] - s[, 3]
s[, 3] <- s[, 3] - s[, 3]
(p_l1 <- plot(s, which=1:3, pch=1:3, xlab='logit', main=' ', xlim=range(s[,3:4])))
newdat <- data.frame(
months = rep(seq(from = 1, to = 6, length.out = 100), 4))
newdat <- cbind(newdat, predict(l, newdat, type = "probs"))
lnewdat <- melt(newdat, id.vars = c("months"),
variable.name = "Level", value.name="Probability")
p_l2 <- ggplot(lnewdat, aes(x = months, y = Probability, colour = Level)) +
geom_line() +
scale_colour_manual(
values = c("-1" = "#fa8072", "0" = "#999999", "1" = "#8edba3"),
labels = c("Negatív", "Semleges", "Pozitív"))+
labs(title = "LMP", fill="Szentiment")+
theme(plot.title = element_text(
size = 14,
face = "bold",
hjust = 0.5))+
theme(legend.position = "top")
######################
######### DK #########
######################
DK <- corpus[corpus$DemKo == 1,]
### ord. logit ###
d <- polr(pred ~ months, data = DK, Hess=TRUE)
summary(d)
(ctable <- coef(summary(d)))
p <- pnorm(abs(ctable[, "t value"]), lower.tail = FALSE) * 2
(ctable <- cbind(ctable, "p value" = p))
(ci <- confint(d))
exp(cbind(OR = coef(d), ci))
### test odds proportional assumption ###
sf <- function(y) {
c('Y>=1' = qlogis(mean(y >= 1)),
'Y>=2' = qlogis(mean(y >= 2)),
'Y>=3' = qlogis(mean(y >= 3)))
}
(s <- with(DK, summary(as.numeric(pred) ~ months, fun=sf)))
s[, 4] <- s[, 4] - s[, 3]
s[, 3] <- s[, 3] - s[, 3]
(p_d1 <- plot(s, which=1:3, pch=1:3, xlab='logit', main=' ', xlim=range(s[,3:4])))
newdat <- data.frame(
months = rep(seq(from = 1, to = 6, length.out = 100), 4))
newdat <- cbind(newdat, predict(d, newdat, type = "probs"))
lnewdat <- melt(newdat, id.vars = c("months"),
variable.name = "Level", value.name="Probability")
# For DK, a one-way ANOVA of (numeric) sentiment on month instead of a plot.
DK$pred <- as.numeric(DK$pred)
anov1 <- aov(pred ~ months, data = DK)
summary(anov1)
DK$pred <- as.factor(DK$pred)
#####################
###### Jobbik #######
#####################
Jobbik <- corpus[corpus$Jobbik==1,]
### ord. logit ###
j <- polr(pred ~ months, data = Jobbik, Hess=TRUE)
summary(j)
(ctable <- coef(summary(j)))
p <- pnorm(abs(ctable[, "t value"]), lower.tail = FALSE) * 2
(ctable <- cbind(ctable, "p value" = p))
(ci <- confint(j))
exp(cbind(OR = coef(j), ci))
### test odds proportional assumption ###
sf <- function(y) {
c('Y>=1' = qlogis(mean(y >= 1)),
'Y>=2' = qlogis(mean(y >= 2)),
'Y>=3' = qlogis(mean(y >= 3)))
}
(s <- with(Jobbik, summary(as.numeric(pred) ~ months, fun=sf)))
s[, 4] <- s[, 4] - s[, 3]
s[, 3] <- s[, 3] - s[, 3]
(p_j1 <- plot(s, which=1:3, pch=1:3, xlab='logit', main=' ', xlim=range(s[,3:4])))
newdat <- data.frame(
months = rep(seq(from = 1, to = 6, length.out = 100), 4))
newdat <- cbind(newdat, predict(j, newdat, type = "probs"))
lnewdat <- melt(newdat, id.vars = c("months"),
variable.name = "Level", value.name="Probability")
Jobbik$pred <- as.numeric(Jobbik$pred)
anov2 <- aov(pred ~ months, data = Jobbik)
summary(anov2)
Jobbik$pred <- as.factor(Jobbik$pred)
# Arrange the Fidesz / MSZP / LMP probability plots side by side.
setwd("~/Egyetemi/survey/Szakdolgozat/thesis_polsenti/script")
source("multiplot_fn.R")
multiplot(p_f2, p_m2, p_l2, cols = 3)
|
/script/H3_senti.R
|
no_license
|
bgallina/Thesis_polsenti_2019
|
R
| false
| false
| 7,592
|
r
|
########################
#### H3, sentiments ####
########################
# NOTE(review): this block is an exact duplicate of the H3 sentiment script
# earlier in this file (dataset-dump artifact); kept verbatim.
setwd("~/Egyetemi/survey/Szakdolgozat/thesis_polsenti/src")
corpus <- read.csv2("./corpus_predicted.csv", header = T, sep = ";",
fileEncoding = 'UTF-8', stringsAsFactors = F)
### libraries ###
#library(foreign)
library(ggplot2)
library(MASS)
library(Hmisc)
library(reshape2)
### create variables ###
corpus$pred <- as.factor(corpus$pred)
ellenzeki <- c('24.hu', '444.hu', 'index.hu', 'nepszava.hu', 'alfahir.hu', 'atv.hu')
for (i in 1:nrow(corpus)){
if (corpus$source.y[i] %in% ellenzeki){
corpus$side[i] <- 1 # opposition-leaning outlet
}else{
corpus$side[i] <- 2 # pro-government outlet
}
}
corpus$side <- as.factor(corpus$side)
corpus$date <- as.Date(corpus$date, "%Y-%m-%d")
corpus$months <- format(corpus$date,"%m")
corpus$months <- as.numeric(corpus$months)
corpus$side <- as.numeric(corpus$side)
########################
######## Fidesz ########
########################
fidesz <- corpus[corpus$fidesz==1,]
### ord. logit ###
f <- polr(pred ~ months, data = fidesz, Hess=TRUE)
summary(f)
(ctable <- coef(summary(f)))
p <- pnorm(abs(ctable[, "t value"]), lower.tail = FALSE) * 2
(ctable <- cbind(ctable, "p value" = p))
(ci <- confint(f))
exp(cbind(OR = coef(f), ci))
### test odds proportional assumption ###
sf <- function(y) {
c('Y>=1' = qlogis(mean(y >= 1)),
'Y>=2' = qlogis(mean(y >= 2)),
'Y>=3' = qlogis(mean(y >= 3)))
}
(s <- with(fidesz, summary(as.numeric(pred) ~ months, fun=sf)))
s[, 4] <- s[, 4] - s[, 3]
s[, 3] <- s[, 3] - s[, 3]
(p_f1 <- plot(s, which=1:3, pch=1:3, xlab='logit', main=' ', xlim=range(s[,3:4])))
newdat <- data.frame(
months = rep(seq(from = 1, to = 6, length.out = 100), 4))
newdat <- cbind(newdat, predict(f, newdat, type = "probs"))
lnewdat <- melt(newdat, id.vars = c("months"),
variable.name = "Level", value.name="Probability")
p_f2 <- ggplot(lnewdat, aes(x = months, y = Probability, colour = Level)) +
geom_line() +
scale_colour_manual(
values = c("-1" = "#fa8072", "0" = "#999999", "1" = "#8edba3"),
labels = c("Negatív", "Semleges", "Pozitív"))+
labs(title = "Fidesz", fill="Szentiment")+
theme(plot.title = element_text(
size = 14,
face = "bold",
hjust = 0.5))+
theme(legend.position = "top")
######################
######## MSZP ########
######################
MSZP <- corpus[corpus$mszpp == 1 | corpus$MSZP == 1 | corpus$Párbeszéd == 1,]
### ord. logit ###
m <- polr(pred ~ months, data = MSZP, Hess=TRUE)
summary(m)
(ctable <- coef(summary(m)))
p <- pnorm(abs(ctable[, "t value"]), lower.tail = FALSE) * 2
(ctable <- cbind(ctable, "p value" = p))
(ci <- confint(m))
exp(cbind(OR = coef(m), ci))
### test odds proportional assumption ###
sf <- function(y) {
c('Y>=1' = qlogis(mean(y >= 1)),
'Y>=2' = qlogis(mean(y >= 2)),
'Y>=3' = qlogis(mean(y >= 3)))
}
(s <- with(MSZP, summary(as.numeric(pred) ~ months + side, fun=sf)))
s[, 4] <- s[, 4] - s[, 3]
s[, 3] <- s[, 3] - s[, 3]
(p_m1 <- plot(s, which=1:3, pch=1:3, xlab='logit', main=' ', xlim=range(s[,3:4])))
newdat <- data.frame(
months = rep(seq(from = 1, to = 6, length.out = 100), 4))
newdat <- cbind(newdat, predict(m, newdat, type = "probs"))
lnewdat <- melt(newdat, id.vars = c("months"),
variable.name = "Level", value.name="Probability")
p_m2 <- ggplot(lnewdat, aes(x = months, y = Probability, colour = Level)) +
geom_line() +
scale_colour_manual(
values = c("-1" = "#fa8072", "0" = "#999999", "1" = "#8edba3"),
labels = c("Negatív", "Semleges", "Pozitív"))+
labs(title = "MSZP", fill="Szentiment")+
theme(plot.title = element_text(
size = 14,
face = "bold",
hjust = 0.5))+
theme(legend.position = "top")
#######################
######## LMP ##########
#######################
LMP <- corpus[corpus$lmp==1,]
### ord. logit ###
l <- polr(pred ~ months, data = LMP, Hess=TRUE)
summary(l)
(ctable <- coef(summary(l)))
p <- pnorm(abs(ctable[, "t value"]), lower.tail = FALSE) * 2
(ctable <- cbind(ctable, "p value" = p))
(ci <- confint(l))
exp(cbind(OR = coef(l), ci))
### test odds proportional assumption ###
sf <- function(y) {
c('Y>=1' = qlogis(mean(y >= 1)),
'Y>=2' = qlogis(mean(y >= 2)),
'Y>=3' = qlogis(mean(y >= 3)))
}
(s <- with(LMP, summary(as.numeric(pred) ~ months, fun=sf)))
s[, 4] <- s[, 4] - s[, 3]
s[, 3] <- s[, 3] - s[, 3]
(p_l1 <- plot(s, which=1:3, pch=1:3, xlab='logit', main=' ', xlim=range(s[,3:4])))
newdat <- data.frame(
months = rep(seq(from = 1, to = 6, length.out = 100), 4))
newdat <- cbind(newdat, predict(l, newdat, type = "probs"))
lnewdat <- melt(newdat, id.vars = c("months"),
variable.name = "Level", value.name="Probability")
p_l2 <- ggplot(lnewdat, aes(x = months, y = Probability, colour = Level)) +
geom_line() +
scale_colour_manual(
values = c("-1" = "#fa8072", "0" = "#999999", "1" = "#8edba3"),
labels = c("Negatív", "Semleges", "Pozitív"))+
labs(title = "LMP", fill="Szentiment")+
theme(plot.title = element_text(
size = 14,
face = "bold",
hjust = 0.5))+
theme(legend.position = "top")
######################
######### DK #########
######################
DK <- corpus[corpus$DemKo == 1,]
### ord. logit ###
d <- polr(pred ~ months, data = DK, Hess=TRUE)
summary(d)
(ctable <- coef(summary(d)))
p <- pnorm(abs(ctable[, "t value"]), lower.tail = FALSE) * 2
(ctable <- cbind(ctable, "p value" = p))
(ci <- confint(d))
exp(cbind(OR = coef(d), ci))
### test odds proportional assumption ###
sf <- function(y) {
c('Y>=1' = qlogis(mean(y >= 1)),
'Y>=2' = qlogis(mean(y >= 2)),
'Y>=3' = qlogis(mean(y >= 3)))
}
(s <- with(DK, summary(as.numeric(pred) ~ months, fun=sf)))
s[, 4] <- s[, 4] - s[, 3]
s[, 3] <- s[, 3] - s[, 3]
(p_d1 <- plot(s, which=1:3, pch=1:3, xlab='logit', main=' ', xlim=range(s[,3:4])))
newdat <- data.frame(
months = rep(seq(from = 1, to = 6, length.out = 100), 4))
newdat <- cbind(newdat, predict(d, newdat, type = "probs"))
lnewdat <- melt(newdat, id.vars = c("months"),
variable.name = "Level", value.name="Probability")
DK$pred <- as.numeric(DK$pred)
anov1 <- aov(pred ~ months, data = DK)
summary(anov1)
DK$pred <- as.factor(DK$pred)
#####################
###### Jobbik #######
#####################
Jobbik <- corpus[corpus$Jobbik==1,]
### ord. logit ###
j <- polr(pred ~ months, data = Jobbik, Hess=TRUE)
summary(j)
(ctable <- coef(summary(j)))
p <- pnorm(abs(ctable[, "t value"]), lower.tail = FALSE) * 2
(ctable <- cbind(ctable, "p value" = p))
(ci <- confint(j))
exp(cbind(OR = coef(j), ci))
### test odds proportional assumption ###
sf <- function(y) {
c('Y>=1' = qlogis(mean(y >= 1)),
'Y>=2' = qlogis(mean(y >= 2)),
'Y>=3' = qlogis(mean(y >= 3)))
}
(s <- with(Jobbik, summary(as.numeric(pred) ~ months, fun=sf)))
s[, 4] <- s[, 4] - s[, 3]
s[, 3] <- s[, 3] - s[, 3]
(p_j1 <- plot(s, which=1:3, pch=1:3, xlab='logit', main=' ', xlim=range(s[,3:4])))
newdat <- data.frame(
months = rep(seq(from = 1, to = 6, length.out = 100), 4))
newdat <- cbind(newdat, predict(j, newdat, type = "probs"))
lnewdat <- melt(newdat, id.vars = c("months"),
variable.name = "Level", value.name="Probability")
Jobbik$pred <- as.numeric(Jobbik$pred)
anov2 <- aov(pred ~ months, data = Jobbik)
summary(anov2)
Jobbik$pred <- as.factor(Jobbik$pred)
setwd("~/Egyetemi/survey/Szakdolgozat/thesis_polsenti/script")
source("multiplot_fn.R")
multiplot(p_f2, p_m2, p_l2, cols = 3)
|
# H3K27ac density plot (Control): mean coverage profile around NCoR1 peak
# centers for the Empty-vector samples, one line per stimulation condition.
#
# df     : path to a tab-separated coverage histogram table.
# column : index of the columns to keep (distance column + coverage columns).
# Title  : plot title.
# Returns a ggplot object.
H3k27ac_density = function(df,column,Title){
dat1 <- read.csv(df,sep = "\t",header =T,na.strings = "NA")
df1 <- dat1[,column]
# Strip the long tag-directory prefixes/suffixes from the column names.
colname <- gsub("...NCoR1_H3K27ac_analysis.tag_dir.|...NCoR1_H3K27ac_Rep2_analysis.tag_dir.","",colnames(df1))
colname <- gsub("..Coverage","",colname)
colname[1] <- "Dist"
colnames(df1) <- colname
colnames(df1)[8] = "Emp_6hr_CpG_pIC"
colnames(df1)[3] = "Emp_0hr_Rep2"
# Long format, keeping only Dist plus the four Emp_* coverage columns.
# (A previous redundant full melt that was immediately overwritten has
# been removed.)
df2 <- reshape2::melt(df1[,c(1,2,4,6,8)],id="Dist",variable.name = "Stimulation")
df2$condition =gsub("Emp_|NCoR1_|_Rep2","",df2$Stimulation)
# Mean +/- sd per distance/condition (summarySE helper, defined elsewhere).
tgc <- summarySE(df2, measurevar="value", groupvars=c("Dist","condition"))
tgc$condition = factor(tgc$condition,levels = c("0hr","6hr_CpG","6hr_pIC","6hr_CpG_pIC"),labels = c("Uns","CpG","pIC","CpG+pIC"))
df2$condition = factor(df2$condition,levels = c("0hr","6hr_CpG","6hr_pIC","6hr_CpG_pIC"),labels = c("Uns","CpG","pIC","CpG+pIC"))
den <- tgc %>% ggplot(aes(x=Dist ,y=value,color=condition))+
geom_line(size=1) +
gg_theme+
theme_bw()+
# Shaded ribbon shows +/- 1 sd around the mean profile.
geom_ribbon(aes(ymax = value + sd, ymin = value - sd),
alpha = 0.5,
fill = "grey70",
colour=NA)+
gg_theme +
labs(x="Distance from NCoR1 peaks center (bp)",y="Normalized Tag Count")+
# BUG FIX: ggtitle() was a detached statement (missing "+" after labs()),
# so the title was silently dropped.
ggtitle(Title)
# BUG FIX: the function previously returned the undefined object `den1`
# (the plot is assigned to `den`), which errored at call time.
return(den)
}
# H3K27ac (Control and NCoR1): overlay Empty-vector vs NCoR1-knockdown
# coverage profiles around NCoR1 peak centers, faceted by stimulation.
# NOTE(review): this redefines H3k27ac_density and therefore replaces the
# control-only version above when the file is sourced top to bottom.
#
# df     : path to a tab-separated coverage histogram table.
# column : index of the columns to keep (distance column + coverage columns).
# Title  : plot title.
# Returns a ggplot object.
H3k27ac_density = function(df,column,Title){
dat1 <- read.csv(df,sep = "\t",header =T,na.strings = "NA")
df1 <- dat1[,column]
# Strip tag-directory prefixes/suffixes from the column names.
colname <- gsub("...NCoR1_H3K27ac_analysis.tag_dir.","",colnames(df1))
colname <- gsub("..Coverage","",colname)
colname[1] <- "Dist"
colnames(df1) <- colname
df2 <- reshape2::melt(df1,id="Dist",variable.name = "Stimulation")
df2$condition =gsub("Emp_|NCoR1_","",df2$Stimulation)
# Genotype = the part of the sample name before the first "_<digit>" segment.
df2$Genotype = gsub("_[0-9].*","",df2$Stimulation)
# NOTE(review): level "6h_CpG_pIC" (not "6hr_...") — confirm it matches the
# actual column naming; a mismatch would turn that condition into NA.
df2$condition = factor(df2$condition,levels = c("0hr","6hr_CpG","6hr_pIC","6h_CpG_pIC"),labels = c("Uns","CpG","pIC","CpG+pIC"))
den1 <- df2 %>% ggplot(aes(x=Dist ,y=value,color=Genotype))+
#geom_boxplot()+
geom_line(size=1) +
gg_theme+
facet_wrap(~condition,ncol = 4)+
theme_bw()+
#ylim(0,8)+
scale_color_manual(values = c("blue","red"))+
gg_theme +
labs(x="Distance from NCoR1 peaks center (bp)",y="Normalized Tag Count")+
ggtitle(Title)
return(den1)
}
# Tabulate peak counts per annotation category from a peakAnnoList entry:
# keeps only the text before the first " (" in the `annotation` column and
# returns the category frequencies as a data frame.
annotation = function(x){
anno_df <- dplyr::select(as.data.frame(x), annotation)
# Split "Category (details)" on " (" and keep only the category part.
categories <- separate(anno_df, annotation, c("A", "B"), " \\(",
extra = "drop", fill = "right")
as.data.frame(table(dplyr::select(categories, A)))
}
# Volcano plot
library(ggrepel)
# Volcano plot of differential-expression results with Up/Down counts annotated.
#
# df         : results table with Gene, log2FoldChange and padj columns.
# gene_label : gene symbols to label (currently unused — see note below).
# Title      : plot title.
# Returns a ggplot object.
Volcano.plot =function(df,gene_label,Title){
df <- df[which(df$log2FoldChange != "NA" & df$padj != "NA"),]
# Classify genes: |log2FC| >= 1 and padj <= 0.05 -> Up/Down, else No Change.
df <- df %>%
mutate(reg = case_when(
df$log2FoldChange >= 1 & df$padj <= 0.05 ~ "Up",
df$log2FoldChange <= -1 & df$padj <= 0.05 ~ "Down",
abs(df$log2FoldChange) <= 1 & df$padj >= 0.05 ~ "No Change",
abs(df$log2FoldChange) <= 1 & df$padj <= 0.05 ~ "No Change",
abs(df$log2FoldChange) > 1 & df$padj >0.05 ~ "No Change"
)) %>%
mutate(reg = factor(reg,
levels = c("Up", "No Change","Down")))
label <- gene_label
up_label = dim(df[which(df$reg =="Up"),])[1]
down_label = dim(df[which(df$reg =="Down"),])[1]
# Candidate rows for gene labels; currently unused because geom_text_repel
# is commented out below. (abs() on padj is redundant: padj >= 0.)
data= subset(df, Gene %in% label)
data= data[which(abs(data$log2FoldChange) >=1),]
data= data[which(abs(data$padj) <=0.05),]
df.plt <- ggplot(df,aes(x=log2FoldChange,y=-log10(padj),label = Gene))+
geom_point(aes(color=reg),size=0.5)+
scale_color_manual(name = "Differential \n regulation",
values = c("Down" = "blue",
"No Change" = "grey",
"Up" = "red"))+
theme_bw()+
#ylim(0,max(-log10(df$padj)))+
xlim(-6,11)+
xlab("log2 (Fold Change)")+
ylab("-log10(adj p-value)")+
gg_theme+
guides(colour = guide_legend(override.aes = list(size=3)))+
#geom_text_repel(
# data = data,
# size = 5,
# #nudge_y = 30,
# direction = "x",
# angle = 0,
# vjust = 0,
# #segment.size = 0.2,
# # #angle = 45,
# # #fill = data$stat1,
# #arrow = arrow(length = unit(0.01, "npc"), type = "closed", ends = "first"),
# nudge_y = 5 + data$log2FoldChange ,
# segment.size = 0.5,
# segment.color = "black"
# # direction = "x"
#) +
# Dashed guides at the fold-change and padj = 0.05 (-log10 = 1.3) cutoffs.
geom_vline(xintercept=c(-1,1), linetype="dashed",size=0.5)+
geom_hline(yintercept=c(1.3), linetype="dashed",size=0.5)
# Annotate the Down / Up gene counts at fixed plot positions.
df.plt <- df.plt +annotate("text", x = -4, y = max(-log10(df$padj)), label = down_label,color="blue",size=6)
df.plt + annotate("text", x = 8, y = max(-log10(df$padj)), label = up_label,color="red",size=6) +
ggtitle(Title)
}
# Scatter plot of H3K27ac signal between two conditions, coloured by
# differential status of the regions in `df`.
#
# df   : results table keyed by region id (rownames) with a log2FoldChange
#        column; merged by rownames with the global matrix H3K27ac_vst_df.
# x, y : column names (strings) of the two conditions to plot.
# Returns a ggplot object with Up/Down region counts annotated.
plot_h3k27ac_Emp = function(df,x,y){
plt = merge(df,H3K27ac_vst_df,by=0) %>%
#filter(log2FoldChange >=1 | log2FoldChange <= -1) %>%
# cut() bins log2FC into (-6,-1], (-1,1], (1,6] -> Down / No Change / Up;
# values outside [-6, 6] fall out of the bins and get the NA colour.
ggplot(.,aes_string(x=x,y=y))+
#geom_hex(bins = 70) +
geom_point(size=0.0001,aes(color=cut(log2FoldChange, c(-6, -1, 1, 6)))) +
scale_color_manual(values = c("blue","grey","red"),label=c("Down","No Change","Up")) +
theme_scatter #+guides(colour = guide_legend(override.aes = list(size=10)))
# Annotate the number of up- / down-regulated regions at fixed positions.
plt = plt + annotate("text", x = 6, y = 10.5, label = dim(df[which(df$log2FoldChange >=1),])[1],color="red",size=6)
plt = plt + annotate("text", x = 10.5, y = 6, label = dim(df[which(df$log2FoldChange <= -1),])[1],color="blue",size=6)
return(plt)
}
# Volcano plot for differential H3K27ac regions (no gene labels).
#
# df    : results table with log2FoldChange and padj columns.
# Title : plot title.
# Returns a ggplot object with Up/Down counts annotated at y = 65.
H3K27ac.Volcano.plot =function(df,Title){
df <- df[which(df$log2FoldChange != "NA" & df$padj != "NA"),]
# Classify regions: |log2FC| >= 1 and padj < 0.05 -> Up/Down, else No Change.
df <- df %>%
mutate(reg = case_when(
df$log2FoldChange >= 1 & df$padj < 0.05 ~ "Up",
df$log2FoldChange <= -1 & df$padj < 0.05 ~ "Down",
abs(df$log2FoldChange) < 1 & df$padj >= 0.05 ~ "No Change",
abs(df$log2FoldChange) < 1 & df$padj <= 0.05 ~ "No Change",
abs(df$log2FoldChange) > 1 & df$padj >0.05 ~ "No Change"
)) %>%
mutate(reg = factor(reg,
levels = c("Up", "No Change","Down"))) #%>%
#filter(reg != "No Change")
up_label = dim(df[which(df$reg =="Up"),])[1]
down_label = dim(df[which(df$reg =="Down"),])[1]
df.plt <- ggplot(df,aes(x=log2FoldChange,y=-log10(padj)))+
geom_point(aes(color=reg),size=0.08)+
scale_color_manual(name = "Differential \n regulation",
values = c("Down" = "blue",
"No Change" = "grey",
"Up" = "red"))+
theme_bw()+
#ylim(0,max(-log10(df$padj)))+
ylim(0,70)+
#xlim(min(df$log2FoldChange),max(df$log2FoldChange))+
xlim(-5,5)+
xlab("log2 (Fold Change)")+
ylab("-log10(adj p-value)")+
gg_theme+
guides(colour = guide_legend(override.aes = list(size=3)))+
# Dashed guides at the fold-change and padj = 0.05 (-log10 = 1.3) cutoffs.
geom_vline(xintercept=c(-1,1), linetype="dashed",size=0.5)+
geom_hline(yintercept=c(1.3), linetype="dashed",size=0.5)
df.plt <- df.plt +annotate("text", x = -4.5, y = 65, label = down_label,color="blue",size=6)
df.plt + annotate("text", x = 4.5, y = 65, label = up_label,color="red",size=6) +
ggtitle(Title)
}
# Volcano plot for differential H3K27ac in the knockdown comparison.
# Same logic as H3K27ac.Volcano.plot above, but with axis limits sized for
# this comparison (y in [0, 15], x in [-3, 5]) and counts annotated at y = 14.
#
# df    : results table with log2FoldChange and padj columns.
# Title : plot title.
# Returns a ggplot object.
KD.H3K27ac.Volcano.plot =function(df,Title){
df <- df[which(df$log2FoldChange != "NA" & df$padj != "NA"),]
# Classify regions: |log2FC| >= 1 and padj < 0.05 -> Up/Down, else No Change.
df <- df %>%
mutate(reg = case_when(
df$log2FoldChange >= 1 & df$padj < 0.05 ~ "Up",
df$log2FoldChange <= -1 & df$padj < 0.05 ~ "Down",
abs(df$log2FoldChange) < 1 & df$padj >= 0.05 ~ "No Change",
abs(df$log2FoldChange) < 1 & df$padj <= 0.05 ~ "No Change",
abs(df$log2FoldChange) > 1 & df$padj >0.05 ~ "No Change"
)) %>%
mutate(reg = factor(reg,
levels = c("Up", "No Change","Down"))) #%>%
#filter(reg != "No Change")
up_label = dim(df[which(df$reg =="Up"),])[1]
down_label = dim(df[which(df$reg =="Down"),])[1]
df.plt <- ggplot(df,aes(x=log2FoldChange,y=-log10(padj)))+
geom_point(aes(color=reg),size=0.08)+
scale_color_manual(name = "Differential \n regulation",
values = c("Down" = "blue",
"No Change" = "grey",
"Up" = "red"))+
theme_bw()+
#ylim(0,max(-log10(df$padj)))+
ylim(0,15)+
#xlim(min(df$log2FoldChange),max(df$log2FoldChange))+
xlim(-3,5)+
xlab("log2 (Fold Change)")+
ylab("-log10(adj p-value)")+
gg_theme+
guides(colour = guide_legend(override.aes = list(size=3)))+
geom_vline(xintercept=c(-1,1), linetype="dashed",size=0.5)+
geom_hline(yintercept=c(1.3), linetype="dashed",size=0.5)
df.plt <- df.plt +annotate("text", x = -2.5, y =14, label = down_label,color="blue",size=6)
df.plt + annotate("text", x = 2.5, y = 14, label = up_label,color="red",size=6) +
ggtitle(Title)
}
###############################################################################
# Pathway enrichemnt analysis (fgsea)
library(fgsea)
# fgsea pathway enrichment (Reactome) on significant DE genes, plotting the
# requested rows of the tidied result as a NES bar chart.
#
# df   : results table with Gene, log2FoldChange and padj columns.
# rows : row indices of the tidied fgsea output to plot.
# Returns a ggplot object.
DE_fgsea = function(df,rows){
df <- df[which(abs(df$log2FoldChange) >=1 & df$padj <= 0.05 ),]
# Ranking metric: -log10(padj) signed by fold-change direction.
df$fcsign <- sign(df$log2FoldChange)
df$logP=-log10(df$padj)
df$metric= df$logP/df$fcsign
if("Gene" %in% colnames(df)){
df$Gene <- toupper(df$Gene)
}
df <-df[,c("Gene", "metric")]
ranks <- deframe(df)
ranks <- ranks[!duplicated(names(ranks))]
# NOTE(review): numeric-vs-string comparison; it drops +Inf (padj == 0)
# but keeps -Inf — is.finite(ranks) was probably intended. Confirm.
ranks <- ranks[which(ranks != "Inf")]
# head(ranks, 20)
# NOTE(review): hard-coded, machine-specific gmt path.
pathways.hallmark <- gmtPathways("/home/imgsb/tools/GSEA/c2.cp.reactome.v7.1.symbols.gmt")
#pathways.hallmark %>% head() %>% lapply(head)
register(SerialParam())
fgseaRes <- fgsea(pathways=pathways.hallmark, stats=ranks, nperm=10000)
# Keep nominally significant pathways, ordered by enrichment score.
fgseaResTidy <- fgseaRes %>% as_tibble() %>% arrange(desc(NES)) %>% as.data.frame() %>% filter(pval<0.05)
rownames(fgseaResTidy) = seq(length=nrow(fgseaResTidy))
# Show in a nice table:
#fgseaResTidy %>%
# dplyr::select(-ES, -nMoreExtreme) %>%
# filter(pval <=0.05) %>% View()
# arrange(padj) %>%
# DT::datatable()
fgseaResTidy %>% #View()
slice(rows) %>%
mutate(pathway = gsub("REACTOME|_"," ",pathway),
reg=case_when(NES >0 ~ "Up",
NES < 0 ~ "Down")) %>%
ggplot(aes(reorder(pathway, NES), NES,fill=reg)) +
geom_bar(stat="identity") +
coord_flip() +
ylab("") +
ylab("Normalized Enrichment Score")+
scale_fill_manual(values = c("blue","red"))+
scale_x_discrete(labels = function(x) str_wrap(x, width = 40))+
scale_y_continuous(expand = c(0,0)) +
gg_theme
}
# pathway enrichment from clusterpofileR
library(tidytext)
# Reactome over-representation analysis for a list of gene sets
# (clusterProfiler::compareCluster with fun = "enricher"), plotting the top
# 5 terms per cluster as -log(pvalue) bars.
#
# gene_set : named list of character vectors of gene symbols.
# Returns a ggplot object.
Gene_Set_Enrichment = function(gene_set){
# NOTE(review): hard-coded, machine-specific gmt path.
term2gene = read.gmt("/home/imgsb/tools/GSEA/c2.cp.reactome.v7.1.symbols.gmt")
# Reactome gene sets use upper-case (human) symbols.
gen_set =lapply(gene_set, toupper)
# BUG FIX: previously this enriched the global object
# `diff_NCoR1.annotation.df.list_toupper`, silently ignoring the
# `gene_set` argument; it now uses the upper-cased argument.
compareCluster(gen_set, fun="enricher",TERM2GENE=term2gene) %>% as.data.frame() %>%
group_by(Cluster) %>%
slice(1:5) %>%                       # top 5 terms per cluster
ungroup %>%
mutate(Cluster = factor(Cluster,levels = sort(unique(Cluster))),
Description = gsub("REACTOME|_"," ",Description)) %>%
ggplot(aes(x=reorder(Description,-pvalue), y=-log(pvalue), fill = p.adjust)) +
geom_bar(stat="identity") +
facet_wrap(~Cluster, scales = "free",ncol = 1) +
coord_flip() +
scale_x_discrete(labels = function(x) str_wrap(x, width = 40))+
scale_fill_gradient(low="red",high="blue")+
scale_y_continuous(expand = c(0,0)) +
gg_theme
}
# Scatter plot of RNA-seq vs H3K27ac log2 fold changes with a Pearson
# correlation label, linear fit, and repelled labels for selected genes.
#
# df     : data frame with the two fold-change columns, a `Genes` grouping
#          column and a `SYMBOL` column.
# x, y   : column names (strings) of the fold changes to plot.
# SYMBOL : NOTE(review): unused parameter kept for interface compatibility —
#          the label filter below refers to the `SYMBOL` column of `df` and
#          the global `labRow`; confirm and consider removing.
# Returns a ggplot object.
plot_RNaseq_h3k27ac_scatter = function(df,x,y,SYMBOL){
ggplot(df,aes_string(x=x,y=y))+
geom_point(aes(color=Genes),size=0.2) +
stat_cor()+
geom_smooth(method="lm",se = TRUE,color="grey")+
scale_color_manual(values = c("blue","darkgreen","red"))+
xlim(-2,2)+ylim(-2,2)+
geom_vline(xintercept = 0,linetype = "dashed",size = 0.5) +
geom_hline(yintercept = 0, linetype = "dashed",size = 0.5) +
gg_theme+
# BUG FIX: removed a stray duplicated comma that passed an empty argument
# to geom_label_repel().
geom_label_repel(data = subset(df,SYMBOL %in% labRow),
aes(label = SYMBOL,fill=factor(Genes),segment.colour = factor(Genes)),
color = "white" ,fontface="italic",box.padding = 0.5,max.overlaps = 1000,
size = 3.5,segment.size = 0.2)+
scale_fill_manual(values = c("blue","darkgreen","red"),aesthetics = c("fill", "segment.color"))
}
# plot H3K27ac differential enhancer scatter plot
# Shared ggplot theme for the scatter plots below: 15 pt black axis text and
# titles, legend anchored top-left with no title, centred plot title.
theme_scatter = theme_bw(20) +
theme(axis.text.x=element_text(size=15,color="black"),
axis.title.x=element_text(size=15),
axis.text.y=element_text(size=15,color="black"),
axis.title.y=element_text(size=15),
legend.justification=c(0,1),
legend.title = element_blank(),
plot.title = element_text(hjust = 0.5,size=15))
# Scatter plot of H3K27ac signal between two conditions, coloured by
# differential status; like plot_h3k27ac_Emp above but merged with the
# global rlog-transformed matrix H3K27ac_Rlog.df and with a larger point
# size and enlarged legend keys.
#
# df   : results table keyed by region id (rownames) with a log2FoldChange column.
# x, y : column names (strings) of the two conditions to plot.
# Returns a ggplot object with Up/Down region counts annotated.
plot_h3k27ac = function(df,x,y){
plt = merge(df,H3K27ac_Rlog.df,by=0) %>%
# cut() bins log2FC into (-6,-1], (-1,1], (1,6] -> Down / No Change / Up.
ggplot(.,aes_string(x=x,y=y))+
geom_point(size=0.1,aes(color=cut(log2FoldChange, c(-6, -1, 1, 6)))) +
scale_color_manual(values = c("blue","grey","red"),label=c("Down","No Change","Up")) +
theme_scatter +guides(colour = guide_legend(override.aes = list(size=10)))
# Annotate the number of up- / down-regulated regions at fixed positions.
plt = plt + annotate("text", x = 6, y = 10.5, label = dim(df[which(df$log2FoldChange >=1),])[1],color="red",size=6)
plt = plt + annotate("text", x = 10.5, y = 6, label = dim(df[which(df$log2FoldChange <= -1),])[1],color="blue",size=6)
return(plt)
}
#intersection
# Partition the first three gene sets of `gene.df` (a named list of character
# vectors) into: the three-way intersection ("Common"), each set's exclusive
# genes (keyed by the set's own name), and the three pairwise intersections
# minus the common core (fixed keys: "CpG_pIC_all3_vs_CpG_all3",
# "CpG_all3_vs_Uns_pIC", "Uns_pIC_vs_CpG_pIC_all3").
# Returns a named list; element order matches the original insertion order.
intersection = function(gene.df) {
  set1 <- gene.df[[1]]
  set2 <- gene.df[[2]]
  set3 <- gene.df[[3]]
  # Genes shared by all three sets.
  shared_all <- intersect(intersect(set1, set2), set3)
  out <- list()
  # Pair (1,2) overlap, excluding the three-way core.
  out[["CpG_pIC_all3_vs_CpG_all3"]] <- setdiff(intersect(set1, set2), shared_all)
  # Genes exclusive to set 1.
  out[[names(gene.df)[1]]] <- setdiff(setdiff(set1, set2), set3)
  out[["Common"]] <- shared_all
  # Pair (2,3) overlap, excluding the core.
  out[["CpG_all3_vs_Uns_pIC"]] <- setdiff(intersect(set2, set3), shared_all)
  # Genes exclusive to set 2.
  out[[names(gene.df)[2]]] <- setdiff(setdiff(set2, set3), set1)
  # Pair (3,1) overlap, excluding the core.
  out[["Uns_pIC_vs_CpG_pIC_all3"]] <- setdiff(intersect(set3, set1), shared_all)
  # Genes exclusive to set 3.
  out[[names(gene.df)[3]]] <- setdiff(setdiff(set3, set2), set1)
  return(out)
}
# Generate expression bar plot of any genes based on only gene name
# Bar plot (mean +/- sd across replicates) of normalized expression for the
# given gene(s), Emp vs NCoR1, faceted by stimulation.
# Relies on globals: Emp_NCoR1_Normalized_count (count matrix whose column
# names look like <condition>_<stimulation>_R<rep>) and summarySE().
norm_count_bar_plot = function(gene){
gene_exp = Emp_NCoR1_Normalized_count[gene,] %>% reshape2::melt()
# Collapse replicate suffixes, then split sample names into condition
# (Emp/NCoR1) and stimulation parts.
gene_exp$variable = gsub('_R2|_R1',"",gene_exp$variable)
gene_exp$condition = gsub('_.*',"",gene_exp$variable)
gene_exp$stimulation = gsub('Emp_|NCoR1_',"",gene_exp$variable)
gene_exp$condition <- factor(gene_exp$condition,levels = c("Emp","NCoR1"))
gene_exp$stimulation <- factor(gene_exp$stimulation,levels = c("Uns","IFNg","6hr_CpG","6hr_pIC","6hr_all3"))
# Mean/sd per condition x stimulation for the error bars.
tgc <- summarySE(gene_exp, measurevar="value", groupvars=c("condition","stimulation"))
print(ggplot(tgc,aes(x=stimulation,y=value,fill=condition)) +
geom_bar(stat="identity",position=position_dodge()) +
facet_wrap(~stimulation,scales = "free_x",ncol=5)+
geom_errorbar( aes(ymin=value-sd, ymax=value+sd), width=.2,position=position_dodge(.9))+
scale_fill_manual(values=c("blue","red"))+
#scale_color_hue()+
theme_bw()+
theme(axis.text.x=element_blank(),
axis.text.y=element_text(size=10,colour = "black",face= "bold"),
axis.title.y=element_text(size=10,colour = "black",face= "bold"),
plot.title = element_text(size = 20, face = "italic",hjust = 0.5),
legend.text = element_text(size=10,colour = "black"),
#legend.position = "none",
strip.text = element_text(size = 15,face="bold")) +
labs(x = "",y="Normalized count",label =FALSE,title = "") +
ggtitle(gene))
}
# Average replicate pairs: columns of `col1` are taken two at a time
# (1&2, 3&4, ...) and replaced by their row-wise mean. The merged column is
# named after the first column of the pair with its "_R<digit>" replicate
# suffix removed. Row names are preserved.
# col1: numeric data frame / matrix with an even number of columns, ordered
#       so that replicates are adjacent.
# Returns: data frame with ncol(col1) %/% 2 columns.
merge_rep= function(col1){
  N <- ncol(col1)
  obj <- vector("list", N %/% 2)
  k <- 1
  for (i in seq_len(N)) {
    # Start a pair on each odd index; the `i + 1 <= N` guard prevents an
    # out-of-bounds index when N is odd (the old `i <= N` check was vacuous,
    # so a trailing unpaired column caused an error). A trailing odd column
    # is now skipped.
    if (i %% 2 == 1 && i + 1 <= N) {
      pair_mean <- rowMeans(col1[, c(i, i + 1)])
      obj[[k]] <- pair_mean
      # Strip the replicate suffix, e.g. "Emp_Uns_R1" -> "Emp_Uns".
      # (sub() replaces the first match, matching the old str_replace call.)
      nam <- sub("_R[0-9]", "", colnames(col1)[i])
      names(obj)[k] <- nam
      names(obj[[k]]) <- rownames(col1)
      k <- k + 1
    }
  }
  # Stack the per-pair mean vectors and transpose back to genes x samples.
  mat_merged <- as.data.frame(t(do.call(rbind, obj)))
  colnames(mat_merged) = names(obj)
  return(mat_merged)
}
# One density-coloured scatter plot per replicate pair: columns of `col1`
# are taken two at a time and plotted Replicate 1 vs Replicate 2 with a
# Pearson correlation label. Returns the list of ggplot objects.
# Relies on globals: get_density(); viridis (scale_color_viridis) and
# ggpubr (stat_cor) must be loaded.
H3K27ac_Rep_scatter_plot= function(col1){
N <- ncol(col1)
name <- colnames(col1)
# Sample titles: replicate suffixes "_1"/"_2" removed, one per pair.
name = gsub("_1|_2","",name)
name = unique(name)
k=1
plotList = list()
for(i in 1:N) {
# Start a pair on every odd column. NOTE(review): `i <= N` is always TRUE;
# an odd number of columns would index past the last column — confirm
# callers always pass an even column count.
if(i%%2 ==1 && i <= N) {
#print(i)
tmp = col1[,c(i,i+1)]
x= colnames(tmp)[1]
y= colnames(tmp)[2]
# NOTE(review): tmp[,1] is a vector, so colnames() returns NULL and `nam`
# is empty; it is not used afterwards.
nam <- str_replace(colnames(tmp[,1]),"_1","")
# 2D kernel density estimate used to colour the points.
tmp$density = get_density(tmp[,1], tmp[,2], n = 100)
p = ggplot(tmp) +
geom_point(aes_string(x=x, y=y, color = "density"),size =0.1) +
scale_color_viridis()+
ggtitle(name[k]) +
xlab("Replicate 1") + ylab("Replicate 2") +
xlim(4,12) +ylim(4,12)
plotList[[k]] = p + stat_cor(aes_string(x=x, y=y),method = "pearson", label.x = 4.5, label.y = 11.5)
print(k)
k=k+1
}
}
#mat_merged <- as.data.frame(t(do.call(rbind, obj)))
#colnames(mat_merged) = names(obj)
return(plotList)
}
|
/R/Functions.R
|
no_license
|
sraghav-lab/NCoR1-TLR9-TLR3-Project
|
R
| false
| false
| 19,219
|
r
|
# H3K27ac density plot (Control)
# H3K27ac density (meta-profile) around NCoR1 peak centers, control (Emp)
# samples only: mean +/- sd tag count per distance bin and condition.
# df: path to a tab-separated coverage matrix (tag-dir histogram output);
# column: indices of the columns to keep, the first being the distance bin;
# Title: plot title. Relies on globals: summarySE(), gg_theme.
H3k27ac_density = function(df,column,Title){
  dat1 <- read.csv(df, sep = "\t", header = T, na.strings = "NA")
  df1 <- dat1[, column]
  # Strip tag-directory path prefixes and the "..Coverage" suffix to get
  # clean sample labels; first column is the distance bin.
  colname <- gsub("...NCoR1_H3K27ac_analysis.tag_dir.|...NCoR1_H3K27ac_Rep2_analysis.tag_dir.", "", colnames(df1))
  colname <- gsub("..Coverage", "", colname)
  colname[1] <- "Dist"
  colnames(df1) <- colname
  colnames(df1)[8] = "Emp_6hr_CpG_pIC"
  colnames(df1)[3] = "Emp_0hr_Rep2"
  # Long format for the selected Emp samples (columns 1,2,4,6,8). A prior
  # whole-table melt here was dead code and has been removed.
  df2 <- reshape2::melt(df1[, c(1, 2, 4, 6, 8)], id = "Dist", variable.name = "Stimulation")
  df2$condition = gsub("Emp_|NCoR1_|_Rep2", "", df2$Stimulation)
  # Mean and sd per distance bin and condition (across replicates).
  tgc <- summarySE(df2, measurevar = "value", groupvars = c("Dist", "condition"))
  tgc$condition = factor(tgc$condition, levels = c("0hr", "6hr_CpG", "6hr_pIC", "6hr_CpG_pIC"), labels = c("Uns", "CpG", "pIC", "CpG+pIC"))
  df2$condition = factor(df2$condition, levels = c("0hr", "6hr_CpG", "6hr_pIC", "6hr_CpG_pIC"), labels = c("Uns", "CpG", "pIC", "CpG+pIC"))
  # (A gg_theme added before theme_bw() was completely overridden by it and
  # has been dropped; the gg_theme after the ribbon is the effective one.)
  den <- tgc %>% ggplot(aes(x = Dist, y = value, color = condition)) +
    geom_line(size = 1) +
    theme_bw() +
    geom_ribbon(aes(ymax = value + sd, ymin = value - sd),
                alpha = 0.5,
                fill = "grey70",
                colour = NA) +
    gg_theme +
    labs(x = "Distance from NCoR1 peaks center (bp)", y = "Normalized Tag Count") +
    # FIX: ggtitle was a detached statement (missing "+"), so the title was
    # silently discarded.
    ggtitle(Title)
  # FIX: previously returned the undefined object `den1`, which errored.
  return(den)
}
# H3K27ac (Control and NCoR1)
# H3K27ac density (meta-profile) around NCoR1 peak centers, comparing Emp vs
# NCoR1 genotypes, faceted by stimulation.
# NOTE(review): this redefines H3k27ac_density and shadows the Control-only
# version defined earlier in the file; only this definition is reachable
# after sourcing.
# df: path to a tab-separated coverage matrix; column: column indices to
# keep (first = distance bin); Title: plot title. Uses global gg_theme.
H3k27ac_density = function(df,column,Title){
dat1 <- read.csv(df,sep = "\t",header =T,na.strings = "NA")
df1 <- dat1[,column]
# Strip the tag-directory path prefix and Coverage suffix from sample names.
colname <- gsub("...NCoR1_H3K27ac_analysis.tag_dir.","",colnames(df1))
colname <- gsub("..Coverage","",colname)
colname[1] <- "Dist"
colnames(df1) <- colname
df2 <- reshape2::melt(df1,id="Dist",variable.name = "Stimulation")
# Split sample name into stimulation condition and genotype (Emp / NCoR1).
df2$condition =gsub("Emp_|NCoR1_","",df2$Stimulation)
df2$Genotype = gsub("_[0-9].*","",df2$Stimulation)
# NOTE(review): level "6h_CpG_pIC" (not "6hr_...") looks like a typo; if the
# data uses "6hr_CpG_pIC" those rows become NA — confirm the spelling.
df2$condition = factor(df2$condition,levels = c("0hr","6hr_CpG","6hr_pIC","6h_CpG_pIC"),labels = c("Uns","CpG","pIC","CpG+pIC"))
den1 <- df2 %>% ggplot(aes(x=Dist ,y=value,color=Genotype))+
#geom_boxplot()+
geom_line(size=1) +
gg_theme+
facet_wrap(~condition,ncol = 4)+
theme_bw()+
#ylim(0,8)+
scale_color_manual(values = c("blue","red"))+
gg_theme +
labs(x="Distance from NCoR1 peaks center (bp)",y="Normalized Tag Count")+
ggtitle(Title)
return(den1)
}
# extract peak count from peakAnnolist
# Tabulate peak annotation categories from a peak-annotation result.
# The `annotation` column is truncated at the first " (" (e.g.
# "Intron (uc007...)" becomes "Intron"), and the counts of each category are
# returned as a data frame.
annotation = function(x) {
  ann_df <- as.data.frame(x)
  ann_df <- dplyr::select(ann_df, annotation)
  # Split at " (", keeping only the leading category text in column A.
  ann_df <- separate(ann_df, annotation, c("A", "B"), " \\(",
                     extra = "drop", fill = "right")
  category <- dplyr::select(ann_df, A)
  as.data.frame(table(category))
}
# Volcano plot
library(ggrepel)
# Volcano plot of differential expression results with up/down counts.
# df: data frame with Gene, log2FoldChange and padj columns.
# gene_label: genes eligible for labelling (labelling layer currently
#             disabled below). Title: plot title. Uses global gg_theme.
Volcano.plot =function(df,gene_label,Title){
  # Keep rows where both statistics are present. (The previous
  # `df$padj != "NA"` string comparisons only worked because which() drops
  # the NA results of the comparison.)
  df <- df[!is.na(df$log2FoldChange) & !is.na(df$padj), ]
  # Classify genes: |log2FC| >= 1 and padj <= 0.05 => Up/Down; everything
  # else is "No Change" (the original five case_when branches were jointly
  # exhaustive and equivalent to this fallback).
  df <- df %>%
    mutate(reg = case_when(
      log2FoldChange >= 1 & padj <= 0.05 ~ "Up",
      log2FoldChange <= -1 & padj <= 0.05 ~ "Down",
      TRUE ~ "No Change"
    )) %>%
    mutate(reg = factor(reg,
                        levels = c("Up", "No Change", "Down")))
  # Counts displayed in the plot corners.
  up_label = dim(df[which(df$reg == "Up"),])[1]
  down_label = dim(df[which(df$reg == "Down"),])[1]
  # Significant members of gene_label; kept for the optional ggrepel
  # labelling layer (disabled below). padj is non-negative, so the old
  # abs(padj) was redundant.
  data = subset(df, Gene %in% gene_label)
  data = data[which(abs(data$log2FoldChange) >= 1),]
  data = data[which(data$padj <= 0.05),]
  df.plt <- ggplot(df,aes(x=log2FoldChange,y=-log10(padj),label = Gene))+
    geom_point(aes(color=reg),size=0.5)+
    scale_color_manual(name = "Differential \n regulation",
                       values = c("Down" = "blue",
                                  "No Change" = "grey",
                                  "Up" = "red"))+
    theme_bw()+
    xlim(-6,11)+
    xlab("log2 (Fold Change)")+
    ylab("-log10(adj p-value)")+
    gg_theme+
    guides(colour = guide_legend(override.aes = list(size=3)))+
    # (An optional geom_text_repel(data = data, ...) labelling layer was
    # commented out here; re-add it if gene labels are wanted.)
    geom_vline(xintercept=c(-1,1), linetype="dashed",size=0.5)+
    geom_hline(yintercept=c(1.3), linetype="dashed",size=0.5)
  # Down count at the left, up count at the right, both near the top.
  df.plt <- df.plt + annotate("text", x = -4, y = max(-log10(df$padj)), label = down_label,color="blue",size=6)
  df.plt + annotate("text", x = 8, y = max(-log10(df$padj)), label = up_label,color="red",size=6) +
    ggtitle(Title)
}
# Scatter of H3K27ac signal (vst-transformed) for Emp samples, coloured by
# differential status from cutting log2FoldChange at -1 and 1.
# df: differential results with rownames matching H3K27ac_vst_df.
# x, y: column names (strings). Relies on globals: H3K27ac_vst_df,
# theme_scatter.
plot_h3k27ac_Emp = function(df,x,y){
# by=0 joins on rownames.
plt = merge(df,H3K27ac_vst_df,by=0) %>%
#filter(log2FoldChange >=1 | log2FoldChange <= -1) %>%
ggplot(.,aes_string(x=x,y=y))+
#geom_hex(bins = 70) +
geom_point(size=0.0001,aes(color=cut(log2FoldChange, c(-6, -1, 1, 6)))) +
scale_color_manual(values = c("blue","grey","red"),label=c("Down","No Change","Up")) +
theme_scatter #+guides(colour = guide_legend(override.aes = list(size=10)))
# Up/down region counts annotated in the plot corners.
plt = plt + annotate("text", x = 6, y = 10.5, label = dim(df[which(df$log2FoldChange >=1),])[1],color="red",size=6)
plt = plt + annotate("text", x = 10.5, y = 6, label = dim(df[which(df$log2FoldChange <= -1),])[1],color="blue",size=6)
return(plt)
}
# Volcano plot for H3K27ac differential enhancers with up/down counts.
# df: data frame with log2FoldChange and padj columns; Title: plot title.
# Uses global gg_theme.
H3K27ac.Volcano.plot =function(df,Title){
  # Keep rows with both statistics present (idiomatic replacement for the
  # accidental `!= "NA"` string comparisons, which relied on which()
  # dropping NAs).
  df <- df[!is.na(df$log2FoldChange) & !is.na(df$padj), ]
  # Classify regions; thresholds |log2FC| >= 1, padj < 0.05. The branch set
  # is kept verbatim: rows with log2FC exactly +/-1 and padj >= 0.05 fall
  # through to NA, as before — confirm whether that edge is intended.
  df <- df %>%
    mutate(reg = case_when(
      df$log2FoldChange >= 1 & df$padj < 0.05 ~ "Up",
      df$log2FoldChange <= -1 & df$padj < 0.05 ~ "Down",
      abs(df$log2FoldChange) < 1 & df$padj >= 0.05 ~ "No Change",
      abs(df$log2FoldChange) < 1 & df$padj <= 0.05 ~ "No Change",
      abs(df$log2FoldChange) > 1 & df$padj >0.05 ~ "No Change"
    )) %>%
    mutate(reg = factor(reg,
                        levels = c("Up", "No Change","Down")))
  # Counts displayed near the top corners.
  up_label = dim(df[which(df$reg =="Up"),])[1]
  down_label = dim(df[which(df$reg =="Down"),])[1]
  df.plt <- ggplot(df,aes(x=log2FoldChange,y=-log10(padj)))+
    geom_point(aes(color=reg),size=0.08)+
    scale_color_manual(name = "Differential \n regulation",
                       values = c("Down" = "blue",
                                  "No Change" = "grey",
                                  "Up" = "red"))+
    theme_bw()+
    ylim(0,70)+
    xlim(-5,5)+
    xlab("log2 (Fold Change)")+
    ylab("-log10(adj p-value)")+
    gg_theme+
    guides(colour = guide_legend(override.aes = list(size=3)))+
    geom_vline(xintercept=c(-1,1), linetype="dashed",size=0.5)+
    geom_hline(yintercept=c(1.3), linetype="dashed",size=0.5)
  df.plt <- df.plt +annotate("text", x = -4.5, y = 65, label = down_label,color="blue",size=6)
  df.plt + annotate("text", x = 4.5, y = 65, label = up_label,color="red",size=6) +
    ggtitle(Title)
}
# Volcano plot for NCoR1-KD H3K27ac differential regions.
# NOTE(review): near-duplicate of H3K27ac.Volcano.plot differing only in the
# axis limits and annotation coordinates — consider parameterizing.
# df: data frame with log2FoldChange and padj; Title: plot title.
KD.H3K27ac.Volcano.plot =function(df,Title){
# Drops NA rows: `x != "NA"` evaluates to NA for missing values and which()
# drops them; !is.na() would be the idiomatic form.
df <- df[which(df$log2FoldChange != "NA" & df$padj != "NA"),]
# Classify regions at |log2FC| >= 1, padj < 0.05.
df <- df %>%
mutate(reg = case_when(
df$log2FoldChange >= 1 & df$padj < 0.05 ~ "Up",
df$log2FoldChange <= -1 & df$padj < 0.05 ~ "Down",
abs(df$log2FoldChange) < 1 & df$padj >= 0.05 ~ "No Change",
abs(df$log2FoldChange) < 1 & df$padj <= 0.05 ~ "No Change",
abs(df$log2FoldChange) > 1 & df$padj >0.05 ~ "No Change"
)) %>%
mutate(reg = factor(reg,
levels = c("Up", "No Change","Down"))) #%>%
#filter(reg != "No Change")
# Counts displayed near the top corners.
up_label = dim(df[which(df$reg =="Up"),])[1]
down_label = dim(df[which(df$reg =="Down"),])[1]
df.plt <- ggplot(df,aes(x=log2FoldChange,y=-log10(padj)))+
geom_point(aes(color=reg),size=0.08)+
scale_color_manual(name = "Differential \n regulation",
values = c("Down" = "blue",
"No Change" = "grey",
"Up" = "red"))+
theme_bw()+
#ylim(0,max(-log10(df$padj)))+
ylim(0,15)+
#xlim(min(df$log2FoldChange),max(df$log2FoldChange))+
xlim(-3,5)+
xlab("log2 (Fold Change)")+
ylab("-log10(adj p-value)")+
gg_theme+
guides(colour = guide_legend(override.aes = list(size=3)))+
geom_vline(xintercept=c(-1,1), linetype="dashed",size=0.5)+
geom_hline(yintercept=c(1.3), linetype="dashed",size=0.5)
df.plt <- df.plt +annotate("text", x = -2.5, y =14, label = down_label,color="blue",size=6)
df.plt + annotate("text", x = 2.5, y = 14, label = up_label,color="red",size=6) +
ggtitle(Title)
}
###############################################################################
# Pathway enrichemnt analysis (fgsea)
library(fgsea)
# Ranked gene-set enrichment (fgsea) of significant DE genes against the
# Reactome collection, plotted as an NES bar chart for the requested rows.
# df: DE results with Gene, log2FoldChange, padj columns.
# rows: row indices (after sorting by NES, pval < 0.05) to plot.
# Uses global gg_theme; requires fgsea, tibble, dplyr, stringr, BiocParallel.
DE_fgsea = function(df,rows){
  # Significant genes only.
  df <- df[which(abs(df$log2FoldChange) >= 1 & df$padj <= 0.05),]
  # Ranking metric: -log10(padj) signed by the fold-change direction.
  df$fcsign <- sign(df$log2FoldChange)
  df$logP = -log10(df$padj)
  df$metric = df$logP/df$fcsign
  if("Gene" %in% colnames(df)){
    # Reactome gmt symbols are upper case.
    df$Gene <- toupper(df$Gene)
  }
  df <- df[,c("Gene", "metric")]
  ranks <- deframe(df)
  ranks <- ranks[!duplicated(names(ranks))]
  # FIX: padj == 0 produces +/-Inf metrics. The old string comparison
  # `ranks != "Inf"` removed +Inf but silently kept -Inf (deparsed "-Inf").
  # is.finite() drops both infinities (and NaN).
  ranks <- ranks[is.finite(ranks)]
  # NOTE(review): hard-coded gene-set path; nperm-based fgsea is deprecated
  # in newer fgsea releases in favour of fgseaMultilevel — confirm version.
  pathways.hallmark <- gmtPathways("/home/imgsb/tools/GSEA/c2.cp.reactome.v7.1.symbols.gmt")
  register(SerialParam())
  fgseaRes <- fgsea(pathways=pathways.hallmark, stats=ranks, nperm=10000)
  # Sort by NES, keep nominally significant pathways, renumber rows so that
  # `rows` indexes the sorted table.
  fgseaResTidy <- fgseaRes %>% as_tibble() %>% arrange(desc(NES)) %>% as.data.frame() %>% filter(pval < 0.05)
  rownames(fgseaResTidy) = seq(length=nrow(fgseaResTidy))
  # Horizontal NES bar chart; pathway names cleaned and wrapped at 40 chars.
  # (A redundant ylab("") that was immediately overridden has been removed.)
  fgseaResTidy %>%
    slice(rows) %>%
    mutate(pathway = gsub("REACTOME|_"," ",pathway),
           reg=case_when(NES > 0 ~ "Up",
                         NES < 0 ~ "Down")) %>%
    ggplot(aes(reorder(pathway, NES), NES,fill=reg)) +
    geom_bar(stat="identity") +
    coord_flip() +
    ylab("Normalized Enrichment Score")+
    scale_fill_manual(values = c("blue","red"))+
    scale_x_discrete(labels = function(x) str_wrap(x, width = 40))+
    scale_y_continuous(expand = c(0,0)) +
    gg_theme
}
# pathway enrichment from clusterpofileR
library(tidytext)
# Over-representation analysis of a named list of gene sets against the
# Reactome collection (clusterProfiler::compareCluster + enricher), plotted
# as the top 5 terms per cluster. Uses global gg_theme.
# gene_set: named list of character vectors of gene symbols.
Gene_Set_Enrichment = function(gene_set){
  term2gene = read.gmt("/home/imgsb/tools/GSEA/c2.cp.reactome.v7.1.symbols.gmt")
  # gmt symbols are upper case; harmonise the input.
  gene_set_upper = lapply(gene_set, toupper)
  # FIX: previously enriched the global object
  # diff_NCoR1.annotation.df.list_toupper and silently ignored the
  # `gene_set` argument; now the (upper-cased) argument is used.
  compareCluster(gene_set_upper, fun="enricher",TERM2GENE=term2gene) %>% as.data.frame() %>%
    group_by(Cluster) %>%
    # Top 5 terms per cluster (enricher output is sorted by adjusted p).
    slice(1:5) %>%
    ungroup %>%
    mutate(Cluster = factor(Cluster,levels = sort(unique(Cluster))),
           Description = gsub("REACTOME|_"," ",Description)) %>%
    ggplot(aes(x=reorder(Description,-pvalue), y=-log(pvalue), fill = p.adjust)) +
    geom_bar(stat="identity") +
    facet_wrap(~Cluster, scales = "free",ncol = 1) +
    coord_flip() +
    scale_x_discrete(labels = function(x) str_wrap(x, width = 40))+
    scale_fill_gradient(low="red",high="blue")+
    scale_y_continuous(expand = c(0,0)) +
    gg_theme
}
# plot RNAseq and H3K27ac scatter plot
# Scatter of RNA-seq vs H3K27ac fold changes with Pearson correlation,
# linear fit, and repelled labels for selected genes.
# df: data frame with the two fold-change columns, a Genes category column,
#     and a SYMBOL column. x, y: column names (strings).
# NOTE(review): the SYMBOL parameter is unused — labelling uses the global
# `labRow` and df$SYMBOL; confirm intent before removing the argument.
# Relies on globals: gg_theme, labRow; requires ggpubr (stat_cor), ggrepel.
plot_RNaseq_h3k27ac_scatter = function(df,x,y,SYMBOL){
  ggplot(df,aes_string(x=x,y=y))+
    geom_point(aes(color=Genes),size=0.2) +
    stat_cor()+
    geom_smooth(method="lm",se = TRUE,color="grey")+
    scale_color_manual(values = c("blue","darkgreen","red"))+
    xlim(-2,2)+ylim(-2,2)+
    geom_vline(xintercept = 0,linetype = "dashed",size = 0.5) +
    geom_hline(yintercept = 0, linetype = "dashed",size = 0.5) +
    gg_theme+
    # FIX: removed a stray duplicated comma after aes(...), which passed an
    # empty (missing) argument to geom_label_repel and errored at run time.
    geom_label_repel(data = subset(df,SYMBOL %in% labRow),
                     aes(label = SYMBOL,fill=factor(Genes),segment.colour = factor(Genes)),
                     color = "white" ,fontface="italic",box.padding = 0.5,max.overlaps = 1000,
                     size = 3.5,segment.size = 0.2)+
    # fill drives both the label background and the repel segment colour.
    scale_fill_manual(values = c("blue","darkgreen","red"),aesthetics = c("fill", "segment.color"))
}
# plot H3K27ac differential enhancer scatter plot
# NOTE(review): duplicate of the identical theme_scatter defined earlier in
# this file — consider keeping a single copy.
# Shared scatter-plot theme: theme_bw (base size 20), 15pt black axis
# text/titles, legend anchored top-left, no legend title, centred title.
theme_scatter = theme_bw(20) +
theme(axis.text.x=element_text(size=15,color="black"),
axis.title.x=element_text(size=15),
axis.text.y=element_text(size=15,color="black"),
axis.title.y=element_text(size=15),
legend.justification=c(0,1),
legend.title = element_blank(),
plot.title = element_text(hjust = 0.5,size=15))
# NOTE(review): duplicate of the identical plot_h3k27ac defined earlier in
# this file — consider keeping a single copy.
# Scatter of H3K27ac rlog signal (x vs y) coloured by differential status
# (log2FoldChange cut at +/-1), with up/down counts annotated.
# Relies on globals: H3K27ac_Rlog.df, theme_scatter.
plot_h3k27ac = function(df,x,y){
plt = merge(df,H3K27ac_Rlog.df,by=0) %>%
ggplot(.,aes_string(x=x,y=y))+
geom_point(size=0.1,aes(color=cut(log2FoldChange, c(-6, -1, 1, 6)))) +
scale_color_manual(values = c("blue","grey","red"),label=c("Down","No Change","Up")) +
theme_scatter +guides(colour = guide_legend(override.aes = list(size=10)))
plt = plt + annotate("text", x = 6, y = 10.5, label = dim(df[which(df$log2FoldChange >=1),])[1],color="red",size=6)
plt = plt + annotate("text", x = 10.5, y = 6, label = dim(df[which(df$log2FoldChange <= -1),])[1],color="blue",size=6)
return(plt)
}
#intersection
# NOTE(review): duplicate of the identical intersection() defined earlier in
# this file — consider keeping a single copy.
# Partition the first three gene sets of `gene.df` (named list of character
# vectors) into: the three-way intersection ("Common"), each set's exclusive
# genes (keyed by the set's name), and the three pairwise overlaps minus the
# common core. Returns a named list.
intersection = function(gene.df){
ncor_cluster_gene_exp.list = list()
for (j in 1:3){
#print(j)
if( j ==1){
# Three-way core; also used by the j == 2 and j == 3 branches below.
common = Reduce(intersect, list(gene.df[[j]],gene.df[[j+1]],gene.df[[j+2]]))
# Genes exclusive to set 1.
diff = setdiff(gene.df[[j]],gene.df[[j+1]])
diff = setdiff(diff,gene.df[[j+2]])
# Pair (1,2) overlap minus the core.
com = Reduce(intersect, list(gene.df[[j]],gene.df[[j+1]]))
CpG_pIC_all3_vs_CpG_all3 = setdiff(com,common)
ncor_cluster_gene_exp.list[["CpG_pIC_all3_vs_CpG_all3"]] = CpG_pIC_all3_vs_CpG_all3
}
#print(length(diff))
if( j ==2){
# Genes exclusive to set 2; pair (2,3) overlap minus the core.
diff = setdiff(gene.df[[j]],gene.df[[j+1]])
diff = setdiff(diff,gene.df[[1]])
com = Reduce(intersect, list(gene.df[[j]],gene.df[[j+1]]))
CpG_all3_vs_Uns_pIC = setdiff(com,common)
ncor_cluster_gene_exp.list[["CpG_all3_vs_Uns_pIC"]] = CpG_all3_vs_Uns_pIC
}
if( j ==3){
# Genes exclusive to set 3; pair (3,1) overlap minus the core.
diff = setdiff(gene.df[[j]],gene.df[[j-1]])
diff = setdiff(diff,gene.df[[j-2]])
com = Reduce(intersect, list(gene.df[[j]],gene.df[[j-2]]))
Uns_pIC_vs_CpG_pIC_all3 = setdiff(com,common)
ncor_cluster_gene_exp.list[["Uns_pIC_vs_CpG_pIC_all3"]] = Uns_pIC_vs_CpG_pIC_all3
}
# Exclusive genes stored under the input set's own name; "Common" is
# re-assigned (same value) on every pass.
ncor_cluster_gene_exp.list[[names(gene.df[j])]] = diff
ncor_cluster_gene_exp.list[["Common"]] = common
}
return(ncor_cluster_gene_exp.list)
}
# Generate expression bar plot of any genes based on only gene name
# NOTE(review): duplicate of the identical norm_count_bar_plot defined
# earlier in this file — consider keeping a single copy.
# Bar plot (mean +/- sd across replicates) of normalized expression for the
# given gene(s), Emp vs NCoR1, faceted by stimulation. Relies on globals:
# Emp_NCoR1_Normalized_count and summarySE().
norm_count_bar_plot = function(gene){
gene_exp = Emp_NCoR1_Normalized_count[gene,] %>% reshape2::melt()
# Collapse replicate suffixes; split names into condition and stimulation.
gene_exp$variable = gsub('_R2|_R1',"",gene_exp$variable)
gene_exp$condition = gsub('_.*',"",gene_exp$variable)
gene_exp$stimulation = gsub('Emp_|NCoR1_',"",gene_exp$variable)
gene_exp$condition <- factor(gene_exp$condition,levels = c("Emp","NCoR1"))
gene_exp$stimulation <- factor(gene_exp$stimulation,levels = c("Uns","IFNg","6hr_CpG","6hr_pIC","6hr_all3"))
# Mean/sd per condition x stimulation for the error bars.
tgc <- summarySE(gene_exp, measurevar="value", groupvars=c("condition","stimulation"))
print(ggplot(tgc,aes(x=stimulation,y=value,fill=condition)) +
geom_bar(stat="identity",position=position_dodge()) +
facet_wrap(~stimulation,scales = "free_x",ncol=5)+
geom_errorbar( aes(ymin=value-sd, ymax=value+sd), width=.2,position=position_dodge(.9))+
scale_fill_manual(values=c("blue","red"))+
#scale_color_hue()+
theme_bw()+
theme(axis.text.x=element_blank(),
axis.text.y=element_text(size=10,colour = "black",face= "bold"),
axis.title.y=element_text(size=10,colour = "black",face= "bold"),
plot.title = element_text(size = 20, face = "italic",hjust = 0.5),
legend.text = element_text(size=10,colour = "black"),
#legend.position = "none",
strip.text = element_text(size = 15,face="bold")) +
labs(x = "",y="Normalized count",label =FALSE,title = "") +
ggtitle(gene))
}
# NOTE(review): duplicate of the identical merge_rep defined earlier in this
# file — consider keeping a single copy.
# Average replicate column pairs (1&2, 3&4, ...) row-wise; merged columns
# are named after the first column of each pair with the "_R<digit>" suffix
# removed. Row names are preserved.
merge_rep= function(col1){
N <- ncol(col1)
name <- colnames(col1)
obj <- vector("list",ncol(col1)/2)
k=1
for(i in 1:N) {
# NOTE(review): `i <= N` is always TRUE; an odd column count would index
# past the last column at col1[, c(i, i+1)] — confirm inputs are even.
if(i%%2 ==1 && i <= N) {
#print(i)
ID <-rowMeans(col1[,c(i,i+1)])
obj[[k]] <- ID
nam <- colnames(col1)[i]
# Strip the replicate suffix, e.g. "Emp_Uns_R1" -> "Emp_Uns".
nam <- str_replace(nam,"_R[0-9]","")
names(obj)[k] <- nam
names(obj[[k]]) <- rownames(col1)
#print(k)
k=k+1
}
}
# Stack per-pair mean vectors, transpose back to genes x samples.
mat_merged <- as.data.frame(t(do.call(rbind, obj)))
colnames(mat_merged) = names(obj)
return(mat_merged)
}
# NOTE(review): duplicate of the identical H3K27ac_Rep_scatter_plot defined
# earlier in this file — consider keeping a single copy.
# One density-coloured Replicate-1-vs-Replicate-2 scatter per column pair,
# with a Pearson correlation label; returns the list of ggplot objects.
# Relies on globals: get_density(); viridis and ggpubr must be loaded.
H3K27ac_Rep_scatter_plot= function(col1){
N <- ncol(col1)
name <- colnames(col1)
name = gsub("_1|_2","",name)
name = unique(name)
k=1
plotList = list()
for(i in 1:N) {
# NOTE(review): `i <= N` is vacuous; odd N would index out of bounds.
if(i%%2 ==1 && i <= N) {
#print(i)
tmp = col1[,c(i,i+1)]
x= colnames(tmp)[1]
y= colnames(tmp)[2]
# NOTE(review): colnames(tmp[,1]) is NULL for a vector; `nam` is unused.
nam <- str_replace(colnames(tmp[,1]),"_1","")
# 2D kernel density colours the points.
tmp$density = get_density(tmp[,1], tmp[,2], n = 100)
p = ggplot(tmp) +
geom_point(aes_string(x=x, y=y, color = "density"),size =0.1) +
scale_color_viridis()+
ggtitle(name[k]) +
xlab("Replicate 1") + ylab("Replicate 2") +
xlim(4,12) +ylim(4,12)
plotList[[k]] = p + stat_cor(aes_string(x=x, y=y),method = "pearson", label.x = 4.5, label.y = 11.5)
print(k)
k=k+1
}
}
#mat_merged <- as.data.frame(t(do.call(rbind, obj)))
#colnames(mat_merged) = names(obj)
return(plotList)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/praise.R
\name{praise}
\alias{praise}
\alias{feedback}
\title{Publish praise about \pkg{texreg}}
\usage{
praise(
academic_user,
organization,
name = NULL,
general_praise = NULL,
increase_productivity = NULL,
increase_quality = NULL,
start_using = NULL,
where_learn = NULL,
contact_details = NULL,
models = NULL,
num_users = NULL,
return.response = FALSE
)
}
\arguments{
\item{academic_user}{Should be \code{TRUE} if you are at a university or
public research institute. Should be \code{FALSE} if you are a private
user, for example you are using \pkg{texreg} in your work for a firm, NGO,
association, government department, as an individual user etc. We
particularly need praise from non-academic users to demonstrate societal
impact, but we can also make the case for academic usage to generate impact
indirectly.}
\item{organization}{Please tell us the name of the organization for which you
are using \pkg{texreg}. If we can show that the package is being employed
in a number of different settings, this will help us demonstrate impact.}
\item{name}{(Optional) We would be delighted to know who you are. After
all, we can quote you much more effectively if we can tell the funders and
employers who provided this praise! If possible, include your title.}
\item{general_praise}{Use this argument to provide general praise, for
example about the way it was designed, the user support you have received,
or just how much you enjoy using it. While this is useful, however, we
would be even more interested in receiving statements about how \pkg{texreg}
makes you more productive (in the \code{increase_productivity} argument) or
how it increases the quality of your work or your reports (through the
\code{increase_quality} argument). Note: you need to provide at least one
of these three free-form text arguments.}
\item{increase_productivity}{This is one of the fields we are most interested
in. Please use this field to tell us how \pkg{texreg} is making you more
productive. For example, does it speed up writing your articles or research
reports? Does it enable you to skip manual work like copy and paste of your
results into your reports, or to avoid fiddling with table formatting? How
much time has it saved you so far? Are there any other benefits in terms of
productivity you can think of? Note: you need to provide feedback using at
least one of the three free-form arguments (\code{general_praise},
\code{increase_productivity}, or \code{increase_quality}).}
\item{increase_quality}{This is one of the fields we are most interested in.
Please use this argument to tell us how \pkg{texreg} increases the quality
of your work or the quality of your reporting. For example, does the
package generate tables that look more professional than the tables you
used to create manually? Are you using \link{screenreg} to improve your
workflow by understanding better how the results of multiple models
compare? Are you using \link{plotreg} to visualize and present your
statistical results in a more effective way? Can you think of any other
ways in which \pkg{texreg} is helping you? Note: you need to provide
feedback using at least one of the three free-form arguments
(\code{general_praise}, \code{increase_productivity}, or
\code{increase_quality}).}
\item{start_using}{(Optional) When did you start using \pkg{texreg}? We are
interested in the approximate time or year as a free-form text argument,
for example \code{"back in 2013 when the JSS article came out"}.}
\item{where_learn}{(Optional) Where or how did you learn about the
\pkg{texreg} package?}
\item{contact_details}{(Optional) Tell us how we can contact you in case we
would benefit from additional information. This might help us further down
the road in compiling an impact case study or a similar report. Don't
worry, this information will not be displayed on the website!}
\item{models}{(Optional) Which kinds of statistical models do you use in your
work? For example, \code{"Mostly linear models, but also lme4 and ergm."}.}
\item{num_users}{(Optional) How many other \pkg{texreg} users do you know? In
particular, if you are a non-academic user, would you mind telling us how
many other non-academic users you are aware of and how many of them are in
your organization? The more we know, the more convincing our evidence base
will be. This argument accepts \code{numeric} values or more detailed
responses as a \code{character} object.}
\item{return.response}{If \code{TRUE}, a website with the submitted data will
be returned as a \code{response} object, as defined in the \pkg{httr}
package. You can load the \pkg{httr} package and use the
\code{\link[httr]{content}} function, possibly enclosed in an
\code{\link[base]{as.character}} call, to inspect the output and diagnose
any problems with the transmission of the data. Only use this argument if
instructed by the package authors.}
}
\value{
If everything works well, no output is returned (but see the
\code{return.response} argument to change this). If the submission of the
praise to the maintainer fails, a \code{response} object (as defined in the
\pkg{httr} package) will be returned. Should you have any problems, do feel
free to e-mail your praise to the package maintainer directly.
}
\description{
Publish praise about \pkg{texreg} to help the developers demonstrate impact.
}
\details{
You can use this function to praise the \pkg{texreg} package. Funders and
academic employers are increasingly interested in seeing evidence for the
impact academic research generates. For software, such as \pkg{texreg}, this
is very hard to accomplish because the developers are usually disconnected
from the users. The consequence is that incentives for developing
packages like these are diminishing the more the funders and employers
require evidence of impact on society, firms, or policy makers.
The \code{\link{praise}} function is our attempt at rectifying the situation.
With this function, you can provide positive feedback to the developers. The
praise is saved to a database on the web server of the package maintainer and
subsequently displayed at \url{http://www.philipleifeld.com/praise}
for other users, funders, and employers to view. This will also enable the
package authors to compile reports about how \pkg{texreg} is used by academic
and non-academic users to increase their productivity and work quality, for
example in the form of an impact case study for the 2021 UK Research
Excellence Framework (REF).
We need many positive examples of how \pkg{texreg} has an impact on your
work. We are especially interested in non-academic users, but welcome
feedback from anyone. So please contribute by using the praise function! Tell
us how cool this package is and how it has changed your work!
The minimal information we require from you is whether you are an academic or
non-academic user, the name of your organization, and some free-form praise
(of a general nature, or about how it makes you more productive, or about how
it increases the quality of your work or reporting). But there are some
additional fields. While we are happy with the basic information, of course
we will be happier if we also know your name, how to contact you, what kinds
of models you work with, and some other details. Your choice!
Please note that by using the \code{\link{praise}} function you agree that
the information you provide through the function, including your location, is
stored online in a database, displayed on the website of the package author,
and used in reports to funders, employers etc. (This is the whole purpose of
it.) You can contact the package maintainer any time to have your praise
removed.
}
\examples{
\dontrun{
praise(academic_user = TRUE,
organization = "University of Happy Tables",
increase_quality = "Man I've never had such pretty tables!")
}
}
\author{
Philip Leifeld
}
|
/man/praise.Rd
|
no_license
|
zauster/texreg
|
R
| false
| true
| 7,979
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/praise.R
\name{praise}
\alias{praise}
\alias{feedback}
\title{Publish praise about \pkg{texreg}}
\usage{
praise(
academic_user,
organization,
name = NULL,
general_praise = NULL,
increase_productivity = NULL,
increase_quality = NULL,
start_using = NULL,
where_learn = NULL,
contact_details = NULL,
models = NULL,
num_users = NULL,
return.response = FALSE
)
}
\arguments{
\item{academic_user}{Should be \code{TRUE} if you are at a university or
public research institute. Should be \code{FALSE} if you are a private
user, for example you are using \pkg{texreg} in your work for a firm, NGO,
association, government department, as an individual user etc. We
particularly need praise from non-academic users to demonstrate societal
impact, but we can also make the case for academic usage to generate impact
indirectly.}
\item{organization}{Please tell us the name of the organization for which you
are using \pkg{texreg}. If we can show that the package is being employed
in a number of different settings, this will help us demonstrate impact.}
\item{name}{(Optional) We would be delighted to know who you are. After
all, we can quote you much more effectively if we can tell the funders and
employers who provided this praise! If possible, include your title.}
\item{general_praise}{Use this argument to provide general praise, for
example about the way it was designed, the user support you have received,
or just how much you enjoy using it. While this is useful, however, we
would be even more interested in receiving statements about how \pkg{texreg}
makes you more productive (in the \code{increase_productivity} argument) or
how it increases the quality of your work or your reports (through the
\code{increase_quality} argument). Note: you need to provide at least one
of these three free-form text arguments.}
\item{increase_productivity}{This is one of the fields we are most interested
in. Please use this field to tell us how \pkg{texreg} is making you more
productive. For example, does it speed up writing your articles or research
reports? Does it enable you to skip manual work like copy and paste of your
results into your reports, or to avoid fiddling with table formatting? How
much time has it saved you so far? Are there any other benefits in terms of
productivity you can think of? Note: you need to provide feedback using at
least one of the three free-form arguments (\code{general_praise},
\code{increase_productivity}, or \code{increase_quality}).}
\item{increase_quality}{This is one of the fields we are most interested in.
Please use this argument to tell us how \pkg{texreg} increases the quality
of your work or the quality of your reporting. For example, does the
package generate tables that look more professional than the tables you
used to create manually? Are you using \link{screenreg} to improve your
workflow by understanding better how the results of multiple models
compare? Are you using \link{plotreg} to visualize and present your
statistical results in a more effective way? Can you think of any other
ways in which \pkg{texreg} is helping you? Note: you need to provide
feedback using at least one of the three free-form arguments
(\code{general_praise}, \code{increase_productivity}, or
\code{increase_quality}).}
\item{start_using}{(Optional) When did you start using \pkg{texreg}? We are
interested in the approximate time or year as a free-form text argument,
for example \code{"back in 2013 when the JSS article came out"}.}
\item{where_learn}{(Optional) Where or how did you learn about the
\pkg{texreg} package?}
\item{contact_details}{(Optional) Tell us how we can contact you in case we
would benefit from additional information. This might help us further down
the road in compiling an impact case study or a similar report. Don't
worry, this information will not be displayed on the website!}
\item{models}{(Optional) Which kinds of statistical models do you use in your
work? For example, \code{"Mostly linear models, but also lme4 and ergm."}.}
\item{num_users}{(Optional) How many other \pkg{texreg} users do you know? In
particular, if you are a non-academic user, would you mind telling us how
many other non-academic users you are aware of and how many of them are in
your organization? The more we know, the more convincing our evidence base
will be. This argument accepts \code{numeric} values or more detailed
responses as a \code{character} object.}
\item{return.response}{If \code{TRUE}, a website with the submitted data will
be returned as a \code{response} object, as defined in the \pkg{httr}
package. You can load the \pkg{httr} package and use the
\code{\link[httr]{content}} function, possibly enclosed in an
\code{\link[base]{as.character}} call, to inspect the output and diagnose
any problems with the transmission of the data. Only use this argument if
instructed by the package authors.}
}
\value{
If everything works well, no output is returned (but see the
\code{return.response} argument to change this). If the submission of the
praise to the maintainer fails, a \code{response} object (as defined in the
\pkg{httr} package) will be returned. Should you have any problems, do feel
free to e-mail your praise to the package maintainer directly.
}
\description{
Publish praise about \pkg{texreg} to help the developers demonstrate impact.
}
\details{
You can use this function to praise the \pkg{texreg} package. Funders and
academic employers are increasingly interested in seeing evidence for the
impact academic research generates. For software, such as \pkg{texreg}, this
is very hard to accomplish because the developers are usually disconnected
from the users. The consequence is that incentives for developing
packages like these are diminishing the more the funders and employers
require evidence of impact on society, firms, or policy makers.
The \code{\link{praise}} function is our attempt at rectifying the situation.
With this function, you can provide positive feedback to the developers. The
praise is saved to a database on the web server of the package maintainer and
subsequently displayed at \url{http://www.philipleifeld.com/praise}
for other users, funders, and employers to view. This will also enable the
package authors to compile reports about how \pkg{texreg} is used by academic
and non-academic users to increase their productivity and work quality, for
example in the form of an impact case study for the 2021 UK Research
Excellence Framework (REF).
We need many positive examples of how \pkg{texreg} has an impact on your
work. We are especially interested in non-academic users, but welcome
feedback from anyone. So please contribute by using the praise function! Tell
us how cool this package is and how it has changed your work!
The minimal information we require from you is whether you are an academic or
non-academic user, the name of your organization, and some free-form praise
(of a general nature, or about how it makes you more productive, or about how
it increases the quality of your work or reporting). But there are some
additional fields. While we are happy with the basic information, of course
we will be happier if we also know your name, how to contact you, what kinds
of models you work with, and some other details. Your choice!
Please note that by using the \code{\link{praise}} function you agree that
the information you provide through the function, including your location, is
stored online in a database, displayed on the website of the package author,
and used in reports to funders, employers etc. (This is the whole purpose of
it.) You can contact the package maintainer any time to have your praise
removed.
}
\examples{
\dontrun{
praise(academic_user = TRUE,
organization = "University of Happy Tables",
increase_quality = "Man I've never had such pretty tables!")
}
}
\author{
Philip Leifeld
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/checks.R
\name{check_data}
\alias{check_data}
\title{Check the data and grid}
\usage{
check_data(x, dim_check = NA, type)
}
\arguments{
\item{x}{Data or grid}
\item{dim_check}{How many columns do we expect?}
\item{type}{Is it the "grid" or "data" for use in error messages.}
}
\description{
Checks that the data or grid provided is of the correct form. This function
is an auxiliary function that can quickly check that a supplied data set or
grid is a matrix or a data frame, and that it has the correct dimension, as
defined by the \code{dim_check} parameter. The \code{type} argument is simply
a character vector "data" or "grid" that is used for printing error messages.
}
|
/man/check_data.Rd
|
no_license
|
cran/lg
|
R
| false
| true
| 757
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/checks.R
\name{check_data}
\alias{check_data}
\title{Check the data and grid}
\usage{
check_data(x, dim_check = NA, type)
}
\arguments{
\item{x}{Data or grid}
\item{dim_check}{How many columns do we expect?}
\item{type}{Is it the "grid" or "data" for use in error messages.}
}
\description{
Checks that the data or grid provided is of the correct form. This function
is an auxiliary function that can quickly check that a supplied data set or
grid is a matrix or a data frame, and that it has the correct dimension, as
defined by the \code{dim_check} parameter. The \code{type} argument is simply
a character vector "data" or "grid" that is used for printing error messages.
}
|
## Gives count, mean, standard deviation, standard error of the mean, and confidence interval (default 95%).
## data: a data frame.
## measurevar: the name of a column that contains the variable to be summarized
## groupvars: a vector containing names of columns that contain grouping variables
## na.rm: a boolean that indicates whether to ignore NA's
## conf.interval: the percent range of the confidence interval (default is 95%)
## .drop: passed through to plyr::ddply; drop unused factor level combinations?
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
                      conf.interval=.95, .drop=TRUE) {
  library(plyr)  # NOTE(review): library() inside a function attaches plyr globally as a side effect
  # New version of length which can handle NA's: if na.rm==T, don't count them
  length2 <- function (x, na.rm=FALSE) {
    if (na.rm) sum(!is.na(x))
    else length(x)
  }
  # This does the summary. For each group's data frame, return a vector with
  # N, mean, and sd
  datac <- ddply(data, groupvars, .drop=.drop,
                 .fun = function(xx, col) {
                   c(N    = length2(xx[[col]], na.rm=na.rm),
                     mean = mean(xx[[col]], na.rm=na.rm),
                     sd   = sd(xx[[col]], na.rm=na.rm)
                   )
                 },
                 measurevar
  )
  # Rename the "mean" column to the measured variable's own name
  datac <- rename(datac, c("mean" = measurevar))
  datac$se <- datac$sd / sqrt(datac$N)  # Calculate standard error of the mean
  # Confidence interval multiplier for standard error
  # Calculate t-statistic for confidence interval:
  # e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
  ciMult <- qt(conf.interval/2 + .5, datac$N-1)
  datac$ci <- datac$se * ciMult
  return(datac)
}
## Norms the data within specified groups in a data frame; it normalizes each
## subject (identified by idvar) so that they have the same mean, within each group
## specified by betweenvars.
## data: a data frame.
## idvar: the name of a column that identifies each subject (or matched subjects)
## measurevar: the name of a column that contains the variable to be summarized
## betweenvars: a vector containing names of columns that are between-subjects variables
## na.rm: a boolean that indicates whether to ignore NA's
## .drop: passed through to plyr::ddply; drop unused factor level combinations?
normDataWithin <- function(data=NULL, idvar, measurevar, betweenvars=NULL,
                           na.rm=FALSE, .drop=TRUE) {
  library(plyr)  # NOTE(review): attaches plyr into the caller's search path as a side effect
  # Per-subject mean of the measure variable within each between-subjects group.
  # Measure var on left, idvar + between vars on right of formula.
  data.subjMean <- ddply(data, c(idvar, betweenvars), .drop=.drop,
                         .fun = function(xx, col, na.rm) {
                           c(subjMean = mean(xx[,col], na.rm=na.rm))
                         },
                         measurevar,
                         na.rm
  )
  # Put the subject means with original data
  # NOTE(review): merge() defaults to sort=TRUE, so rows come back re-sorted
  # by the id/between columns rather than in their original order.
  data <- merge(data, data.subjMean)
  # Get the normalized data in a new column: subtract the subject's own mean,
  # then add back the grand mean so every subject shares the same mean.
  measureNormedVar <- paste(measurevar, "_norm", sep="")
  data[,measureNormedVar] <- data[,measurevar] - data[,"subjMean"] +
    mean(data[,measurevar], na.rm=na.rm)
  # Remove this subject mean column
  data$subjMean <- NULL
  return(data)
}
## Summarizes data, handling within-subjects variables by removing inter-subject variability.
## It will still work if there are no within-S variables.
## Gives count, un-normed mean, normed mean (with same between-group mean),
## standard deviation, standard error of the mean, and confidence interval.
## If there are within-subject variables, calculate adjusted values using method from Morey (2008).
## data: a data frame.
## measurevar: the name of a column that contains the variable to be summarized
## betweenvars: a vector containing names of columns that are between-subjects variables
## withinvars: a vector containing names of columns that are within-subjects variables
## idvar: the name of a column that identifies each subject (or matched subjects)
## na.rm: a boolean that indicates whether to ignore NA's
## conf.interval: the percent range of the confidence interval (default is 95%)
summarySEwithin <- function(data=NULL, measurevar, betweenvars=NULL, withinvars=NULL,
                            idvar=NULL, na.rm=FALSE, conf.interval=.95, .drop=TRUE) {
  # Ensure that the betweenvars and withinvars are factors
  factorvars <- vapply(data[, c(betweenvars, withinvars), drop=FALSE],
                       FUN=is.factor, FUN.VALUE=logical(1))
  if (!all(factorvars)) {
    nonfactorvars <- names(factorvars)[!factorvars]
    message("Automatically converting the following non-factors to factors: ",
            paste(nonfactorvars, collapse = ", "))
    data[nonfactorvars] <- lapply(data[nonfactorvars], factor)
  }
  # Get the means from the un-normed data
  datac <- summarySE(data, measurevar, groupvars=c(betweenvars, withinvars),
                     na.rm=na.rm, conf.interval=conf.interval, .drop=.drop)
  # Drop all the unused columns (these will be calculated with normed data)
  datac$sd <- NULL
  datac$se <- NULL
  datac$ci <- NULL
  # Norm each subject's data
  ndata <- normDataWithin(data, idvar, measurevar, betweenvars, na.rm, .drop=.drop)
  # This is the name of the new column
  measurevar_n <- paste(measurevar, "_norm", sep="")
  # Collapse the normed data - now we can treat between and within vars the same
  ndatac <- summarySE(ndata, measurevar_n, groupvars=c(betweenvars, withinvars),
                      na.rm=na.rm, conf.interval=conf.interval, .drop=.drop)
  # Apply correction from Morey (2008) to the standard error and confidence interval
  # Get the product of the number of conditions of within-S variables
  # NOTE(review): if withinvars is empty, prod(numeric(0)) is 1 and the
  # correction factor below becomes sqrt(1/0) = Inf — confirm intended.
  nWithinGroups <- prod(vapply(ndatac[,withinvars, drop=FALSE], FUN=nlevels,
                               FUN.VALUE=numeric(1)))
  correctionFactor <- sqrt( nWithinGroups / (nWithinGroups-1) )
  # Apply the correction factor
  ndatac$sd <- ndatac$sd * correctionFactor
  ndatac$se <- ndatac$se * correctionFactor
  ndatac$ci <- ndatac$ci * correctionFactor
  # Combine the un-normed means with the normed results
  merge(datac, ndatac)
}
|
/dependencies/summarySE.R
|
no_license
|
michaelakent/AnThroM
|
R
| false
| false
| 6,191
|
r
|
## Gives count, mean, standard deviation, standard error of the mean, and confidence interval (default 95%).
## data: a data frame.
## measurevar: the name of a column that contains the variable to be summarized
## groupvars: a vector containing names of columns that contain grouping variables
## na.rm: a boolean that indicates whether to ignore NA's
## conf.interval: the percent range of the confidence interval (default is 95%)
## .drop: passed through to plyr::ddply; drop unused factor level combinations?
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
                      conf.interval=.95, .drop=TRUE) {
  library(plyr)  # NOTE(review): library() inside a function attaches plyr globally as a side effect
  # New version of length which can handle NA's: if na.rm==T, don't count them
  length2 <- function (x, na.rm=FALSE) {
    if (na.rm) sum(!is.na(x))
    else length(x)
  }
  # This does the summary. For each group's data frame, return a vector with
  # N, mean, and sd
  datac <- ddply(data, groupvars, .drop=.drop,
                 .fun = function(xx, col) {
                   c(N    = length2(xx[[col]], na.rm=na.rm),
                     mean = mean(xx[[col]], na.rm=na.rm),
                     sd   = sd(xx[[col]], na.rm=na.rm)
                   )
                 },
                 measurevar
  )
  # Rename the "mean" column to the measured variable's own name
  datac <- rename(datac, c("mean" = measurevar))
  datac$se <- datac$sd / sqrt(datac$N)  # Calculate standard error of the mean
  # Confidence interval multiplier for standard error
  # Calculate t-statistic for confidence interval:
  # e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
  ciMult <- qt(conf.interval/2 + .5, datac$N-1)
  datac$ci <- datac$se * ciMult
  return(datac)
}
## Norms the data within specified groups in a data frame; it normalizes each
## subject (identified by idvar) so that they have the same mean, within each group
## specified by betweenvars.
## data: a data frame.
## idvar: the name of a column that identifies each subject (or matched subjects)
## measurevar: the name of a column that contains the variable to be summarized
## betweenvars: a vector containing names of columns that are between-subjects variables
## na.rm: a boolean that indicates whether to ignore NA's
## .drop: passed through to plyr::ddply; drop unused factor level combinations?
normDataWithin <- function(data=NULL, idvar, measurevar, betweenvars=NULL,
                           na.rm=FALSE, .drop=TRUE) {
  library(plyr)  # NOTE(review): attaches plyr into the caller's search path as a side effect
  # Per-subject mean of the measure variable within each between-subjects group.
  # Measure var on left, idvar + between vars on right of formula.
  data.subjMean <- ddply(data, c(idvar, betweenvars), .drop=.drop,
                         .fun = function(xx, col, na.rm) {
                           c(subjMean = mean(xx[,col], na.rm=na.rm))
                         },
                         measurevar,
                         na.rm
  )
  # Put the subject means with original data
  # NOTE(review): merge() defaults to sort=TRUE, so rows come back re-sorted
  # by the id/between columns rather than in their original order.
  data <- merge(data, data.subjMean)
  # Get the normalized data in a new column: subtract the subject's own mean,
  # then add back the grand mean so every subject shares the same mean.
  measureNormedVar <- paste(measurevar, "_norm", sep="")
  data[,measureNormedVar] <- data[,measurevar] - data[,"subjMean"] +
    mean(data[,measurevar], na.rm=na.rm)
  # Remove this subject mean column
  data$subjMean <- NULL
  return(data)
}
## Summarizes data, handling within-subjects variables by removing inter-subject variability.
## It will still work if there are no within-S variables.
## Gives count, un-normed mean, normed mean (with same between-group mean),
## standard deviation, standard error of the mean, and confidence interval.
## If there are within-subject variables, calculate adjusted values using method from Morey (2008).
## data: a data frame.
## measurevar: the name of a column that contains the variable to be summarized
## betweenvars: a vector containing names of columns that are between-subjects variables
## withinvars: a vector containing names of columns that are within-subjects variables
## idvar: the name of a column that identifies each subject (or matched subjects)
## na.rm: a boolean that indicates whether to ignore NA's
## conf.interval: the percent range of the confidence interval (default is 95%)
summarySEwithin <- function(data=NULL, measurevar, betweenvars=NULL, withinvars=NULL,
                            idvar=NULL, na.rm=FALSE, conf.interval=.95, .drop=TRUE) {
  # Ensure that the betweenvars and withinvars are factors
  factorvars <- vapply(data[, c(betweenvars, withinvars), drop=FALSE],
                       FUN=is.factor, FUN.VALUE=logical(1))
  if (!all(factorvars)) {
    nonfactorvars <- names(factorvars)[!factorvars]
    message("Automatically converting the following non-factors to factors: ",
            paste(nonfactorvars, collapse = ", "))
    data[nonfactorvars] <- lapply(data[nonfactorvars], factor)
  }
  # Get the means from the un-normed data
  datac <- summarySE(data, measurevar, groupvars=c(betweenvars, withinvars),
                     na.rm=na.rm, conf.interval=conf.interval, .drop=.drop)
  # Drop all the unused columns (these will be calculated with normed data)
  datac$sd <- NULL
  datac$se <- NULL
  datac$ci <- NULL
  # Norm each subject's data
  ndata <- normDataWithin(data, idvar, measurevar, betweenvars, na.rm, .drop=.drop)
  # This is the name of the new column
  measurevar_n <- paste(measurevar, "_norm", sep="")
  # Collapse the normed data - now we can treat between and within vars the same
  ndatac <- summarySE(ndata, measurevar_n, groupvars=c(betweenvars, withinvars),
                      na.rm=na.rm, conf.interval=conf.interval, .drop=.drop)
  # Apply correction from Morey (2008) to the standard error and confidence interval
  # Get the product of the number of conditions of within-S variables
  # NOTE(review): if withinvars is empty, prod(numeric(0)) is 1 and the
  # correction factor below becomes sqrt(1/0) = Inf — confirm intended.
  nWithinGroups <- prod(vapply(ndatac[,withinvars, drop=FALSE], FUN=nlevels,
                               FUN.VALUE=numeric(1)))
  correctionFactor <- sqrt( nWithinGroups / (nWithinGroups-1) )
  # Apply the correction factor
  ndatac$sd <- ndatac$sd * correctionFactor
  ndatac$se <- ndatac$se * correctionFactor
  ndatac$ci <- ndatac$ci * correctionFactor
  # Combine the un-normed means with the normed results
  merge(datac, ndatac)
}
|
library(tibble)
# Fixture: three example patients. The third row has a missing bp_med, so
# every risk score for that row is expected to come back NA.
input_df <- as.data.frame(tribble(
  ~age, ~gender, ~race, ~BMI, ~sbp, ~hdl, ~totchol, ~bp_med, ~smoker, ~diabetes,
  55, "male", "white", 30, 140, 50, 213, 0, 0, 0,
  45, "female", "white", 27, 125, 50, 200, 1, 0, 0,
  45, "female", "white", 27, 125, 50, 200, NA, 0, 0
))
# Expected output: the input columns plus one column per risk score
# (presumably ACC/AHA, Framingham, and simplified Framingham 10-year
# ASCVD risk, judging by the column names — confirm against the package docs).
output_df <- cbind(input_df, as.data.frame(tribble(
  ~ascvd_10y_accaha, ~ascvd_10y_frs, ~ascvd_10y_frs_simple,
  7.01, 13.53, 16.75,
  1.22, 4.68, 4.91,
  NA, NA, NA
)))
test_that("compute_CVrisk returns correct data frame", {
  expect_equal(
    compute_CVrisk(
      input_df,
      age = "age",
      race = "race",
      gender = "gender",
      bmi = "BMI",
      sbp = "sbp",
      hdl = "hdl",
      totchol = "totchol",
      bp_med = "bp_med",
      smoker = "smoker",
      diabetes = "diabetes"
    ),
    output_df
  )
})
|
/tests/testthat/test-compute_CVrisk.R
|
no_license
|
minghao2016/CVrisk
|
R
| false
| false
| 827
|
r
|
library(tibble)
# Fixture: three example patients. The third row has a missing bp_med, so
# every risk score for that row is expected to come back NA.
input_df <- as.data.frame(tribble(
  ~age, ~gender, ~race, ~BMI, ~sbp, ~hdl, ~totchol, ~bp_med, ~smoker, ~diabetes,
  55, "male", "white", 30, 140, 50, 213, 0, 0, 0,
  45, "female", "white", 27, 125, 50, 200, 1, 0, 0,
  45, "female", "white", 27, 125, 50, 200, NA, 0, 0
))
# Expected output: the input columns plus one column per risk score
# (presumably ACC/AHA, Framingham, and simplified Framingham 10-year
# ASCVD risk, judging by the column names — confirm against the package docs).
output_df <- cbind(input_df, as.data.frame(tribble(
  ~ascvd_10y_accaha, ~ascvd_10y_frs, ~ascvd_10y_frs_simple,
  7.01, 13.53, 16.75,
  1.22, 4.68, 4.91,
  NA, NA, NA
)))
test_that("compute_CVrisk returns correct data frame", {
  expect_equal(
    compute_CVrisk(
      input_df,
      age = "age",
      race = "race",
      gender = "gender",
      bmi = "BMI",
      sbp = "sbp",
      hdl = "hdl",
      totchol = "totchol",
      bp_med = "bp_med",
      smoker = "smoker",
      diabetes = "diabetes"
    ),
    output_df
  )
})
|
#' Gives values for the erythemal BSWF as a function of wavelength
#'
#' This function gives a set of numeric multipliers that can be used as a weight
#' to calculate effective doses and irradiances. The returned values are on
#' quantum based effectiveness relative units.
#'
#' @param w.length numeric array of wavelengths (nm)
#'
#' @return a numeric array of the same length as \code{w.length} with values for
#' the BSWF normalized as in the original source (298 nm) and based on quantum
#' effectiveness.
#'
#'
#' @export
#' @examples
#' CIE_q_fun(293:400)
#'
#'
#' @family BSWF functions
#'
CIE_q_fun <- function(w.length) {
  # Take the energy-based CIE erythemal weights and rescale them by
  # 298 / wavelength to express them on a quantum (photon) basis,
  # preserving the normalization at 298 nm used in the original source.
  energy.weights <- CIE_e_fun(w.length)
  energy.weights * 298.0 / w.length
}
|
/photobiologyWavebands/R/cie.q.fun.r
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 706
|
r
|
#' Gives values for the erythemal BSWF as a function of wavelength
#'
#' This function gives a set of numeric multipliers that can be used as a weight
#' to calculate effective doses and irradiances. The returned values are on
#' quantum based effectiveness relative units.
#'
#' @param w.length numeric array of wavelengths (nm)
#'
#' @return a numeric array of the same length as \code{w.length} with values for
#' the BSWF normalized as in the original source (298 nm) and based on quantum
#' effectiveness.
#'
#'
#' @export
#' @examples
#' CIE_q_fun(293:400)
#'
#'
#' @family BSWF functions
#'
CIE_q_fun <-
function(w.length){
CIE_e_fun(w.length) * 298.0 / w.length
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/validate_clean_ballots.R
\name{cleanBallots}
\alias{cleanBallots}
\title{Tries to Address \code{validateBallots()} Warning(s).}
\usage{
cleanBallots(x, cand.names = NULL)
}
\arguments{
\item{x}{a \code{data.frame} with rows as ballots and columns as candidates.}
\item{cand.names}{character vector of length equal to number of candidates
(needed when column names of \code{x} are missing or not unique).}
}
\value{
a \code{data.frame} compatible for \code{stv()} function.
}
\description{
Tries to clean data for \code{stv()}. Some of the warnings from \code{validateBallots()}
have to be addressed by the user (see Details).
}
\details{
Assumes \code{x} contains rows and columns corresponding to ballots and
candidates respectively. Tries to address issues raised by \code{validateBallots()}
in the following order:
\enumerate{
\item If \code{x} is a \code{matrix} then converts to \code{data.frame}.
Otherwise, user has to convert \code{x} into \code{data.frame}.
\item Checks if \code{x} has numeric entries. If not, checks if numeric
data was passed as character. If this also fails, then user has to
convert data into numeric type.
\item If column names of \code{x} are missing assigns \code{cand.names} as
column names. If
\code{x} already has valid column names, no need to specify \code{cand.names}.
If column names of \code{x} missing and \code{cand.names} not specified, returns
error message.
\item Removes blank columns.
\item Removes blank and/or non-sequentially ranked rows.
}
}
\examples{
data(ballots)
cballots <- cleanBallots(ballots)
validateBallots(cballots)
}
|
/man/cleanBallots.Rd
|
no_license
|
chandrasaksham/STV
|
R
| false
| true
| 1,690
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/validate_clean_ballots.R
\name{cleanBallots}
\alias{cleanBallots}
\title{Tries to Address \code{validateBallots()} Warning(s).}
\usage{
cleanBallots(x, cand.names = NULL)
}
\arguments{
\item{x}{a \code{data.frame} with rows as ballots and columns as candidates.}
\item{cand.names}{character vector of length equal to number of candidates
(needed when column names of \code{x} are missing or not unique).}
}
\value{
a \code{data.frame} compatible for \code{stv()} function.
}
\description{
Tries to clean data for \code{stv()}. Some of the warnings from \code{validateBallots()}
have to be addressed by the user (see Details).
}
\details{
Assumes \code{x} contains rows and columns corresponding to ballots and
candidates respectively. Tries to address issues raised by \code{validateBallots()}
in the following order:
\enumerate{
\item If \code{x} is a \code{matrix} then converts to \code{data.frame}.
Otherwise, user has to convert \code{x} into \code{data.frame}.
\item Checks if \code{x} has numeric entries. If not, checks if numeric
data was passed as character. If this also fails, then user has to
convert data into numeric type.
\item If column names of \code{x} are missing assigns \code{cand.names} as
column names. If
\code{x} already has valid column names, no need to specify \code{cand.names}.
If column names of \code{x} missing and \code{cand.names} not specified, returns
error message.
\item Removes blank columns.
\item Removes blank and/or non-sequentially ranked rows.
}
}
\examples{
data(ballots)
cballots <- cleanBallots(ballots)
validateBallots(cballots)
}
|
source('./code/0-setup.R')
# data import -----------------------------------------------------------------------------------------------------
#' Write a function that imports the clean data template with the additional columns.
#' This makes it easy so seperate the data cleaning from using the data in modelling
#' or analysis
#' Load the clean data template and coerce each column to the type the
#' downstream modelling/analysis code expects, sorted by account and period.
#' Keeping the import in one function separates data cleaning concerns from
#' the analysis code that consumes the data.
import_data = function(file_path){
  raw = rio::import(file_path, setclass = 'tbl')
  typed = dplyr::mutate(
    raw,
    account_id = as.character(account_id),
    period = as.Date(period),
    origination_date = as.Date(origination_date),
    maturity_date = as.Date(maturity_date),
    interest_rate = as.double(interest_rate),
    product_type = as.character(product_type),
    arrears_status = as.character(arrears_status),
    stage = as.character(stage),
    account_balance = as.double(account_balance),
    contract_term = as.integer(contract_term),
    time_on_book = as.integer(time_on_book),
    remaining_term = as.integer(remaining_term)
  )
  dplyr::arrange(typed, account_id, period)
}
# NOTE(review): this path starts with '.data/' while the source() call above
# uses './code/...' — confirm it should not be './data/master-data-clean.csv.gz'.
data = import_data('.data/master-data-clean.csv.gz')
# basic data analysis ---------------------------------------------------------------------------------------------
#' In this section we will do some basic data analysis to see if trends and distributions are stable over
#' time. If distributions aren't stable they need to be investigated and catered for in the modelling.
# Total balance per period, split by product type (line chart).
data %>%
  group_by(period, product_type) %>%
  summarise(total = sum(account_balance)) %>%
  plotly::plot_ly(x = ~period, y = ~total, color = ~product_type, type = 'scatter', mode = 'line')
# Share of total balance in each stage per period (stacked bars, all products).
data %>%
  group_by(period, stage) %>%
  summarise(total = sum(account_balance)) %>%
  mutate(total = total / sum(total)) %>%
  plotly::plot_ly(x = ~period, y = ~total, color = ~stage, type = 'bar') %>%
  plotly::layout(barmode='stack')
# Same stage mix, restricted to the 'loan' product only.
data %>%
  filter(product_type == 'loan') %>%
  group_by(period, stage) %>%
  summarise(total = sum(account_balance)) %>%
  mutate(total = total / sum(total)) %>%
  plotly::plot_ly(x = ~period, y = ~total, color = ~stage, type = 'bar') %>%
  plotly::layout(barmode='stack')
# Balance by time on book, overlaid bars per product type.
data %>%
  group_by(product_type, time_on_book) %>%
  summarise(total = sum(account_balance)) %>%
  plotly::plot_ly(alpha = 0.7, type = 'bar', x = ~time_on_book, y = ~total, color = ~product_type) %>%
  plotly::layout(barmode='overlay')
|
/code/2-data-analysis.R
|
no_license
|
gbisschoff/R-Training
|
R
| false
| false
| 2,405
|
r
|
source('./code/0-setup.R')
# data import -----------------------------------------------------------------------------------------------------
#' Write a function that imports the clean data template with the additional columns.
#' This makes it easy so seperate the data cleaning from using the data in modelling
#' or analysis
#' Load the clean data template and coerce each column to the type the
#' downstream modelling/analysis code expects, sorted by account and period.
#' Keeping the import in one function separates data cleaning concerns from
#' the analysis code that consumes the data.
import_data = function(file_path){
  raw = rio::import(file_path, setclass = 'tbl')
  typed = dplyr::mutate(
    raw,
    account_id = as.character(account_id),
    period = as.Date(period),
    origination_date = as.Date(origination_date),
    maturity_date = as.Date(maturity_date),
    interest_rate = as.double(interest_rate),
    product_type = as.character(product_type),
    arrears_status = as.character(arrears_status),
    stage = as.character(stage),
    account_balance = as.double(account_balance),
    contract_term = as.integer(contract_term),
    time_on_book = as.integer(time_on_book),
    remaining_term = as.integer(remaining_term)
  )
  dplyr::arrange(typed, account_id, period)
}
# NOTE(review): this path starts with '.data/' while the source() call above
# uses './code/...' — confirm it should not be './data/master-data-clean.csv.gz'.
data = import_data('.data/master-data-clean.csv.gz')
# basic data analysis ---------------------------------------------------------------------------------------------
#' In this section we will do some basic data analysis to see if trends and distributions are stable over
#' time. If distributions aren't stable they need to be investigated and catered for in the modelling.
# Total balance per period, split by product type (line chart).
data %>%
  group_by(period, product_type) %>%
  summarise(total = sum(account_balance)) %>%
  plotly::plot_ly(x = ~period, y = ~total, color = ~product_type, type = 'scatter', mode = 'line')
# Share of total balance in each stage per period (stacked bars, all products).
data %>%
  group_by(period, stage) %>%
  summarise(total = sum(account_balance)) %>%
  mutate(total = total / sum(total)) %>%
  plotly::plot_ly(x = ~period, y = ~total, color = ~stage, type = 'bar') %>%
  plotly::layout(barmode='stack')
# Same stage mix, restricted to the 'loan' product only.
data %>%
  filter(product_type == 'loan') %>%
  group_by(period, stage) %>%
  summarise(total = sum(account_balance)) %>%
  mutate(total = total / sum(total)) %>%
  plotly::plot_ly(x = ~period, y = ~total, color = ~stage, type = 'bar') %>%
  plotly::layout(barmode='stack')
# Balance by time on book, overlaid bars per product type.
data %>%
  group_by(product_type, time_on_book) %>%
  summarise(total = sum(account_balance)) %>%
  plotly::plot_ly(alpha = 0.7, type = 'bar', x = ~time_on_book, y = ~total, color = ~product_type) %>%
  plotly::layout(barmode='overlay')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build-series.R
\name{build_series}
\alias{build_series}
\title{Builds Multi-year Series for the Station Variables.}
\usage{
build_series(station)
}
\arguments{
\item{station}{character string giving the WIGOS compatible station
identifier.}
}
\description{
For each of the variables of a given station, rbinds the anual data frames in
the C3S-QC format to make a series of several years.
}
|
/man/build_series.Rd
|
no_license
|
mcmventura/fcdata2qc
|
R
| false
| true
| 467
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build-series.R
\name{build_series}
\alias{build_series}
\title{Builds Multi-year Series for the Station Variables.}
\usage{
build_series(station)
}
\arguments{
\item{station}{character string giving the WIGOS compatible station
identifier.}
}
\description{
For each of the variables of a given station, rbinds the annual data frames in
the C3S-QC format to make a series of several years.
}
|
Day <- 5  # puzzle day number, used to locate the input file
# Load libraries ----------------------------------------------------------
library(here)
library(glue)
library(tidyverse)
# Read data ---------------------------------------------------------------
# Parse the comma-separated program into a tibble with a zero-based
# address column (idx) next to each stored value.
d <- readLines(here("2019", "data", glue("day_{Day}_input.txt")))
d <- strsplit(d, ",")[[1]] %>%
  as.numeric() %>% as_tibble() %>%
  rownames_to_column(var = "idx") %>%
  mutate(idx = as.numeric(idx) - 1)
# Functions ---------------------------------------------------------------
# This is the hardest program I've written
# Split an instruction code into its opcode and parameter modes.
# The code is zero-padded to five digits: the last two digits are the
# opcode; the leading three digits are the parameter modes, returned
# reversed so that m[1] is the mode of the first parameter.
# Returns list(oc = <numeric opcode>, m = <numeric vector of 3 modes>).
parse_instruction <- function(code) {
  code <- sprintf("%05d", code)
  n <- nchar(code)
  # base::substr does the job of stringi::stri_sub on a fixed-width
  # string, avoiding the third-party stringi dependency.
  opcode <- as.numeric(substr(code, n - 1, n))
  modes <- rev(as.numeric(strsplit(substr(code, 1, 3), "")[[1]]))
  return(list(oc = opcode,
              m = modes))
}
# Read one value from program `d` for the parameter stored at address `addr`.
# Mode 1 (immediate): return the value stored at `addr` itself.
# Any other mode (position): treat that value as an address and return the
# value stored there.
modal_read <- function(d, mode, addr) {
  # pull() without a column argument extracts the last column of the
  # filtered tibble (the "value" column here).
  v <- pull(filter(d, idx == addr))
  if (mode == 1) {
    return(v)
  } else {
    return(pull(filter(d,
                       idx == v),
                value))
  }
}
# Execute the single instruction at address `idx` of program `d`.
# Returns "stop" for opcode 99, otherwise the value found just past this
# instruction's parameters (treated as the next opcode by the caller).
# NOTE(review): R is copy-on-modify — every write to `d` below changes a
# local copy only, so the caller never sees the updated program. The
# updated `d` would need to be returned alongside the next code.
execute_instruction <- function(d, inst, idx) {
  # Both parameter values are fetched up front, even for opcodes
  # (3, 4, 99) that use fewer than two parameters.
  param_1_v <- modal_read(d, inst$m[1], idx + 1)
  param_2_v <- modal_read(d, inst$m[2], idx + 2)
  # Number of parameters per opcode; 5000 is a sentinel for unknown opcodes.
  num_params <- case_when(
    inst$oc %in% c(1, 2) ~ 3,
    inst$oc %in% c(3, 4) ~ 1,
    TRUE ~ 5000
  )
  # Destination address is always this instruction's last parameter.
  write_addr <- pull(d[d$idx == idx + num_params,], value)
  # NOTE(review): the assignments below target d[row, ] — the WHOLE row,
  # idx column included — rather than d[row, "value"]; confirm intended.
  if (inst$oc == 99) {
    print("Exiting...")
    return("stop")
    break  # NOTE(review): unreachable (after return) and invalid outside a loop
  } else if (inst$oc == 1) {
    # Add
    d[d$idx == write_addr,] <- param_1_v + param_2_v
  } else if (inst$oc == 2) {
    # Multiply
    d[d$idx == write_addr,] <- param_1_v * param_2_v
  } else if (inst$oc == 3) {
    # Prompt for input
    in_val <- as.numeric(readline(prompt="Enter input: "))
    d[d$idx == write_addr,] <- in_val
  } else if (inst$oc == 4) {
    # Print result
    readline(prompt="Press [enter] to continue")
  }
  # Return the next opcode
  next_code <- pull(d[d$idx == idx + num_params + 1,], value)
  print(next_code)
  readline(prompt="Press [enter] to continue")
  return(next_code)
}
# Main interpreter loop: parse and execute instructions until "stop".
# NOTE(review): neither `idx` nor `d` is ever updated inside the loop, so
# the same address is re-executed each iteration with the original program;
# execute_instruction would need to return the new state and pointer.
ic <- function(d) {
  idx <- pull(d[1,], idx)
  code <- pull(d[1,], value)
  while(code != "stop") {
    inst <- parse_instruction(code)
    code <- execute_instruction(d, inst, idx)
  }
}
# Question 1 --------------------------------------------------------------
# NOTE(review): `answer1` is never assigned in this file — evaluating it
# fails with "object 'answer1' not found".
answer1
# Question 2 --------------------------------------------------------------
# NOTE(review): `answer2` is likewise never assigned.
answer2
|
/2019/day5.R
|
no_license
|
jwinget/advent_of_code_2020
|
R
| false
| false
| 2,564
|
r
|
Day <- 5  # puzzle day number, used to locate the input file
# Load libraries ----------------------------------------------------------
library(here)
library(glue)
library(tidyverse)
# Read data ---------------------------------------------------------------
# Parse the comma-separated program into a tibble with a zero-based
# address column (idx) next to each stored value.
d <- readLines(here("2019", "data", glue("day_{Day}_input.txt")))
d <- strsplit(d, ",")[[1]] %>%
  as.numeric() %>% as_tibble() %>%
  rownames_to_column(var = "idx") %>%
  mutate(idx = as.numeric(idx) - 1)
# Functions ---------------------------------------------------------------
# This is the hardest program I've written
# Split an instruction code into its opcode and parameter modes.
# The code is zero-padded to five digits: the last two digits are the
# opcode; the leading three digits are the parameter modes, returned
# reversed so that m[1] is the mode of the first parameter.
# Returns list(oc = <numeric opcode>, m = <numeric vector of 3 modes>).
parse_instruction <- function(code) {
  code <- sprintf("%05d", code)
  n <- nchar(code)
  # base::substr does the job of stringi::stri_sub on a fixed-width
  # string, avoiding the third-party stringi dependency.
  opcode <- as.numeric(substr(code, n - 1, n))
  modes <- rev(as.numeric(strsplit(substr(code, 1, 3), "")[[1]]))
  return(list(oc = opcode,
              m = modes))
}
modal_read <- function(d, mode, addr) {
  # Read an intcode parameter from the program table `d` (columns: idx, value).
  #
  # mode 1 (immediate): return the value stored at address `addr`.
  # any other mode (position): the value at `addr` is itself an address;
  # return the value stored at that address.
  #
  # Explicit column access replaces the original bare pull(), which silently
  # relied on `value` being the last column of `d` -- fragile if the column
  # order ever changes.
  v <- d$value[d$idx == addr]
  if (mode == 1) {
    return(v)
  }
  d$value[d$idx == v]
}
execute_instruction <- function(d, inst, idx) {
  # Execute one intcode instruction located at program position `idx`.
  #
  # Args:
  #   d: program as a two-column table (idx = 0-based address, value).
  #   inst: parsed instruction, list(oc = opcode, m = parameter modes),
  #         as produced by parse_instruction().
  #   idx: address of the current instruction.
  # Returns: the value stored just past this instruction (interpreted by
  #   the caller as the next opcode), or "stop" for opcode 99.
  #
  # NOTE(review): R passes `d` by value, so the writes below only modify a
  # local copy the caller never sees -- confirm whether this solution is
  # still a work in progress.
  param_1_v <- modal_read(d, inst$m[1], idx + 1)
  param_2_v <- modal_read(d, inst$m[2], idx + 2)
  # Number of parameter cells following the opcode; 5000 is a sentinel for
  # opcodes this interpreter does not implement.
  num_params <- case_when(
    inst$oc %in% c(1, 2) ~ 3,
    inst$oc %in% c(3, 4) ~ 1,
    TRUE ~ 5000
  )
  write_addr <- pull(d[d$idx == idx + num_params,], value)
  if (inst$oc == 99) {
    print("Exiting...")
    return("stop")  # unreachable (and loop-less) `break` after return removed
  } else if (inst$oc == 1) {
    # Add: write only the value column -- assigning to the whole row also
    # clobbered the idx column (bug fixed)
    d[d$idx == write_addr, "value"] <- param_1_v + param_2_v
  } else if (inst$oc == 2) {
    # Multiply
    d[d$idx == write_addr, "value"] <- param_1_v * param_2_v
  } else if (inst$oc == 3) {
    # Prompt the user for a numeric input and store it
    in_val <- as.numeric(readline(prompt="Enter input: "))
    d[d$idx == write_addr, "value"] <- in_val
  } else if (inst$oc == 4) {
    # Output instruction: pause so the user can inspect printed state
    readline(prompt="Press [enter] to continue")
  }
  # Return the cell following this instruction (the next opcode)
  next_code <- pull(d[d$idx == idx + num_params + 1,], value)
  print(next_code)
  readline(prompt="Press [enter] to continue")
  return(next_code)
}
ic <- function(d) {
  # Intcode driver loop: parse and execute instructions until
  # execute_instruction() returns the sentinel "stop" (opcode 99).
  #
  # NOTE(review): `idx` is read once and never advanced, and the program
  # state modified inside execute_instruction() is not captured back into
  # `d`, so the loop re-executes from the same position -- this looks
  # unfinished; confirm intended behavior.
  idx <- pull(d[1,], idx)
  code <- pull(d[1,], value)
  while(code != "stop") {
    inst <- parse_instruction(code)
    code <- execute_instruction(d, inst, idx)
  }
}
# Question 1 --------------------------------------------------------------
# NOTE(review): answer1/answer2 are never assigned in this file, so
# evaluating them errors with "object not found" -- presumably placeholders
# for the part 1 / part 2 results; confirm.
answer1
# Question 2 --------------------------------------------------------------
answer2
|
filtragemResultadosSCUT_BiVetor = function(matrizResultados, limiaresInferioresSCUT, limiaresSuperioresSCUT){
  # Flag every result that lies strictly between the per-column SCUT
  # thresholds (1 = at-risk, 0 = otherwise), then keep only the rows with
  # at least one flagged classification.
  #
  # Args:
  #   matrizResultados: numeric matrix (rows = cases, columns = classifiers).
  #   limiaresInferioresSCUT / limiaresSuperioresSCUT: numeric vectors of
  #     per-column lower/upper thresholds (length == ncol(matrizResultados)).
  # Returns: 0/1 matrix restricted to rows containing at least one 1, with
  #   row names copied from the input.
  #
  # Vectorized over the whole matrix (the original looped cell by cell):
  # comparing on the transpose recycles the per-column thresholds correctly.
  dentro = t(limiaresInferioresSCUT < t(matrizResultados) &
             t(matrizResultados) < limiaresSuperioresSCUT)
  resultadosFiltrados = dentro * 1  # logical -> numeric 0/1
  dimnames(resultadosFiltrados) = list(dimnames(matrizResultados)[[1]], NULL)
  # keep rows where at least one classification flags a risk
  linhasBuscadas = apply(resultadosFiltrados, 1, function(x) any(x != 0))
  # drop = FALSE: keep matrix shape even when a single row survives
  # (the original silently collapsed one-row results to a vector -- bug fixed)
  resultadosFiltrados = resultadosFiltrados[linhasBuscadas, , drop = FALSE]
  return(resultadosFiltrados)
}
|
/scriptFiltragemResultadosSCUT_BiVetor.R
|
no_license
|
rnpeclat/A2E
|
R
| false
| false
| 976
|
r
|
filtragemResultadosSCUT_BiVetor = function(matrizResultados, limiaresInferioresSCUT, limiaresSuperioresSCUT){
  # Flag every result that lies strictly between the per-column SCUT
  # thresholds (1 = at-risk, 0 = otherwise), then keep only the rows with
  # at least one flagged classification.
  #
  # Args:
  #   matrizResultados: numeric matrix (rows = cases, columns = classifiers).
  #   limiaresInferioresSCUT / limiaresSuperioresSCUT: numeric vectors of
  #     per-column lower/upper thresholds (length == ncol(matrizResultados)).
  # Returns: 0/1 matrix restricted to rows containing at least one 1, with
  #   row names copied from the input.
  #
  # Vectorized over the whole matrix (the original looped cell by cell):
  # comparing on the transpose recycles the per-column thresholds correctly.
  dentro = t(limiaresInferioresSCUT < t(matrizResultados) &
             t(matrizResultados) < limiaresSuperioresSCUT)
  resultadosFiltrados = dentro * 1  # logical -> numeric 0/1
  dimnames(resultadosFiltrados) = list(dimnames(matrizResultados)[[1]], NULL)
  # keep rows where at least one classification flags a risk
  linhasBuscadas = apply(resultadosFiltrados, 1, function(x) any(x != 0))
  # drop = FALSE: keep matrix shape even when a single row survives
  # (the original silently collapsed one-row results to a vector -- bug fixed)
  resultadosFiltrados = resultadosFiltrados[linhasBuscadas, , drop = FALSE]
  return(resultadosFiltrados)
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 100452
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 100452
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Marin/trafficlight-controller/tlc03-uniform-depth-106.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 37558
c no.of clauses 100452
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 100452
c
c QBFLIB/Miller-Marin/trafficlight-controller/tlc03-uniform-depth-106.qdimacs 37558 100452 E1 [] 0 214 37131 100452 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Miller-Marin/trafficlight-controller/tlc03-uniform-depth-106/tlc03-uniform-depth-106.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 697
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 100452
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 100452
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Marin/trafficlight-controller/tlc03-uniform-depth-106.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 37558
c no.of clauses 100452
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 100452
c
c QBFLIB/Miller-Marin/trafficlight-controller/tlc03-uniform-depth-106.qdimacs 37558 100452 E1 [] 0 214 37131 100452 NONE
|
# ui.R
# Shiny UI definition for the "censusVis" demo app (RStudio Shiny tutorial):
# a sidebar with a demographic-variable selector and a percentage-range
# slider, plus a main panel displaying the map the server renders as
# output$map.
shinyUI(fluidPage(
titlePanel("censusVis"),
sidebarLayout(
sidebarPanel(
helpText("Create demographic maps with
information from the 2010 US Census."),
# Census variable to display (percent-of-population measures)
selectInput("var",
label = "Choose a variable to display",
choices = c("Percent White", "Percent Black",
"Percent Hispanic", "Percent Asian"),
selected = "Percent White"),
# Percentage range of interest, 0-100, defaulting to the full range
sliderInput("range",
label = "Range of interest:",
min = 0, max = 100, value = c(0, 100))
),
mainPanel(plotOutput("map"))
)
))
|
/Coursera/Data Science (JHU)/09 Developing Data Products/Week 1 practice and quiz/Shiny apps/census-app/ui.R
|
no_license
|
abudish/Course_Materials_and_Certificates
|
R
| false
| false
| 917
|
r
|
# ui.R
# Shiny UI definition for the "censusVis" demo app (RStudio Shiny tutorial):
# a sidebar with a demographic-variable selector and a percentage-range
# slider, plus a main panel displaying the map the server renders as
# output$map.
shinyUI(fluidPage(
titlePanel("censusVis"),
sidebarLayout(
sidebarPanel(
helpText("Create demographic maps with
information from the 2010 US Census."),
# Census variable to display (percent-of-population measures)
selectInput("var",
label = "Choose a variable to display",
choices = c("Percent White", "Percent Black",
"Percent Hispanic", "Percent Asian"),
selected = "Percent White"),
# Percentage range of interest, 0-100, defaulting to the full range
sliderInput("range",
label = "Range of interest:",
min = 0, max = 100, value = c(0, 100))
),
mainPanel(plotOutput("map"))
)
))
|
x <- function() {
# Tidy the UCI HAR smartphone dataset for the "Getting and Cleaning Data"
# course: merge train/test sets, keep mean/std feature columns, and label
# activities.
#
# NOTE(review): relies on fread() (data.table) and select() (dplyr) being
# attached by the caller, and on the working directory containing the
# unzipped dataset files -- confirm before running.
# Feature names: combine the feature index and name into unique column names.
columnnames <- read.table("features.txt", sep = " ", stringsAsFactors = FALSE)
columnnames$new <- paste(columnnames$V1, columnnames$V2, sep="_")
# Bind features + activity label + subject id for each split.
trainsample <- cbind(fread("train/X_train.txt", sep=" "), fread("train/y_train.txt"), fread("train/subject_train.txt"))
testsample <- cbind(fread("test/X_test.txt", sep=" "), fread("test/y_test.txt"), fread("test/subject_test.txt"))
names(trainsample) <- c(columnnames$new,"activity","subject")
names(testsample) <- c(columnnames$new,"activity","subject")
# Merge the two splits and add a row id used as a join key below.
mergedsamples <- rbind(trainsample, testsample)
mergedsamples$ID<-seq.int(nrow(mergedsamples))
# Positions of mean/std feature columns; 562:564 keeps activity, subject, ID.
columnnamesmean <- grep("mean",columnnames$new)
columnnamesstd <- grep("std",columnnames$new)
meancolumns <- select(mergedsamples,c(columnnamesmean),562:564)
stdcolumns <- select(mergedsamples,c(columnnamesstd),562:564)
finaltable <- merge(meancolumns, stdcolumns, by = c("ID","activity","subject"))
# Map numeric activity codes to ordered labels; level 0 ("NA") looks like a
# placeholder -- the HAR codes are 1-6 -- confirm it is intentional.
finaltable$activity <- ordered(finaltable$activity, c(0,1,2,3,4,5,6), labels=c("NA","WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS", "SITTING", "STANDING", "LAYING"))
}
|
/run_analysis.R
|
no_license
|
Ymget/Cleaning-data---Exam
|
R
| false
| false
| 1,131
|
r
|
x <- function() {
# Tidy the UCI HAR smartphone dataset for the "Getting and Cleaning Data"
# course: merge train/test sets, keep mean/std feature columns, and label
# activities.
#
# NOTE(review): relies on fread() (data.table) and select() (dplyr) being
# attached by the caller, and on the working directory containing the
# unzipped dataset files -- confirm before running.
# Feature names: combine the feature index and name into unique column names.
columnnames <- read.table("features.txt", sep = " ", stringsAsFactors = FALSE)
columnnames$new <- paste(columnnames$V1, columnnames$V2, sep="_")
# Bind features + activity label + subject id for each split.
trainsample <- cbind(fread("train/X_train.txt", sep=" "), fread("train/y_train.txt"), fread("train/subject_train.txt"))
testsample <- cbind(fread("test/X_test.txt", sep=" "), fread("test/y_test.txt"), fread("test/subject_test.txt"))
names(trainsample) <- c(columnnames$new,"activity","subject")
names(testsample) <- c(columnnames$new,"activity","subject")
# Merge the two splits and add a row id used as a join key below.
mergedsamples <- rbind(trainsample, testsample)
mergedsamples$ID<-seq.int(nrow(mergedsamples))
# Positions of mean/std feature columns; 562:564 keeps activity, subject, ID.
columnnamesmean <- grep("mean",columnnames$new)
columnnamesstd <- grep("std",columnnames$new)
meancolumns <- select(mergedsamples,c(columnnamesmean),562:564)
stdcolumns <- select(mergedsamples,c(columnnamesstd),562:564)
finaltable <- merge(meancolumns, stdcolumns, by = c("ID","activity","subject"))
# Map numeric activity codes to ordered labels; level 0 ("NA") looks like a
# placeholder -- the HAR codes are 1-6 -- confirm it is intentional.
finaltable$activity <- ordered(finaltable$activity, c(0,1,2,3,4,5,6), labels=c("NA","WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS", "SITTING", "STANDING", "LAYING"))
}
|
context("Amplicon regressions")
test_that("AmpliconGraph", {
# The empty AmpliconGraph() constructor should produce a valid S4 object.
ag <- AmpliconGraph()
expect_true(validObject(ag))
})
cgov44t_preprocess <- function(){
# Build the preprocessed-data object (pdat) for the CGOV44T test fixtures:
# improper read pairs, 1-kb binned log ratios, and copy-number segments,
# combined via preprocessData().
#
# NOTE(review): `bins1kb` is used without a visible data() call here --
# presumably loaded from svfilters.hg19 by the calling test file; confirm.
extdata <- system.file("extdata", package="svbams")
id <- "cgov44t_revised.bam"
bamfile <- file.path(extdata, id)
irp.file <- file.path(extdata, "cgov44t_improper.rds")
irp <- readRDS(irp.file)
genome(irp) <- "hg19"
# Dead block kept for provenance: one-off fixture regeneration code.
if(FALSE){
f <- irp@first
irp@first <- updateObject(f)
irp@last <- updateObject(irp@last)
saveRDS(irp, file=file.path(extdata, "cgov44t_improper.rds"))
}
bamdir <- system.file("extdata", package="svbams",
mustWork=TRUE)
# Coverage is stored x1000; rescale to log ratios.
lr <- readRDS(file.path(bamdir, "preprocessed_coverage.rds"))/1000
seqlevels(bins1kb, pruning.mode="coarse") <- paste0("chr", c(1:22, "X"))
bins1kb$log_ratio <- lr
path <- system.file("extdata", package="svbams")
segs <- readRDS(file.path(path, "cgov44t_segments.rds"))
seqlevels(segs, pruning.mode="coarse") <- c("chr5", "chr8", "chr15")
##amp.gr <- segs[segs$seg.mean < ampliconParams()$AMP_THR]
##proper.amp <- properReadPairs(bamfile,
## gr=reduce(amp.gr, min.gapwidth=2000))
rps <- list(improper=irp)
# Implicitly returns the preprocessData() result.
pdat <- preprocessData(bam.file=bamfile,
genome=genome(segs)[[1]],
segments=segs,
read_pairs=rps,
bins=bins1kb)
}
test_that("sv_amplicons", {
library(Rsamtools)
library(svfilters.hg19)
##
## standard setup
##
cv.extdata <- system.file("extdata", package="svbams")
segs <- readRDS(file.path(cv.extdata, "cgov44t_segments.rds"))
seqlevels(segs, pruning.mode="coarse") <- c("chr5", "chr8", "chr15")
extdata <- system.file("extdata", package="svbams")
bview <- BamViews(bamPaths=file.path(extdata, "cgov44t_revised.bam"))
params <- ampliconParams()
tx <- loadTx("hg19")
##
## call sv_amplicons with a bunch of arguments
##
ag2 <- sv_amplicons(bview=bview,
segs=segs,
amplicon_filters=germline_filters,
params=params,
transcripts=tx)
path <- system.file("extdata", package="svbams")
ag.4adcc78 <- readRDS(file.path(path, "setDrivers.4adcc78.rds"))
expect_equivalent(ag.4adcc78, ag2)
##
## proposed setup. Call sv_amplicons2 with single argument
##
pdat <- cgov44t_preprocess()
expect_equivalent(pdat$segments, segs)
ag3 <- sv_amplicons2(pdat)
##
## these will not be exactly identical because the set of improper read pairs
## changes slightly. However, the amplicon graph is exactly the same
##
expect_identical(ampliconRanges(ag2), ampliconRanges(ag3))
expect_identical(queryRanges(ag2), queryRanges(ag3))
expect_identical(edges(ag2), edges(ag3))
})
test_that("initialize_graph", {
##
## standard
##
cv.extdata <- system.file("extdata", package="svbams")
segs <- readRDS(file.path(cv.extdata, "cgov44t_segments.rds"))
seqlevels(segs, pruning.mode="coarse") <- c("chr5", "chr8", "chr15")
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "setDrivers.4adcc78.rds"))
extdata <- system.file("extdata", package="svbams")
bview <- BamViews(bamPaths=file.path(extdata, "cgov44t_revised.bam"))
params <- ampliconParams()
amplicon_filters <- germline_filters
params <- ampliconParams()
ag <- initialize_graph(segs, amplicon_filters, params)
if(FALSE){
saveRDS(ag, file="initialize_graph.a4d7744.rds")
}
path <- system.file("extdata", package="svbams")
ag.a4d7744 <- readRDS(file.path(path, "initialize_graph.a4d7744.rds"))
expect_equivalent(ag, ag.a4d7744)
## proposed
##skip("Check cgov44t_preprocess example")
pdat <- cgov44t_preprocess()
pdat$segments <- amplified_segments(pdat$segments, params)
ag2 <- initialize_graph2(pdat, ampliconFilters(pdat$genome),
ampliconParams())
expect_identical(ag, ag2)
})
test_that("add_amplicons", {
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "addFocalDups.ffab104.rds"))
path <- system.file("extdata", package="svbams")
query.ranges <- readRDS(file.path(path, "focalAmpliconDupRanges.a4d7744.rds"))
queryRanges(ag) <- query.ranges
expected <- ag
## proposed
##skip("Check cgov44t_preprocess example")
pdat <- cgov44t_preprocess()
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "initialize_graph.a4d7744.rds"))
ag2 <- add_amplicons(ag, pdat$bam.file, ampliconParams())
if(FALSE){
saveRDS(ag2, file="add_amplicons.a4d7744.rds")
}
expect_equivalent(ag2, expected)
})
test_that("link_amplicons", {
##
## standard
##
extdata <- system.file("extdata", package="svbams")
bam.file <- file.path(extdata, "cgov44t_revised.bam")
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "add_amplicons.a4d7744.rds"))
irp <- get_improper_readpairs(ag, bam.file)
params <- ampliconParams()
ag1 <- link_amplicons(ag, irp, params)
if(FALSE){
saveRDS(ag1, file="link_amplicons.a4d744.rds")
}
path <- system.file("extdata", package="svbams")
ag.a4d744 <- readRDS(file.path(path, "link_amplicons.a4d744.rds"))
expect_equivalent(ag1, ag.a4d744)
##
## proposed
##
##skip("Check cgov44t_preprocess example")
pdat <- cgov44t_preprocess()
improper_rp2 <- pdat$read_pairs[["improper"]]
ag2 <- link_amplicons(ag, improper_rp2, params)
expect_identical(ampliconRanges(ag2), ampliconRanges(ag1))
expect_identical(queryRanges(ag2), queryRanges(ag1))
expect_identical(edges(ag2), edges(ag1))
##
## if we use all the improper read pairs with a mapq filter we recover one fewer edge
##
iparams <- improperAlignmentParams(mapqFilter=30,
what=c("flag", "mrnm", "mpos"))
improper_rp3 <- getImproperAlignmentPairs(pdat$bam.file,
param=iparams,
build="hg19")
ag3 <- link_amplicons(ag, improper_rp3, params)
expect_lt(numEdges(ag3), numEdges(ag2))
})
test_that("annotate_amplicons", {
# Regression: annotating the linked amplicon graph with hg19 transcripts
# should reproduce the saved setDrivers fixture.
path <- system.file("extdata", package="svbams")
expected <- readRDS(file.path(path, "setDrivers.4adcc78.rds"))
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "link_amplicons.a4d744.rds"))
tx <- loadTx("hg19")
ag <- annotate_amplicons(ag, tx)
expect_equivalent(ag, expected)
})
## ag3 is not identical to ag2
## -- check the internal functions
test_that("makeAGraph", {
library(Rsamtools)
library(svfilters.hg19)
##
## standard setup
##
cv.extdata <- system.file("extdata", package="svbams")
segs <- readRDS(file.path(cv.extdata, "cgov44t_segments.rds"))
seqlevels(segs, pruning.mode="coarse") <- c("chr5", "chr8","chr15")
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "setDrivers.4adcc78.rds"))
extdata <- system.file("extdata", package="svbams")
bview <- BamViews(bamPaths=file.path(extdata, "cgov44t_revised.bam"))
params <- ampliconParams()
amplicon_filters <- germline_filters
params <- ampliconParams()
##
## Begin testing internals of sv_amplicons
##
ag <- makeAGraph(segs, amplicon_filters, params)
path <- system.file("extdata", package="svbams")
ag.ffab104 <- readRDS(file.path(path, "makeAGraphffab104.rds"))
expect_equivalent(ag, ag.ffab104)
expect_true(validObject(ag))
##
## Proposed setup
##
##skip("Check cgov44t_preprocess example")
pdat <- cgov44t_preprocess()
## generates an error
segs <- pdat$segments
segs$is_amplicon <- segs$seg.mean > params$AMP_THR
agnew <- makeAGraph2(segs, amplicon_filters, params)
expect_identical(agnew, ag)
})
## the proposed and standard setup are the same here
test_that("joinNearGRanges", {
##
## standard setup
##
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "makeAGraphffab104.rds"))
merged <- joinNearGRanges(ranges(ag), ampliconParams())
if(FALSE){
saveRDS(merged, file="merged.a4d7744.rds")
}
path <- system.file("extdata", package="svbams")
merged.a4d7744 <- readRDS(file.path(path, "merged.a4d7744.rds"))
expect_equivalent(merged, merged.a4d7744)
})
test_that("get_readpairs", {
##
## standard
##
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "initialize_graph.a4d7744.rds"))
extdata <- system.file("extdata", package="svbams")
bam.file <- file.path(extdata, "cgov44t_revised.bam")
rp <- get_readpairs(ag, bam.file)
##
## proposed
##
rp2 <- get_readpairs2(queryRanges(ag), bam.file)
expect_identical(rp, rp2)
})
test_that("addFocalDups", {
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "initialize_graph.a4d7744.rds"))
## standard and proposed are the same
extdata <- system.file("extdata", package="svbams")
bam.file <- file.path(extdata, "cgov44t_revised.bam")
rp <- get_readpairs(ag, bam.file)
ag <- addFocalDupsFlankingAmplicon(ag, rp, ampliconParams())
path <- system.file("extdata", package="svbams")
ag.ffab104 <- readRDS(file.path(path, "addFocalDups.ffab104.rds"))
expect_identical(ag, ag.ffab104)
query.ranges <- focalAmpliconDupRanges(ag, ampliconParams())
if(FALSE){
saveRDS(query.ranges, file="focalAmpliconDupRanges.a4d7744.rds")
}
path <- system.file("extdata", package="svbams")
query.a4d7744 <- readRDS(file.path(path, "focalAmpliconDupRanges.a4d7744.rds"))
expect_equivalent(query.ranges, query.a4d7744)
})
test_that("linkNearAmplicons", {
params <- ampliconParams()
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "linkAmplicons.4adcc78.rds"))
ag <- linkNearAmplicons(ag, maxgap=params[["maxgap"]])
if(FALSE){
saveRDS(ag, file="linkNearAmplicons.4adcc78.rds")
}
path <- system.file("extdata", package="svbams")
ag.4adcc78 <- readRDS(file.path(path, "linkNearAmplicons.4adcc78.rds"))
expect_identical(ag.4adcc78, ag)
})
test_that("filterSmallAmplicons", {
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "linkNearAmplicons.4adcc78.rds"))
ag <- filterSmallAmplicons (ag)
if(FALSE){
saveRDS(ag, file="filterSmallAmplicons.4adcc78.rds")
}
path <- system.file("extdata", package="svbams")
ag.4adcc78 <- readRDS(file.path(path, "filterSmallAmplicons.4adcc78.rds"))
expect_identical(ag.4adcc78, ag)
})
test_that("setAmpliconGroups", {
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "filterSmallAmplicons.4adcc78.rds"))
ag <- setAmpliconGroups (ag)
if(FALSE){
saveRDS(ag, file="setAmpliconGroups.4adcc78.rds")
}
path <- system.file("extdata", package="svbams")
ag.4adcc78 <- readRDS(file.path(path, "setAmpliconGroups.4adcc78.rds"))
expect_equivalent(ag.4adcc78, ag)
})
test_that("setGenes", {
library(svfilters.hg19)
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "setAmpliconGroups.4adcc78.rds"))
tx <- loadTx("hg19")
ag <- setGenes (ag, tx)
if(FALSE){
saveRDS(ag, file="setAmpliconGenes.4adcc78.rds")
}
path <- system.file("extdata", package="svbams")
ag.4adcc78 <- readRDS(file.path(path, "setAmpliconGenes.4adcc78.rds"))
expect_equivalent(ag.4adcc78, ag)
})
test_that("setDrivers", {
library(svfilters.hg19)
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "setAmpliconGenes.4adcc78.rds"))
tx <- loadTx("hg19")
ag <- setDrivers (ag, tx, clin_sign=TRUE)
ag <- setDrivers (ag, tx, clin_sign=FALSE)
if(FALSE){
saveRDS(ag, file="setDrivers.4adcc78.rds")
}
path <- system.file("extdata", package="svbams")
ag.4adcc78 <- readRDS(file.path(path, "setDrivers.4adcc78.rds"))
expect_equivalent(ag.4adcc78, ag)
})
test_that("no germline filter", {
library(svfilters.hg19)
library(svbams)
library(Rsamtools)
library(graph)
data(germline_filters)
data(transcripts)
##
## read in some CNVs
##
cv.extdata <- system.file("extdata", package="svbams")
segs <- readRDS(file.path(cv.extdata, "cgov44t_segments.rds"))
seqlevels(segs, pruning.mode="coarse") <- c("chr5", "chr8","chr15")
extdata <- system.file("extdata", package="svbams")
bview <- BamViews(bamPaths=file.path(extdata, "cgov44t_revised.bam"))
params <- ampliconParams()
germline_filters[["germline_cnv"]] <- GRanges()
germline_filters[["outliers"]] <- GRanges()
##
## Begin testing internals of sv_amplicons
##
ag <- makeAGraph(segs, germline_filters, params)
merged <- joinNearGRanges(ranges(ag), params)
names(merged) <- ampliconNames(merged)
ranges(ag) <- merged
rp <- get_readpairs(ag, bamPaths(bview))
ag <- addFocalDupsFlankingAmplicon(ag, rp, params)
queryRanges(ag) <- focalAmpliconDupRanges(ag, params)
irp <- get_improper_readpairs(ag, bamPaths(bview))
ag <- linkFocalDups(ag, irp, params)
ag <- linkAmplicons(ag, irp, edgeParam=params[["edge"]])
ag <- linkNearAmplicons(ag, maxgap=params[["maxgap"]])
ag <- filterSmallAmplicons (ag)
ag <- setAmpliconGroups (ag)
tx <- loadTx("hg19")
ag <- setGenes (ag, tx)
ag <- setDrivers (ag, tx, clin_sign=TRUE)
ag <- setDrivers (ag, tx, clin_sign=FALSE)
ag2 <- sv_amplicons(bview, segs,
germline_filters,
params, tx)
expect_identical(ag, ag2)
if(FALSE){
saveRDS(ag2, file="sv_deletion.4adcc78.rds")
}
path <- system.file("extdata", package="svbams")
ag.4adcc78 <- readRDS(file.path(path, "sv_deletion.4adcc78.rds"))
expect_equivalent(ag2, ag.4adcc78)
})
test_amplicon_vignette <- function(){
## unit test bins
data(bins1kb, package="svfilters.hg19")
ddir <- system.file("extdata", package="svbams",
mustWork=TRUE)
lr <- readRDS(file.path(ddir, "preprocessed_coverage.rds"))/1000
seqlevels(bins1kb, pruning.mode="coarse") <- paste0("chr", c(1:22, "X"))
bins1kb$log_ratio <- lr
ut.bins <- keepSeqlevels(bins1kb, c("chr5", "chr8", "chr15"),
pruning.mode="coarse")
path <- system.file("extdata", package="svbams")
segs <- readRDS(file.path(path, "cgov44t_segments.rds"))
seqlevels(segs, pruning.mode="coarse") <- c("chr5", "chr8","chr15")
## unit test (ut) segments
ut.segs <- segs
##
## vignette bins
##
## The vignette log ratios on chr5 are centered at zero, but this is actually a big amplicon that we can see when we process the full data
data(bins1kb, package="svfilters.hg19")
bins <- keepSeqlevels(bins1kb, c("chr5", "chr8", "chr15"),
pruning.mode="coarse")
bviews <- BamViews(bamPaths=bamfile, bamRanges=bins)
bins$cnt <- binnedCounts(bviews)
bins <- bins[ bins$cnt > 0 ]
bins$std_cnt <- binNormalize(bins)
set.seed(123)
bins$log_ratio <- binGCCorrect(bins)
params <- SegmentParam()
g <- segmentBins(bins, param=SegmentParam())
ut.segs.subset <- subsetByOverlaps(ut.segs, g)
dat <- as_tibble(ut.bins) %>%
filter(seqnames %in% c("chr5", "chr8", "chr15")) %>%
mutate(seqnames=factor(seqnames,
levels=c("chr5", "chr8","chr15"))) %>%
filter(!is.na(log_ratio))
segs.dat <- as_tibble(ut.segs)
ggplot(dat) +
geom_point(aes(start/1e6, log_ratio), size = 1, shape=".",
col="gray") +
xlab("Coordinate") +
ylab("log2 normalized counts") +
coord_cartesian(ylim=c(-4, 2), xlim=c(172, 177)) +
geom_segment(data=segs.dat, aes(x=start/1e6, xend=end/1e6,
y=seg.mean, yend=seg.mean),
inherit.aes=FALSE) +
facet_grid(~seqnames, space="free", scales="free_x") +
theme(panel.background=element_rect(fill="white", color="black")) +
xlab("")
## chr5 is one big segment
dat2 <- as_tibble(bins) %>%
filter(seqnames %in% c("chr5", "chr8","chr15")) %>%
mutate(seqnames=factor(seqnames,
levels=c("chr5", "chr8","chr15"))) %>%
filter(!is.na(log_ratio))
g.dat <- as_tibble(g) %>%
filter(seqnames %in% c("chr5", "chr8","chr15"))
ggplot(dat2) +
geom_point(aes(start/1e6, log_ratio), size = 1, shape=".",
col="gray") +
xlab("Coordinate") +
ylab("log2 normalized counts") +
coord_cartesian(ylim=c(-4, 2)) +
geom_segment(data=g.dat, aes(x=start/1e6, xend=end/1e6,
y=seg.mean, yend=seg.mean),
inherit.aes=FALSE) +
facet_grid(~seqnames, space="free", scales="free_x") +
theme(panel.background=element_rect(fill="white", color="black")) +
xlab("")
ggsave("~/tmp.pdf", width=10, height=6)
}
|
/tests/testthat/test_amplicon_regressions.R
|
no_license
|
cancer-genomics/trellis
|
R
| false
| false
| 16,657
|
r
|
context("Amplicon regressions")
test_that("AmpliconGraph", {
# The empty AmpliconGraph() constructor should produce a valid S4 object.
ag <- AmpliconGraph()
expect_true(validObject(ag))
})
cgov44t_preprocess <- function(){
# Build the preprocessed-data object (pdat) for the CGOV44T test fixtures:
# improper read pairs, 1-kb binned log ratios, and copy-number segments,
# combined via preprocessData().
#
# NOTE(review): `bins1kb` is used without a visible data() call here --
# presumably loaded from svfilters.hg19 by the calling test file; confirm.
extdata <- system.file("extdata", package="svbams")
id <- "cgov44t_revised.bam"
bamfile <- file.path(extdata, id)
irp.file <- file.path(extdata, "cgov44t_improper.rds")
irp <- readRDS(irp.file)
genome(irp) <- "hg19"
# Dead block kept for provenance: one-off fixture regeneration code.
if(FALSE){
f <- irp@first
irp@first <- updateObject(f)
irp@last <- updateObject(irp@last)
saveRDS(irp, file=file.path(extdata, "cgov44t_improper.rds"))
}
bamdir <- system.file("extdata", package="svbams",
mustWork=TRUE)
# Coverage is stored x1000; rescale to log ratios.
lr <- readRDS(file.path(bamdir, "preprocessed_coverage.rds"))/1000
seqlevels(bins1kb, pruning.mode="coarse") <- paste0("chr", c(1:22, "X"))
bins1kb$log_ratio <- lr
path <- system.file("extdata", package="svbams")
segs <- readRDS(file.path(path, "cgov44t_segments.rds"))
seqlevels(segs, pruning.mode="coarse") <- c("chr5", "chr8", "chr15")
##amp.gr <- segs[segs$seg.mean < ampliconParams()$AMP_THR]
##proper.amp <- properReadPairs(bamfile,
## gr=reduce(amp.gr, min.gapwidth=2000))
rps <- list(improper=irp)
# Implicitly returns the preprocessData() result.
pdat <- preprocessData(bam.file=bamfile,
genome=genome(segs)[[1]],
segments=segs,
read_pairs=rps,
bins=bins1kb)
}
test_that("sv_amplicons", {
library(Rsamtools)
library(svfilters.hg19)
##
## standard setup
##
cv.extdata <- system.file("extdata", package="svbams")
segs <- readRDS(file.path(cv.extdata, "cgov44t_segments.rds"))
seqlevels(segs, pruning.mode="coarse") <- c("chr5", "chr8", "chr15")
extdata <- system.file("extdata", package="svbams")
bview <- BamViews(bamPaths=file.path(extdata, "cgov44t_revised.bam"))
params <- ampliconParams()
tx <- loadTx("hg19")
##
## call sv_amplicons with a bunch of arguments
##
ag2 <- sv_amplicons(bview=bview,
segs=segs,
amplicon_filters=germline_filters,
params=params,
transcripts=tx)
path <- system.file("extdata", package="svbams")
ag.4adcc78 <- readRDS(file.path(path, "setDrivers.4adcc78.rds"))
expect_equivalent(ag.4adcc78, ag2)
##
## proposed setup. Call sv_amplicons2 with single argument
##
pdat <- cgov44t_preprocess()
expect_equivalent(pdat$segments, segs)
ag3 <- sv_amplicons2(pdat)
##
## these will not be exactly identical because the set of improper read pairs
## changes slightly. However, the amplicon graph is exactly the same
##
expect_identical(ampliconRanges(ag2), ampliconRanges(ag3))
expect_identical(queryRanges(ag2), queryRanges(ag3))
expect_identical(edges(ag2), edges(ag3))
})
test_that("initialize_graph", {
##
## standard
##
cv.extdata <- system.file("extdata", package="svbams")
segs <- readRDS(file.path(cv.extdata, "cgov44t_segments.rds"))
seqlevels(segs, pruning.mode="coarse") <- c("chr5", "chr8", "chr15")
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "setDrivers.4adcc78.rds"))
extdata <- system.file("extdata", package="svbams")
bview <- BamViews(bamPaths=file.path(extdata, "cgov44t_revised.bam"))
params <- ampliconParams()
amplicon_filters <- germline_filters
params <- ampliconParams()
ag <- initialize_graph(segs, amplicon_filters, params)
if(FALSE){
saveRDS(ag, file="initialize_graph.a4d7744.rds")
}
path <- system.file("extdata", package="svbams")
ag.a4d7744 <- readRDS(file.path(path, "initialize_graph.a4d7744.rds"))
expect_equivalent(ag, ag.a4d7744)
## proposed
##skip("Check cgov44t_preprocess example")
pdat <- cgov44t_preprocess()
pdat$segments <- amplified_segments(pdat$segments, params)
ag2 <- initialize_graph2(pdat, ampliconFilters(pdat$genome),
ampliconParams())
expect_identical(ag, ag2)
})
test_that("add_amplicons", {
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "addFocalDups.ffab104.rds"))
path <- system.file("extdata", package="svbams")
query.ranges <- readRDS(file.path(path, "focalAmpliconDupRanges.a4d7744.rds"))
queryRanges(ag) <- query.ranges
expected <- ag
## proposed
##skip("Check cgov44t_preprocess example")
pdat <- cgov44t_preprocess()
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "initialize_graph.a4d7744.rds"))
ag2 <- add_amplicons(ag, pdat$bam.file, ampliconParams())
if(FALSE){
saveRDS(ag2, file="add_amplicons.a4d7744.rds")
}
expect_equivalent(ag2, expected)
})
test_that("link_amplicons", {
##
## standard
##
extdata <- system.file("extdata", package="svbams")
bam.file <- file.path(extdata, "cgov44t_revised.bam")
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "add_amplicons.a4d7744.rds"))
irp <- get_improper_readpairs(ag, bam.file)
params <- ampliconParams()
ag1 <- link_amplicons(ag, irp, params)
if(FALSE){
saveRDS(ag1, file="link_amplicons.a4d744.rds")
}
path <- system.file("extdata", package="svbams")
ag.a4d744 <- readRDS(file.path(path, "link_amplicons.a4d744.rds"))
expect_equivalent(ag1, ag.a4d744)
##
## proposed
##
##skip("Check cgov44t_preprocess example")
pdat <- cgov44t_preprocess()
improper_rp2 <- pdat$read_pairs[["improper"]]
ag2 <- link_amplicons(ag, improper_rp2, params)
expect_identical(ampliconRanges(ag2), ampliconRanges(ag1))
expect_identical(queryRanges(ag2), queryRanges(ag1))
expect_identical(edges(ag2), edges(ag1))
##
## if we use all the improper read pairs with a mapq filter we recover one fewer edge
##
iparams <- improperAlignmentParams(mapqFilter=30,
what=c("flag", "mrnm", "mpos"))
improper_rp3 <- getImproperAlignmentPairs(pdat$bam.file,
param=iparams,
build="hg19")
ag3 <- link_amplicons(ag, improper_rp3, params)
expect_lt(numEdges(ag3), numEdges(ag2))
})
test_that("annotate_amplicons", {
path <- system.file("extdata", package="svbams")
expected <- readRDS(file.path(path, "setDrivers.4adcc78.rds"))
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "link_amplicons.a4d744.rds"))
tx <- loadTx("hg19")
ag <- annotate_amplicons(ag, tx)
expect_equivalent(ag, expected)
})
## ag3 is not identical to ag2
## -- check the internal functions
test_that("makeAGraph", {
library(Rsamtools)
library(svfilters.hg19)
##
## standard setup
##
cv.extdata <- system.file("extdata", package="svbams")
segs <- readRDS(file.path(cv.extdata, "cgov44t_segments.rds"))
seqlevels(segs, pruning.mode="coarse") <- c("chr5", "chr8","chr15")
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "setDrivers.4adcc78.rds"))
extdata <- system.file("extdata", package="svbams")
bview <- BamViews(bamPaths=file.path(extdata, "cgov44t_revised.bam"))
params <- ampliconParams()
amplicon_filters <- germline_filters
params <- ampliconParams()
##
## Begin testing internals of sv_amplicons
##
ag <- makeAGraph(segs, amplicon_filters, params)
path <- system.file("extdata", package="svbams")
ag.ffab104 <- readRDS(file.path(path, "makeAGraphffab104.rds"))
expect_equivalent(ag, ag.ffab104)
expect_true(validObject(ag))
##
## Proposed setup
##
##skip("Check cgov44t_preprocess example")
pdat <- cgov44t_preprocess()
## generates an error
segs <- pdat$segments
segs$is_amplicon <- segs$seg.mean > params$AMP_THR
agnew <- makeAGraph2(segs, amplicon_filters, params)
expect_identical(agnew, ag)
})
## the proposed and standard setup are the same here
test_that("joinNearGRanges", {
##
## standard setup
##
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "makeAGraphffab104.rds"))
merged <- joinNearGRanges(ranges(ag), ampliconParams())
if(FALSE){
saveRDS(merged, file="merged.a4d7744.rds")
}
path <- system.file("extdata", package="svbams")
merged.a4d7744 <- readRDS(file.path(path, "merged.a4d7744.rds"))
expect_equivalent(merged, merged.a4d7744)
})
test_that("get_readpairs", {
##
## standard
##
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "initialize_graph.a4d7744.rds"))
extdata <- system.file("extdata", package="svbams")
bam.file <- file.path(extdata, "cgov44t_revised.bam")
rp <- get_readpairs(ag, bam.file)
##
## proposed
##
rp2 <- get_readpairs2(queryRanges(ag), bam.file)
expect_identical(rp, rp2)
})
## Regression test: addFocalDupsFlankingAmplicon() and
## focalAmpliconDupRanges() must match their saved fixtures.
test_that("addFocalDups", {
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "initialize_graph.a4d7744.rds"))
## standard and proposed are the same
extdata <- system.file("extdata", package="svbams")
bam.file <- file.path(extdata, "cgov44t_revised.bam")
rp <- get_readpairs(ag, bam.file)
ag <- addFocalDupsFlankingAmplicon(ag, rp, ampliconParams())
path <- system.file("extdata", package="svbams")
ag.ffab104 <- readRDS(file.path(path, "addFocalDups.ffab104.rds"))
expect_identical(ag, ag.ffab104)
query.ranges <- focalAmpliconDupRanges(ag, ampliconParams())
## Flip to TRUE only when intentionally regenerating the fixture.
if(FALSE){
saveRDS(query.ranges, file="focalAmpliconDupRanges.a4d7744.rds")
}
path <- system.file("extdata", package="svbams")
query.a4d7744 <- readRDS(file.path(path, "focalAmpliconDupRanges.a4d7744.rds"))
expect_equivalent(query.ranges, query.a4d7744)
})
## Regression test: linkNearAmplicons() must reproduce its saved fixture.
test_that("linkNearAmplicons", {
params <- ampliconParams()
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "linkAmplicons.4adcc78.rds"))
ag <- linkNearAmplicons(ag, maxgap=params[["maxgap"]])
## Flip to TRUE only when intentionally regenerating the fixture.
if(FALSE){
saveRDS(ag, file="linkNearAmplicons.4adcc78.rds")
}
path <- system.file("extdata", package="svbams")
ag.4adcc78 <- readRDS(file.path(path, "linkNearAmplicons.4adcc78.rds"))
expect_identical(ag.4adcc78, ag)
})
## Regression test: filterSmallAmplicons() must reproduce its fixture.
test_that("filterSmallAmplicons", {
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "linkNearAmplicons.4adcc78.rds"))
ag <- filterSmallAmplicons (ag)
## Flip to TRUE only when intentionally regenerating the fixture.
if(FALSE){
saveRDS(ag, file="filterSmallAmplicons.4adcc78.rds")
}
path <- system.file("extdata", package="svbams")
ag.4adcc78 <- readRDS(file.path(path, "filterSmallAmplicons.4adcc78.rds"))
expect_identical(ag.4adcc78, ag)
})
## Regression test: setAmpliconGroups() must reproduce its fixture.
test_that("setAmpliconGroups", {
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "filterSmallAmplicons.4adcc78.rds"))
ag <- setAmpliconGroups (ag)
## Flip to TRUE only when intentionally regenerating the fixture.
if(FALSE){
saveRDS(ag, file="setAmpliconGroups.4adcc78.rds")
}
path <- system.file("extdata", package="svbams")
ag.4adcc78 <- readRDS(file.path(path, "setAmpliconGroups.4adcc78.rds"))
expect_equivalent(ag.4adcc78, ag)
})
## Regression test: setGenes() gene annotation must reproduce its fixture.
test_that("setGenes", {
library(svfilters.hg19)
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "setAmpliconGroups.4adcc78.rds"))
tx <- loadTx("hg19")
ag <- setGenes (ag, tx)
## Flip to TRUE only when intentionally regenerating the fixture.
if(FALSE){
saveRDS(ag, file="setAmpliconGenes.4adcc78.rds")
}
path <- system.file("extdata", package="svbams")
ag.4adcc78 <- readRDS(file.path(path, "setAmpliconGenes.4adcc78.rds"))
expect_equivalent(ag.4adcc78, ag)
})
## Regression test: both setDrivers() passes (clinically significant and
## not) applied in sequence must reproduce the saved fixture.
test_that("setDrivers", {
library(svfilters.hg19)
path <- system.file("extdata", package="svbams")
ag <- readRDS(file.path(path, "setAmpliconGenes.4adcc78.rds"))
tx <- loadTx("hg19")
ag <- setDrivers (ag, tx, clin_sign=TRUE)
ag <- setDrivers (ag, tx, clin_sign=FALSE)
## Flip to TRUE only when intentionally regenerating the fixture.
if(FALSE){
saveRDS(ag, file="setDrivers.4adcc78.rds")
}
path <- system.file("extdata", package="svbams")
ag.4adcc78 <- readRDS(file.path(path, "setDrivers.4adcc78.rds"))
expect_equivalent(ag.4adcc78, ag)
})
## With the germline CNV and outlier filters emptied, running each internal
## step of sv_amplicons() by hand must equal a single sv_amplicons() call,
## and both must match the saved fixture.
test_that("no germline filter", {
library(svfilters.hg19)
library(svbams)
library(Rsamtools)
library(graph)
data(germline_filters)
data(transcripts)
##
## read in some CNVs
##
cv.extdata <- system.file("extdata", package="svbams")
segs <- readRDS(file.path(cv.extdata, "cgov44t_segments.rds"))
seqlevels(segs, pruning.mode="coarse") <- c("chr5", "chr8","chr15")
extdata <- system.file("extdata", package="svbams")
bview <- BamViews(bamPaths=file.path(extdata, "cgov44t_revised.bam"))
params <- ampliconParams()
## disable germline filtering by replacing both filters with empty ranges
germline_filters[["germline_cnv"]] <- GRanges()
germline_filters[["outliers"]] <- GRanges()
##
## Begin testing internals of sv_amplicons
##
ag <- makeAGraph(segs, germline_filters, params)
merged <- joinNearGRanges(ranges(ag), params)
names(merged) <- ampliconNames(merged)
ranges(ag) <- merged
rp <- get_readpairs(ag, bamPaths(bview))
ag <- addFocalDupsFlankingAmplicon(ag, rp, params)
queryRanges(ag) <- focalAmpliconDupRanges(ag, params)
irp <- get_improper_readpairs(ag, bamPaths(bview))
ag <- linkFocalDups(ag, irp, params)
ag <- linkAmplicons(ag, irp, edgeParam=params[["edge"]])
ag <- linkNearAmplicons(ag, maxgap=params[["maxgap"]])
ag <- filterSmallAmplicons (ag)
ag <- setAmpliconGroups (ag)
tx <- loadTx("hg19")
ag <- setGenes (ag, tx)
ag <- setDrivers (ag, tx, clin_sign=TRUE)
ag <- setDrivers (ag, tx, clin_sign=FALSE)
## the all-in-one entry point must agree with the manual pipeline above
ag2 <- sv_amplicons(bview, segs,
germline_filters,
params, tx)
expect_identical(ag, ag2)
## Flip to TRUE only when intentionally regenerating the fixture.
if(FALSE){
saveRDS(ag2, file="sv_deletion.4adcc78.rds")
}
path <- system.file("extdata", package="svbams")
ag.4adcc78 <- readRDS(file.path(path, "sv_deletion.4adcc78.rds"))
expect_equivalent(ag2, ag.4adcc78)
})
## Manual/exploratory check (not run by the test harness): compares the
## unit-test bins/segments against segmentation recomputed from raw BAM
## coverage, plotting both. Intended to be run interactively.
test_amplicon_vignette <- function(){
## unit test bins
data(bins1kb, package="svfilters.hg19")
ddir <- system.file("extdata", package="svbams",
mustWork=TRUE)
lr <- readRDS(file.path(ddir, "preprocessed_coverage.rds"))/1000
seqlevels(bins1kb, pruning.mode="coarse") <- paste0("chr", c(1:22, "X"))
bins1kb$log_ratio <- lr
ut.bins <- keepSeqlevels(bins1kb, c("chr5", "chr8", "chr15"),
pruning.mode="coarse")
path <- system.file("extdata", package="svbams")
segs <- readRDS(file.path(path, "cgov44t_segments.rds"))
seqlevels(segs, pruning.mode="coarse") <- c("chr5", "chr8","chr15")
## unit test (ut) segments
ut.segs <- segs
##
## vignette bins
##
## The vignette log ratios on chr5 are centered at zero, but this is actually a big amplicon that we can see when we process the full data
data(bins1kb, package="svfilters.hg19")
bins <- keepSeqlevels(bins1kb, c("chr5", "chr8", "chr15"),
pruning.mode="coarse")
## NOTE(review): `bamfile` is not defined in this function or visible in
## this file; it must exist in the calling environment -- confirm before
## running.
bviews <- BamViews(bamPaths=bamfile, bamRanges=bins)
bins$cnt <- binnedCounts(bviews)
bins <- bins[ bins$cnt > 0 ]
bins$std_cnt <- binNormalize(bins)
set.seed(123)
bins$log_ratio <- binGCCorrect(bins)
params <- SegmentParam()
g <- segmentBins(bins, param=SegmentParam())
## NOTE(review): ut.segs.subset is computed but never used below.
ut.segs.subset <- subsetByOverlaps(ut.segs, g)
dat <- as_tibble(ut.bins) %>%
filter(seqnames %in% c("chr5", "chr8", "chr15")) %>%
mutate(seqnames=factor(seqnames,
levels=c("chr5", "chr8","chr15"))) %>%
filter(!is.na(log_ratio))
segs.dat <- as_tibble(ut.segs)
## Plot 1: unit-test bins with unit-test segment means overlaid.
## NOTE(review): inside a function this unassigned ggplot object is not
## auto-printed, so this figure is never rendered -- wrap in print() if
## it should display.
ggplot(dat) +
geom_point(aes(start/1e6, log_ratio), size = 1, shape=".",
col="gray") +
xlab("Coordinate") +
ylab("log2 normalized counts") +
coord_cartesian(ylim=c(-4, 2), xlim=c(172, 177)) +
geom_segment(data=segs.dat, aes(x=start/1e6, xend=end/1e6,
y=seg.mean, yend=seg.mean),
inherit.aes=FALSE) +
facet_grid(~seqnames, space="free", scales="free_x") +
theme(panel.background=element_rect(fill="white", color="black")) +
xlab("")
## chr5 is one big segment
dat2 <- as_tibble(bins) %>%
filter(seqnames %in% c("chr5", "chr8","chr15")) %>%
mutate(seqnames=factor(seqnames,
levels=c("chr5", "chr8","chr15"))) %>%
filter(!is.na(log_ratio))
g.dat <- as_tibble(g) %>%
filter(seqnames %in% c("chr5", "chr8","chr15"))
## Plot 2: recomputed bins/segments; written to ~/tmp.pdf by ggsave()
## (side effect on the user's home directory).
ggplot(dat2) +
geom_point(aes(start/1e6, log_ratio), size = 1, shape=".",
col="gray") +
xlab("Coordinate") +
ylab("log2 normalized counts") +
coord_cartesian(ylim=c(-4, 2)) +
geom_segment(data=g.dat, aes(x=start/1e6, xend=end/1e6,
y=seg.mean, yend=seg.mean),
inherit.aes=FALSE) +
facet_grid(~seqnames, space="free", scales="free_x") +
theme(panel.background=element_rect(fill="white", color="black")) +
xlab("")
ggsave("~/tmp.pdf", width=10, height=6)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compile_activities.R
\name{compile_activities}
\alias{compile_activities}
\title{converts a list of activities into a dataframe}
\usage{
compile_activities(actlist, acts = NULL, id = NULL, units = "metric")
}
\arguments{
\item{actlist}{an activities list returned by \code{\link{get_activity_list}}}
\item{acts}{numeric indicating which activities to compile starting with most recent, defaults to all}
\item{id}{optional numeric vector to specify the id(s) of the activity/activities to plot, \code{acts} is ignored if provided}
\item{units}{chr string indicating metric or imperial}
}
\value{
An activities frame object (\code{actframe}) that includes a data frame for the data and attributes for the distance, speed, and elevation units
}
\description{
converts a list of activities into a dataframe
}
\details{
each activity has a value for every column present across all activities, with NAs populating empty values
}
\examples{
\dontrun{
stoken <- httr::config(token = strava_oauth(app_name, app_client_id, app_secret, cache = TRUE))
my_acts <- get_activity_list(stoken)
acts_data <- compile_activities(my_acts)
# show attributes
attr(acts_data, 'unit_type')
attr(acts_data, 'unit_vals')
}
}
\seealso{
\code{\link{compile_club_activities}} for compiling an activities list for club activities
}
\author{
Daniel Padfield
}
\concept{token}
|
/man/compile_activities.Rd
|
permissive
|
fawda123/rStrava
|
R
| false
| true
| 1,430
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compile_activities.R
\name{compile_activities}
\alias{compile_activities}
\title{converts a list of activities into a dataframe}
\usage{
compile_activities(actlist, acts = NULL, id = NULL, units = "metric")
}
\arguments{
\item{actlist}{an activities list returned by \code{\link{get_activity_list}}}
\item{acts}{numeric indicating which activities to compile starting with most recent, defaults to all}
\item{id}{optional numeric vector to specify the id(s) of the activity/activities to plot, \code{acts} is ignored if provided}
\item{units}{chr string indicating metric or imperial}
}
\value{
An activities frame object (\code{actframe}) that includes a data frame for the data and attributes for the distance, speed, and elevation units
}
\description{
converts a list of activities into a dataframe
}
\details{
each activity has a value for every column present across all activities, with NAs populating empty values
}
\examples{
\dontrun{
stoken <- httr::config(token = strava_oauth(app_name, app_client_id, app_secret, cache = TRUE))
my_acts <- get_activity_list(stoken)
acts_data <- compile_activities(my_acts)
# show attributes
attr(acts_data, 'unit_type')
attr(acts_data, 'unit_vals')
}
}
\seealso{
\code{\link{compile_club_activities}} for compiling an activities list for club activities
}
\author{
Daniel Padfield
}
\concept{token}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/create_weather_grid.R
\name{voronoipolygons}
\alias{voronoipolygons}
\title{voronoi polygons}
\usage{
voronoipolygons(layer, create.filename = T)
}
\description{
voronoi polygons
}
|
/man/voronoipolygons.Rd
|
permissive
|
paulalsrees/conteStreamflow
|
R
| false
| false
| 268
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/create_weather_grid.R
\name{voronoipolygons}
\alias{voronoipolygons}
\title{voronoi polygons}
\usage{
voronoipolygons(layer, create.filename = T)
}
\description{
voronoi polygons
}
|
# =====================================================================
# CSE487/587
# Author: your name
# Email: UBID@buffalo.edu
# =====================================================================
# need to install the following two packages in CCR(at least)
# data path /gpfs/courses/cse587/spring2015/data/hw2/data
#
# For every stock in the stock list: fit an ARIMA model on ~2 years of
# daily adjusted-close prices, forecast the held-out tail, and rank
# stocks by the summed mean absolute error of the first 10 forecast days.
library(forecast)
library(fpp)
# Per-stock results are accumulated in lists and flattened once after the
# loop; growing vectors with c() inside a loop is quadratic.
val_list <- list()
stock_list <- list()
# read the stock list (one symbol per line) and loop over all files
#stocklist_file = "E:/mapreduce/bin/stocklist.txt"
stocklist_file <- "/gpfs/courses/cse587/spring2015/data/hw2/stocklist.txt"
stocklist <- read.table(stocklist_file, sep = "\n")
# iterate over every symbol actually present instead of a hard-coded 2970
for (i in seq_len(nrow(stocklist)))
{
  symbol <- stocklist[i, 1]
  #filename = paste("E:/mapreduce/bin/small/" , stocklist[i,1] , sep='')
  filename <- paste0("/gpfs/courses/cse587/spring2015/data/hw2/data/",
                     symbol, ".csv")
  # skip missing/empty files and two symbols with known-bad data;
  # isTRUE() guards against the NA size returned for a missing file
  if (isTRUE(file.info(filename)$size > 0) &&
      symbol != "JASOLtd." && symbol != "ULTAInc.") {
    # read one csv file into variable (DO NOT EDIT)
    textData=read.csv(file=filename, header=T)
    # only model series long enough to cover train + test windows
    if (nrow(textData) > 753)
    {
      # convert txt data to time-series data, in day unit (DO NOT EDIT)
      tsData = ts(rev(textData$Adj.Close),start=c(2012, 1),frequency=365)
      # define train data (DO NOT EDIT)
      trainData = window(tsData, end=c(2014,14))
      # define test data (DO NOT EDIT)
      testData = window(tsData, start=c(2014,15))
      # MAE row vector (DO NOT EDIT)
      MAE = matrix(NA,1,length(testData))
      # apply ARIMA model (DO NOT EDIT)
      fitData = auto.arima(trainData, seasonal = FALSE, lambda = NULL, approximation = TRUE, parallel = TRUE)
      # apply forecast(DO NOT EDIT)
      forecastData = forecast(fitData, h=length(testData))
      # absolute errors, vectorized (same values as an element-wise loop)
      MAE[1, ] <- abs(as.numeric(forecastData$mean) - as.numeric(testData))
      # score = summed MAE over the first 10 forecast days
      val_list[[length(val_list) + 1]] <- sum(MAE[1, 1:10])
      stock_list[[length(stock_list) + 1]] <- as.character(symbol)
    }
  }
}
# flatten accumulators; column names `val`/`stock` are kept so the printed
# data frame matches the original output
val <- unlist(val_list)
stock <- unlist(stock_list)
final_df <- data.frame(val, stock)
# report the 10 stocks with the lowest forecast error
final <- final_df[order(val), ]
print(final[1:10, ])
|
/Time-Series forecast of stocks in NASDAQ/Arima.R
|
no_license
|
pratik-chavan/Projects
|
R
| false
| false
| 2,674
|
r
|
# =====================================================================
# CSE487/587
# Author: your name
# Email: UBID@buffalo.edu
# =====================================================================
# need to install the following two packages in CCR(at least)
# data path /gpfs/courses/cse587/spring2015/data/hw2/data
#
# For every stock in the stock list: fit an ARIMA model on ~2 years of
# daily adjusted-close prices, forecast the held-out tail, and rank
# stocks by the summed mean absolute error of the first 10 forecast days.
library(forecast)
library(fpp)
# Per-stock results are accumulated in lists and flattened once after the
# loop; growing vectors with c() inside a loop is quadratic.
val_list <- list()
stock_list <- list()
# read the stock list (one symbol per line) and loop over all files
#stocklist_file = "E:/mapreduce/bin/stocklist.txt"
stocklist_file <- "/gpfs/courses/cse587/spring2015/data/hw2/stocklist.txt"
stocklist <- read.table(stocklist_file, sep = "\n")
# iterate over every symbol actually present instead of a hard-coded 2970
for (i in seq_len(nrow(stocklist)))
{
  symbol <- stocklist[i, 1]
  #filename = paste("E:/mapreduce/bin/small/" , stocklist[i,1] , sep='')
  filename <- paste0("/gpfs/courses/cse587/spring2015/data/hw2/data/",
                     symbol, ".csv")
  # skip missing/empty files and two symbols with known-bad data;
  # isTRUE() guards against the NA size returned for a missing file
  if (isTRUE(file.info(filename)$size > 0) &&
      symbol != "JASOLtd." && symbol != "ULTAInc.") {
    # read one csv file into variable (DO NOT EDIT)
    textData=read.csv(file=filename, header=T)
    # only model series long enough to cover train + test windows
    if (nrow(textData) > 753)
    {
      # convert txt data to time-series data, in day unit (DO NOT EDIT)
      tsData = ts(rev(textData$Adj.Close),start=c(2012, 1),frequency=365)
      # define train data (DO NOT EDIT)
      trainData = window(tsData, end=c(2014,14))
      # define test data (DO NOT EDIT)
      testData = window(tsData, start=c(2014,15))
      # MAE row vector (DO NOT EDIT)
      MAE = matrix(NA,1,length(testData))
      # apply ARIMA model (DO NOT EDIT)
      fitData = auto.arima(trainData, seasonal = FALSE, lambda = NULL, approximation = TRUE, parallel = TRUE)
      # apply forecast(DO NOT EDIT)
      forecastData = forecast(fitData, h=length(testData))
      # absolute errors, vectorized (same values as an element-wise loop)
      MAE[1, ] <- abs(as.numeric(forecastData$mean) - as.numeric(testData))
      # score = summed MAE over the first 10 forecast days
      val_list[[length(val_list) + 1]] <- sum(MAE[1, 1:10])
      stock_list[[length(stock_list) + 1]] <- as.character(symbol)
    }
  }
}
# flatten accumulators; column names `val`/`stock` are kept so the printed
# data frame matches the original output
val <- unlist(val_list)
stock <- unlist(stock_list)
final_df <- data.frame(val, stock)
# report the 10 stocks with the lowest forecast error
final <- final_df[order(val), ]
print(final[1:10, ])
|
# Trim the columns (species) of a response matrix y.
#
# Columns with fewer than `minObs` non-zero observations are dropped and,
# if `maxCols` is given, only the most frequently observed columns are
# kept. When OTHER is TRUE, discarded columns are summed row-wise into an
# 'other' column (merged with a pre-existing 'other' column, if any).
# OTHER may also be a character vector naming columns to fold into 'other'
# up front.
#
# Arguments:
#   y       - matrix (or coercible), observations in rows, species in columns
#   minObs  - minimum number of non-zero values required to keep a column
#   maxCols - if non-NULL, keep at most this many columns (highest counts)
#   OTHER   - logical, or character vector of column names (see above)
#
# Returns a list:
#   y        - trimmed matrix (with 'other' column appended when OTHER)
#   colIndex - input-column indices of the retained columns
#   nobs     - non-zero observation count for each retained column
gjamTrimY <- function(y, minObs = 2, maxCols = NULL, OTHER = TRUE){
  y <- as.matrix(y)
  nc <- ncol(y)
  ci <- seq_len(nc)
  mnames <- colnames(y)
  other <- numeric(0)
  # A character OTHER names columns that are extracted into 'other' first.
  if(is.character(OTHER)){
    other <- y[, OTHER, drop = FALSE]
    ci <- ci[!mnames %in% OTHER]
    y <- y[, ci, drop = FALSE]
    # keep mnames aligned with the reduced y (was stale in the original)
    mnames <- mnames[ci]
    OTHER <- TRUE
  }
  # count non-zero (presence) observations per column
  io <- y
  io[io > 0] <- 1
  csum <- colSums(io, na.rm = TRUE)
  ww <- which(csum >= minObs)
  if(!is.null(maxCols)){
    # keep the most frequently observed columns; guard against maxCols
    # exceeding the number of qualifying columns (the old ww[1:maxCols]
    # produced NA indices and a subscript error in that case)
    ww <- ww[ order(csum[ww], decreasing = TRUE) ]
    ww <- ww[ seq_len(min(maxCols, length(ww))) ]
  }
  ci <- ci[ww]
  # drop = FALSE keeps a single retained column as a named matrix, so the
  # cbind() below preserves its real name instead of labeling it "out"
  out <- y[, ww, drop = FALSE]
  mnames <- mnames[ww]
  if(OTHER){
    # fold all discarded columns (plus any pre-extracted 'other') into one
    other <- rowSums(cbind(other, y[, -ww]), na.rm = TRUE)
    out <- cbind(out, other)
    mnames <- c(mnames, 'other')
    # merge duplicate 'other' columns if the input already had one
    ww <- which(colnames(out) == 'other')
    if(length(ww) > 1){
      other <- rowSums(out[, ww])
      out <- cbind(out[, -ww], other)
    }
  }
  # safety net: restore matrix form if subsetting ever collapsed out
  if(!is.matrix(out)){
    out <- matrix(out, ncol = 1)
    colnames(out) <- mnames
  }
  csum <- csum[ci]
  list(y = out, colIndex = ci, nobs = csum)
}
|
/R/gjamTrimY.r
|
no_license
|
dbystrova/gjamed
|
R
| false
| false
| 1,433
|
r
|
# Trim the columns (species) of a response matrix y.
#
# Columns with fewer than `minObs` non-zero observations are dropped and,
# if `maxCols` is given, only the most frequently observed columns are
# kept. When OTHER is TRUE, discarded columns are summed row-wise into an
# 'other' column (merged with a pre-existing 'other' column, if any).
# OTHER may also be a character vector naming columns to fold into 'other'
# up front.
#
# Arguments:
#   y       - matrix (or coercible), observations in rows, species in columns
#   minObs  - minimum number of non-zero values required to keep a column
#   maxCols - if non-NULL, keep at most this many columns (highest counts)
#   OTHER   - logical, or character vector of column names (see above)
#
# Returns a list:
#   y        - trimmed matrix (with 'other' column appended when OTHER)
#   colIndex - input-column indices of the retained columns
#   nobs     - non-zero observation count for each retained column
gjamTrimY <- function(y, minObs = 2, maxCols = NULL, OTHER = TRUE){
  y <- as.matrix(y)
  nc <- ncol(y)
  ci <- seq_len(nc)
  mnames <- colnames(y)
  other <- numeric(0)
  # A character OTHER names columns that are extracted into 'other' first.
  if(is.character(OTHER)){
    other <- y[, OTHER, drop = FALSE]
    ci <- ci[!mnames %in% OTHER]
    y <- y[, ci, drop = FALSE]
    # keep mnames aligned with the reduced y (was stale in the original)
    mnames <- mnames[ci]
    OTHER <- TRUE
  }
  # count non-zero (presence) observations per column
  io <- y
  io[io > 0] <- 1
  csum <- colSums(io, na.rm = TRUE)
  ww <- which(csum >= minObs)
  if(!is.null(maxCols)){
    # keep the most frequently observed columns; guard against maxCols
    # exceeding the number of qualifying columns (the old ww[1:maxCols]
    # produced NA indices and a subscript error in that case)
    ww <- ww[ order(csum[ww], decreasing = TRUE) ]
    ww <- ww[ seq_len(min(maxCols, length(ww))) ]
  }
  ci <- ci[ww]
  # drop = FALSE keeps a single retained column as a named matrix, so the
  # cbind() below preserves its real name instead of labeling it "out"
  out <- y[, ww, drop = FALSE]
  mnames <- mnames[ww]
  if(OTHER){
    # fold all discarded columns (plus any pre-extracted 'other') into one
    other <- rowSums(cbind(other, y[, -ww]), na.rm = TRUE)
    out <- cbind(out, other)
    mnames <- c(mnames, 'other')
    # merge duplicate 'other' columns if the input already had one
    ww <- which(colnames(out) == 'other')
    if(length(ww) > 1){
      other <- rowSums(out[, ww])
      out <- cbind(out[, -ww], other)
    }
  }
  # safety net: restore matrix form if subsetting ever collapsed out
  if(!is.matrix(out)){
    out <- matrix(out, ncol = 1)
    colnames(out) <- mnames
  }
  csum <- csum[ci]
  list(y = out, colIndex = ci, nobs = csum)
}
|
## Shared CVXR decision variables for the tests below: scalars a, b, c;
## column vectors x (2x1), y (3x1), z (2x1); matrices A, B (2x2), C (3x2).
a <- Variable(name = "a")
b <- Variable(name = "b")
c <- Variable(name = "c")
x <- Variable(2, name = "x")
y <- Variable(3, name = "y")
z <- Variable(2, name = "z")
A <- Variable(2, 2, name = "A")
B <- Variable(2, 2, name = "B")
C <- Variable(3, 2, name = "C")
## Assert that `expr` is a CVXR Expression or Constraint and that its
## reported dimensions equal `size`.
assert_expression <- function(expr, size) {
  is_cvxr_object <- is(expr, "Expression") || is(expr, "Constraint")
  expect_true(is_cvxr_object)
  expect_equal(size(expr), size)
}
## Mixing CVXR variables with plain R vectors/matrices: arithmetic,
## (in)equality, and PSD (%<<%, %>>%) expressions must have the expected
## dimensions.
test_that("Test R vectors", {
## Vector
v <- 1:2
assert_expression(x + v, c(2, 1))
assert_expression(v + x, c(2, 1))
assert_expression(x - v, c(2, 1))
assert_expression(v - x, c(2, 1))
assert_expression(x <= v, c(2, 1))
assert_expression(v <= x, c(2, 1))
assert_expression(x == v, c(2, 1))
assert_expression(v == x, c(2, 1))
## Matrix
Amat <- matrix(1:8, nrow = 4, ncol = 2)
assert_expression(Amat %*% x, c(4, 1))
## PSD inequalities
Amat <- matrix(1, nrow = 2, ncol = 2)
assert_expression(Amat %<<% A, c(2, 2))
assert_expression(Amat %>>% A, c(2, 2))
})
## Same dimension checks as above, but with base-R matrix constants
## (including a one-column matrix standing in for a vector).
test_that("Test R matrices", {
## Vector
v <- matrix(1:2, nrow = 2, ncol = 1)
assert_expression(x + v, c(2, 1))
assert_expression(v + v + x, c(2, 1))
assert_expression(x - v, c(2, 1))
assert_expression(v - v - x, c(2, 1))
assert_expression(x <= v, c(2, 1))
assert_expression(v <= x, c(2, 1))
assert_expression(x == v, c(2, 1))
assert_expression(v == x, c(2, 1))
## Matrix
Amat <- matrix(1:8, nrow = 4, ncol = 2)
assert_expression(Amat %*% x, c(4, 1))
assert_expression((t(Amat) %*% Amat) %*% x, c(2, 1))
## PSD inequalities
Amat <- matrix(rep(1, 4), nrow = 2, ncol = 2)
assert_expression(Amat %<<% A, c(2, 2))
assert_expression(Amat %>>% A, c(2, 2))
})
## Scalar constants combined with CVXR variables must broadcast to the
## variable's dimensions.
test_that("Test R scalars", {
v <- 2.0
assert_expression(x + v, c(2, 1))
assert_expression(v + x, c(2, 1))
assert_expression(v * x, c(2, 1))
assert_expression(x - v, c(2, 1))
assert_expression(v - v - x, c(2, 1))
assert_expression(x <= v, c(2, 1))
assert_expression(v <= x, c(2, 1))
assert_expression(x == v, c(2, 1))
assert_expression(v == x, c(2, 1))
## PSD inequalities
assert_expression(v %<<% A, c(2, 2))
assert_expression(v %>>% A, c(2, 2))
})
## Interoperability with sparse matrices from the Matrix package.
test_that("Test sparseMatrix objects from the Matrix library", {
##require(Matrix)
## Constants
## NOTE(review): the first two assignments to A are immediately
## overwritten; only the sparseMatrix() result is exercised here.
A <- matrix(1:8, nrow = 4, ncol = 2)
A <- Matrix::Matrix(A, sparse = TRUE)
A <- Matrix::sparseMatrix(i = 1:2, j = 1:2, x = rep(1, 2))
Aidx <- Matrix::Matrix(A[1,], sparse = TRUE)
expect_equal(dim(Aidx), c(2, 1))
expect_equal(Aidx[1,1], 1)
expect_equal(Aidx[2,1], 0)
## Linear ops
var <- Variable(4, 2)
A <- matrix(1:8, nrow = 4, ncol = 2)
A <- Matrix::Matrix(A, sparse = TRUE)
B <- cbind(A, A)
assert_expression(var + A, c(4, 2))
assert_expression(A + var, c(4, 2))
assert_expression(B %*% var, c(4, 2))
assert_expression(var - A, c(4, 2))
assert_expression(A - A - var, c(4, 2))
})
|
/tests/testthat/test-g01-matrices.R
|
permissive
|
aszekMosek/CVXR
|
R
| false
| false
| 2,927
|
r
|
## Shared CVXR decision variables for the tests below: scalars a, b, c;
## column vectors x (2x1), y (3x1), z (2x1); matrices A, B (2x2), C (3x2).
a <- Variable(name = "a")
b <- Variable(name = "b")
c <- Variable(name = "c")
x <- Variable(2, name = "x")
y <- Variable(3, name = "y")
z <- Variable(2, name = "z")
A <- Variable(2, 2, name = "A")
B <- Variable(2, 2, name = "B")
C <- Variable(3, 2, name = "C")
## Assert that `expr` is a CVXR Expression or Constraint and that its
## reported dimensions equal `size`.
assert_expression <- function(expr, size) {
  is_cvxr_object <- is(expr, "Expression") || is(expr, "Constraint")
  expect_true(is_cvxr_object)
  expect_equal(size(expr), size)
}
## Mixing CVXR variables with plain R vectors/matrices: arithmetic,
## (in)equality, and PSD (%<<%, %>>%) expressions must have the expected
## dimensions.
test_that("Test R vectors", {
## Vector
v <- 1:2
assert_expression(x + v, c(2, 1))
assert_expression(v + x, c(2, 1))
assert_expression(x - v, c(2, 1))
assert_expression(v - x, c(2, 1))
assert_expression(x <= v, c(2, 1))
assert_expression(v <= x, c(2, 1))
assert_expression(x == v, c(2, 1))
assert_expression(v == x, c(2, 1))
## Matrix
Amat <- matrix(1:8, nrow = 4, ncol = 2)
assert_expression(Amat %*% x, c(4, 1))
## PSD inequalities
Amat <- matrix(1, nrow = 2, ncol = 2)
assert_expression(Amat %<<% A, c(2, 2))
assert_expression(Amat %>>% A, c(2, 2))
})
## Same dimension checks as above, but with base-R matrix constants
## (including a one-column matrix standing in for a vector).
test_that("Test R matrices", {
## Vector
v <- matrix(1:2, nrow = 2, ncol = 1)
assert_expression(x + v, c(2, 1))
assert_expression(v + v + x, c(2, 1))
assert_expression(x - v, c(2, 1))
assert_expression(v - v - x, c(2, 1))
assert_expression(x <= v, c(2, 1))
assert_expression(v <= x, c(2, 1))
assert_expression(x == v, c(2, 1))
assert_expression(v == x, c(2, 1))
## Matrix
Amat <- matrix(1:8, nrow = 4, ncol = 2)
assert_expression(Amat %*% x, c(4, 1))
assert_expression((t(Amat) %*% Amat) %*% x, c(2, 1))
## PSD inequalities
Amat <- matrix(rep(1, 4), nrow = 2, ncol = 2)
assert_expression(Amat %<<% A, c(2, 2))
assert_expression(Amat %>>% A, c(2, 2))
})
## Scalar constants combined with CVXR variables must broadcast to the
## variable's dimensions.
test_that("Test R scalars", {
v <- 2.0
assert_expression(x + v, c(2, 1))
assert_expression(v + x, c(2, 1))
assert_expression(v * x, c(2, 1))
assert_expression(x - v, c(2, 1))
assert_expression(v - v - x, c(2, 1))
assert_expression(x <= v, c(2, 1))
assert_expression(v <= x, c(2, 1))
assert_expression(x == v, c(2, 1))
assert_expression(v == x, c(2, 1))
## PSD inequalities
assert_expression(v %<<% A, c(2, 2))
assert_expression(v %>>% A, c(2, 2))
})
## Interoperability with sparse matrices from the Matrix package.
test_that("Test sparseMatrix objects from the Matrix library", {
##require(Matrix)
## Constants
## NOTE(review): the first two assignments to A are immediately
## overwritten; only the sparseMatrix() result is exercised here.
A <- matrix(1:8, nrow = 4, ncol = 2)
A <- Matrix::Matrix(A, sparse = TRUE)
A <- Matrix::sparseMatrix(i = 1:2, j = 1:2, x = rep(1, 2))
Aidx <- Matrix::Matrix(A[1,], sparse = TRUE)
expect_equal(dim(Aidx), c(2, 1))
expect_equal(Aidx[1,1], 1)
expect_equal(Aidx[2,1], 0)
## Linear ops
var <- Variable(4, 2)
A <- matrix(1:8, nrow = 4, ncol = 2)
A <- Matrix::Matrix(A, sparse = TRUE)
B <- cbind(A, A)
assert_expression(var + A, c(4, 2))
assert_expression(A + var, c(4, 2))
assert_expression(B %*% var, c(4, 2))
assert_expression(var - A, c(4, 2))
assert_expression(A - A - var, c(4, 2))
})
|
# Tabulate mark frequencies of a marked point pattern.
#
# Arguments:
#   pp - object with a `marks` component (e.g. a spatstat point pattern)
#
# Returns a named numeric vector with one entry per distinct mark, in
# order of first appearance, giving the number of points carrying it.
freqs <- function(pp)
{
  m <- union(pp$marks, NULL)
  # vapply replaces the original grow-by-c() loop: same values and names,
  # and an empty pattern now yields numeric(0) instead of an error
  h <- vapply(m, function(lev) sum(pp$marks == lev), numeric(1),
              USE.NAMES = FALSE)
  names(h) <- m
  h
}
####################################################################################
# Drop small trees and rare species from a marked point pattern.
#
# Keeps only points whose dbh exceeds `dbh` and whose mark occurs more
# than `atleast` times, then re-factors the marks of the survivors.
clean.up.data <- function(pp, dbh = 10, atleast = 10)
{
  counts <- freqs(pp)
  mark_levels <- union(pp$marks, NULL)
  common_marks <- mark_levels[counts > atleast]
  keep <- pp$dbh > dbh & pp$marks %in% common_marks
  trimmed <- pp[keep]
  kept_marks <- trimmed$marks
  # strip marks so the pattern is unmarked before re-attaching them
  trimmed$marks <- NULL
  trimmed$markformat <- "none"
  trimmed$dbh <- pp$dbh[keep]
  trimmed$marks <- factor(kept_marks)
  trimmed
}
####################################################################################
# Jitter every point of a pattern by an independent uniform offset in
# [-a, a] on each coordinate axis; returns the perturbed pattern.
shake <- function(pp, a = 0.001)
{
  jitter_x <- runif(pp$n, min = -a, max = a)
  jitter_y <- runif(pp$n, min = -a, max = a)
  pp$x <- pp$x + jitter_x
  pp$y <- pp$y + jitter_y
  pp
}
####################################################################################
# Flag points for minus-sampling edge correction.
#
# Arguments:
#   pp      - point pattern with x, y, a rectangular `window`, and
#             optionally `dbh` and `marks` components
#   minusR  - border width; points closer than this to any window edge
#             are flagged FALSE
#   dbh     - optional dbh threshold, applied only when pp$dbh exists
#   atleast - marks occurring fewer than this many times are flagged FALSE
#
# Returns a logical vector (TRUE = keep). Warns if it contains NA's.
minusID <- function(pp, minusR, dbh, atleast = 0)#, lifeform, status)
{
  id <- (pp$x < (pp$window$x[2] - minusR)) & (pp$x > (pp$window$x[1] + minusR)) &
        (pp$y < (pp$window$y[2] - minusR)) & (pp$y > (pp$window$y[1] + minusR))
  # scalar guard: && short-circuits, so missing(dbh) is only consulted
  # when a dbh vector is actually present (was elementwise & before)
  if(!is.null(pp$dbh) && !missing(dbh))
    id <- id & pp$dbh > dbh
  if(atleast > 0)
  {
    fqs <- freqs(pp)
    marks <- names(fqs)
    # seq_along avoids the bogus 1:0 iteration when there are no marks
    for(m in seq_along(marks))
    {
      bad <- which(pp$marks == marks[m])
      if(fqs[m] < atleast)
        id[bad] <- FALSE
    }
  }
  if(sum(is.na(id)) > 0) warning("Vector contains NA's.")
  id
}
|
/spatialsegregation/R/modifiers.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 1,466
|
r
|
# Tabulate mark frequencies of a marked point pattern.
#
# Arguments:
#   pp - object with a `marks` component (e.g. a spatstat point pattern)
#
# Returns a named numeric vector with one entry per distinct mark, in
# order of first appearance, giving the number of points carrying it.
freqs <- function(pp)
{
  m <- union(pp$marks, NULL)
  # vapply replaces the original grow-by-c() loop: same values and names,
  # and an empty pattern now yields numeric(0) instead of an error
  h <- vapply(m, function(lev) sum(pp$marks == lev), numeric(1),
              USE.NAMES = FALSE)
  names(h) <- m
  h
}
####################################################################################
# Drop small trees and rare species from a marked point pattern.
#
# Keeps only points whose dbh exceeds `dbh` and whose mark occurs more
# than `atleast` times, then re-factors the marks of the survivors.
clean.up.data <- function(pp, dbh = 10, atleast = 10)
{
  counts <- freqs(pp)
  mark_levels <- union(pp$marks, NULL)
  common_marks <- mark_levels[counts > atleast]
  keep <- pp$dbh > dbh & pp$marks %in% common_marks
  trimmed <- pp[keep]
  kept_marks <- trimmed$marks
  # strip marks so the pattern is unmarked before re-attaching them
  trimmed$marks <- NULL
  trimmed$markformat <- "none"
  trimmed$dbh <- pp$dbh[keep]
  trimmed$marks <- factor(kept_marks)
  trimmed
}
####################################################################################
# Jitter every point of a pattern by an independent uniform offset in
# [-a, a] on each coordinate axis; returns the perturbed pattern.
shake <- function(pp, a = 0.001)
{
  jitter_x <- runif(pp$n, min = -a, max = a)
  jitter_y <- runif(pp$n, min = -a, max = a)
  pp$x <- pp$x + jitter_x
  pp$y <- pp$y + jitter_y
  pp
}
####################################################################################
# Flag points for minus-sampling edge correction.
#
# Arguments:
#   pp      - point pattern with x, y, a rectangular `window`, and
#             optionally `dbh` and `marks` components
#   minusR  - border width; points closer than this to any window edge
#             are flagged FALSE
#   dbh     - optional dbh threshold, applied only when pp$dbh exists
#   atleast - marks occurring fewer than this many times are flagged FALSE
#
# Returns a logical vector (TRUE = keep). Warns if it contains NA's.
minusID <- function(pp, minusR, dbh, atleast = 0)#, lifeform, status)
{
  id <- (pp$x < (pp$window$x[2] - minusR)) & (pp$x > (pp$window$x[1] + minusR)) &
        (pp$y < (pp$window$y[2] - minusR)) & (pp$y > (pp$window$y[1] + minusR))
  # scalar guard: && short-circuits, so missing(dbh) is only consulted
  # when a dbh vector is actually present (was elementwise & before)
  if(!is.null(pp$dbh) && !missing(dbh))
    id <- id & pp$dbh > dbh
  if(atleast > 0)
  {
    fqs <- freqs(pp)
    marks <- names(fqs)
    # seq_along avoids the bogus 1:0 iteration when there are no marks
    for(m in seq_along(marks))
    {
      bad <- which(pp$marks == marks[m])
      if(fqs[m] < atleast)
        id[bad] <- FALSE
    }
  }
  if(sum(is.na(id)) > 0) warning("Vector contains NA's.")
  id
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/disk.frame.r
\name{disk.frame_folder}
\alias{disk.frame_folder}
\alias{disk.frame_fst}
\title{Create a data frame pointed to a folder}
\usage{
disk.frame_folder(path)
disk.frame_fst(path)
}
\arguments{
\item{path}{The path to store the output file or to a directory}
}
\description{
Create a data frame pointed to a folder
Create a disk.frame from fst files
}
|
/man/disk.frame_fst.Rd
|
no_license
|
iqis/disk.frame
|
R
| false
| true
| 440
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/disk.frame.r
\name{disk.frame_folder}
\alias{disk.frame_folder}
\alias{disk.frame_fst}
\title{Create a data frame pointed to a folder}
\usage{
disk.frame_folder(path)
disk.frame_fst(path)
}
\arguments{
\item{path}{The path to store the output file or to a directory}
}
\description{
Create a data frame pointed to a folder
Create a disk.frame from fst files
}
|
###################################
## Power, Voltage and Energy
#
# This script works on the
# "Individual household electric power consumption Data Set"
# extracted from the UC Irvine Machine Learning Repository and available for download
# via the https://github.com/rdpeng/ExData_Plotting1 repository
#
# Be sure to download the data set to the same path as this script for running this script.
# The dplyr package for R is also required.
#
# It has four plots:
#
# 1. the household global minute-averaged active power over two days (in kilowatt),
# 2007-02-01 and 2007-02-02
#
# 2. the voltage over two days,
# 2007-02-01 and 2007-02-02
#
# 3. the active energy of three different sub meters over two days,
# 2007-02-01 and 2007-02-02.
# Sub_metering_1: energy sub-metering No. 1 (in watt-hour of active energy).
# It corresponds to the kitchen, containing mainly a dishwasher, an oven and a microwave
# (hot plates are not electric but gas powered).
# Sub_metering_2: energy sub-metering No. 2 (in watt-hour of active energy).
# It corresponds to the laundry room, containing a washing-machine, a tumble-drier,
# a refrigerator and a light.
# Sub_metering_3: energy sub-metering No. 3 (in watt-hour of active energy).
# It corresponds to an electric water-heater and an air-conditioner.
#
# 4. the household global minute-averaged reactive power (in kilowatt) over two days,
# 2007-02-01 and 2007-02-02
###################################
library(dplyr)
# get classes of data from the dataset
# (read a short prefix first so the full file can be loaded with fixed
# colClasses, which is faster and keeps column types stable)
init_hpc <- read.csv2("household_power_consumption.txt", nrows=100)
classes_hpc <- sapply(init_hpc, class)
#read entire dataset
hpc_all <- read.csv2("household_power_consumption.txt", colClasses = classes_hpc)
#create a subset of the dataframe and omit NA values
# (dates are d/m/Y text: the two days 1-2 Feb 2007)
sub1_hpc <- subset(hpc_all[complete.cases(hpc_all),], Date == "1/2/2007" | Date == "2/2/2007")
#turn the values into numeric values
sub1_hpc <- transform(sub1_hpc,
Global_active_power = as.numeric(as.vector(Global_active_power)),
Global_reactive_power = as.numeric(as.vector(Global_reactive_power)),
Voltage = as.numeric(as.vector(Voltage)),
Sub_metering_1 = as.numeric(as.vector(Sub_metering_1)),
Sub_metering_2 = as.numeric(as.vector(Sub_metering_2)),
Sub_metering_3 = as.numeric(as.vector(Sub_metering_3))
)
#concatenate Date and Time values
sub2 <- (mutate(sub1_hpc, DateTime = paste(Date, Time)))
#convert DateTime to POSIXlt objects
# NOTE(review): strptime() returns POSIXlt; a POSIXlt column inside a data
# frame is fragile (some dplyr versions reject it) -- confirm, or convert
# with as.POSIXct().
sub2$DateTime <- strptime(sub2$DateTime, "%d/%m/%Y %H:%M:%S")
#add a weekday column (not necessary for this plot, I just like adding it)
sub2 <- mutate(sub2, Day = format(DateTime, "%a"))
#set up and plot to PNG
png(filename = "plot4.png", width = 480, height = 480)
# 2x2 layout: active power, voltage, sub-metering, reactive power
par(mfrow = c(2,2))
with(sub2, {
plot(DateTime, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
plot(DateTime, Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
plot(DateTime, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
points(DateTime, Sub_metering_2, type = "l", col = "red")
points(DateTime, Sub_metering_3, type = "l", col = "blue")
# NOTE(review): legend labels assume columns 7:9 are Sub_metering_1..3 --
# verify against the source file's column order.
legend("topright", lty = 1, col = c("black", "red", "blue"), bty = "n", legend = names(sub2[7:9]))
plot(DateTime, Global_reactive_power, type = "l", xlab = "datetime")
})
dev.off()
|
/Plot4.R
|
no_license
|
kleinbot/ExData_Plotting1
|
R
| false
| false
| 3,477
|
r
|
###################################
## Power, Voltage and Energy
#
# This script works on the
# "Individual household electric power consumption Data Set"
# extracted from the UC Irvine Machine Learning Repository and available for download
# via the https://github.com/rdpeng/ExData_Plotting1 repository
#
# Be sure to download the data set to the same path as this script for running this script.
# The dplyr package for R is also required.
#
# It has four plots:
#
# 1. the household global minute-averaged active power over two days (in kilowatt),
# 2007-02-01 and 2007-02-02
#
# 2. the voltage over two days,
# 2007-02-01 and 2007-02-02
#
# 3. the active energy of three different sub meters over two days,
# 2007-02-01 and 2007-02-02.
# Sub_metering_1: energy sub-metering No. 1 (in watt-hour of active energy).
# It corresponds to the kitchen, containing mainly a dishwasher, an oven and a microwave
# (hot plates are not electric but gas powered).
# Sub_metering_2: energy sub-metering No. 2 (in watt-hour of active energy).
# It corresponds to the laundry room, containing a washing-machine, a tumble-drier,
# a refrigerator and a light.
# Sub_metering_3: energy sub-metering No. 3 (in watt-hour of active energy).
# It corresponds to an electric water-heater and an air-conditioner.
#
# 4. the household global minute-averaged reactive power (in kilowatt) over two days,
# 2007-02-01 and 2007-02-02
###################################
library(dplyr)
# get classes of data from the dataset
# (read a short prefix first so the full file can be loaded with fixed
# colClasses, which is faster and keeps column types stable)
init_hpc <- read.csv2("household_power_consumption.txt", nrows=100)
classes_hpc <- sapply(init_hpc, class)
#read entire dataset
hpc_all <- read.csv2("household_power_consumption.txt", colClasses = classes_hpc)
#create a subset of the dataframe and omit NA values
# (dates are d/m/Y text: the two days 1-2 Feb 2007)
sub1_hpc <- subset(hpc_all[complete.cases(hpc_all),], Date == "1/2/2007" | Date == "2/2/2007")
#turn the values into numeric values
sub1_hpc <- transform(sub1_hpc,
Global_active_power = as.numeric(as.vector(Global_active_power)),
Global_reactive_power = as.numeric(as.vector(Global_reactive_power)),
Voltage = as.numeric(as.vector(Voltage)),
Sub_metering_1 = as.numeric(as.vector(Sub_metering_1)),
Sub_metering_2 = as.numeric(as.vector(Sub_metering_2)),
Sub_metering_3 = as.numeric(as.vector(Sub_metering_3))
)
#concatenate Date and Time values
sub2 <- (mutate(sub1_hpc, DateTime = paste(Date, Time)))
#convert DateTime to POSIXlt objects
# NOTE(review): strptime() returns POSIXlt; a POSIXlt column inside a data
# frame is fragile (some dplyr versions reject it) -- confirm, or convert
# with as.POSIXct().
sub2$DateTime <- strptime(sub2$DateTime, "%d/%m/%Y %H:%M:%S")
#add a weekday column (not necessary for this plot, I just like adding it)
sub2 <- mutate(sub2, Day = format(DateTime, "%a"))
#set up and plot to PNG
png(filename = "plot4.png", width = 480, height = 480)
# 2x2 layout: active power, voltage, sub-metering, reactive power
par(mfrow = c(2,2))
with(sub2, {
plot(DateTime, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
plot(DateTime, Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
plot(DateTime, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
points(DateTime, Sub_metering_2, type = "l", col = "red")
points(DateTime, Sub_metering_3, type = "l", col = "blue")
# NOTE(review): legend labels assume columns 7:9 are Sub_metering_1..3 --
# verify against the source file's column order.
legend("topright", lty = 1, col = c("black", "red", "blue"), bty = "n", legend = names(sub2[7:9]))
plot(DateTime, Global_reactive_power, type = "l", xlab = "datetime")
})
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metabulate.R
\name{add_commas}
\alias{add_commas}
\title{Add commas to integers
Add commas to integers (e.g., convert "1000000" to "1,000,000")}
\usage{
add_commas(x, decimals = 0)
}
\arguments{
\item{x}{Integer value}
\item{decimals}{Number of decimal places to print}
}
\value{
Value with commas.
}
\description{
Add commas to integers
Add commas to integers (e.g., convert "1000000" to "1,000,000")
}
\examples{
\dontrun{
add_commas(x = c(1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000))
add_commas(x = c(1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000), decimals = 2)
}
}
\keyword{internal}
|
/man/add_commas.Rd
|
no_license
|
EnterStudios/psychmeta
|
R
| false
| true
| 704
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metabulate.R
\name{add_commas}
\alias{add_commas}
\title{Add commas to integers
Add commas to integers (e.g., convert "1000000" to "1,000,000")}
\usage{
add_commas(x, decimals = 0)
}
\arguments{
\item{x}{Integer value}
\item{decimals}{Number of decimal places to print}
}
\value{
Value with commas.
}
\description{
Add commas to integers
Add commas to integers (e.g., convert "1000000" to "1,000,000")
}
\examples{
\dontrun{
add_commas(x = c(1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000))
add_commas(x = c(1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000), decimals = 2)
}
}
\keyword{internal}
|
#' Apply numeric_np class to a numeric variable
#'
#' To allow \code{atable} to return different statistics for different variables
#' of the same type (e.g. we want some variables to be formatted as
#' \code{Mean (SD)} and others as \code{Median [IQR]}), they need to be passed
#' to \code{atable} as different classes.
#'
#' @param x A numeric (double) vector.
#'
#' @return \code{x} with class \code{numeric_np} prepended (added only once),
#'   so that \code{atable} dispatches on it. Errors if \code{x} is not numeric.
#' @export
#'
#' @examples
#' data(mtcars)
#' mtcars$wt_np <- as.numeric_np(mtcars$wt)
#'
as.numeric_np <- function(x){
  # inherits() is the idiomatic class test; like the original
  # '"numeric" %in% class(x)' check, integer vectors are rejected.
  if (!inherits(x, "numeric"))
    stop("Not a numeric")
  # guard against adding the class tag twice
  if (!inherits(x, "numeric_np"))
    class(x) <- c("numeric_np", class(x))
  x
}
# Subset method: strip the class before indexing, then restore it so the
# numeric_np marker survives subsetting.
'[.numeric_np' <- function(x, i, j, ...){
  plain <- unclass(x)
  out <- plain[i, ...]
  class(out) <- c("numeric_np", class(out))
  out
}
|
/R/numeric_np.R
|
no_license
|
aghaynes/atableExtra
|
R
| false
| false
| 763
|
r
|
#' Apply numeric_np class to a numeric variable
#'
#' To allow \code{atable} to return different statistics for different variables
#' of the same type (e.g. we want some variables to be formatted as
#' \code{Mean (SD)} and others as \code{Median [IQR]}), they need to be passed
#' to \code{atable} as different classes.
#'
#' @param x A numeric (double) vector.
#'
#' @return \code{x} with class \code{numeric_np} prepended (added only once),
#'   so that \code{atable} dispatches on it. Errors if \code{x} is not numeric.
#' @export
#'
#' @examples
#' data(mtcars)
#' mtcars$wt_np <- as.numeric_np(mtcars$wt)
#'
as.numeric_np <- function(x){
  # inherits() is the idiomatic class test; like the original
  # '"numeric" %in% class(x)' check, integer vectors are rejected.
  if (!inherits(x, "numeric"))
    stop("Not a numeric")
  # guard against adding the class tag twice
  if (!inherits(x, "numeric_np"))
    class(x) <- c("numeric_np", class(x))
  x
}
# Subset method: strip the class before indexing, then restore it so the
# numeric_np marker survives subsetting.
'[.numeric_np' <- function(x, i, j, ...){
  plain <- unclass(x)
  out <- plain[i, ...]
  class(out) <- c("numeric_np", class(out))
  out
}
|
#' A Reference Class to represent a todo list item
#'
#' @field itemText Text of the todo list item
#' @field timeCreated Time item was created
#' @field timeCompleted Time item was completed
#' @field isCompleted Has the item been completed?
#' @field itemID Integer identifier of the todo item.
#' @field status Factor recording the item state: "incomplete", "completed" or "removed".
#' @field comment Free-text comment attached to the item (commas are replaced, see addComment).
#' @export TodoItem
#' @exportClass TodoItem
TodoItem <- setRefClass("TodoItem",
  fields = list(itemText = "character",
                timeCreated = "POSIXct",
                timeCompleted = "POSIXct",
                isCompleted = "logical",
                itemID = "integer",
                status = "factor",
                comment = "character"),
  methods = list(
    initialize = function(text, ID = NA_integer_,
                          timeCreated = Sys.time(),
                          timeCompleted = as.POSIXct(NA),
                          isCompleted = FALSE,
                          status = "incomplete",
                          comment = "") {
      "Create a todo item from its text and optional metadata"
      itemText <<- text
      timeCreated <<- timeCreated
      timeCompleted <<- timeCompleted
      isCompleted <<- isCompleted
      status <<- factor(status,
                        levels = c("incomplete",
                                   "completed",
                                   "removed"))
      # BUGFIX: was hard-coded to "", discarding the 'comment' argument
      # (comments were silently lost, e.g. when items were re-read from csv).
      comment <<- comment
      itemID <<- ID
    },
    show = function() {
      "print a todo item"
      cat("Text: "); cat(methods::show(itemText))
      cat("Created: "); cat(methods::show(timeCreated))
      if (isCompleted) {
        cat("Completed: "); cat(methods::show(timeCompleted))
      }
      if (comment != "") {
        cat("Comment: "); cat(methods::show(comment))
      }
      cat("ID: "); cat(methods::show(itemID))
    },
    setStatus = function(newStatus = c("incomplete", "completed", "removed")) {
      "set the status of a todo item"
      newStatus <- factor(newStatus,
                          levels = c("incomplete",
                                     "completed",
                                     "removed"))
      status <<- newStatus
      # keep the isCompleted flag and the timestamps in sync with the status
      if (newStatus == "completed")
        .self$markComplete()
      if (newStatus == "incomplete")
        .self$markIncomplete()
    },
    addComment = function(text, erase = FALSE) {
      "Add a comment to an item"
      stopifnot(length(text) == 1)
      # commas would corrupt the csv representation, so replace them
      text <- chartr(",", "_", text)
      if (nchar(comment) == 0 || erase)
        comment <<- text
      else
        comment <<- paste(comment, text, sep = "; ")
    },
    markComplete = function() {
      "Mark an item as complete"
      if (isCompleted)
        stop(sprintf("Item already completed on %s", timeCompleted))
      isCompleted <<- TRUE
      timeCompleted <<- Sys.time()
    },
    markIncomplete = function() {
      "Mark a completed item incomplete."
      # BUGFIX: the original passed timeCompleted to sprintf with no
      # matching format spec; the extra argument was meaningless.
      if (!isCompleted)
        stop("Item not marked as complete.")
      isCompleted <<- FALSE
      timeCompleted <<- as.POSIXct(NA)
    },
    as.data.frame = function() {
      "Convert an item to a one-row data.frame"
      out <- data.frame(itemText = itemText,
                        timeCreated = timeCreated,
                        status = status,
                        timeCompleted = timeCompleted,
                        isCompleted = isCompleted,
                        itemID = itemID,
                        comment = comment,
                        stringsAsFactors = FALSE)
      out
    }
  ))
#' Reference Class to represent todo list
#'
#' @field items List of TodoItems
#' @field nitems Number of items in list
#' @field File Associated csv file for reading and writing operations.
#' @field autowrite Logical, automatically write file upon modification? Defaults to TRUE.
#' @export TodoList
#' @exportClass TodoList
TodoList <- setRefClass("TodoList",
  fields = list(items = "list",
                nitems = "integer",
                File = "character",
                autowrite = "logical"),
  methods = list(
    initialize = function(file = NULL, autowrite = TRUE) {
      "Create an empty list, optionally populated from a csv file"
      items <<- list()
      nitems <<- 0L
      File <<- NA_character_
      autowrite <<- autowrite
      if (!is.null(file)) {
        .self$import.csv(file)
        File <<- file
      }
    },
    show = function(what = c("todo", "done", "removed", "all")) {
      "Print the items in the requested state"
      what = match.arg(what)
      cmpltd <- vapply(items, `[[`, logical(1), "isCompleted")
      rmvd <- vapply(items, function(x) x$status == "removed",
                     logical(1))
      if (what == "todo")
        toshow <- items[!cmpltd & !rmvd]
      else if (what == "done")
        toshow <- items[cmpltd & !rmvd]
      else if (what == "removed")
        toshow <- items[rmvd]
      else if (what == "all")
        toshow <- items[!rmvd]
      cat("nitems: "); cat(methods::show(nitems))
      cat("items: "); cat(methods::show(toshow))
    },
    add = function(text, write = autowrite) {
      "Generate a new item to the todo list with the given text"
      nitems <<- nitems + 1L
      newItem <- TodoItem$new(text = text, ID = nitems)
      items <<- stats::setNames(c(items, newItem),
                                c(names(items), paste0("itm", nitems)))
      if (write && file.exists(File))
        .self$write.csv()
    },
    add_item = function(newItem, write = autowrite) {
      "Add a TodoItem object to the todo list"
      # BUGFIX: stopifnot() was empty and validated nothing.
      stopifnot(inherits(newItem, "TodoItem"))
      nitems <<- nitems + 1L
      items <<- stats::setNames(c(items, newItem),
                                c(names(items), paste0("itm", nitems)))
      if (write && file.exists(File))
        .self$write.csv()
    },
    comment = function(ID, text, erase = FALSE, write = autowrite) {
      "Add a comment to an item"
      ID <- paste0("itm", ID)
      items[[ID]]$addComment(text, erase = erase)
      if (write && file.exists(File))
        .self$write.csv()
    },
    done = function(ID, write = autowrite) {
      "Mark the item with the given ID as completed"
      ID <- paste0("itm", ID)
      items[[ID]]$setStatus("completed")
      if (write && file.exists(File))
        .self$write.csv()
    },
    remove = function(ID, write = autowrite) {
      "Mark the item with the given ID as removed"
      ID <- paste0("itm", ID)
      items[[ID]]$setStatus("removed")
      # CONSISTENCY FIX: honour the write flag like done()/comment() do;
      # previously the argument was accepted but ignored.
      if (write && file.exists(File))
        .self$write.csv()
    },
    to_df = function() {
      "Bind all items into a data.frame (one row per item)"
      lst <- list()
      for (item in items) {
        lst <- c(lst, list(item$as.data.frame()))
      }
      out <- dplyr::bind_rows(lst)
      out
    },
    write.csv = function(file = NULL, setFile = FALSE) {
      "Write the list to csv; defaults to the associated File"
      if (is.null(file)) {
        if (is.na(File)) {
          stop("file must be specified if none already associated with object")
        } else {
          file = File
        }
      }
      if (setFile) {
        File <<- file
      }
      out <- .self$to_df()
      utils::write.csv(x = out, file = file, row.names = FALSE)
    },
    import.csv = function(file) {
      "Populate the list from a csv file; silently does nothing if unreadable"
      input <- try(utils::read.csv(file = file,
                                   stringsAsFactors = FALSE),
                   silent = TRUE)
      if (is(input, "try-error"))
        return()
      # seq_len() guards against a zero-row file (1:nrow would yield c(1, 0))
      for (i in seq_len(nrow(input))) {
        itmlst <- input[i, ]
        # older files may lack a status column; derive it from isCompleted
        if (is.na(itmlst$status))
          itmlst$status <- ifelse(itmlst$isCompleted,
                                  "completed", "incomplete")
        newItem <- with(itmlst,
                        TodoItem$new(text = itemText,
                                     ID = itemID,
                                     status = status,
                                     timeCreated = as.POSIXct(timeCreated),
                                     timeCompleted = as.POSIXct(timeCompleted),
                                     isCompleted = isCompleted,
                                     comment = comment))
        .self$add_item(newItem = newItem, write = FALSE)
      }
    },
    reread = function() {
      "Re-initialize the object from its associated file"
      ff <- File
      aw <- autowrite
      .self$initialize(file = ff,
                       autowrite = aw)
    }
  ))
|
/R/todolist.R
|
no_license
|
markwh/todoList
|
R
| false
| false
| 11,583
|
r
|
#' A Reference Class to represent a todo list item
#'
#' @field itemText Text of the todo list item
#' @field timeCreated Time item was created
#' @field timeCompleted Time item was completed
#' @field isCompleted Has the item been completed?
#' @field itemID Integer identifier of the todo item.
#' @field status Factor recording the item state: "incomplete", "completed" or "removed".
#' @field comment Free-text comment attached to the item (commas are replaced, see addComment).
#' @export TodoItem
#' @exportClass TodoItem
TodoItem <- setRefClass("TodoItem",
  fields = list(itemText = "character",
                timeCreated = "POSIXct",
                timeCompleted = "POSIXct",
                isCompleted = "logical",
                itemID = "integer",
                status = "factor",
                comment = "character"),
  methods = list(
    initialize = function(text, ID = NA_integer_,
                          timeCreated = Sys.time(),
                          timeCompleted = as.POSIXct(NA),
                          isCompleted = FALSE,
                          status = "incomplete",
                          comment = "") {
      "Create a todo item from its text and optional metadata"
      itemText <<- text
      timeCreated <<- timeCreated
      timeCompleted <<- timeCompleted
      isCompleted <<- isCompleted
      status <<- factor(status,
                        levels = c("incomplete",
                                   "completed",
                                   "removed"))
      # BUGFIX: was hard-coded to "", discarding the 'comment' argument
      # (comments were silently lost, e.g. when items were re-read from csv).
      comment <<- comment
      itemID <<- ID
    },
    show = function() {
      "print a todo item"
      cat("Text: "); cat(methods::show(itemText))
      cat("Created: "); cat(methods::show(timeCreated))
      if (isCompleted) {
        cat("Completed: "); cat(methods::show(timeCompleted))
      }
      if (comment != "") {
        cat("Comment: "); cat(methods::show(comment))
      }
      cat("ID: "); cat(methods::show(itemID))
    },
    setStatus = function(newStatus = c("incomplete", "completed", "removed")) {
      "set the status of a todo item"
      newStatus <- factor(newStatus,
                          levels = c("incomplete",
                                     "completed",
                                     "removed"))
      status <<- newStatus
      # keep the isCompleted flag and the timestamps in sync with the status
      if (newStatus == "completed")
        .self$markComplete()
      if (newStatus == "incomplete")
        .self$markIncomplete()
    },
    addComment = function(text, erase = FALSE) {
      "Add a comment to an item"
      stopifnot(length(text) == 1)
      # commas would corrupt the csv representation, so replace them
      text <- chartr(",", "_", text)
      if (nchar(comment) == 0 || erase)
        comment <<- text
      else
        comment <<- paste(comment, text, sep = "; ")
    },
    markComplete = function() {
      "Mark an item as complete"
      if (isCompleted)
        stop(sprintf("Item already completed on %s", timeCompleted))
      isCompleted <<- TRUE
      timeCompleted <<- Sys.time()
    },
    markIncomplete = function() {
      "Mark a completed item incomplete."
      # BUGFIX: the original passed timeCompleted to sprintf with no
      # matching format spec; the extra argument was meaningless.
      if (!isCompleted)
        stop("Item not marked as complete.")
      isCompleted <<- FALSE
      timeCompleted <<- as.POSIXct(NA)
    },
    as.data.frame = function() {
      "Convert an item to a one-row data.frame"
      out <- data.frame(itemText = itemText,
                        timeCreated = timeCreated,
                        status = status,
                        timeCompleted = timeCompleted,
                        isCompleted = isCompleted,
                        itemID = itemID,
                        comment = comment,
                        stringsAsFactors = FALSE)
      out
    }
  ))
#' Reference Class to represent todo list
#'
#' @field items List of TodoItems
#' @field nitems Number of items in list
#' @field File Associated csv file for reading and writing operations.
#' @field autowrite Logical, automatically write file upon modification? Defaults to TRUE.
#' @export TodoList
#' @exportClass TodoList
TodoList <- setRefClass("TodoList",
  fields = list(items = "list",
                nitems = "integer",
                File = "character",
                autowrite = "logical"),
  methods = list(
    initialize = function(file = NULL, autowrite = TRUE) {
      "Create an empty list, optionally populated from a csv file"
      items <<- list()
      nitems <<- 0L
      File <<- NA_character_
      autowrite <<- autowrite
      if (!is.null(file)) {
        .self$import.csv(file)
        File <<- file
      }
    },
    show = function(what = c("todo", "done", "removed", "all")) {
      "Print the items in the requested state"
      what = match.arg(what)
      cmpltd <- vapply(items, `[[`, logical(1), "isCompleted")
      rmvd <- vapply(items, function(x) x$status == "removed",
                     logical(1))
      if (what == "todo")
        toshow <- items[!cmpltd & !rmvd]
      else if (what == "done")
        toshow <- items[cmpltd & !rmvd]
      else if (what == "removed")
        toshow <- items[rmvd]
      else if (what == "all")
        toshow <- items[!rmvd]
      cat("nitems: "); cat(methods::show(nitems))
      cat("items: "); cat(methods::show(toshow))
    },
    add = function(text, write = autowrite) {
      "Generate a new item to the todo list with the given text"
      nitems <<- nitems + 1L
      newItem <- TodoItem$new(text = text, ID = nitems)
      items <<- stats::setNames(c(items, newItem),
                                c(names(items), paste0("itm", nitems)))
      if (write && file.exists(File))
        .self$write.csv()
    },
    add_item = function(newItem, write = autowrite) {
      "Add a TodoItem object to the todo list"
      # BUGFIX: stopifnot() was empty and validated nothing.
      stopifnot(inherits(newItem, "TodoItem"))
      nitems <<- nitems + 1L
      items <<- stats::setNames(c(items, newItem),
                                c(names(items), paste0("itm", nitems)))
      if (write && file.exists(File))
        .self$write.csv()
    },
    comment = function(ID, text, erase = FALSE, write = autowrite) {
      "Add a comment to an item"
      ID <- paste0("itm", ID)
      items[[ID]]$addComment(text, erase = erase)
      if (write && file.exists(File))
        .self$write.csv()
    },
    done = function(ID, write = autowrite) {
      "Mark the item with the given ID as completed"
      ID <- paste0("itm", ID)
      items[[ID]]$setStatus("completed")
      if (write && file.exists(File))
        .self$write.csv()
    },
    remove = function(ID, write = autowrite) {
      "Mark the item with the given ID as removed"
      ID <- paste0("itm", ID)
      items[[ID]]$setStatus("removed")
      # CONSISTENCY FIX: honour the write flag like done()/comment() do;
      # previously the argument was accepted but ignored.
      if (write && file.exists(File))
        .self$write.csv()
    },
    to_df = function() {
      "Bind all items into a data.frame (one row per item)"
      lst <- list()
      for (item in items) {
        lst <- c(lst, list(item$as.data.frame()))
      }
      out <- dplyr::bind_rows(lst)
      out
    },
    write.csv = function(file = NULL, setFile = FALSE) {
      "Write the list to csv; defaults to the associated File"
      if (is.null(file)) {
        if (is.na(File)) {
          stop("file must be specified if none already associated with object")
        } else {
          file = File
        }
      }
      if (setFile) {
        File <<- file
      }
      out <- .self$to_df()
      utils::write.csv(x = out, file = file, row.names = FALSE)
    },
    import.csv = function(file) {
      "Populate the list from a csv file; silently does nothing if unreadable"
      input <- try(utils::read.csv(file = file,
                                   stringsAsFactors = FALSE),
                   silent = TRUE)
      if (is(input, "try-error"))
        return()
      # seq_len() guards against a zero-row file (1:nrow would yield c(1, 0))
      for (i in seq_len(nrow(input))) {
        itmlst <- input[i, ]
        # older files may lack a status column; derive it from isCompleted
        if (is.na(itmlst$status))
          itmlst$status <- ifelse(itmlst$isCompleted,
                                  "completed", "incomplete")
        newItem <- with(itmlst,
                        TodoItem$new(text = itemText,
                                     ID = itemID,
                                     status = status,
                                     timeCreated = as.POSIXct(timeCreated),
                                     timeCompleted = as.POSIXct(timeCompleted),
                                     isCompleted = isCompleted,
                                     comment = comment))
        .self$add_item(newItem = newItem, write = FALSE)
      }
    },
    reread = function() {
      "Re-initialize the object from its associated file"
      ff <- File
      aw <- autowrite
      .self$initialize(file = ff,
                       autowrite = aw)
    }
  ))
|
# Classification Tree with rpart
#
# Trains an rpart classification tree on the Wisconsin breast-cancer data,
# predicts on a held-out 20% split and reports a confusion matrix plus the
# fitted tree.
library(rpart)
library(caret)
# NOTE(review): setwd() inside a script is fragile -- the paths below assume
# this machine-specific directory layout.
setwd("/Users/ritesh/pad-datascience/R/")
# NOTE(review): read.csv defaults to header = TRUE; if the raw file has no
# header row, the first observation is consumed as column names and then
# overwritten below -- confirm against the data file.
bcTree <- read.csv("machineLearning/data/breast-cancer-wisconsin.csv")
# Human-readable column names for the breast-cancer-wisconsin fields.
breast_cancer_wisconsin_col_names <-
  c(
    "Sample_code_number",
    "Clump_Thickness",
    "Uniformity_of_Cell_Size",
    "Uniformity_of_Cell_Shape",
    "Marginal_Adhesion",
    "Single_Epithelial_Cell_Size",
    "Bare_Nuclei",
    "Bland_Chromatin",
    "Normal_Nucleoli",
    "Mitoses",
    "Class"
  )
colnames(bcTree) <- breast_cancer_wisconsin_col_names
str(bcTree)
## view the first few rows of the data
# '?' marks missing values in this dataset; recode to NA and drop those rows.
bcTree[bcTree == '?'] <- NA
bcTree <- na.omit(bcTree)
# Recode the class labels to 0/1 (2 -> 0, 4 -> 1) and make Class a factor.
# NOTE(review): 2 = benign / 4 = malignant is the usual UCI convention, but
# that is not visible from this file -- verify.
bcTree$Class <- as.numeric(bcTree$Class)
bcTree$Class[bcTree$Class == 2] <- 0
bcTree$Class[bcTree$Class == 4] <- 1
bcTree$Class <- as.factor(bcTree$Class)
# Stratified 80/20 train/test split on the class label.
trainIndex <-
  createDataPartition(
    y = bcTree$Class,
    p = .8,
    list = FALSE,
    times = 1
  )
head(trainIndex)
myDataTrain <- bcTree[trainIndex, ]
myDataTest <- bcTree[-trainIndex, ]
#To get the basic derivatives of data
summary(myDataTrain)
#to get the standard deviations of data
# NOTE(review): sd() on the factor Class column will warn / return NA.
sapply(myDataTrain, sd)
#attach(myDataTrain)
## two-way contingency table of categorical outcome and predictors
## we want to make sure there are not 0 cells
xtabs( ~ Class + Clump_Thickness, data = myDataTrain)
#Convert the 'Class' field to categorical output variable
# (redundant: Class is already a factor at this point)
myDataTrain$Class <- factor(myDataTrain$Class)
## Build a decision tree model
fit <-
  rpart(
    Class ~ Clump_Thickness +
      Uniformity_of_Cell_Size +
      Uniformity_of_Cell_Shape +
      Marginal_Adhesion +
      Single_Epithelial_Cell_Size +
      Bare_Nuclei +
      Bland_Chromatin +
      Normal_Nucleoli +
      Mitoses,
    method = "class",
    data = myDataTrain
  )
## Prediction
# NOTE(review): assigning the test set's levels onto the training factor is
# suspicious -- both factors should already share levels ("0", "1") after the
# recoding above; verify this line is intentional.
levels(myDataTrain$Class) <- levels(myDataTest$Class)
myDataTest$Class <- factor(myDataTest$Class)
myDataTest$myDataOutput.bc <- predict(fit, myDataTest, type="class")
#Confusion Matrix
predict.output.tree.bc <-(myDataTest$myDataOutput.bc)
actual.input.tree.bc <- myDataTest$Class
conf.tablr <- table(predict.output.tree.bc, actual.input.tree.bc)
confusionMatrix(conf.tablr)
## Plot
printcp(fit) # display the results
plotcp(fit) # visualize cross-validation results
summary(fit) # detailed summary of splits
# plot tree
plot(fit, uniform = TRUE,
     main = "Classification Tree for marks")
text(fit,
     use.n = TRUE,
     all = TRUE,
     cex = .8)
|
/R/machineLearning/classification/ClassificationBreastCancer.R
|
no_license
|
riteshsolankee/data-science
|
R
| false
| false
| 2,435
|
r
|
# Classification Tree with rpart
#
# Trains an rpart classification tree on the Wisconsin breast-cancer data,
# predicts on a held-out 20% split and reports a confusion matrix plus the
# fitted tree.
library(rpart)
library(caret)
# NOTE(review): setwd() inside a script is fragile -- the paths below assume
# this machine-specific directory layout.
setwd("/Users/ritesh/pad-datascience/R/")
# NOTE(review): read.csv defaults to header = TRUE; if the raw file has no
# header row, the first observation is consumed as column names and then
# overwritten below -- confirm against the data file.
bcTree <- read.csv("machineLearning/data/breast-cancer-wisconsin.csv")
# Human-readable column names for the breast-cancer-wisconsin fields.
breast_cancer_wisconsin_col_names <-
  c(
    "Sample_code_number",
    "Clump_Thickness",
    "Uniformity_of_Cell_Size",
    "Uniformity_of_Cell_Shape",
    "Marginal_Adhesion",
    "Single_Epithelial_Cell_Size",
    "Bare_Nuclei",
    "Bland_Chromatin",
    "Normal_Nucleoli",
    "Mitoses",
    "Class"
  )
colnames(bcTree) <- breast_cancer_wisconsin_col_names
str(bcTree)
## view the first few rows of the data
# '?' marks missing values in this dataset; recode to NA and drop those rows.
bcTree[bcTree == '?'] <- NA
bcTree <- na.omit(bcTree)
# Recode the class labels to 0/1 (2 -> 0, 4 -> 1) and make Class a factor.
# NOTE(review): 2 = benign / 4 = malignant is the usual UCI convention, but
# that is not visible from this file -- verify.
bcTree$Class <- as.numeric(bcTree$Class)
bcTree$Class[bcTree$Class == 2] <- 0
bcTree$Class[bcTree$Class == 4] <- 1
bcTree$Class <- as.factor(bcTree$Class)
# Stratified 80/20 train/test split on the class label.
trainIndex <-
  createDataPartition(
    y = bcTree$Class,
    p = .8,
    list = FALSE,
    times = 1
  )
head(trainIndex)
myDataTrain <- bcTree[trainIndex, ]
myDataTest <- bcTree[-trainIndex, ]
#To get the basic derivatives of data
summary(myDataTrain)
#to get the standard deviations of data
# NOTE(review): sd() on the factor Class column will warn / return NA.
sapply(myDataTrain, sd)
#attach(myDataTrain)
## two-way contingency table of categorical outcome and predictors
## we want to make sure there are not 0 cells
xtabs( ~ Class + Clump_Thickness, data = myDataTrain)
#Convert the 'Class' field to categorical output variable
# (redundant: Class is already a factor at this point)
myDataTrain$Class <- factor(myDataTrain$Class)
## Build a decision tree model
fit <-
  rpart(
    Class ~ Clump_Thickness +
      Uniformity_of_Cell_Size +
      Uniformity_of_Cell_Shape +
      Marginal_Adhesion +
      Single_Epithelial_Cell_Size +
      Bare_Nuclei +
      Bland_Chromatin +
      Normal_Nucleoli +
      Mitoses,
    method = "class",
    data = myDataTrain
  )
## Prediction
# NOTE(review): assigning the test set's levels onto the training factor is
# suspicious -- both factors should already share levels ("0", "1") after the
# recoding above; verify this line is intentional.
levels(myDataTrain$Class) <- levels(myDataTest$Class)
myDataTest$Class <- factor(myDataTest$Class)
myDataTest$myDataOutput.bc <- predict(fit, myDataTest, type="class")
#Confusion Matrix
predict.output.tree.bc <-(myDataTest$myDataOutput.bc)
actual.input.tree.bc <- myDataTest$Class
conf.tablr <- table(predict.output.tree.bc, actual.input.tree.bc)
confusionMatrix(conf.tablr)
## Plot
printcp(fit) # display the results
plotcp(fit) # visualize cross-validation results
summary(fit) # detailed summary of splits
# plot tree
plot(fit, uniform = TRUE,
     main = "Classification Tree for marks")
text(fit,
     use.n = TRUE,
     all = TRUE,
     cex = .8)
|
#' Joint inference of mutually exclusive groups and their progression
#'
#' Alternating optimization: starting from the TiMEx groups, the function
#' repeatedly (i) optimizes the grouping of genes into pathways while keeping
#' the progression structure fixed, and (ii) optimizes the progression
#' structure with H-CBN while keeping the grouping fixed, until neither the
#' grouping nor the progression changed for \code{limitChanged} consecutive
#' iterations, or \code{noReps} iterations were performed.
#'
#' @param initialGroupsStruct structure inferred by TiMEx, used as the initial grouping.
#' @param Datamat binary alteration matrix (patients x genes).
#' @param path path where the files exchanged with H-CBN are written.
#' @param name base name of the files exchanged with H-CBN.
#' @param noReps maximum number of iterations (default 100).
#' @param optionsSA command line options for the H-CBN simulated annealing
#'   (default \code{"-s -T 1 -N 200"}).
#' @param noThreads number of threads used by H-CBN (default 4).
#' @param numsave tuning parameter forwarded to the group optimizer (default 2000).
#' @param skipsteps tuning parameter forwarded to the group optimizer (default 100).
#' @param gamma tuning parameter forwarded to the group optimizer (default 0.1).
#' @param noNoOpen iteration index up to which \code{optGroups2} is used
#'   instead of \code{optGroups} (default 100).
#' @param additionalGenes optional genes added to the initial grouping (default NULL).
#' @param limitChanged number of consecutive iterations in which grouping and
#'   progression must stay unchanged before the optimization stops (default 2).
#'
#' @return a list with the per-iteration groupings, pathway matrices, CBN
#'   results, optimal structures, parameter estimates (eps, alpha, loglik,
#'   lambdas), scores, cluster similarities, timings and stopping information.
pathTiMEx<-function(initialGroupsStruct,Datamat,path,name,noReps,optionsSA,
                    noThreads,numsave,skipsteps,gamma,noNoOpen,additionalGenes,
                    limitChanged)
{
    # default values for all optional parameters
    if (missing(noReps))
        noReps<-100
    if (missing(optionsSA))
        optionsSA<-"-s -T 1 -N 200"
    if (missing(noThreads))
        noThreads<-4
    if (missing(numsave))
        numsave<-2000
    if (missing(skipsteps))
        skipsteps<-100
    if (missing(gamma))
        gamma<-0.1
    if (missing(noNoOpen))
        noNoOpen<-100
    if (missing(additionalGenes))
        additionalGenes<-NULL
    # indicates the number of times the grouping and progression need to not change to finish the optimization
    if (missing(limitChanged))
        limitChanged<-2
    groupysL<-list() # list to store all groupings at every step
    PathmatsL<-list() # list to store all matrices patients x pathways at every step
    unidetifEvsL<-list() # list to store whether the PathmatsL elements had only unique events at every step (via doMetagene)
    cbnResultL<-list() # list to store all cbnResult structures at every step
    optStructL<-list() # list to store all optimal structure at every step
    epsL<-list() # list to store the estimated values of epsilon for the ML poset at every step
    alphaL<-list() # list to store the values of alpha of the ML poset at every step
    loglikeL<-list() # list to store the optimal log likelihod of the ML poset at every step
    lamobsL<-list() # list to store the estimated values of lambda obs for the ML poset at every step
    lambdasL<-list() # list to store the estimated values of lambda for the ML poset at every step
    minScore<-c() # vector to store the minimum scores from the local optimization of groups at every step
    clustSimConsecJacc<-c() # vector to store the Jaccard similarity index between two consecutive clusterings
    clustSimConsecRand<-c() # vector to store the Rand similarity index between two consecutive clusterings
    # use the groups inferred by TiMEx as an initial solution
    groupysL[[1]]<-createInitialGroups(initialGroupsStruct,additionalGenes,Datamat)
    PathmatsL[[1]]<-doPatForCBN(groupysL[[1]], Datamat, write=TRUE, path=path,name=name) # the initial binary alteration matrix of pathways
    unidetifEvsL[[1]]<-doMetagene(PathmatsL[[1]])$groups
    startPos<-make_linear_poset(dim(PathmatsL[[1]])[2]) # do a linear poset as initial solution for SA and the structure used in the first optimization run
    writePosetForCBN(poset=startPos,path=path,name=name) # writes the poset to a file, to be used as input by H-CBN
    cbnResultL[[1]]<-runCBN(path=path,name=name,optionsSA=optionsSA,noThreads=noThreads)
    optStructL[[1]]<-cbnResultL[[1]]$optStruct
    epsL[[1]]<-cbnResultL[[1]]$eps
    alphaL[[1]]<-cbnResultL[[1]]$alpha
    loglikeL[[1]]<-cbnResultL[[1]]$loglik
    lamobsL[[1]]<-cbnResultL[[1]]$lamobs
    lambdasL[[1]]<-cbnResultL[[1]]$lams
    i<-2
    cont<-TRUE # indicates whether to still continue the optimization
    notChanged<-0 # counts consecutive iterations without change of grouping and progression
    timeElapsed<-c() # time elapsed for one iteration run of the optimizer
    timeElapsed[1]<-NA
    accuracyStructConsec<-rep(NA,noReps)
    accuracyStructConsec[1]<-NA
    while (i <= (noReps+1) && cont==TRUE)
    {
        print(paste("Repetition =", i, sep=" "))
        ptm <- proc.time()
        # keep the structure from repetition i-1 fixed and optimize for groups
        groupys<-groupysL[[i-1]]
        adjmat<-optStructL[[i-1]]
        suppressWarnings(rm(optimGroups))
        print("optimizing groupings")
        if (i<=noNoOpen)
            optimGroups<-optGroups2(Datamat = Datamat, groupys = groupys,
                                    gamma = gamma, numsave = numsave,
                                    skipsteps = skipsteps, adjmat = adjmat)
        else
            optimGroups<-optGroups(Datamat = Datamat, groupys = groupys,
                                   gamma = gamma, numsave = numsave,
                                   skipsteps = skipsteps, adjmat = adjmat)
        # store the new groups
        groupysL[[i]]<-cleanGroupys(optimGroups$mingroupys)
        minScore[i-1]<-optimGroups$mingroupysscore
        # store and write the new pathway matrix
        PathmatsL[[i]]<-doPatForCBN(groupysL[[i]], Datamat, write=TRUE, path=path,name=name)
        unidetifEvsL[[i]]<-doMetagene(PathmatsL[[i]])$groups
        # transform the existing optimal poset from step i-1 to a new compatible one
        transformAndWritePosetLinear(path,name,optStructL[[i-1]],PathmatsL[[i]])
        # keep the groups from repetition i fixed and optimize for structure,
        # using as initial solution the transformed previously optimized structure
        cbnResultL[[i]]<-runCBN(path=path,name=name,optionsSA=optionsSA,noThreads=noThreads)
        optStructL[[i]]<-cbnResultL[[i]]$optStruct
        # try to merge groups with same parents and which leave the score unchanged
        mergedG<-mergeGroups(Datamat = Datamat, posetCurrent = optStructL[[i]], groupysCurrent = groupysL[[i]])
        if (mergedG$found==1)
        {
            print(paste("merging of groups done"))
            groupysL[[i]]<-mergedG$groupysMerged
            optStructL[[i]]<-mergedG$posetMerged
        }
        epsL[[i]]<-cbnResultL[[i]]$eps
        alphaL[[i]]<-cbnResultL[[i]]$alpha
        loglikeL[[i]]<-cbnResultL[[i]]$loglik
        lamobsL[[i]]<-cbnResultL[[i]]$lamobs
        lambdasL[[i]]<-cbnResultL[[i]]$lams
        # check if the clustering has changed between this run and the previous one
        vecAssignPrev<-returnAssignment(groupysL[[i-1]])
        vecAssignNow<-returnAssignment(groupysL[[i]])
        newStruct<-optStructL[[i]]
        clustSimConsecJacc[i-1]<-cluster_similarity(vecAssignNow,vecAssignPrev,similarity = "jaccard")
        clustSimConsecRand[i-1]<-cluster_similarity(vecAssignNow,vecAssignPrev,similarity = "rand")
        # BUGFIX: sep="" was previously passed to print() instead of paste()
        print(paste0("Consecutive Jaccard cluster similarity = ",clustSimConsecJacc[i-1]))
        print(paste0("Consecutive Rand cluster similarity = ",clustSimConsecRand[i-1]))
        if (clustSimConsecRand[i-1]==1)
        {
            # map structure i to structure i-1, for comparison
            oldStruct<-mapStructures(vecAssignPrev,vecAssignNow,optStructL[[i-1]])
            # compare once and reuse (previously computed three times)
            cmp<-compareStructs(oldStruct,newStruct)
            accuracyStructConsec[i]<-cmp$accuracy
            # if clustering didn't change, check if the structure stayed the same
            if (cmp$accuracy==1)
                notChanged<-notChanged+1
            else
                notChanged<-0
            print(paste0("progression accuracy = ",cmp$accuracy))
        } else
            notChanged<-0
        if (notChanged==limitChanged)
            cont<-FALSE
        print(paste("notChanged =", notChanged))
        timeElapsed[i]<-(proc.time() - ptm)[3]
        i<-i+1
    }
    # assemble the per-iteration results (duplicate assignments of epsL and
    # limitChanged from the original version were removed)
    result<-list()
    result$Datamat<-Datamat
    result$groupysL<-groupysL
    result$PathmatsL<-PathmatsL
    result$unidetifEvsL<-unidetifEvsL
    result$cbnResultL<-cbnResultL
    result$optStructL<-optStructL
    result$epsL<-epsL
    result$alphaL<-alphaL
    result$loglikeL<-loglikeL
    result$lamobsL<-lamobsL
    result$lambdasL<-lambdasL
    result$minScore<-minScore
    result$clustSimConsecJacc<-clustSimConsecJacc
    result$clustSimConsecRand<-clustSimConsecRand
    result$accuracyStructConsec<-accuracyStructConsec
    result$limitChanged<-limitChanged
    result$timeElapsed<-timeElapsed
    result$noIters<-i-1
    result$notChanged<-notChanged
    return(result)
}
|
/R/funcsPathTiMEx.R
|
no_license
|
cbg-ethz/pathTiMEx
|
R
| false
| false
| 8,207
|
r
|
#' pathTiMEx: joint inference of mutually exclusive gene groups and their
#' progression order.
#'
#' Alternates between (1) optimizing the grouping of genes into pathways while
#' the progression structure (poset) is fixed, and (2) re-running H-CBN to
#' optimize the poset while the grouping is fixed, until both have been stable
#' for `limitChanged` consecutive iterations or `noReps` repetitions elapse.
#'
#' @param initialGroupsStruct initial grouping structure (e.g. from TiMEx)
#' @param Datamat binary patients x genes alteration matrix
#' @param path,name location/prefix of the files exchanged with H-CBN
#' @param noReps maximum number of optimization repetitions (default 100)
#' @param optionsSA command-line options passed to the H-CBN simulated
#'   annealing run (default "-s -T 1 -N 200")
#' @param noThreads number of threads for H-CBN (default 4)
#' @param numsave,skipsteps,gamma tuning parameters forwarded to the group
#'   optimizers (defaults 2000, 100, 0.1)
#' @param noNoOpen iteration index up to which `optGroups2` is used instead of
#'   `optGroups` (default 100)
#' @param additionalGenes extra genes to include in the initial grouping
#'   (default NULL)
#' @param limitChanged number of consecutive unchanged (grouping, poset)
#'   iterations required to stop (default 2)
#' @return a list with per-iteration histories (groupings, pathway matrices,
#'   CBN results, parameter estimates), similarity/accuracy traces, timing,
#'   and the number of iterations performed.
pathTiMEx<-function(initialGroupsStruct,Datamat,path,name,noReps,optionsSA,
                    noThreads,numsave,skipsteps,gamma,noNoOpen,additionalGenes,
                    limitChanged)
{
    # defaults via missing() kept for backward compatibility with positional calls
    if (missing(noReps))
        noReps<-100
    if (missing(optionsSA))
        optionsSA<-"-s -T 1 -N 200"
    if (missing(noThreads))
        noThreads<-4
    if (missing(numsave))
        numsave<-2000
    if (missing(skipsteps))
        skipsteps<-100
    if (missing(gamma))
        gamma<-0.1
    if (missing(noNoOpen))
        noNoOpen<-100
    if (missing(additionalGenes))
        additionalGenes<-NULL
    # indicates the number of times the grouping and progression need to not change to finish the optimization
    if (missing(limitChanged))
        limitChanged<-2
    groupysL<-list() # list to store all groupings at every step
    PathmatsL<-list() # list to store all matrices patients x pathways at every step
    unidetifEvsL<-list() # list to store whether the PathmatsL elements had only unique events at every step (via doMetagene)
    cbnResultL<-list() # list to store all cbnResult structures at every step
    optStructL<-list() # list to store all optimal structure at every step
    epsL<-list() # list to store the estimated values of epsilon for the ML poset at every step
    alphaL<-list() # list to store the values of alpha of the ML poset at every step
    loglikeL<-list() # list to store the optimal log likelihod of the ML poset at every step
    lamobsL<-list() # list to store the estimated values of lambda obs for the ML poset at every step
    lambdasL<-list() # list to store the estimated values of lambda for the ML poset at every step
    minScore<-c() # vector to store the minimum scores from the local optimization of groups at every step
    clustSimConsecJacc<-c() # vector to store the Jaccard similarity index between two consecutive clusterings
    clustSimConsecRand<-c() # vector to store the Rand similarity index between two consecutive clusterings
    # use the groups inferred by TiMEx as an initial solution
    groupysL[[1]]<-createInitialGroups(initialGroupsStruct,additionalGenes,Datamat)
    PathmatsL[[1]]<-doPatForCBN(groupysL[[1]], Datamat, write=TRUE, path=path,name=name) # the initial binary alteration matrix of pathways
    unidetifEvsL[[1]]<-doMetagene(PathmatsL[[1]])$groups
    startPos<-make_linear_poset(dim(PathmatsL[[1]])[2]) # do a linear poset as initial solution for SA and the structure used in the first optimization run
    writePosetForCBN(poset=startPos,path=path,name=name) # writes the poset to a file, to be used as input by H-CBN
    cbnResultL[[1]]<-runCBN(path=path,name=name,optionsSA=optionsSA,noThreads=noThreads)
    optStructL[[1]]<-cbnResultL[[1]]$optStruct
    epsL[[1]]<-cbnResultL[[1]]$eps
    alphaL[[1]]<-cbnResultL[[1]]$alpha
    loglikeL[[1]]<-cbnResultL[[1]]$loglik
    lamobsL[[1]]<-cbnResultL[[1]]$lamobs
    lambdasL[[1]]<-cbnResultL[[1]]$lams
    i<-2
    cont<-TRUE # indicates whether to still continue the optimization
    notChanged<-0 # indicates whether the inferred grouping and progression haven't changed since the previous iteration
    timeElapsed<-c() # time elapsed for one iteration run of the optimizer
    timeElapsed[1]<-NA
    accuracyStructConsec<-rep(NA,noReps)
    accuracyStructConsec[1]<-NA
    while (i <= (noReps+1) && cont==TRUE)
    {
        print(paste("Repetition =", i, sep=" "))
        ptm <- proc.time()
        # keep the structure from repetition i-1 fixed and optimize for groups
        groupys<-groupysL[[i-1]]
        adjmat<-optStructL[[i-1]]
        suppressWarnings(rm(optimGroups))
        print("optimizing groupings")
        # early iterations use the (more exploratory) optGroups2 variant
        if (i<=noNoOpen)
            optimGroups<-optGroups2(Datamat = Datamat, groupys = groupys,
                                    gamma = gamma, numsave = numsave,
                                    skipsteps = skipsteps, adjmat = adjmat)
        else
            optimGroups<-optGroups(Datamat = Datamat, groupys = groupys,
                                   gamma = gamma, numsave = numsave,
                                   skipsteps = skipsteps, adjmat = adjmat)
        # store the new groups
        groupysL[[i]]<-cleanGroupys(optimGroups$mingroupys)
        minScore[i-1]<-optimGroups$mingroupysscore
        # store and write the new pathway matrix
        PathmatsL[[i]]<-doPatForCBN(groupysL[[i]], Datamat, write=TRUE, path=path,name=name)
        unidetifEvsL[[i]]<-doMetagene(PathmatsL[[i]])$groups
        # transform the existing optimal poset from step i-1 to a new compatible one
        transformAndWritePosetLinear(path,name,optStructL[[i-1]],PathmatsL[[i]])
        # keep the groups from repetition i fixed and optimize for structure,
        # using as initial solution the transformed previously optimized structure
        cbnResultL[[i]]<-runCBN(path=path,name=name,optionsSA=optionsSA,noThreads=noThreads)
        optStructL[[i]]<-cbnResultL[[i]]$optStruct
        # try to merge groups with same parents and which leave the score unchanged
        mergedG<-mergeGroups(Datamat = Datamat, posetCurrent = optStructL[[i]], groupysCurrent = groupysL[[i]])
        if (mergedG$found==1)
        {
            print(paste("merging of groups done"))
            groupysL[[i]]<-mergedG$groupysMerged
            optStructL[[i]]<-mergedG$posetMerged
        }
        epsL[[i]]<-cbnResultL[[i]]$eps
        alphaL[[i]]<-cbnResultL[[i]]$alpha
        loglikeL[[i]]<-cbnResultL[[i]]$loglik
        lamobsL[[i]]<-cbnResultL[[i]]$lamobs
        lambdasL[[i]]<-cbnResultL[[i]]$lams
        # check if the clustering has changed between this run and the previous one
        vecAssignPrev<-returnAssignment(groupysL[[i-1]])
        vecAssignNow<-returnAssignment(groupysL[[i]])
        newStruct<-optStructL[[i]]
        clustSimConsecJacc[i-1]<-cluster_similarity(vecAssignNow,vecAssignPrev,similarity = "jaccard")
        clustSimConsecRand[i-1]<-cluster_similarity(vecAssignNow,vecAssignPrev,similarity = "rand")
        # BUGFIX: sep="" was previously passed to print() (where it is ignored),
        # not to paste(); use paste0() so the message is built as intended
        print(paste0("Consecutive Jaccard cluster similarity = ",clustSimConsecJacc[i-1]))
        print(paste0("Consecutive Rand cluster similarity = ",clustSimConsecRand[i-1]))
        if (clustSimConsecRand[i-1]==1)
        {
            # map structure i to structure i-1, for comparison
            oldStruct<-mapStructures(vecAssignPrev,vecAssignNow,optStructL[[i-1]])
            accuracyStructConsec[i]<-compareStructs(oldStruct,newStruct)$accuracy
            # if clustering didn't change, check if structure stayed the same by
            # comparing the two structures
            if (compareStructs(oldStruct,newStruct)$accuracy==1)
                notChanged<-notChanged+1
            else
                notChanged<-0
            print(paste0("progression accuracy = ",compareStructs(oldStruct,newStruct)$accuracy))
        } else
            notChanged<-0
        if (notChanged==limitChanged)
            cont<-FALSE
        print(paste("notChanged =", notChanged))
        timeElapsed[i]<-(proc.time() - ptm)[3]
        i<-i+1
    }
    # assemble the result; each field is assigned exactly once
    # (duplicate epsL / limitChanged assignments removed)
    result<-list()
    result$epsL<-epsL
    result$Datamat<-Datamat
    result$groupysL<-groupysL
    result$PathmatsL<-PathmatsL
    result$unidetifEvsL<-unidetifEvsL
    result$cbnResultL<-cbnResultL
    result$optStructL<-optStructL
    result$alphaL<-alphaL
    result$loglikeL<-loglikeL
    result$lamobsL<-lamobsL
    result$lambdasL<-lambdasL
    result$minScore<-minScore
    result$clustSimConsecJacc<-clustSimConsecJacc
    result$clustSimConsecRand<-clustSimConsecRand
    result$accuracyStructConsec<-accuracyStructConsec
    result$limitChanged<-limitChanged
    result$timeElapsed<-timeElapsed
    result$noIters<-i-1
    result$notChanged<-notChanged
    return(result)
}
|
#===============================================================================
# File: 08-treatment-group-comparisons.R
# Date: Feb 3, 2021
# Purpose: replicate appendix analyses: Section 7
# Data In:
# ./data/survey_data.csv
# ./data/daily_pulse_data.csv
#===============================================================================
# PACKAGES
#===============================================================================
# Load analysis packages; order matters for search-path masking, so do not
# reorder (tidyverse first, then estimation/power/IO helpers).
library(tidyverse)
library(estimatr)
library(glmnet)
library(powerLATE)
library(haven)
# Project helpers: run_model(), heterogeneous_effect2(), format_latex3(),
# compute_proportion_missing_covars() are defined here.
source('code/functions.r')
# DATA
#===============================================================================
# NOTE(review): `pulse` is loaded but not referenced in the analyses visible
# below — presumably used by sourced helpers or later sections; confirm.
pulse <- read_csv("data/daily_pulse_data.csv")
svy <- read_csv("data/survey_data.csv")
# Dropping observations where treatment is missing
svy <- svy[!is.na(svy$W3_PATA306_treatment_w3),]
# Analysis ----------------------------------------------------------------
# https://declaredesign.org/blog/biased-fixed-effects.html
# Pre-treatment covariates used by the Lin-adjusted ITT models.
vars <- c("party7", "age", "agesq", "female", "raceeth", "educ",
"ideo", "income", "employ", "state", "polint", "freq_tv", "freq_np",
"freq_rad", "freq_net", "freq_disc", "log_news_pre", "diet_mean_pre")
# For all following analysis, HuffPost: treatment, FoxNews:control
# Each section follows the same recipe: set trt/dv/dv_pre/control, compute the
# blocked difference-in-means (DIM), then the Lin-covariate-adjusted ITT via
# run_model(), and emit a LaTeX row via format_latex3().
# NOTE(review): `(dim <- ...)` shadows base::dim for the rest of the script —
# presumably harmless here, but verify nothing downstream calls dim().
########################################################
### Issue H1 & H2: issue opinions
########################################################
trt <- "HuffPost"
dv <- "issue_scale"
dv_pre <- "issue_scale_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control)
compute_proportion_missing_covars(itt)
# BUGFIX: escape "&" as in all other labels, otherwise raw "&" breaks the
# generated LaTeX table row
format_latex3(dim, itt, dv = "H1 \\& H2: Conservatism")
########################################################
### Issue H3 & H4: immigration attitudes
########################################################
trt <- "HuffPost"
dv <- "imm_issue_scale"
# NOTE(review): dv_pre is the overall issue scale, not an immigration-specific
# pre measure — confirm this is intentional (the pre-treatment immigration item
# enters separately via more_vars below)
dv_pre <- "issue_scale_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control, more_vars = "policy14_imm_pre")
compute_proportion_missing_covars(itt)
# BUGFIX: escape "&" for LaTeX, consistent with the other labels
format_latex3(dim, itt, dv = "H3 \\& H4: Pro-immigration")
########################################################
### Issue RQ1: heterogeneous treatment effects
########################################################
# H1 & H2
heterogeneous_effect2(dv = "issue_scale", dv_pre = "issue_scale_pre",
moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "issue_scale", dv_pre = "issue_scale_pre",
moderator = "ideo", trt = "HuffPost", control = "FoxNews")
# H3 & H4
heterogeneous_effect2(dv = "imm_issue_scale", dv_pre = "issue_scale_pre",
moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "imm_issue_scale", dv_pre = "issue_scale_pre",
moderator = "ideo", trt = "HuffPost", control = "FoxNews")
########################################################
### News H1: news browsing, 1 week
########################################################
trt <- "HuffPost"
dv <- "log_cons_1w"
dv_pre <- "log_cons_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control, more_vars = "log_total_pre")
compute_proportion_missing_covars(itt)
format_latex3(dim, itt, dv = "H1: Cons. news (1 week)")
########################################################
### News H2: news browsing, 1 week
########################################################
trt <- "HuffPost"
dv <- "log_lib_1w"
dv_pre <- "log_lib_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control, more_vars = "log_total_pre")
compute_proportion_missing_covars(itt)
format_latex3(dim, itt, dv = "H2: Lib. news (1 week)")
########################################################
### News H1: news browsing, 4 weeks
########################################################
trt <- "HuffPost"
dv <- "log_cons_4w"
dv_pre <- "log_cons_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control, more_vars = "log_total_pre")
compute_proportion_missing_covars(itt)
format_latex3(dim, itt, dv = "H1: Cons. news (4 weeks)")
########################################################
### News H2: news browsing, 4 week
########################################################
trt <- "HuffPost"
dv <- "log_lib_4w"
dv_pre <- "log_lib_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control, more_vars = "log_total_pre")
compute_proportion_missing_covars(itt)
format_latex3(dim, itt, dv = "H2: Lib. news (4 weeks)")
########################################################
### News H1: news browsing, 6 weeks
########################################################
trt <- "HuffPost"
dv <- "log_cons_6w"
dv_pre <- "log_cons_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control, more_vars = "log_total_pre")
compute_proportion_missing_covars(itt)
format_latex3(dim, itt, dv = "H1: Cons. news (6 weeks)")
########################################################
### News H2: news browsing, 6 week
########################################################
trt <- "HuffPost"
dv <- "log_lib_6w"
dv_pre <- "log_lib_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control, more_vars = "log_total_pre")
compute_proportion_missing_covars(itt)
format_latex3(dim, itt, dv = "H2: Lib. news (6 weeks)")
########################################################
### News RQ1: heterogeneous treatment effects by party ID / ideology
########################################################
# H1: n/s
heterogeneous_effect2(dv = "log_cons_1w", dv_pre = "log_cons_pre",
moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "log_cons_1w", dv_pre = "log_cons_pre",
moderator = "ideo", trt = "HuffPost", control = "FoxNews")
# H2: n/s
heterogeneous_effect2(dv = "log_lib_1w", dv_pre = "log_lib_pre",
moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "log_lib_1w", dv_pre = "log_lib_pre",
moderator = "ideo", trt = "HuffPost", control = "FoxNews")
########################################################
### News RQ2: heterogeneous treatment effects by pre-treatment news consumption
########################################################
# H1: n/s
# NOTE(review): the two H1 calls differ only in dv_pre while sharing the same
# moderator (log_fn_pre) — looks like a possible copy-paste slip; confirm
# against the pre-analysis plan before changing.
heterogeneous_effect2(dv = "log_cons_1w", dv_pre = "log_hp_pre",
moderator = "log_fn_pre", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "log_cons_1w", dv_pre = "log_fn_pre",
moderator = "log_fn_pre", trt = "HuffPost", control = "FoxNews")
# H2: +
heterogeneous_effect2(dv = "log_lib_1w", dv_pre = "log_hp_pre",
moderator = "log_hp_pre", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "log_lib_1w", dv_pre = "log_fn_pre",
moderator = "log_hp_pre", trt = "HuffPost", control = "FoxNews")
########################################################
### Twitter H1: conservative link shares
########################################################
# creating Twitter variables
svy$log_cons_links <- log(svy$cons_links+1)
svy$log_cons_links_pre <- log(svy$cons_links_pre+1)
svy$log_total_links_pre <- log(svy$total_links_pre+1)
trt <- "HuffPost"
dv <- "log_cons_links"
dv_pre <- "log_cons_links_pre"
control <- "FoxNews"
# using chrome vs others as blocks here
svy$chrome <- ifelse(svy$W3_Browser_treatment_w3=="Chrome", 1, 0)
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = chrome,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control, blocks="chrome", more_vars = "log_total_links_pre")
compute_proportion_missing_covars(itt)
format_latex3(dim, itt, dv = "H1: Tweets w/cons. links")
########################################################
### Twitter H2: liberal link shares
########################################################
# creating Twitter variables
svy$log_lib_links <- log(svy$lib_links+1)
svy$log_lib_links_pre <- log(svy$lib_links_pre+1)
svy$log_total_links_pre <- log(svy$total_links_pre+1)
trt <- "HuffPost"
dv <- "log_lib_links"
dv_pre <- "log_lib_links_pre"
control <- "FoxNews"
# using chrome vs others as blocks here
svy$chrome <- ifelse(svy$W3_Browser_treatment_w3=="Chrome", 1, 0)
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = chrome,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control, blocks="chrome", more_vars = "log_total_links_pre")
format_latex3(dim, itt, dv = "H2: Tweets w/lib. links")
########################################################
### Twitter H3: follow conservative news sources
########################################################
# creating Twitter variables
svy$log_follows_cons_media <- log(svy$follows_cons_media+1)
svy$log_follows_elites <- log(svy$follows_elites+1)
trt <- "HuffPost"
dv <- "log_follows_cons_media"
control <- "FoxNews"
# using chrome vs others as blocks here
svy$chrome <- ifelse(svy$W3_Browser_treatment_w3=="Chrome", 1, 0)
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = chrome,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control, blocks="chrome", more_vars = "log_follows_elites")
format_latex3(dim, itt, dv = "H3: Follow cons. media")
########################################################
### Twitter H4: follow liberal news sources
########################################################
# creating Twitter variables
svy$log_follows_lib_media <- log(svy$follows_lib_media+1)
svy$log_follows_elites <- log(svy$follows_elites+1)
trt <- "HuffPost"
dv <- "log_follows_lib_media"
control <- "FoxNews"
# using chrome vs others as blocks here
svy$chrome <- ifelse(svy$W3_Browser_treatment_w3=="Chrome", 1, 0)
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = chrome,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control, blocks="chrome", more_vars = "log_follows_elites")
format_latex3(dim, itt, dv = "H4: Follow lib. media")
########################################################
### Affective H1a & H2a: Democrats, HuffPost treatment
########################################################
# creating new variables (zap_labels strips haven value labels to plain numeric)
svy$W4_PATA450a_new <- zap_labels(svy$W4_PATA450a)
svy$W2_PATA2_3_Dem_new <- zap_labels(svy$W2_PATA2_3_Dem)
trt <- "HuffPost"
dv <- "W4_PATA450a_new" # FT Democrats
dv_pre <- "W2_PATA2_3_Dem_new"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1a \\& H2a: Dem. thermometer")
########################################################
### Affective H1b & H2b: Republicans, HuffPost treatment
########################################################
# creating new variables
svy$W4_PATA450b_new <- zap_labels(svy$W4_PATA450b)
svy$W2_PATA2_3_Rep_new <- zap_labels(svy$W2_PATA2_3_Rep)
trt <- "HuffPost"
dv <- "W4_PATA450b_new" # FT Republicans
dv_pre <- "W2_PATA2_3_Rep_new"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1b \\& H2b: Rep. thermometer")
########################################################
### Affective H1c & H2c: Trump supporters, HuffPost treatment
########################################################
# creating new variables
svy$W4_PATA450e_new <- zap_labels(svy$W4_PATA450e)
svy$W2_PATA2_6_new <- zap_labels(svy$W2_PATA2_6)
trt <- "HuffPost"
dv <- "W4_PATA450e_new" # Trump social distance
dv_pre <- "W2_PATA2_6_new"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1c \\& H2c: Trump distance")
########################################################
### Polarization RQ1: heterogeneous treatment effects by party ID / ideology
########################################################
# H1a
heterogeneous_effect2(dv = "W4_PATA450a_new", dv_pre = "W2_PATA2_3_Dem_new",
moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "W4_PATA450a_new", dv_pre = "W2_PATA2_3_Dem_new",
moderator = "ideo", trt = "HuffPost", control = "FoxNews")
# H1b
heterogeneous_effect2(dv = "W4_PATA450b_new", dv_pre = "W2_PATA2_3_Rep_new",
moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "W4_PATA450b_new", dv_pre = "W2_PATA2_3_Rep_new",
moderator = "ideo", trt = "HuffPost", control = "FoxNews")
# H1c
heterogeneous_effect2(dv = "W4_PATA450e_new", dv_pre = "W2_PATA2_6_new",
moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "W4_PATA450e_new", dv_pre = "W2_PATA2_6_new",
moderator = "ideo", trt = "HuffPost", control = "FoxNews")
########################################################
### Perceived H1a & H1b: HuffPost treatment
########################################################
trt <- "HuffPost"
dv <- "ppol"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1a \\& H1b: Perceived polarization")
########################################################
### Perceived RQ1: heterogeneous treatment effects by party ID / ideology
########################################################
# H1a & H1b
heterogeneous_effect2(dv = "ppol",
moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "ppol",
moderator = "ideo", trt = "HuffPost", control = "FoxNews")
########################################################
### Agenda H1 & H2: HuffPost treatment
########################################################
trt <- "HuffPost"
dv <- "agenda_lean"
dv_pre <- "agenda_lean_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1 \\& H2: Agenda setting")
########################################################
### Agenda RQ1: heterogeneous treatment effects by party ID / ideology
########################################################
# H1 & H2
heterogeneous_effect2(dv = "agenda_lean", dv_pre = "agenda_lean_pre",
moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "agenda_lean", dv_pre = "agenda_lean_pre",
moderator = "ideo", trt = "HuffPost", control = "FoxNews")
########################################################
### Agenda RQ2: heterogeneous treatment effects by pre-treatment habits
########################################################
# H1
heterogeneous_effect2(dv = "agenda_lean", dv_pre = "log_fn_pre",
moderator = "ideo", trt = "HuffPost", control = "FoxNews")
# H2
heterogeneous_effect2(dv = "agenda_lean", dv_pre = "log_hp_pre",
moderator = "ideo", trt = "HuffPost", control = "FoxNews")
########################################################
### Approval H1a & H2a: President
########################################################
trt <- "HuffPost"
dv <- "trump_approve"
dv_pre <- "trump_approve_pre"
# consistency: use <- for assignment, as everywhere else in this script
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1a \\& H2a: Pres. approval")
########################################################
### Approval H1b & H2b: Republicans in Congress [Congress control preference]
########################################################
trt <- "HuffPost"
dv <- "cong_rep_approve"
dv_pre <- "cong_rep_approve_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1b \\& H2b: Rep. Cong. pref.")
########################################################
### Approval H1c & H2c: Democrats in Congress [Congress control preference]
########################################################
trt <- "HuffPost"
dv <- "cong_dem_approve"
dv_pre <- "cong_dem_approve_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1c \\& H2c: Dem. Cong. pref.")
########################################################
### Approval RQ1: heterogeneous treatment effects by party ID / ideology
########################################################
# H1a & H2a
heterogeneous_effect2(dv = "trump_approve", dv_pre = "trump_approve_pre",
moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "trump_approve", dv_pre = "trump_approve_pre",
moderator = "ideo", trt = "HuffPost", control = "FoxNews")
# H1b & H2b
heterogeneous_effect2(dv = "cong_rep_approve",
dv_pre = "cong_rep_approve_pre",
moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "cong_rep_approve",
dv_pre = "cong_rep_approve_pre",
moderator = "ideo", trt = "HuffPost", control = "FoxNews")
# H1c & H2c
heterogeneous_effect2(dv = "cong_dem_approve",
dv_pre = "cong_dem_approve_pre",
moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "cong_dem_approve",
dv_pre = "cong_dem_approve_pre",
moderator = "ideo", trt = "HuffPost", control = "FoxNews")
########################################################
### Turnout RQ1a: Huff Post
########################################################
trt <- "HuffPost"
dv <- "vote_after"
dv_pre <- "vote_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control)
format_latex3(dim, itt, dv = "RQ1a: Turnout")
########################################################
### Media Trust H1a & H2a: Trust in Fox News for HuffPost treatment
########################################################
trt <- "HuffPost"
dv <- "trust_fox"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1a \\& H2a: Trust in Fox News")
########################################################
### Media Trust H1b & H2b: Trust in HuffPost for FN treatment
########################################################
trt <- "HuffPost"
dv <- "trust_hp"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1b \\& H2b: Trust in HuffPost")
########################################################
### Media Trust H3a & H3b: Media liberal bias for HP treatment
########################################################
trt <- "HuffPost"
dv <- "bias"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H3a \\& H3b: Liberal media bias")
########################################################
### Media Trust RQ1
########################################################
# H1a & H2a
heterogeneous_effect2(dv = "trust_fox",
moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "trust_fox",
moderator = "ideo", trt = "HuffPost", control = "FoxNews")
# H1b & H2b
heterogeneous_effect2(dv = "trust_hp",
moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "trust_hp",
moderator = "ideo", trt = "HuffPost", control = "FoxNews")
# H3a & H3b
heterogeneous_effect2(dv = "bias",
moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "bias",
moderator = "ideo", trt = "HuffPost", control = "FoxNews")
########################################################
### Media Trust H4a: Media trust for HP group
########################################################
trt <- "HuffPost"
dv <- "trust"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H4a: Media trust")
########################################################
### Media Trust H4a: Media trust for HP group (w7)
########################################################
trt <- "HuffPost"
dv <- "trust_w7"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H4a: Media trust (w7)")
########################################################
### Factual knowledge H1: % foreign born
########################################################
trt <- "HuffPost"
dv <- "W4_PATA430a"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1: \\% foreign born")
########################################################
### Factual knowledge H2: % unemployment rate
########################################################
trt <- "HuffPost"
dv <- "W4_PATA430b"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H2: \\% unemployment")
########################################################
### Factual knowledge RQ1a & RQ1b: news reception for HP group
########################################################
trt <- "HuffPost"
dv <- "event_mokken"
dv_pre <- "event_pre_mokken"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, dv_pre = dv_pre, control = control)
format_latex3(dim, itt, dv = "RQ1a \\& RQ1b: Event knowledge")
########################################################
### Knowledge RQ2
########################################################
# H1
heterogeneous_effect2(dv = "W4_PATA430a",
moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "W4_PATA430a",
moderator = "ideo", trt = "HuffPost", control = "FoxNews")
# H2
heterogeneous_effect2(dv = "W4_PATA430b",
moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "W4_PATA430b",
moderator = "ideo", trt = "HuffPost", control = "FoxNews")
# RQ1a & RQ1b
heterogeneous_effect2(dv = "event_mokken", dv_pre = "event_pre_mokken",
moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "event_mokken", dv_pre = "event_pre_mokken",
moderator = "ideo", trt = "HuffPost", control = "FoxNews")
########################################################
### Factual knowledge RQ1b: immigration knowledge for HP group
########################################################
svy$know_immig_w2_TRUE <- svy$W2_PATA2_10_m_3 == 1
svy$know_immig_w4_TRUE <- svy$W4_PATA409_1 == 1
trt <- "HuffPost"
dv <- "know_immig_w4_TRUE"
dv_pre <- "know_immig_w2_TRUE"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, dv_pre = dv_pre, control = control)
format_latex3(dim, itt, dv = "RQ1b: Immigration event knowledge")
########################################################
### Election prediction H1
########################################################
# Subjects assigned to the Fox News treatment...
# H1a & H2a: ... will be more likely to predict the Republican Party as winning the control of the House of Representatives
# NOTE(review): the "\\ " inside the three labels below emits a LaTeX forced
# space/line break — presumably intentional formatting; confirm in the table.
trt <- "HuffPost"
dv <- "predicted_house_winner_gop"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1a \\& H2a: \\ GOP House winner")
# H1b & H2b: ... will predict a higher House vote share for the Republican Party
trt <- "HuffPost"
dv <- "predicted_vote_share_gop"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1b \\& H2b: \\ GOP House vote share")
# H1c & H2c: ... will be more likely to predict the Republican candidate to win the race for the House seat in their Congressional district
trt <- "HuffPost"
dv <- "predicted_district_winner_gop"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1c \\& H2c: \\ GOP House district winner")
########################################################
### Election prediction H3
########################################################
# (a) Pre-treatment Republican partisans in the Fox News treatment group and (b) pre-treatment Democratic partisans in the HuffPost treatment group will report a higher average certainty in their predictions than subjects assigned to the control group.
## We revised this hypothesis to be able to compare the HuffPost and the FoxNews groups directly
## Pre-treatment Democratic partisans in the HuffPost treatment group will report a higher average certainty in their predictions than pre-treatment Republican partisans in the Fox News treatment group.
svy_sub <- filter(svy, (W3_PATA306_treatment_w3 == "FoxNews" & partylean == "Republican") | (W3_PATA306_treatment_w3 == "HuffPost" & partylean == "Democrat"))
trt <- "HuffPost"
dv <- "prediction_certainty"
control <- "FoxNews"
## DIM WITHOUT BLOCKING (data sparsity issue)
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
#blocks = W3_Browser_treatment_w3,
data = svy_sub,
condition1 = control,
condition2 = trt))
## ITT WITHOUT Lin's covariate adjustment (data sparsity issue)
form <- as.formula("prediction_certainty ~ W3_PATA306_treatment_w3")
# itt <- lm_robust(form, data = dplyr::filter(svy_sub, W3_PATA306_treatment_w3 != "Control"))
itt <- lm_robust(form, svy_sub)
format_latex3(dim, list(itt), dv = "H3a \\& H3b: \\ Prediction certainty")
########################################################
### Election prediction H4
########################################################
# (a) # Pre-treatment Republican partisans in the HuffPost treatment group and (b) pre-treatment Democratic partisans in the Fox News treatment group will report a lower average certainty in their predictions than subjects assigned to the control group.
## We revised this hypothesis to be able to compare the HuffPost and the FoxNews groups directly
## Pre-treatment Republican partisans in the HuffPost treatment group will report a higher average certainty in their predictions than pre-treatment Democratic partisans in the Fox News treatment group.
svy_sub <- filter(svy, (W3_PATA306_treatment_w3 == "FoxNews" & partylean == "Democrat") | (W3_PATA306_treatment_w3 == "HuffPost" & partylean == "Republican"))
trt <- "HuffPost"
dv <- "prediction_certainty"
control <- "FoxNews"
## DIM WITHOUT BLOCKING (data sparsity issue)
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
#blocks = W3_Browser_treatment_w3,
data = svy_sub,
condition1 = control,
condition2 = trt))
## ITT WITHOUT Lin's covariate adjustment (data sparsity issue)
form <- as.formula("prediction_certainty ~ W3_PATA306_treatment_w3")
# itt <- lm_robust(form, data = dplyr::filter(svy_sub, W3_PATA306_treatment_w3 != "FoxNews"))
itt <- lm_robust(form, svy_sub)
format_latex3(dim, list(itt), dv = "H4a \\& H4b: \\ Prediction certainty")
########################################################
### Bombing H1
########################################################
# Bombing H1a: HuffPost vs. Fox News treatment
trt <- "HuffPost"
dv <- "W4_PATA462_a"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1a \\& H1b: Mail-bombing is a false flag")
########################################################
### Bombing H2
########################################################
# Bombing H2a: HuffPost vs. Fox News treatment
trt <- "HuffPost"
dv <- "W4_PATA462_b"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H2a \\& H2b: Media is accountable")
########################################################
### Bombing H3a
########################################################
# Bombing H3a: HuffPost vs. Fox News treatment
trt <- "HuffPost"
dv <- "W4_PATA462_c"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H3a \\& H3b: Trump is accountable")
|
/code/08-treatment-group-comparisons.r
|
permissive
|
NetDem-USC/homepage-experiment
|
R
| false
| false
| 40,151
|
r
|
#===============================================================================
# File: 08-treatment-group-comparisons.R
# Date: Feb 3, 2021
# Purpose: replicate appendix analyses: Section 7
# Data In:
# ./data/survey_data.csv
# ./data/daily_pulse_data.csv
#===============================================================================
# PACKAGES
#===============================================================================
library(tidyverse)
library(estimatr)
library(glmnet)
library(powerLATE)
library(haven)
# Project helpers: run_model(), format_latex3(), heterogeneous_effect2(),
# compute_proportion_missing_covars() are defined in code/functions.r.
source('code/functions.r')
# DATA
#===============================================================================
pulse <- read_csv("data/daily_pulse_data.csv")
svy <- read_csv("data/survey_data.csv")
# Dropping observations where treatment is missing
svy <- svy[!is.na(svy$W3_PATA306_treatment_w3),]
# Analysis ----------------------------------------------------------------
# https://declaredesign.org/blog/biased-fixed-effects.html
# Pre-treatment covariates used for Lin's covariate adjustment in run_model().
vars <- c("party7", "age", "agesq", "female", "raceeth", "educ",
          "ideo", "income", "employ", "state", "polint", "freq_tv", "freq_np",
          "freq_rad", "freq_net", "freq_disc", "log_news_pre", "diet_mean_pre")
# For all following analysis, HuffPost: treatment, FoxNews:control
########################################################
### Issue H1 & H2: issue opinions
########################################################
trt <- "HuffPost"
dv <- "issue_scale"
dv_pre <- "issue_scale_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = W3_Browser_treatment_w3,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control)
compute_proportion_missing_covars(itt)
format_latex3(dim, itt, dv= "H1 & H2: Conservatism")
########################################################
### Issue H3 & H4: immigration attitudes
########################################################
trt <- "HuffPost"
dv <- "imm_issue_scale"
# NOTE(review): dv_pre is the general issue scale, not an immigration-specific
# pre-measure — confirm this matches the pre-analysis plan.
dv_pre <- "issue_scale_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = W3_Browser_treatment_w3,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control, more_vars = "policy14_imm_pre")
compute_proportion_missing_covars(itt)
format_latex3(dim, itt, dv = "H3 & H4: Pro-immigration")
########################################################
### Issue RQ1: heterogeneous treatment effects
########################################################
# H1 & H2
heterogeneous_effect2(dv = "issue_scale", dv_pre = "issue_scale_pre",
                      moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "issue_scale", dv_pre = "issue_scale_pre",
                      moderator = "ideo", trt = "HuffPost", control = "FoxNews")
# H3 & H4
heterogeneous_effect2(dv = "imm_issue_scale", dv_pre = "issue_scale_pre",
                      moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "imm_issue_scale", dv_pre = "issue_scale_pre",
                      moderator = "ideo", trt = "HuffPost", control = "FoxNews")
########################################################
### News H1: news browsing, 1 week
########################################################
# Same stanza pattern throughout: blocked difference-in-means, then ITT with
# Lin's covariate adjustment, controlling for pre-treatment total browsing.
trt <- "HuffPost"
dv <- "log_cons_1w"
dv_pre <- "log_cons_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = W3_Browser_treatment_w3,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control, more_vars = "log_total_pre")
compute_proportion_missing_covars(itt)
format_latex3(dim, itt, dv = "H1: Cons. news (1 week)")
########################################################
### News H2: news browsing, 1 week
########################################################
trt <- "HuffPost"
dv <- "log_lib_1w"
dv_pre <- "log_lib_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = W3_Browser_treatment_w3,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control, more_vars = "log_total_pre")
compute_proportion_missing_covars(itt)
format_latex3(dim, itt, dv = "H2: Lib. news (1 week)")
########################################################
### News H1: news browsing, 4 weeks
########################################################
trt <- "HuffPost"
dv <- "log_cons_4w"
dv_pre <- "log_cons_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = W3_Browser_treatment_w3,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control, more_vars = "log_total_pre")
compute_proportion_missing_covars(itt)
format_latex3(dim, itt, dv = "H1: Cons. news (4 weeks)")
########################################################
### News H2: news browsing, 4 week
########################################################
trt <- "HuffPost"
dv <- "log_lib_4w"
dv_pre <- "log_lib_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = W3_Browser_treatment_w3,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control, more_vars = "log_total_pre")
compute_proportion_missing_covars(itt)
format_latex3(dim, itt, dv = "H2: Lib. news (4 weeks)")
########################################################
### News H1: news browsing, 6 weeks
########################################################
trt <- "HuffPost"
dv <- "log_cons_6w"
dv_pre <- "log_cons_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = W3_Browser_treatment_w3,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control, more_vars = "log_total_pre")
compute_proportion_missing_covars(itt)
format_latex3(dim, itt, dv = "H1: Cons. news (6 weeks)")
########################################################
### News H2: news browsing, 6 week
########################################################
trt <- "HuffPost"
dv <- "log_lib_6w"
dv_pre <- "log_lib_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = W3_Browser_treatment_w3,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control, more_vars = "log_total_pre")
compute_proportion_missing_covars(itt)
format_latex3(dim, itt, dv = "H2: Lib. news (6 weeks)")
########################################################
### News RQ1: heterogeneous treatment effects by party ID / ideology
########################################################
# H1: n/s
heterogeneous_effect2(dv = "log_cons_1w", dv_pre = "log_cons_pre",
                      moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "log_cons_1w", dv_pre = "log_cons_pre",
                      moderator = "ideo", trt = "HuffPost", control = "FoxNews")
# H2: n/s
heterogeneous_effect2(dv = "log_lib_1w", dv_pre = "log_lib_pre",
                      moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "log_lib_1w", dv_pre = "log_lib_pre",
                      moderator = "ideo", trt = "HuffPost", control = "FoxNews")
########################################################
### News RQ2: heterogeneous treatment effects by pre-treatment news consumption
########################################################
# H1: n/s
# NOTE(review): both H1 calls use moderator = "log_fn_pre" while dv_pre varies
# (hp vs. fn); verify this is intended and not a copy-paste slip.
heterogeneous_effect2(dv = "log_cons_1w", dv_pre = "log_hp_pre",
                      moderator = "log_fn_pre", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "log_cons_1w", dv_pre = "log_fn_pre",
                      moderator = "log_fn_pre", trt = "HuffPost", control = "FoxNews")
# H2: +
heterogeneous_effect2(dv = "log_lib_1w", dv_pre = "log_hp_pre",
                      moderator = "log_hp_pre", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "log_lib_1w", dv_pre = "log_fn_pre",
                      moderator = "log_hp_pre", trt = "HuffPost", control = "FoxNews")
########################################################
### Twitter H1: conservative link shares
########################################################
# creating Twitter variables
# log(x + 1) transforms guard against log(0) for count variables.
svy$log_cons_links <- log(svy$cons_links+1)
svy$log_cons_links_pre <- log(svy$cons_links_pre+1)
svy$log_total_links_pre <- log(svy$total_links_pre+1)
trt <- "HuffPost"
dv <- "log_cons_links"
dv_pre <- "log_cons_links_pre"
control <- "FoxNews"
# using chrome vs others as blocks here
svy$chrome <- ifelse(svy$W3_Browser_treatment_w3=="Chrome", 1, 0)
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = chrome,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control, blocks="chrome", more_vars = "log_total_links_pre")
compute_proportion_missing_covars(itt)
format_latex3(dim, itt, dv = "H1: Tweets w/cons. links")
########################################################
### Twitter H2: liberal link shares
########################################################
# creating Twitter variables
svy$log_lib_links <- log(svy$lib_links+1)
svy$log_lib_links_pre <- log(svy$lib_links_pre+1)
svy$log_total_links_pre <- log(svy$total_links_pre+1)
trt <- "HuffPost"
dv <- "log_lib_links"
dv_pre <- "log_lib_links_pre"
control <- "FoxNews"
# using chrome vs others as blocks here
svy$chrome <- ifelse(svy$W3_Browser_treatment_w3=="Chrome", 1, 0)
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = chrome,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control, blocks="chrome", more_vars = "log_total_links_pre")
format_latex3(dim, itt, dv = "H2: Tweets w/lib. links")
########################################################
### Twitter H3: follow conservative news sources
########################################################
# creating Twitter variables
svy$log_follows_cons_media <- log(svy$follows_cons_media+1)
svy$log_follows_elites <- log(svy$follows_elites+1)
trt <- "HuffPost"
dv <- "log_follows_cons_media"
control <- "FoxNews"
# using chrome vs others as blocks here
svy$chrome <- ifelse(svy$W3_Browser_treatment_w3=="Chrome", 1, 0)
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = chrome,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control, blocks="chrome", more_vars = "log_follows_elites")
format_latex3(dim, itt, dv = "H3: Follow cons. media")
########################################################
### Twitter H4: follow liberal news sources
########################################################
# creating Twitter variables
svy$log_follows_lib_media <- log(svy$follows_lib_media+1)
svy$log_follows_elites <- log(svy$follows_elites+1)
trt <- "HuffPost"
dv <- "log_follows_lib_media"
control <- "FoxNews"
# using chrome vs others as blocks here
svy$chrome <- ifelse(svy$W3_Browser_treatment_w3=="Chrome", 1, 0)
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = chrome,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control, blocks="chrome", more_vars = "log_follows_elites")
format_latex3(dim, itt, dv = "H4: Follow lib. media")
########################################################
### Affective H1a & H2a: Democrats, HuffPost treatment
########################################################
# creating new variables
# zap_labels() (haven) strips value labels so the columns behave as plain
# numeric vectors in the models below.
svy$W4_PATA450a_new <- zap_labels(svy$W4_PATA450a)
svy$W2_PATA2_3_Dem_new <- zap_labels(svy$W2_PATA2_3_Dem)
trt <- "HuffPost"
dv <- "W4_PATA450a_new" # FT Democrats
dv_pre <- "W2_PATA2_3_Dem_new"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = W3_Browser_treatment_w3,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1a \\& H2a: Dem. thermometer")
########################################################
### Affective H1b & H2b: Republicans, HuffPost treatment
########################################################
# creating new variables
svy$W4_PATA450b_new <- zap_labels(svy$W4_PATA450b)
svy$W2_PATA2_3_Rep_new <- zap_labels(svy$W2_PATA2_3_Rep)
trt <- "HuffPost"
dv <- "W4_PATA450b_new" # FT Republicans
dv_pre <- "W2_PATA2_3_Rep_new"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = W3_Browser_treatment_w3,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1b \\& H2b: Rep. thermometer")
########################################################
### Affective H1c & H2c: Trump supporters, HuffPost treatment
########################################################
# creating new variables
svy$W4_PATA450e_new <- zap_labels(svy$W4_PATA450e)
svy$W2_PATA2_6_new <- zap_labels(svy$W2_PATA2_6)
trt <- "HuffPost"
dv <- "W4_PATA450e_new" # Trump social distance
dv_pre <- "W2_PATA2_6_new"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = W3_Browser_treatment_w3,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1c \\& H2c: Trump distance")
########################################################
### Polarization RQ1: heterogeneous treatment effects by party ID / ideology
########################################################
# H1a
heterogeneous_effect2(dv = "W4_PATA450a_new", dv_pre = "W2_PATA2_3_Dem_new",
                      moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "W4_PATA450a_new", dv_pre = "W2_PATA2_3_Dem_new",
                      moderator = "ideo", trt = "HuffPost", control = "FoxNews")
# H1b
heterogeneous_effect2(dv = "W4_PATA450b_new", dv_pre = "W2_PATA2_3_Rep_new",
                      moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "W4_PATA450b_new", dv_pre = "W2_PATA2_3_Rep_new",
                      moderator = "ideo", trt = "HuffPost", control = "FoxNews")
# H1c
heterogeneous_effect2(dv = "W4_PATA450e_new", dv_pre = "W2_PATA2_6_new",
                      moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "W4_PATA450e_new", dv_pre = "W2_PATA2_6_new",
                      moderator = "ideo", trt = "HuffPost", control = "FoxNews")
########################################################
### Perceived H1a & H1b: HuffPost treatment
########################################################
trt <- "HuffPost"
dv <- "ppol"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = W3_Browser_treatment_w3,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1a \\& H1b: Perceived polarization")
########################################################
### Perceived RQ1: heterogeneous treatment effects by party ID / ideology
########################################################
# H1a & H1b
heterogeneous_effect2(dv = "ppol",
                      moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "ppol",
                      moderator = "ideo", trt = "HuffPost", control = "FoxNews")
########################################################
### Agenda H1 & H2: HuffPost treatment
########################################################
trt <- "HuffPost"
dv <- "agenda_lean"
dv_pre <- "agenda_lean_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = W3_Browser_treatment_w3,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1 \\& H2: Agenda setting")
########################################################
### Agenda RQ1: heterogeneous treatment effects by party ID / ideology
########################################################
# H1 & H2
heterogeneous_effect2(dv = "agenda_lean", dv_pre = "agenda_lean_pre",
                      moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "agenda_lean", dv_pre = "agenda_lean_pre",
                      moderator = "ideo", trt = "HuffPost", control = "FoxNews")
########################################################
### Agenda RQ2: heterogeneous treatment effects by pre-treatment habits
########################################################
# H1
heterogeneous_effect2(dv = "agenda_lean", dv_pre = "log_fn_pre",
                      moderator = "ideo", trt = "HuffPost", control = "FoxNews")
# H2
heterogeneous_effect2(dv = "agenda_lean", dv_pre = "log_hp_pre",
                      moderator = "ideo", trt = "HuffPost", control = "FoxNews")
########################################################
### Approval H1a & H2a: President
########################################################
# Effect of the HuffPost (vs. Fox News) treatment on presidential approval,
# adjusting for the pre-treatment approval measure via Lin's covariate
# adjustment (run_model).
trt <- "HuffPost"
dv <- "trump_approve"
dv_pre <- "trump_approve_pre"
# Use `<-`, consistent with the rest of the script (the original used `=`
# here; behavior at top level is identical).
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = W3_Browser_treatment_w3,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1a \\& H2a: Pres. approval")
########################################################
### Approval H1b & H2b: Republicans in Congress [Congress control preference]
########################################################
trt <- "HuffPost"
dv <- "cong_rep_approve"
dv_pre <- "cong_rep_approve_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = W3_Browser_treatment_w3,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1b \\& H2b: Rep. Cong. pref.")
########################################################
### Approval H1c & H2c: Democrats in Congress [Congress control preference]
########################################################
trt <- "HuffPost"
dv <- "cong_dem_approve"
dv_pre <- "cong_dem_approve_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = W3_Browser_treatment_w3,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1c \\& H2c: Dem. Cong. pref.")
########################################################
### Approval RQ1: heterogeneous treatment effects by party ID / ideology
########################################################
# Moderation of the approval effects by 7-point party ID and ideology.
# H1a & H2a
heterogeneous_effect2(dv = "trump_approve", dv_pre = "trump_approve_pre",
                      moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "trump_approve", dv_pre = "trump_approve_pre",
                      moderator = "ideo", trt = "HuffPost", control = "FoxNews")
# H1b & H2b
heterogeneous_effect2(dv = "cong_rep_approve",
                      dv_pre = "cong_rep_approve_pre",
                      moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "cong_rep_approve",
                      dv_pre = "cong_rep_approve_pre",
                      moderator = "ideo", trt = "HuffPost", control = "FoxNews")
# H1c & H2c
heterogeneous_effect2(dv = "cong_dem_approve",
                      dv_pre = "cong_dem_approve_pre",
                      moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "cong_dem_approve",
                      dv_pre = "cong_dem_approve_pre",
                      moderator = "ideo", trt = "HuffPost", control = "FoxNews")
########################################################
### Turnout RQ1a: Huff Post
########################################################
trt <- "HuffPost"
dv <- "vote_after"
dv_pre <- "vote_pre"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = W3_Browser_treatment_w3,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, dv_pre = dv_pre, trt = trt, control = control)
format_latex3(dim, itt, dv = "RQ1a: Turnout")
########################################################
### Media Trust H1a & H2a: Trust in Fox News for HuffPost treatment
########################################################
trt <- "HuffPost"
dv <- "trust_fox"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = W3_Browser_treatment_w3,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1a \\& H2a: Trust in Fox News")
########################################################
### Media Trust H1b & H2b: Trust in HuffPost for FN treatment
########################################################
trt <- "HuffPost"
dv <- "trust_hp"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = W3_Browser_treatment_w3,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1b \\& H2b: Trust in HuffPost")
########################################################
### Media Trust H3a & H3b: Media liberal bias for HP treatment
########################################################
trt <- "HuffPost"
dv <- "bias"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = W3_Browser_treatment_w3,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H3a \\& H3b: Liberal media bias")
########################################################
### Media Trust RQ1
########################################################
# H1a & H2a
heterogeneous_effect2(dv = "trust_fox",
                      moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "trust_fox",
                      moderator = "ideo", trt = "HuffPost", control = "FoxNews")
# H1b & H2b
heterogeneous_effect2(dv = "trust_hp",
                      moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "trust_hp",
                      moderator = "ideo", trt = "HuffPost", control = "FoxNews")
# H3a & H3b
heterogeneous_effect2(dv = "bias",
                      moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "bias",
                      moderator = "ideo", trt = "HuffPost", control = "FoxNews")
########################################################
### Media Trust H4a: Media trust for HP group
########################################################
trt <- "HuffPost"
dv <- "trust"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = W3_Browser_treatment_w3,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H4a: Media trust")
########################################################
### Media Trust H4a: Media trust for HP group (w7)
########################################################
# Same outcome measured at wave 7 (longer-run follow-up).
trt <- "HuffPost"
dv <- "trust_w7"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
                              blocks = W3_Browser_treatment_w3,
                              data = svy,
                              condition1 = control,
                              condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H4a: Media trust (w7)")
########################################################
### Factual knowledge H1: % foreign born
########################################################
trt <- "HuffPost"
dv <- "W4_PATA430a"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1: \\% foreign born")
########################################################
### Factual knowledge H2: % unemployment rate
########################################################
trt <- "HuffPost"
dv <- "W4_PATA430b"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H2: \\% unemployment")
########################################################
### Factual knowledge RQ1a & RQ1b: news reception for HP group
########################################################
trt <- "HuffPost"
dv <- "event_mokken"
dv_pre <- "event_pre_mokken"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, dv_pre = dv_pre, control = control)
format_latex3(dim, itt, dv = "RQ1a \\& RQ1b: Event knowledge")
########################################################
### Knowledge RQ2
########################################################
# H1
heterogeneous_effect2(dv = "W4_PATA430a",
moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "W4_PATA430a",
moderator = "ideo", trt = "HuffPost", control = "FoxNews")
# H2
heterogeneous_effect2(dv = "W4_PATA430b",
moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "W4_PATA430b",
moderator = "ideo", trt = "HuffPost", control = "FoxNews")
# RQ1a & RQ1b
heterogeneous_effect2(dv = "event_mokken", dv_pre = "event_pre_mokken",
moderator = "party7", trt = "HuffPost", control = "FoxNews")
heterogeneous_effect2(dv = "event_mokken", dv_pre = "event_pre_mokken",
moderator = "ideo", trt = "HuffPost", control = "FoxNews")
########################################################
### Factual knowledge RQ1b: immigration knowledge for HP group
########################################################
svy$know_immig_w2_TRUE <- svy$W2_PATA2_10_m_3 == 1
svy$know_immig_w4_TRUE <- svy$W4_PATA409_1 == 1
trt <- "HuffPost"
dv <- "know_immig_w4_TRUE"
dv_pre <- "know_immig_w2_TRUE"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, dv_pre = dv_pre, control = control)
format_latex3(dim, itt, dv = "RQ1b: Immigration event knowledge")
########################################################
### Election prediction H1
########################################################
# Subjects assigned to the Fox News treatment...
# H1a & H2a: ... will be more likely to predict the Republican Party as winning the control of the House of Representatives
trt <- "HuffPost"
dv <- "predicted_house_winner_gop"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1a \\& H2a: \\ GOP House winner")
# H1b & H2b: ... will predict a higher House vote share for the Republican Party
trt <- "HuffPost"
dv <- "predicted_vote_share_gop"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1b \\& H2b: \\ GOP House vote share")
# H1c & H2c: ... will be more likely to predict the Republican candidate to win the race for the House seat in their Congressional district
trt <- "HuffPost"
dv <- "predicted_district_winner_gop"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1c \\& H2c: \\ GOP House district winner")
########################################################
### Election prediction H3
########################################################
# (a) Pre-treatment Republican partisans in the Fox News treatment group and (b) pre-treatment Democratic partisans in the HuffPost treatment group will report a higher average certainty in their predictions than subjects assigned to the control group.
## We revised this hypothesis to be able to compare the HuffPost and the FoxNews groups directly
## Pre-treatment Democratic partisans in the HuffPost treatment group will report a higher average certainty in their predictions than pre-treatment Republican partisans in the Fox News treatment group.
# Restrict to "congenial" arm/partisanship pairs: Fox News x Republicans
# and HuffPost x Democrats.
svy_sub <- filter(svy, (W3_PATA306_treatment_w3 == "FoxNews" & partylean == "Republican") | (W3_PATA306_treatment_w3 == "HuffPost" & partylean == "Democrat"))
trt <- "HuffPost"
dv <- "prediction_certainty"
control <- "FoxNews"
## DIM WITHOUT BLOCKING (data sparsity issue)
# Blocking is dropped here because some block-by-arm cells in the subset
# are too small (see commented-out blocks argument).
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
#blocks = W3_Browser_treatment_w3,
data = svy_sub,
condition1 = control,
condition2 = trt))
## ITT WITHOUT Lin's covariate adjustment (data sparsity issue)
form <- as.formula("prediction_certainty ~ W3_PATA306_treatment_w3")
# itt <- lm_robust(form, data = dplyr::filter(svy_sub, W3_PATA306_treatment_w3 != "Control"))
# Plain robust OLS on the subset; wrapped in list() below because
# format_latex3() expects a list of ITT models.
itt <- lm_robust(form, svy_sub)
format_latex3(dim, list(itt), dv = "H3a \\& H3b: \\ Prediction certainty")
########################################################
### Election prediction H4
########################################################
# (a) # Pre-treatment Republican partisans in the HuffPost treatment group and (b) pre-treatment Democratic partisans in the Fox News treatment group will report a lower average certainty in their predictions than subjects assigned to the control group.
## We revised this hypothesis to be able to compare the HuffPost and the FoxNews groups directly
## Pre-treatment Republican partisans in the HuffPost treatment group will report a higher average certainty in their predictions than pre-treatment Democratic partisans in the Fox News treatment group.
# Mirror of H3: restrict to "uncongenial" pairs (Fox News x Democrats,
# HuffPost x Republicans).
svy_sub <- filter(svy, (W3_PATA306_treatment_w3 == "FoxNews" & partylean == "Democrat") | (W3_PATA306_treatment_w3 == "HuffPost" & partylean == "Republican"))
trt <- "HuffPost"
dv <- "prediction_certainty"
control <- "FoxNews"
## DIM WITHOUT BLOCKING (data sparsity issue)
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
#blocks = W3_Browser_treatment_w3,
data = svy_sub,
condition1 = control,
condition2 = trt))
## ITT WITHOUT Lin's covariate adjustment (data sparsity issue)
form <- as.formula("prediction_certainty ~ W3_PATA306_treatment_w3")
# itt <- lm_robust(form, data = dplyr::filter(svy_sub, W3_PATA306_treatment_w3 != "FoxNews"))
itt <- lm_robust(form, svy_sub)
format_latex3(dim, list(itt), dv = "H4a \\& H4b: \\ Prediction certainty")
########################################################
### Bombing H1
########################################################
# Bombing H1a: HuffPost vs. Fox News treatment
# Same blocked DIM + covariate-adjusted ITT template as the election-
# prediction hypotheses above, applied to the wave-4 bombing items.
trt <- "HuffPost"
dv <- "W4_PATA462_a"
control <- "FoxNews"
## DIM
# Blocked on wave-3 browser-treatment assignment; parentheses print it.
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H1a \\& H1b: Mail-bombing is a false flag")
########################################################
### Bombing H2
########################################################
# Bombing H2a: HuffPost vs. Fox News treatment
trt <- "HuffPost"
dv <- "W4_PATA462_b"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H2a \\& H2b: Media is accountable")
########################################################
### Bombing H3a
########################################################
# Bombing H3a: HuffPost vs. Fox News treatment
trt <- "HuffPost"
dv <- "W4_PATA462_c"
control <- "FoxNews"
## DIM
(dim <- difference_in_means(formula(paste0(dv, " ~ W3_PATA306_treatment_w3")),
blocks = W3_Browser_treatment_w3,
data = svy,
condition1 = control,
condition2 = trt))
## ITT, with Lin's covariate adjustment
itt <- run_model(dv = dv, trt = trt, control = control)
format_latex3(dim, itt, dv = "H3a \\& H3b: Trump is accountable")
|
# Exploratory Data Analysis
# Project 1
#
# Reads a 2880-row window (2 days x 1440 minutes) of the UCI household
# power-consumption data and saves a histogram of Global_active_power
# to plot1.png.
# NOTE(review): setwd() hard-codes a machine-specific path; prefer running
# the script from the data directory or parameterizing the path.
setwd("F:/self-learning/data science/data exploratory/dataset/ExData_Plotting1")
# With header = TRUE the first line after the skipped block is consumed as
# (throw-away) column names; the real names are re-attached below.
# NOTE(review): confirm skip = 66636 was chosen with that extra consumed
# line in mind, otherwise the window is off by one row.
data <- read.table("household_power_consumption.txt",
                   skip = 66636, nrows = 2880, sep = ";", header = TRUE)
# Recover the true column names from the file's first line.
header <- scan("household_power_consumption.txt", nlines = 1, sep = ";",
               what = character())
colnames(data) <- header
# plot 1: histogram of Global_active_power rendered to a PNG device
png(file = "plot1.png")
hist(data$Global_active_power, col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
|
/plot1.R
|
no_license
|
kittencai/ExData_Plotting1
|
R
| false
| false
| 505
|
r
|
# Exploratory Data Analysis
# Project 1
#
# Reads a 2880-row window (2 days x 1440 minutes) of the UCI household
# power-consumption data and saves a histogram of Global_active_power
# to plot1.png.
# NOTE(review): setwd() hard-codes a machine-specific path; prefer running
# the script from the data directory or parameterizing the path.
setwd("F:/self-learning/data science/data exploratory/dataset/ExData_Plotting1")
# With header = TRUE the first line after the skipped block is consumed as
# (throw-away) column names; the real names are re-attached below.
# NOTE(review): confirm skip = 66636 was chosen with that extra consumed
# line in mind, otherwise the window is off by one row.
data <- read.table("household_power_consumption.txt",
                   skip = 66636, nrows = 2880, sep = ";", header = TRUE)
# Recover the true column names from the file's first line.
header <- scan("household_power_consumption.txt", nlines = 1, sep = ";",
               what = character())
colnames(data) <- header
# plot 1: histogram of Global_active_power rendered to a PNG device
png(file = "plot1.png")
hist(data$Global_active_power, col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
|
#' Generate correlated variables
#' @description
#' `r badge('experimental')`
#'
#' Generate correlated variables using a vector of known values and desired
#' maximum and minimum correlations
#'
#' @param y A vector to generate variables correlated with.
#' @param min_cor The minimum desired correlation.
#' @param max_cor The maximum desired correlation.
#' @param nvars The number of variables.
#' @param constant A constant. Use `operation` to define which operation is
#'   used. Either a single value (applied to every cell) or a vector with
#'   one value per column of the result.
#' @param operation The operation to be applied to the `constant` value.
#' @param x An optional vector of the same length of `y`. If not informed
#'   (default) then a normally distributed variable (mean = 0, sd = 1) will be
#'   used.
#'
#' @return A data frame with the `y` variable and the correlated variables.
#' @export
#' @author Tiago Olivoto \email{tiagoolivoto@@gmail.com}
#' @examples
#' \donttest{
#' library(metan)
#' y <- rnorm(n = 10)
#' cor_vars <- correlated_vars(y, nvars = 6)
#' plot(cor_vars)
#' }
#'
#'
correlated_vars <- function(y,
                            min_cor = -1,
                            max_cor = 1,
                            nvars,
                            constant = NULL,
                            operation = "*",
                            x = NULL){
  # Evenly spaced target correlations between min_cor and max_cor.
  rho <- round(seq(min_cor, max_cor, length.out = nvars), digits = 2)
  # is.null() (not missing()): with the x = NULL default, an explicitly
  # passed NULL must also trigger the standard-normal fallback, otherwise
  # lm(NULL ~ y) below would fail.
  if (is.null(x)) x <- rnorm(length(y))
  # Residuals of x on y are orthogonal to y; mixing y and the residual with
  # weights rho and sqrt(1 - rho^2) (each scaled by the other's sd) yields a
  # variable whose sample correlation with y is exactly rho.
  y_res <- residuals(lm(x ~ y))
  df <- cbind(y,
              sapply(rho, function(rho){
                rho * sd(y_res) * y + y_res * sd(y) * sqrt(1 - rho ^ 2)
              })
  ) %>%
    as.data.frame()
  # Column names encode the target correlation, e.g. "r0.25".
  names(df) <- c("y", paste("r", rho, sep = ""))
  if(!is.null(constant)){
    # && (scalar short-circuit) is the correct operator for an if() guard.
    if(length(constant) > 1 && length(constant) != ncol(df)){
      stop("Length of 'constant' not valid")
    }
    # MARGIN = 2 sweeps column-wise, matching the per-column length check
    # above; a scalar constant is recycled over every cell either way.
    df <- sweep(df, 2, STATS = constant, FUN = operation)
  }
  return(list(df = df) %>% set_class("correlated_vars"))
}
#' Plot an object of class correlated_vars
#'
#' Draws one panel per generated variable: points against `y`, a linear
#' fit, and a rug of `y` values along the bottom axis.
#'
#' @param x An object of class correlated_vars.
#' @param ... Currently not used.
#'
#' @return An object of class gg.
#' @export
#' @examples
#' \donttest{
#' library(metan)
#' y <- rnorm(n = 10)
#' cor_vars <- correlated_vars(y, nvar = 6)
#' plot(cor_vars)
#' }
plot.correlated_vars <- function(x, ...){
  # Reshape wide -> long so every correlated column becomes its own facet.
  long_data <- pivot_longer(x[[1]], -y)
  ggplot(long_data, aes(y, value, group = name)) +
    geom_smooth(method = "lm",
                formula = 'y ~ x',
                color = "Black") +
    geom_rug(sides = "b") +
    geom_point(alpha = 1/2) +
    facet_wrap(~ name, scales = "free")
}
|
/R/correlated_vars.R
|
no_license
|
cran/metan
|
R
| false
| false
| 2,622
|
r
|
#' Generate correlated variables
#' @description
#' `r badge('experimental')`
#'
#' Generate correlated variables using a vector of known values and desired
#' maximum and minimum correlations
#'
#' @param y A vector to generate variables correlated with.
#' @param min_cor The minimum desired correlation.
#' @param max_cor The maximum desired correlation.
#' @param nvars The number of variables.
#' @param constant A constant. Use `operation` to define which operation is
#'   used. Either a single value (applied to every cell) or a vector with
#'   one value per column of the result.
#' @param operation The operation to be applied to the `constant` value.
#' @param x An optional vector of the same length of `y`. If not informed
#'   (default) then a normally distributed variable (mean = 0, sd = 1) will be
#'   used.
#'
#' @return A data frame with the `y` variable and the correlated variables.
#' @export
#' @author Tiago Olivoto \email{tiagoolivoto@@gmail.com}
#' @examples
#' \donttest{
#' library(metan)
#' y <- rnorm(n = 10)
#' cor_vars <- correlated_vars(y, nvars = 6)
#' plot(cor_vars)
#' }
#'
#'
correlated_vars <- function(y,
                            min_cor = -1,
                            max_cor = 1,
                            nvars,
                            constant = NULL,
                            operation = "*",
                            x = NULL){
  # Evenly spaced target correlations between min_cor and max_cor.
  rho <- round(seq(min_cor, max_cor, length.out = nvars), digits = 2)
  # is.null() (not missing()): with the x = NULL default, an explicitly
  # passed NULL must also trigger the standard-normal fallback, otherwise
  # lm(NULL ~ y) below would fail.
  if (is.null(x)) x <- rnorm(length(y))
  # Residuals of x on y are orthogonal to y; mixing y and the residual with
  # weights rho and sqrt(1 - rho^2) (each scaled by the other's sd) yields a
  # variable whose sample correlation with y is exactly rho.
  y_res <- residuals(lm(x ~ y))
  df <- cbind(y,
              sapply(rho, function(rho){
                rho * sd(y_res) * y + y_res * sd(y) * sqrt(1 - rho ^ 2)
              })
  ) %>%
    as.data.frame()
  # Column names encode the target correlation, e.g. "r0.25".
  names(df) <- c("y", paste("r", rho, sep = ""))
  if(!is.null(constant)){
    # && (scalar short-circuit) is the correct operator for an if() guard.
    if(length(constant) > 1 && length(constant) != ncol(df)){
      stop("Length of 'constant' not valid")
    }
    # MARGIN = 2 sweeps column-wise, matching the per-column length check
    # above; a scalar constant is recycled over every cell either way.
    df <- sweep(df, 2, STATS = constant, FUN = operation)
  }
  return(list(df = df) %>% set_class("correlated_vars"))
}
#' Plot an object of class correlated_vars
#'
#' Draws one panel per generated variable: points against `y`, a linear
#' fit, and a rug of `y` values along the bottom axis.
#'
#' @param x An object of class correlated_vars.
#' @param ... Currently not used.
#'
#' @return An object of class gg.
#' @export
#' @examples
#' \donttest{
#' library(metan)
#' y <- rnorm(n = 10)
#' cor_vars <- correlated_vars(y, nvar = 6)
#' plot(cor_vars)
#' }
plot.correlated_vars <- function(x, ...){
  # Reshape wide -> long so every correlated column becomes its own facet.
  long_data <- pivot_longer(x[[1]], -y)
  ggplot(long_data, aes(y, value, group = name)) +
    geom_smooth(method = "lm",
                formula = 'y ~ x',
                color = "Black") +
    geom_rug(sides = "b") +
    geom_point(alpha = 1/2) +
    facet_wrap(~ name, scales = "free")
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.