content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
setwd("C:/Users/Neerav Basant/Desktop/Fall Semester/Advanced Predictive Modeling/Assignments/Assignment 5/HW5_files")
load("imbalanced.RData")
library("caret")
library("ROCR")
set.seed(10)
##### 2 (a) ########
log.fit = glm(income ~ ., family = binomial(), data = trdat)
log.pred = predict(log.fit, newdata = tedat, type = "response")
pred.test=rep(0,4000)
pred.test[log.pred > .5]=1
table(pred.test, tedat$income)
pred <- prediction(log.pred,tedat$income)
auc.tmp <- performance(pred,"auc")
auc <- as.numeric(auc.tmp@y.values)
auc
# Sampling
up_trdat = upSample(x = trdat[,-(length(trdat))], y = trdat$income, yname = "income")
down_trdat = downSample(x = trdat[,-(length(trdat))], y = trdat$income, yname = "income")
table(trdat$income)
table(up_trdat$income)
table(down_trdat$income)
up.log.fit = glm(income ~ ., family = binomial(), data = up_trdat)
up.log.pred = predict(up.log.fit, newdata = tedat, type = "response")
up.pred.test=rep(0,4000)
up.pred.test[up.log.pred > .5]=1
table(up.pred.test, tedat$income)
pred <- prediction(up.log.pred,tedat$income)
auc.tmp <- performance(pred,"auc")
auc <- as.numeric(auc.tmp@y.values)
auc
down.log.fit = glm(income ~ ., family = binomial(), data = down_trdat)
down.log.pred = predict(down.log.fit, newdata = tedat, type = "response")
down.pred.test=rep(0,4000)
down.pred.test[down.log.pred > .5]=1
table(down.pred.test, tedat$income)
pred <- prediction(down.log.pred,tedat$income)
auc.tmp <- performance(pred,"auc")
auc <- as.numeric(auc.tmp@y.values)
auc
######### 2(b) #########
library(rpart)
library(rpart.plot)
reg.fit <- rpart(income ~ ., data=trdat, method = "class", control = rpart.control(xval=5,cp=0.0001))
plotcp(reg.fit)
printcp(reg.fit)
reg.prune <- prune(reg.fit,cp=0.00081037)
reg.pred = predict(reg.prune, newdata= tedat)
reg.pred.test=rep(0,4000)
reg.pred.test[reg.pred[,2] > .5]=1
table(reg.pred.test, tedat$income)
pred <- prediction(reg.pred[,2], tedat$income)
auc.tmp <- performance(pred,"auc")
auc <- as.numeric(auc.tmp@y.values)
auc
reg.fit.loss <- rpart(income ~ ., data=trdat, method = "class", parms=list(split="information", loss=matrix(c(0,1,2,0), byrow=TRUE, nrow=2)), control = rpart.control(xval=5,cp=0.0001))
plotcp(reg.fit.loss)
printcp(reg.fit.loss)
reg.prune.loss <- prune(reg.fit.loss,cp=0.00035454)
reg.pred.loss = predict(reg.prune.loss, newdata= tedat)
reg.pred.loss.test=rep(0,4000)
reg.pred.loss.test[reg.pred.loss[,2] > .5]=1
table(reg.pred.loss.test, tedat$income)
pred <- prediction(reg.pred.loss[,2], tedat$income)
auc.tmp <- performance(pred,"auc")
auc <- as.numeric(auc.tmp@y.values)
auc
######### 2(c) #########
#install.packages("kernlab")
library(kernlab)
svm.fit <- ksvm(income ~ ., data = trdat, kernel = "vanilladot", prob.model = TRUE)
svm.pred <- predict(svm.fit, newdata = tedat, type = "r")
pred <- prediction(as.numeric(svm.pred),tedat$income)
auc.tmp <- performance(pred,"auc")
auc <- as.numeric(auc.tmp@y.values)
auc
svm.fit.loss <- ksvm(income ~ ., data = trdat, kernel = "vanilladot", class.weights=c("small"=1,"large"=2), prob.model = TRUE)
svm.pred.loss <- predict(svm.fit.loss, newdata = tedat, type = "r")
pred <- prediction(as.numeric(svm.pred.loss),tedat$income)
auc.tmp <- performance(pred,"auc")
auc <- as.numeric(auc.tmp@y.values)
auc
| /Assignment 5/HW5_Q2.R | no_license | neeravbasant/Advanced-Predictive-Modeling | R | false | false | 3,312 | r | setwd("C:/Users/Neerav Basant/Desktop/Fall Semester/Advanced Predictive Modeling/Assignments/Assignment 5/HW5_files")
load("imbalanced.RData")
library("caret")
library("ROCR")
set.seed(10)
##### 2 (a) ########
log.fit = glm(income ~ ., family = binomial(), data = trdat)
log.pred = predict(log.fit, newdata = tedat, type = "response")
pred.test=rep(0,4000)
pred.test[log.pred > .5]=1
table(pred.test, tedat$income)
pred <- prediction(log.pred,tedat$income)
auc.tmp <- performance(pred,"auc")
auc <- as.numeric(auc.tmp@y.values)
auc
# Sampling
up_trdat = upSample(x = trdat[,-(length(trdat))], y = trdat$income, yname = "income")
down_trdat = downSample(x = trdat[,-(length(trdat))], y = trdat$income, yname = "income")
table(trdat$income)
table(up_trdat$income)
table(down_trdat$income)
up.log.fit = glm(income ~ ., family = binomial(), data = up_trdat)
up.log.pred = predict(up.log.fit, newdata = tedat, type = "response")
up.pred.test=rep(0,4000)
up.pred.test[up.log.pred > .5]=1
table(up.pred.test, tedat$income)
pred <- prediction(up.log.pred,tedat$income)
auc.tmp <- performance(pred,"auc")
auc <- as.numeric(auc.tmp@y.values)
auc
down.log.fit = glm(income ~ ., family = binomial(), data = down_trdat)
down.log.pred = predict(down.log.fit, newdata = tedat, type = "response")
down.pred.test=rep(0,4000)
down.pred.test[down.log.pred > .5]=1
table(down.pred.test, tedat$income)
pred <- prediction(down.log.pred,tedat$income)
auc.tmp <- performance(pred,"auc")
auc <- as.numeric(auc.tmp@y.values)
auc
######### 2(b) #########
library(rpart)
library(rpart.plot)
reg.fit <- rpart(income ~ ., data=trdat, method = "class", control = rpart.control(xval=5,cp=0.0001))
plotcp(reg.fit)
printcp(reg.fit)
reg.prune <- prune(reg.fit,cp=0.00081037)
reg.pred = predict(reg.prune, newdata= tedat)
reg.pred.test=rep(0,4000)
reg.pred.test[reg.pred[,2] > .5]=1
table(reg.pred.test, tedat$income)
pred <- prediction(reg.pred[,2], tedat$income)
auc.tmp <- performance(pred,"auc")
auc <- as.numeric(auc.tmp@y.values)
auc
reg.fit.loss <- rpart(income ~ ., data=trdat, method = "class", parms=list(split="information", loss=matrix(c(0,1,2,0), byrow=TRUE, nrow=2)), control = rpart.control(xval=5,cp=0.0001))
plotcp(reg.fit.loss)
printcp(reg.fit.loss)
reg.prune.loss <- prune(reg.fit.loss,cp=0.00035454)
reg.pred.loss = predict(reg.prune.loss, newdata= tedat)
reg.pred.loss.test=rep(0,4000)
reg.pred.loss.test[reg.pred.loss[,2] > .5]=1
table(reg.pred.loss.test, tedat$income)
pred <- prediction(reg.pred.loss[,2], tedat$income)
auc.tmp <- performance(pred,"auc")
auc <- as.numeric(auc.tmp@y.values)
auc
######### 2(c) #########
#install.packages("kernlab")
library(kernlab)
svm.fit <- ksvm(income ~ ., data = trdat, kernel = "vanilladot", prob.model = TRUE)
svm.pred <- predict(svm.fit, newdata = tedat, type = "r")
pred <- prediction(as.numeric(svm.pred),tedat$income)
auc.tmp <- performance(pred,"auc")
auc <- as.numeric(auc.tmp@y.values)
auc
svm.fit.loss <- ksvm(income ~ ., data = trdat, kernel = "vanilladot", class.weights=c("small"=1,"large"=2), prob.model = TRUE)
svm.pred.loss <- predict(svm.fit.loss, newdata = tedat, type = "r")
pred <- prediction(as.numeric(svm.pred.loss),tedat$income)
auc.tmp <- performance(pred,"auc")
auc <- as.numeric(auc.tmp@y.values)
auc
|
library(arules)
# Load the PainData from the internet
load(url("http://mathsci.ucd.ie/~brendan/data/PainData.Rdata"))
# Look at the data (it's in transaction format)
inspect(X0)
# Look at the data (in matrix format)
X <- as(X0, "matrix")
X
# Look at the frequency of each pain criterion
summary(X0)
# Do a first analysis of the data using the apriori algorithm
fit <- apriori(X0, parameter = list(supp = 0.6, conf = 0.9))
fit <- sort(fit, by = "lift")
# Do another analysis to look at the relationships graphically.
library(arulesViz)
fit2 <- apriori(X0, parameter = list(supp = 0.3, conf = 0.9,maxlen = 2,minlen = 2))
plot(fit2)
plot(fit2, method = "grouped")
plot(fit2, method = "graph")
############################
# Load the PainData from the internet
load(url("http://mathsci.ucd.ie/~brendan/data/PainData.Rdata"))
# Look at the data (it's in transaction format)
inspect(X0)
# Look at the data (in matrix format)
X <- as(X0, "matrix")
X
# Clustering of the data
fit <- kmeans(X, centers = 2, nstart = 20)
# Choosing k
K <- 20
SSvec <- rep(NA, K)
for (k in 1:20)
{
SSvec[k] <- kmeans(X, centers = k, nstart = 20 )$tot.withinss
}
plot(SSvec)
# Look at the k = 3 results
fit <- kmeans(X, centers = 3, nstart = 20)
fit
# Inspect the results further
library(cluster)
d <- dist(X)^2
sil <- silhouette(fit$cluster, d)
plot(sil)
# Using k-medoids
d <- dist(X, method = "binary")
fit2 <- pam(d, k = 3)
X[fit2$medoids,]
# Compare results
table(fit$cluster, fit2$clustering)
###########################################
| /dataMining/painData.R | no_license | sreelathav/dataScienceWithR | R | false | false | 1,550 | r | library(arules)
# Load the PainData from the internet
load(url("http://mathsci.ucd.ie/~brendan/data/PainData.Rdata"))
# Look at the data (it's in transaction format)
inspect(X0)
# Look at the data (in matrix format)
X <- as(X0, "matrix")
X
# Look at the frequency of each pain criterion
summary(X0)
# Do a first analysis of the data using the apriori algorithm
fit <- apriori(X0, parameter = list(supp = 0.6, conf = 0.9))
fit <- sort(fit, by = "lift")
# Do another analysis to look at the relationships graphically.
library(arulesViz)
fit2 <- apriori(X0, parameter = list(supp = 0.3, conf = 0.9,maxlen = 2,minlen = 2))
plot(fit2)
plot(fit2, method = "grouped")
plot(fit2, method = "graph")
############################
# Load the PainData from the internet
load(url("http://mathsci.ucd.ie/~brendan/data/PainData.Rdata"))
# Look at the data (it's in transaction format)
inspect(X0)
# Look at the data (in matrix format)
X <- as(X0, "matrix")
X
# Clustering of the data
fit <- kmeans(X, centers = 2, nstart = 20)
# Choosing k
K <- 20
SSvec <- rep(NA, K)
for (k in 1:20)
{
SSvec[k] <- kmeans(X, centers = k, nstart = 20 )$tot.withinss
}
plot(SSvec)
# Look at the k = 3 results
fit <- kmeans(X, centers = 3, nstart = 20)
fit
# Inspect the results further
library(cluster)
d <- dist(X)^2
sil <- silhouette(fit$cluster, d)
plot(sil)
# Using k-medoids
d <- dist(X, method = "binary")
fit2 <- pam(d, k = 3)
X[fit2$medoids,]
# Compare results
table(fit$cluster, fit2$clustering)
###########################################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SigmaHstdLL_LH.R
\name{SigmaHstdLL_LH}
\alias{SigmaHstdLL_LH}
\title{SigmaHstdLL_LH}
\usage{
SigmaHstdLL_LH(Y, u, h, ktype = "gaussian")
}
\arguments{
\item{Y}{the observation matrix}
\item{u}{the condition, it is a vector, one-dimensional array or
one-dimensional row(column) matrix}
\item{h}{the bandwidth, scalar}
\item{ktype}{the kernel type, can be "gaussian", "epanech", "triweight",
"biweight", "tricube", "triangular" and "cosine",
the default of ktype is "gaussian".}
}
\value{
the estimator of diagonal entries at each given u_i.
}
\description{
this routine computes the corresponding sigma matrix
with standard bandwidth at each u_i using local linear smoother.
}
\examples{
\dontrun{
data(Yresid_ll)
data(u)
data(hstd_LL_LH)
# compute the estimator of diagonal entries
Sigma_LL_LH <- SigmaHstdLL_LH(Y = Yresid_ll, u = u, h = hstd_LL_LH$minimum)
Ystd_LL_LH = Yresid_ll / sqrt(Sigma_LL_LH)
}
}
\seealso{
\code{\link{CVLL}},\code{\link{CVHstdLL_LH}}
}
| /man/SigmaHstdLL_LH.Rd | no_license | Jieli12/llfdr | R | false | true | 1,045 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SigmaHstdLL_LH.R
\name{SigmaHstdLL_LH}
\alias{SigmaHstdLL_LH}
\title{SigmaHstdLL_LH}
\usage{
SigmaHstdLL_LH(Y, u, h, ktype = "gaussian")
}
\arguments{
\item{Y}{the observation matrix}
\item{u}{the condition, it is a vector, one-dimensional array or
one-dimensional row(column) matrix}
\item{h}{the bandwidth, scalar}
\item{ktype}{the kernel type, can be "gaussian", "epanech", "triweight",
"biweight", "tricube", "triangular" and "cosine",
the default of ktype is "gaussian".}
}
\value{
the estimator of diagonal entries at each given u_i.
}
\description{
this routine computes the corresponding sigma matrix
with standard bandwidth at each u_i using local linear smoother.
}
\examples{
\dontrun{
data(Yresid_ll)
data(u)
data(hstd_LL_LH)
# compute the estimator of diagonal entries
Sigma_LL_LH <- SigmaHstdLL_LH(Y = Yresid_ll, u = u, h = hstd_LL_LH$minimum)
Ystd_LL_LH = Yresid_ll / sqrt(Sigma_LL_LH)
}
}
\seealso{
\code{\link{CVLL}},\code{\link{CVHstdLL_LH}}
}
|
##############################################################################################################
##############################################################################################################
######
###### Topic: Development of simulation code for evaluating the performance of the offer acceptance decision
###### tool (OADT). This script aims to develop the framework before moving individual components
###### to seperate functions and actually running the simulations.
######
##############################################################################################################
##############################################################################################################
library(survival)
source("A:/SRTR/CIF_Simulations/CIF_Weibull_calc.R")
source("A:/SRTR/CIF_Simulations/candidate_iter.R")
source("A:/SRTR/CIF_Simulations/sim_iter.R")
# the sample size for the simulation
n <- 10000
# I need to simulate survival times for the individual components of the CIF. We will work with Weibull distributions
# since the likelihood of a removal reason usually increases (DD Tx and WL removal/mortality) or decreases (LD tx)
# decreased donor transplant baseline parameters
dd_shape <- 1.05
dd_scale_bl <- 5
# living donor transplant baseline parameters
ld_shape <- 0.95
ld_scale_bl <- 20
# removal baseline parameters
rem_shape <- 1
rem_scale_bl <- 20
# wl mortality baseline parameters
wlm_shape <- 1.05
wlm_scale_bl <- 20
########################################################################################################################
#
# Need to specify the effect of candidate age and candidate allocation priority on the likelihood of a deceased donor
# transplant. Similarly, need to specify the effect of diabetes on the likelihood of removal and wl mortality
#
diabetes_effect <- 0.35
allocation_effect <- -0.50
age_effect <- 0.10
########################################################################################################################
#
# Need to specify the baseline hazard and covariate effects for the posttransplant survival models and the
# covariate effects (assuming an exponential distributed survival functions)
#
dd_bl <- -log(0.850) / 3
ld_bl <- -log(0.925) / 3
rem_bl <- -log(0.700) / 3
c_age_effect <- log(2) / 4
c_diabetes_effect <- log(1.25)
d_quality_effect <- 4 / 100
########################################################################################################################
########################################################################################################################
########################################################################################################################
########################################################################################################################
n1 <- 250
n2 <- 500
test1_start <- proc.time()
test1 <- sim_iter(1,
n1,
dd_bl,
ld_bl,
rem_bl,
c_age_effect,
c_diabetes_effect,
d_quality_effect,
dd_shape,
dd_scale_bl,
ld_shape,
ld_scale_bl,
rem_shape,
rem_scale_bl,
wlm_shape,
wlm_scale_bl,
0.50,
100,
allocation_effect,
age_effect,
diabetes_effect)
test1_end <- proc.time()
test2_start <- proc.time()
test2 <- sim_iter(1,
n2,
dd_bl,
ld_bl,
rem_bl,
c_age_effect,
c_diabetes_effect,
d_quality_effect,
dd_shape,
dd_scale_bl,
ld_shape,
ld_scale_bl,
rem_shape,
rem_scale_bl,
wlm_shape,
wlm_scale_bl,
0.50,
100,
allocation_effect,
age_effect,
diabetes_effect)
test2_end <- proc.time()
test3_start <- proc.time()
test3 <- sim_iter_vec(1,
n1,
dd_bl,
ld_bl,
rem_bl,
c_age_effect,
c_diabetes_effect,
d_quality_effect,
dd_shape,
dd_scale_bl,
ld_shape,
ld_scale_bl,
rem_shape,
rem_scale_bl,
wlm_shape,
wlm_scale_bl,
0.50,
100,
allocation_effect,
age_effect,
diabetes_effect)
test3_end <- proc.time()
test4_start <- proc.time()
test4 <- sim_iter_vec(1,
n2,
dd_bl,
ld_bl,
rem_bl,
c_age_effect,
c_diabetes_effect,
d_quality_effect,
dd_shape,
dd_scale_bl,
ld_shape,
ld_scale_bl,
rem_shape,
rem_scale_bl,
wlm_shape,
wlm_scale_bl,
0.50,
100,
allocation_effect,
age_effect,
diabetes_effect)
test4_end <- proc.time()
(test1_end[3] - test1_start[3]) / 60
(test2_end[3] - test2_start[3]) / 60
(test3_end[3] - test3_start[3]) / 60
| /development_code.R | no_license | weyxx003/OADT_Simulations | R | false | false | 5,918 | r | ##############################################################################################################
##############################################################################################################
######
###### Topic: Development of simulation code for evaluating the performance of the offer acceptance decision
###### tool (OADT). This script aims to develop the framework before moving individual components
###### to seperate functions and actually running the simulations.
######
##############################################################################################################
##############################################################################################################
library(survival)
source("A:/SRTR/CIF_Simulations/CIF_Weibull_calc.R")
source("A:/SRTR/CIF_Simulations/candidate_iter.R")
source("A:/SRTR/CIF_Simulations/sim_iter.R")
# the sample size for the simulation
n <- 10000
# I need to simulate survival times for the individual components of the CIF. We will work with Weibull distributions
# since the likelihood of a removal reason usually increases (DD Tx and WL removal/mortality) or decreases (LD tx)
# decreased donor transplant baseline parameters
dd_shape <- 1.05
dd_scale_bl <- 5
# living donor transplant baseline parameters
ld_shape <- 0.95
ld_scale_bl <- 20
# removal baseline parameters
rem_shape <- 1
rem_scale_bl <- 20
# wl mortality baseline parameters
wlm_shape <- 1.05
wlm_scale_bl <- 20
########################################################################################################################
#
# Need to specify the effect of candidate age and candidate allocation priority on the likelihood of a deceased donor
# transplant. Similarly, need to specify the effect of diabetes on the likelihood of removal and wl mortality
#
diabetes_effect <- 0.35
allocation_effect <- -0.50
age_effect <- 0.10
########################################################################################################################
#
# Need to specify the baseline hazard and covariate effects for the posttransplant survival models and the
# covariate effects (assuming an exponential distributed survival functions)
#
dd_bl <- -log(0.850) / 3
ld_bl <- -log(0.925) / 3
rem_bl <- -log(0.700) / 3
c_age_effect <- log(2) / 4
c_diabetes_effect <- log(1.25)
d_quality_effect <- 4 / 100
########################################################################################################################
########################################################################################################################
########################################################################################################################
########################################################################################################################
n1 <- 250
n2 <- 500
test1_start <- proc.time()
test1 <- sim_iter(1,
n1,
dd_bl,
ld_bl,
rem_bl,
c_age_effect,
c_diabetes_effect,
d_quality_effect,
dd_shape,
dd_scale_bl,
ld_shape,
ld_scale_bl,
rem_shape,
rem_scale_bl,
wlm_shape,
wlm_scale_bl,
0.50,
100,
allocation_effect,
age_effect,
diabetes_effect)
test1_end <- proc.time()
test2_start <- proc.time()
test2 <- sim_iter(1,
n2,
dd_bl,
ld_bl,
rem_bl,
c_age_effect,
c_diabetes_effect,
d_quality_effect,
dd_shape,
dd_scale_bl,
ld_shape,
ld_scale_bl,
rem_shape,
rem_scale_bl,
wlm_shape,
wlm_scale_bl,
0.50,
100,
allocation_effect,
age_effect,
diabetes_effect)
test2_end <- proc.time()
test3_start <- proc.time()
test3 <- sim_iter_vec(1,
n1,
dd_bl,
ld_bl,
rem_bl,
c_age_effect,
c_diabetes_effect,
d_quality_effect,
dd_shape,
dd_scale_bl,
ld_shape,
ld_scale_bl,
rem_shape,
rem_scale_bl,
wlm_shape,
wlm_scale_bl,
0.50,
100,
allocation_effect,
age_effect,
diabetes_effect)
test3_end <- proc.time()
test4_start <- proc.time()
test4 <- sim_iter_vec(1,
n2,
dd_bl,
ld_bl,
rem_bl,
c_age_effect,
c_diabetes_effect,
d_quality_effect,
dd_shape,
dd_scale_bl,
ld_shape,
ld_scale_bl,
rem_shape,
rem_scale_bl,
wlm_shape,
wlm_scale_bl,
0.50,
100,
allocation_effect,
age_effect,
diabetes_effect)
test4_end <- proc.time()
(test1_end[3] - test1_start[3]) / 60
(test2_end[3] - test2_start[3]) / 60
(test3_end[3] - test3_start[3]) / 60
|
# Code accompanying the manuscript "Bayesian Analysis of Formula One Race Results"
# Last edited 2022-12-11 by @vankesteren
# Contents: status filtering, some EDA
library(tidyverse)
library(firatheme)
# Data loading ----
f1_dat <- read_rds("dat/f1_dat.rds")
f1_dat_finished <- f1_dat %>% filter(finished)
# Some EDA ----
# finish position
f1_dat_finished %>%
ggplot(aes(x = factor(position))) +
geom_bar(fill = firaCols[4]) +
theme_fira() +
labs(
title = "Distribution of finish positions",
subtitle = "F1 hybrid era (2014-2021)",
x = "Finish position",
y = "Count"
)
ggsave("img/eda_finish_position.png", width = 9, height = 6, bg = "white")
# basic plot
f1_dat_finished %>%
filter(driver %in% c("hamilton", "raikkonen", "giovinazzi"), year > 2015) %>%
ggplot(aes(x = factor(position), fill = driver)) +
geom_bar(position = position_dodge(preserve = "single")) +
theme_fira() +
scale_fill_fira() +
labs(
x = "Finish position",
y = "Count",
title = "Different drivers' finish positions",
subtitle = "Conditional on finishing the race",
fill = ""
) +
theme(legend.position = "top") +
facet_wrap(~year)
ggsave("img/eda_finish_drivers.png", width = 9, height = 6, bg = "white")
# average finish positions for 2021 season
f1_dat_finished %>%
filter(year == 2021) %>%
group_by(driver) %>%
summarize(mean_position = mean(position, na.rm = TRUE), sem = sd(position, na.rm = TRUE) / sqrt(n())) %>%
mutate(driver = fct_reorder(driver, -mean_position)) %>%
ggplot(aes(y = driver,
x = mean_position,
xmin = mean_position - 2*sem,
xmax = mean_position + 2*sem)) +
geom_pointrange(size = .4) +
theme_fira() +
labs(
y = "",
x = "Position (mean ± 2⋅se)",
title = "2021 Season Finish Positions",
subtitle = "Conditional on finishing the race"
)
ggsave("img/eda_finish_2021.png", width = 9, height = 6, bg = "white")
| /02_eda.R | permissive | vankesteren/f1model | R | false | false | 1,947 | r | # Code accompanying the manuscript "Bayesian Analysis of Formula One Race Results"
# Last edited 2022-12-11 by @vankesteren
# Contents: status filtering, some EDA
library(tidyverse)
library(firatheme)
# Data loading ----
f1_dat <- read_rds("dat/f1_dat.rds")
f1_dat_finished <- f1_dat %>% filter(finished)
# Some EDA ----
# finish position
f1_dat_finished %>%
ggplot(aes(x = factor(position))) +
geom_bar(fill = firaCols[4]) +
theme_fira() +
labs(
title = "Distribution of finish positions",
subtitle = "F1 hybrid era (2014-2021)",
x = "Finish position",
y = "Count"
)
ggsave("img/eda_finish_position.png", width = 9, height = 6, bg = "white")
# basic plot
f1_dat_finished %>%
filter(driver %in% c("hamilton", "raikkonen", "giovinazzi"), year > 2015) %>%
ggplot(aes(x = factor(position), fill = driver)) +
geom_bar(position = position_dodge(preserve = "single")) +
theme_fira() +
scale_fill_fira() +
labs(
x = "Finish position",
y = "Count",
title = "Different drivers' finish positions",
subtitle = "Conditional on finishing the race",
fill = ""
) +
theme(legend.position = "top") +
facet_wrap(~year)
ggsave("img/eda_finish_drivers.png", width = 9, height = 6, bg = "white")
# average finish positions for 2021 season
f1_dat_finished %>%
filter(year == 2021) %>%
group_by(driver) %>%
summarize(mean_position = mean(position, na.rm = TRUE), sem = sd(position, na.rm = TRUE) / sqrt(n())) %>%
mutate(driver = fct_reorder(driver, -mean_position)) %>%
ggplot(aes(y = driver,
x = mean_position,
xmin = mean_position - 2*sem,
xmax = mean_position + 2*sem)) +
geom_pointrange(size = .4) +
theme_fira() +
labs(
y = "",
x = "Position (mean ± 2⋅se)",
title = "2021 Season Finish Positions",
subtitle = "Conditional on finishing the race"
)
ggsave("img/eda_finish_2021.png", width = 9, height = 6, bg = "white")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_update_security_group_rule_descriptions_egress}
\alias{ec2_update_security_group_rule_descriptions_egress}
\title{[VPC only] Updates the description of an egress (outbound)
security group rule}
\usage{
ec2_update_security_group_rule_descriptions_egress(DryRun, GroupId,
GroupName, IpPermissions)
}
\arguments{
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
\item{GroupId}{The ID of the security group. You must specify either the security group
ID or the security group name in the request. For security groups in a
nondefault VPC, you must specify the security group ID.}
\item{GroupName}{[Default VPC] The name of the security group. You must specify either
the security group ID or the security group name in the request.}
\item{IpPermissions}{[required] The IP permissions for the security group rule.}
}
\value{
A list with the following syntax:\preformatted{list(
Return = TRUE|FALSE
)
}
}
\description{
[VPC only] Updates the description of an egress (outbound) security
group rule. You can replace an existing description, or add a
description to a rule that did not have one previously.
You specify the description as part of the IP permissions structure. You
can remove a description for a security group rule by omitting the
description parameter in the request.
}
\section{Request syntax}{
\preformatted{svc$update_security_group_rule_descriptions_egress(
DryRun = TRUE|FALSE,
GroupId = "string",
GroupName = "string",
IpPermissions = list(
list(
FromPort = 123,
IpProtocol = "string",
IpRanges = list(
list(
CidrIp = "string",
Description = "string"
)
),
Ipv6Ranges = list(
list(
CidrIpv6 = "string",
Description = "string"
)
),
PrefixListIds = list(
list(
Description = "string",
PrefixListId = "string"
)
),
ToPort = 123,
UserIdGroupPairs = list(
list(
Description = "string",
GroupId = "string",
GroupName = "string",
PeeringStatus = "string",
UserId = "string",
VpcId = "string",
VpcPeeringConnectionId = "string"
)
)
)
)
)
}
}
\examples{
\dontrun{
# This example updates the description for the specified security group
# rule.
svc$update_security_group_rule_descriptions_egress(
GroupId = "sg-123abc12",
IpPermissions = list(
list(
FromPort = 80L,
IpProtocol = "tcp",
IpRanges = list(
list(
CidrIp = "203.0.113.0/24",
Description = "Outbound HTTP access to server 2"
)
),
ToPort = 80L
)
)
)
}
}
\keyword{internal}
| /cran/paws.compute/man/ec2_update_security_group_rule_descriptions_egress.Rd | permissive | TWarczak/paws | R | false | true | 3,039 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_update_security_group_rule_descriptions_egress}
\alias{ec2_update_security_group_rule_descriptions_egress}
\title{[VPC only] Updates the description of an egress (outbound)
security group rule}
\usage{
ec2_update_security_group_rule_descriptions_egress(DryRun, GroupId,
GroupName, IpPermissions)
}
\arguments{
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
\item{GroupId}{The ID of the security group. You must specify either the security group
ID or the security group name in the request. For security groups in a
nondefault VPC, you must specify the security group ID.}
\item{GroupName}{[Default VPC] The name of the security group. You must specify either
the security group ID or the security group name in the request.}
\item{IpPermissions}{[required] The IP permissions for the security group rule.}
}
\value{
A list with the following syntax:\preformatted{list(
Return = TRUE|FALSE
)
}
}
\description{
[VPC only] Updates the description of an egress (outbound) security
group rule. You can replace an existing description, or add a
description to a rule that did not have one previously.
You specify the description as part of the IP permissions structure. You
can remove a description for a security group rule by omitting the
description parameter in the request.
}
\section{Request syntax}{
\preformatted{svc$update_security_group_rule_descriptions_egress(
DryRun = TRUE|FALSE,
GroupId = "string",
GroupName = "string",
IpPermissions = list(
list(
FromPort = 123,
IpProtocol = "string",
IpRanges = list(
list(
CidrIp = "string",
Description = "string"
)
),
Ipv6Ranges = list(
list(
CidrIpv6 = "string",
Description = "string"
)
),
PrefixListIds = list(
list(
Description = "string",
PrefixListId = "string"
)
),
ToPort = 123,
UserIdGroupPairs = list(
list(
Description = "string",
GroupId = "string",
GroupName = "string",
PeeringStatus = "string",
UserId = "string",
VpcId = "string",
VpcPeeringConnectionId = "string"
)
)
)
)
)
}
}
\examples{
\dontrun{
# This example updates the description for the specified security group
# rule.
svc$update_security_group_rule_descriptions_egress(
GroupId = "sg-123abc12",
IpPermissions = list(
list(
FromPort = 80L,
IpProtocol = "tcp",
IpRanges = list(
list(
CidrIp = "203.0.113.0/24",
Description = "Outbound HTTP access to server 2"
)
),
ToPort = 80L
)
)
)
}
}
\keyword{internal}
|
setwd('C:/BWCHEN/2_Course/MathReview/Bowen2018/KSUMathReview/RMaterials')
rm(list=ls())
library(datasets)
setwd('C:/BWCHEN/2_Course/MathReview/Bowen2018/Lecture11')
# Return the working directory.
getwd()
# Remove everything saved in R.
rm(list = ls())
# or to remove the single object
x <- 3 # Assign the numeric value 3 to the object called x.
rm('x')
# Install packages
install.packages('dplyr')
# Call the package
library(dplyr)
# Or, you can use the function below.
require(dplyr)
plot
?systemfit
# Read CSV data.
dat1 <- data.table::fread('comtrade_trade_data.csv')
# Read Excel data
dat2 <- readxl::read_xls('UN Comtrade Country List.xls',
sheet = 1, range = 'A1:I294')
# Write the data in CSV format.
# You should see a csv file in your working directory
# after running the codes below.
write.csv(dat1, file = 'comtrade_trade_data_test.csv')
# Write the data in Excel format.
# Fix: the original passed a '*.csv' filename to write.xlsx(), producing an
# Excel workbook with a misleading .csv extension. Use .xlsx instead.
xlsx::write.xlsx(dat2, file = 'comtrade_trade_data_test.xlsx',
                 sheetName = 'Sheet1')
# Use R as an calculator
234/432
7392347983 + 378923749
39749*203023
# Use R to print something
'Hello, world'
print('Hello, world')
cat('Hello, world')
# Demos of assignment and R's basic atomic types. Two of the lines below
# error on purpose, to show what happens with type mismatches and
# undefined objects.
x <- 3 # a numerical value
x
x + 1 # Add 1 to the value in x
x <- 'Kansas State University' # a character
x
x + 1 # Errors on purpose: x is now a character, and 1 cannot be added to text
x <- TRUE # a logical value
x
y # Errors on purpose: object 'y' not found -- y has never been assigned
data(cars)
head(cars)
tail(cars)
ncol(cars) # Number of columns in the dataset
nrow(cars) # Number of rows in the dataset
colnames(cars) <- c('SPEED', 'DISTANCE') # Specify the column names.
summary(cars) # Summary stats of the data
cars$type <- 'Chevy' # Make an another column.
cars[1, 2]
# Template: the general shape of an R function definition.
# Not runnable as-is -- the "..." is a placeholder for the real body.
func <- function(x) {
  y = ...
  return(y)
}
# Square the input value.
func <- function(x) {
  x^2  # last expression is returned implicitly
}
y <- func(x = 2)
y
# Squared sum of two numbers: (x1 + x2)^2.
func2 <- function(x1, x2) {
  (x1 + x2)^2  # last expression is returned implicitly
}
y <- func2(x1 = 2, x2 = 4)
y
# Report whether the input is of character type.
func <- function(x) is.character(x)
func('J')    # TRUE
func(1)      # FALSE: numeric, not character
func(TRUE)   # FALSE: logical, not character
# Demo: finding the maximum of a numeric vector.
func <- function(x) {
  max(x) # What is the maximum number in the vector?
}
set.seed(101)
y <- rnorm(100, 0, 1)
func(y)
# First attempt at the second-largest value: drop every occurrence of the
# maximum, then take the maximum of what remains.
# NOTE(review): with tied maxima this removes ALL of them (skipping the true
# second-largest), and a length-1 input errors; the sort() version below
# avoids the tie problem.
func_max <- function(x) {
  max(x[-which(x == max(x))]) # What is the second-largest number in the vector?
}
# NOTE(review): no set.seed() immediately before this draw, so this y is not
# reproducible across runs (unlike the seeded draws elsewhere in the script).
y <- rnorm(10, 0, 1)
func_max(y)
# Cleaner second-largest: sort descending and take the 2nd element.
func_max <- function(x) {
  sort(x, decreasing = TRUE)[2] # What is the second-largest number in the vector?
}
set.seed(101)
y <- rnorm(10, 0, 1)
func_max(y)
func <- function(x1, x2) {
y = x1 + x2
out <- list(y = y, x1 = x1, x2 = x2)
return(out)
}
y <- func(2, 3)
y[[1]]; y[['y']]
y[[2]];y[['x1']]
guessfunc <- function(i){
computerguess <- sample(c('head', 'tail'), 1)
if(i == computerguess){
print('You win')
}else{
print('You loss')
}
}
set.seed(1011)
guessfunc('head')
guessfunc('tail')
set.seed(1012) # Now we play with another machine.
guessfunc2 <- function(i){
computerguess <- sample(c('head', 'tail'), 1)
cost <- 6
if(i == computerguess){
revenue <- 10
}else{
revenue <- 0
}
netincome <- revenue - cost
return(netincome)
}
guessfunc2('head')
guessfunc2('tail')
set.seed(1012)
guessfunc3 <- function(i){
computerguess <- sample(c('head', 'tail'), 1)
cost <- 6
revenue <- ifelse(i == computerguess, 10, 0) # simplified version of if-else.
netincome <- revenue - cost
return(netincome)
}
guessfunc3('head')
guessfunc3('tail')
StringSample <- c('I like Kansas State University. I am proud of being a Wildcat.')
test <- strsplit(StringSample, split = ' ') # Split the string by space.
test[[1]]
paste(test[[1]], collapse = ' ') # Return to original format.
test <- strsplit(StringSample, split = 'a') # Split the string by the letter "a".
test[[1]]
nchar(StringSample) # Number of characters in the string
grep('Kansas', StringSample) # Returns the index of element where "Kansas" is
grepl('Kansas', StringSample) # Is "Kansas" in the vector?
sub('I', 'i', StringSample) # Replace "I" by "i", for the first time that "I" appears.
gsub('I', 'i', StringSample) # Replace all "I"s by "i".
substr(StringSample, 1, 4) # Choose the string from 1 to 4.
toupper(StringSample) # Capitalize all the letters
tolower(StringSample) # De-capitalize all the letters
# Build two reproducible 3x3 integer matrices (filled row-by-row).
set.seed(10)
A = matrix(sample(c(1:20), 9), nrow = 3, ncol = 3, byrow = TRUE)
A
set.seed(11)
B = matrix(sample(c(1:20), 9), nrow = 3, ncol = 3, byrow = TRUE)
B
# Matrix operations
A + B # element-wise addition
A - B # element-wise subtraction
A %*% B # Matrix product; note that it is different to A*B (element-wise)
solve(A) # The inverse of A
# NOTE(review): no set.seed() before this draw, so B2 differs between runs.
B2 <- matrix(sample(c(1:20), 3), nrow = 3, ncol = 1, byrow = TRUE)
solve(A, B2) # Solve Ax = B2 for x
crossprod(A, B) # = A'B
crossprod(A) # = A'A
t(A) # Transpose of A
diag(A) # the vector of diagonal elements of A (not a diagonal matrix)
eigen(A) # eigenvalues AND eigenvectors of A (a list with $values, $vectors)
set.seed(101)
dat <- data.frame(matrix(rnorm(100, 0, 1), 20, 5)) # Create a data frame with 20 rows and 5 columns.
# The data are from a standard normal distribution.
apply(dat, 2, sd) # Compute column standard deviations (not means)
# Same computation three more ways:
out1 <- lapply(c(1: ncol(dat)), function(i) sd(dat[, i])) # lapply returns a list
out1
out2 <- sapply(c(1: ncol(dat)), function(i) sd(dat[, i])) # sapply simplifies to a vector
out2
# An explicit loop printing each column's standard deviation.
for (i in 1:ncol(dat)){
  print(sd(dat[, i]))
}
colValues <- dat[, 1] # Values in the first column
# break: print values until the first one exceeds 0.1, then exit the loop.
for (i in 1:length(colValues)){
  print(colValues[i])
  if(colValues[i] > 0.1) break
}
# stopifnot: same scan, but raise an error instead of exiting quietly.
for (i in 1:length(colValues)){
  print(colValues[i])
  stopifnot(colValues[i] <= 0.1)
}
| /RMaterials/Session1.R | no_license | yz81/KSUMathReview | R | false | false | 5,312 | r | setwd('C:/BWCHEN/2_Course/MathReview/Bowen2018/KSUMathReview/RMaterials')
rm(list=ls())
library(datasets)
setwd('C:/BWCHEN/2_Course/MathReview/Bowen2018/Lecture11')
# Return the working directory.
getwd()
# Remove everything saved in R.
rm(list = ls())
# or to remove the single object
x <- 3 # Assign the numeric value 3 to the object called x.
rm('x')
# Install packages
install.packages('dplyr')
# Call the package
library(dplyr)
# Or, you can use the function below.
require(dplyr)
plot
?systemfit
# Read CSV data.
dat1 <- data.table::fread('comtrade_trade_data.csv')
# Read Excel data
dat2 <- readxl::read_xls('UN Comtrade Country List.xls',
sheet = 1, range = 'A1:I294')
# Write the data in CSV format.
# You should see a csv file in your working directory
# after running the codes below.
write.csv(dat1, file = 'comtrade_trade_data_test.csv')
# Write the data in excel format.
xlsx::write.xlsx(dat2, file = 'comtrade_trade_data_test.csv',
sheetName = 'Sheet1')
# Use R as an calculator
234/432
7392347983 + 378923749
39749*203023
# Use R to print something
'Hello, world'
print('Hello, world')
cat('Hello, world')
x <- 3 # a numerical value
x
x + 1 # Add 1 to the value in x
x <- 'Kansas State University' # a character
x
x + 1 # Encounter an error, because x is an character
x <- TRUE # a logical value
x
y
data(cars)
head(cars)
tail(cars)
ncol(cars) # Number of columns in the dataset
nrow(cars) # Number of rows in the dataset
colnames(cars) <- c('SPEED', 'DISTANCE') # Specify the column names.
summary(cars) # Summary stats of the data
cars$type <- 'Chevy' # Make an another column.
cars[1, 2]
func <- function(x) {
y = ...
return(y)
}
func <- function(x) {
y = x^2 # square of x.
return(y)
}
y <- func(x = 2)
y
func2 <- function(x1, x2){
y = (x1 + x2)^2
return(y)
}
y <- func2(x1 = 2, x2 = 4)
y
func <- function(x) {
is.character(x) # Is the input a character?
}
func('J')
func(1)
func(TRUE)
func <- function(x) {
max(x) # What is the maximum number in the vector?
}
set.seed(101)
y <- rnorm(100, 0, 1)
func(y)
func_max <- function(x) {
max(x[-which(x == max(x))]) # What is the second-largest number in the vector?
}
y <- rnorm(10, 0, 1)
func_max(y)
func_max <- function(x) {
sort(x, decreasing = TRUE)[2] # What is the second-largest number in the vector?
}
set.seed(101)
y <- rnorm(10, 0, 1)
func_max(y)
func <- function(x1, x2) {
y = x1 + x2
out <- list(y = y, x1 = x1, x2 = x2)
return(out)
}
y <- func(2, 3)
y[[1]]; y[['y']]
y[[2]];y[['x1']]
guessfunc <- function(i){
computerguess <- sample(c('head', 'tail'), 1)
if(i == computerguess){
print('You win')
}else{
print('You loss')
}
}
set.seed(1011)
guessfunc('head')
guessfunc('tail')
set.seed(1012) # Now we play with another machine.
guessfunc2 <- function(i){
computerguess <- sample(c('head', 'tail'), 1)
cost <- 6
if(i == computerguess){
revenue <- 10
}else{
revenue <- 0
}
netincome <- revenue - cost
return(netincome)
}
guessfunc2('head')
guessfunc2('tail')
set.seed(1012)
guessfunc3 <- function(i){
computerguess <- sample(c('head', 'tail'), 1)
cost <- 6
revenue <- ifelse(i == computerguess, 10, 0) # simplified version of if-else.
netincome <- revenue - cost
return(netincome)
}
guessfunc3('head')
guessfunc3('tail')
StringSample <- c('I like Kansas State University. I am proud of being a Wildcat.')
test <- strsplit(StringSample, split = ' ') # Split the string by space.
test[[1]]
paste(test[[1]], collapse = ' ') # Return to original format.
test <- strsplit(StringSample, split = 'a') # Split the string by the letter "a".
test[[1]]
nchar(StringSample) # Number of characters in the string
grep('Kansas', StringSample) # Returns the index of element where "Kansas" is
grepl('Kansas', StringSample) # Is "Kansas" in the vector?
sub('I', 'i', StringSample) # Replace "I" by "i", for the first time that "I" appears.
gsub('I', 'i', StringSample) # Replace all "I"s by "i".
substr(StringSample, 1, 4) # Choose the string from 1 to 4.
toupper(StringSample) # Capitalize all the letters
tolower(StringSample) # De-capitalize all the letters
set.seed(10)
A = matrix(sample(c(1:20), 9), nrow = 3, ncol = 3, byrow = TRUE)
A
set.seed(11)
B = matrix(sample(c(1:20), 9), nrow = 3, ncol = 3, byrow = TRUE)
B
# Matrix operations
A + B
A - B
A %*% B # Note that it is different to A*B
solve(A) # The inverse of A
B2 <- matrix(sample(c(1:20), 3), nrow = 3, ncol = 1, byrow = TRUE)
solve(A, B2) # Solve Ax= B2
crossprod(A, B) # = A'B
crossprod(A) # = A'A
t(A) # Transpose of A
diag(A) # diagonal matrix with diagonal elements in A
eigen(A) # eigenvalues of A
set.seed(101)
dat <- data.frame(matrix(rnorm(100, 0, 1), 20, 5)) # Create a matrix with 20 rows and 5 columns.
# The data are from a standard normal distribution.
apply(dat, 2, sd) # Compute column means
out1 <- lapply(c(1: ncol(dat)), function(i) sd(dat[, i]))
out1
out2 <- sapply(c(1: ncol(dat)), function(i) sd(dat[, i]))
out2
for (i in 1:ncol(dat)){
print(sd(dat[, i]))
}
colValues <- dat[, 1] # Values in the first column
for (i in 1:length(colValues)){
print(colValues[i])
if(colValues[i] > 0.1) break
}
for (i in 1:length(colValues)){
print(colValues[i])
stopifnot(colValues[i] <= 0.1)
}
|
library(data.table)

## ---- Download and unpack the UCI HAR dataset ----
URL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(URL, destfile = "UCIHAR.zip", method = "curl")
unzip("UCIHAR.zip")

## ---- Load raw measurements, activity labels and subject ids ----
xtest <- read.table("./UCI HAR Dataset/test/X_test.txt")
xtrain <- read.table("./UCI HAR Dataset/train/X_train.txt")
ytest <- read.table("./UCI HAR Dataset/test/y_test.txt")
ytrain <- read.table("./UCI HAR Dataset/train/y_train.txt")
subjecttest <- read.table("./UCI HAR Dataset/test/subject_test.txt")
subjecttrain <- read.table("./UCI HAR Dataset/train/subject_train.txt")

## ---- Merge training & test set observations ----
## BUGFIX: the melt()/dcast() step below references the columns as
## "SubjectID" and "Activity"; the original named them "subject"/"activity",
## so the reshape failed with "id variables not found".
subject <- rbind(subjecttest, subjecttrain)
setnames(subject, "V1", "SubjectID")
activity <- rbind(ytest, ytrain)
setnames(activity, "V1", "Activity")
dataset <- rbind(xtest, xtrain)

## ---- Load reference information ----
## BUGFIX: use paths relative to the unzipped archive instead of the
## machine-specific absolute paths ("/Users/Herodotus/...").
featurenames <- read.table("./UCI HAR Dataset/features.txt",
                           col.names = c("FeatureID", "Feature"))
activitynames <- read.table("./UCI HAR Dataset/activity_labels.txt",
                            col.names = c("ActivityID", "Activity"))

## ---- Extract mean() and std() measurements, label the columns ----
mean_std <- grep("mean\\(\\)|std\\(\\)", featurenames[, 2])
cleandata <- dataset[, mean_std]
names(cleandata) <- featurenames[mean_std, 2]

## ---- Substitute activity codes with descriptive names ----
activity[, 1] <- activitynames[activity[, 1], 2]

## ---- Assemble the tidy dataset ----
subject.activity <- cbind(subject, activity)
dataset <- cbind(subject.activity, cleandata)

## ---- Second dataset: average of each variable per subject & activity ----
tidydata <- data.table(dataset)
meltdata <- melt(tidydata, id = c("SubjectID", "Activity"))
castdata <- dcast(meltdata, SubjectID + Activity ~ variable, mean)
write.table(castdata, file = "tidydata.txt", sep = ",", row.names = FALSE)
| /Assignment/run_analysis.R | no_license | LingGoh/Getting-and-cleaning-data | R | false | false | 1,782 | r | library(data.table)
##Getting files
URL<-"https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(URL, destfile="UCIHAR.zip", method="curl")
unzip("UCIHAR.zip")
xtest<-read.table("./UCI HAR Dataset/test/X_test.txt")
xtrain<-read.table("./UCI HAR Dataset/train/X_train.txt")
ytest<-read.table("./UCI HAR Dataset/test/y_test.txt")
ytrain<-read.table("./UCI HAR Dataset/train/y_train.txt")
subjecttest<-read.table("./UCI HAR Dataset/test/subject_test.txt")
subjecttrain<-read.table("./UCI HAR Dataset/train/subject_train.txt")
##Merging training & test sets observations
subject<-rbind(subjecttest, subjecttrain)
setnames(subject, "V1", "subject")
activity<-rbind(ytest, ytrain)
setnames(activity, "V1", "activity")
dataset<-rbind(xtest, xtrain)
##load reference information
featurenames<-read.table("/Users/Herodotus/JHUDataAnalysis/M3W4/UCI HAR Dataset/features.txt", col.names=c("FeatureID","Feature"))
activitynames<-read.table("/Users/Herodotus/JHUDataAnalysis/M3W4/UCI HAR Dataset/activity_labels.txt", col.names=c("ActivityID", "Activity"))
##extracts mean & std deviation measurements into a clean dataset, then labels columns
mean_std<-grep("mean\\(\\)|std\\(\\)",featurenames[,2])
cleandata<-dataset[,mean_std]
names(cleandata)<-featurenames[mean_std,2]
##substitute activity names
activity[,1]<-activitynames[activity[,1],2]
##merge columns
subject.activity<-cbind(subject, activity)
dataset<-cbind(subject.activity, cleandata)
## create second dataset with average for each subject & activity
tidydata<-data.table(dataset)
meltdata<-melt(tidydata, id=c("SubjectID","Activity"))
castdata<-dcast(meltdata, SubjectID+Activity~variable,mean)
write.table(castdata, file="tidydata.txt", sep=",", row.names=FALSE)
|
##' Parse hyperparameters for data splitting algorithm
##'
##' @title Input for data splitting algorithms
##' @param split.method A character string specifying the algorithm for data splitting:
##' \itemize{
##' \item{"loob"} leave one out bootstrap
##' \item{"bootcv"} bootstrap cross validation
##' \item{"cv5"} 5-fold cross validation
##' \item{"loocv"} leave one out cross validation aka N-1 fold cross validation
##' \item{"632plus"} Efron's .632+ bootstrap
##' }
##' @param B Number of repetitions of bootstrap or k-fold cross-validation
##' @param N Sample size
##' @param M Subsample size. Default is N (no subsampling).
##' @param seed Integer passed to set.seed. If not given or NA no seed is set.
##' @return A list with the following elements:
##' \itemize{
##' \item{split.methodName}: the print name of the algorithm
##' \item{split.method}: the internal name of the algorithm
##' \item{index}: the index for data splitting. For bootstrap splitting this
##' is a matrix with B columns and M rows identifying the in-bag subjects. For k-fold
##' cross-validation this is a matrix with B columns identifying the membership to the k groups.
##' \item{k}: the k of k-fold cross-validation
##' \item{N}: the sample size
##' \item{M}: the subsample size
##' }
##' @seealso Score
##' @examples
##' # 3-fold crossvalidation
##' getSplitMethod("cv3",B=4,N=37)
##'
##' # bootstrap with replacement
##' getSplitMethod("loob",B=4,N=37)
##'
##' # bootstrap without replacement
##' getSplitMethod("loob",B=4,N=37,M=20)
##'
##' @export
##' @author Thomas A. Gerds <tag@@biostat.ku.dk>
getSplitMethod <- function(split.method,B,N,M,seed){
    ## Seed the RNG only when a usable seed is explicitly supplied.
    if (!missing(seed) && !is.null(seed) && !is.na(seed[[1]])) set.seed(seed)
    if (missing(split.method)) split.method <- ""
    split.methodName <- NULL
    split.method <- tolower(split.method)
    ## A name of the form "cvK" requests K-fold cross-validation:
    ## pull K out of the string; k stays NULL when the pattern doesn't match.
    k <- as.numeric(substring(grep("^cv[0-9]+$",split.method,value=TRUE,ignore.case=TRUE),3))
    if (length(k)==0) k <- NULL
    ## none: no data splitting requested
    if (split.method %in% c("","noplan","none","no data",NA,FALSE,0L)) {
        B <- 0
        split.method <- "noplan"
        split.methodName <- "no data splitting"
    }
    ## classical cross-validation
    if (!is.null(k)){ ## classical cross-validation
        split.method <- "crossval"
        split.methodName <- paste(k,"fold cross-validation",sep="-")
        if (missing(B)) B <- 1 # repeat k-fold CrossVal one time
    }
    else{
        ## leave-one-out CV is (N-1)-fold CV run exactly once
        if (length(grep("loocv",split.method,ignore.case=TRUE))>0){
            split.method <- "loocv"
            split.methodName <- "LeaveOneOutCV"
            k <- N-1
            B <- 1
        }
    }
    ## resample or subsample bootstrap
    if(length(grep("^boot",split.method,value=FALSE,ignore.case=TRUE))>0){
        split.method <- "BootCv"
        split.methodName <- "BootCv"
        if (missing(B)) B <- 100
    }
    ## Efron's .632 / .632+ variants (matched anywhere in the name)
    if (length(grep("632",split.method,value=FALSE,ignore.case=TRUE))>0){
        if (length(grep("plus|\\+",split.method,value=FALSE,ignore.case=TRUE))>0){
            split.method <- "Boot632plus"
            split.methodName <- ".632+"
            if (missing(B)) B <- 100
        }
        else{
            split.method <- "Boot632"
            split.methodName <- ".632"
            if (missing(B)) B <- 100
        }
    }
    ## default is leave one out bootstrap
    ## if ((length(grep("looboot|loob|leaveoneoutb",split.method,value=FALSE,ignore.case=TRUE))>0) ||
    if (!(split.method %in% c("noplan","crossval","loocv","BootCv","Boot632","Boot632plus"))){
        split.method <- "LeaveOneOutBoot"
        split.methodName <- "LeaveOneOutBoot"
        if (missing(B)) B <- 100
    }
    ## Subsample size M defaults to N (ordinary bootstrap with replacement).
    if (missing(M)) M <- N
    stopifnot(M[[1]]>0 && M[[1]]<=N[[1]])
    ## M != N means bootstrap WITHOUT replacement (subsampling).
    ## Note: a fractional M (0 < M < 1) is interpreted as a proportion of N
    ## and is converted AFTER the subsampling flag is set.
    subsampling <- M!=N
    if (M<1) M <- round(M*N)
    ## Draw the split index matrix. Columns = repetitions.
    ResampleIndex <- switch(split.method,
                            "loocv"={
                                ## one column listing all subjects
                                matrix(1:N,ncol=1)
                            },
                            "noplan"={
                                NULL
                            },
                            "crossval"={
                                ## each column is a random assignment of the
                                ## N subjects to the k folds
                                do.call("cbind",lapply(1:B,function(b){sample(rep(1:k,length.out=N))}))
                            },
                            { ## default is bootstrap
                                ## split.method <- "LeaveOneOutBoot"
                                ## split.methodName <- "LeaveOneOutBoot"
                                ## each column is a sorted in-bag sample of
                                ## size M, with replacement unless subsampling
                                ResampleIndex <- do.call("cbind",lapply(1:B,function(b){
                                    sort(sample(1:N,size=M,replace=!subsampling))
                                }))
                            })
    ## By this point every branch above has assigned B when it was missing,
    ## so missing(B) should be FALSE here and only the validation runs --
    ## TODO confirm this dead-branch reading.
    if (missing(B)) {
        B <- switch(split.method,"loocv"={1},"noplan"={0},{100})
    }
    else{
        ## NOTE(review): this check accepts any negative B yet rejects a
        ## positive non-integer B; presumably the intended guard is
        ## B > 0 && B == round(B) -- confirm upstream before changing.
        stopifnot(B[[1]]<0 || B[[1]]==round(B[[1]]))
    }
    ## Bundle everything; see the roxygen block above for field meanings.
    out <- list(name=split.methodName,
                internal.name=split.method,
                index=ResampleIndex,
                k=k,
                B=B,
                M=M,
                N=N)
    class(out) <- "split.method"
    out
}
| /R/getSplitMethod.R | no_license | LoSerigne/riskRegression | R | false | false | 5,061 | r | ##' Parse hyperparameters for data splitting algorithm
##'
##' @title Input for data splitting algorithms
##' @param split.method A character string specifying the algorithm for data splitting:
##' \itemize{
##' \item{"loob"} leave one out bootstrap
##' \item{"bootcv"} bootstrap cross validation
##' \item{"cv5"} 5-fold cross validation
##' \item{"loocv"} leave one out cross validation aka N-1 fold cross validation
##' \item{"632plus"} Efron's .632+ bootstrap
##' }
##' @param B Number of repetitions of bootstrap or k-fold cross-validation
##' @param N Sample size
##' @param M Subsample size. Default is N (no subsampling).
##' @param seed Integer passed to set.seed. If not given or NA no seed is set.
##' @return A list with the following elements:
##' \itemize{
##' \item{split.methodName}: the print name of the algorithm
##' \item{split.method}: the internal name of the algorithm
##' \item{index}: the index for data splitting. For bootstrap splitting this
##' is a matrix with B columns and M rows identifying the in-bag subjects. For k-fold
##' cross-validation this is a matrix with B columns identifying the membership to the k groups.
##' \item{k}: the k of k-fold cross-validation
##' \item{N}: the sample size
##' \item{M}: the subsample size
##' }
##' @seealso Score
##' @examples
##' # 3-fold crossvalidation
##' getSplitMethod("cv3",B=4,N=37)
##'
##' # bootstrap with replacement
##' getSplitMethod("loob",B=4,N=37)
##'
##' # bootstrap without replacement
##' getSplitMethod("loob",B=4,N=37,M=20)
##'
##' @export
##' @author Thomas A. Gerds <tag@@biostat.ku.dk>
getSplitMethod <- function(split.method,B,N,M,seed){
if (!missing(seed) && !is.null(seed) && !is.na(seed[[1]])) set.seed(seed)
if (missing(split.method)) split.method <- ""
split.methodName <- NULL
split.method <- tolower(split.method)
k <- as.numeric(substring(grep("^cv[0-9]+$",split.method,value=TRUE,ignore.case=TRUE),3))
if (length(k)==0) k <- NULL
## none
if (split.method %in% c("","noplan","none","no data",NA,FALSE,0L)) {
B <- 0
split.method <- "noplan"
split.methodName <- "no data splitting"
}
## classical cross-validation
if (!is.null(k)){ ## classical cross-validation
split.method <- "crossval"
split.methodName <- paste(k,"fold cross-validation",sep="-")
if (missing(B)) B <- 1 # repeat k-fold CrossVal one time
}
else{
if (length(grep("loocv",split.method,ignore.case=TRUE))>0){
split.method <- "loocv"
split.methodName <- "LeaveOneOutCV"
k <- N-1
B <- 1
}
}
## resample or subsample bootstrap
if(length(grep("^boot",split.method,value=FALSE,ignore.case=TRUE))>0){
split.method <- "BootCv"
split.methodName <- "BootCv"
if (missing(B)) B <- 100
}
if (length(grep("632",split.method,value=FALSE,ignore.case=TRUE))>0){
if (length(grep("plus|\\+",split.method,value=FALSE,ignore.case=TRUE))>0){
split.method <- "Boot632plus"
split.methodName <- ".632+"
if (missing(B)) B <- 100
}
else{
split.method <- "Boot632"
split.methodName <- ".632"
if (missing(B)) B <- 100
}
}
## default is leave one out bootstrap
## if ((length(grep("looboot|loob|leaveoneoutb",split.method,value=FALSE,ignore.case=TRUE))>0) ||
if (!(split.method %in% c("noplan","crossval","loocv","BootCv","Boot632","Boot632plus"))){
split.method <- "LeaveOneOutBoot"
split.methodName <- "LeaveOneOutBoot"
if (missing(B)) B <- 100
}
if (missing(M)) M <- N
stopifnot(M[[1]]>0 && M[[1]]<=N[[1]])
subsampling <- M!=N
if (M<1) M <- round(M*N)
ResampleIndex <- switch(split.method,
"loocv"={
matrix(1:N,ncol=1)
},
"noplan"={
NULL
},
"crossval"={
do.call("cbind",lapply(1:B,function(b){sample(rep(1:k,length.out=N))}))
},
{ ## default is bootstrap
## split.method <- "LeaveOneOutBoot"
## split.methodName <- "LeaveOneOutBoot"
ResampleIndex <- do.call("cbind",lapply(1:B,function(b){
sort(sample(1:N,size=M,replace=!subsampling))
}))
})
if (missing(B)) {
B <- switch(split.method,"loocv"={1},"noplan"={0},{100})
}
else{
stopifnot(B[[1]]<0 || B[[1]]==round(B[[1]]))
}
out <- list(name=split.methodName,
internal.name=split.method,
index=ResampleIndex,
k=k,
B=B,
M=M,
N=N)
class(out) <- "split.method"
out
}
|
step <- function(model, ddf = "Satterthwaite", type = 3, alpha.random = 0.1,
                 alpha.fixed = 0.05, reduce.fixed = TRUE, reduce.random = TRUE,
                 fixed.calc = TRUE, lsmeans.calc = TRUE, difflsmeans.calc = TRUE,
                 test.effs = NULL, keep.effs = NULL, ...)
{
  ## Automatic backward elimination of random and fixed effects for an
  ## lme4 fit. Only accepts "lmerMod" objects; everything else is rejected
  ## up front with the same error message as before.
  if (!inherits(model, "lmerMod"))
    stop("The model is not linear mixed effects model")
  ## Translate the user-facing ddf keyword into its internal name.
  ddf.internal <- checkNameDDF(ddf)
  ## Delegate the elimination procedure to the internal work-horse
  ## (stepFun is package-internal; change.contr is always TRUE here).
  out <- stepFun(model = model, ddf = ddf.internal, type = type,
                 alpha.random = alpha.random,
                 alpha.fixed = alpha.fixed,
                 reduce.fixed = reduce.fixed,
                 reduce.random = reduce.random,
                 fixed.calc = fixed.calc,
                 lsmeans.calc = lsmeans.calc,
                 difflsmeans.calc = difflsmeans.calc,
                 test.effs = test.effs,
                 keep.effs = keep.effs, change.contr = TRUE)
  ## Tag the result so print.step()/plot.step() dispatch on it.
  class(out) <- "step"
  out
}
print.step <- function(x, ...)
{
  ## S3 print method for the result of step(): prints the random-effects
  ## elimination table, the fixed-effects ANOVA table, the (differences of)
  ## least-squares means, and finally the reduced model.
  if(!is.null(x$rand.table))
  {
    cat("\nRandom effects:\n")
    x$rand.table[,"p.value"] <- format.pval(x$rand.table[,"p.value"], digits=4,
                                            eps=1e-7)
    x$rand.table[,"Chi.sq"] <- round(x$rand.table[,"Chi.sq"], 2)
    print(x$rand.table)
  }
  ## Fixed-effects section (skipped entirely when no table is present).
  ## The original wrapped this in an empty `if(is.null(...)){}else{}`;
  ## the inverted test below is equivalent and readable.
  if(!is.null(x$anova.table)){
    if(nrow(x$anova.table) != 0)
    {
      ## BUGFIX: `class(x$model) == "lm" | class(x$model) == "gls"` compares
      ## the whole class vector element-wise; for multi-class objects the
      ## condition has length > 1, which is an error in R >= 4.2.
      ## inherits() with scalar || is the robust test.
      if(inherits(x$model, "lm") || inherits(x$model, "gls"))
      {
        ## The model was reduced all the way down to a fixed-effects-only
        ## fit: print its tables and the model itself, then stop here.
        cat("\nFixed effects:\n")
        print(x$anova.table)
        cat("\nLeast squares means:\n")
        print(x$lsmeans.table)
        cat("\nFinal model:\n")
        print(x$model)
        return()
      }
      else
      {
        cat("\nFixed effects:\n")
        x$anova.table[,"Pr(>F)"] <- format.pval(x$anova.table[,"Pr(>F)"],
                                                digits=4, eps=1e-7)
        x$anova.table[,c("Sum Sq","Mean Sq", "F.value")] <-
          round(x$anova.table[,c("Sum Sq","Mean Sq", "F.value")],4)
        x$anova.table[,"DenDF"] <- round(x$anova.table[,"DenDF"],2)
        print(x$anova.table)
        if(!is.null(x$lsmeans.table))
        {
          cat("\nLeast squares means:\n")
          ## Columns before "Estimate" (factor levels) and "DF" are printed
          ## with test-statistic formatting; last column is the p-value.
          printCoefmat(x$lsmeans.table, dig.tst=3 ,
                       tst.ind=c(1:(which(colnames(x$lsmeans.table)=="Estimate")-1),
                                 which(colnames(x$lsmeans.table)=="DF")),
                       digits=3 , P.values = TRUE, has.Pvalue=TRUE)
        }
        if(!is.null(x$diffs.lsmeans.table))
        {
          cat("\n Differences of LSMEANS:\n")
          printCoefmat(x$diffs.lsmeans.table, dig.tst=1 ,
                       tst.ind=c(1:(which(colnames(x$diffs.lsmeans.table)==
                                            "Estimate")-1),
                                 which(colnames(x$diffs.lsmeans.table)=="DF")),
                       digits=3 , P.values = TRUE, has.Pvalue=TRUE)
        }
      }
    }
    else
      print(x$anova.table)
  }
  cat("\nFinal model:\n")
  ## x$model is a merMod-type (S4) object here; show its call.
  print(x$model@call)
}
plot.step <- function(x, main = NULL, cex = 1.4,
                      which.plot = c("LSMEANS", "DIFF of LSMEANS"),
                      effs = NULL, mult = TRUE, ...)
{
  ## S3 plot method for step(): draws the requested least-squares-means
  ## plots via the package-internal plotLSMEANS().
  nonempty <- function(tab) !is.null(tab) && nrow(tab) > 0

  if (nonempty(x$lsmeans.table) && ("LSMEANS" %in% which.plot)) {
    ## Return early only when LSMEANS is the sole requested plot,
    ## exactly as the original did.
    if (length(which.plot) == 1 && which.plot == "LSMEANS")
      return(plotLSMEANS(x$lsmeans.table, x$response, "LSMEANS",
                         main = main, cex = cex, effs = effs, mult = mult))
  }
  if (nonempty(x$diffs.lsmeans.table) &&
      ("DIFF of LSMEANS" %in% which.plot))
    plotLSMEANS(x$diffs.lsmeans.table, x$response, "DIFF of LSMEANS",
                main = main, cex = cex, effs = effs, mult = mult)
}
lmer <- function(formula, data = NULL, REML = TRUE,
                 control = lmerControl(), start = NULL, verbose = 0L,
                 subset, weights, na.action, offset, contrasts = NULL,
                 devFunOnly = FALSE, ...)
{
  ## Drop-in replacement for lme4::lmer() with an identical signature.
  ## The call is captured and its function symbol rewritten to lme4::lmer,
  ## then re-evaluated in the caller's frame -- this keeps the original
  ## call (with the user's own argument expressions) inside the fit.
  mc <- match.call()
  mc[[1]] <- quote(lme4::lmer)
  model <- eval.parent(mc)
  ## Recast the lme4 result to this package's S4 class so that the
  ## anova()/summary() methods defined here are dispatched on it.
  if(inherits(model, "merMod"))
    model <- as(model,"merModLmerTest")
  return(model)
}
## ANOVA method for models fitted with this package's lmer() wrapper.
## For a single model it augments the lme4 table with F-tests using
## Satterthwaite (or Kenward-Roger) denominator degrees of freedom; for
## model comparisons, or when ddf = "lme4", the lme4 result is returned
## unchanged.
setMethod("anova", signature(object="merModLmerTest"),
          function(object, ..., ddf="Satterthwaite", type=3)
          {
            mCall <- match.call(expand.dots = TRUE)
            dots <- list(...)
            ## Did the caller pass further models in `...`? Then this is a
            ## model comparison: delegate entirely to the parent method.
            modp <- if (length(dots))
              sapply(dots, is, "merModLmerTest") | sapply(dots, is, "merMod") |
              sapply(dots, is, "lm") else logical(0)
            if (any(modp)) {
              return(callNextMethod())
            }
            else
            {
              ## Single-model ANOVA: start from the plain lme4 table.
              cnm <- callNextMethod()
              if(!is.null(ddf) && ddf=="lme4")
                return(cnm)
              {
                table <- cnm
                ## errors in specifying the parameters
                ddf <- checkNameDDF(ddf)
                ## calcANOVA is package-internal; on any computational
                ## failure we keep the lme4 table and tell the user.
                an.table <- tryCatch({calcANOVA(model=object, ddf=ddf, type=type)}
                                     , error = function(e) { NULL })
                if(!is.null(an.table))
                {
                  table <- an.table
                  attr(table, "heading") <-
                    paste("Analysis of Variance Table of type", as.roman(type) ,
                          " with ", ddf,
                          "\napproximation for degrees of freedom")
                }
                else
                  message("anova from lme4 is returned\nsome computational error has occurred in lmerTest")
                class(table) <- c("anova", "data.frame")
                return(table)
              }
            }
          })
## summary() method: like lme4's summary, but the coefficient table gains
## df, t and p-value columns from a Satterthwaite (or KR) approximation.
## ddf = "lme4" returns the untouched lme4 summary.
setMethod("summary", signature(object = "merModLmerTest"),
          function(object, ddf="Satterthwaite", ...){
            if(!is.null(ddf) && ddf=="lme4"){
              ## Strip the package class so the plain lmerMod summary runs.
              ## NOTE(review): class(object) == "merModLmerTest" compares the
              ## whole class vector; inherits() would be the robust test --
              ## here dispatch should guarantee a match, confirm.
              if(class(object) == "merModLmerTest")
                return(summary(as(object, "lmerMod")))
              #return(cl)
            }else{
              ## commented callNextMethod
              ## since it produces warning, summary cannot have multiple arguments
              ##cl <- callNextMethod()
              if(class(object) == "merModLmerTest")
                cl <- summary(as(object, "lmerMod"))
              #errors in specifying the parameters
              ddf <- checkNameDDF(ddf)
              ## calcSummary (package-internal) computes the approximate df,
              ## t statistics and p values; fall back to the plain lme4
              ## summary on any internal failure.
              tsum <- tryCatch( {calcSummary(object, ddf)},
                                error = function(e) { NULL })
              if(is.null(tsum)){
                message("summary from lme4 is returned\nsome computational error has occurred in lmerTest")
                return(cl)
              }
              ## Keep Estimate and Std. Error, append df / t / p columns.
              coefs.satt <- cbind(cl$coefficients[,1:2, drop = FALSE], tsum$df,
                                  tsum$tvalue, tsum$tpvalue)
              cl$coefficients <- coefs.satt
              colnames(cl$coefficients)[3:5] <- c("df","t value","Pr(>|t|)")
            }
            ## Annotate the title with the approximation used.
            ## NOTE(review): if the ddf == "lme4" branch falls through without
            ## returning, `cl` is unset here -- looks unreachable in practice;
            ## confirm.
            cl$methTitle <- paste(cl$methTitle, "\nt-tests use ", ddf,
                                  "approximations to degrees of freedom")
            return(cl)
          })
## Satterthwaite-approximation F test for the contrast(s) given by the
## matrix L, applied to an lme4 fit. Returns the result of the internal
## calcSatt() routine.
calcSatterth <- function(model, L){
  if(!inherits(model, "lmerMod"))
    stop("The model is not linear mixed effects model")
  rho <- list() ## list collecting info about the model
  rho <- rhoInit(rho, model) ## save lmer outcome in the rho list
  ## calcApvar is package-internal; presumably the asymptotic covariance of
  ## the variance parameters -- TODO confirm against the package sources.
  rho$A <- calcApvar(rho)
  calcSatt(rho, L, calcSS = FALSE)
}
rand <- function(model, ...)
{
  ## Tests of the random-effects terms of an lme4 fit. All terms are
  ## tested (no elimination) at the same fixed alpha as the original.
  if (!inherits(model, "lmerMod"))
    stop("The model is not linear mixed effects model")
  rand.tab <- testrand(model, reduce.random = FALSE, keep.effs = NULL,
                       alpha.random = 0.1)
  ## Wrap the table so print.rand() dispatches on the result.
  structure(list(rand.table = rand.tab), class = "rand")
}
print.rand <- function(x, ...)
{
  ## S3 print method for rand(): header plus a formatted coefficient-style
  ## table whose last column is treated as p-values.
  cat("Analysis of Random effects Table:\n")
  if (!is.null(x)) {
    tab <- x$rand.table
    ## Integer-like columns (Chi.DF, elim.num) get test-statistic digits.
    int.cols <- c(which(colnames(tab) == "Chi.DF"),
                  which(colnames(tab) == "elim.num"))
    printCoefmat(tab, digits = 3, dig.tst = 1, tst.ind = int.cols,
                 P.values = TRUE, has.Pvalue = TRUE)
  }
}
## Deprecated alias kept for backward compatibility: emits a deprecation
## warning, then forwards all arguments unchanged to lsmeansLT().
lsmeans <- function(model, test.effs=NULL, ddf = "Satterthwaite", ...){
  .Deprecated("lsmeansLT", package = "lmerTest")
  lsmeansLT(model, test.effs=test.effs, ddf = ddf)
}
lsmeansLT <- function(model, test.effs=NULL, ddf = "Satterthwaite", ...)
{
  ## Least-squares means for the (selected) fixed effects of an lme4 fit,
  ## with the requested degrees-of-freedom approximation.
  if (!inherits(model, "lmerMod"))
    stop("The model is not linear mixed effects model")
  ddf <- checkNameDDF(ddf)
  ## lsmeans.calc is package-internal; lsmeansORdiff = TRUE asks for the
  ## means themselves (difflsmeans() passes FALSE), alpha fixed at 0.05.
  calc <- lsmeans.calc(model, 0.05, test.effs = test.effs,
                       lsmeansORdiff = TRUE, ddf)
  structure(list(lsmeans.table = calc$summ.data,
                 response = calc$response),
            class = "lsmeansLT")
}
print.lsmeansLT <- function(x, ...)
{
  ## S3 print method for lsmeansLT(): header plus a formatted table.
  cat("Least Squares Means table:\n")
  tab <- data.matrix(x$lsmeans.table)
  est.col <- which(colnames(tab) == "Estimate")
  ## Columns before "Estimate" hold factor levels; they and "DF" are
  ## printed with test-statistic digits. Last column holds p-values.
  printCoefmat(tab, dig.tst = 1,
               tst.ind = c(1:(est.col - 1),
                           which(colnames(tab) == "DF")),
               digits = 3, P.values = TRUE, has.Pvalue = TRUE)
}
# plot.lsmeans <- function(x, main = NULL, cex = 1.4, effs = NULL, mult = TRUE, ...){
# .Deprecated("plot.lsmeansLT", package = "lmerTest")
# plot.lsmeansLT(x, main = main, cex = cex, effs = effs, mult = mult)
# }
plot.lsmeansLT <- function(x, main = NULL, cex = 1.4, effs = NULL, mult = TRUE, ...)
{
  ## S3 plot method for lsmeansLT(): draws the LSMEANS plot via the
  ## package-internal plotLSMEANS(); does nothing for an empty table.
  tab <- x$lsmeans.table
  if (is.null(tab) || nrow(tab) == 0)
    return(invisible(NULL))
  plotLSMEANS(tab, x$response, "LSMEANS",
              main = main, cex = cex, effs = effs, mult = mult)
}
difflsmeans <- function(model, test.effs=NULL, ddf = "Satterthwaite", ...)
{
  ## Pairwise differences of least-squares means for an lme4 fit.
  ## Mirrors lsmeansLT() but requests differences (lsmeansORdiff = FALSE).
  if (!inherits(model, "lmerMod"))
    stop("The model is not linear mixed effects model")
  ddf <- checkNameDDF(ddf)
  calc <- lsmeans.calc(model, 0.05, test.effs = test.effs,
                       lsmeansORdiff = FALSE, ddf)
  structure(list(diffs.lsmeans.table = calc$summ.data,
                 response = calc$response),
            class = "difflsmeans")
}
print.difflsmeans <- function(x, ...)
{
  ## S3 print method for difflsmeans(): header plus formatted table of
  ## pairwise differences of least-squares means.
  cat("Differences of LSMEANS:\n")
  tab <- data.matrix(x$diffs.lsmeans.table)
  est.col <- which(colnames(tab) == "Estimate")
  ## Columns before "Estimate" and the "DF" column get test-statistic
  ## digits; the last column is treated as p-values.
  printCoefmat(tab, dig.tst = 1,
               tst.ind = c(1:(est.col - 1),
                           which(colnames(tab) == "DF")),
               digits = 3, P.values = TRUE, has.Pvalue = TRUE)
}
plot.difflsmeans <- function(x, main = NULL, cex = 1.4, effs = NULL,
                             mult = TRUE, ...)
{
  ## S3 plot method for difflsmeans(): draws the DIFF-of-LSMEANS plot via
  ## the package-internal plotLSMEANS(); does nothing for an empty table.
  tab <- x$diffs.lsmeans.table
  if (is.null(tab) || nrow(tab) == 0)
    return(invisible(NULL))
  plotLSMEANS(tab, x$response, "DIFF of LSMEANS",
              main = main, cex = cex, effs = effs, mult = mult)
}
alpha.fixed = 0.05, reduce.fixed = TRUE, reduce.random = TRUE,
fixed.calc=TRUE ,lsmeans.calc=TRUE, difflsmeans.calc=TRUE,
test.effs=NULL, keep.effs = NULL,...)
{
if(!inherits(model, "lmerMod"))
stop("The model is not linear mixed effects model")
ddf <- checkNameDDF(ddf)
result <- stepFun(model = model, ddf = ddf , type = type,
alpha.random = alpha.random,
alpha.fixed = alpha.fixed,
reduce.fixed = reduce.fixed,
reduce.random = reduce.random,
fixed.calc = fixed.calc,
lsmeans.calc = lsmeans.calc,
difflsmeans.calc = difflsmeans.calc,
test.effs = test.effs,
keep.effs = keep.effs, change.contr = TRUE)
class(result) <- "step"
result
}
print.step <- function(x, ...)
{
if(!is.null(x$rand.table))
{
cat("\nRandom effects:\n")
x$rand.table[,"p.value"] <- format.pval(x$rand.table[,"p.value"], digits=4,
eps=1e-7)
x$rand.table[,"Chi.sq"] <- round(x$rand.table[,"Chi.sq"], 2)
print(x$rand.table)
}
if(is.null(x$anova.table)){
}else{
if(nrow(x$anova.table) != 0)
{
if(class(x$model) == "lm" | class(x$model) == "gls")
{
cat("\nFixed effects:\n")
print(x$anova.table)
cat("\nLeast squares means:\n")
print(x$lsmeans.table)
cat("\nFinal model:\n")
print(x$model)
return()
}
else
{
cat("\nFixed effects:\n")
x$anova.table[,"Pr(>F)"] <- format.pval(x$anova.table[,"Pr(>F)"],
digits=4, eps=1e-7)
x$anova.table[,c("Sum Sq","Mean Sq", "F.value")] <-
round(x$anova.table[,c("Sum Sq","Mean Sq", "F.value")],4)
x$anova.table[,"DenDF"] <- round(x$anova.table[,"DenDF"],2)
print(x$anova.table)
if(!is.null(x$lsmeans.table))
{
cat("\nLeast squares means:\n")
printCoefmat(x$lsmeans.table, dig.tst=3 ,
tst.ind=c(1:(which(colnames(x$lsmeans.table)=="Estimate")-1),
which(colnames(x$lsmeans.table)=="DF")),
digits=3 , P.values = TRUE, has.Pvalue=TRUE)
}
if(!is.null(x$diffs.lsmeans.table))
{
cat("\n Differences of LSMEANS:\n")
printCoefmat(x$diffs.lsmeans.table, dig.tst=1 ,
tst.ind=c(1:(which(colnames(x$diffs.lsmeans.table)==
"Estimate")-1),
which(colnames(x$diffs.lsmeans.table)=="DF")),
digits=3 , P.values = TRUE, has.Pvalue=TRUE)
}
}
}
else
print(x$anova.table)
}
cat("\nFinal model:\n")
print(x$model@call)
}
plot.step <- function(x, main = NULL, cex = 1.4,
which.plot = c("LSMEANS", "DIFF of LSMEANS"),
effs = NULL, mult = TRUE, ...)
{
if(!is.null(x$lsmeans.table) && nrow(x$lsmeans.table)>0 && ("LSMEANS" %in% which.plot)){
if(length(which.plot) == 1 && which.plot == "LSMEANS")
return(plotLSMEANS(x$lsmeans.table, x$response, "LSMEANS", main = main, cex = cex,
effs = effs, mult = mult))
}
if(!is.null(x$diffs.lsmeans.table) && nrow(x$diffs.lsmeans.table)>0
&& ("DIFF of LSMEANS" %in% which.plot))
plotLSMEANS(x$diffs.lsmeans.table, x$response, "DIFF of LSMEANS",
main = main, cex = cex, effs = effs, mult = mult)
}
lmer <- function(formula, data = NULL, REML = TRUE,
control = lmerControl(), start = NULL, verbose = 0L,
subset, weights, na.action, offset, contrasts = NULL,
devFunOnly = FALSE, ...)
{
mc <- match.call()
mc[[1]] <- quote(lme4::lmer)
model <- eval.parent(mc)
if(inherits(model, "merMod"))
model <- as(model,"merModLmerTest")
return(model)
}
setMethod("anova", signature(object="merModLmerTest"),
function(object, ..., ddf="Satterthwaite", type=3)
{
mCall <- match.call(expand.dots = TRUE)
dots <- list(...)
modp <- if (length(dots))
sapply(dots, is, "merModLmerTest") | sapply(dots, is, "merMod") |
sapply(dots, is, "lm") else logical(0)
if (any(modp)) {
return(callNextMethod())
}
else
{
cnm <- callNextMethod()
if(!is.null(ddf) && ddf=="lme4")
return(cnm)
{
table <- cnm
## errors in specifying the parameters
ddf <- checkNameDDF(ddf)
an.table <- tryCatch({calcANOVA(model=object, ddf=ddf, type=type)}
, error = function(e) { NULL })
if(!is.null(an.table))
{
table <- an.table
attr(table, "heading") <-
paste("Analysis of Variance Table of type", as.roman(type) ,
" with ", ddf,
"\napproximation for degrees of freedom")
}
else
message("anova from lme4 is returned\nsome computational error has occurred in lmerTest")
class(table) <- c("anova", "data.frame")
return(table)
}
}
})
## summary() for merModLmerTest objects: augments the lme4 coefficient
## table with denominator degrees of freedom and t-tests based on the
## requested approximation (ddf = "Satterthwaite" or "Kenward-Roger").
## ddf = "lme4" returns the untouched lme4 summary without the t-tests.
setMethod("summary", signature(object = "merModLmerTest"),
          function(object, ddf="Satterthwaite", ...){
            if(!is.null(ddf) && ddf=="lme4"){
              ## user explicitly asked for the plain lme4 summary.
              ## is() replaces the original `class(object) == ...`:
              ## it also covers subclasses and cannot error when
              ## class() returns a vector of length > 1 (consistent
              ## with the sapply(dots, is, ...) usage in anova above).
              if(is(object, "merModLmerTest"))
                return(summary(as(object, "lmerMod")))
              #return(cl)
            }else{
              ## commented callNextMethod
              ## since it produces warning, summary cannot have multiple arguments
              ##cl <- callNextMethod()
              if(is(object, "merModLmerTest"))
                cl <- summary(as(object, "lmerMod"))
              ## validate/normalize the ddf argument
              ddf <- checkNameDDF(ddf)
              tsum <- tryCatch( {calcSummary(object, ddf)},
                                error = function(e) { NULL })
              if(is.null(tsum)){
                message("summary from lme4 is returned\nsome computational error has occurred in lmerTest")
                return(cl)
              }
              ## replace the z-based columns with df, t value and p value
              coefs.satt <- cbind(cl$coefficients[,1:2, drop = FALSE], tsum$df,
                                  tsum$tvalue, tsum$tpvalue)
              cl$coefficients <- coefs.satt
              colnames(cl$coefficients)[3:5] <- c("df","t value","Pr(>|t|)")
            }
            cl$methTitle <- paste(cl$methTitle, "\nt-tests use ", ddf,
                                  "approximations to degrees of freedom")
            return(cl)
          })
## Satterthwaite test for the contrast(s) in L applied to a fitted lmer
## model.  Builds the internal model environment, attaches the asymptotic
## covariance of the variance parameters, then delegates to calcSatt()
## (without sums of squares).
calcSatterth <- function(model, L){
  if (!inherits(model, "lmerMod")) {
    stop("The model is not linear mixed effects model")
  }
  env <- rhoInit(list(), model)   # collect the lmer fit internals
  env$A <- calcApvar(env)         # asymptotic covariance of variance parameters
  calcSatt(env, L, calcSS = FALSE)
}
## Likelihood-ratio tests for the random-effect terms of a fitted lmer
## model.  Returns an object of class "rand" holding the test table.
rand <- function(model, ...)
{
  if (!inherits(model, "lmerMod")) {
    stop("The model is not linear mixed effects model")
  }
  rand.table <- testrand(model, reduce.random = FALSE, keep.effs = NULL,
                         alpha.random = 0.1)
  structure(list(rand.table = rand.table), class = "rand")
}
## Print method for "rand" objects: a header line followed by the test
## table formatted with printCoefmat (Chi.DF and elim.num rendered as
## test-statistic columns, last column treated as p-values).
print.rand <- function(x, ...)
{
  cat("Analysis of Random effects Table:\n")
  if (is.null(x)) {
    return(invisible(x))
  }
  tab <- x$rand.table
  int.cols <- c(which(colnames(tab) == "Chi.DF"),
                which(colnames(tab) == "elim.num"))
  printCoefmat(tab, digits = 3, dig.tst = 1, tst.ind = int.cols,
               P.values = TRUE, has.Pvalue = TRUE)
}
## Deprecated alias for lsmeansLT(); kept for backward compatibility.
## Fixed: the original dropped anything passed through `...`; all
## arguments are now forwarded to lsmeansLT().
lsmeans <- function(model, test.effs=NULL, ddf = "Satterthwaite", ...){
  .Deprecated("lsmeansLT", package = "lmerTest")
  lsmeansLT(model, test.effs = test.effs, ddf = ddf, ...)
}
## Least-squares means (population means) for the factor effects of a
## fitted lmer model.  Returns an object of class "lsmeansLT" with the
## summary table and the response name.
lsmeansLT <- function(model, test.effs=NULL, ddf = "Satterthwaite", ...)
{
  if (!inherits(model, "lmerMod")) {
    stop("The model is not linear mixed effects model")
  }
  approx <- checkNameDDF(ddf)   # validate the df-approximation name
  calc <- lsmeans.calc(model, 0.05, test.effs = test.effs,
                       lsmeansORdiff = TRUE, approx)
  structure(list(lsmeans.table = calc$summ.data, response = calc$response),
            class = "lsmeansLT")
}
## Print method for "lsmeansLT": the factor-level columns before
## "Estimate" plus the "DF" column are formatted as test statistics;
## the last column is treated as p-values.
print.lsmeansLT <- function(x, ...)
{
  cat("Least Squares Means table:\n")
  tab <- x$lsmeans.table
  stat.cols <- c(1:(which(colnames(tab) == "Estimate") - 1),
                 which(colnames(tab) == "DF"))
  printCoefmat(data.matrix(tab), dig.tst = 1, tst.ind = stat.cols,
               digits = 3, P.values = TRUE, has.Pvalue = TRUE)
}
# plot.lsmeans <- function(x, main = NULL, cex = 1.4, effs = NULL, mult = TRUE, ...){
# .Deprecated("plot.lsmeansLT", package = "lmerTest")
# plot.lsmeansLT(x, main = main, cex = cex, effs = effs, mult = mult)
# }
## Plot method for "lsmeansLT": draws the least-squares-means plots via
## the internal plotLSMEANS() helper.  Does nothing when the table is
## missing or empty.
plot.lsmeansLT <- function(x, main = NULL, cex = 1.4, effs = NULL, mult = TRUE, ...)
{
  tab <- x$lsmeans.table
  if (!is.null(tab) && nrow(tab) > 0) {
    plotLSMEANS(tab, x$response, "LSMEANS", main = main, cex = cex,
                effs = effs, mult = mult)
  }
}
## Pairwise differences of least-squares means for a fitted lmer model.
## Returns an object of class "difflsmeans" with the summary table and
## the response name.
difflsmeans <- function(model, test.effs=NULL, ddf = "Satterthwaite", ...)
{
  if (!inherits(model, "lmerMod")) {
    stop("The model is not linear mixed effects model")
  }
  approx <- checkNameDDF(ddf)   # validate the df-approximation name
  calc <- lsmeans.calc(model, 0.05, test.effs = test.effs,
                       lsmeansORdiff = FALSE, approx)
  structure(list(diffs.lsmeans.table = calc$summ.data,
                 response = calc$response),
            class = "difflsmeans")
}
## Print method for "difflsmeans": the columns before "Estimate" plus
## the "DF" column are formatted as test statistics; the last column is
## treated as p-values.
print.difflsmeans <- function(x, ...)
{
  cat("Differences of LSMEANS:\n")
  tab <- x$diffs.lsmeans.table
  stat.cols <- c(1:(which(colnames(tab) == "Estimate") - 1),
                 which(colnames(tab) == "DF"))
  printCoefmat(data.matrix(tab), dig.tst = 1, tst.ind = stat.cols,
               digits = 3, P.values = TRUE, has.Pvalue = TRUE)
}
plot.difflsmeans <- function(x, main = NULL, cex = 1.4, effs = NULL,
mult = TRUE, ...)
{
#plots for DIFF of LSMEANS
if(!is.null(x$diffs.lsmeans.table) && nrow(x$diffs.lsmeans.table)>0)
plotLSMEANS(x$diffs.lsmeans.table, x$response, "DIFF of LSMEANS",
main = main, cex = cex, effs = effs, mult = mult)
} |
library(CAvariants)
### Name: emerson.poly
### Title: Orthogonal polynomials
### Aliases: emerson.poly
### Keywords: nonparametric
### ** Examples
# Emerson orthogonal polynomials for the scores 1..5 with the given
# marginal weights (the weights sum to 1).  NOTE(review): see
# ?CAvariants::emerson.poly for the exact argument semantics; the
# as.vector() wrapper is redundant since c() already returns a vector.
emerson.poly(c(1,2,3,4,5), as.vector(c(.1,.2,.3,.2,.2)))
| /data/genthat_extracted_code/CAvariants/examples/emerson.poly.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 211 | r | library(CAvariants)
### Name: emerson.poly
### Title: Orthogonal polynomials
### Aliases: emerson.poly
### Keywords: nonparametric
### ** Examples
emerson.poly(c(1,2,3,4,5), as.vector(c(.1,.2,.3,.2,.2)))
|
# Classical multidimensional scaling (2-D) of a tab-separated table.
# Usage: Rscript cmdscale.R <infile> <outfile>
#   infile:  TSV with a header row and row names in the first column
#   outfile: two numbers per line (the 2-D coordinates of each row)
args = commandArgs(trailingOnly=T)
infile = args[1]
outfile = args[2]
data = read.table(infile, header=T, sep="\t", row.names=1)
# project the Euclidean distance matrix onto the first two principal coordinates
data.cmd = cmdscale(dist(data))
# `ncolumns` spelled out in full: the original `ncolumn` relied on
# partial argument matching, which is fragile and flagged by R CMD check
write(t(data.cmd), outfile, ncolumns=2)
| /lib/R/scripts/cmdscale.R | permissive | exabugs/node-searcher | R | false | false | 204 | r | args = commandArgs(trailingOnly=T)
infile = args[1]
outfile = args[2]
data = read.table(infile, header=T, sep="\t", row.names=1)
data.cmd = cmdscale(dist(data))
write(t(data.cmd), outfile, ncolumn=2)
|
# This Code is Written to Utilize the Quantmod Package to extract
# the top 5 cryptocurrencies by market cap
# The selection was based on
# NOTE(review): the sentence above is unfinished -- presumably "based on
# market capitalization at the time of writing"; confirm with the author.
library(quantmod)
# Fetch daily price series (quoted in USD) for Bitcoin, Ethereum, Ripple,
# Bitcoin Cash and Litecoin; getSymbols() assigns one xts object per
# ticker into the calling environment (default data source is Yahoo
# Finance -- confirm against the quantmod docs for the installed version).
getSymbols(c("BTC-USD","ETH-USD","XRP-USD","BCH-USD","LTC-USD"))
| /Code/DataCollection/01_CollectingHistoricalData.R | no_license | fmegahed/cryptopredictions | R | false | false | 232 | r | # This Code is Written to Utilize the Quantmod Package to extract
# the top 5 cryptocurrencies by market cap
# The selection was based on
library(quantmod)
getSymbols(c("BTC-USD","ETH-USD","XRP-USD","BCH-USD","LTC-USD"))
|
# Fit a joint species distribution model (JSDM) to butterfly
# presence/absence (PA) data with boral (Bayesian ordination and
# regression via JAGS).  Assumes "Butterfly_Compiled.csv" holds 55
# species PA columns followed by 4 environmental covariate columns --
# TODO confirm the column layout against the data file.
# Load packages
library(boral)
# Read in data
data <- read.csv("Butterfly_Compiled.csv")
PA <- data[,1:55]
Env <- data[,56:59]
# model
# system.time() wraps the fit to report elapsed time; the MCMC settings
# (10k burn-in, 60k iterations, thinning of 50, fixed seed) follow the
# set-up noted inline below.
system.time(
JSDM <- boral(PA, # PA data
X = Env, # Covariates, DO NOT INCLUDE INTERCEPT COLUMN
family = "binomial", # makes model use PA data, probit link
num.lv = 2, # set number of latent variables
save.model = TRUE, # saves JAGS model as a txt file, allows coda package to analyse MCMC
mcmc.control = list(n.burnin = 10000, # mcmc set up. using set up from Warton TREE paper
n.iteration = 60000,
n.thin = 50,
seed = 28041948),
model.name = NULL,
prior.control = list(type = c("normal","normal","normal","uniform"),
hypparams = c(100, 20, 1, 50), ssvs.index = -1, ssvs.g = 1e-6)) # name of saved txt file. Can change, but default means dont have to change code between models
)
# NOTE(review): the trailing comment on the prior.control line actually
# describes the model.name argument above it -- the comment drifted.
| /Boral - Butterfly - NP/boralJSDM.R | no_license | dansmi-hub/JSDM_Inference | R | false | false | 1,067 | r | # Load packages
library(boral)
# Read in data
data <- read.csv("Butterfly_Compiled.csv")
PA <- data[,1:55]
Env <- data[,56:59]
# model
system.time(
JSDM <- boral(PA, # PA data
X = Env, # Covariates, DO NOT INCLUDE INTERCEPT COLUMN
family = "binomial", # makes model use PA data, probit link
num.lv = 2, # set number of latent variables
save.model = TRUE, # saves JAGS model as a txt file, allows coda package to analyse MCMC
mcmc.control = list(n.burnin = 10000, # mcmc set up. using set up from Warton TREE paper
n.iteration = 60000,
n.thin = 50,
seed = 28041948),
model.name = NULL,
prior.control = list(type = c("normal","normal","normal","uniform"),
hypparams = c(100, 20, 1, 50), ssvs.index = -1, ssvs.g = 1e-6)) # name of saved txt file. Can change, but default means dont have to change code between models
)
|
# Quadratic regression of muscle mass on age, with residual diagnostics,
# normality checks and (at the bottom) grouped boxplots via ggpubr.
# Expects "MuscleMass.csv" in the working directory with columns
# Age and MuscleMass.
getwd()
MuscleMass<-read.csv("MuscleMass.csv")
MuscleMass
x <- MuscleMass$Age
y <- MuscleMass$MuscleMass
# Fit MuscleMass ~ Age + Age^2 (I() protects the square in the formula).
quadmodel <- lm(y ~ x + I(x^2),data=MuscleMass)
summary(quadmodel)
x_seq <- seq(0,100,0.1)
# NOTE(review): the newdata list names `x` twice; predict() only needs
# the `x` component (the squared term is recomputed from the formula),
# so the second entry is redundant -- confirm and drop it.
predicted<-predict(quadmodel,list(x=x_seq,x=x_seq^2))
plot(x,y,pch=16,xlab="Age",ylab="MuscleMass",main="Quadratic Model Fit",col="blue")
lines(x_seq,predicted,col="pink",lwd=3)
MuscleMass$Age
#Print Quadratic Residuals and test nonconstant variance with mean zero
#Age variable are identically and independently distributed
#The errors are iid
quadmodel$residuals
# Residuals vs predictor and vs fitted values: look for non-random patterns.
plot(x,quadmodel$residuals,col="red",pch=20)
plot(quadmodel$fitted.values,quadmodel$residuals,col="red",pch=20)
#Is the model a good fit?
plot(quadmodel)
#Are the variables normally distributed
par(mfrow=c(1,1))
qqnorm(MuscleMass$Age,col="red",pch=20,main="Normal Q-Q Plot Age")
qqline(MuscleMass$Age,col="steelblue",lwd=2)
qqnorm(MuscleMass$MuscleMass,col="red",pch=20,main="Normal Q-Q Plot Muscle Mass")
qqline(MuscleMass$MuscleMass,col="steelblue",lwd=2)
#histogram
hist(x,col="blue")
hist(y,col="red")
plot(y,type="o",lwd=3,col="red",xlab="Age of Woman",main="Over Plot Muscle Mass vs. Age")
# NOTE(review): install.packages() inside a script re-installs on every
# run; better done once interactively.
install.packages("ggpubr")
library(ggpubr)
# NOTE(review): MuscleMass_Group is used below but never created in this
# script -- presumably it exists in the saved workspace; confirm, or add
# the code that builds it from MuscleMass.
MuscleMass_Group
MuscleMass_Group<-MuscleMass_Group[-c(2)]
MuscleMass_Group<-as.data.frame(MuscleMass_Group)
MuscleMass_Group
str(MuscleMass_Group)
# Make Group an ordered factor so the boxplot ordering is deterministic.
MuscleMass_Group$Group <- ordered(MuscleMass_Group$Group,levels=c("Age40","Age50","Age60","Age70"))
levels(MuscleMass_Group$Group)
ggboxplot(MuscleMass_Group, x = "Group", y = "MuscleMass",
color = "Group", palette = c("red", "blue","black","green"),
order = c("Age40", "Age50", "Age60","Age70"),
ylab = "MuscleMass", xlab = "Age")
| /MuslceMass.R | no_license | Soulstealer07/PolynomialRegression | R | false | false | 1,767 | r | getwd()
MuscleMass<-read.csv("MuscleMass.csv")
MuscleMass
x <- MuscleMass$Age
y <- MuscleMass$MuscleMass
quadmodel <- lm(y ~ x + I(x^2),data=MuscleMass)
summary(quadmodel)
x_seq <- seq(0,100,0.1)
predicted<-predict(quadmodel,list(x=x_seq,x=x_seq^2))
plot(x,y,pch=16,xlab="Age",ylab="MuscleMass",main="Quadratic Model Fit",col="blue")
lines(x_seq,predicted,col="pink",lwd=3)
MuscleMass$Age
#Print Quadratic Residuals and test nonconstant variance with mean zero
#Age variable are identically and independently distributed
#The errors are iid
quadmodel$residuals
plot(x,quadmodel$residuals,col="red",pch=20)
plot(quadmodel$fitted.values,quadmodel$residuals,col="red",pch=20)
#Is the model a good fit?
plot(quadmodel)
#Are the variables normally distributed
par(mfrow=c(1,1))
qqnorm(MuscleMass$Age,col="red",pch=20,main="Normal Q-Q Plot Age")
qqline(MuscleMass$Age,col="steelblue",lwd=2)
qqnorm(MuscleMass$MuscleMass,col="red",pch=20,main="Normal Q-Q Plot Muscle Mass")
qqline(MuscleMass$MuscleMass,col="steelblue",lwd=2)
#histogram
hist(x,col="blue")
hist(y,col="red")
plot(y,type="o",lwd=3,col="red",xlab="Age of Woman",main="Over Plot Muscle Mass vs. Age")
install.packages("ggpubr")
library(ggpubr)
MuscleMass_Group
MuscleMass_Group<-MuscleMass_Group[-c(2)]
MuscleMass_Group<-as.data.frame(MuscleMass_Group)
MuscleMass_Group
str(MuscleMass_Group)
MuscleMass_Group$Group <- ordered(MuscleMass_Group$Group,levels=c("Age40","Age50","Age60","Age70"))
levels(MuscleMass_Group$Group)
ggboxplot(MuscleMass_Group, x = "Group", y = "MuscleMass",
color = "Group", palette = c("red", "blue","black","green"),
order = c("Age40", "Age50", "Age60","Age70"),
ylab = "MuscleMass", xlab = "Age")
|
library(plantecophys)
### Name: AciC4
### Title: C4 Photosynthesis
### Aliases: AciC4
### ** Examples
# Simulate a C4 A-Ci curve.
# AciC4() models C4 photosynthesis over a range of intercellular CO2
# concentrations (Ci = 5..600 in 101 steps); ALEAF is the modelled net
# leaf assimilation rate -- see ?AciC4 for units and other outputs.
aci <- AciC4(Ci=seq(5,600, length=101))
with(aci, plot(Ci, ALEAF, type='l', ylim=c(0,max(ALEAF))))
| /data/genthat_extracted_code/plantecophys/examples/AciC4.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 237 | r | library(plantecophys)
### Name: AciC4
### Title: C4 Photosynthesis
### Aliases: AciC4
### ** Examples
# Simulate a C4 A-Ci curve.
aci <- AciC4(Ci=seq(5,600, length=101))
with(aci, plot(Ci, ALEAF, type='l', ylim=c(0,max(ALEAF))))
|
#!/usr/bin/env R
# Run 1 iteration of Boruta permutations
# One bootstrap iteration of Boruta feature selection with a
# random-forest importance measure.
#   labeltoken: string appended to the output .rda filename
#   seed:       RNG seed for the bootstrap sample selection
# Side effects: loads "sesetfilt_degseahack_targetaml.rda" (assumed to
# provide `degfilt.se`, a SummarizedExperiment with a deg.risk column --
# TODO confirm) and saves the Boruta result as borutadat_rf_iter<token>.rda.
borutaboot_1iter_rf <- function(labeltoken, seed){
fnstem = "borutadat_rf_iter" # filename stem
library(SummarizedExperiment)
library(Boruta)
message("loading rda objects and functions...")
load("sesetfilt_degseahack_targetaml.rda")
# Importance Functions
# Random-forest importance used by Boruta: fits a regression forest on
# the numeric-coded classes and returns the first importance column.
impRF <- function(df, classes, ntrees=100, seed=2019){
require(randomForest)
set.seed(seed)
class <- as.numeric(classes)
rffit <- randomForest(class ~ ., data = as.matrix(df), ntree = ntrees,proximity = TRUE)
# rfimp <- as.numeric(getRFIvar(rfmodel=rffit))
rfimp <- importance(rffit)[,1]
return(rfimp)
}
message("extracting data and classes...")
classes <- as.character(degfilt.se$deg.risk)
data <- t(assay(degfilt.se))   # samples in rows, genes in columns
it <- list() # iteration list with by-gene summaries
si <- seq(1, nrow(data), 1) # sample (row) indices
si.rg0 <- si[which(degfilt.se$deg.risk==0)]
si.rg1 <- si[which(degfilt.se$deg.risk==1)]
# get random sample subset
# NOTE(review): the subset sizes (40 of risk group 0, 51 of risk group 1)
# are hard-coded -- confirm they match the available group sizes.
message("retrieving randomized data subset objects...")
set.seed(seed)
train.rg0 <- sample(si.rg0, 40)
train.rg1 <- sample(si.rg1, 51)
trainindices <- c(train.rg0, train.rg1)
dfi <- data[trainindices,]
classesi <- classes[trainindices]
# run boruta permutations
message("running boruta permutations...")
bdat <- Boruta(x = dfi,
y = classesi,
getImp = impRF)
# save data
message("completed boruta permutations. Saving...")
save(bdat, file= paste0(fnstem,labeltoken,".rda",collapse=""))
message("returning...")
}
suppressWarnings(borutaboot_1iter_rf(labeltoken = commandArgs(T)[1],
seed = commandArgs(T)[2])) | /online_supplement/programming_resources/fig4_boruta_bootstraps/scripts_results/borutaboot_iter_rf.R | permissive | NCBI-Hackathons/ConsensusML | R | false | false | 1,721 | r | #!/usr/bin/env R
# Run 1 iteration of Boruta permutations
borutaboot_1iter_rf <- function(labeltoken, seed){
fnstem = "borutadat_rf_iter" # filename stem
library(SummarizedExperiment)
library(Boruta)
message("loading rda objects and functions...")
load("sesetfilt_degseahack_targetaml.rda")
# Importance Functions
impRF <- function(df, classes, ntrees=100, seed=2019){
require(randomForest)
set.seed(seed)
class <- as.numeric(classes)
rffit <- randomForest(class ~ ., data = as.matrix(df), ntree = ntrees,proximity = TRUE)
# rfimp <- as.numeric(getRFIvar(rfmodel=rffit))
rfimp <- importance(rffit)[,1]
return(rfimp)
}
message("extracting data and classes...")
classes <- as.character(degfilt.se$deg.risk)
data <- t(assay(degfilt.se))
it <- list() # iteration list with by-gene summaries
si <- seq(1, nrow(data), 1) # sample (row) indices
si.rg0 <- si[which(degfilt.se$deg.risk==0)]
si.rg1 <- si[which(degfilt.se$deg.risk==1)]
# get random sample subset
message("retrieving randomized data subset objects...")
set.seed(seed)
train.rg0 <- sample(si.rg0, 40)
train.rg1 <- sample(si.rg1, 51)
trainindices <- c(train.rg0, train.rg1)
dfi <- data[trainindices,]
classesi <- classes[trainindices]
# run boruta permutations
message("running boruta permutations...")
bdat <- Boruta(x = dfi,
y = classesi,
getImp = impRF)
# save data
message("completed boruta permutations. Saving...")
save(bdat, file= paste0(fnstem,labeltoken,".rda",collapse=""))
message("returning...")
}
suppressWarnings(borutaboot_1iter_rf(labeltoken = commandArgs(T)[1],
seed = commandArgs(T)[2])) |
#' Retrieve package information
#'
#' @section Usage:
#' \preformatted{
#' myPackage <- CranPackage$new("package_name")
#' myPackage$get_downloads()
#' myPackage$get_check_results()
#' myPackage$get_title()
#' myPackage$get_description()
#' myPackage$get_version()
#' myPackage$get_r_dep()
#' myPackage$get_imports()
#' myPackage$get_suggests()
#' myPackage$get_publish_date()
#' myPackage$get_license()
#' myPackage$get_authors()
#' myPackage$get_maintainer()
#' myPackage$get_urls()
#' }
#'
#' @section Arguments:
#' \describe{
#' \item{package_name}{Name of the R packge.}
#' }
#'
#' @section Details:
#'
#' To create \code{CranPackage} objects, you need to use \code{CranPackage$new("package_name")}.
#'
#' \code{myPackage$get_downloads()} will return the downloads of the package
#' from the RStudio CRAN mirror for the last day, last week, last month and
#' total downloads.
#'
#' \code{myPackage$get_check_results()} will return the CRAN check results of
#' the package.
#'
#' \code{myPackage$get_title()} will return the title of the package.
#'
#' \code{myPackage$get_description()} will return the description of the package.
#'
#' \code{myPackage$get_version()} will return the version of the package.
#'
#' \code{myPackage$get_r_dep()} will return the R dependency of the package.
#'
#' \code{myPackage$get_imports()} will return the R packages imported by the package.
#'
#' \code{myPackage$get_suggests()} will return the R packages suggested by the package.
#'
#' \code{myPackage$get_publish_date()} will return the date the package was published on CRAN.
#'
#' \code{myPackage$get_license()} will return the license under which the package has been released.
#'
#' \code{myPackage$get_authors()} will return the names of the authors of the package.
#'
#' \code{myPackage$get_maintainer()} will return the name of the maintainer of the package.
#'
#' \code{myPackage$get_urls()} will return the URLs associated with the package.
#'
#' @examples
#' \dontrun{
#' myPackage <- CranPackage$new("dplyr")
#' myPackage$get_title()
#' myPackage$get_version()
#' myPackage$get_r_dep()
#' myPackage$get_imports()
#' }
#'
#' @name CranPackage
#' @docType class
#' @format An R6 class.
#' @export
#'
NULL
# R6 wrapper class around the get_cran_*() helpers in this file.  Build
# one with CranPackage$new("pkg") and call its get_*() methods to query
# CRAN metadata for that package.
CranPackage <- R6::R6Class("CranPackage",
  public = list(
    # Name of the CRAN package this object describes.
    package_name = NULL,
    # Store the package name; validity is checked lazily by check_cran()
    # inside each get_cran_*() helper on first use.
    initialize = function(package_name = NA) {
      self$package_name <- package_name
    },
    # Download counts (latest complete day / week / month / total).
    get_downloads = function() {
      get_cran_downloads(self$package_name)
    },
    # Package title from the CRAN page.
    get_title = function() {
      get_cran_title(self$package_name)
    },
    # Description paragraph from the CRAN page.
    get_description = function() {
      get_cran_desc(self$package_name)
    },
    # Current released version string.
    get_version = function() {
      get_cran_version(self$package_name)
    },
    # Declared R dependency (the "Depends:" field).
    get_r_dep = function() {
      get_cran_r_dep(self$package_name)
    },
    # Imported packages, one per row.
    get_imports = function() {
      get_cran_imports(self$package_name)
    },
    # Suggested packages, one per row.
    get_suggests = function() {
      get_cran_suggests(self$package_name)
    },
    # Date of the current CRAN release.
    get_publish_date = function() {
      get_cran_pub_date(self$package_name)
    },
    # License string.
    get_license = function() {
      get_cran_license(self$package_name)
    },
    # Authors, one per row.
    get_authors = function() {
      get_cran_authors(self$package_name)
    },
    # Maintainer name and email.
    get_maintainer = function() {
      get_cran_maintainer(self$package_name)
    },
    # Bug-tracker and website URLs.
    get_urls = function() {
      get_cran_urls(self$package_name)
    },
    # CRAN check-results table.
    get_check_results = function() {
      # Bug fix: this previously called get_cran_results(self$package);
      # the field is `package_name`, so self$package was always NULL and
      # the method could never work.
      get_cran_results(self$package_name)
    }
  )
)
#' Downloads
#'
#' Package downloads from the RStudio CRAN mirror: the latest complete
#' day, last week, last month, and the total since logging began.
#'
#' @param package_name Name of the package.
#'
#' @examples
#' \dontrun{
#' get_cran_downloads("dplyr")
#' }
#'
#' @importFrom magrittr %>%
#'
#' @export
#'
get_cran_downloads <- function(package_name) {
  check_cran(package_name)
  # cranlogs data lags behind real time; "today - 2" is the newest
  # complete day of logs.
  latest_day <- lubridate::today() - 2
  # Sum of the download counts in one cranlogs::cran_downloads() result.
  total_of <- function(dl) sum(dl$count)
  tibble::tibble(
    latest     = total_of(cranlogs::cran_downloads(package_name,
                                                   from = latest_day, to = latest_day)),
    last_week  = total_of(cranlogs::cran_downloads(package_name, "last-week")),
    last_month = total_of(cranlogs::cran_downloads(package_name, "last-month")),
    # the RStudio mirror logs start on 2012-10-01
    total      = total_of(cranlogs::cran_downloads(package_name,
                                                   from = "2012-10-01", to = latest_day))
  )
}
#' Check results
#'
#' Return the latest CRAN check results for a package as a data frame.
#'
#' @param package_name Name of the package.
#'
#' @examples
#' \dontrun{
#' get_cran_results("dplyr")
#' }
#'
#' @export
#'
get_cran_results <- function(package_name) {
  check_cran(package_name)
  results_url <- glue::glue(
    "https://cran.r-project.org/web/checks/check_results_", package_name, ".html"
  )
  # the check-results page contains a single table of per-flavor results
  page <- mem_read_html(results_url)
  rvest::html_table(rvest::html_nodes(page, "table"))[[1]]
}
#' Title
#'
#' Retrieve the title of the package from its CRAN page.
#'
#' @param package_name Name of the R package.
#'
#' @examples
#' \dontrun{
#' get_cran_title("dplyr")
#' }
#'
#' @export
#'
get_cran_title <- function(package_name) {
  check_cran(package_name)
  pkg_url <- glue::glue("https://cran.r-project.org/package=", package_name)
  # the "<pkg>: <Title>" heading is rendered as an <h2> on the CRAN page
  page <- mem_read_html(pkg_url)
  rvest::html_text(rvest::html_nodes(page, "h2"))
}
#' Description
#'
#' Retrieve the description of the package from its CRAN page.
#'
#' @param package_name Name of the R package.
#'
#' @examples
#' \dontrun{
#' get_cran_desc("dplyr")
#' }
#'
#' @export
#'
get_cran_desc <- function(package_name) {
  check_cran(package_name)
  pkg_url <- glue::glue("https://cran.r-project.org/package=", package_name)
  page <- mem_read_html(pkg_url)
  # the first <p> element on the page holds the Description text
  paragraphs <- rvest::html_text(rvest::html_nodes(page, "p"))
  paragraphs[1]
}
#' Version
#'
#' Retrieve the latest version of the package from CRAN.
#'
#' @param package_name Name of the R package.
#'
#' @examples
#' \dontrun{
#' get_cran_version("dplyr")
#' }
#'
#' @export
#'
get_cran_version <- function(package_name) {
  # The CRAN details table has two unnamed columns: field label (X1)
  # and value (X2); pull the value of the "Version:" row.
  details <- mem_get_cran_table(package_name)
  details$X2[which(details$X1 == "Version:")]
}
#' Dependency
#'
#' Retrieve the R version on which the package depends.
#'
#' @param package_name Name of the R package.
#'
#' @examples
#' \dontrun{
#' get_cran_r_dep("dplyr")
#' }
#'
#' @export
#'
get_cran_r_dep <- function(package_name) {
  # value of the "Depends:" row in the CRAN details table
  details <- mem_get_cran_table(package_name)
  details$X2[which(details$X1 == "Depends:")]
}
#' Imports
#'
#' Retrieve the list of packages imported, one per row.
#'
#' @param package_name Name of the R package.
#'
#' @examples
#' \dontrun{
#' get_cran_imports("dplyr")
#' }
#'
#' @export
#'
get_cran_imports <- function(package_name) {
  details <- mem_get_cran_table(package_name)
  # the "Imports:" field is a single comma-separated string
  field <- details$X2[which(details$X1 == "Imports:")]
  tibble::tibble(imports = unlist(stringr::str_split(field, pattern = ", ")))
}
#' Suggests
#'
#' Retrieve the list of packages suggested, one per row.
#'
#' @param package_name Name of the R package.
#'
#' @examples
#' \dontrun{
#' get_cran_suggests("dplyr")
#' }
#'
#' @export
#'
get_cran_suggests <- function(package_name) {
  details <- mem_get_cran_table(package_name)
  # the "Suggests:" field is a single comma-separated string
  field <- details$X2[which(details$X1 == "Suggests:")]
  tibble::tibble(suggests = unlist(stringr::str_split(field, pattern = ", ")))
}
#' Published date
#'
#' Retrieve the date on which the current version was published to CRAN.
#'
#' @param package_name Name of the R package.
#'
#' @examples
#' \dontrun{
#' get_cran_pub_date("dplyr")
#' }
#'
#' @export
#'
get_cran_pub_date <- function(package_name) {
  # value of the "Published:" row in the CRAN details table
  details <- mem_get_cran_table(package_name)
  details$X2[which(details$X1 == "Published:")]
}
#' License
#'
#' Retrieve the license under which the package is released.
#'
#' @param package_name Name of the R package.
#'
#' @examples
#' \dontrun{
#' get_cran_license("dplyr")
#' }
#'
#' @export
#'
get_cran_license <- function(package_name) {
  # value of the "License:" row in the CRAN details table
  details <- mem_get_cran_table(package_name)
  details$X2[which(details$X1 == "License:")]
}
#' Authors
#'
#' Retrieve the authors of the package, one per row.
#'
#' @param package_name Name of the R package.
#'
#' @examples
#' \dontrun{
#' get_cran_authors("dplyr")
#' }
#'
#' @export
#'
get_cran_authors <- function(package_name) {
  details <- mem_get_cran_table(package_name)
  field <- details$X2[which(details$X1 == "Author:")]
  # on the CRAN page, author entries are separated by ",\n  "
  tibble::tibble(name = unlist(stringr::str_split(field, ",\n  ")))
}
#' Maintainer
#'
#' Retrieve the maintainer of the package.
#'
#' @param package_name Name of the R package.
#'
#' @examples
#' \dontrun{
#' get_cran_maintainer("dplyr")
#' }
#'
#' @export
#'
get_cran_maintainer <- function(package_name) {
  # value of the "Maintainer:" row in the CRAN details table
  details <- mem_get_cran_table(package_name)
  details$X2[which(details$X1 == "Maintainer:")]
}
#' URL
#'
#' Retrieve the URLs associated with the package (bug tracker and
#' websites), classified by type.
#'
#' @param package_name Name of the R package.
#'
#' @examples
#' \dontrun{
#' get_cran_urls("dplyr")
#' }
#'
#' @export
#'
get_cran_urls <- function(package_name) {
  details <- mem_get_cran_table(package_name)
  # first (and normally only) "BugReports:" entry
  bug_url <- details$X2[which(details$X1 == "BugReports:")][1]
  # the "URL:" field may hold several comma-separated links
  site_urls <- unlist(stringr::str_split(
    details$X2[which(details$X1 == "URL:")],
    pattern = ", "
  ))
  out <- tibble::tibble(urls = c(bug_url, site_urls))
  # classify each link; the first matching rule wins, so "issues" must
  # be tested before the generic "github"/"gitlab" patterns
  out <- dplyr::mutate(out, website = dplyr::case_when(
    stringr::str_detect(urls, pattern = "issues") ~ "Bugs",
    stringr::str_detect(urls, pattern = "github") ~ "GitHub",
    stringr::str_detect(urls, pattern = "gitlab") ~ "GitLab",
    stringr::str_detect(urls, pattern = "r-forge") ~ "R-Forge",
    TRUE ~ "Others"
  ))
  dplyr::select(out, website, urls)
}
# Scrape the two-column details table from a package's CRAN page
# (field label in column X1, value in column X2).  Validates the
# package name via check_cran() first.
get_cran_table <- function(package_name) {
check_cran(package_name)
url <- glue::glue(
"https://cran.r-project.org/package=", package_name
)
mem_read_html(url) %>%
rvest::html_nodes("table") %>%
rvest::html_table() %>%
magrittr::extract2(1)
}
# Memoised wrappers: cache the scraped details table and the parsed HTML
# so repeated get_cran_*() calls hit the network only once per session.
mem_get_cran_table <- memoise::memoise(get_cran_table)
mem_read_html <- memoise::memoise(xml2::read_html)
# Validate preconditions for the scrapers above: errors when there is no
# internet connection, or when the package's CRAN page does not respond
# with HTTP 200 (i.e. the package name is wrong).
check_cran <- function(package_name) {
  if (!curl::has_internet()) {
    stop("Please check your internet connection.", call. = FALSE)
  }
  pkg_url <- glue::glue("https://cran.r-project.org/package=", package_name)
  status_code <- httr::status_code(httr::GET(pkg_url))
  if (status_code != 200) {
    stop("Please check the package name.", call. = FALSE)
  }
}
| /R/info-cran.R | permissive | guhjy/pkginfo | R | false | false | 11,121 | r | #' Retrieve package information
#'
#' @section Usage:
#' \preformatted{
#' myPackage <- CranPackage$new("package_name")
#' myPackage$get_downloads()
#' myPackage$get_results()
#' myPackage$get_title()
#' myPackage$get_description()
#' myPackage$get_version()
#' myPackage$get_r_dep()
#' myPackage$get_imports()
#' myPackage$get_suggest()
#' myPackage$get_publish_date()
#' myPackage$get_license()
#' myPackage$get_authors()
#' myPackage$get_maintainer()
#' myPackage$get_urls()
#' }
#'
#' @section Arguments:
#' \describe{
#' \item{package_name}{Name of the R packge.}
#' }
#'
#' @section Details:
#'
#' To create \code{CranPackage} objects, you need to use \code{CranPackage$new("package_name")}.
#'
#' \code{myPackage$get_downloads()} will return the downloads of the package
#' from the RStudio CRAN mirror for the last day, last week, last month and
#' total downloads.
#'
#' \code{myPackage$get_check_results()} will return the CRAN check results of
#' the package.
#'
#' \code{myPackage$get_title()} will return the title of the package.
#'
#' \code{myPackage$get_description()} will return the description of the package.
#'
#' \code{myPackage$get_version()} will return the version of the package.
#'
#' \code{myPackage$get_r_dep()} will return the R dependency of the package.
#'
#' \code{myPackage$get_imports()} will return the R packages imported by the package.
#'
#' \code{myPackage$get_suggests()} will return the R packages suggested by the package.
#'
#' \code{myPackage$get_publish_date()} will return the date the package was published on CRAN.
#'
#' \code{myPackage$get_license()} will return the license under which the package has been released.
#'
#' \code{myPackage$get_authors()} will return the names of the authors of the package.
#'
#' \code{myPackage$get_maintainer()} will return the name of the maintainer of the package.
#'
#' \code{myPackage$get_urls()} will return the URLs associated with the package.
#'
#' @examples
#' \dontrun{
#' myPackage <- CranPackage$new("dplyr")
#' myPackage$get_title()
#' myPackage$get_version()
#' myPackage$get_r_deps()
#' myPackage$get_imports()
#' }
#'
#' @name CranPackage
#' @docType class
#' @format An R6 class.
#' @export
#'
NULL
CranPackage <- R6::R6Class("CranPackage",
public = list(
package_name = NULL,
initialize = function(package_name = NA) {
self$package_name <- package_name
},
get_downloads = function() {
get_cran_downloads(self$package_name)
},
get_title = function() {
get_cran_title(self$package_name)
},
get_description = function() {
get_cran_desc(self$package_name)
},
get_version = function() {
get_cran_version(self$package_name)
},
get_r_dep = function() {
get_cran_r_dep(self$package_name)
},
get_imports = function() {
get_cran_imports(self$package_name)
},
get_suggests = function() {
get_cran_suggests(self$package_name)
},
get_publish_date = function() {
get_cran_pub_date(self$package_name)
},
get_license = function() {
get_cran_license(self$package_name)
},
get_authors = function() {
get_cran_authors(self$package_name)
},
get_maintainer = function() {
get_cran_maintainer(self$package_name)
},
get_urls = function() {
get_cran_urls(self$package_name)
},
get_check_results = function() {
get_cran_results(self$package)
}
)
)
#' Downloads
#'
#' Package downloads from RStudio CRAN mirror.
#'
#' @param package_name Name of the package.
#'
#' @examples
#' \dontrun{
#' get_cran_downloads("dplyr")
#' }
#'
#' @importFrom magrittr %>%
#'
#' @export
#'
get_cran_downloads <- function(package_name) {
check_cran(package_name)
count <- NULL
latest <- lubridate::today() - 2
last_day <- cranlogs::cran_downloads(package_name, from = latest, to = latest) %>%
dplyr::select(count) %>%
sum()
last_week <- cranlogs::cran_downloads(package_name, "last-week") %>%
dplyr::select(count) %>%
sum()
last_month <- cranlogs::cran_downloads(package_name, "last-month") %>%
dplyr::select(count) %>%
sum()
overall <- cranlogs::cran_downloads(package_name, from = "2012-10-01", to = latest) %>%
dplyr::select(count) %>%
sum()
tibble::tibble(
latest = last_day,
last_week = last_week,
last_month = last_month,
total = overall
)
}
#' Check results
#'
#' Return latest CRAN build results.
#'
#' @param package_name Name of the package.
#'
#' @examples
#' \dontrun{
#' get_cran_results("dplyr")
#' }
#'
#' @export
#'
get_cran_results <- function(package_name) {
check_cran(package_name)
url <- glue::glue(
"https://cran.r-project.org/web/checks/check_results_", package_name, ".html"
)
mem_read_html(url) %>%
rvest::html_nodes("table") %>%
rvest::html_table() %>%
magrittr::extract2(1)
}
#' Title
#'
#' Retrieve the title of the package from CRAN.
#'
#' @param package_name Name of the R package.
#'
#' @examples
#' \dontrun{
#' get_cran_title("dplyr")
#' }
#'
#' @export
#'
get_cran_title <- function(package_name) {
check_cran(package_name)
url <- glue::glue("https://cran.r-project.org/package=", package_name)
mem_read_html(url) %>%
rvest::html_nodes("h2") %>%
rvest::html_text()
}
#' Description
#'
#' Retrieve the description of the package from CRAN.
#'
#' @param package_name Name of the R package.
#'
#' @examples
#' \dontrun{
#' get_cran_desc("dplyr")
#' }
#'
#' @export
#'
get_cran_desc <- function(package_name) {
check_cran(package_name)
url <- glue::glue("https://cran.r-project.org/package=", package_name)
mem_read_html(url) %>%
rvest::html_nodes("p") %>%
rvest::html_text() %>%
magrittr::extract(1)
}
#' Version
#'
#' Retrieve the latest version of the package from CRAN.
#'
#' @param package_name Name of the R package.
#'
#' @examples
#' \dontrun{
#' get_cran_version("dplyr")
#' }
#'
#' @export
#'
get_cran_version <- function(package_name) {
X1 <- NULL
X2 <- NULL
package_name %>%
mem_get_cran_table() %>%
dplyr::filter(X1 == "Version:") %>%
magrittr::use_series(X2)
}
#' Dependency
#'
#' Retrieve the R version on which the package depends.
#'
#' @param package_name Name of the R package.
#'
#' @examples
#' \dontrun{
#' get_cran_r_dep("dplyr")
#' }
#'
#' @export
#'
get_cran_r_dep <- function(package_name) {
X1 <- NULL
X2 <- NULL
package_name %>%
mem_get_cran_table() %>%
dplyr::filter(X1 == "Depends:") %>%
magrittr::use_series(X2)
}
#' Imports
#'
#' Retrieve the list of packages imported.
#'
#' @param package_name Name of the R package.
#'
#' @examples
#' \dontrun{
#' get_cran_imports("dplyr")
#' }
#'
#' @export
#'
get_cran_imports <- function(package_name) {
X1 <- NULL
X2 <- NULL
package_name %>%
mem_get_cran_table() %>%
dplyr::filter(X1 == "Imports:") %>%
magrittr::use_series(X2) %>%
stringr::str_split(pattern = ", ") %>%
unlist() %>%
tibble::tibble() %>%
magrittr::set_colnames("imports")
}
#' Suggests
#'
#' Retrieve the list of packages suggested.
#'
#' @param package_name Name of the R package.
#'
#' @examples
#' \dontrun{
#' get_cran_suggests("dplyr")
#' }
#'
#' @export
#'
get_cran_suggests <- function(package_name) {
  # Bind NSE column names so R CMD check does not flag them as globals.
  X1 <- NULL
  X2 <- NULL
  details <- mem_get_cran_table(package_name)
  # The "Suggests:" cell is a single comma-separated string of package names.
  field <- dplyr::filter(details, X1 == "Suggests:")[["X2"]]
  pkgs <- unlist(stringr::str_split(field, pattern = ", "))
  magrittr::set_colnames(tibble::tibble(pkgs), "suggests")
}
#' Published date
#'
#' Retrieve the latest date on which the package was published to CRAN.
#'
#' @param package_name Name of the R package.
#'
#' @examples
#' \dontrun{
#' get_cran_pub_date("dplyr")
#' }
#'
#' @export
#'
get_cran_pub_date <- function(package_name) {
  # Bind NSE column names so R CMD check does not flag them as globals.
  X1 <- NULL
  X2 <- NULL
  details <- mem_get_cran_table(package_name)
  # The "Published:" row carries the latest publication date.
  dplyr::filter(details, X1 == "Published:")[["X2"]]
}
#' License
#'
#' Retrieve the license type of the package.
#'
#' @param package_name Name of the R package.
#'
#' @examples
#' \dontrun{
#' get_cran_license("dplyr")
#' }
#'
#' @export
#'
get_cran_license <- function(package_name) {
  # Bind NSE column names so R CMD check does not flag them as globals.
  X1 <- NULL
  X2 <- NULL
  details <- mem_get_cran_table(package_name)
  # The "License:" row carries the declared license string.
  dplyr::filter(details, X1 == "License:")[["X2"]]
}
#' Authors
#'
#' Retrieve the list of authors of the package.
#'
#' @param package_name Name of the R package.
#'
#' @examples
#' \dontrun{
#' get_cran_authors("dplyr")
#' }
#'
#' @export
#'
get_cran_authors <- function(package_name) {
  # Bind NSE column names so R CMD check does not flag them as globals.
  X1 <- NULL
  X2 <- NULL
  details <- mem_get_cran_table(package_name)
  # Authors are separated by a comma followed by a newline-indented entry.
  field <- dplyr::filter(details, X1 == "Author:")[["X2"]]
  author_names <- unlist(stringr::str_split(field, ",\n "))
  magrittr::set_colnames(tibble::tibble(author_names), "name")
}
#' Maintainer
#'
#' Retrieve the details of the maintainer of the package.
#'
#' @param package_name Name of the R package.
#'
#' @examples
#' \dontrun{
#' get_cran_maintainer("dplyr")
#' }
#'
#' @export
#'
get_cran_maintainer <- function(package_name) {
  # Bind NSE column names so R CMD check does not flag them as globals.
  X1 <- NULL
  X2 <- NULL
  details <- mem_get_cran_table(package_name)
  # The "Maintainer:" row carries the maintainer name and e-mail.
  dplyr::filter(details, X1 == "Maintainer:")[["X2"]]
}
#' URL
#'
#' Retrieve the list of URLs associated with the package.
#'
#' @param package_name Name of the R package.
#'
#' @examples
#' \dontrun{
#' get_cran_urls("dplyr")
#' }
#'
#' @export
#'
get_cran_urls <- function(package_name) {
  # Bind NSE column names so R CMD check does not flag them as globals.
  X1 <- NULL
  X2 <- NULL
  details <- mem_get_cran_table(package_name)
  # Bug tracker: first entry of the "BugReports:" row (NA when absent).
  bug_tracker <- dplyr::filter(details, X1 == "BugReports:")[["X2"]][1]
  # Project websites: comma-separated entries of the "URL:" row.
  site_urls <- unlist(
    stringr::str_split(
      dplyr::filter(details, X1 == "URL:")[["X2"]],
      pattern = ", "
    )
  )
  labelled <- tibble::tibble(urls = c(bug_tracker, site_urls))
  # First matching rule wins, so "issues" links are labelled before the
  # generic GitHub/GitLab host checks.
  labelled <- dplyr::mutate(labelled, website = dplyr::case_when(
    stringr::str_detect(urls, pattern = "issues") ~ "Bugs",
    stringr::str_detect(urls, pattern = "github") ~ "GitHub",
    stringr::str_detect(urls, pattern = "gitlab") ~ "GitLab",
    stringr::str_detect(urls, pattern = "r-forge") ~ "R-Forge",
    TRUE ~ "Others"
  ))
  dplyr::select(labelled, website, urls)
}
# Scrape the details table from a package's CRAN landing page. Returns the
# first HTML table as a data frame (columns X1 = field label, X2 = value),
# which the get_cran_* accessors filter by label.
get_cran_table <- function(package_name) {
  check_cran(package_name)
  url <- glue::glue(
    "https://cran.r-project.org/package=", package_name
  )
  page <- mem_read_html(url)
  tables <- rvest::html_table(rvest::html_nodes(page, "table"))
  tables[[1]]
}
# Memoised wrappers: cache the scraped CRAN table and parsed HTML per input
# so repeated field lookups for the same package hit the network only once.
mem_get_cran_table <- memoise::memoise(get_cran_table)
mem_read_html <- memoise::memoise(xml2::read_html)
# Validate that `package_name` refers to an existing CRAN package.
# Errors (without the call in the message) when offline or when the CRAN
# page does not return HTTP 200.
check_cran <- function(package_name) {
  # Guard clause: no connectivity means we cannot validate anything.
  if (!curl::has_internet()) {
    stop("Please check your internet connection.", call. = FALSE)
  }
  url <- glue::glue("https://cran.r-project.org/package=", package_name)
  response <- httr::GET(url)
  if (httr::status_code(response) != 200) {
    stop("Please check the package name.", call. = FALSE)
  }
}
|
# This file is part of the RcmdrPlugin.Export package.
# The current code is adapted from Rcmdr.
# file created: 02 Feb 2008
# last modified: 22 Oct 2009
# Rcmdr dialog: export the most recently printed object to LaTeX with
# Hmisc's latex(). The object is taken from the Rcmdr output stack, the
# user picks latex() options in a Tcl/Tk dialog, and onOK() assembles,
# logs and runs the resulting latex() call inside a local({...}) block.
latexExport <- function(){
# Peek at the top of the output stack without removing it.
.tmpObject <- popOutput(keep=TRUE)
objectClass <- class(.tmpObject)
objectClass <- objectClass[[1]]
objectName <- paste(".", objectClass, sep="")
# NOTE(review): comparing is.null() against the string "TRUE" works only
# through implicit coercion; plain is.null(objectClass) would be clearer.
if (is.null(objectClass) == "TRUE" | objectClass == "logical"){
# Nothing exportable: drop the object and inform the user.
.tmpObject <- popOutput()
rm(.tmpObject)
Message(message=paste("latex() cannot export objects of class '", objectClass,
"'", sep=""), type="note")
Message(message=paste("the stack is probably empty", sep=""), type="warning")
return()
} else if (objectClass == "function" | objectClass == "help_files_with_topic" |
objectClass == "packageIQR" | objectClass == "trellis" | objectClass == "xtable" |
objectClass == "latex" | objectClass == "stem.leaf" | objectClass == "multinom"){
# Class not supported by latex(): discard and recurse to try the next
# object on the stack.
.tmpObject <- popOutput()
rm(.tmpObject)
Message(message=paste("latex() cannot export objects of class '", objectClass,
"'", sep=""), type="note")
return(latexExport())
} else {
initializeDialog(title=gettextRcmdr("Export objects using latex()"))
}
# --- dialog widgets ------------------------------------------------------
dataFrame <- tkframe(top)
xBox <- variableListBox(dataFrame, paste(objectClass), title=gettextRcmdr("Object class"))
optionsFrame <- tkframe(top)
captionInput <- tclVar("")
captionField <- tkentry(optionsFrame, width="15", textvariable=captionInput)
radioButtons(window=optionsFrame, name="caption.loc", buttons=c("top", "bottom"),
values=c("top", "bottom"), labels=gettextRcmdr(c("Top", "Bottom")),
title=gettextRcmdr("Caption loc."))
labelInput <- tclVar("")
labelField <- tkentry(optionsFrame, width="15", textvariable=labelInput)
# Work out how many columns the exported table will have, per class, so a
# sensible default decimal-digits vector (cdec) can be offered below.
if (objectClass == "numSummary") {
objectCol <- ncol(.tmpObject$table)
} else if (objectClass == "summary.lm") {
objectCol <- ncol(.tmpObject$coefficients)
} else if (objectClass == "rcorr"){
objectCol <- ncol(.tmpObject$r)
} else if (objectClass == "rcorr.adjust"){
objectCol <- ncol(.tmpObject$R$r)
} else if (objectClass == "confint.glht"){
objectCol <- ncol(.tmpObject$confint)
} else if (objectClass == "factanal"){
objectCol <- ncol(.tmpObject$loadings)
} else if (objectClass == "reliability"){
objectCol <- ncol(.tmpObject$rel.matrix)
} else if (objectClass == "summary.aov" & length(.tmpObject)==1) {
objectCol <- ncol(.tmpObject[[1]])
} else if (objectClass == "summary.princomp") {
objectCol <- length(.tmpObject$sdev)
} else {
objectCol <- ncol(.tmpObject)
}
# Default: two decimals per column (scalar "2" when the width is unknown).
if (is.null(objectCol)) {
digitsVector <- paste("2")
} else if (is.na(objectCol)) {
digitsVector <- paste("2")
} else {
digitsVector <- paste("c(", paste(rep("2,", (objectCol-1)), collapse=""), "2)", sep="")
}
digitsInput <- tclVar(paste(digitsVector))
digitsField <- tkentry(optionsFrame, width="15", textvariable=digitsInput)
additionalFrame <- tkframe(top)
sizeInput <- tclVar("normal")
sizeField <- tkentry(additionalFrame, width="15", textvariable=sizeInput)
centerInput <- tclVar("center")
centerField <- tkentry(additionalFrame, width="15", textvariable=centerInput)
naVariable <- tclVar("1")
naCheckBox <- tkcheckbutton(additionalFrame, variable=naVariable)
fileInput <- tclVar("")
fileField <- tkentry(additionalFrame, width="15", textvariable=fileInput)
checkboxesFrame <- tkframe(top)
appendVariable <- tclVar("1")
appendCheckBox <- tkcheckbutton(checkboxesFrame, variable=appendVariable)
visualiseVariable <- tclVar("0")
visualiseCheckBox <- tkcheckbutton(checkboxesFrame, variable=visualiseVariable)
longtableVariable <- tclVar("0")
longtableCheckBox <- tkcheckbutton(checkboxesFrame, variable=longtableVariable)
tab.envVariable <- tclVar("1")
tab.envCheckBox <- tkcheckbutton(checkboxesFrame, variable=tab.envVariable)
landscapeVariable <- tclVar("0")
landscapeCheckBox <- tkcheckbutton(checkboxesFrame, variable=landscapeVariable)
booktabsVariable <- tclVar("0")
booktabsCheckBox <- tkcheckbutton(checkboxesFrame, variable=booktabsVariable)
ctableVariable <- tclVar("0")
ctableCheckBox <- tkcheckbutton(checkboxesFrame, variable=ctableVariable)
vbarVariable <- tclVar("0")
vbarCheckBox <- tkcheckbutton(checkboxesFrame, variable=vbarVariable)
nomarginsVariable <- tclVar("1")
nomarginsCheckBox <- tkcheckbutton(checkboxesFrame, variable=nomarginsVariable)
# OK-button callback: read the widget state, translate each choice into the
# corresponding latex() argument fragment, then assemble and run the
# command(s).
onOK <- function(){
caption <- paste(tclvalue(captionInput))
label <- paste(tclvalue(labelInput))
digits <- paste(tclvalue(digitsInput))
caption.loc <- tclvalue(caption.locVariable)
size <- paste(tclvalue(sizeInput))
center <- paste(tclvalue(centerInput))
na <- paste(tclvalue(naVariable))
file <- paste(tclvalue(fileInput))
append <- paste(tclvalue(appendVariable))
visualise <- paste(tclvalue(visualiseVariable))
longtable <- paste(tclvalue(longtableVariable))
tab.env <- paste(tclvalue(tab.envVariable))
landscape <- paste(tclvalue(landscapeVariable))
booktabs <- paste(tclvalue(booktabsVariable))
ctable <- paste(tclvalue(ctableVariable))
closeDialog()
vbar <- paste(tclvalue(vbarVariable))
nomargins <- paste(tclvalue(nomarginsVariable))
# NOTE(review): closeDialog() was already called a few lines above; this
# second call looks redundant — confirm it is harmless in Rcmdr.
closeDialog()
# Each option below is rewritten as either "" (use latex()'s default) or
# a ", arg=value" fragment to be spliced into the command string.
if (caption != ""){
caption <- paste(", caption=", '"', paste(tclvalue(captionInput)), '"', sep="")
}
if (label != ""){
label <- paste(", label=", '"', paste(tclvalue(labelInput)), '"', sep="")
}
if (digits != ""){
digits <- paste(", cdec=", paste(tclvalue(digitsInput)), sep="")
}
if (caption != ""){
if (caption.loc == "top"){
caption.loc <- paste("", sep="")
} else if (caption.loc == "bottom"){
caption.loc <- paste(", caption.loc=", '"', paste(tclvalue(caption.locVariable)), '"', sep="")
}
} else {
caption.loc <- paste("", sep="")
}
if (size != ""){
if (size == "normal"){
size <- paste("", sep="")
}
else size <- paste(", size=", '"', paste(tclvalue(sizeInput)), '"', sep="")
}
if (center != ""){
if (center == "center"){
center <- paste("", sep="")
}
else center <- paste(", center=", '"', paste(tclvalue(centerInput)), '"', sep="")
}
if (na == "1"){
na <- paste("", sep="")
}
else if (na == "0"){
na <- paste(', na.blank=FALSE', sep="")
}
# With no output file, a preview request means the command is emitted a
# second time (secondTime) for the DVI preview; with a file, the result
# may instead be kept in a visible object (inObject prefix).
if (file == ""){
inObject <- paste("", sep="")
if (visualise == "1"){
secondTime <- TRUE
} else {
secondTime <- FALSE
}
} else if (file != ""){
secondTime <- FALSE
if (visualise == "1"){
inObject <- paste("", sep="")
} else {
inObject <- paste(objectName, " <- ", sep="")
}
}
if (file != ""){
file <- paste(', file="', file, '.tex"', sep="")
if (append == "1"){
append <- paste(", append=TRUE", sep="")
} else if (append == "0"){
append <- paste("", sep="")
}
} else if (file == ""){
file <- paste(', file=""', sep="")
append <- paste("", sep="")
}
if (longtable == "1"){
longtable <- paste(', longtable=TRUE', sep="")
} else {
longtable <- paste("", sep="")
}
if (tab.env == "0"){
tab.env <- paste(', table.env=FALSE', sep="")
} else {
tab.env <- paste("", sep="")
}
if (landscape == "1"){
landscape <- paste(', landscape=TRUE', sep="")
} else if (landscape == "0"){
landscape <- paste("", sep="")
}
if (booktabs == "1"){
booktabs <- paste(', booktabs=TRUE', sep="")
}
else if (booktabs == "0"){
booktabs <- paste("", sep="")
}
if (ctable == "1"){
ctable <- paste(', ctable=TRUE', sep="")
}
else if (ctable == "0"){
ctable <- paste("", sep="")
}
if (vbar == "1"){
vbar <- paste(', vbar=TRUE', sep="")
}
else if (vbar == "0"){
vbar <- paste("", sep="")
}
if (nomargins == "1"){
nomargins <- paste("", sep="")
}
else if (nomargins == "0"){
nomargins <- paste(', nomargins=FALSE', sep="")
}
# Map the object's class to the expression(s) actually handed to latex();
# some classes need one call per component (commandRepeat > 1).
functionName <- "latex"
objectCommandName <- NULL
commandRepeat <- 1
if (objectClass == "numSummary"){
objectCommandName <- paste(objectName, "$table", sep="")
} else if (objectClass == "summary.lm") {
objectCommandName <- paste(objectName, "$coefficients", sep="")
### use *[i], like in xtableExport()
} else if (objectClass == "summary.multinom"){
objectCommandName1 <- paste("as.data.frame(", objectName, "$coefficients)", sep="")
objectCommandName2 <- paste("as.data.frame(", objectName, "$standard.errors)", sep="")
objectCommandName <- c(objectCommandName1, objectCommandName2)
commandRepeat <- 2
} else if (objectClass == "polr"){
objectCommandName1 <- paste("as.data.frame(", objectName, "$coefficients)", sep="")
objectCommandName2 <- paste("as.data.frame(", objectName, "$zeta)", sep="")
objectCommandName <- c(objectCommandName1, objectCommandName2)
commandRepeat <- 2
} else if (objectClass == "summary.polr"){
objectCommandName <- paste(objectName, "$coefficients", sep="")
} else if (objectClass == "reliability"){
objectCommandName <- paste(objectName, "$rel.matrix", sep="")
} else if (objectClass == "confint.glht"){
objectCommandName <- paste(objectName, "$", "confint", sep="")
} else if (objectClass == "factanal"){
objectCommandName <- paste("as.table(", objectName, "$loadings)", sep="")
} else if (objectClass == "outlier.test"){
objectCommandName <- paste("as.matrix(", objectName, "$test)", sep="")
} else if (objectClass == "array" | objectClass == "integer" |
objectClass == "character" | objectClass == "numeric"){
objectCommandName <- paste("as.data.frame(", objectName, ")", sep="")
###FIXME support for `rcorr' possibly buggy
} else if (objectClass == "rcorr"){
objectCommandName <- paste(objectName, sep="")
functionName <- "latex.list"
} else if (objectClass == "rcorr.adjust"){
commandRepeat <- 4
objectCommandList <- c("$R$r", "$R$n", "$R$P", "$P")
for (i in 1:commandRepeat){
objectCommandName[i] <- paste(objectName, objectCommandList[i], sep="")
}
} else if (objectClass == "by" & is.list(.tmpObject)==TRUE){
commandRepeat <- length(.tmpObject)
# objectCommandName <- NULL
for (i in 1:commandRepeat){
objectCommandName[i] <- paste("as.matrix(", objectName,
"[[", i, "]])", sep="")
}
} else if (objectClass == "table"){
objectCommandName <- paste("as.matrix(", objectName, ")", sep="")
} else if (objectClass == "summary.aov" & length(.tmpObject)==1) {
objectCommandName <- paste(objectName, "[[1]]", sep="")
} else if (objectClass == "summary.princomp"){
objectCommandName <- paste(objectName, "$sdev", sep="")
} else {
objectName <- paste(".object", sep="")
objectCommandName <- paste(objectName)
}
assign(objectName, .tmpObject)
if (inObject != ""){
inObject <- paste(objectName, " <- ", sep="")
}
# Assemble the logged command: a local({...}) block that pops the object,
# optionally sanitises column names, and calls latex() once per component.
cmds <- character(3)
cmds[1] <- paste0("local({\n ",
"## retrieve the last printed object\n ",
objectName, " <- popOutput()")
#justDoIt(paste(objectName, " <- .tmpObject", sep=""))
#eval(parse(text=paste(objectName, " <- .tmpObject", sep="")))
#logger(paste(objectName, " <- popOutput() ## retrieve the last printed object", sep=""))
# Column names containing "%" (and numSummary output) must be escaped
# with latexTranslate() before they reach LaTeX.
.matPercentage <- FALSE
if (objectClass == "matrix"){
eval(parse(text=paste('.matPercentage <- !(nrow(as.matrix(grep("%",
colnames(', objectCommandName, "), fixed=TRUE))) == 0)",
sep="")))
}
need.sanitize <- (objectClass == "numSummary" || .matPercentage == TRUE)
if (need.sanitize){
run.sanitize <- paste(" ## escape strings for LaTeX\n ",
"colnames(", objectCommandName,
") <- \n latexTranslate(", "colnames(", objectCommandName,
"))", sep="")
cmds[2] <- run.sanitize
#logger(run.sanitize)
#eval(parse(text=run.sanitize))
} else {
cmds[2] <- ""
}
run.command <- character(commandRepeat)
for (i in 1:commandRepeat){
run.command[i] <- paste(" ", inObject, functionName, "(", objectCommandName[i],
caption, caption.loc, label, digits, size, na, file, append,
longtable, tab.env, landscape, booktabs, ctable, vbar, nomargins,
center, ', title="")', sep="")
#logger(run.command)
#eval(parse(text=run.command))
}
if (secondTime){
file <- paste0("") ##default file runs DVI preview
##without print() preview works only for last call
if(commandRepeat > 1){
usePrint <- paste0("print(")
endPrint <- paste0(")")
} else {
usePrint <- paste0("")
endPrint <- paste0("")
}
run.preview <- character(commandRepeat)
for (i in 1:commandRepeat){
run.preview[i] <- paste0(if(i==1) " ## DVI preview\n " else " ",
usePrint, inObject, functionName, "(", objectCommandName[i],
caption, caption.loc, label, digits, size, na, file, append,
longtable, tab.env, landscape, booktabs, ctable, vbar,
nomargins, center, ', title="")', endPrint)
#logger(run.preview)
#eval(parse(text=run.preview))
}
}
commands <- paste(c(cmds[1], if(need.sanitize) run.sanitize,
run.command, if(secondTime) run.preview), collapse="\n")
doItAndPrint(paste(commands, "\n})", sep=""))
#eval(parse(text=paste('rm(list=c(', '"', objectName, '"))', sep="")))
#logger(paste("remove(", objectName, ")", sep=""))
tkdestroy(top)
tkfocus(CommanderWindow())
}
# --- dialog layout -------------------------------------------------------
OKCancelHelp(helpSubject="latex")
tkgrid(getFrame(xBox), sticky="nw")
tkgrid(tklabel(optionsFrame, text=gettextRcmdr("Arguments"), fg="blue"))
tkgrid(tklabel(optionsFrame, text=gettextRcmdr("Caption:")), captionField, sticky="w")
tkgrid(tklabel(optionsFrame, text=gettextRcmdr("Label:")), labelField, sticky="w")
tkgrid(tklabel(optionsFrame, text=gettextRcmdr("Digits:")), digitsField, sticky="w")
tkgrid(tklabel(optionsFrame, text=gettextRcmdr(" ")), sticky="w")
tkgrid(caption.locFrame, sticky="sw")
tkgrid(tklabel(additionalFrame, text=gettextRcmdr("Printing "), fg="blue"))
tkgrid(tklabel(additionalFrame, text=gettextRcmdr("Size:")), sizeField, sticky="w")
tkgrid(tklabel(additionalFrame, text=gettextRcmdr("Center:")), centerField, sticky="w")
tkgrid(tklabel(additionalFrame, text=gettextRcmdr("Blank NA")), naCheckBox, sticky="w")
tkgrid(tklabel(additionalFrame, text=gettextRcmdr("File:")), fileField, sticky="w")
tkgrid(tklabel(checkboxesFrame, text=gettextRcmdr("Append")), appendCheckBox, sticky="w")
tkgrid(tklabel(checkboxesFrame, text=gettextRcmdr("Preview")), visualiseCheckBox, sticky="w")
tkgrid(tklabel(checkboxesFrame, text=gettextRcmdr("Table env.")), tab.envCheckBox, sticky="w")
tkgrid(tklabel(checkboxesFrame, text=gettextRcmdr("Use 'longtable'")), longtableCheckBox, sticky="w")
tkgrid(tklabel(checkboxesFrame, text=gettextRcmdr("Landscape")), landscapeCheckBox, sticky="w")
tkgrid(tklabel(checkboxesFrame, text=gettextRcmdr("Use 'booktabs'")), booktabsCheckBox, sticky="w")
tkgrid(tklabel(checkboxesFrame, text=gettextRcmdr("Use 'ctable'")), ctableCheckBox, sticky="w")
tkgrid(tklabel(checkboxesFrame, text=gettextRcmdr("Vertical bar")), vbarCheckBox, sticky="w")
tkgrid(tklabel(checkboxesFrame, text=gettextRcmdr("No margins")), nomarginsCheckBox, sticky="w")
tkgrid(dataFrame, tklabel(top, text=" "), additionalFrame, sticky="nw")
tkgrid(optionsFrame, tklabel(top, text=" "), checkboxesFrame, sticky="nw")
tkgrid(buttonsFrame, columnspan=3, sticky="w")
dialogSuffix(rows=4, columns=3)
}
| /RcmdrPlugin.Export/R/latexExport.R | no_license | ingted/R-Examples | R | false | false | 18,048 | r | # This file is part of the RcmdrPlugin.Export package.
# The current code is adapted from Rcmdr.
# file created: 02 Feb 2008
# last modified: 22 Oct 2009
latexExport <- function(){
.tmpObject <- popOutput(keep=TRUE)
objectClass <- class(.tmpObject)
objectClass <- objectClass[[1]]
objectName <- paste(".", objectClass, sep="")
if (is.null(objectClass) == "TRUE" | objectClass == "logical"){
.tmpObject <- popOutput()
rm(.tmpObject)
Message(message=paste("latex() cannot export objects of class '", objectClass,
"'", sep=""), type="note")
Message(message=paste("the stack is probably empty", sep=""), type="warning")
return()
} else if (objectClass == "function" | objectClass == "help_files_with_topic" |
objectClass == "packageIQR" | objectClass == "trellis" | objectClass == "xtable" |
objectClass == "latex" | objectClass == "stem.leaf" | objectClass == "multinom"){
.tmpObject <- popOutput()
rm(.tmpObject)
Message(message=paste("latex() cannot export objects of class '", objectClass,
"'", sep=""), type="note")
return(latexExport())
} else {
initializeDialog(title=gettextRcmdr("Export objects using latex()"))
}
dataFrame <- tkframe(top)
xBox <- variableListBox(dataFrame, paste(objectClass), title=gettextRcmdr("Object class"))
optionsFrame <- tkframe(top)
captionInput <- tclVar("")
captionField <- tkentry(optionsFrame, width="15", textvariable=captionInput)
radioButtons(window=optionsFrame, name="caption.loc", buttons=c("top", "bottom"),
values=c("top", "bottom"), labels=gettextRcmdr(c("Top", "Bottom")),
title=gettextRcmdr("Caption loc."))
labelInput <- tclVar("")
labelField <- tkentry(optionsFrame, width="15", textvariable=labelInput)
if (objectClass == "numSummary") {
objectCol <- ncol(.tmpObject$table)
} else if (objectClass == "summary.lm") {
objectCol <- ncol(.tmpObject$coefficients)
} else if (objectClass == "rcorr"){
objectCol <- ncol(.tmpObject$r)
} else if (objectClass == "rcorr.adjust"){
objectCol <- ncol(.tmpObject$R$r)
} else if (objectClass == "confint.glht"){
objectCol <- ncol(.tmpObject$confint)
} else if (objectClass == "factanal"){
objectCol <- ncol(.tmpObject$loadings)
} else if (objectClass == "reliability"){
objectCol <- ncol(.tmpObject$rel.matrix)
} else if (objectClass == "summary.aov" & length(.tmpObject)==1) {
objectCol <- ncol(.tmpObject[[1]])
} else if (objectClass == "summary.princomp") {
objectCol <- length(.tmpObject$sdev)
} else {
objectCol <- ncol(.tmpObject)
}
if (is.null(objectCol)) {
digitsVector <- paste("2")
} else if (is.na(objectCol)) {
digitsVector <- paste("2")
} else {
digitsVector <- paste("c(", paste(rep("2,", (objectCol-1)), collapse=""), "2)", sep="")
}
digitsInput <- tclVar(paste(digitsVector))
digitsField <- tkentry(optionsFrame, width="15", textvariable=digitsInput)
additionalFrame <- tkframe(top)
sizeInput <- tclVar("normal")
sizeField <- tkentry(additionalFrame, width="15", textvariable=sizeInput)
centerInput <- tclVar("center")
centerField <- tkentry(additionalFrame, width="15", textvariable=centerInput)
naVariable <- tclVar("1")
naCheckBox <- tkcheckbutton(additionalFrame, variable=naVariable)
fileInput <- tclVar("")
fileField <- tkentry(additionalFrame, width="15", textvariable=fileInput)
checkboxesFrame <- tkframe(top)
appendVariable <- tclVar("1")
appendCheckBox <- tkcheckbutton(checkboxesFrame, variable=appendVariable)
visualiseVariable <- tclVar("0")
visualiseCheckBox <- tkcheckbutton(checkboxesFrame, variable=visualiseVariable)
longtableVariable <- tclVar("0")
longtableCheckBox <- tkcheckbutton(checkboxesFrame, variable=longtableVariable)
tab.envVariable <- tclVar("1")
tab.envCheckBox <- tkcheckbutton(checkboxesFrame, variable=tab.envVariable)
landscapeVariable <- tclVar("0")
landscapeCheckBox <- tkcheckbutton(checkboxesFrame, variable=landscapeVariable)
booktabsVariable <- tclVar("0")
booktabsCheckBox <- tkcheckbutton(checkboxesFrame, variable=booktabsVariable)
ctableVariable <- tclVar("0")
ctableCheckBox <- tkcheckbutton(checkboxesFrame, variable=ctableVariable)
vbarVariable <- tclVar("0")
vbarCheckBox <- tkcheckbutton(checkboxesFrame, variable=vbarVariable)
nomarginsVariable <- tclVar("1")
nomarginsCheckBox <- tkcheckbutton(checkboxesFrame, variable=nomarginsVariable)
onOK <- function(){
caption <- paste(tclvalue(captionInput))
label <- paste(tclvalue(labelInput))
digits <- paste(tclvalue(digitsInput))
caption.loc <- tclvalue(caption.locVariable)
size <- paste(tclvalue(sizeInput))
center <- paste(tclvalue(centerInput))
na <- paste(tclvalue(naVariable))
file <- paste(tclvalue(fileInput))
append <- paste(tclvalue(appendVariable))
visualise <- paste(tclvalue(visualiseVariable))
longtable <- paste(tclvalue(longtableVariable))
tab.env <- paste(tclvalue(tab.envVariable))
landscape <- paste(tclvalue(landscapeVariable))
booktabs <- paste(tclvalue(booktabsVariable))
ctable <- paste(tclvalue(ctableVariable))
closeDialog()
vbar <- paste(tclvalue(vbarVariable))
nomargins <- paste(tclvalue(nomarginsVariable))
closeDialog()
if (caption != ""){
caption <- paste(", caption=", '"', paste(tclvalue(captionInput)), '"', sep="")
}
if (label != ""){
label <- paste(", label=", '"', paste(tclvalue(labelInput)), '"', sep="")
}
if (digits != ""){
digits <- paste(", cdec=", paste(tclvalue(digitsInput)), sep="")
}
if (caption != ""){
if (caption.loc == "top"){
caption.loc <- paste("", sep="")
} else if (caption.loc == "bottom"){
caption.loc <- paste(", caption.loc=", '"', paste(tclvalue(caption.locVariable)), '"', sep="")
}
} else {
caption.loc <- paste("", sep="")
}
if (size != ""){
if (size == "normal"){
size <- paste("", sep="")
}
else size <- paste(", size=", '"', paste(tclvalue(sizeInput)), '"', sep="")
}
if (center != ""){
if (center == "center"){
center <- paste("", sep="")
}
else center <- paste(", center=", '"', paste(tclvalue(centerInput)), '"', sep="")
}
if (na == "1"){
na <- paste("", sep="")
}
else if (na == "0"){
na <- paste(', na.blank=FALSE', sep="")
}
if (file == ""){
inObject <- paste("", sep="")
if (visualise == "1"){
secondTime <- TRUE
} else {
secondTime <- FALSE
}
} else if (file != ""){
secondTime <- FALSE
if (visualise == "1"){
inObject <- paste("", sep="")
} else {
inObject <- paste(objectName, " <- ", sep="")
}
}
if (file != ""){
file <- paste(', file="', file, '.tex"', sep="")
if (append == "1"){
append <- paste(", append=TRUE", sep="")
} else if (append == "0"){
append <- paste("", sep="")
}
} else if (file == ""){
file <- paste(', file=""', sep="")
append <- paste("", sep="")
}
if (longtable == "1"){
longtable <- paste(', longtable=TRUE', sep="")
} else {
longtable <- paste("", sep="")
}
if (tab.env == "0"){
tab.env <- paste(', table.env=FALSE', sep="")
} else {
tab.env <- paste("", sep="")
}
if (landscape == "1"){
landscape <- paste(', landscape=TRUE', sep="")
} else if (landscape == "0"){
landscape <- paste("", sep="")
}
if (booktabs == "1"){
booktabs <- paste(', booktabs=TRUE', sep="")
}
else if (booktabs == "0"){
booktabs <- paste("", sep="")
}
if (ctable == "1"){
ctable <- paste(', ctable=TRUE', sep="")
}
else if (ctable == "0"){
ctable <- paste("", sep="")
}
if (vbar == "1"){
vbar <- paste(', vbar=TRUE', sep="")
}
else if (vbar == "0"){
vbar <- paste("", sep="")
}
if (nomargins == "1"){
nomargins <- paste("", sep="")
}
else if (nomargins == "0"){
nomargins <- paste(', nomargins=FALSE', sep="")
}
functionName <- "latex"
objectCommandName <- NULL
commandRepeat <- 1
if (objectClass == "numSummary"){
objectCommandName <- paste(objectName, "$table", sep="")
} else if (objectClass == "summary.lm") {
objectCommandName <- paste(objectName, "$coefficients", sep="")
### use *[i], like in xtableExport()
} else if (objectClass == "summary.multinom"){
objectCommandName1 <- paste("as.data.frame(", objectName, "$coefficients)", sep="")
objectCommandName2 <- paste("as.data.frame(", objectName, "$standard.errors)", sep="")
objectCommandName <- c(objectCommandName1, objectCommandName2)
commandRepeat <- 2
} else if (objectClass == "polr"){
objectCommandName1 <- paste("as.data.frame(", objectName, "$coefficients)", sep="")
objectCommandName2 <- paste("as.data.frame(", objectName, "$zeta)", sep="")
objectCommandName <- c(objectCommandName1, objectCommandName2)
commandRepeat <- 2
} else if (objectClass == "summary.polr"){
objectCommandName <- paste(objectName, "$coefficients", sep="")
} else if (objectClass == "reliability"){
objectCommandName <- paste(objectName, "$rel.matrix", sep="")
} else if (objectClass == "confint.glht"){
objectCommandName <- paste(objectName, "$", "confint", sep="")
} else if (objectClass == "factanal"){
objectCommandName <- paste("as.table(", objectName, "$loadings)", sep="")
} else if (objectClass == "outlier.test"){
objectCommandName <- paste("as.matrix(", objectName, "$test)", sep="")
} else if (objectClass == "array" | objectClass == "integer" |
objectClass == "character" | objectClass == "numeric"){
objectCommandName <- paste("as.data.frame(", objectName, ")", sep="")
###FIXME support for `rcorr' possibly buggy
} else if (objectClass == "rcorr"){
objectCommandName <- paste(objectName, sep="")
functionName <- "latex.list"
} else if (objectClass == "rcorr.adjust"){
commandRepeat <- 4
objectCommandList <- c("$R$r", "$R$n", "$R$P", "$P")
for (i in 1:commandRepeat){
objectCommandName[i] <- paste(objectName, objectCommandList[i], sep="")
}
} else if (objectClass == "by" & is.list(.tmpObject)==TRUE){
commandRepeat <- length(.tmpObject)
# objectCommandName <- NULL
for (i in 1:commandRepeat){
objectCommandName[i] <- paste("as.matrix(", objectName,
"[[", i, "]])", sep="")
}
} else if (objectClass == "table"){
objectCommandName <- paste("as.matrix(", objectName, ")", sep="")
} else if (objectClass == "summary.aov" & length(.tmpObject)==1) {
objectCommandName <- paste(objectName, "[[1]]", sep="")
} else if (objectClass == "summary.princomp"){
objectCommandName <- paste(objectName, "$sdev", sep="")
} else {
objectName <- paste(".object", sep="")
objectCommandName <- paste(objectName)
}
assign(objectName, .tmpObject)
if (inObject != ""){
inObject <- paste(objectName, " <- ", sep="")
}
cmds <- character(3)
cmds[1] <- paste0("local({\n ",
"## retrieve the last printed object\n ",
objectName, " <- popOutput()")
#justDoIt(paste(objectName, " <- .tmpObject", sep=""))
#eval(parse(text=paste(objectName, " <- .tmpObject", sep="")))
#logger(paste(objectName, " <- popOutput() ## retrieve the last printed object", sep=""))
.matPercentage <- FALSE
if (objectClass == "matrix"){
eval(parse(text=paste('.matPercentage <- !(nrow(as.matrix(grep("%",
colnames(', objectCommandName, "), fixed=TRUE))) == 0)",
sep="")))
}
need.sanitize <- (objectClass == "numSummary" || .matPercentage == TRUE)
if (need.sanitize){
run.sanitize <- paste(" ## escape strings for LaTeX\n ",
"colnames(", objectCommandName,
") <- \n latexTranslate(", "colnames(", objectCommandName,
"))", sep="")
cmds[2] <- run.sanitize
#logger(run.sanitize)
#eval(parse(text=run.sanitize))
} else {
cmds[2] <- ""
}
run.command <- character(commandRepeat)
for (i in 1:commandRepeat){
run.command[i] <- paste(" ", inObject, functionName, "(", objectCommandName[i],
caption, caption.loc, label, digits, size, na, file, append,
longtable, tab.env, landscape, booktabs, ctable, vbar, nomargins,
center, ', title="")', sep="")
#logger(run.command)
#eval(parse(text=run.command))
}
if (secondTime){
file <- paste0("") ##default file runs DVI preview
##without print() preview works only for last call
if(commandRepeat > 1){
usePrint <- paste0("print(")
endPrint <- paste0(")")
} else {
usePrint <- paste0("")
endPrint <- paste0("")
}
run.preview <- character(commandRepeat)
for (i in 1:commandRepeat){
run.preview[i] <- paste0(if(i==1) " ## DVI preview\n " else " ",
usePrint, inObject, functionName, "(", objectCommandName[i],
caption, caption.loc, label, digits, size, na, file, append,
longtable, tab.env, landscape, booktabs, ctable, vbar,
nomargins, center, ', title="")', endPrint)
#logger(run.preview)
#eval(parse(text=run.preview))
}
}
commands <- paste(c(cmds[1], if(need.sanitize) run.sanitize,
run.command, if(secondTime) run.preview), collapse="\n")
doItAndPrint(paste(commands, "\n})", sep=""))
#eval(parse(text=paste('rm(list=c(', '"', objectName, '"))', sep="")))
#logger(paste("remove(", objectName, ")", sep=""))
tkdestroy(top)
tkfocus(CommanderWindow())
}
OKCancelHelp(helpSubject="latex")
tkgrid(getFrame(xBox), sticky="nw")
tkgrid(tklabel(optionsFrame, text=gettextRcmdr("Arguments"), fg="blue"))
tkgrid(tklabel(optionsFrame, text=gettextRcmdr("Caption:")), captionField, sticky="w")
tkgrid(tklabel(optionsFrame, text=gettextRcmdr("Label:")), labelField, sticky="w")
tkgrid(tklabel(optionsFrame, text=gettextRcmdr("Digits:")), digitsField, sticky="w")
tkgrid(tklabel(optionsFrame, text=gettextRcmdr(" ")), sticky="w")
tkgrid(caption.locFrame, sticky="sw")
tkgrid(tklabel(additionalFrame, text=gettextRcmdr("Printing "), fg="blue"))
tkgrid(tklabel(additionalFrame, text=gettextRcmdr("Size:")), sizeField, sticky="w")
tkgrid(tklabel(additionalFrame, text=gettextRcmdr("Center:")), centerField, sticky="w")
tkgrid(tklabel(additionalFrame, text=gettextRcmdr("Blank NA")), naCheckBox, sticky="w")
tkgrid(tklabel(additionalFrame, text=gettextRcmdr("File:")), fileField, sticky="w")
tkgrid(tklabel(checkboxesFrame, text=gettextRcmdr("Append")), appendCheckBox, sticky="w")
tkgrid(tklabel(checkboxesFrame, text=gettextRcmdr("Preview")), visualiseCheckBox, sticky="w")
tkgrid(tklabel(checkboxesFrame, text=gettextRcmdr("Table env.")), tab.envCheckBox, sticky="w")
tkgrid(tklabel(checkboxesFrame, text=gettextRcmdr("Use 'longtable'")), longtableCheckBox, sticky="w")
tkgrid(tklabel(checkboxesFrame, text=gettextRcmdr("Landscape")), landscapeCheckBox, sticky="w")
tkgrid(tklabel(checkboxesFrame, text=gettextRcmdr("Use 'booktabs'")), booktabsCheckBox, sticky="w")
tkgrid(tklabel(checkboxesFrame, text=gettextRcmdr("Use 'ctable'")), ctableCheckBox, sticky="w")
tkgrid(tklabel(checkboxesFrame, text=gettextRcmdr("Vertical bar")), vbarCheckBox, sticky="w")
tkgrid(tklabel(checkboxesFrame, text=gettextRcmdr("No margins")), nomarginsCheckBox, sticky="w")
tkgrid(dataFrame, tklabel(top, text=" "), additionalFrame, sticky="nw")
tkgrid(optionsFrame, tklabel(top, text=" "), checkboxesFrame, sticky="nw")
tkgrid(buttonsFrame, columnspan=3, sticky="w")
dialogSuffix(rows=4, columns=3)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DiffMap.R
\name{LAFON}
\alias{LAFON}
\title{e-value estimation function
Computes best e-value according to Lafon criterion}
\usage{
LAFON(data)
}
\arguments{
\item{data}{raw data for which diffusion coordinates are calculated}
}
\description{
e-value estimation function
Computes best e-value according to Lafon criterion
}
| /man/LAFON.Rd | no_license | schaugf/DiffMap | R | false | true | 407 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DiffMap.R
\name{LAFON}
\alias{LAFON}
\title{e-value estimation function
Computes best e-value according to Lafon criterion}
\usage{
LAFON(data)
}
\arguments{
\item{data}{raw data for which diffusion coordinates are calculated}
}
\description{
e-value estimation function
Computes best e-value according to Lafon criterion
}
|
# install.packages("intergraph")
# data_graph <- read.csv("Data/graph_total.csv", header = TRUE)
# data_graph <- read.csv("Data/graph2.csv", header = TRUE)
library(igraph)

# Build and plot a directed transaction graph from an edge-list CSV whose
# columns are (in order): sender_id, receiver_id, transaction type, amount.
# data_graph <- read.csv("Data/tstgraph.csv", header = TRUE)
data_graph <- read.csv("Data/246265366/246265366.csv", header = TRUE)
data_graph$sender_id <- as.factor(data_graph$sender_id)
data_graph$receiver_id <- as.factor(data_graph$receiver_id)

# Weighted edge-list recipe:
# http://www.shizukalab.com/toolkits/sna/weighted-edgelists
el <- data_graph
el[, 1] <- as.character(el[, 1])
el[, 2] <- as.character(el[, 2])
el <- as.matrix(el)  # character matrix; numeric columns are re-parsed below
g <- graph.edgelist(el[, 1:2], directed = TRUE)
E(g)$type <- as.character(el[, 3])

# Bucket transaction amounts into display widths (bigger amount -> wider edge).
# (The raw amounts formerly assigned to E(g)$weight were dead code -- they were
# immediately overwritten by these buckets -- so that assignment, and the no-op
# numeric write-back into the character matrix, were removed.)
E(g)$weight <- ifelse(as.numeric(el[, 4]) > 1000, 5,
               ifelse(as.numeric(el[, 4]) > 500, 4,
               ifelse(as.numeric(el[, 4]) > 200, 3,
               ifelse(as.numeric(el[, 4]) > 100, 2,
               ifelse(as.numeric(el[, 4]) > 50, 1,
               ifelse(as.numeric(el[, 4]) > 10, 0.5, 0.1))))))

# One colour per transaction type (fallback orange for anything unlisted).
E(g)$color <- as.character(ifelse(data_graph$type == 'money_transfer', '#FF0000',             # red
                           ifelse(data_graph$type == 'user_certification_payment', '#FF0088', # pink
                           ifelse(data_graph$type == 'account_fund', '#8400FF',               # violet
                           ifelse(data_graph$type == 'pos_payment', '#0000FF',                # strong blue
                           ifelse(data_graph$type == 'regular_payment', '#0084FF',            # light blue
                           ifelse(data_graph$type == 'subscription_payment', '#00FFFF',       # turquoise
                           ifelse(data_graph$type == 'random_charge', '#00FF84',              # aqua green
                           ifelse(data_graph$type == 'recurring_payment', '#84FF00',          # bright green
                           ifelse(data_graph$type == 'payment_addition', '#FFFF00',
                                  '#FF8000'                                                   # orange
                           ))))))))))

# Highlight two accounts of interest; every other vertex is light blue.
V(g)$color_servicio <- as.character(ifelse(as.character(V(g)$name) == '246364511', 'red',
                                    ifelse(as.character(V(g)$name) == '226742606' | as.character(V(g)$name) == '24',
                                           'orange', 'lightblue')))
# NOTE(review): data_graph has one row per EDGE, so this vertex attribute is
# built from a vector whose length is the edge count -- verify it matches the
# vertex count before relying on it.
V(g)$nombres <- as.character(data_graph$sender_id)

# paleta_colores <- c('money_transfer' = '#FF0000', 'user_certification_payment' = '#FF0088', ...)
# plot(g, layout = layout.fruchterman.reingold, edge.width = 1, edge.color = E(g)$color, vertex.size = 1)
plot(g,
     vertex.color = V(g)$color_servicio,
     layout = layout.davidson.harel(g),
     edge.width = E(g)$weight * 2,
     edge.color = E(g)$color,
     vertex.size = 10,
     edge.arrow.size = E(g)$weight,
     # NOTE(review): the attribute assigned above is `nombres`; V(g)$names is
     # NULL, so igraph falls back to the vertex names here -- confirm which
     # labelling was intended.
     vertex.label = V(g)$names
)
# data.frame(data_graph$sender_id, E(g)$color_servicio)
# data.frame(data_graph$sender_id, V(g)$color_servicio)
| /graphs_trianulation/crea.R | no_license | laiunce/helpfull_code | R | false | false | 3,516 | r | #install.packages("intergraph")
#data_graph <- read.csv("Data/graph_total.csv", header = TRUE)
#data_graph <- read.csv("Data/graph2.csv", header = TRUE)
library(igraph)
#data_graph <- read.csv("Data/tstgraph.csv", header = TRUE)
data_graph <- read.csv("Data/246265366/246265366.csv", header = TRUE)
data_graph$sender_id<-as.factor(data_graph$sender_id)
data_graph$receiver_id<-as.factor(data_graph$receiver_id)
#http://www.shizukalab.com/toolkits/sna/weighted-edgelists
el=data_graph
el[,1]=as.character(el[,1])
el[,2]=as.character(el[,2])
el=as.matrix(el)
g=graph.edgelist(el[,1:2],directed=TRUE)
E(g)$weight=as.numeric(el[,4])
E(g)$type=as.character(el[,3])
el[,4]<-as.numeric(el[,4])
E(g)$weight<-ifelse(as.numeric(el[,4])>1000,5,ifelse(as.numeric(el[,4])>500,4,ifelse(as.numeric(el[,4])>200,3,ifelse(as.numeric(el[,4])>100,2,ifelse(as.numeric(el[,4])>50,1,ifelse(as.numeric(el[,4])>10,0.5,0.1))))))
E(g)$color <- as.character(ifelse(data_graph$type=='money_transfer','#FF0000', #rojo
ifelse(data_graph$type=='user_certification_payment','#FF0088', #rosa
ifelse(data_graph$type=='account_fund','#8400FF', #violeta
ifelse(data_graph$type=='pos_payment','#0000FF', #azul fuerte
ifelse(data_graph$type=='regular_payment','#0084FF', #celeste
ifelse(data_graph$type=='subscription_payment','#00FFFF', #turquesa
ifelse(data_graph$type=='random_charge','#00FF84', #verde agua
ifelse(data_graph$type=='recurring_payment','#84FF00', #verde fibron
ifelse(data_graph$type=='payment_addition','#FFFF00', '#FF8000' #naranja
)
)
)
)
)
)
)
)
)
)
#hacer el in
V(g)$color_servicio <- as.character(ifelse(as.character(V(g)$name)=='246364511','red',ifelse(as.character(V(g)$name)=='226742606' | as.character(V(g)$name)=='24','orange','lightblue'))) #rojo
V(g)$nombres <- as.character(data_graph$sender_id)
#paleta_colores <-c('money_transfer' = '#FF0000','user_certification_payment' = '#FF0088','account_fund' = '#8400FF','pos_payment' = '#0000FF','regular_payment' = '#0084FF','subscription_payment' = '#00FFFF','random_charge' = '#00FF84','recurring_payment' = '#84FF00','payment_addition' = '#FFFF00','cellphone_recharge'='#FF00F0')
#plot(g,layout=layout.fruchterman.reingold,edge.width=1,edge.color= E(g)$color,vertex.size=1 )
plot(g,
vertex.color=V(g)$color_servicio ,
layout=layout.davidson.harel(g),
edge.width=E(g)$weight*2,
edge.color= E(g)$color,
vertex.size=10,
edge.arrow.size=E(g)$weight,
vertex.label = V(g)$names
)
#data.frame(data_graph$sender_id,E(g)$color_servicio)
#data.frame(data_graph$sender_id,V(g)$color_servicio)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/approxGrid.R
\name{approx.grid}
\alias{approx.grid}
\title{Linear interpolate between 2 parallel grids}
\usage{
approx.grid(x, y, xout)
}
\arguments{
\item{x}{vector of 2 scalars}
\item{y}{list of 2 grids (must be equi-spaced)}
\item{xout}{scalar to interpolate at}
}
\value{
A grid of interpolated values (or NA values), with dimension matching that of either grid in y.
}
\description{
Linear interpolate between 2 parallel grids
}
| /man/approx.grid.Rd | no_license | shenan1/covariates | R | false | true | 514 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/approxGrid.R
\name{approx.grid}
\alias{approx.grid}
\title{Linear interpolate between 2 parallel grids}
\usage{
approx.grid(x, y, xout)
}
\arguments{
\item{x}{vector of 2 scalars}
\item{y}{list of 2 grids (must be equi-spaced)}
\item{xout}{scalar to interpolate at}
}
\value{
A grid of interpolated values (or NA values), with dimension matching that of either grid in y.
}
\description{
Linear interpolate between 2 parallel grids
}
|
#########################################################
# PLSC 504 -- Fall 2017
#
# Frailty Models
#
########################################################
# Load packages (install as needed), set options:
library(RCurl)
library(RColorBrewer)
library(colorspace)
library(foreign)
library(gtools)
library(boot)
library(plyr)
library(texreg)
library(statmod)
library(survival)
library(pscl)
library(smcure)
library(nltm)
library(coxme)
options(scipen = 6) # bias against scientific notation
options(digits = 2) # show fewer decimal places
########################################
# Simulate some frailty-type data...
# 40 groups of 20 observations each; every group shares a N(0,1) "frailty"
# that scales its hazard, making survival times correlated within groups.
set.seed(7222009)
G<-1:40 # "groups"
# NOTE(review): `F` masks the FALSE shorthand for the rest of this script --
# harmless here, but a good reminder to always spell out TRUE/FALSE.
F<-rnorm(40) # frailties
data<-data.frame(cbind(G,F))
data<-data[rep(1:nrow(data),each=20),]
data$X<-rbinom(nrow(data),1,0.5)
# Exponential survival times; the hazard depends on the covariate X
# (coefficient 1) and on the group frailty (coefficient 2).
data$T<-rexp(nrow(data),rate=exp(0+1*data$X+(2*data$F)))
data$C<-rbinom(nrow(data),1,0.5)
data<-data[order(data$F),]
S<-Surv(data$T,data$C)
Fcolors<-diverge_hcl(length(F))[rank(F)]
# Kaplan-Meier curves, one stratum per group, coloured by frailty size.
pdf("FrailtyKM.pdf",6,5)
par(mar=c(4,4,2,2))
plot(survfit(S~strata(data$G)),col=Fcolors,mark=20,
     xlab="ln(Time)",ylab="Survival",log="x",xlim=c(0.0001,100))
legend("bottomleft",bty="n",cex=0.9,inset=0,
       c("Reds indicate strata","with larger frailties;",
         "blues with smaller ones"))
dev.off()
# Models that ignore the group structure...
cox.noF<-coxph(S~X,data=data)
summary(cox.noF)
weib.noF<-survreg(S~X,data=data,dist="weib")
summary(weib.noF)
# ...versus the same models with a Gaussian frailty (random effect) term.
cox.F<-coxph(S~X+frailty.gaussian(F),data=data)
summary(cox.F)
weib.F<-survreg(S~X+frailty.gaussian(F),data=data,dist="weib")
summary(weib.F)
# Predicted survival plot:
#
# plot(survfit(cox.noF),log="x",mark.time=FALSE,lwd=c(3,1,1),
#      xlab="ln(Time)",ylab="Fitted Survival")
# lines(survfit(cox.F),col="red",log="x",mark.time=FALSE,lwd=c(3,1,1))
# Examples using leaders data...
# (national leaders' tenures; country is the natural grouping variable)
leadURL<-"https://raw.githubusercontent.com/PrisonRodeo/PLSC504-2017-git/master/Data/leaders.csv"
temp<-getURL(leadURL)
lead<-read.csv(textConnection(temp))
rm(temp)
lead<-lead[lead$year<2004,]
lead.S<-Surv(lead$tenstart,lead$tenure,lead$tenureend)
# Collapse the five region dummy columns (13-17) into a single factor.
Rs<-as.matrix(lead[,13:17])
lead$region<-factor((Rs %*% 1:ncol(Rs))+1,
                    labels=c("NorthAm",colnames(Rs)))
rm(Rs)
# Cox model with a gamma frailty by country code.
lead.F<-coxph(lead.S~female*region+frailty.gamma(ccode),data=lead)
summary(lead.F)
pdf("leadHats.pdf",6,5)
par(mar=c(4,4,2,2))
plot(survfit(lead.F,se.fit=TRUE),conf.int=TRUE,mark.time=FALSE,
     log="x",lwd=c(2,1,1),col="red",xlab="Time (in days)",
     ylab="Survival")
lines(survfit(lead.S~1),conf.int=FALSE,col="black",
      mark.time=FALSE,lwd=2)
legend("bottomleft",bty="n",inset=0.04,lty=1,lwd=3,col=c("red","black"),
       c("Predicted Survival","Baseline (Univariate) Survival"))
dev.off()
# Mixed-effects
# Random intercepts for country, and for gender nested within country.
lead.coxME<-coxme(lead.S~female + (1 | ccode/female),data=lead)
lead.coxME
# Stratified vs. Frailty, etc.
pdf("lead-KM.pdf",6,5)
par(mar=c(4,4,2,2))
plot(survfit(lead.S~1,id=leadid,data=lead),mark.time=FALSE,lwd=c(3,1,1),
     xlab="Time (in days)",ylab="Survival",log="x")
dev.off()
# Plot strata by country
pdf("leadKMcountries.pdf",6,5)
par(mar=c(4,4,2,2))
plot(survfit(lead.S~strata(ccode),id=leadid,data=lead),
     col=brewer.pal(9,"Set1"),log="x",mark.time=FALSE,
     xlab="Time (in days)", ylab="Survival")
dev.off()
# Plot strata by region:
pdf("leadKMregions.pdf",6,5)
par(mar=c(4,4,2,2))
plot(survfit(lead.S~strata(region),id=leadid,data=lead),
     col=brewer.pal(6,"Set1"),lwd=2,log="x",mark.time=FALSE,
     xlab="Time (in days)", ylab="Survival")
legend("bottomleft",inset=0.02,bty="n",col=brewer.pal(6,"Set1"),
       c("N. America","Latin America","Europe","Africa","Asia",
         "Middle East"),lty=1,lwd=2)
dev.off()
# Region-stratified baseline hazards, with country frailty and/or clustering.
lead.Fstrat<-coxph(lead.S~female*strata(region)+
                     frailty.gamma(ccode),data=lead)
summary(lead.Fstrat)
lead.stratCl<-coxph(lead.S~female*strata(region)+
                      cluster(ccode),data=lead)
summary(lead.stratCl)
# NOTE(review): fitted but never summarised or printed -- confirm intentional.
lead.FstratCl<-coxph(lead.S~female*strata(region)+frailty.gamma(ccode)+
                       cluster(ccode),data=lead)
# boom
# PLSC 504 -- Fall 2017
#
# Frailty Models
#
########################################################
# Load packages (install as needed), set options:
library(RCurl)
library(RColorBrewer)
library(colorspace)
library(foreign)
library(gtools)
library(boot)
library(plyr)
library(texreg)
library(statmod)
library(survival)
library(pscl)
library(smcure)
library(nltm)
library(coxme)
options(scipen = 6) # bias against scientific notation
options(digits = 2) # show fewer decimal places
########################################
# Simulate some frailty-type data...
set.seed(7222009)
G<-1:40 # "groups"
F<-rnorm(40) # frailties
data<-data.frame(cbind(G,F))
data<-data[rep(1:nrow(data),each=20),]
data$X<-rbinom(nrow(data),1,0.5)
data$T<-rexp(nrow(data),rate=exp(0+1*data$X+(2*data$F)))
data$C<-rbinom(nrow(data),1,0.5)
data<-data[order(data$F),]
S<-Surv(data$T,data$C)
Fcolors<-diverge_hcl(length(F))[rank(F)]
pdf("FrailtyKM.pdf",6,5)
par(mar=c(4,4,2,2))
plot(survfit(S~strata(data$G)),col=Fcolors,mark=20,
xlab="ln(Time)",ylab="Survival",log="x",xlim=c(0.0001,100))
legend("bottomleft",bty="n",cex=0.9,inset=0,
c("Reds indicate strata","with larger frailties;",
"blues with smaller ones"))
dev.off()
cox.noF<-coxph(S~X,data=data)
summary(cox.noF)
weib.noF<-survreg(S~X,data=data,dist="weib")
summary(weib.noF)
cox.F<-coxph(S~X+frailty.gaussian(F),data=data)
summary(cox.F)
weib.F<-survreg(S~X+frailty.gaussian(F),data=data,dist="weib")
summary(weib.F)
# Predicted survival plot:
#
# plot(survfit(cox.noF),log="x",mark.time=FALSE,lwd=c(3,1,1),
# xlab="ln(Time)",ylab="Fitted Survival")
# lines(survfit(cox.F),col="red",log="x",mark.time=FALSE,lwd=c(3,1,1))
# Examples using leaders data...
leadURL<-"https://raw.githubusercontent.com/PrisonRodeo/PLSC504-2017-git/master/Data/leaders.csv"
temp<-getURL(leadURL)
lead<-read.csv(textConnection(temp))
rm(temp)
lead<-lead[lead$year<2004,]
lead.S<-Surv(lead$tenstart,lead$tenure,lead$tenureend)
Rs<-as.matrix(lead[,13:17])
lead$region<-factor((Rs %*% 1:ncol(Rs))+1,
labels=c("NorthAm",colnames(Rs)))
rm(Rs)
lead.F<-coxph(lead.S~female*region+frailty.gamma(ccode),data=lead)
summary(lead.F)
pdf("leadHats.pdf",6,5)
par(mar=c(4,4,2,2))
plot(survfit(lead.F,se.fit=TRUE),conf.int=TRUE,mark.time=FALSE,
log="x",lwd=c(2,1,1),col="red",xlab="Time (in days)",
ylab="Survival")
lines(survfit(lead.S~1),conf.int=FALSE,col="black",
mark.time=FALSE,lwd=2)
legend("bottomleft",bty="n",inset=0.04,lty=1,lwd=3,col=c("red","black"),
c("Predicted Survival","Baseline (Univariate) Survival"))
dev.off()
# Mixed-effects
lead.coxME<-coxme(lead.S~female + (1 | ccode/female),data=lead)
lead.coxME
# Stratified vs. Frailty, etc.
pdf("lead-KM.pdf",6,5)
par(mar=c(4,4,2,2))
plot(survfit(lead.S~1,id=leadid,data=lead),mark.time=FALSE,lwd=c(3,1,1),
xlab="Time (in days)",ylab="Survival",log="x")
dev.off()
# Plot strata by country
pdf("leadKMcountries.pdf",6,5)
par(mar=c(4,4,2,2))
plot(survfit(lead.S~strata(ccode),id=leadid,data=lead),
col=brewer.pal(9,"Set1"),log="x",mark.time=FALSE,
xlab="Time (in days)", ylab="Survival")
dev.off()
# Plot strata by region:
pdf("leadKMregions.pdf",6,5)
par(mar=c(4,4,2,2))
plot(survfit(lead.S~strata(region),id=leadid,data=lead),
col=brewer.pal(6,"Set1"),lwd=2,log="x",mark.time=FALSE,
xlab="Time (in days)", ylab="Survival")
legend("bottomleft",inset=0.02,bty="n",col=brewer.pal(6,"Set1"),
c("N. America","Latin America","Europe","Africa","Asia",
"Middle East"),lty=1,lwd=2)
dev.off()
lead.Fstrat<-coxph(lead.S~female*strata(region)+
frailty.gamma(ccode),data=lead)
summary(lead.Fstrat)
lead.stratCl<-coxph(lead.S~female*strata(region)+
cluster(ccode),data=lead)
summary(lead.stratCl)
lead.FstratCl<-coxph(lead.S~female*strata(region)+frailty.gamma(ccode)+
cluster(ccode),data=lead)
# boom
|
#' Check OTP parameters
#'
#' Validates arguments passed by the exported otpr functions before an API
#' request is made. The caller's arguments are captured by re-evaluating the
#' call as `list(...)`, each recognised argument is checked with checkmate,
#' and all type/range failures are reported together in a single error.
#'
#' @param otpcon Object of otpcon class
#' @param ... Other optional parameters
#' @return No useful return value; called for its side effect of raising an
#'   error when any supplied argument is invalid.
#' @keywords internal
#' @importFrom utils hasName
otp_check_params <- function (otpcon, ...)
{
  # Capture the caller's arguments by name: rewrite the original call so its
  # function is list(), then evaluate it in the parent frame.
  call <- sys.call()
  call[[1]] <- as.name('list')
  args <- eval.parent(call)
  # Collect every assertion failure so they can be reported in one message.
  coll <- checkmate::makeAssertCollection()
  # all functions must provide otpcon.
  checkmate::assert_class(otpcon, "otpconnect", add = coll)
  # fromPlace/toPlace are length-2 numeric coordinate pairs, in degrees.
  if (hasName(args, "fromPlace")) {
    checkmate::assert_numeric(
      args[["fromPlace"]],
      lower = -180,
      upper = 180,
      len = 2,
      add = coll
    )
  }
  if (hasName(args, "toPlace")) {
    checkmate::assert_numeric(
      args[["toPlace"]],
      lower = -180,
      upper = 180,
      len = 2,
      add = coll
    )
  }
  if (hasName(args, "maxWalkDistance")) {
    checkmate::assert_number(args[["maxWalkDistance"]], lower = 0, add = coll)
  }
  if (hasName(args, "arriveBy")) {
    checkmate::assert_logical(args[["arriveBy"]], add = coll)
  }
  if (hasName(args, "walkReluctance")) {
    checkmate::assert_number(args[["walkReluctance"]], lower = 0, add = coll)
  }
  # waitReluctance must lie between 1 and walkReluctance (which defaults to 2
  # when not supplied); the coupled bound cannot be expressed by a single
  # assert_*, hence the manual test + push of a custom message.
  if (hasName(args, "waitReluctance")) {
    if (isFALSE(checkmate::testNumber(
      args[["waitReluctance"]],
      lower = 1,
      upper = ifelse(hasName(args, "walkReluctance"), args[["walkReluctance"]], 2)
    ))) {
      coll$push(
        "waitReluctance should be greater than 1 and less than walkReluctance (default = 2). See OTP API PlannerResource documentation"
      )
    }
  }
  if (hasName(args, "transferPenalty")) {
    checkmate::assert_integerish(args[["transferPenalty"]], lower = 0, add = coll)
  }
  if (hasName(args, "minTransferTime")) {
    checkmate::assert_integerish(args[["minTransferTime"]], lower = 0, add = coll)
  }
  if (hasName(args, "cutoffs")) {
    checkmate::assert_integerish(args[["cutoffs"]], lower = 0, add = coll)
  }
  if (hasName(args, "batch")) {
    checkmate::assert_logical(args[["batch"]], add = coll)
  }
  # Raise one combined error if any of the collected checks failed.
  checkmate::reportAssertions(coll)
  # check date and time are valid
  # (only reached when the typed checks above pass; these use plain stop()
  # rather than the assertion collection)
  if (hasName(args, "date")) {
    if (otp_is_date(args[["date"]]) == FALSE) {
      stop("date must be in the format mm-dd-yyyy")
    }
  }
  if (hasName(args, "time")) {
    if (otp_is_time(args[["time"]]) == FALSE) {
      stop("time must be in the format hh:mm:ss")
    }
  }
}
| /R/otp_check_params.R | permissive | marcusyoung/otpr | R | false | false | 2,394 | r | #' Check OTP parameters
#'
#' @param otpcon Object of otpcon class
#' @param ... Other optional parameters
#' @keywords internal
#' @importFrom utils hasName
otp_check_params <- function (otpcon, ...)
{
call <- sys.call()
call[[1]] <- as.name('list')
args <- eval.parent(call)
coll <- checkmate::makeAssertCollection()
# all functions must provide otpcon.
checkmate::assert_class(otpcon, "otpconnect", add = coll)
if (hasName(args, "fromPlace")) {
checkmate::assert_numeric(
args[["fromPlace"]],
lower = -180,
upper = 180,
len = 2,
add = coll
)
}
if (hasName(args, "toPlace")) {
checkmate::assert_numeric(
args[["toPlace"]],
lower = -180,
upper = 180,
len = 2,
add = coll
)
}
if (hasName(args, "maxWalkDistance")) {
checkmate::assert_number(args[["maxWalkDistance"]], lower = 0, add = coll)
}
if (hasName(args, "arriveBy")) {
checkmate::assert_logical(args[["arriveBy"]], add = coll)
}
if (hasName(args, "walkReluctance")) {
checkmate::assert_number(args[["walkReluctance"]], lower = 0, add = coll)
}
if (hasName(args, "waitReluctance")) {
if (isFALSE(checkmate::testNumber(
args[["waitReluctance"]],
lower = 1,
upper = ifelse(hasName(args, "walkReluctance"), args[["walkReluctance"]], 2)
))) {
coll$push(
"waitReluctance should be greater than 1 and less than walkReluctance (default = 2). See OTP API PlannerResource documentation"
)
}
}
if (hasName(args, "transferPenalty")) {
checkmate::assert_integerish(args[["transferPenalty"]], lower = 0, add = coll)
}
if (hasName(args, "minTransferTime")) {
checkmate::assert_integerish(args[["minTransferTime"]], lower = 0, add = coll)
}
if (hasName(args, "cutoffs")) {
checkmate::assert_integerish(args[["cutoffs"]], lower = 0, add = coll)
}
if (hasName(args, "batch")) {
checkmate::assert_logical(args[["batch"]], add = coll)
}
checkmate::reportAssertions(coll)
# check date and time are valid
if (hasName(args, "date")) {
if (otp_is_date(args[["date"]]) == FALSE) {
stop("date must be in the format mm-dd-yyyy")
}
}
if (hasName(args, "time")) {
if (otp_is_time(args[["time"]]) == FALSE) {
stop("time must be in the format hh:mm:ss")
}
}
}
|
# plot1.R -- histogram of Global Active Power for 2007-02-01 and 2007-02-02,
# written to plot1.png.
#
# "?" marks missing readings in the source file; declaring it via na.strings
# lets read.table parse Global_active_power as numeric directly, replacing
# the factor -> levels -> as.numeric round-trip.
pc <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                 na.strings = "?")
# Keep only the two target dates.
pc <- pc[pc$Date == "1/2/2007" | pc$Date == "2/2/2007", ]
# Drop missing readings from the column of interest.
pc2 <- pc$Global_active_power[!is.na(pc$Global_active_power)]
# Write straight to the PNG device instead of dev.copy()-ing the screen
# device: dev.copy depends on an open interactive device and inherits its
# sizing, which is fragile in non-interactive sessions.
png(file = "plot1.png", width = 480, height = 480)
hist(pc2, col = "red", xlab = "Global Active Power (kilowatts)",
     main = "Global Active Power")
dev.off()
# Read the full power-consumption file, then keep only the two target dates.
pc <- read.table("household_power_consumption.txt", header=TRUE, sep=";")
pc <- pc[(pc$Date == "1/2/2007" | pc$Date == "2/2/2007"),]
# Select the column of interest and drop "?" entries ("?" marks missing readings)
pc2 <- pc[pc$Global_active_power != "?", "Global_active_power"]
# Convert factor to numeric via its levels (the column was read as a factor)
pc2 <- as.numeric(levels(pc2)[pc2])
# Draw the histogram to the screen, then copy it to a png file.
# NOTE(review): dev.copy() needs an open device and inherits its sizing;
# writing with png() directly would be more robust non-interactively -- TODO confirm.
hist(pc2, col="red", xlab="Global Active Power (kilowatts)", main="Global Active Power")
dev.copy(png, file="plot1.png")
dev.off()
require(digest)
require(stringi)
require(data.table)
################################################################################################
#
# 01. Loading of benchmark data sets
#
################################################################################################
# 01b. Get text from randomly selected tweets
################################################################################################
# Load the benchmark tweets. NOTE(review): the SHA-256 digest comparisons
# below only PRINT TRUE/FALSE -- they do not stop the script if the data
# files differ from the expected versions.
tweets <- readLines('data/tweets.txt', encoding = 'UTF-8')
# verify checksum of loaded lines
digest(paste0(tweets, collapse = '||'),
       algo='sha256',
       serialize=F)==
  "7fa3bf921c393fe7009bc60971b2bb8396414e7602bb4f409bed78c7192c30f4"
# 01c. Get text from randomly selected blog descriptions
################################################################################################
# make sure we can read it back in
blogs <- readLines('data/blogs.txt', encoding = 'UTF-8')
# verify checksum of loaded lines
digest(paste0(blogs, collapse = '||'),
       algo='sha256',
       serialize=F)==
  "14b3c593e543eb8b2932cf00b646ed653e336897a03c82098b725e6e1f9b7aa2"
################################################################################################
#
# 02. Define the functions used for benchmarking
#
################################################################################################
# 02a. Pre-processing functions
################################################################################################
# split.sentence
# Returns a matrix containing in column i the part of the line before the ith word (sentence)
# and the ith word (nextWord).
# The function is used in benchmark to generate and evaluate predictions for the partial lines.
# Returns a 2-row character matrix with one column per word of `line`:
# row "sentence" holds the text preceding the i-th word, and row "nextWord"
# holds the i-th word itself (lower-cased). benchmark() uses this to query
# the prediction function once per word of each test sentence.
split.sentence <- compiler::cmpfun(function(line) {
  require(stringi)
  # append a space to the sentence (to make sure we always create one result
  # with only the last word missing)
  sent <- paste0(line, ' ')
  # BUG FIX: locate separators (and take substrings below) in `sent`, not
  # `line`. The original used `line`, so the appended space was ignored:
  # the final word of every sentence was never produced as a "nextWord",
  # and single-word lines yielded NA columns.
  sep <- stri_locate_all_regex(sent,
                               pattern = '[^\\w\'@#\u2018\u2019\u201b]+',
                               omit_empty=T,
                               case_insensitive=T)[[1]]
  sapply(seq_len(nrow(sep)),
         function(i) {
           c(sentence=ifelse(i>1, substr(sent, 1, sep[i-1,2]), ''),
             nextWord=tolower(substr(sent, max(sep[i-1,2]+1, 1), min(nchar(sent), sep[i,1]-1)))
           )
         })
}, options=list(optimize=3))
# 02b. Benchmarking function
################################################################################################
# benchmark
# Evaluates the performance of a next word prediction algorithm based on the provided test data-
# set(s).
#
# Parameters
# FUN Function that produces the next word prediction. The function should take a single
# character value as first input and return a vector of character values represen-
# ting the top-3 predictions (with the 1st value being the first prediction).
# ... Additional parameters to pass to FUN.
# sent.list Named list of character vectors containing the text lines used for the benchmark.
# ext.output If TRUE, return additional details about the R environment and loaded packages
# after completing the benchmark.
benchmark <- compiler::cmpfun(function(FUN, ..., sent.list, ext.output=T) {
  require(stringi)
  require(digest)
  require(data.table)
  # One result row per named data set: a prediction is requested for every
  # word of every sentence and scored against the true next word.
  result <- rbindlist(lapply(names(sent.list),
                             function(list.name) {
                               sentences <- sent.list[[list.name]]
                               score <- 0
                               max.score <-0
                               hit.count.top3 <- 0
                               hit.count.top1 <- 0
                               total.count <- 0
                               time <- system.time({
                                 for (sent in sentences) {
                                   split <- split.sentence(sent[1])
                                   # 3 points are attainable per prediction (a top-1 hit).
                                   max.score <- max.score + ncol(split)*3
                                   total.count <- total.count + ncol(split)
                                   # rank = position (1..3) of the true word within the
                                   # top-3 predictions returned by FUN, or 4 on a miss.
                                   rank <- sapply(seq_len(ncol(split)),
                                                  function(i) {
                                                    min(which(FUN(split[1,i], ...)==split[2,i]),4)
                                                  })
                                   # Scoring: rank 1 -> 3 pts, rank 2 -> 2, rank 3 -> 1, miss -> 0.
                                   score <- score + sum(4-rank)
                                   hit.count.top3 <- hit.count.top3 + sum(rank<4)
                                   hit.count.top1 <- hit.count.top1 + sum(rank==1)
                                 }
                               })
                               list('list.name' = list.name,
                                    'line.count' = length(sentences),
                                    'word.count' = sum(stri_count_words(sentences)),
                                    'hash' = digest(paste0(sentences, collapse = '||'), algo='sha256', serialize=F),
                                    'score' = score,
                                    'max.score' = max.score,
                                    'hit.count.top3' = hit.count.top3,
                                    'hit.count.top1' = hit.count.top1,
                                    'total.count' = total.count,
                                    'total.runtime' = time[3]
                               )
                             }), use.names=T)
  setkey(result, list.name)
  # The overall scores are calculated weighting each data set equally (independent of the
  # number of lines in each dataset).
  overall.score.percent = 100 * result[,sum(score/max.score)/.N]
  overall.precision.top3 = 100 * result[,sum(hit.count.top3/total.count)/.N]
  overall.precision.top1 = 100 * result[,sum(hit.count.top1/total.count)/.N]
  average.runtime = 1000 * result[,sum(total.runtime)/sum(total.count)]
  number.of.predictions = result[,sum(total.count)]
  # Approximate memory footprint: total size of every object currently in
  # the GLOBAL environment (not just this call), reported in MB.
  total.mem.used = sum(unlist(lapply(ls(.GlobalEnv),
                                     function(x) {
                                       object.size(get(x,
                                                       envir = .GlobalEnv,
                                                       inherits = FALSE))
                                     })))/(1024^2)
  cat(sprintf(paste0('Overall top-3 score: %.2f %%\n',
                     'Overall top-1 precision: %.2f %%\n',
                     'Overall top-3 precision: %.2f %%\n',
                     'Average runtime: %.2f msec\n',
                     'Number of predictions: %d\n',
                     'Total memory used: %.2f MB\n'),
              overall.score.percent,
              overall.precision.top1,
              overall.precision.top3,
              average.runtime,
              number.of.predictions,
              total.mem.used
  ))
  cat('\nDataset details\n')
  # Per-dataset breakdown.
  for (p.list.name in result$list.name) {
    res <- result[list(p.list.name)]
    cat(sprintf(paste0(' Dataset "%s" (%d lines, %d words, hash %s)\n',
                       ' Score: %.2f %%, Top-1 precision: %.2f %%, Top-3 precision: %.2f %%\n'
    ),
    p.list.name,
    res$line.count,
    res$word.count,
    res$hash,
    100 * res$score/res$max.score,
    100 * res$hit.count.top1/res$total.count,
    100 * res$hit.count.top3/res$total.count
    ))
  }
  # Optionally report R version and package state for reproducibility.
  if (ext.output==T) {
    packages <- sort(stri_replace_first_fixed(search()[stri_detect_regex(search(),
                                                                         '^package:')],
                                              'package:', ''))
    cat(sprintf(paste0('\n\n%s, platform %s\n',
                       'Attached non-base packages: %s\n',
                       'Unattached non-base packages: %s'
    ),
    sessionInfo()$R.version$version.string,
    sessionInfo()$platform,
    paste0(sapply(sessionInfo()$otherPkgs,
                  function(pkg) {
                    paste0(pkg$Package, ' (v', pkg$Version, ')')
                  }),
           collapse = ', '),
    paste0(sapply(sessionInfo()$loadedOnly,
                  function(pkg) {
                    paste0(pkg$Package, ' (v', pkg$Version, ')')
                  }),
           collapse = ', ')
    ))
  }
}, options=list(optimize =3))
################################################################################################
#
# 03. Define the wrapper function to be called by benchmark
#
################################################################################################
# Wrapper handed to benchmark(). NOTE(review): the original template comment
# described a constant baseline ("the three most frequent English words"),
# but this version delegates to `prediction.function`, which must already be
# defined in the global environment before this script is sourced -- confirm.
predict.baseline <- function(x){prediction.function(x)}
################################################################################################
#
# 04. Perform the benchmark
#
################################################################################################
# Run the benchmark over both data sets and print extended session details.
benchmark(predict.baseline,
          # additional parameters to be passed to the prediction function can be inserted here
          sent.list = list('tweets' = tweets,
                           'blogs' = blogs),
          ext.output = T)
| /Johns.Hopkins.Data.Science.Specialization/NLP.project/R.Project/benchmark.R | no_license | douglas-thoms/Coursera | R | false | false | 9,454 | r | require(digest)
require(stringi)
require(data.table)
################################################################################################
#
# 01. Loading of benchmark data sets
#
################################################################################################
# 01b. Get text from randomly selected tweets
################################################################################################
tweets <- readLines('data/tweets.txt', encoding = 'UTF-8')
# verify checksum of loaded lines
digest(paste0(tweets, collapse = '||'),
algo='sha256',
serialize=F)==
"7fa3bf921c393fe7009bc60971b2bb8396414e7602bb4f409bed78c7192c30f4"
# 01c. Get text from randomly selected blog descriptions
################################################################################################
# make sure we can read it back in
blogs <- readLines('data/blogs.txt', encoding = 'UTF-8')
# verify checksum of loaded lines
digest(paste0(blogs, collapse = '||'),
algo='sha256',
serialize=F)==
"14b3c593e543eb8b2932cf00b646ed653e336897a03c82098b725e6e1f9b7aa2"
################################################################################################
#
# 02. Define the functions used for benchmarking
#
################################################################################################
# 02a. Pre-processing functions
################################################################################################
# split.sentence
# Returns a matrix containing in column i the part of the line before the ith word (sentence)
# and the ith word (nextWord).
# The function is used in benchmark to generate and evaluate predictions for the partial lines.
split.sentence <- compiler::cmpfun(function(line) {
require(stringi)
# append a space to the sentence (to make sure we always create one result with only the
# last word missing)
sent <- paste0(line, ' ')
sep <- stri_locate_all_regex(line,
pattern = '[^\\w\'@#\u2018\u2019\u201b]+',
omit_empty=T,
case_insensitive=T)[[1]]
sapply(seq_len(nrow(sep)),
function(i) {
c(sentence=ifelse(i>1, substr(line, 1, sep[i-1,2]), ''),
nextWord=tolower(substr(line, max(sep[i-1,2]+1, 1), min(nchar(line), sep[i,1]-1)))
)
})
}, options=list(optimize=3))
# 02b. Benchmarking function
################################################################################################
# benchmark
# Evaluates the performance of a next word prediction algorithm based on the provided test data-
# set(s).
#
# Parameters
# FUN Function that produces the next word prediction. The function should take a single
# character value as first input and return a vector of character values represen-
# ting the top-3 predictions (with the 1st value being the first prediction).
# ... Additional parameters to pass to FUN.
# sent.list Named list of character vectors containing the text lines used for the benchmark.
# ext.output If TRUE, return additional details about the R environment and loaded packages
# after completing the benchmark.
benchmark <- compiler::cmpfun(function(FUN, ..., sent.list, ext.output=T) {
require(stringi)
require(digest)
require(data.table)
result <- rbindlist(lapply(names(sent.list),
function(list.name) {
sentences <- sent.list[[list.name]]
score <- 0
max.score <-0
hit.count.top3 <- 0
hit.count.top1 <- 0
total.count <- 0
time <- system.time({
for (sent in sentences) {
split <- split.sentence(sent[1])
max.score <- max.score + ncol(split)*3
total.count <- total.count + ncol(split)
rank <- sapply(seq_len(ncol(split)),
function(i) {
min(which(FUN(split[1,i], ...)==split[2,i]),4)
})
score <- score + sum(4-rank)
hit.count.top3 <- hit.count.top3 + sum(rank<4)
hit.count.top1 <- hit.count.top1 + sum(rank==1)
}
})
list('list.name' = list.name,
'line.count' = length(sentences),
'word.count' = sum(stri_count_words(sentences)),
'hash' = digest(paste0(sentences, collapse = '||'), algo='sha256', serialize=F),
'score' = score,
'max.score' = max.score,
'hit.count.top3' = hit.count.top3,
'hit.count.top1' = hit.count.top1,
'total.count' = total.count,
'total.runtime' = time[3]
)
}), use.names=T)
setkey(result, list.name)
# The overall scores are calculated weighting each data set equally (independent of the
# number of lines in each dataset).
overall.score.percent = 100 * result[,sum(score/max.score)/.N]
overall.precision.top3 = 100 * result[,sum(hit.count.top3/total.count)/.N]
overall.precision.top1 = 100 * result[,sum(hit.count.top1/total.count)/.N]
average.runtime = 1000 * result[,sum(total.runtime)/sum(total.count)]
number.of.predictions = result[,sum(total.count)]
total.mem.used = sum(unlist(lapply(ls(.GlobalEnv),
function(x) {
object.size(get(x,
envir = .GlobalEnv,
inherits = FALSE))
})))/(1024^2)
cat(sprintf(paste0('Overall top-3 score: %.2f %%\n',
'Overall top-1 precision: %.2f %%\n',
'Overall top-3 precision: %.2f %%\n',
'Average runtime: %.2f msec\n',
'Number of predictions: %d\n',
'Total memory used: %.2f MB\n'),
overall.score.percent,
overall.precision.top1,
overall.precision.top3,
average.runtime,
number.of.predictions,
total.mem.used
))
cat('\nDataset details\n')
for (p.list.name in result$list.name) {
res <- result[list(p.list.name)]
cat(sprintf(paste0(' Dataset "%s" (%d lines, %d words, hash %s)\n',
' Score: %.2f %%, Top-1 precision: %.2f %%, Top-3 precision: %.2f %%\n'
),
p.list.name,
res$line.count,
res$word.count,
res$hash,
100 * res$score/res$max.score,
100 * res$hit.count.top1/res$total.count,
100 * res$hit.count.top3/res$total.count
))
}
if (ext.output==T) {
packages <- sort(stri_replace_first_fixed(search()[stri_detect_regex(search(),
'^package:')],
'package:', ''))
cat(sprintf(paste0('\n\n%s, platform %s\n',
'Attached non-base packages: %s\n',
'Unattached non-base packages: %s'
),
sessionInfo()$R.version$version.string,
sessionInfo()$platform,
paste0(sapply(sessionInfo()$otherPkgs,
function(pkg) {
paste0(pkg$Package, ' (v', pkg$Version, ')')
}),
collapse = ', '),
paste0(sapply(sessionInfo()$loadedOnly,
function(pkg) {
paste0(pkg$Package, ' (v', pkg$Version, ')')
}),
collapse = ', ')
))
}
}, options=list(optimize =3))
################################################################################################
#
# 03. Define the wrapper function to be called by benchmark
#
################################################################################################
# As an example, we create a very simple baseline algorithm which always returns
# the three most frequent English words.
predict.baseline <- function(x){prediction.function(x)}
################################################################################################
#
# 04. Perform the benchmark
#
################################################################################################
benchmark(predict.baseline,
# additional parameters to be passed to the prediction function can be inserted here
sent.list = list('tweets' = tweets,
'blogs' = blogs),
ext.output = T)
|
#09/10/2020
#NAIVE BAYES
# DataCamp "Supervised Learning in R" study notes on Naive Bayes.
# Assumes the course data frames (where9am, locations, thursday9am,
# saturday9am, weekday_afternoon, weekday_evening, weekend_afternoon) are
# already loaded in the workspace -- TODO confirm before running standalone.
# The bare string literals below are study notes that print when sourced.
library(naivebayes)
"Computing probabilities"
# Compute P(A)
str(where9am)
unique(where9am$location)
# P(A): marginal probability of being at the office.
p_A <- nrow(subset(where9am, location=="office"))/nrow(where9am)
p_A
# Compute P(B)
unique(where9am$daytype)
# P(B): marginal probability of a weekday observation.
p_B <- nrow(subset(where9am, daytype=="weekday"))/nrow(where9am)
p_B
# Compute the observed P(A and B)
p_AB <- nrow(subset(where9am, (daytype=="weekday" &
location=="office")
))/nrow(where9am)
p_AB
# Compute P(A | B) and print its value
# Conditional probability by definition: P(A|B) = P(A and B) / P(B).
p_A_given_B <- p_AB/p_B
p_A_given_B
"A simple Naive Bayes location model
the probability that Brett is at work or at
home at 9am is highly dependent on whether
it is the weekend or a weekday."
# Load the naivebayes package
library(naivebayes)
# Build the location prediction model
# Single-predictor model: location as a function of daytype only.
locmodel <- naive_bayes(location~daytype, data = where9am)
# Predict Thursday's 9am location
thursday9am #daytype - weekday
str(thursday9am) #data.frame, factor w 2 levels weekday weekend
predict(locmodel, newdata = thursday9am)
# Predict Saturdays's 9am location
saturday9am #weekend
predict(locmodel, saturday9am)
"Examining 'raw' probabilities"
# Examine the location prediction model
locmodel
# Obtain the predicted probabilities for Thursday at 9am
predict(locmodel, thursday9am , type = "prob")
# Obtain the predicted probabilities for Saturday at 9am
predict(locmodel, saturday9am , type = "prob")
"Understanding independence
Understanding the idea of event independence will become important
as you learn more about how 'naive' Bayes got its name.
Which of the following is true about independent events?
Knowing the outcome of one event does not help predict the other.
One event is independent of another if knowing one doesn't give you
information about how likely the other is.
For example, knowing if it's raining in New York
doesn't help you predict the weather in San Francisco.
The weather events in the two cities are independent of each other.
"
"The Naive Bayes algorithm got its name because it makes a
'naive' assumption about event independence.
What is the purpose of making this assumption?
The joint probability of independent events can be computed much
more simply by multiplying their individual probabilities.
"
"A more sophisticated location model
The locations dataset records Brett's location every hour for 13
weeks. Each hour, the tracking information includes the daytype
(weekend or weekday) as well as the hourtype (morning, afternoon,
evening, or night)."
# Build a NB model of location
str(locations)
# Two-predictor model; naive_bayes treats daytype and hourtype as
# conditionally independent given the location.
locmodel <- naive_bayes(location~daytype+hourtype, data=locations)
# Predict Brett's location on a weekday afternoon
str(weekday_afternoon)
predict(locmodel, weekday_afternoon)
# Predict Brett's location on a weekday evening
predict(locmodel, weekday_evening)
"Preparing for unforeseen circumstances
While Brett was tracking his location over 13 weeks, he never went
into the office during the weekend. Consequently, the joint
probability of P(office and weekend) = 0.
Explore how this impacts the predicted probability that Brett may go
to work on the weekend in the future. Additionally, you can see how
using the Laplace correction will allow a small chance for these
types of unforeseen circumstances.
"
# Observe the predicted probabilities for a weekend afternoon
predict(locmodel, weekend_afternoon, type="prob")
# Build a new model using the Laplace correction
# laplace = 1 adds one pseudo-count per outcome so unseen combinations
# get a small non-zero probability instead of exactly zero.
locmodel2 <- naive_bayes(location~daytype+hourtype,
data=locations, laplace=1)
# Observe the new predicted probabilities for a weekend afternoon
predict(locmodel2, weekend_afternoon, type="prob")
"Adding the Laplace correction allows for the small chance that
Brett might go to the office on the weekend in the future.
The small probability added to every outcome ensures that they are
all possible even if never previously observed."
"Handling numeric predictors
Numeric data is often binned before it is used with Naive Bayes.
ex
age values recoded as 'child' or 'adult' categories
geographic coordinates recoded into geographic regions (West, East, etc.)
test scores divided into four groups by percentile"
| /Naive_Bayes.R | no_license | camaral82/datacamp | R | false | false | 4,337 | r | #09/10/2020
#NAIVE BAYES
library(naivebayes)
"Computing probabilities"
# Compute P(A)
str(where9am)
unique(where9am$location)
p_A <- nrow(subset(where9am, location=="office"))/nrow(where9am)
p_A
# Compute P(B)
unique(where9am$daytype)
p_B <- nrow(subset(where9am, daytype=="weekday"))/nrow(where9am)
p_B
# Compute the observed P(A and B)
p_AB <- nrow(subset(where9am, (daytype=="weekday" &
location=="office")
))/nrow(where9am)
p_AB
# Compute P(A | B) and print its value
p_A_given_B <- p_AB/p_B
p_A_given_B
"A simple Naive Bayes location model
the probability that Brett is at work or at
home at 9am is highly dependent on whether
it is the weekend or a weekday."
# Load the naivebayes package
library(naivebayes)
# Build the location prediction model
locmodel <- naive_bayes(location~daytype, data = where9am)
# Predict Thursday's 9am location
thursday9am #daytype - weekday
str(thursday9am) #data.frame, factor w 2 levels weekday weekend
predict(locmodel, newdata = thursday9am)
# Predict Saturdays's 9am location
saturday9am #weekend
predict(locmodel, saturday9am)
"Examining 'raw' probabilities"
# Examine the location prediction model
locmodel
# Obtain the predicted probabilities for Thursday at 9am
predict(locmodel, thursday9am , type = "prob")
# Obtain the predicted probabilities for Saturday at 9am
predict(locmodel, saturday9am , type = "prob")
"Understanding independence
Understanding the idea of event independence will become important
as you learn more about how 'naive' Bayes got its name.
Which of the following is true about independent events?
Knowing the outcome of one event does not help predict the other.
One event is independent of another if knowing one doesn't give you
information about how likely the other is.
For example, knowing if it's raining in New York
doesn't help you predict the weather in San Francisco.
The weather events in the two cities are independent of each other.
"
"The Naive Bayes algorithm got its name because it makes a
'naive' assumption about event independence.
What is the purpose of making this assumption?
The joint probability of independent events can be computed much
more simply by multiplying their individual probabilities.
"
"A more sophisticated location model
The locations dataset records Brett's location every hour for 13
weeks. Each hour, the tracking information includes the daytype
(weekend or weekday) as well as the hourtype (morning, afternoon,
evening, or night)."
# Build a NB model of location
str(locations)
locmodel <- naive_bayes(location~daytype+hourtype, data=locations)
# Predict Brett's location on a weekday afternoon
str(weekday_afternoon)
predict(locmodel, weekday_afternoon)
# Predict Brett's location on a weekday evening
predict(locmodel, weekday_evening)
"Preparing for unforeseen circumstances
While Brett was tracking his location over 13 weeks, he never went
into the office during the weekend. Consequently, the joint
probability of P(office and weekend) = 0.
Explore how this impacts the predicted probability that Brett may go
to work on the weekend in the future. Additionally, you can see how
using the Laplace correction will allow a small chance for these
types of unforeseen circumstances.
"
# Observe the predicted probabilities for a weekend afternoon
predict(locmodel, weekend_afternoon, type="prob")
# Build a new model using the Laplace correction
locmodel2 <- naive_bayes(location~daytype+hourtype,
data=locations, laplace=1)
# Observe the new predicted probabilities for a weekend afternoon
predict(locmodel2, weekend_afternoon, type="prob")
"Adding the Laplace correction allows for the small chance that
Brett might go to the office on the weekend in the future.
The small probability added to every outcome ensures that they are
all possible even if never previously observed."
"Handling numeric predictors
Numeric data is often binned before it is used with Naive Bayes.
ex
age values recoded as 'child' or 'adult' categories
geographic coordinates recoded into geographic regions (West, East, etc.)
test scores divided into four groups by percentile"
|
### Assignment : diagonalize_matrix() ###
context("diagonalize_matrix()")
test_that("Assignment: diagonalize_matrix", {
  X <- matrix(c(-1,-3,-3,3,5,3,-1,-1,1), ncol = 3)

  # Existence and type of the student's function.
  # expect_true() replaces the deprecated expect_that(..., is_true())
  # (removed in testthat 3e); behavior of the checks is unchanged.
  expect_true(exists("diagonalize_matrix"),
              info = "Fel: diagonalize_matrix() saknas.")
  expect_true(is.function(diagonalize_matrix),
              info = "Fel: diagonalize_matrix ska vara en funktion.")
  expect_self_contained(object = diagonalize_matrix,
                        "Fel: Funktionen har fria variabler")
  expect_true(all(names(formals(diagonalize_matrix)) %in% c("X")),
              info = "Fel: Argumenten i funktionen har felaktiga namn.")

  # Return-value structure: a list with matrix elements D, P and Pinv.
  # Computed once instead of re-running the function per expectation.
  res <- diagonalize_matrix(X)
  expect_true(is.list(res),
              info = "Fel: Funktionen returnerar inte en lista")
  expect_true(all(names(res) %in% c("D", "P", "Pinv")),
              info = "Fel: Funktionen returnerar inte korrekta listelementnamn.")
  # is.matrix() is used instead of comparing class(): in R >= 4.0 a matrix
  # has class c("matrix", "array"), so class(x) == "matrix" no longer holds.
  expect_true(is.matrix(res$D),    info = "Fel: D ska vara en matris")
  expect_true(is.matrix(res$P),    info = "Fel: P ska vara en matris")
  expect_true(is.matrix(res$Pinv), info = "Fel: Pinv ska vara en matris")

  # Singular input must raise an error (det(A) = 0).
  expect_error(diagonalize_matrix(matrix(c(3,3,2,2), byrow = TRUE, ncol = 2)),
               info = "Fel: Funktionen avbryter inte for singulara matriser (det(A) = 0).")

  # Check that mat$P %*% mat$D %*% mat$Pinv returns the matrix
  A <- matrix(3:6, ncol = 2)
  mat <- diagonalize_matrix(A)
  expect_true(all(diag(round(mat$D, 2)) %in% c(9.22, -0.22)),
              info = "Fel: Berakning av D")
  expect_true(all(round(mat$P %*% mat$D %*% mat$Pinv) == A),
              info = "Fel: P %*% D %*% Pinv returnerar inte den ursprungliga matrisen.")
  B <- matrix(c(-1,-3,-3,3,5,3,-1,-1,1), ncol = 3)
  mat <- diagonalize_matrix(B)
  expect_true(all(diag(round(mat$D, 2)) %in% c(2,2,1)),
              info = "Fel: Berakning av D ar felaktig.")
  expect_true(all(round(mat$P %*% mat$D %*% mat$Pinv) == B),
              info = "Fel: P %*% D %*% Pinv returnerar inte den ursprungliga matrisen.")
})
| /Labs/Tests/Tasks/diagonalize_matrix_tests.R | no_license | janss00n/KursRprgm | R | false | false | 2,320 | r | ### Assignment : diagonalize_matrix() ###
context("diagonalize_matrix()")
test_that("Assignment: diagonalize_matrix", {
X <- matrix(c(-1,-3,-3,3,5,3,-1,-1,1),ncol=3)
expect_that(exists("diagonalize_matrix"), is_true(),
info = "Fel: diagonalize_matrix() saknas.")
expect_that(diagonalize_matrix, is_a("function"),
info = "Fel: diagonalize_matrix ska vara en funktion.")
expect_self_contained(object = diagonalize_matrix,
"Fel: Funktionen har fria variabler")
expect_that(all(names(formals(diagonalize_matrix)) %in% c("X")), condition=is_true(),
info = "Fel: Argumenten i funktionen har felaktiga namn.")
expect_that(class(diagonalize_matrix(X)),
is_equivalent_to("list"),
info="Fel: Funktionen returnerar inte en lista")
expect_that(all(names(diagonalize_matrix(X)) %in%
c("D", "P", "Pinv")), is_true(),
info="Fel: Funktionen returnerar inte korrekta listelementnamn.")
expect_that(class(diagonalize_matrix(X)$D),
is_equivalent_to("matrix"),
info="Fel: D ska vara en matris")
expect_that(class(diagonalize_matrix(X)$P),
is_equivalent_to("matrix"),
info="Fel: P ska vara en matris")
expect_that(class(diagonalize_matrix(X)$Pinv),
is_equivalent_to("matrix"),
info="Fel: Pinv ska vara en matris")
expect_error(diagonalize_matrix(matrix(c(3,3,2,2),byrow = TRUE,ncol=2)),
info = "Fel: Funktionen avbryter inte for singulara matriser (det(A) = 0).")
# Check that mat$P %*% mat$D %*% mat$Pinv returns the matrix
A <- matrix(3:6,ncol=2)
mat <- diagonalize_matrix(A)
expect_true(all(diag(round(mat$D,2)) %in% c(9.22, -0.22)),
info="Fel: Berakning av D")
expect_true(all(round(mat$P %*% mat$D %*% mat$Pinv) == A),
info="Fel: P %*% D %*% Pinv returnerar inte den ursprungliga matrisen.")
B <- matrix(c(-1,-3,-3,3,5,3,-1,-1,1),ncol=3)
mat <- diagonalize_matrix(B)
expect_true(all(diag(round(mat$D,2)) %in% c(2,2,1)),
info="Fel: Berakning av D ar felaktig.")
expect_true(all(round(mat$P %*% mat$D %*% mat$Pinv) == B),
info="Fel: P %*% D %*% Pinv returnerar inte den ursprungliga matrisen.")
})
|
# Auxiliary function for RefSigmaW().
# Computes an average chi-score of the standardized residuals
# r = (y - X %*% Beta) / sigma, splitting the sample by the indicator
# `delta`: observations with delta == 1 are scored with ChiSw(r - c0),
# observations with delta == 0 with IChidlweibul(r, k = xk), and the sum
# is divided by (n - p).
#
# Arguments:
#   sigma - scale estimate used to standardize the residuals
#   Beta  - regression coefficient vector (length p)
#   X     - design matrix (n x p)
#   y     - response vector (length n)
#   delta - 0/1 indicator; presumably 1 = uncensored, 0 = censored -- TODO confirm
#
# xk and c0 are fixed tuning constants of the chi functions; ChiSw() and
# IChidlweibul() are defined elsewhere in the package.
RefAve2W <-
function(sigma,Beta,X,y,delta) {
# auxiliary function for RefSigmaW()
n <- length(y); p <- ncol(X); xk <- 1.718; c0 <- -0.1351788
ku <- kc <- 0; nu <- sum(delta)
r <- as.vector((y-X%*%as.matrix(Beta))/sigma)
if (nu > 0) {
ru <- r[delta==1]
ku <- sum(ChiSw(ru-c0)) }
if (nu < n) {
rc <- r[delta==0]
kc <- sum(unlist(lapply(rc,IChidlweibul,k=xk))) }
(ku+kc)/(n-p)}
| /R/RefAve2W.r | no_license | cran/RobustAFT | R | false | false | 393 | r | RefAve2W <-
function(sigma,Beta,X,y,delta) {
# auxliary functionfor RefSigmaW()
n <- length(y); p <- ncol(X); xk <- 1.718; c0 <- -0.1351788
ku <- kc <- 0; nu <- sum(delta)
r <- as.vector((y-X%*%as.matrix(Beta))/sigma)
if (nu > 0) {
ru <- r[delta==1]
ku <- sum(ChiSw(ru-c0)) }
if (nu < n) {
rc <- r[delta==0]
kc <- sum(unlist(lapply(rc,IChidlweibul,k=xk))) }
(ku+kc)/(n-p)}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{apply_method_rsm}
\alias{apply_method_rsm}
\title{Apply a method for a particular seed and scenario}
\usage{
apply_method_rsm(seed, scenario, method)
}
\arguments{
\item{seed}{(vector or list of integers) the seeds for the pseudo-rng which identifies/indexes trials}
\item{scenario}{a list including elements fn and args, the function name for the datamaker to be used and additional arguments}
\item{method}{a list including elements fn, name and args}
}
\value{
none; outputs are saved in the output subdirectory
}
\description{
Apply a method for a particular seed and scenario. The subscript _rsm refers to a particular repetition
of a particular scenario for a particular method.
}
| /man/apply_method_rsm.Rd | no_license | mengyin/dscr | R | false | false | 746 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{apply_method_rsm}
\alias{apply_method_rsm}
\title{Apply a method for a particular seed and scenario}
\usage{
apply_method_rsm(seed, scenario, method)
}
\arguments{
\item{seed}{(vector or list of integers) the seeds for the pseudo-rng which identifies/indexes trials}
\item{scenario}{a list including elements fn and args, the function name for the datamaker to be used and additional arguments}
\item{method}{a list including elements fn, name and args}
}
\value{
none; outputs are saved in the output subdirectory
}
\description{
Apply a method for a particular seed and scenario. The subscript _rsm refers to a particular repetition
of a particular scenario for a particular method.
}
|
# Sta523 project: approximate Manhattan police-precinct boundaries from NYC
# parking-violation data. Pipeline: clean ticket addresses -> geocode them via
# PLUTO tax-lot centroids -> fit an SVM mapping (x, y) -> precinct ->
# rasterize predictions over Manhattan and dissolve into precinct polygons.
source("check_packages.R")
check_packages(c("devtools","dplyr","data.table","lubridate","bit64","stringr","rgdal","rgeos","geosphere","sp","raster","e1071","sqldf"))
base = '/home/vis/cr173/Sta523/data/parking'
park = tbl_df(fread(paste0(base,"/NYParkingViolations.csv"),stringsAsFactors=FALSE))
#eliminate space in column name
colnames(park)<-str_replace_all(colnames(park)," ",".")
#change the format of Issue.Date
park$Issue.Date = mdy(park$Issue.Date)
#set the beginning date and ending date
start = mdy("8/1/2013")
end = mdy("6/30/2014")
#catch the address corresponding to a Violation.Precinct interval and a Issue.Date interval.
#That's all address we need.
# Keep only the listed Manhattan precincts within the date window; addresses
# need a non-empty, numeric house number to be geocodable.
addr = filter(park[,-1], (Violation.Precinct %in% c(1,5,6,7,9,10,13,14,17,18,19,20,22,23,24,25,26,28,30,32,33,34) & Issue.Date >= start & Issue.Date <= end)) %>%
mutate(House.Number = str_trim(House.Number), Street.Name = str_trim(Street.Name)) %>%
filter(House.Number != "" & Street.Name != "") %>%
filter(str_detect(House.Number,"[0-9]+")) %>%
transmute(Violation.Precinct = Violation.Precinct, addr = paste(House.Number, Street.Name)) %>%
mutate(addr = tolower(addr))
# PLUTO tax lots: use each lot's centroid as the coordinate for its address.
pl = readOGR(paste0(base,"/pluto/Manhattan/"),"MNMapPLUTO")
pt = gCentroid(pl,byid=TRUE)
tax = cbind(data.frame(pt@coords), tolower(as.character(pl@data$Address)))
colnames(tax)=c("longitude","latitude","addr")
# Normalize the street-name vocabulary on both sides so the exact-match join
# below succeeds (street->st, west->w, ordinal suffixes stripped, etc.).
tax$addr<-str_replace_all(tax$addr,"street","st")
tax$addr<-str_replace_all(tax$addr,"west","w")
tax$addr<-str_replace_all(tax$addr,"east","e")
tax$addr<-str_replace_all(tax$addr,"south","s")
tax$addr<-str_replace_all(tax$addr,"north","n")
tax$addr<-str_replace_all(tax$addr,"avenue","ave")
tax$addr<-str_replace_all(tax$addr,"lane","ln")
tax$addr<-str_replace_all(tax$addr, "place", "pl")
tax$addr<-str_replace_all(tax$addr, "drive", "dr")
addr$addr<-str_replace_all(addr$addr, "th ", " ")
addr$addr<-str_replace_all(addr$addr, "3rd ", "3 ")
addr$addr<-str_replace_all(addr$addr, "2nd ", "2 ")
addr$addr<-str_replace_all(addr$addr, "1st ", "1 ")
addr$addr<-str_replace_all(addr$addr,"avenue","ave")
tax$addr<-gsub(" bl"," blv",tax$addr,fixed=T)
# data after merge park addr and pluto
# Geocode tickets by exact (normalized) address match.
data = unique(inner_join(addr, tax))
# NOTE(review): after inner_join(addr, tax) the column order is
# (Violation.Precinct, addr, longitude, latitude), so the first two labels
# below appear swapped -- verify before relying on data$addr / data$Precinct.
colnames(data)=c("addr","Precinct","Longitude","Latitude")
#Since the data size is too large, we subset the data randomly
set.seed (7)
n = nrow(data)
n.sub=round(0.15*n)
data_sub=data[sample(1:n, n.sub),2:4]
colnames(data_sub)=c("Precinct","x","y")
data_sub=data.frame(data_sub)
data_sub$Precinct = as.integer(as.character(data_sub$Precinct))
data_sub=na.omit(data_sub)
# 10-fold cross-validated SVM classifier: precinct as a function of location.
svm_data=svm(as.factor(Precinct)~.,data=data_sub,cross=10)
# Rasterize the Manhattan borough outline and predict a precinct per cell.
ny = readOGR(paste0(base,"/nybb/"),"nybb")
manh=ny[ny@data$BoroName=="Manhattan",]
r = rasterize(manh, raster(ncols=500,nrows=1000,ext=extent(bbox(manh))))
cells = which(!is.na(r[]))
crds = xyFromCell(r,cells)
pred = predict(svm_data,crds)
r[cells] = as.numeric(as.character(pred))
# Keep only precincts that actually appear in the rasterized prediction.
dist = sort(unique(data_sub$Precinct))
index=which(!(dist %in% r[]))
dist=dist[-index]
# Dissolve the raster into one polygon (with data frame) per precinct.
l=list()
for(i in seq_along(dist))
{
l[[i]] = rasterToPolygons(r, function(x) x==dist[i], dissolve=TRUE)
l[[i]]@polygons[[1]]@ID = as.character(dist[i])
rownames(l[[i]]@data) = dist[i]
colnames(l[[i]]@data) = "Precinct"
}
pd = do.call(rbind, l)
# Serialize a SpatialPolygonsDataFrame to a GeoJSON FeatureCollection file.
# One Feature per row of `sp`; the attribute table becomes "properties" and
# each geometry is written as a MultiPolygon. Relies on createSPComment()
# (rgeos) to annotate each Polygons object with ring-ownership information.
writeGeoJSON = function(sp, file)
{
stopifnot(class(sp) == "SpatialPolygonsDataFrame")
stopifnot(!missing(sp))
# Attach ownership comments: "0" marks an outer ring, a positive value marks
# a hole owned by the ring at that index.
sp = createSPComment(sp)
# Build the JSON coordinate array for one Polygons object.
poly_json = function(x)
{
owners = as.integer(str_split(comment(x)," ")[[1]])
paste("[",
paste(
sapply(which(owners == 0), function(i)
{
res = "[ ["
res = paste(res, paste("[", apply(x@Polygons[[i]]@coords, 1, paste, collapse=", "), "]", collapse=", "))
# NOTE(review): `which(i %in% owners)` tests a scalar and yields 1 or
# integer(0); to append the holes belonging to ring i this was probably
# meant to be `which(owners == i)` -- verify against the intended output.
for (j in which(i %in% owners))
{
res = paste(res, "], [")
res = paste(res, paste("[", apply(x@Polygons[[j]]@coords, 1, paste, collapse=", "), "]", collapse=", "))
}
res = paste(res, "] ]")
}),
collapse = ", "
),
"]")
}
# Wrap a value in double quotes for use as a JSON key.
qt = function(x) paste0('"',x,'"')
res = paste('{',
'"type": "FeatureCollection",',
'"features": [',
paste(
sapply(1:nrow(sp), function(i)
{
paste('{ "type": "Feature",',
'"properties": { ',
paste(qt(names(sp)), sp@data[i,], sep=": ", collapse=", "),
' },',
'"geometry": {',
' "type": "MultiPolygon",',
' "coordinates": ',
poly_json(sp@polygons[[i]]),
'} }',
sep="\n")
}),
collapse=",\n"
),
'] }')
# NOTE(review): res is a length-1 character vector, so this always prints 1;
# nchar(res) was probably intended -- confirm.
cat(length(res),"\n\n")
write(res, file = file)
}
# Export the predicted precinct polygons to disk.
writeGeoJSON(pd,"./precinct.json")
| /proj3/project3.R | no_license | layla0605/sta-523 | R | false | false | 5,049 | r |
source("check_packages.R")
check_packages(c("devtools","dplyr","data.table","lubridate","bit64","stringr","rgdal","rgeos","geosphere","sp","raster","e1071","sqldf"))
base = '/home/vis/cr173/Sta523/data/parking'
park = tbl_df(fread(paste0(base,"/NYParkingViolations.csv"),stringsAsFactors=FALSE))
#eliminate space in column name
colnames(park)<-str_replace_all(colnames(park)," ",".")
#change the format of Issue.Date
park$Issue.Date = mdy(park$Issue.Date)
#set the beginning date and ending date
start = mdy("8/1/2013")
end = mdy("6/30/2014")
#catch the address corresponding to a Violation.Precinct interval and a Issue.Date interval.
#That's all address we need.
addr = filter(park[,-1], (Violation.Precinct %in% c(1,5,6,7,9,10,13,14,17,18,19,20,22,23,24,25,26,28,30,32,33,34) & Issue.Date >= start & Issue.Date <= end)) %>%
mutate(House.Number = str_trim(House.Number), Street.Name = str_trim(Street.Name)) %>%
filter(House.Number != "" & Street.Name != "") %>%
filter(str_detect(House.Number,"[0-9]+")) %>%
transmute(Violation.Precinct = Violation.Precinct, addr = paste(House.Number, Street.Name)) %>%
mutate(addr = tolower(addr))
pl = readOGR(paste0(base,"/pluto/Manhattan/"),"MNMapPLUTO")
pt = gCentroid(pl,byid=TRUE)
tax = cbind(data.frame(pt@coords), tolower(as.character(pl@data$Address)))
colnames(tax)=c("longitude","latitude","addr")
tax$addr<-str_replace_all(tax$addr,"street","st")
tax$addr<-str_replace_all(tax$addr,"west","w")
tax$addr<-str_replace_all(tax$addr,"east","e")
tax$addr<-str_replace_all(tax$addr,"south","s")
tax$addr<-str_replace_all(tax$addr,"north","n")
tax$addr<-str_replace_all(tax$addr,"avenue","ave")
tax$addr<-str_replace_all(tax$addr,"lane","ln")
tax$addr<-str_replace_all(tax$addr, "place", "pl")
tax$addr<-str_replace_all(tax$addr, "drive", "dr")
addr$addr<-str_replace_all(addr$addr, "th ", " ")
addr$addr<-str_replace_all(addr$addr, "3rd ", "3 ")
addr$addr<-str_replace_all(addr$addr, "2nd ", "2 ")
addr$addr<-str_replace_all(addr$addr, "1st ", "1 ")
addr$addr<-str_replace_all(addr$addr,"avenue","ave")
tax$addr<-gsub(" bl"," blv",tax$addr,fixed=T)
# data after merge park addr and pluto
data = unique(inner_join(addr, tax))
colnames(data)=c("addr","Precinct","Longitude","Latitude")
#Since the data size is too large, we subset the data randomly
set.seed (7)
n = nrow(data)
n.sub=round(0.15*n)
data_sub=data[sample(1:n, n.sub),2:4]
colnames(data_sub)=c("Precinct","x","y")
data_sub=data.frame(data_sub)
data_sub$Precinct = as.integer(as.character(data_sub$Precinct))
data_sub=na.omit(data_sub)
svm_data=svm(as.factor(Precinct)~.,data=data_sub,cross=10)
ny = readOGR(paste0(base,"/nybb/"),"nybb")
manh=ny[ny@data$BoroName=="Manhattan",]
r = rasterize(manh, raster(ncols=500,nrows=1000,ext=extent(bbox(manh))))
cells = which(!is.na(r[]))
crds = xyFromCell(r,cells)
pred = predict(svm_data,crds)
r[cells] = as.numeric(as.character(pred))
dist = sort(unique(data_sub$Precinct))
index=which(!(dist %in% r[]))
dist=dist[-index]
l=list()
for(i in seq_along(dist))
{
l[[i]] = rasterToPolygons(r, function(x) x==dist[i], dissolve=TRUE)
l[[i]]@polygons[[1]]@ID = as.character(dist[i])
rownames(l[[i]]@data) = dist[i]
colnames(l[[i]]@data) = "Precinct"
}
pd = do.call(rbind, l)
writeGeoJSON = function(sp, file)
{
stopifnot(class(sp) == "SpatialPolygonsDataFrame")
stopifnot(!missing(sp))
sp = createSPComment(sp)
poly_json = function(x)
{
owners = as.integer(str_split(comment(x)," ")[[1]])
paste("[",
paste(
sapply(which(owners == 0), function(i)
{
res = "[ ["
res = paste(res, paste("[", apply(x@Polygons[[i]]@coords, 1, paste, collapse=", "), "]", collapse=", "))
for (j in which(i %in% owners))
{
res = paste(res, "], [")
res = paste(res, paste("[", apply(x@Polygons[[j]]@coords, 1, paste, collapse=", "), "]", collapse=", "))
}
res = paste(res, "] ]")
}),
collapse = ", "
),
"]")
}
qt = function(x) paste0('"',x,'"')
res = paste('{',
'"type": "FeatureCollection",',
'"features": [',
paste(
sapply(1:nrow(sp), function(i)
{
paste('{ "type": "Feature",',
'"properties": { ',
paste(qt(names(sp)), sp@data[i,], sep=": ", collapse=", "),
' },',
'"geometry": {',
' "type": "MultiPolygon",',
' "coordinates": ',
poly_json(sp@polygons[[i]]),
'} }',
sep="\n")
}),
collapse=",\n"
),
'] }')
cat(length(res),"\n\n")
write(res, file = file)
}
writeGeoJSON(pd,"./precinct.json")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hy_sed_daily_suscon.R
\name{hy_sed_daily_suscon}
\alias{hy_sed_daily_suscon}
\title{Extract daily suspended sediment concentration information from the HYDAT database}
\format{A tibble with 5 variables:
\describe{
\item{STATION_NUMBER}{Unique 7 digit Water Survey of Canada station number}
\item{Date}{Observation date. Formatted as a Date class.}
\item{Parameter}{Parameter being measured. Only possible value is SUSCON}
\item{Value}{Discharge value. The units are mg/l.}
\item{Symbol}{Measurement/river conditions}
}}
\source{
HYDAT
}
\usage{
hy_sed_daily_suscon(station_number = NULL, hydat_path = NULL,
prov_terr_state_loc = NULL, start_date = "ALL", end_date = "ALL",
symbol_output = "code")
}
\arguments{
\item{station_number}{A seven digit Water Survey of Canada station number. If this argument is omitted, the value of \code{prov_terr_state_loc}
is returned.}
\item{hydat_path}{The default for this argument is to look for hydat in the same location where it
was saved by using \code{download_hydat}. Therefore this argument is almost always omitted from a function call.
You can see where hydat was downloaded using \code{hy_dir()}}
\item{prov_terr_state_loc}{Province, state or territory. If this argument is omitted, the value of \code{station_number}
is returned. See \code{unique(allstations$prov_terr_state_loc)}}
\item{start_date}{Leave blank if all dates are required. Date format needs to be in YYYY-MM-DD. Date is inclusive.}
\item{end_date}{Leave blank if all dates are required. Date format needs to be in YYYY-MM-DD. Date is inclusive.}
\item{symbol_output}{Set whether the raw code, or the \code{english} or the \code{french} translations are outputted. Default
value is \code{code}.}
}
\value{
A tibble of daily suspended sediment concentration
}
\description{
Provides a wrapper to turn the SED_DLY_SUSCON table in HYDAT into a tidy data frame of daily suspended sediment concentration information.
\code{station_number} and \code{prov_terr_state_loc} can both be supplied. If both are omitted all values from the \code{hy_stations}
table are returned. That is a large vector for \code{hy_sed_daily_suscon}.
}
\examples{
\dontrun{
hy_sed_daily_suscon(station_number = "01CE003")
}
}
\seealso{
Other HYDAT functions: \code{\link{hy_agency_list}},
\code{\link{hy_annual_instant_peaks}},
\code{\link{hy_annual_stats}},
\code{\link{hy_daily_flows}},
\code{\link{hy_daily_levels}},
\code{\link{hy_data_symbols}},
\code{\link{hy_data_types}}, \code{\link{hy_datum_list}},
\code{\link{hy_monthly_flows}},
\code{\link{hy_monthly_levels}},
\code{\link{hy_reg_office_list}},
\code{\link{hy_sed_daily_loads}},
\code{\link{hy_sed_monthly_loads}},
\code{\link{hy_sed_monthly_suscon}},
\code{\link{hy_sed_samples_psd}},
\code{\link{hy_sed_samples}}, \code{\link{hy_stations}},
\code{\link{hy_stn_data_coll}},
\code{\link{hy_stn_data_range}},
\code{\link{hy_stn_op_schedule}},
\code{\link{hy_stn_regulation}}, \code{\link{hy_version}}
}
| /man/hy_sed_daily_suscon.Rd | permissive | njatel/tidyhydat | R | false | true | 3,092 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hy_sed_daily_suscon.R
\name{hy_sed_daily_suscon}
\alias{hy_sed_daily_suscon}
\title{Extract daily suspended sediment concentration information from the HYDAT database}
\format{A tibble with 5 variables:
\describe{
\item{STATION_NUMBER}{Unique 7 digit Water Survey of Canada station number}
\item{Date}{Observation date. Formatted as a Date class.}
\item{Parameter}{Parameter being measured. Only possible value is SUSCON}
\item{Value}{Discharge value. The units are mg/l.}
\item{Symbol}{Measurement/river conditions}
}}
\source{
HYDAT
}
\usage{
hy_sed_daily_suscon(station_number = NULL, hydat_path = NULL,
prov_terr_state_loc = NULL, start_date = "ALL", end_date = "ALL",
symbol_output = "code")
}
\arguments{
\item{station_number}{A seven digit Water Survey of Canada station number. If this argument is omitted, the value of \code{prov_terr_state_loc}
is returned.}
\item{hydat_path}{The default for this argument is to look for hydat in the same location where it
was saved by using \code{download_hydat}. Therefore this argument is almost always omitted from a function call.
You can see where hydat was downloaded using \code{hy_dir()}}
\item{prov_terr_state_loc}{Province, state or territory. If this argument is omitted, the value of \code{station_number}
is returned. See \code{unique(allstations$prov_terr_state_loc)}}
\item{start_date}{Leave blank if all dates are required. Date format needs to be in YYYY-MM-DD. Date is inclusive.}
\item{end_date}{Leave blank if all dates are required. Date format needs to be in YYYY-MM-DD. Date is inclusive.}
\item{symbol_output}{Set whether the raw code, or the \code{english} or the \code{french} translations are outputted. Default
value is \code{code}.}
}
\value{
A tibble of daily suspended sediment concentration
}
\description{
Provides a wrapper to turn the SED_DLY_SUSCON table in HYDAT into a tidy data frame of daily suspended sediment concentration information.
\code{station_number} and \code{prov_terr_state_loc} can both be supplied. If both are omitted all values from the \code{hy_stations}
table are returned. That is a large vector for \code{hy_sed_daily_suscon}.
}
\examples{
\dontrun{
hy_sed_daily_suscon(station_number = "01CE003")
}
}
\seealso{
Other HYDAT functions: \code{\link{hy_agency_list}},
\code{\link{hy_annual_instant_peaks}},
\code{\link{hy_annual_stats}},
\code{\link{hy_daily_flows}},
\code{\link{hy_daily_levels}},
\code{\link{hy_data_symbols}},
\code{\link{hy_data_types}}, \code{\link{hy_datum_list}},
\code{\link{hy_monthly_flows}},
\code{\link{hy_monthly_levels}},
\code{\link{hy_reg_office_list}},
\code{\link{hy_sed_daily_loads}},
\code{\link{hy_sed_monthly_loads}},
\code{\link{hy_sed_monthly_suscon}},
\code{\link{hy_sed_samples_psd}},
\code{\link{hy_sed_samples}}, \code{\link{hy_stations}},
\code{\link{hy_stn_data_coll}},
\code{\link{hy_stn_data_range}},
\code{\link{hy_stn_op_schedule}},
\code{\link{hy_stn_regulation}}, \code{\link{hy_version}}
}
|
# Auto-generated fuzzing regression case (RcppDeepState/valgrind style):
# calls the internal meteor:::ET0_Makkink with extreme/degenerate numeric
# inputs to check it does not crash or leak.
testlist <- list(Rs = c(-3.77044550050926e+242, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), atmp = numeric(0), relh = c(7.64681398433536e-304, -4.29227809743625e-307, 1.81037701089217e+87, -2.93112217825115e-158, 9.03412394302482e-46, 7.31195213563656e+256, -1.93925524631599e-68, 2.08343441298214e-168, 1.39098956557385e-309 ), temp = 1.11231963688461e-307)
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) | /meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615863613-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 474 | r | testlist <- list(Rs = c(-3.77044550050926e+242, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), atmp = numeric(0), relh = c(7.64681398433536e-304, -4.29227809743625e-307, 1.81037701089217e+87, -2.93112217825115e-158, 9.03412394302482e-46, 7.31195213563656e+256, -1.93925524631599e-68, 2.08343441298214e-168, 1.39098956557385e-309 ), temp = 1.11231963688461e-307)
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
#LIBRARIES:
library(tidyverse)
library(ggeffects)
# Install patchwork only when it is missing, instead of unconditionally
# reinstalling it on every run of the script.
if (!requireNamespace("patchwork", quietly = TRUE)) {
  install.packages("patchwork")
}
library(patchwork)
options(scipen = 999)  # avoid scientific notation in printed output / axes

#DATA IMPORT
# TidyTuesday 2021-03-09 movies data (Bechdel test), read from GitHub.
movies <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-03-09/movies.csv')
#PLOTS
#dependent
sum(is.na(movies$metascore))
meta_h <-
movies %>%
filter(!is.na(metascore)) %>%
ggplot(aes(x=metascore)) +
geom_histogram(color = "white")
meta_b <-
movies %>%
filter(!is.na(metascore)) %>%
ggplot(aes(y=metascore)) +
geom_boxplot()
meta_p <-
movies %>%
filter(!is.na(metascore)) %>%
ggplot(aes(y=metascore, x=seq_along(metascore)))+
geom_point()+
labs(x="row numbers")
meta_h+meta_b+meta_p
#main independent
sum(is.na(movies$budget_2013))#no missings
budget_uni<-movies %>%
ggplot(aes(x=log10(budget_2013)))+
geom_histogram()
budget_bi<-movies %>%
filter(!is.na(metascore)) %>%
ggplot(aes(y=metascore, x= log10(budget_2013)))+
geom_point()+
geom_smooth()
budget_uni+budget_bi
# Other independent variables ----

# year: release year (treated as discrete for the bar chart)
sum(is.na(movies$year))#no missings
year_uni<-movies %>%
ggplot(aes(x= year))+
geom_bar()
year_bi<-movies %>%
ggplot(aes(y= metascore, x= year))+
geom_point()
# rated: MPAA rating, cleaned into a small set of comparable levels
sum(is.na(movies$rated))
movies <- movies %>%
mutate(rated= as.factor(rated))
# Recode: "N/A" and "Not Rated"/"Unrated" become NA; X is folded into NC-17
# and the TV ratings into their closest MPAA category.
movies<-movies %>%
mutate(rated = replace(rated, rated =="N/A", NA)) %>%
mutate(rated = fct_collapse(rated, "NR"= c("Not Rated","Unrated"))) %>%
mutate(rated = replace(rated, rated == "NR", NA)) %>%
mutate(rated= fct_collapse(rated, "NC-17"= c("NC-17", "X"))) %>%
mutate(rated= fct_collapse(rated, "PG"= c("PG", "TV-PG"))) %>%
mutate(rated= fct_collapse(rated, "PG-13"= c("PG-13", "TV-14")))
rated_uni<-movies %>%
ggplot(aes(x=rated))+
geom_bar()
rated_bi<-movies %>%
filter(!is.na(metascore)) %>%
ggplot(aes(x= rated, y= metascore))+
geom_boxplot()
# runtime: parse "### min" strings into a numeric minute count ----
movies$runtime2 <- as.numeric(gsub(' min', '', movies$runtime))
sum(is.na(movies$runtime2))  # some runtimes fail to parse -> NA
# Bar chart of runtime counts. geom_bar() is the direct idiom for counting
# discrete values; geom_histogram(stat = "count") is the deprecated spelling
# of the same thing and warns in current ggplot2.
runtime_uni<-movies %>%
filter(!is.na(runtime2)) %>%
ggplot(aes(x= runtime2))+
geom_bar()
# Scatter of metascore against runtime for complete cases.
runtime_bi<-movies %>%
filter(!is.na(runtime2)) %>%
filter(!is.na(metascore)) %>%
ggplot(aes(y=metascore, x= runtime2))+
geom_point()
# binary: pass/fail indicator (presumably the Bechdel-test outcome in this
# TidyTuesday dataset -- TODO confirm against the data dictionary)
sum(is.na(movies$binary))#no missings
binary_uni<-movies %>%
ggplot(aes(x= binary))+
geom_bar()
binary_bi<-movies %>%
filter(!is.na(metascore)) %>%
ggplot(aes(x= binary,y = metascore))+
geom_boxplot()
# Combined figure: one patchwork row per predictor (uni- and bivariate),
# panels tagged A, B, C, ...
(year_uni+year_bi)/(runtime_uni+runtime_bi)/ (rated_uni+ rated_bi)/(binary_uni+binary_bi)+
plot_annotation(tag_levels = 'A') &
theme(plot.tag = element_text(size = 8))
#### Model presentation and interpretation ----
# Centre the predictors so the intercept of model_c is interpretable at the
# sample means. year and budget_2013 have no missings (checked above), but
# runtime2 can contain NAs from parsing; without na.rm = TRUE, mean(runtime2)
# is NA for any surviving NA, which would make runtime_c all-NA and leave
# lm() for model_c with no usable cases. Adding na.rm = TRUE is a no-op when
# no NAs remain, and correct when they do.
movies2 <-
movies %>%
filter(!is.na(metascore)) %>%
mutate(year_c = year-mean(year),
runtime_c = runtime2 - mean(runtime2, na.rm = TRUE),
budget_log = log10(budget_2013),
budget_log_c= budget_log- mean(budget_log))
# Raw-scale and centred models; slopes are identical, only the intercept's
# interpretation changes.
model<-lm(metascore~budget_log+year+runtime2+rated+binary, data=movies2)
model_c <- lm(metascore~budget_log_c+year_c+runtime_c+rated+binary, data=movies2)
summary(model_c)
#### Model fit and diagnostics ----
# Standard lm diagnostic panels: residuals vs fitted, QQ, scale-location,
# leverage. NOTE(review): par() is changed globally here and never restored;
# consider saving and resetting the old par afterwards.
par(mfrow = c(2,2))
plot(model)
### Visual model presentation and interpretation ----
# Marginal (predicted) effects of year at representative runtime2 values.
model_marginal_effect <- plot(ggpredict(model, terms = c("year", "runtime2")), colors = c("red", "orange", "blue"))
model_marginal_effect
| /antonenko atestace.R | no_license | poliantonenko/AppliedRegressionInR | R | false | false | 3,362 | r | #LIBRARIES:
library(tidyverse)
library(ggeffects)
install.packages("patchwork")
library(patchwork)
options(scipen = 999)
#DATA IMPORT
movies <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-03-09/movies.csv')
#PLOTS
#dependent
sum(is.na(movies$metascore))
meta_h <-
movies %>%
filter(!is.na(metascore)) %>%
ggplot(aes(x=metascore)) +
geom_histogram(color = "white")
meta_b <-
movies %>%
filter(!is.na(metascore)) %>%
ggplot(aes(y=metascore)) +
geom_boxplot()
meta_p <-
movies %>%
filter(!is.na(metascore)) %>%
ggplot(aes(y=metascore, x=seq_along(metascore)))+
geom_point()+
labs(x="row numbers")
meta_h+meta_b+meta_p
#main independent
sum(is.na(movies$budget_2013))#no missings
budget_uni<-movies %>%
ggplot(aes(x=log10(budget_2013)))+
geom_histogram()
budget_bi<-movies %>%
filter(!is.na(metascore)) %>%
ggplot(aes(y=metascore, x= log10(budget_2013)))+
geom_point()+
geom_smooth()
budget_uni+budget_bi
#other independent
#year
sum(is.na(movies$year))#no missings
year_uni<-movies %>%
ggplot(aes(x= year))+
geom_bar()
year_bi<-movies %>%
ggplot(aes(y= metascore, x= year))+
geom_point()
#rated
sum(is.na(movies$rated))
movies <- movies %>%
mutate(rated= as.factor(rated))
movies<-movies %>%
mutate(rated = replace(rated, rated =="N/A", NA)) %>%
mutate(rated = fct_collapse(rated, "NR"= c("Not Rated","Unrated"))) %>%
mutate(rated = replace(rated, rated == "NR", NA)) %>%
mutate(rated= fct_collapse(rated, "NC-17"= c("NC-17", "X"))) %>%
mutate(rated= fct_collapse(rated, "PG"= c("PG", "TV-PG"))) %>%
mutate(rated= fct_collapse(rated, "PG-13"= c("PG-13", "TV-14")))
rated_uni<-movies %>%
ggplot(aes(x=rated))+
geom_bar()
rated_bi<-movies %>%
filter(!is.na(metascore)) %>%
ggplot(aes(x= rated, y= metascore))+
geom_boxplot()
#runtime
movies$runtime2 <- as.numeric(gsub(' min', '', movies$runtime))
sum(is.na(movies$runtime2))
runtime_uni<-movies %>%
filter(!is.na(runtime2)) %>%
ggplot(aes(x= runtime2))+
geom_histogram(stat="count")
runtime_bi<-movies %>%
filter(!is.na(runtime2)) %>%
filter(!is.na(metascore)) %>%
ggplot(aes(y=metascore, x= runtime2))+
geom_point()
#binary
sum(is.na(movies$binary))#no missings
binary_uni<-movies %>%
ggplot(aes(x= binary))+
geom_bar()
binary_bi<-movies %>%
filter(!is.na(metascore)) %>%
ggplot(aes(x= binary,y = metascore))+
geom_boxplot()
(year_uni+year_bi)/(runtime_uni+runtime_bi)/ (rated_uni+ rated_bi)/(binary_uni+binary_bi)+
plot_annotation(tag_levels = 'A') &
theme(plot.tag = element_text(size = 8))
####Model presentation and interpretation
movies2 <-
movies %>%
filter(!is.na(metascore)) %>%
mutate(year_c = year-mean(year),
runtime_c = runtime2 - mean(runtime2),
budget_log = log10(budget_2013),
budget_log_c= budget_log- mean(budget_log))
model<-lm(metascore~budget_log+year+runtime2+rated+binary, data=movies2)
model_c <- lm(metascore~budget_log_c+year_c+runtime_c+rated+binary, data=movies2)
summary(model_c)
####Model fit and diagnostics
par(mfrow = c(2,2))
plot(model)
### Visual model presentation and interpretation
model_marginal_effect <- plot(ggpredict(model, terms = c("year", "runtime2")), colors = c("red", "orange", "blue"))
model_marginal_effect
|
correlationAnalysis <- function(X){
# Compute static correlations between the log returns of the two futures
# maturities stored in X, for every (window, lag) combination, and store the
# resulting series plus matching dates under X$CasualityData.
#
# X is expected to carry (assumed from usage -- confirm against callers):
#   LAST              price data frame, columns named by maturity
#   Maturity          two column names selecting the series to compare
#   CorrelationWindow / CorrelationLag   integer vectors of settings
#   Date              data frame / column of observation dates
# Returns X with X$CasualityData$Values and $Dates populated.
TS1 <- X$LAST %>% dplyr::select(X$Maturity[1]) # price series of 1st maturity
TS2 <- X$LAST %>% dplyr::select(X$Maturity[2]) # price series of 2nd maturity
# Log returns: diff(log(price)). Extract the single column first instead of
# log-transforming a one-column data frame and mapping diff over it.
FirstTS <- diff(log(TS1[[1]]))
SecondTS <- diff(log(TS2[[1]]))
Window <- X$CorrelationWindow
Lag <- X$CorrelationLag
Dates <- X$Date
X$CasualityData$Values$Window <- list()
X$CasualityData$Dates$Window <- list()
# seq_along() is safe for empty settings vectors, unlike 1:length(x),
# which would iterate over c(1, 0).
for (i in seq_along(Window)){
X$CasualityData$Values$Window[[i]] <- list()
X$CasualityData$Dates$Window[[i]] <- list()
for (j in seq_along(Lag)){
Correlation <- staticCorrelation(FirstTS, SecondTS, Window[i], Lag[j])
X$CasualityData$Values$Window[[i]]$Lag[[j]] <- Correlation
# Dates aligned with the correlation series: the first window+lag
# observations are consumed before the first correlation value exists.
X$CasualityData$Dates$Window[[i]]$Lag[[j]] <- Dates[(Window[i]+Lag[j]):length(Dates), 1]
}
}
return(X)
}
| /correlationAnalysis.r | no_license | tucson10101/-efrp_r_Csepregi_Englert | R | false | false | 982 | r | correlationAnalysis <- function(X){
TS1 <- X$LAST %>% dplyr::select(X$Maturity[1])#saving appropriate timeseries from dataframe based on maturities
TS2 <- X$LAST %>% dplyr::select(X$Maturity[2])
FirstTS <- TS1 %>% log(.) %>% lapply(., diff) %>% unlist(.) #calculating log return for the two time series
SecondTS <- TS2 %>% log(.) %>% lapply(., diff) %>% unlist(.)
Window <- X$CorrelationWindow
Lag <- X$CorrelationLag
Dates <- X$Date
X$CasualityData$Values$Window <- list()
X$CasualityData$Dates$Window <- list()
for (i in 1:(length(Window))){
X$CasualityData$Values$Window[[i]] <- list()
X$CasualityData$Dates$Window[[i]] <- list()
for (j in 1:(length(Lag))){
Correlation <- staticCorrelation(FirstTS, SecondTS, Window[i], Lag[j])
X$CasualityData$Values$Window[[i]]$Lag[[j]] <- Correlation
X$CasualityData$Dates$Window[[i]]$Lag[[j]] <- Dates[(Window[i]+Lag[j]):length(Dates), 1]
}
}
return(X)
}
|
## ---- include = FALSE----------------------------------------------------
knitr::opts_chunk$set(collapse = TRUE, warning = FALSE, message = FALSE,
fig.height = 5, fig.width = 10,
dpi = 200)
library(knitr)
library(cr17)
library(dplyr)
## ---- echo = FALSE-------------------------------------------------------
# Tiny illustrative table: five patients with a follow-up time and a status
# code (presumably 0 = censored and 1/2 = two competing event types, matching
# the competing-risks setting of this vignette -- confirm in the text).
sampleTable <- data.frame(
Patient = 1:5,
Time = c(5, 1, 4, 5, 2),
Status = c(1, 0, 1, 2, 2)
)
kable(sampleTable)
## ------------------------------------------------------------------------
survivalCurves <- fitSurvival(time = LUAD$time,
risk = LUAD$event,
group = LUAD$gender,
cens = "alive")
## ------------------------------------------------------------------------
names(survivalCurves)
## ------------------------------------------------------------------------
SC <- survivalCurves$death[c("time",
"n.risk",
"n.event",
"n.censor",
"surv",
"strata",
"std.err",
"lower",
"upper")]
SC <- as.data.frame(SC)
SC <- filter(SC, strata == "male")
kable(head(SC, n = 10))
## ---- out.width=700, fig.align='center'----------------------------------
plotSurvival(fit = survivalCurves,
target = 1500,
ggtheme = theme_gray(),
legendtitle = "Gender")
## ------------------------------------------------------------------------
testSurvival(time = LUAD$time,
risk = LUAD$event,
group = LUAD$gender,
cens = "alive",
rho = 0)
## ------------------------------------------------------------------------
coxModel <- fitCox(time = LUAD$time,
risk = LUAD$event,
group = LUAD$gender,
cens = "alive",
conf.int = 0.95)
## ------------------------------------------------------------------------
names(coxModel)
## ------------------------------------------------------------------------
coxModel$death$coefficients
## ------------------------------------------------------------------------
kable(testCox(fitCox = coxModel))
## ------------------------------------------------------------------------
cuminc <- fitCuminc(time = LUAD$time,
risk = LUAD$event,
group = LUAD$gender,
cens = "alive")
## ------------------------------------------------------------------------
names(cuminc)
## ------------------------------------------------------------------------
femaleDeathCuminc <- cuminc[["female death"]]
femaleDeathCuminc <- as.data.frame(femaleDeathCuminc)
kable(head(femaleDeathCuminc))
## ---- out.width=700, fig.align='center'----------------------------------
plotCuminc(ci = cuminc,
cens = "alive",
target = 1500,
ggtheme = theme_gray(),
legendtitle = "Gender")
## ------------------------------------------------------------------------
testCuminc(cuminc)
## ------------------------------------------------------------------------
reg <- fitReg(time = LUAD$time,
risk = LUAD$event,
group = LUAD$gender,
cens = "alive")
## ------------------------------------------------------------------------
reg$death$coef
## ------------------------------------------------------------------------
testReg(reg)
## ---- eval=FALSE---------------------------------------------------------
# riskTab(time = LUAD$time,
# risk = LUAD$event,
# group = LUAD$gender,
# cens = "alive",
# title = "Number at risk")
#
## ---- eval=FALSE---------------------------------------------------------
# eventTab(time = LUAD$time,
# risk = LUAD$event,
# group = LUAD$gender,
# cens = "alive",
# title = "Number of events")
#
## ---- eval = FALSE-------------------------------------------------------
# summarizeCR(time = LUAD$time,
# risk = LUAD$event,
# group = LUAD$gender, cens = "alive")
## ---- eval = FALSE-------------------------------------------------------
# summarizeCR(time = LUAD$time,
# risk = LUAD$event,
# group = LUAD$gender,
# cens = "alive",
# rho = 1,
# target = 800,
# type = "kaplan-meier",
# ggtheme = theme_gray(),
# titleSurv = "Survival analysis",
# titleCuminc = "Competing risks models",
# xtitle = "Days",
# ytitleSurv = "Survival curves",
# ytitleCuminc = "Cumulative incidence functions",
# legendtitle = "Gender")
| /data/genthat_extracted_code/cr17/vignettes/vignette.R | no_license | surayaaramli/typeRrh | R | false | false | 5,146 | r | ## ---- include = FALSE----------------------------------------------------
knitr::opts_chunk$set(collapse = TRUE, warning = FALSE, message = FALSE,
fig.height = 5, fig.width = 10,
dpi = 200)
library(knitr)
library(cr17)
library(dplyr)
## ---- echo = FALSE-------------------------------------------------------
sampleTable <- data.frame('Patient' = numeric(5),
'Time' = numeric(5),
'Status' = numeric(5))
sampleTable$Patient <- 1:5
sampleTable$Time <- c(5,1,4,5,2)
sampleTable$Status <- c(1,0,1,2,2)
kable(sampleTable)
## ------------------------------------------------------------------------
survivalCurves <- fitSurvival(time = LUAD$time,
risk = LUAD$event,
group = LUAD$gender,
cens = "alive")
## ------------------------------------------------------------------------
names(survivalCurves)
## ------------------------------------------------------------------------
SC <- survivalCurves$death[c("time",
"n.risk",
"n.event",
"n.censor",
"surv",
"strata",
"std.err",
"lower",
"upper")]
SC <- as.data.frame(SC)
SC <- filter(SC, strata == "male")
kable(head(SC, n = 10))
## ---- out.width=700, fig.align='center'----------------------------------
plotSurvival(fit = survivalCurves,
target = 1500,
ggtheme = theme_gray(),
legendtitle = "Gender")
## ------------------------------------------------------------------------
testSurvival(time = LUAD$time,
risk = LUAD$event,
group = LUAD$gender,
cens = "alive",
rho = 0)
## ------------------------------------------------------------------------
coxModel <- fitCox(time = LUAD$time,
risk = LUAD$event,
group = LUAD$gender,
cens = "alive",
conf.int = 0.95)
## ------------------------------------------------------------------------
names(coxModel)
## ------------------------------------------------------------------------
coxModel$death$coefficients
## ------------------------------------------------------------------------
kable(testCox(fitCox = coxModel))
## ------------------------------------------------------------------------
cuminc <- fitCuminc(time = LUAD$time,
risk = LUAD$event,
group = LUAD$gender,
cens = "alive")
## ------------------------------------------------------------------------
names(cuminc)
## ------------------------------------------------------------------------
femaleDeathCuminc <- cuminc[["female death"]]
femaleDeathCuminc <- as.data.frame(femaleDeathCuminc)
kable(head(femaleDeathCuminc))
## ---- out.width=700, fig.align='center'----------------------------------
plotCuminc(ci = cuminc,
cens = "alive",
target = 1500,
ggtheme = theme_gray(),
legendtitle = "Gender")
## ------------------------------------------------------------------------
testCuminc(cuminc)
## ------------------------------------------------------------------------
reg <- fitReg(time = LUAD$time,
risk = LUAD$event,
group = LUAD$gender,
cens = "alive")
## ------------------------------------------------------------------------
reg$death$coef
## ------------------------------------------------------------------------
testReg(reg)
## ---- eval=FALSE---------------------------------------------------------
# riskTab(time = LUAD$time,
# risk = LUAD$event,
# group = LUAD$gender,
# cens = "alive",
# title = "Number at risk")
#
## ---- eval=FALSE---------------------------------------------------------
# eventTab(time = LUAD$time,
# risk = LUAD$event,
# group = LUAD$gender,
# cens = "alive",
# title = "Number of events")
#
## ---- eval = FALSE-------------------------------------------------------
# summarizeCR(time = LUAD$time,
# risk = LUAD$event,
# group = LUAD$gender, cens = "alive")
## ---- eval = FALSE-------------------------------------------------------
# summarizeCR(time = LUAD$time,
# risk = LUAD$event,
# group = LUAD$gender,
# cens = "alive",
# rho = 1,
# target = 800,
# type = "kaplan-meier",
# ggtheme = theme_gray(),
# titleSurv = "Survival analysis",
# titleCuminc = "Competing risks models",
# xtitle = "Days",
# ytitleSurv = "Survival curves",
# ytitleCuminc = "Cumulative incidence functions",
# legendtitle = "Gender")
|
# Build long-format expression tables for the TARGET and SEQC cohorts and
# stack them for faceted plotting.

# TARGET: genes x patients matrix -> patients x genes, annotated with risk.
de_of_de_facet_tab <- target_log2tpm[genes_for_kap, all$TARGET_SHORT]
de_tab <- as.data.frame(t(de_of_de_facet_tab))
de_tab$risk <- all$risk
de_tab$patient <- rownames(de_tab)
de_tab_melted <- melt(de_tab, id.vars = c("patient", "risk"))
de_tab_melted$risk <- factor(de_tab_melted$risk,
levels = c("low_risk_4s", "intermediate_risk", "high_risk"))
de_tab_melted$variable <- factor(de_tab_melted$variable,
levels = c(rownames(de_of_de_sorted)))
de_tab_melted
# SEQC: harmonise the risk labels with TARGET, then reshape the same way.
unique(seqc_risks$risk)
seqc_risk2 <- seqc_risks
seqc_risk2$risk[which(seqc_risk2$risk == "low_risk")] <- "low_risk_4s"
unique(seqc_risk2$risk)
# Keep only the genes present in the SEQC matrix (%in% never yields NA).
genes_for_seqc <- genes_for_kap[genes_for_kap %in% rownames(seqc_tpm_log2)]
seqc_facet_tab <- seqc_tpm_log2[genes_for_seqc, seqc_risk2$patient]
seqc_facet_tab <- as.data.frame(t(seqc_facet_tab))
seqc_facet_tab$risk <- seqc_risk2$risk
seqc_facet_tab$patient <- rownames(seqc_facet_tab)
seqc_facet_tab_melted <- melt(seqc_facet_tab, id.vars = c("patient", "risk"))
seqc_facet_tab_melted$risk <- factor(seqc_facet_tab_melted$risk,
levels = c("low_risk_4s", "intermediate_risk", "high_risk"))
# Tag each cohort and stack them into one table.
de_tab_melted$dataset <- "TARGET"
seqc_facet_tab_melted$dataset <- "SEQC"
de_tab_melted
seqc_facet_tab_melted
de_of_de_two_datasets <- rbind(de_tab_melted, seqc_facet_tab_melted)
# Faceted beeswarm of expression by risk group (one panel per gene) with a
# per-dataset median + IQR summarised as a pointrange.
# NOTE(review): fun.ymin/fun.ymax/fun.y were deprecated in ggplot2 3.3 in
# favour of fun.min/fun.max/fun -- update once the ggplot2 version in use
# is confirmed.
ggplot(data = de_of_de_two_datasets, aes(x=risk,y=value, group=dataset)) +
geom_quasirandom(shape=1, alpha=0.3)+facet_wrap(~variable,scales="free_y")+
geom_pointrange(mapping = aes(x = risk, y = value),
stat = "summary",
fun.ymin = function(z) {quantile(z,0.25)},  # lower quartile
fun.ymax = function(z) {quantile(z,0.75)},  # upper quartile
fun.y = median, shape=22, fill="black",
position=position_dodge(width=0.8)) +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"),
text=element_text(size = 14),
axis.text.x = element_text(angle = 90, hjust = 1))
de_of_de_two_datasets$risk  # quick inspection of the stacked risk column
# Re-level the factors so facets and x-axis follow the desired order.
de_of_de_two_datasets$variable=as.character(de_of_de_two_datasets$variable)
de_of_de_two_datasets$risk=as.character(de_of_de_two_datasets$risk)
de_of_de_two_datasets$risk=factor(de_of_de_two_datasets$risk, levels=c("low_risk_4s", "intermediate_risk", "high_risk"))
de_of_de_two_datasets$variable=factor(de_of_de_two_datasets$variable, levels=genes_for_kap)
de_of_de_two_datasets$dataset=factor(de_of_de_two_datasets$dataset, levels=c("TARGET", "SEQC"))
# Row positions of a hand-picked 13-gene set (result discarded -- inspection only).
which(de_of_de_two_datasets$variable%in%c("ALDH1A2", "PHF24", "CCDC144NL-AS1", "NCAN", "PRAME", "CRYBB2", "SIX3", "CPEB1", "TMEM163", "LSMEM1", "EPB41L4B", "RAP1GAP2", "SLC9A7"))
# Subset to four genes of interest for the first subset plot.
de_of_de_two_datasets_cons=de_of_de_two_datasets[which(de_of_de_two_datasets$variable%in%c("MAGEA4", "PRAME", "CRYBB2", "SIX3")),]
# NOTE(review): fill=dataset in aes() below is overridden by the literal
# fill="black" in geom_pointrange, so the fill mapping has no visible effect.
ggplot(data=de_of_de_two_datasets_cons, aes(x=risk, y=value, group=dataset)) +
geom_quasirandom(shape=19, alpha=0.2, cex=0.5,dodge.width=.8)+facet_wrap(~variable,scales="free_y")+
geom_pointrange(mapping = aes(x = risk, y = value, shape=dataset, fill=dataset),
stat = "summary", colour="#1c2e4a",
fun.ymin = function(z) {quantile(z,0.25)},
fun.ymax = function(z) {quantile(z,0.75)},
fun.y = median, fill="black",
position=position_dodge(width=0.8)) +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"),
text=element_text(size = 14),
axis.text.x = element_text(angle = 90, hjust = 1))+
labs(y="Expression (log2(TPM))")
de_of_de_two_datasets$da  # NOTE(review): partial match of $dataset? looks like a leftover
# Drop the intermediate-risk group for the second plot.
# NOTE(review): the first assignment below is immediately overwritten by the
# second -- only the _cons-based (4-gene) subset is actually kept.
de_of_de_two_datasets_no_inter=de_of_de_two_datasets[which(de_of_de_two_datasets$risk!="intermediate_risk"),]
de_of_de_two_datasets_no_inter=de_of_de_two_datasets_cons[which(de_of_de_two_datasets_cons$risk!="intermediate_risk"),]
# NOTE(review): _cons is redefined here to the 13-gene set, but the plot
# below uses _no_inter, which was computed from the earlier 4-gene subset.
de_of_de_two_datasets_cons=de_of_de_two_datasets[which(de_of_de_two_datasets$variable%in%c("ALDH1A2", "PHF24", "CCDC144NL-AS1", "NCAN", "PRAME", "CRYBB2", "SIX3", "CPEB1", "TMEM163", "LSMEM1", "EPB41L4B", "RAP1GAP2", "SLC9A7")),]
ggplot(data=de_of_de_two_datasets_no_inter, aes(x=risk, y=value, group=dataset)) +
geom_quasirandom(shape=19, alpha=0.2, cex=0.5,dodge.width=.8)+facet_wrap(~variable,scales="free_y")+
geom_pointrange(mapping = aes(x = risk, y = value, shape=dataset, fill=dataset),
stat = "summary", colour="#1c2e4a",
fun.ymin = function(z) {quantile(z,0.25)},
fun.ymax = function(z) {quantile(z,0.75)},
fun.y = median, fill="black",
position=position_dodge(width=0.8)) +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"),
text=element_text(size = 14),
axis.text.x = element_text(angle = 90, hjust = 1))+
labs(y="Expression (log2(TPM))")
# NOTE(review): the return value of FindClusters() is discarded -- assign it
# if the clustering result is meant to be used.
FindClusters(srat_inhouse@assays$RNA@counts, srat_inhouse@meta.data$idents_for_plot)
unique(srat_inhouse@meta.data$new_idents)
| /fig4B_with_seqc.R | no_license | GKild/neuroblastoma | R | false | false | 5,192 | r | de_of_de_facet_tab=target_log2tpm[genes_for_kap, all$TARGET_SHORT]
de_tab=as.data.frame(t(de_of_de_facet_tab))
de_tab$risk=all$risk
de_tab$patient=rownames(de_tab)
de_tab_melted=melt(de_tab, id.vars=c("patient", "risk"))
de_tab_melted$risk=factor(de_tab_melted$risk, levels = c("low_risk_4s", "intermediate_risk","high_risk"))
de_tab_melted$variable=factor(de_tab_melted$variable, levels = c(rownames(de_of_de_sorted)))
de_tab_melted
unique(seqc_risks$risk)
seqc_risk2=seqc_risks
seqc_risk2$risk[which(seqc_risk2$risk=="low_risk")]="low_risk_4s"
unique(seqc_risk2$risk)
genes_for_seqc=genes_for_kap[which(genes_for_kap%in%rownames(seqc_tpm_log2))]
seqc_facet_tab=seqc_tpm_log2[genes_for_seqc, seqc_risk2$patient]
seqc_facet_tab=as.data.frame(t(seqc_facet_tab))
seqc_facet_tab$risk=seqc_risk2$risk
seqc_facet_tab$patient=rownames(seqc_facet_tab)
seqc_facet_tab_melted=melt(seqc_facet_tab, id.vars=c("patient", "risk"))
seqc_facet_tab_melted$risk=factor(seqc_facet_tab_melted$risk, levels = c("low_risk_4s", "intermediate_risk","high_risk"))
de_tab_melted$dataset="TARGET"
seqc_facet_tab_melted$dataset="SEQC"
de_tab_melted
seqc_facet_tab_melted
de_of_de_two_datasets=rbind(de_tab_melted, seqc_facet_tab_melted)
ggplot(data = de_of_de_two_datasets, aes(x=risk,y=value, group=dataset)) +
geom_quasirandom(shape=1, alpha=0.3)+facet_wrap(~variable,scales="free_y")+
geom_pointrange(mapping = aes(x = risk, y = value),
stat = "summary",
fun.ymin = function(z) {quantile(z,0.25)},
fun.ymax = function(z) {quantile(z,0.75)},
fun.y = median, shape=22, fill="black",
position=position_dodge(width=0.8)) +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"),
text=element_text(size = 14),
axis.text.x = element_text(angle = 90, hjust = 1))
de_of_de_two_datasets$risk
de_of_de_two_datasets$variable=as.character(de_of_de_two_datasets$variable)
de_of_de_two_datasets$risk=as.character(de_of_de_two_datasets$risk)
de_of_de_two_datasets$risk=factor(de_of_de_two_datasets$risk, levels=c("low_risk_4s", "intermediate_risk", "high_risk"))
de_of_de_two_datasets$variable=factor(de_of_de_two_datasets$variable, levels=genes_for_kap)
de_of_de_two_datasets$dataset=factor(de_of_de_two_datasets$dataset, levels=c("TARGET", "SEQC"))
which(de_of_de_two_datasets$variable%in%c("ALDH1A2", "PHF24", "CCDC144NL-AS1", "NCAN", "PRAME", "CRYBB2", "SIX3", "CPEB1", "TMEM163", "LSMEM1", "EPB41L4B", "RAP1GAP2", "SLC9A7"))
de_of_de_two_datasets_cons=de_of_de_two_datasets[which(de_of_de_two_datasets$variable%in%c("MAGEA4", "PRAME", "CRYBB2", "SIX3")),]
ggplot(data=de_of_de_two_datasets_cons, aes(x=risk, y=value, group=dataset)) +
geom_quasirandom(shape=19, alpha=0.2, cex=0.5,dodge.width=.8)+facet_wrap(~variable,scales="free_y")+
geom_pointrange(mapping = aes(x = risk, y = value, shape=dataset, fill=dataset),
stat = "summary", colour="#1c2e4a",
fun.ymin = function(z) {quantile(z,0.25)},
fun.ymax = function(z) {quantile(z,0.75)},
fun.y = median, fill="black",
position=position_dodge(width=0.8)) +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"),
text=element_text(size = 14),
axis.text.x = element_text(angle = 90, hjust = 1))+
labs(y="Expression (log2(TPM))")
de_of_de_two_datasets$da
de_of_de_two_datasets_no_inter=de_of_de_two_datasets[which(de_of_de_two_datasets$risk!="intermediate_risk"),]
de_of_de_two_datasets_no_inter=de_of_de_two_datasets_cons[which(de_of_de_two_datasets_cons$risk!="intermediate_risk"),]
de_of_de_two_datasets_cons=de_of_de_two_datasets[which(de_of_de_two_datasets$variable%in%c("ALDH1A2", "PHF24", "CCDC144NL-AS1", "NCAN", "PRAME", "CRYBB2", "SIX3", "CPEB1", "TMEM163", "LSMEM1", "EPB41L4B", "RAP1GAP2", "SLC9A7")),]
ggplot(data=de_of_de_two_datasets_no_inter, aes(x=risk, y=value, group=dataset)) +
geom_quasirandom(shape=19, alpha=0.2, cex=0.5,dodge.width=.8)+facet_wrap(~variable,scales="free_y")+
geom_pointrange(mapping = aes(x = risk, y = value, shape=dataset, fill=dataset),
stat = "summary", colour="#1c2e4a",
fun.ymin = function(z) {quantile(z,0.25)},
fun.ymax = function(z) {quantile(z,0.75)},
fun.y = median, fill="black",
position=position_dodge(width=0.8)) +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"),
text=element_text(size = 14),
axis.text.x = element_text(angle = 90, hjust = 1))+
labs(y="Expression (log2(TPM))")
FindClusters(srat_inhouse@assays$RNA@counts, srat_inhouse@meta.data$idents_for_plot)
unique(srat_inhouse@meta.data$new_idents)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot.mesh.2.5D}
\alias{plot.mesh.2.5D}
\title{Plot a mesh.2.5D object}
\usage{
\method{plot}{mesh.2.5D}(x, ...)
}
\arguments{
\item{x}{A \code{mesh.2.5D} object generated by \code{create.mesh.2.5D}.}
\item{...}{Arguments representing graphical options to be passed to \link[graphics]{par}.}
}
\value{
No return value
}
\description{
Plot the triangulation of a \code{mesh.2.5D} object, generated by \code{create.mesh.2.5D}
}
\examples{
library(fdaPDE)
## Upload the hub2.5D the data
data(hub2.5D)
hub2.5D.nodes = hub2.5D$hub2.5D.nodes
hub2.5D.triangles = hub2.5D$hub2.5D.triangles
## Create mesh
mesh = create.mesh.2.5D(nodes = hub2.5D.nodes, triangles = hub2.5D.triangles)
plot(mesh)
}
| /man/plot.mesh.2.5D.Rd | no_license | cran/fdaPDE | R | false | true | 781 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot.mesh.2.5D}
\alias{plot.mesh.2.5D}
\title{Plot a mesh.2.5D object}
\usage{
\method{plot}{mesh.2.5D}(x, ...)
}
\arguments{
\item{x}{A \code{mesh.2.5D} object generated by \code{create.mesh.2.5D}.}
\item{...}{Arguments representing graphical options to be passed to \link[graphics]{par}.}
}
\value{
No return value
}
\description{
Plot the triangulation of a \code{mesh.2.5D} object, generated by \code{create.mesh.2.5D}
}
\examples{
library(fdaPDE)
## Upload the hub2.5D the data
data(hub2.5D)
hub2.5D.nodes = hub2.5D$hub2.5D.nodes
hub2.5D.triangles = hub2.5D$hub2.5D.triangles
## Create mesh
mesh = create.mesh.2.5D(nodes = hub2.5D.nodes, triangles = hub2.5D.triangles)
plot(mesh)
}
|
\name{UniformManifoldApproximationProjection}
\alias{UniformManifoldApproximationProjection}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Uniform Manifold Approximation and Projection
}
\description{
Uniform manifold approximation and projection is a technique for dimension reduction. The algorithm was described by [McInnes et al., 2018].
}
\usage{
UniformManifoldApproximationProjection(DataOrDistances, k,
Epochs,OutputDimension=2,Algorithm='umap_pkg',PlotIt=FALSE,Cls,\dots)
}
\arguments{
\item{DataOrDistances}{
Numerical matrix defined as either
\code{Data}, i.e., [1:n,1:d], nonsymmetric, and consists of n cases of d-dimensional data points with every case having d attributes, variables or features,
or
\code{Distances}, i.e.,[1:n,1:n], symmetric and consists of n cases, e.g., \code{as.matrix(dist(Data,method))}
}
\item{k}{
number of k nearest neighbors, Important parameter, if not given, settings of package \pkg{umap} will be used, default of package \pkg{umap} is currently 15
}
\item{Epochs}{
Number of epochs (scalar), i.e., training length; default of package \pkg{umap} is currently 200
}
\item{OutputDimension}{
Number of dimensions in the output space, default=2
}
\item{Algorithm}{
'umap': provides an interface for two implementations. One is written from scratch; the other requires the Python \pkg{umap} implementation
'uwot_pkg': complete re-implementation in R (and C++, via the 'Rcpp' package) of \pkg{uwot}
}
\item{PlotIt}{
Default: FALSE, If TRUE: Plots the projection as a 2d visualization.
OutputDimension>2: only the first two dimensions will be shown
}
\item{Cls}{
Optional,: only relevant if PlotIt=TRUE. Numeric vector, given Classification in numbers: every element is the cluster number of a certain corresponding element of data.
}
\item{\dots}{
one of the other 21 parameters that can be specified, please see \code{\link[umap]{umap.defaults}} of package \pkg{umap} for details or parameters to be set in package \pkg{uwot} depending on the choice of \code{Algorithm}.
}
}
\details{
To the knowledge of the author of this function no peer-reviewed publication of the method exists. Use with great care.
}
\value{
List of
\item{ProjectedPoints}{[1:n,OutputDimension], n by OutputDimension matrix containing coordinates of the Projection}
\item{ModelObject}{output of \code{\link[umap]{umap}}}
\item{Setting}{specific settings used in \code{UniformManifoldApproximationProjection}}
}
\references{
[McInnes et al., 2018] McInnes, L., Healy, J., & Melville, J.: Umap: Uniform manifold approximation and projection for dimension reduction, arXiv preprint arXiv:1802.03426, 2018.
[Ultsch/Siemon, 1990] Ultsch, A., & Siemon, H. P.: Kohonen's Self Organizing Feature Maps for Exploratory Data Analysis, International Neural Network Conference, pp. 305-308, Kluwer Academic Press, Paris, France, 1990.
}
\author{
Michael Thrun
}
\note{
Uniform Manifold Approximation and Projection and U-matrix [Ultsch/Siemon, 1990] are both sometimes abbreviated with Umap. Hence the abbreviation is omitted here.
}
\seealso{
\code{\link[umap]{umap}}
}
\examples{
data('Hepta')
Data=Hepta$Data
Proj=UniformManifoldApproximationProjection(Data)
\dontrun{
PlotProjectedPoints(Proj$ProjectedPoints,Hepta$Cls)
}
}
\keyword{UniformManifoldApproximationProjection}
\concept{Uniform Manifold Approximation Projection}
\concept{Projection Method}
\concept{Dimensionality Reduction} | /issuestests/ProjectionBasedClustering/man/UniformManifoldApproximationProjection.Rd | no_license | akhikolla/RcppDeepStateTest | R | false | false | 3,541 | rd | \name{UniformManifoldApproximationProjection}
\alias{UniformManifoldApproximationProjection}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Uniform Manifold Approximation and Projection
}
\description{
Uniform manifold approximation and projection is a technique for dimension reduction. The algorithm was described by [McInnes et al., 2018].
}
\usage{
UniformManifoldApproximationProjection(DataOrDistances, k,
Epochs,OutputDimension=2,Algorithm='umap_pkg',PlotIt=FALSE,Cls,\dots)
}
\arguments{
\item{DataOrDistances}{
Numerical matrix defined as either
\code{Data}, i.e., [1:n,1:d], nonsymmetric, and consists of n cases of d-dimensional data points with every case having d attributes, variables or features,
or
\code{Distances}, i.e.,[1:n,1:n], symmetric and consists of n cases, e.g., \code{as.matrix(dist(Data,method))}
}
\item{k}{
number of k nearest neighbors, Important parameter, if not given, settings of package \pkg{umap} will be used, default of package \pkg{umap} is currently 15
}
\item{Epochs}{
Number of epochs (scalar), i.e., training length; default of package \pkg{umap} is currently 200
}
\item{OutputDimension}{
Number of dimensions in the output space, default=2
}
\item{Algorithm}{
'umap': provides an interface for two implementations. One is written from scratch; the other requires the Python \pkg{umap} implementation
'uwot_pkg': complete re-implementation in R (and C++, via the 'Rcpp' package) of \pkg{uwot}
}
\item{PlotIt}{
Default: FALSE, If TRUE: Plots the projection as a 2d visualization.
OutputDimension>2: only the first two dimensions will be shown
}
\item{Cls}{
Optional,: only relevant if PlotIt=TRUE. Numeric vector, given Classification in numbers: every element is the cluster number of a certain corresponding element of data.
}
\item{\dots}{
one of the other 21 parameters that can be specified, please see \code{\link[umap]{umap.defaults}} of package \pkg{umap} for details or parameters to be set in package \pkg{uwot} depending on the choice of \code{Algorithm}.
}
}
\details{
To the knowledge of the author of this function, no peer-reviewed publication of the method exists. Use with great care.
}
\value{
List of
\item{ProjectedPoints}{[1:n,OutputDimension], n by OutputDimension matrix containing coordinates of the Projection}
\item{ModelObject}{output of \code{\link[umap]{umap}}}
\item{Setting}{specific settings used in \code{UniformManifoldApproximationProjection}}
}
\references{
[McInnes et al., 2018] McInnes, L., Healy, J., & Melville, J.: Umap: Uniform manifold approximation and projection for dimension reduction, arXiv preprint arXiv:1802.03426, 2018.
[Ultsch/Siemon, 1990] Ultsch, A., & Siemon, H. P.: Kohonen's Self Organizing Feature Maps for Exploratory Data Analysis, International Neural Network Conference, pp. 305-308, Kluwer Academic Press, Paris, France, 1990.
}
\author{
Michael Thrun
}
\note{
Uniform Manifold Approximation and Projection and the U-matrix [Ultsch/Siemon, 1990] are both sometimes abbreviated as Umap. Hence the abbreviation is omitted here.
}
\seealso{
\code{\link[umap]{umap}}
}
\examples{
data('Hepta')
Data=Hepta$Data
Proj=UniformManifoldApproximationProjection(Data)
\dontrun{
PlotProjectedPoints(Proj$ProjectedPoints,Hepta$Cls)
}
}
\keyword{UniformManifoldApproximationProjection}
\concept{Uniform Manifold Approximation Projection}
\concept{Projection Method}
\concept{Dimensionality Reduction} |
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
source("predictNextWord.R", local=TRUE)
source("form.R", local=TRUE)
source("maxNgramSize.R", local=TRUE)
source("ctest.R", local=TRUE)
load("all3models")
library(shiny)
library(shinyjs)
# Define the server logic for the next-word prediction app
# Server for the next-word prediction Shiny app. Relies on the helpers
# sourced above (predictNextWord(), form(), maxNgramSize(), ctest()) and on
# the n-gram models loaded from "all3models".
shinyServer(function(input, output) {
  # The collapsible details section starts out hidden; only the
  # "show details" control is visible at launch.
  shinyjs::hide("hideDetails")
  shinyjs::hide("details")

  # Predicted next word for the text entered so far.
  output$prediction <- renderText({
    predictNextWord(input$textContent, inputType=input$textType)
  })

  # Informational line describing the model's maximum n-gram size.
  output$maxNgramSize <- renderText({
    paste("This model uses a max ngram size of", maxNgramSize(), ".", sep=" ")
  })

  # Echo the raw user input, quoted, for the details panel.
  output$outputContent <- renderPrint({
    paste("'", format(input$textContent), "'", sep="")
  })

  # The cleaned/normalised form of the input actually used for prediction.
  output$formed <- renderPrint({
    paste("'", form(input$textContent), "'", sep="")
  })

  # Candidate phrase tables computed from the cleaned input.
  output$phraseTables <- renderTable({
    ctest(form(input$textContent))
  })

  # Toggle handlers for the details panel: "show" reveals it and swaps the
  # buttons; "hide" does the reverse.
  observeEvent(input$showDetails, {
    shinyjs::show("details")
    shinyjs::hide("showDetails")
    shinyjs::show("hideDetails")
  })
  observeEvent(input$hideDetails, {
    shinyjs::hide("details")
    shinyjs::hide("hideDetails")
    shinyjs::show("showDetails")
  })
})
| /server.R | no_license | jeromma/PredictNextWord | R | false | false | 1,359 | r | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
source("predictNextWord.R", local=TRUE)
source("form.R", local=TRUE)
source("maxNgramSize.R", local=TRUE)
source("ctest.R", local=TRUE)
load("all3models")
library(shiny)
library(shinyjs)
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
shinyjs::hide("hideDetails")
shinyjs::hide("details")
output$prediction <- renderText({
predictNextWord(input$textContent, inputType=input$textType)
})
output$maxNgramSize <- renderText({
paste("This model uses a max ngram size of", maxNgramSize(), ".", sep=" ")
})
output$outputContent <- renderPrint({
paste("'", format(input$textContent), "'", sep="")
})
output$formed <- renderPrint({
paste("'", form(input$textContent), "'", sep="")
})
output$phraseTables <- renderTable({
ctest(form(input$textContent))
})
observeEvent(input$showDetails, {
shinyjs::show("details")
shinyjs::hide("showDetails")
shinyjs::show("hideDetails")
})
observeEvent(input$hideDetails, {
shinyjs::hide("details")
shinyjs::hide("hideDetails")
shinyjs::show("showDetails")
})
})
|
library(gpuR)
context("vclMatrix Row and Column Methods")

# set seed
set.seed(123)

# Dimensions of the random test matrices.
ORDER_X <- 4
ORDER_Y <- 5

# Base R objects
A <- matrix(rnorm(ORDER_X*ORDER_Y), nrow=ORDER_X, ncol=ORDER_Y)
B <- matrix(rnorm(ORDER_X*ORDER_Y), nrow=ORDER_X, ncol=ORDER_Y)

# CPU reference results; the GPU computations below are compared to these.
R <- rowSums(A)
C <- colSums(A)
RM <- rowMeans(A)
CM <- colMeans(A)
# colSums/rowSums/colMeans/rowMeans on vclMatrix objects, checked against
# the base R reference vectors (C, R, CM, RM) computed above. Float results
# use a loose tolerance; double results use sqrt(.Machine$double.eps).
# Each test is skipped when no GPU (or no double-precision support) exists.
# Fix: the info= strings were copy-pasted ("covariance", "colSums") and did
# not name the operation under test; they now do.

test_that("vclMatrix Single Precision Column Sums", {
    has_gpu_skip()

    fgpuX <- vclMatrix(A, type="float")

    gpuC <- colSums(fgpuX)

    expect_is(gpuC, "fvclVector")
    expect_equal(gpuC[], C, tolerance=1e-06,
                 info="float colSums not equivalent")
})

test_that("vclMatrix Double Precision Column Sums", {
    has_gpu_skip()
    has_double_skip()

    dgpuX <- vclMatrix(A, type="double")

    gpuC <- colSums(dgpuX)

    expect_is(gpuC, "dvclVector")
    expect_equal(gpuC[], C, tolerance=.Machine$double.eps ^ 0.5,
                 info="double colSums not equivalent")
})

test_that("vclMatrix Single Precision Row Sums", {
    has_gpu_skip()

    fgpuX <- vclMatrix(A, type="float")

    gpuC <- rowSums(fgpuX)

    expect_is(gpuC, "fvclVector")
    expect_equal(gpuC[], R, tolerance=1e-06,
                 info="float rowSums not equivalent")
})

test_that("vclMatrix Double Precision Row Sums", {
    has_gpu_skip()
    has_double_skip()

    dgpuX <- vclMatrix(A, type="double")

    gpuC <- rowSums(dgpuX)

    expect_is(gpuC, "dvclVector")
    expect_equal(gpuC[], R, tolerance=.Machine$double.eps ^ 0.5,
                 info="double rowSums not equivalent")
})

test_that("vclMatrix Single Precision Column Means", {
    has_gpu_skip()

    fgpuX <- vclMatrix(A, type="float")

    gpuC <- colMeans(fgpuX)

    expect_is(gpuC, "fvclVector")
    expect_equal(gpuC[], CM, tolerance=1e-06,
                 info="float colMeans not equivalent")
})

test_that("vclMatrix Double Precision Column Means", {
    has_gpu_skip()
    has_double_skip()

    dgpuX <- vclMatrix(A, type="double")

    gpuC <- colMeans(dgpuX)

    expect_is(gpuC, "dvclVector")
    expect_equal(gpuC[], CM, tolerance=.Machine$double.eps ^ 0.5,
                 info="double colMeans not equivalent")
})

test_that("vclMatrix Single Precision Row Means", {
    has_gpu_skip()

    fgpuX <- vclMatrix(A, type="float")

    gpuC <- rowMeans(fgpuX)

    expect_is(gpuC, "fvclVector")
    expect_equal(gpuC[], RM, tolerance=1e-06,
                 info="float rowMeans not equivalent")
})

test_that("vclMatrix Double Precision Row Means", {
    has_gpu_skip()
    has_double_skip()

    dgpuX <- vclMatrix(A, type="double")

    gpuC <- rowMeans(dgpuX)

    expect_is(gpuC, "dvclVector")
    expect_equal(gpuC[], RM, tolerance=.Machine$double.eps ^ 0.5,
                 info="double rowMeans not equivalent")
})
#cbind/rbind tests
# Binding two vclMatrix objects, and binding a scalar on either side
# (the scalar is recycled into a full row/column, as in base R).

test_that("vclMatrix Single Precision cbind", {
    has_gpu_skip()

    # Base R references: matrix-matrix and scalar-matrix binds.
    C_bind <- cbind(A, B)
    C_scalar <- cbind(1, A)
    C_scalar2 <- cbind(A,1)

    gpuA <- vclMatrix(A, type="float")
    gpuB <- vclMatrix(B, type="float")

    gpuC <- cbind(gpuA, gpuB)
    expect_is(gpuC, "fvclMatrix")
    expect_equal(gpuC[], C_bind, tolerance=1e-06,
                 info="float cbind not equivalent")

    gpu_scalar <- cbind(1, gpuA)
    gpu_scalar2 <- cbind(gpuA, 1)
    expect_equal(gpu_scalar[], C_scalar, tolerance=1e-06,
                 info="float scalar cbind not equivalent")
    expect_equal(gpu_scalar2[], C_scalar2, tolerance=1e-06,
                 info="float scalar cbind not equivalent")
})

test_that("vclMatrix Double Precision cbind", {
    has_gpu_skip()
    has_double_skip()

    C_bind <- cbind(A, B)
    C_scalar <- cbind(1, A)
    C_scalar2 <- cbind(A,1)

    gpuA <- vclMatrix(A, type="double")
    gpuB <- vclMatrix(B, type="double")

    gpuC <- cbind(gpuA, gpuB)
    expect_is(gpuC, "dvclMatrix")
    expect_equal(gpuC[], C_bind, tolerance=.Machine$double.eps^0.5,
                 info="double cbind not equivalent")

    gpu_scalar <- cbind(1, gpuA)
    gpu_scalar2 <- cbind(gpuA, 1)
    expect_equal(gpu_scalar[], C_scalar, tolerance=.Machine$double.eps^0.5,
                 info="double scalar cbind not equivalent")
    expect_equal(gpu_scalar2[], C_scalar2, tolerance=.Machine$double.eps^0.5,
                 info="double scalar cbind not equivalent")
})

test_that("vclMatrix Single Precision rbind", {
    has_gpu_skip()

    C_bind <- rbind(A, B)
    C_scalar <- rbind(1, A)
    C_scalar2 <- rbind(A,1)

    gpuA <- vclMatrix(A, type="float")
    gpuB <- vclMatrix(B, type="float")

    gpuC <- rbind(gpuA, gpuB)
    expect_is(gpuC, "fvclMatrix")
    expect_equal(gpuC[], C_bind, tolerance=1e-06,
                 info="float rbind not equivalent")

    gpu_scalar <- rbind(1, gpuA)
    gpu_scalar2 <- rbind(gpuA, 1)
    expect_equal(gpu_scalar[], C_scalar, tolerance=1e-06,
                 info="float scalar rbind not equivalent")
    expect_equal(gpu_scalar2[], C_scalar2, tolerance=1e-06,
                 info="float scalar rbind not equivalent")
})

test_that("vclMatrix Double Precision rbind", {
    has_gpu_skip()
    has_double_skip()

    C_bind <- rbind(A, B)
    C_scalar <- rbind(1, A)
    C_scalar2 <- rbind(A,1)

    gpuA <- vclMatrix(A, type="double")
    gpuB <- vclMatrix(B, type="double")

    gpuC <- rbind(gpuA, gpuB)
    expect_is(gpuC, "dvclMatrix")
    expect_equal(gpuC[], C_bind, tolerance=.Machine$double.eps^0.5,
                 info="double rbind not equivalent")

    gpu_scalar <- rbind(1, gpuA)
    gpu_scalar2 <- rbind(gpuA, 1)
    expect_equal(gpu_scalar[], C_scalar, tolerance=.Machine$double.eps^0.5,
                 info="double scalar rbind not equivalent")
    expect_equal(gpu_scalar2[], C_scalar2, tolerance=.Machine$double.eps^0.5,
                 info="double scalar rbind not equivalent")
})
| /inst/testWithGPU/testthat/test_vclMatrix_row_col.R | no_license | cran/gpuR | R | false | false | 6,327 | r | library(gpuR)
context("vclMatrix Row and Column Methods")
# set seed
set.seed(123)
ORDER_X <- 4
ORDER_Y <- 5
# Base R objects
A <- matrix(rnorm(ORDER_X*ORDER_Y), nrow=ORDER_X, ncol=ORDER_Y)
B <- matrix(rnorm(ORDER_X*ORDER_Y), nrow=ORDER_X, ncol=ORDER_Y)
R <- rowSums(A)
C <- colSums(A)
RM <- rowMeans(A)
CM <- colMeans(A)
test_that("vclMatrix Single Precision Column Sums",
{
has_gpu_skip()
fgpuX <- vclMatrix(A, type="float")
gpuC <- colSums(fgpuX)
expect_is(gpuC, "fvclVector")
expect_equal(gpuC[], C, tolerance=1e-06,
info="float covariance values not equivalent")
})
test_that("vclMatrix Double Precision Column Sums",
{
has_gpu_skip()
has_double_skip()
dgpuX <- vclMatrix(A, type="double")
gpuC <- colSums(dgpuX)
expect_is(gpuC, "dvclVector")
expect_equal(gpuC[], C, tolerance=.Machine$double.eps ^ 0.5,
info="double colSums not equivalent")
})
test_that("vclMatrix Single Precision Row Sums",
{
has_gpu_skip()
fgpuX <- vclMatrix(A, type="float")
gpuC <- rowSums(fgpuX)
expect_is(gpuC, "fvclVector")
expect_equal(gpuC[], R, tolerance=1e-06,
info="float covariance values not equivalent")
})
test_that("vclMatrix Double Precision Row Sums",
{
has_gpu_skip()
has_double_skip()
dgpuX <- vclMatrix(A, type="double")
gpuC <- rowSums(dgpuX)
expect_is(gpuC, "dvclVector")
expect_equal(gpuC[], R, tolerance=.Machine$double.eps ^ 0.5,
info="double colSums not equivalent")
})
test_that("vclMatrix Single Precision Column Means",
{
has_gpu_skip()
fgpuX <- vclMatrix(A, type="float")
gpuC <- colMeans(fgpuX)
expect_is(gpuC, "fvclVector")
expect_equal(gpuC[], CM, tolerance=1e-06,
info="float covariance values not equivalent")
})
test_that("vclMatrix Double Precision Column Means",
{
has_gpu_skip()
has_double_skip()
dgpuX <- vclMatrix(A, type="double")
gpuC <- colMeans(dgpuX)
expect_is(gpuC, "dvclVector")
expect_equal(gpuC[], CM, tolerance=.Machine$double.eps ^ 0.5,
info="double colSums not equivalent")
})
test_that("vclMatrix Single Precision Row Means",
{
has_gpu_skip()
fgpuX <- vclMatrix(A, type="float")
gpuC <- rowMeans(fgpuX)
expect_is(gpuC, "fvclVector")
expect_equal(gpuC[], RM, tolerance=1e-06,
info="float covariance values not equivalent")
})
test_that("vclMatrix Double Precision Row Means",
{
has_gpu_skip()
has_double_skip()
dgpuX <- vclMatrix(A, type="double")
gpuC <- rowMeans(dgpuX)
expect_is(gpuC, "dvclVector")
expect_equal(gpuC[], RM, tolerance=.Machine$double.eps ^ 0.5,
info="double colSums not equivalent")
})
#cbind/rbind tests
test_that("vclMatrix Single Precision cbind",
{
has_gpu_skip()
C_bind <- cbind(A, B)
C_scalar <- cbind(1, A)
C_scalar2 <- cbind(A,1)
gpuA <- vclMatrix(A, type="float")
gpuB <- vclMatrix(B, type="float")
gpuC <- cbind(gpuA, gpuB)
expect_is(gpuC, "fvclMatrix")
expect_equal(gpuC[], C_bind, tolerance=1e-06,
info="float cbind not equivalent")
gpu_scalar <- cbind(1, gpuA)
gpu_scalar2 <- cbind(gpuA, 1)
expect_equal(gpu_scalar[], C_scalar, tolerance=1e-06,
info="float scalar cbind not equivalent")
expect_equal(gpu_scalar2[], C_scalar2, tolerance=1e-06,
info="float scalar cbind not equivalent")
})
test_that("vclMatrix Double Precision cbind",
{
has_gpu_skip()
has_double_skip()
C_bind <- cbind(A, B)
C_scalar <- cbind(1, A)
C_scalar2 <- cbind(A,1)
gpuA <- vclMatrix(A, type="double")
gpuB <- vclMatrix(B, type="double")
gpuC <- cbind(gpuA, gpuB)
expect_is(gpuC, "dvclMatrix")
expect_equal(gpuC[], C_bind, tolerance=.Machine$double.eps^0.5,
info="double cbind not equivalent")
gpu_scalar <- cbind(1, gpuA)
gpu_scalar2 <- cbind(gpuA, 1)
expect_equal(gpu_scalar[], C_scalar, tolerance=.Machine$double.eps^0.5,
info="double scalar cbind not equivalent")
expect_equal(gpu_scalar2[], C_scalar2, tolerance=.Machine$double.eps^0.5,
info="double scalar cbind not equivalent")
})
test_that("vclMatrix Single Precision rbind",
{
has_gpu_skip()
C_bind <- rbind(A, B)
C_scalar <- rbind(1, A)
C_scalar2 <- rbind(A,1)
gpuA <- vclMatrix(A, type="float")
gpuB <- vclMatrix(B, type="float")
gpuC <- rbind(gpuA, gpuB)
expect_is(gpuC, "fvclMatrix")
expect_equal(gpuC[], C_bind, tolerance=1e-06,
info="float rbind not equivalent")
gpu_scalar <- rbind(1, gpuA)
gpu_scalar2 <- rbind(gpuA, 1)
expect_equal(gpu_scalar[], C_scalar, tolerance=1e-06,
info="float scalar rbind not equivalent")
expect_equal(gpu_scalar2[], C_scalar2, tolerance=1e-06,
info="float scalar rbind not equivalent")
})
test_that("vclMatrix Double Precision rbind",
{
has_gpu_skip()
has_double_skip()
C_bind <- rbind(A, B)
C_scalar <- rbind(1, A)
C_scalar2 <- rbind(A,1)
gpuA <- vclMatrix(A, type="double")
gpuB <- vclMatrix(B, type="double")
gpuC <- rbind(gpuA, gpuB)
expect_is(gpuC, "dvclMatrix")
expect_equal(gpuC[], C_bind, tolerance=.Machine$double.eps^0.5,
info="double rbind not equivalent")
gpu_scalar <- rbind(1, gpuA)
gpu_scalar2 <- rbind(gpuA, 1)
expect_equal(gpu_scalar[], C_scalar, tolerance=.Machine$double.eps^0.5,
info="double scalar rbind not equivalent")
expect_equal(gpu_scalar2[], C_scalar2, tolerance=.Machine$double.eps^0.5,
info="double scalar rbind not equivalent")
})
|
#''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
#' Построение истории по списку инструменту из архивов сделок по фьючерсам и
#' опционам с сайта Московской биржи
#'
#'
#' 2016-03-20 | rlukerin@gmail.com
#''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
#' Месяц Код фьючерса
#' Январь F
#' Февраль G
#' Март H
#' Апрель J
#' Май K
#' Июнь M
#' Июль N
#' Август Q
#' Сентябрь U
#' Октябрь V
#' Ноябрь X
#' Декабрь Z
#'
library(data.table)
options(digits.secs=3)
#' Read the trades for the given instrument codes from one MOEX daily
#' futures archive ("FTyymmdd.zip", which contains "yymmddft.csv").
#'
#' @param symbol character vector of instrument codes to keep (e.g. "GZM5").
#' @param fname  name of the daily archive, e.g. "FT160328.zip".
#' @return data.table of the trades for the requested codes, with dat_time
#'   parsed to POSIXct (fractional seconds kept via %OS and options(digits.secs)).
getSymbData <- function(symbol, fname) {
  # "FT160328.ZIP" -> "160328ft.csv": name of the csv inside the archive.
  # fixed=TRUE so "." is matched literally (the old regex "." matched any char).
  # NOTE(review): gsub("FT", ...) strips every "FT" occurrence, so fname is
  # assumed to be a bare file name, not a path containing "ft" -- confirm.
  futFname <- gsub(".ZIP", "ft.csv", gsub("FT", "", toupper(fname), fixed=TRUE), fixed=TRUE)
  unzip(fname, files=futFname)
  secDT <- fread(futFname, stringsAsFactors = FALSE)
  # The extracted csv is only needed transiently; remove it after reading.
  file.remove(futFname)
  # Keep the requested codes and parse the timestamp column in place.
  secDT[code %in% symbol][, dat_time:=as.POSIXct(strptime(dat_time, format="%Y-%m-%d %H:%M:%OS"))][]
}
#' Build a trade history for a list of instruments from the MOEX daily
#' futures archives stored under homeDir/<year>/FTyymmdd.zip.
#'
#' @param from,to   Date range (inclusive) of daily archives to read.
#' @param symbList  character vector of instrument codes.
#' @param homeDir   root directory holding one sub-directory per year.
#' @return data.table with the trades of all requested symbols for the period.
getSymbol.MOEX <- function(from=as.Date("2016-03-28"), to=as.Date("2016-04-05"), symbList=c("GZM5", "GZH5"), homeDir="~/repos/Data/MOEX/") {
  # getSymbData() works relative to the current directory, so the working
  # directory is switched per year below. Restore it on exit (even on error)
  # so the caller's working directory is not clobbered.
  oldWd <- getwd()
  on.exit(setwd(oldWd), add = TRUE)

  years <- year(from):year(to)
  # Expected archive names for every day in the range, e.g. "FT160328.zip".
  fileFilter <- paste("FT", format(seq.Date(from=from, to=to, by=1), "%y%m%d"), ".zip", sep="")

  symbDT <- rbindlist(lapply(years, FUN=function(y) {
    setwd(paste(homeDir, y, sep=""))
    files <- dir()  # list the year's directory once instead of twice
    fileList <- files[files %in% fileFilter]
    rbindlist(lapply(fileList, FUN=function(f) getSymbData(symbList, f)))
  }))
  symbDT
}
| /getSymbolMOEXDATA.R | no_license | tpopenfoose/rltrading | R | false | false | 1,508 | r | #''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
#' Построение истории по списку инструменту из архивов сделок по фьючерсам и
#' опционам с сайта Московской биржи
#'
#'
#' 2016-03-20 | rlukerin@gmail.com
#''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
#' Месяц Код фьючерса
#' Январь F
#' Февраль G
#' Март H
#' Апрель J
#' Май K
#' Июнь M
#' Июль N
#' Август Q
#' Сентябрь U
#' Октябрь V
#' Ноябрь X
#' Декабрь Z
#'
library(data.table)
options(digits.secs=3)
getSymbData<-function(symbol, fname){
futFname<-gsub(".ZIP","ft.csv",gsub("FT","",toupper(fname)))
unzip(fname, files=futFname)
secDT<-fread(futFname, stringsAsFactors = FALSE)
file.remove(futFname)
secDT[code %in%symbol][, dat_time:=as.POSIXct(strptime(dat_time,format="%Y-%m-%d %H:%M:%OS"))][]
}
getSymbol.MOEX<-function(from=as.Date("2016-03-28"), to=as.Date("2016-04-05"),symbList=c("GZM5", "GZH5"), homeDir="~/repos/Data/MOEX/"){
years<-year(from):year(to)
fileFilter<-paste("FT",format(seq.Date(from=from, to=to, by=1),"%y%m%d"),".zip",sep="")
symbDT<-rbindlist(lapply(years,FUN=function(y){
setwd(paste(homeDir,y,sep=""))
fileList<-dir()[dir() %in% fileFilter]
rbindlist(lapply(fileList,FUN=function(f)getSymbData(symbList,f)))}))
symbDT
}
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
skip_if(on_old_windows())

library(dplyr, warn.conflicts = FALSE)
library(stringr)

# Shared fixture for every test below: the package's example_data, extended
# with extra character columns.
tbl <- example_data
# Add some better string data
tbl$verses <- verses[[1]]
# c(" a ", " b ", " c ", ...) increasing padding
# nchar = 3 5 7 9 11 13 15 17 19 21
tbl$padded_strings <- stringr::str_pad(letters[1:10], width = 2 * (1:10) + 1, side = "both")
tbl$another_chr <- tail(letters, 10)
# A select + filter pipeline builds a lazy arrow_dplyr_query; collect()
# evaluates it and must match the same operations done in base R.
test_that("basic select/filter/collect", {
  batch <- record_batch(tbl)

  b2 <- batch %>%
    select(int, chr) %>%
    filter(int > 5)
  expect_s3_class(b2, "arrow_dplyr_query")
  t2 <- collect(b2)
  expect_equal(t2, tbl[tbl$int > 5 & !is.na(tbl$int), c("int", "chr")])
  # Test that the original object is not affected
  expect_identical(collect(batch), tbl)
})

test_that("dim() on query", {
  # dim() on the unevaluated query must agree with the dplyr result.
  compare_dplyr_binding(
    .input %>%
      filter(int > 5) %>%
      select(int, chr) %>%
      dim(),
    tbl
  )
})

test_that("Print method", {
  # The print method shows the schema of the projected columns plus the
  # combined filter expression (matched literally via fixed = TRUE).
  expect_output(
    record_batch(tbl) %>%
      filter(dbl > 2, chr == "d" | chr == "f") %>%
      select(chr, int, lgl) %>%
      filter(int < 5) %>%
      select(int, chr) %>%
      print(),
    'RecordBatch (query)
int: int32
chr: string
* Filter: (((dbl > 2) and ((chr == "d") or (chr == "f"))) and (int < 5))
See $.data for the source Arrow object',
    fixed = TRUE
  )
})

test_that("pull", {
  # pull() with no argument, by position, by name, and after other verbs.
  compare_dplyr_binding(
    .input %>% pull(),
    tbl
  )
  compare_dplyr_binding(
    .input %>% pull(1),
    tbl
  )
  compare_dplyr_binding(
    .input %>% pull(chr),
    tbl
  )
  compare_dplyr_binding(
    .input %>%
      filter(int > 4) %>%
      rename(strng = chr) %>%
      pull(strng),
    tbl
  )
})
# collect(as_data_frame = FALSE) materialises the query as an Arrow object
# instead of an R data frame.
test_that("collect(as_data_frame=FALSE)", {
  batch <- record_batch(tbl)

  # No query steps: the original RecordBatch comes back unchanged.
  b1 <- batch %>% collect(as_data_frame = FALSE)
  expect_r6_class(b1, "RecordBatch")

  b2 <- batch %>%
    select(int, chr) %>%
    filter(int > 5) %>%
    collect(as_data_frame = FALSE)
  # collect(as_data_frame = FALSE) always returns Table now
  expect_r6_class(b2, "Table")
  expected <- tbl[tbl$int > 5 & !is.na(tbl$int), c("int", "chr")]
  expect_equal(as.data.frame(b2), expected)

  # Renaming during select is preserved in the materialised Table.
  b3 <- batch %>%
    select(int, strng = chr) %>%
    filter(int > 5) %>%
    collect(as_data_frame = FALSE)
  expect_r6_class(b3, "Table")
  expect_equal(as.data.frame(b3), set_names(expected, c("int", "strng")))

  # A grouped query stays an arrow_dplyr_query so the grouping survives.
  b4 <- batch %>%
    select(int, strng = chr) %>%
    filter(int > 5) %>%
    group_by(int) %>%
    collect(as_data_frame = FALSE)
  expect_s3_class(b4, "arrow_dplyr_query")
  expect_equal(
    as.data.frame(b4),
    expected %>%
      rename(strng = chr) %>%
      group_by(int)
  )
})

# compute() behaves like collect(as_data_frame = FALSE): it evaluates the
# query but keeps the result as an Arrow object.
test_that("compute()", {
  batch <- record_batch(tbl)

  b1 <- batch %>% compute()
  expect_r6_class(b1, "RecordBatch")

  b2 <- batch %>%
    select(int, chr) %>%
    filter(int > 5) %>%
    compute()
  expect_r6_class(b2, "Table")
  expected <- tbl[tbl$int > 5 & !is.na(tbl$int), c("int", "chr")]
  expect_equal(as.data.frame(b2), expected)

  b3 <- batch %>%
    select(int, strng = chr) %>%
    filter(int > 5) %>%
    compute()
  expect_r6_class(b3, "Table")
  expect_equal(as.data.frame(b3), set_names(expected, c("int", "strng")))

  b4 <- batch %>%
    select(int, strng = chr) %>%
    filter(int > 5) %>%
    group_by(int) %>%
    compute()
  expect_s3_class(b4, "arrow_dplyr_query")
  expect_equal(
    as.data.frame(b4),
    expected %>%
      rename(strng = chr) %>%
      group_by(int)
  )
})
# head() on a query is lazy: it returns an arrow_dplyr_query and its
# position in the pipeline matters (it is evaluated in pipeline order).
test_that("head", {
  batch <- record_batch(tbl)

  b2 <- batch %>%
    select(int, chr) %>%
    filter(int > 5) %>%
    head(2)
  expect_s3_class(b2, "arrow_dplyr_query")
  expected <- tbl[tbl$int > 5 & !is.na(tbl$int), c("int", "chr")][1:2, ]
  expect_equal(collect(b2), expected)

  b3 <- batch %>%
    select(int, strng = chr) %>%
    filter(int > 5) %>%
    head(2)
  expect_s3_class(b3, "arrow_dplyr_query")
  expect_equal(as.data.frame(b3), set_names(expected, c("int", "strng")))

  b4 <- batch %>%
    select(int, strng = chr) %>%
    filter(int > 5) %>%
    group_by(int) %>%
    head(2)
  expect_s3_class(b4, "arrow_dplyr_query")
  expect_equal(
    as.data.frame(b4),
    expected %>%
      rename(strng = chr) %>%
      group_by(int)
  )

  # Further verbs can be chained after head().
  expect_equal(
    batch %>%
      select(int, strng = chr) %>%
      filter(int > 5) %>%
      head(2) %>%
      mutate(twice = int * 2) %>%
      collect(),
    expected %>%
      rename(strng = chr) %>%
      mutate(twice = int * 2)
  )

  # This would fail if we evaluated head() after filter()
  expect_equal(
    batch %>%
      select(int, strng = chr) %>%
      head(2) %>%
      filter(int > 5) %>%
      collect(),
    expected %>%
      rename(strng = chr) %>%
      filter(FALSE)
  )
})

test_that("arrange then head returns the right data (ARROW-14162)", {
  compare_dplyr_binding(
    .input %>%
      # mpg has ties so we need to sort by two things to get deterministic order
      arrange(mpg, disp) %>%
      head(4) %>%
      collect(),
    mtcars,
    ignore_attr = "row.names"
  )
})

test_that("arrange then tail returns the right data", {
  compare_dplyr_binding(
    .input %>%
      # mpg has ties so we need to sort by two things to get deterministic order
      arrange(mpg, disp) %>%
      tail(4) %>%
      collect(),
    mtcars,
    ignore_attr = "row.names"
  )
})
# tail() needs a deterministic order, so every query below arranges by int
# before taking the tail.
test_that("tail", {
  batch <- record_batch(tbl)

  b2 <- batch %>%
    select(int, chr) %>%
    filter(int > 5) %>%
    arrange(int) %>%
    tail(2)
  expect_s3_class(b2, "arrow_dplyr_query")
  expected <- tail(tbl[tbl$int > 5 & !is.na(tbl$int), c("int", "chr")], 2)
  expect_equal(as.data.frame(b2), expected)

  b3 <- batch %>%
    select(int, strng = chr) %>%
    filter(int > 5) %>%
    arrange(int) %>%
    tail(2)
  expect_s3_class(b3, "arrow_dplyr_query")
  expect_equal(as.data.frame(b3), set_names(expected, c("int", "strng")))

  b4 <- batch %>%
    select(int, strng = chr) %>%
    filter(int > 5) %>%
    group_by(int) %>%
    arrange(int) %>%
    tail(2)
  expect_s3_class(b4, "arrow_dplyr_query")
  expect_equal(
    as.data.frame(b4),
    expected %>%
      rename(strng = chr) %>%
      group_by(int)
  )
})

# Binding two tables with identical column names must raise a clear error
# listing the duplicated field names.
test_that("No duplicate field names are allowed in an arrow_dplyr_query", {
  expect_error(
    Table$create(tbl, tbl) %>%
      filter(int > 0),
    regexp = paste0(
      'The following field names were found more than once in the data: "int", "dbl", ',
      '"dbl2", "lgl", "false", "chr", "fct", "verses", "padded_strings"'
    )
  )
})
| /r/tests/testthat/test-dplyr-query.R | permissive | guyuqi/arrow | R | false | false | 7,236 | r | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
skip_if(on_old_windows())
library(dplyr, warn.conflicts = FALSE)
library(stringr)
tbl <- example_data
# Add some better string data
tbl$verses <- verses[[1]]
# c(" a ", " b ", " c ", ...) increasing padding
# nchar = 3 5 7 9 11 13 15 17 19 21
tbl$padded_strings <- stringr::str_pad(letters[1:10], width = 2 * (1:10) + 1, side = "both")
tbl$another_chr <- tail(letters, 10)
test_that("basic select/filter/collect", {
batch <- record_batch(tbl)
b2 <- batch %>%
select(int, chr) %>%
filter(int > 5)
expect_s3_class(b2, "arrow_dplyr_query")
t2 <- collect(b2)
expect_equal(t2, tbl[tbl$int > 5 & !is.na(tbl$int), c("int", "chr")])
# Test that the original object is not affected
expect_identical(collect(batch), tbl)
})
test_that("dim() on query", {
compare_dplyr_binding(
.input %>%
filter(int > 5) %>%
select(int, chr) %>%
dim(),
tbl
)
})
test_that("Print method", {
expect_output(
record_batch(tbl) %>%
filter(dbl > 2, chr == "d" | chr == "f") %>%
select(chr, int, lgl) %>%
filter(int < 5) %>%
select(int, chr) %>%
print(),
'RecordBatch (query)
int: int32
chr: string
* Filter: (((dbl > 2) and ((chr == "d") or (chr == "f"))) and (int < 5))
See $.data for the source Arrow object',
fixed = TRUE
)
})
test_that("pull", {
compare_dplyr_binding(
.input %>% pull(),
tbl
)
compare_dplyr_binding(
.input %>% pull(1),
tbl
)
compare_dplyr_binding(
.input %>% pull(chr),
tbl
)
compare_dplyr_binding(
.input %>%
filter(int > 4) %>%
rename(strng = chr) %>%
pull(strng),
tbl
)
})
test_that("collect(as_data_frame=FALSE)", {
batch <- record_batch(tbl)
b1 <- batch %>% collect(as_data_frame = FALSE)
expect_r6_class(b1, "RecordBatch")
b2 <- batch %>%
select(int, chr) %>%
filter(int > 5) %>%
collect(as_data_frame = FALSE)
# collect(as_data_frame = FALSE) always returns Table now
expect_r6_class(b2, "Table")
expected <- tbl[tbl$int > 5 & !is.na(tbl$int), c("int", "chr")]
expect_equal(as.data.frame(b2), expected)
b3 <- batch %>%
select(int, strng = chr) %>%
filter(int > 5) %>%
collect(as_data_frame = FALSE)
expect_r6_class(b3, "Table")
expect_equal(as.data.frame(b3), set_names(expected, c("int", "strng")))
b4 <- batch %>%
select(int, strng = chr) %>%
filter(int > 5) %>%
group_by(int) %>%
collect(as_data_frame = FALSE)
expect_s3_class(b4, "arrow_dplyr_query")
expect_equal(
as.data.frame(b4),
expected %>%
rename(strng = chr) %>%
group_by(int)
)
})
test_that("compute()", {
batch <- record_batch(tbl)
b1 <- batch %>% compute()
expect_r6_class(b1, "RecordBatch")
b2 <- batch %>%
select(int, chr) %>%
filter(int > 5) %>%
compute()
expect_r6_class(b2, "Table")
expected <- tbl[tbl$int > 5 & !is.na(tbl$int), c("int", "chr")]
expect_equal(as.data.frame(b2), expected)
b3 <- batch %>%
select(int, strng = chr) %>%
filter(int > 5) %>%
compute()
expect_r6_class(b3, "Table")
expect_equal(as.data.frame(b3), set_names(expected, c("int", "strng")))
b4 <- batch %>%
select(int, strng = chr) %>%
filter(int > 5) %>%
group_by(int) %>%
compute()
expect_s3_class(b4, "arrow_dplyr_query")
expect_equal(
as.data.frame(b4),
expected %>%
rename(strng = chr) %>%
group_by(int)
)
})
test_that("head", {
batch <- record_batch(tbl)
b2 <- batch %>%
select(int, chr) %>%
filter(int > 5) %>%
head(2)
expect_s3_class(b2, "arrow_dplyr_query")
expected <- tbl[tbl$int > 5 & !is.na(tbl$int), c("int", "chr")][1:2, ]
expect_equal(collect(b2), expected)
b3 <- batch %>%
select(int, strng = chr) %>%
filter(int > 5) %>%
head(2)
expect_s3_class(b3, "arrow_dplyr_query")
expect_equal(as.data.frame(b3), set_names(expected, c("int", "strng")))
b4 <- batch %>%
select(int, strng = chr) %>%
filter(int > 5) %>%
group_by(int) %>%
head(2)
expect_s3_class(b4, "arrow_dplyr_query")
expect_equal(
as.data.frame(b4),
expected %>%
rename(strng = chr) %>%
group_by(int)
)
expect_equal(
batch %>%
select(int, strng = chr) %>%
filter(int > 5) %>%
head(2) %>%
mutate(twice = int * 2) %>%
collect(),
expected %>%
rename(strng = chr) %>%
mutate(twice = int * 2)
)
# This would fail if we evaluated head() after filter()
expect_equal(
batch %>%
select(int, strng = chr) %>%
head(2) %>%
filter(int > 5) %>%
collect(),
expected %>%
rename(strng = chr) %>%
filter(FALSE)
)
})
test_that("arrange then head returns the right data (ARROW-14162)", {
compare_dplyr_binding(
.input %>%
# mpg has ties so we need to sort by two things to get deterministic order
arrange(mpg, disp) %>%
head(4) %>%
collect(),
mtcars,
ignore_attr = "row.names"
)
})
test_that("arrange then tail returns the right data", {
compare_dplyr_binding(
.input %>%
# mpg has ties so we need to sort by two things to get deterministic order
arrange(mpg, disp) %>%
tail(4) %>%
collect(),
mtcars,
ignore_attr = "row.names"
)
})
test_that("tail", {
batch <- record_batch(tbl)
b2 <- batch %>%
select(int, chr) %>%
filter(int > 5) %>%
arrange(int) %>%
tail(2)
expect_s3_class(b2, "arrow_dplyr_query")
expected <- tail(tbl[tbl$int > 5 & !is.na(tbl$int), c("int", "chr")], 2)
expect_equal(as.data.frame(b2), expected)
b3 <- batch %>%
select(int, strng = chr) %>%
filter(int > 5) %>%
arrange(int) %>%
tail(2)
expect_s3_class(b3, "arrow_dplyr_query")
expect_equal(as.data.frame(b3), set_names(expected, c("int", "strng")))
b4 <- batch %>%
select(int, strng = chr) %>%
filter(int > 5) %>%
group_by(int) %>%
arrange(int) %>%
tail(2)
expect_s3_class(b4, "arrow_dplyr_query")
expect_equal(
as.data.frame(b4),
expected %>%
rename(strng = chr) %>%
group_by(int)
)
})
test_that("No duplicate field names are allowed in an arrow_dplyr_query", {
expect_error(
Table$create(tbl, tbl) %>%
filter(int > 0),
regexp = paste0(
'The following field names were found more than once in the data: "int", "dbl", ',
'"dbl2", "lgl", "false", "chr", "fct", "verses", "padded_strings"'
)
)
})
|
# Plotting libraries. Fix: the package is called ggplot2; library(ggplot)
# fails because no package named "ggplot" exists.
library(ggplot2)
library(rCharts)
library(GGally)

# NOTE(review): hard-coded absolute path makes the script machine-specific;
# consider a relative path or a project-root helper.
setwd('/Users/conorosullivan/Documents/FourthModule/visualization/final_project/app')
df <- read.csv('country_data.csv')
### BUBBLE PLOT IN RCHARTS ###
# Interactive bubble chart: median age vs. annual population growth,
# bubble size mapped to adolescent fertility rate, coloured by continent.
# Fix: the plot was assigned to `hh` but then used as `h1`, which was
# undefined; the object is now named `h1` consistently.
h1 <- hPlot(x = "MedianAge",
            y = "AnnualPopulationGrowth",
            data = df,
            type = "point",
            size = "AdolescentFertilityRate",
            group = "continent")
h1$title(text='Median Age vs Annual Population Growth in 2010')
# h1$tooltip("Potato")
h1
### BUBBLE PLOT IN GGPLOT ###

#' Bubble chart of two indicators from the global data frame `df`.
#'
#' @param first   column name plotted on the y axis.
#' @param second  column name plotted on the x axis.
#' @param third   column name mapped to bubble size.
#' @param colorBy "Region", "Religion", or any other column of df to colour by.
plotBub <- function(first, second, third, colorBy) {
  # Map the UI-facing labels onto the actual column names of df.
  if (colorBy == 'Region') colorBy <- 'continent'
  else if (colorBy == 'Religion') colorBy <- 'religion'

  p <- ggplot(df) +
    geom_point(aes_string(y = first,
                          x = second,
                          size = third,
                          color = colorBy), alpha = .4) +
    theme_minimal() +
    theme(legend.position = 'top',
          legend.text = element_text(size = 16, angle = 0),
          legend.title = element_text(size = 18, angle = 0),
          axis.title.x = element_text(size = 16, angle = 0),
          axis.title.y = element_text(size = 16, angle = 90)) +
    scale_size(guide = 'none', range = c(4, 15))

  # Qualitative palettes for the two categorical colourings; any other
  # column (e.g. HumanDevelopmentIndex) keeps ggplot's default scale.
  # Fix: the second branch tested 'region', a value colorBy can never hold
  # after the mapping above, so the Region palette was never applied.
  if (colorBy == "religion") p <- p + scale_color_brewer("Religion", palette = "Set1")
  else if (colorBy == 'continent') p <- p + scale_color_brewer("Region", palette = "Dark2")

  p <- p + guides(colour = guide_legend(nrow = 2, override.aes = list(size = 4)))
  show(p)
}

plotBub('MedianAge', "AnnualPopulationGrowth", 'AdolescentFertilityRate', 'Region')
plotBub('MedianAge', "AnnualPopulationGrowth", 'AdolescentFertilityRate', 'HumanDevelopmentIndex')
### PARALLEL COORDINATES PLOT ###
library(GGally)
library(scales)

# Indicators shown as the five parallel-coordinate axes, plus the grouping
# columns used for colouring.
dfsmall <- df[,c("LifeExpectancyAtBirth",
                 "MeanYearsSchooling",
                 "GrossNationalIncomePerCapita",
                 "GenderInequalityIndex",
                 "AdolescentFertilityRate",
                 "continent",
                 "religion",
                 "HumanDevelopmentIndex")]

# Negate three of the indicators before the per-axis rescaling in
# ggparcoord. NOTE(review): presumably so that all axes point in the same
# direction on the plot -- confirm the intended orientation.
dfsmall[,"GrossNationalIncomePerCapita"] <- -1*dfsmall[,"GrossNationalIncomePerCapita"]
dfsmall[,"MeanYearsSchooling"] <- -1*dfsmall[,"MeanYearsSchooling"]
dfsmall[,"LifeExpectancyAtBirth"] <- -1*dfsmall[,"LifeExpectancyAtBirth"]
# First version of a parallel-coordinates plot: the first five columns of
# dat, each axis rescaled to [0, 1] ("uniminmax"), coloured by colorBy.
# NOTE(review): plotPar is redefined twice further down this script; this
# version is overwritten when the file is sourced top to bottom.
plotPar <- function(dat, al, colorBy) {
  p <- ggparcoord(data = dat,
                  columns = 1:5,
                  groupColumn = colorBy,
                  showPoints = FALSE,
                  alphaLines = al,
                  shadeBox = NULL,
                  scale = "uniminmax"
  ) + theme_minimal() +
    theme(axis.ticks.x = element_blank()) +
    xlab('Feature') + ylab('Proportion of Maximum Feature Value') +
    scale_x_discrete(expand=c(0,0))
  # NOTE(review): 'region' is not a column name used anywhere in this script
  # (the column is "continent"), so the second branch can never fire.
  if (colorBy == "religion") p <- p + scale_color_brewer("Religion",palette="Set2")
  else if (colorBy == 'region') p <- p + scale_color_brewer("Region",palette="Dark2")
  show(p)
}

plotPar(dfsmall, .4, "HumanDevelopmentIndex")
# NOTE(review): "Region" is not a column of dfsmall (the column is named
# "continent"); with this definition the call below would fail -- verify.
plotPar(dfsmall, .4, "Region")
# Parallel-coordinates plot of dat's first five columns, coloured by
# region (continent) or religion, greying out non-selected groups.
#
# Args:
#   dat: data frame whose columns 1:5 are the numeric features.
#   al: alpha transparency for the lines.
#   colorBy: "Region" or "Religion" (any other value is rejected).
#   selectRegion: character vector of region names to keep coloured.
#   selectReligion: character vector of religion names to keep coloured.
#
# Relies on the globals `df` and `dfsmall` for the factor levels.
plotPar <- function(dat, al, colorBy, selectRegion, selectReligion) {
if (!colorBy %in% c("Region", "Religion")) {
# Bug fix: previously an unsupported value (e.g. "HumanDevelopmentIndex",
# which is actually passed below) fell through with `acolorBy`/`palette`
# undefined and failed with an obscure "object not found" error.
stop("colorBy must be 'Region' or 'Religion', got: ", colorBy, call. = FALSE)
}
if (colorBy == "Region") {
acolorBy <- 'continent'
palette <- brewer_pal(type = "qual", palette = "Dark2")(nlevels(dfsmall$continent))
# Grey out every region that was not explicitly selected (only when the
# selection size is between 1 and 5).
if (length(selectRegion) %in% c(1:5)) {
allLevels <- levels(df[,acolorBy])
palette[which(!allLevels %in% selectRegion)] <- "grey85"
}
}
if (colorBy == "Religion") {
acolorBy <- 'religion'
palette <- brewer_pal(type = "qual", palette = "Set1")(nlevels(dfsmall$religion))
if (length(selectReligion) %in% c(1:4)) {
allLevels <- levels(df[,acolorBy])
palette[which(!allLevels %in% selectReligion)] <- "grey85"
}
}
print(palette)  # debug output of the final colour vector
p <- ggparcoord(data = dat,
columns = 1:5,
groupColumn = acolorBy,
showPoints = FALSE,
alphaLines = al,
shadeBox = NULL,
scale = "uniminmax"
)+theme_minimal()+
theme(axis.ticks.x = element_blank())+
theme(legend.position = 'top',
legend.text = element_text(size=16,angle=0),
legend.title = element_text(size=18,angle=0))+
xlab('')+ylab('Proportion of Maximum Feature Value')+
scale_x_discrete(expand=c(0,0))+
scale_color_manual(paste(colorBy),limits=levels(dfsmall[,acolorBy]), values=palette)+
guides(colour = guide_legend(override.aes = list(size = 4), nrow = 2))
show(p)
}
# NOTE(review): "HumanDevelopmentIndex" is not one of the two colorBy
# values this plotPar handles ("Region"/"Religion") -- this call appears
# to error at runtime; confirm the intended argument.
plotPar(dfsmall, .4, "HumanDevelopmentIndex",c("Arab States"),c("Islam"))
### BAR PLOT ###
# Per-continent means of four indicators (NAs dropped within each group).
ags<-aggregate(list(df$LifeExpectancyAtBirth, df$MeanYearsSchooling, df$GrossNationalIncomePerCapita, df$AdolescentFertilityRate), by=list(df$continent), FUN=mean, na.rm=T)
colnames(ags)<-c("Region",
"LifeExpectancyAtBirth",
"MeanYearsSchooling",
"GrossNationalIncomePerCapita",
"AdolescentFertilityRate")
# Min-max scale a numeric vector onto [0, 1], then nudge the minimum
# entries up by 0.05 so the shortest bars still render with visible height.
normalize <- function(vals) {
lo <- min(vals)
hi <- max(vals)
scaled <- (vals - lo) / (hi - lo)
idx <- which(scaled == min(scaled))
scaled[idx] <- scaled[idx] + .05
scaled
}
# Normalize each indicator column across continents, then melt to long
# form and draw a dodged bar chart (one bar group per indicator).
ags2<-cbind(ags[,c("Region")],data.frame(apply(ags[,2:ncol(ags)],2,FUN=normalize)))
colnames(ags2)[1]<-"Region"
library(reshape)
mags<-melt(ags2, id="Region")
p<-ggplot(mags)+geom_bar(aes(x=variable,y=value, group=Region,fill=Region),stat="identity",position="dodge")
p
# Third redefinition of plotPar: maps the UI label to the actual grouping
# column, then draws the parallel-coordinates plot with the Paired palette.
#
# Args:
#   dat: data frame whose columns 1:5 are the numeric features.
#   al: alpha transparency for the lines.
#   colorBy: 'Region' selects the continent column; ANY other value
#     selects religion.
plotPar <- function(dat, al, colorBy) {
if (colorBy == 'Region') colorBy <- 'continent'
else colorBy <- 'religion'
print(colorBy)
p <- ggparcoord(data = dat,
columns = 1:5,
groupColumn = colorBy,
showPoints = FALSE,
alphaLines = al,
shadeBox = NULL,
scale = "uniminmax"
)+theme_minimal()+
theme(axis.ticks.x = element_blank())+
xlab('Feature')+ylab('Proportion of Maximum Feature Value')+
scale_x_discrete(expand=c(0,0))
if (colorBy == "religion") p <- p + scale_color_brewer("Religion",palette="Paired")
else p <- p + scale_color_brewer("Region",palette="Paired")
show(p)
}
# NOTE(review): "continent" != 'Region', so this call falls into the else
# branch and actually colours by religion -- confirm this is intended.
plotPar(dfsmall, .4, "continent")
# NOTE(review): the first assignment (including "continent") is dead --
# it is immediately overwritten by the numeric-only selection below.
boxdf<-dfsmall[,c("LifeExpectancyAtBirth","MeanYearsSchooling","GrossNationalIncomePerCapita","AdolescentFertilityRate","continent")]
boxdf<-dfsmall[,c("LifeExpectancyAtBirth","MeanYearsSchooling","GrossNationalIncomePerCapita","AdolescentFertilityRate")]
# Scale a numeric vector by its maximum so the largest value becomes 1;
# NAs are ignored when locating the maximum and pass through unchanged.
divbymax <- function(v) {
v / max(v, na.rm = T)
}
# Scale every indicator to a 0..1 share of its maximum, reattach the two
# grouping columns, and melt to long form for the faceted box plots.
boxdf<-apply(boxdf,2,FUN=divbymax)
boxdf<-data.frame(boxdf)
boxdf$continent<-df$continent
boxdf$religion<-df$religion
boxdfm<-melt(boxdf,id=c('continent','religion'))
# Faceted box plots (one facet per indicator) of the global `boxdfm`,
# grouped/filled by region or religion.
#
# Args:
#   colorBy: 'Region' selects the continent column; any other value
#     selects religion. The original label is kept for the legend title.
plotBox<-function(colorBy){
actualColorBy <- colorBy
if (colorBy == 'Region') colorBy <- 'continent'
else colorBy <- 'religion'
print(colorBy)
xx<-boxdfm[,paste(colorBy)]
# R's copy-on-modify means this changes a function-local copy of the
# global boxdfm; the global itself is untouched.
boxdfm$xx <- xx
print(head(xx))
p<-ggplot(boxdfm)+
#geom_boxplot(aes(x=continent,y=value,fill=continent))+
geom_boxplot(aes(x=xx,y=value,fill=xx))+
facet_grid(. ~ variable)+
ylab("")+
xlab("")+
theme(axis.ticks.y=element_blank(),
axis.text.y = element_blank(),
legend.position = "top",
legend.text = element_text(size = 14, angle=0),
axis.ticks.x = element_blank(),
axis.text.x = element_blank(),
strip.text.x = element_text(size = 16, angle = 0),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())+
scale_fill_brewer(palette="Dark2")+
labs(fill = paste(actualColorBy,':',sep=""))+
guides(fill = guide_legend(title.theme = element_text(size=20,angle=0), override.aes = list(shape = 15)))
show(p)
}
plotBox('Religion')
# Rename countries so dfi$Name matches the country names used by
# map_data("world") for the choropleth join below.
# NOTE(review): the hard-coded row indices depend on the sort order of
# dfi$Name and silently break if the input data changes -- fragile.
dfi<-df
dfi<-dfi[order(dfi$Name),]
dfi$Name <- as.character(dfi$Name)
dfi$Name[21] <- 'Bolivia'
dfi$Name[42] <- 'Ivory Coast'
dfi$Name[143] <- 'USSR'
dfi$Name[40] <- 'Zaire'
dfi$Name[191] <- 'Venezuela'
dfi$Name[79] <- 'Iran'
dfi$Name[46] <- "Czechoslovakia"
dfi$Name[172] <- 'Tanzania'
dfi$Name[187] <- 'USA'
# Draw a world choropleth of one numeric indicator from the global `dfi`.
#
# Args:
#   fillChoice: name of the dfi column to map onto fill colour; it also
#     becomes the legend title.
#
# Relies on map_data("world") country names matching dfi$Name.
plotMap <- function(fillChoice){
dfi$region <- dfi$Name
countries <- map_data("world")
# Join indicator values onto the polygon vertices, then restore the
# drawing order (merge does not preserve it, and polygons render wrongly
# when their vertices are unsorted).
choro <- merge(countries, dfi, sort = FALSE, by = "region")
choro <- choro[order(choro$order), ]
# One programmatic mapping replaces the previous five copy-pasted qplot
# branches (which also left `p` undefined for any other column and
# computed an unused `fChoice` local). The legend title is still the
# bare column name, exactly as before.
p <- ggplot(choro, aes_string(x = "long", y = "lat", group = "group",
fill = fillChoice)) +
geom_polygon()
p <- p + theme_bw()
p <- p + theme(axis.text = element_blank(),
axis.title = element_blank())
p <- p + theme(axis.ticks = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
p <- p + scale_x_continuous(expand = c(0, 0))
p <- p + scale_y_continuous(expand = c(0, 0))
p
}
# Render one choropleth per indicator.
plotMap('LifeExpectancyAtBirth')
plotMap('AnnualPopulationGrowth')
plotMap('GrossNationalIncomePerCapita')
plotMap('MeanYearsSchooling')
plotMap('AdolescentFertilityRate')
| /project-prototype/prototypes.R | no_license | ConorOSullivan/msan622 | R | false | false | 10,177 | r | library(ggplot)
library(rCharts)
library(GGally)
setwd('/Users/conorosullivan/Documents/FourthModule/visualization/final_project/app')
df<-read.csv('country_data.csv')
### BUBBLE PLOT IN RCHARTS ###
# Interactive rCharts (Highcharts) bubble chart: median age vs annual
# population growth, bubble size = adolescent fertility, grouped by
# continent.
# Bug fix: the chart was assigned to `hh` but every following line used
# `h1`, so the title call and the print failed with "object 'h1' not
# found"; the result is now consistently named `h1`.
h1 <- hPlot(x = "MedianAge",
y = "AnnualPopulationGrowth",
data = df,
type = "point",
size="AdolescentFertilityRate",
group = "continent")
h1$title(text='Median Age vs Annual Population Growth in 2010')
# h1$tooltip("Potato")
h1
### BUBBLE PLOT IN GGPLOT ###
# Scatter/bubble plot of two indicators from the global `df`, with point
# size mapped to a third indicator and colour mapped to a grouping column.
#
# Args:
#   first: column name for the y axis.
#   second: column name for the x axis.
#   third: column name mapped to point size (its legend is suppressed).
#   colorBy: "Region"/"Religion" (mapped to the continent/religion
#     columns) or any other df column name.
#
# Cleanup: removed unused locals (`thirdlabel`, `labelposxx`/`labelposyy`,
# `xx`, `yy`) and the `sizelabel`/`labelposx`/`labelposy` columns that
# were only consumed by a commented-out annotate() call.
plotBub <- function(first, second, third, colorBy) {
# Translate the UI-facing labels into the actual column names.
if (colorBy == 'Region') colorBy <- 'continent'
else if (colorBy == 'Religion') colorBy <- 'religion'
p<-ggplot(df)+
geom_point(aes_string(y=first,
x=second,
size=third,
color=colorBy),alpha=.4)+
theme_minimal()+
theme(legend.position = 'top',
legend.text = element_text(size=16,angle=0),
legend.title = element_text(size=18,angle=0),
axis.title.x = element_text(size=16,angle=0),
axis.title.y = element_text(size=16,angle=90))+
scale_size(guide = 'none', range = c(4, 15))
if (colorBy == "religion") p <- p + scale_color_brewer("Religion",palette="Set1")
# NOTE(review): colorBy can only be 'continent' or 'religion' at this
# point, so this 'region' branch never fires and continents keep the
# default hue palette -- confirm whether Dark2 was intended here.
else if (colorBy == 'region') p <- p + scale_color_brewer("Region",palette="Dark2")
p <- p + guides(colour = guide_legend(nrow=2, override.aes=list(size=4)))
show(p)
}
plotBub('MedianAge',"AnnualPopulationGrowth",'AdolescentFertilityRate','Region')
plotBub('MedianAge',"AnnualPopulationGrowth",'AdolescentFertilityRate','HumanDevelopmentIndex')
### PARALLEL COORDINATES PLOT ###
library(GGally)
library(scales)
dfsmall<-df[,c("LifeExpectancyAtBirth",
"MeanYearsSchooling",
"GrossNationalIncomePerCapita",
"GenderInequalityIndex",
"AdolescentFertilityRate",
"continent",
"religion",
"HumanDevelopmentIndex")]
dfsmall[,"GrossNationalIncomePerCapita"]<- -1*dfsmall[,"GrossNationalIncomePerCapita"]
dfsmall[,"MeanYearsSchooling"]<--1*dfsmall[,"MeanYearsSchooling"]
dfsmall[,"LifeExpectancyAtBirth"]<--1*dfsmall[,"LifeExpectancyAtBirth"]
plotPar <- function(dat, al, colorBy) {
p <- ggparcoord(data = dat,
columns = 1:5,
groupColumn = colorBy,
showPoints = FALSE,
alphaLines = al,
shadeBox = NULL,
scale = "uniminmax"
)+theme_minimal()+
theme(axis.ticks.x = element_blank())+
xlab('Feature')+ylab('Proportion of Maximum Feature Value')+
scale_x_discrete(expand=c(0,0))
if (colorBy == "religion") p <- p + scale_color_brewer("Religion",palette="Set2")
else if (colorBy == 'region') p <- p + scale_color_brewer("Region",palette="Dark2")
show(p)
}
plotPar(dfsmall, .4, "HumanDevelopmentIndex")
plotPar(dfsmall, .4, "Region")
plotPar <- function(dat, al, colorBy, selectRegion, selectReligion) {
if (colorBy == "Region") {
acolorBy <- 'continent'
palette <- brewer_pal(type = "qual", palette = "Dark2")(nlevels(dfsmall$continent))
if (length(selectRegion) %in% c(1:5)) {
allLevels <- levels(df[,acolorBy])
palette[which(!allLevels %in% selectRegion)] <- "grey85"
}
}
if (colorBy == "Religion") {
acolorBy <- 'religion'
palette <- brewer_pal(type = "qual", palette = "Set1")(nlevels(dfsmall$religion))
if (length(selectReligion) %in% c(1:4)) {
allLevels <- levels(df[,acolorBy])
palette[which(!allLevels %in% selectReligion)] <- "grey85"
}
}
print(palette)
p <- ggparcoord(data = dat,
columns = 1:5,
#groupColumn = colorBy,
groupColumn = acolorBy,
showPoints = FALSE,
alphaLines = al,
shadeBox = NULL,
scale = "uniminmax"
)+theme_minimal()+
theme(axis.ticks.x = element_blank())+
theme(legend.position = 'top',
legend.text = element_text(size=16,angle=0),
legend.title = element_text(size=18,angle=0))+
xlab('')+ylab('Proportion of Maximum Feature Value')+
scale_x_discrete(expand=c(0,0))+
scale_color_manual(paste(colorBy),limits=levels(dfsmall[,acolorBy]), values=palette)+
guides(colour = guide_legend(override.aes = list(size = 4), nrow = 2))
show(p)
}
plotPar(dfsmall, .4, "HumanDevelopmentIndex",c("Arab States"),c("Islam"))
### BAR PLOT ###
ags<-aggregate(list(df$LifeExpectancyAtBirth, df$MeanYearsSchooling, df$GrossNationalIncomePerCapita, df$AdolescentFertilityRate), by=list(df$continent), FUN=mean, na.rm=T)
colnames(ags)<-c("Region",
"LifeExpectancyAtBirth",
"MeanYearsSchooling",
"GrossNationalIncomePerCapita",
"AdolescentFertilityRate")
# Min-max scale a numeric vector onto [0, 1], then nudge the minimum
# entries up by 0.05 so the shortest bars still render with visible height.
normalize <- function(vals) {
lo <- min(vals)
hi <- max(vals)
scaled <- (vals - lo) / (hi - lo)
idx <- which(scaled == min(scaled))
scaled[idx] <- scaled[idx] + .05
scaled
}
ags2<-cbind(ags[,c("Region")],data.frame(apply(ags[,2:ncol(ags)],2,FUN=normalize)))
colnames(ags2)[1]<-"Region"
library(reshape)
mags<-melt(ags2, id="Region")
p<-ggplot(mags)+geom_bar(aes(x=variable,y=value, group=Region,fill=Region),stat="identity",position="dodge")
p
plotPar <- function(dat, al, colorBy) {
if (colorBy == 'Region') colorBy <- 'continent'
else colorBy <- 'religion'
print(colorBy)
p <- ggparcoord(data = dat,
columns = 1:5,
groupColumn = colorBy,
showPoints = FALSE,
alphaLines = al,
shadeBox = NULL,
scale = "uniminmax"
)+theme_minimal()+
theme(axis.ticks.x = element_blank())+
xlab('Feature')+ylab('Proportion of Maximum Feature Value')+
scale_x_discrete(expand=c(0,0))
if (colorBy == "religion") p <- p + scale_color_brewer("Religion",palette="Paired")
else p <- p + scale_color_brewer("Region",palette="Paired")
show(p)
}
plotPar(dfsmall, .4, "continent")
boxdf<-dfsmall[,c("LifeExpectancyAtBirth","MeanYearsSchooling","GrossNationalIncomePerCapita","AdolescentFertilityRate","continent")]
boxdf<-dfsmall[,c("LifeExpectancyAtBirth","MeanYearsSchooling","GrossNationalIncomePerCapita","AdolescentFertilityRate")]
# Scale a numeric vector by its maximum so the largest value becomes 1;
# NAs are ignored when locating the maximum and pass through unchanged.
divbymax <- function(v) {
v / max(v, na.rm = T)
}
boxdf<-apply(boxdf,2,FUN=divbymax)
boxdf<-data.frame(boxdf)
boxdf$continent<-df$continent
boxdf$religion<-df$religion
boxdfm<-melt(boxdf,id=c('continent','religion'))
plotBox<-function(colorBy){
actualColorBy <- colorBy
if (colorBy == 'Region') colorBy <- 'continent'
else colorBy <- 'religion'
print(colorBy)
xx<-boxdfm[,paste(colorBy)]
boxdfm$xx <- xx
print(head(xx))
p<-ggplot(boxdfm)+
#geom_boxplot(aes(x=continent,y=value,fill=continent))+
geom_boxplot(aes(x=xx,y=value,fill=xx))+
facet_grid(. ~ variable)+
ylab("")+
xlab("")+
theme(axis.ticks.y=element_blank(),
axis.text.y = element_blank(),
legend.position = "top",
legend.text = element_text(size = 14, angle=0),
axis.ticks.x = element_blank(),
axis.text.x = element_blank(),
strip.text.x = element_text(size = 16, angle = 0),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())+
scale_fill_brewer(palette="Dark2")+
labs(fill = paste(actualColorBy,':',sep=""))+
guides(fill = guide_legend(title.theme = element_text(size=20,angle=0), override.aes = list(shape = 15)))
show(p)
}
plotBox('Religion')
dfi<-df
dfi<-dfi[order(dfi$Name),]
dfi$Name <- as.character(dfi$Name)
dfi$Name[21] <- 'Bolivia'
dfi$Name[42] <- 'Ivory Coast'
dfi$Name[143] <- 'USSR'
dfi$Name[40] <- 'Zaire'
dfi$Name[191] <- 'Venezuela'
dfi$Name[79] <- 'Iran'
dfi$Name[46] <- "Czechoslovakia"
dfi$Name[172] <- 'Tanzania'
dfi$Name[187] <- 'USA'
# Draw a world choropleth of one numeric indicator from the global `dfi`.
#
# Args:
#   fillChoice: name of the dfi column to map onto fill colour; it also
#     becomes the legend title.
#
# Relies on map_data("world") country names matching dfi$Name.
plotMap <- function(fillChoice){
dfi$region <- dfi$Name
countries <- map_data("world")
# Join indicator values onto the polygon vertices, then restore the
# drawing order (merge does not preserve it, and polygons render wrongly
# when their vertices are unsorted).
choro <- merge(countries, dfi, sort = FALSE, by = "region")
choro <- choro[order(choro$order), ]
# One programmatic mapping replaces the previous five copy-pasted qplot
# branches (which also left `p` undefined for any other column and
# computed an unused `fChoice` local). The legend title is still the
# bare column name, exactly as before.
p <- ggplot(choro, aes_string(x = "long", y = "lat", group = "group",
fill = fillChoice)) +
geom_polygon()
p <- p + theme_bw()
p <- p + theme(axis.text = element_blank(),
axis.title = element_blank())
p <- p + theme(axis.ticks = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
p <- p + scale_x_continuous(expand = c(0, 0))
p <- p + scale_y_continuous(expand = c(0, 0))
p
}
plotMap('LifeExpectancyAtBirth')
plotMap('AnnualPopulationGrowth')
plotMap('GrossNationalIncomePerCapita')
plotMap('MeanYearsSchooling')
plotMap('AdolescentFertilityRate')
|
shinyUI(fluidPage(
titlePanel("FAVE-extract: Remeasurement"),
sidebarLayout(
sidebarPanel(
helpText("Choose a vowel class to see how the ANAE's distribution for that vowel
class compares to the speaker's own distribution."),
selectInput("vowel",
label = "Choose a vowel to display",
choices = c("*hr", "ae", "aeh", "ah", "ahr", "aw", "ay", "ay0",
"e", "ey", "eyF", "eyr", "i", "iw", "iy", "iyF", "iyr", "o",
"oh", "ow", "owF", "owr", "oy", "Tuw", "u", "uh", "uw", "uwr"),
selected = "ae")),
mainPanel(
plotOutput("vowelPlot")
)
)
)) | /remeasure/ui.R | no_license | JoFrhwld/methods_xv | R | false | false | 706 | r | shinyUI(fluidPage(
titlePanel("FAVE-extract: Remeasurement"),
sidebarLayout(
sidebarPanel(
helpText("Choose a vowel class to see how the ANAE's distribution for that vowel
class compares to the speaker's own distribution."),
selectInput("vowel",
label = "Choose a vowel to display",
choices = c("*hr", "ae", "aeh", "ah", "ahr", "aw", "ay", "ay0",
"e", "ey", "eyF", "eyr", "i", "iw", "iy", "iyF", "iyr", "o",
"oh", "ow", "owF", "owr", "oy", "Tuw", "u", "uh", "uw", "uwr"),
selected = "ae")),
mainPanel(
plotOutput("vowelPlot")
)
)
)) |
## yearly_category_HSV.R
##
##
library(data.table)
#####-------------------------------------
##### setup data
#####-------------------------------------
#dt = fread("~/Documents/wifire/processedData/features/output_SD_all.csv", header = FALSE)
# Load the data.table `dt` of per-image MATLAB features, then summarise
# columns 45:68 (the H/S/V histogram bins) per Year x category as
# mean/sd, and split the summary into hue/saturation/value tables.
load("./processedData/features/digital_trad_art_MATLABfeatures.Rda")
res = as.data.frame(dt[,lapply(.SD, FUN = function(x) as.double(mean(x)/sd(x))), by = list(Year, category), .SDcols = 45:68])
res = subset(res, Year != 2000)
rm(dt)
# Columns 1:2 are Year and category; the grepl picks the bins for each
# channel by the HH/SH/VH prefix in their names.
res.H = res[,c(1,2,which(grepl("HH", names(res))))]
res.S = res[,c(1,2,which(grepl("SH", names(res))))]
res.V = res[,c(1,2,which(grepl("VH", names(res))))]
rm(res)
library(ggplot2)
library(reshape)
# res.H.m = melt(res.H, id.vars = c("day", "hour.chunk"))
# res.S.m = melt(res.S)
# res.V.m = melt(res.V)
#
# ggplot(res.H.m, aes(x = variable, y = value, colour = day)) + geom_point() + facet_wrap(~ hour.chunk, nrow = 3)
# ggplot(res.S.m, aes(x = variable, y = value, colour = day)) + geom_point() + facet_wrap(~ hour.chunk, nrow = 3)
# ggplot(res.V.m, aes(x = variable, y = value, colour = day)) + geom_point() + facet_wrap(~ hour.chunk, nrow = 3)
#####-------------------------------------
#####
#####-------------------------------------
# Classical 2-D MDS of the yearly feature rows, computed separately for
# each category of df$category.
#
# Args:
#   df: data frame with Year, category, and numeric feature columns.
#
# Returns a data frame with category, Year, coord.1, coord.2 (one row
# per Year per category; the initial all-NA seed row is dropped at the
# end via categories.mds[-1,]).
return_mds_per_category = function(df){
categories = unique(df$category)
# Seed row of NAs so rbind inside the loop has a frame to append to.
categories.mds = data.frame(category = NA, Year = NA, coord.1 = NA, coord.2 = NA)
for (i in c(1:length(categories))){
df.temp = subset(df, category == categories[i])
df.temp$category = NULL
row.names(df.temp) = df.temp$Year
dist.matrix = dist(df.temp[, -which(names(df.temp) == "Year")], method = "euclidean")
fit <- cmdscale(dist.matrix,eig=TRUE, k=2) # k is the number of dim
mds.1 = fit$points[,1]
mds.2 = fit$points[,2]
# MDS axes have arbitrary sign; flip axis 1 so its largest-magnitude
# coordinate is positive, keeping orientation comparable across
# categories. Axis 2 is deliberately left as-is (flip commented out).
if (max(mds.1) < max(abs(mds.1))) { mds.1 = -1*mds.1}
#if (max(mds.2) < max(abs(mds.2))) { mds.2 = -1*mds.2}
# NOTE(review): growing the frame with rbind in a loop is quadratic;
# fine at this scale but worth replacing for larger inputs.
categories.mds = rbind(categories.mds, data.frame(category = categories[i], Year = row.names(fit$points), coord.1 = mds.1, coord.2 = mds.2))
}
return(categories.mds[-1,])
}
# --- MDS scatter plots of the per-year H/S/V distributions ---
mds.H = return_mds_per_category(res.H)
#p = ggplot(mds.H, aes(x = coord.1, y = coord.2, label = Year)) + facet_wrap( ~ category, nrow = 2) + geom_text() + ggtitle("MDS of Hue Distributions")
p = ggplot(mds.H, aes(x = coord.1, y = coord.2, label = Year, colour = category)) + geom_text() + ggtitle("MDS of Hue Distributions")
ggsave(file = "./figures/Hue_MDS.pdf", p)
mds.S = return_mds_per_category(res.S)
#p = ggplot(mds.S, aes(x = coord.1, y = coord.2, label = Year)) + facet_wrap( ~ category, nrow = 2) + geom_text() + ggtitle("MDS of Saturation Distributions")
p = ggplot(mds.S, aes(x = coord.1, y = coord.2, label = Year, colour = category)) + geom_text() + ggtitle("MDS of Saturation Distributions")
ggsave(file = "./figures/Saturation_MDS.pdf", p)
mds.V = return_mds_per_category(res.V)
# Bug fix: the Value plot was built but never assigned to p, so the
# ggsave below silently wrote the stale Saturation plot into
# Value_MDS.pdf. It is now assigned like the other two.
p = ggplot(mds.V, aes(x = coord.1, y = coord.2, label = Year, colour = category)) + geom_text() + ggtitle("MDS of Value Distributions")
ggsave(file = "./figures/Value_MDS.pdf", p)
#####-------------------------------------
##### setup Year distance matrix
#####-------------------------------------
# toeplitz(0:9) yields the |i - j| distance between year indices; rows
# and columns are labelled with the sorted calendar years.
first.row = c(0:9)
dist.years = toeplitz(first.row)
unique.years = as.numeric(unique(res.S$Year))
rownames(dist.years) = unique.years[order(unique.years)]
colnames(dist.years) = unique.years[order(unique.years)]
#####-------------------------------------
##### setup hour distance matrix
#####-------------------------------------
# Flatten the upper triangle of a distance matrix into a long data frame.
#
# Args:
#   x: either a precomputed (square, labelled) distance matrix, used
#      as-is, or data from which a euclidean distance matrix is computed.
#
# Returns a data frame with:
#   Freq:  the pairwise distance, sorted by decreasing magnitude.
#   pairs: "A - B" labels with the lexicographically larger name first,
#          so each unordered pair always gets the same label.
get.dist.df = function(x){
  if (is.matrix(x)){
    # assume a proper distance matrix was passed in
    z = x
  }
  else{
    z = as.matrix(dist(x))
  }
  z[lower.tri(z,diag=TRUE)]=NA   # keep each unordered pair exactly once
  z=as.data.frame(as.table(z))   # long format: Var1, Var2, Freq
  z=na.omit(z)
  z=z[order(-abs(z$Freq)),]
  z$Var1 = as.character(z$Var1)
  z$Var2 = as.character(z$Var2)
  # Vectorized label construction; the previous element-by-element loop
  # grew a vector with c() (quadratic) and errored on empty input.
  z$pairs = ifelse(z$Var1 > z$Var2,
                   paste(z$Var1, "-", z$Var2),
                   paste(z$Var2, "-", z$Var1))
  z$Var1 = NULL
  z$Var2 = NULL
  return(z)
}
# Pairwise year distances (in years) plus pairwise H/S/V distribution
# distances, joined per category, then plotted distance-vs-time.
dist.years.df = get.dist.df(dist.years)
names(dist.years.df)[1] = "years.dist"
hours.HSV.list = list()
categories = unique(res.H$category)
library(plyr)
for (i in c(1:length(categories))){
res.H.temp = subset(res.H, category == categories[i])
res.H.temp$category = NULL
res.S.temp = subset(res.S, category == categories[i])
res.S.temp$category = NULL
res.V.temp = subset(res.V, category == categories[i])
res.V.temp$category = NULL
## get H distances
# Row names become the years so get.dist.df labels pairs "year - year".
row.names(res.H.temp) = res.H.temp[,1]
H.dist = get.dist.df(res.H.temp[,-1])
names(H.dist)[1] = "hue.dist"
## get S distances
row.names(res.S.temp) = res.S.temp[,1]
S.dist = get.dist.df(res.S.temp[,-1])
names(S.dist)[1] = "saturation.dist"
## get V distances
row.names(res.V.temp) = res.V.temp[,1]
V.dist = get.dist.df(res.V.temp[,-1])
names(V.dist)[1] = "value.dist"
# join_all merges on the shared "pairs" column.
hours.HSV = join_all(list(dist.years.df, H.dist, S.dist, V.dist))
hours.HSV$category = categories[i]
hours.HSV.list[[i]] = hours.HSV
}
hours.HSV.res = do.call(rbind, hours.HSV.list)
# NOTE(review): each plot below adds geom_text() twice -- the duplicate
# layer is harmless but looks like a leftover; confirm one can be dropped.
#p = ggplot(hours.HSV.res, aes(x = hue.dist, y = years.dist, label = pairs)) + geom_text() + facet_wrap( ~ category, nrow = 2) + geom_text() + stat_smooth(method = lm)
p = ggplot(hours.HSV.res, aes(x = hue.dist, y = years.dist, label = pairs, colour = category)) + geom_text() + geom_text() + stat_smooth(method = lm)
ggsave(file = "./figures/Hue_time.pdf", p)
#p = ggplot(hours.HSV.res, aes(x = saturation.dist, y = years.dist, label = pairs)) + geom_text() + facet_wrap( ~ category, nrow = 2) + geom_text() + stat_smooth(method = lm)
p = ggplot(hours.HSV.res, aes(x = saturation.dist, y = years.dist, label = pairs, colour = category)) + geom_text() + geom_text() + stat_smooth(method = lm)
ggsave(file = "./figures/Saturation_time.pdf", p)
#p = ggplot(hours.HSV.res, aes(x = value.dist, y = years.dist, label = pairs)) + geom_text() + facet_wrap( ~ category, nrow = 2) + geom_text() + stat_smooth(method = lm)
p = ggplot(hours.HSV.res, aes(x = value.dist, y = years.dist, label = pairs, colour = category)) + geom_text() + geom_text() + stat_smooth(method = lm)
ggsave(file = "./figures/Value_time.pdf", p)
| /DeviantArt/scripts/analysis/yearly_category_HSV.R | no_license | myazdani/DeviantArt-repo | R | false | false | 6,496 | r | ## yearly_category_HSV.R
##
##
library(data.table)
#####-------------------------------------
##### setup data
#####-------------------------------------
#dt = fread("~/Documents/wifire/processedData/features/output_SD_all.csv", header = FALSE)
load("./processedData/features/digital_trad_art_MATLABfeatures.Rda")
res = as.data.frame(dt[,lapply(.SD, FUN = function(x) as.double(mean(x)/sd(x))), by = list(Year, category), .SDcols = 45:68])
res = subset(res, Year != 2000)
rm(dt)
res.H = res[,c(1,2,which(grepl("HH", names(res))))]
res.S = res[,c(1,2,which(grepl("SH", names(res))))]
res.V = res[,c(1,2,which(grepl("VH", names(res))))]
rm(res)
library(ggplot2)
library(reshape)
# res.H.m = melt(res.H, id.vars = c("day", "hour.chunk"))
# res.S.m = melt(res.S)
# res.V.m = melt(res.V)
#
# ggplot(res.H.m, aes(x = variable, y = value, colour = day)) + geom_point() + facet_wrap(~ hour.chunk, nrow = 3)
# ggplot(res.S.m, aes(x = variable, y = value, colour = day)) + geom_point() + facet_wrap(~ hour.chunk, nrow = 3)
# ggplot(res.V.m, aes(x = variable, y = value, colour = day)) + geom_point() + facet_wrap(~ hour.chunk, nrow = 3)
#####-------------------------------------
#####
#####-------------------------------------
return_mds_per_category = function(df){
categories = unique(df$category)
categories.mds = data.frame(category = NA, Year = NA, coord.1 = NA, coord.2 = NA)
for (i in c(1:length(categories))){
df.temp = subset(df, category == categories[i])
df.temp$category = NULL
row.names(df.temp) = df.temp$Year
dist.matrix = dist(df.temp[, -which(names(df.temp) == "Year")], method = "euclidean")
fit <- cmdscale(dist.matrix,eig=TRUE, k=2) # k is the number of dim
mds.1 = fit$points[,1]
mds.2 = fit$points[,2]
if (max(mds.1) < max(abs(mds.1))) { mds.1 = -1*mds.1}
#if (max(mds.2) < max(abs(mds.2))) { mds.2 = -1*mds.2}
categories.mds = rbind(categories.mds, data.frame(category = categories[i], Year = row.names(fit$points), coord.1 = mds.1, coord.2 = mds.2))
}
return(categories.mds[-1,])
}
mds.H = return_mds_per_category(res.H)
#p = ggplot(mds.H, aes(x = coord.1, y = coord.2, label = Year)) + facet_wrap( ~ category, nrow = 2) + geom_text() + ggtitle("MDS of Hue Distributions")
p = ggplot(mds.H, aes(x = coord.1, y = coord.2, label = Year, colour = category)) + geom_text() + ggtitle("MDS of Hue Distributions")
ggsave(file = "./figures/Hue_MDS.pdf", p)
mds.S = return_mds_per_category(res.S)
#p = ggplot(mds.S, aes(x = coord.1, y = coord.2, label = Year)) + facet_wrap( ~ category, nrow = 2) + geom_text() + ggtitle("MDS of Saturation Distributions")
p = ggplot(mds.S, aes(x = coord.1, y = coord.2, label = Year, colour = category)) + geom_text() + ggtitle("MDS of Saturation Distributions")
ggsave(file = "./figures/Saturation_MDS.pdf", p)
mds.V = return_mds_per_category(res.V)
# Bug fix: the Value plot was built but never assigned to p, so the
# ggsave below silently wrote the stale Saturation plot into
# Value_MDS.pdf. It is now assigned like the other two plots.
p = ggplot(mds.V, aes(x = coord.1, y = coord.2, label = Year, colour = category)) + geom_text() + ggtitle("MDS of Value Distributions")
ggsave(file = "./figures/Value_MDS.pdf", p)
#####-------------------------------------
##### setup Year distance matrix
#####-------------------------------------
first.row = c(0:9)
dist.years = toeplitz(first.row)
unique.years = as.numeric(unique(res.S$Year))
rownames(dist.years) = unique.years[order(unique.years)]
colnames(dist.years) = unique.years[order(unique.years)]
#####-------------------------------------
##### setup hour distance matrix
#####-------------------------------------
# Flatten the upper triangle of a distance matrix into a long data frame.
#
# Args:
#   x: either a precomputed (square, labelled) distance matrix, used
#      as-is, or data from which a euclidean distance matrix is computed.
#
# Returns a data frame with:
#   Freq:  the pairwise distance, sorted by decreasing magnitude.
#   pairs: "A - B" labels with the lexicographically larger name first,
#          so each unordered pair always gets the same label.
get.dist.df = function(x){
  if (is.matrix(x)){
    # assume a proper distance matrix was passed in
    z = x
  }
  else{
    z = as.matrix(dist(x))
  }
  z[lower.tri(z,diag=TRUE)]=NA   # keep each unordered pair exactly once
  z=as.data.frame(as.table(z))   # long format: Var1, Var2, Freq
  z=na.omit(z)
  z=z[order(-abs(z$Freq)),]
  z$Var1 = as.character(z$Var1)
  z$Var2 = as.character(z$Var2)
  # Vectorized label construction; the previous element-by-element loop
  # grew a vector with c() (quadratic) and errored on empty input.
  z$pairs = ifelse(z$Var1 > z$Var2,
                   paste(z$Var1, "-", z$Var2),
                   paste(z$Var2, "-", z$Var1))
  z$Var1 = NULL
  z$Var2 = NULL
  return(z)
}
dist.years.df = get.dist.df(dist.years)
names(dist.years.df)[1] = "years.dist"
hours.HSV.list = list()
categories = unique(res.H$category)
library(plyr)
for (i in c(1:length(categories))){
res.H.temp = subset(res.H, category == categories[i])
res.H.temp$category = NULL
res.S.temp = subset(res.S, category == categories[i])
res.S.temp$category = NULL
res.V.temp = subset(res.V, category == categories[i])
res.V.temp$category = NULL
## get H distances
row.names(res.H.temp) = res.H.temp[,1]
H.dist = get.dist.df(res.H.temp[,-1])
names(H.dist)[1] = "hue.dist"
## get S distances
row.names(res.S.temp) = res.S.temp[,1]
S.dist = get.dist.df(res.S.temp[,-1])
names(S.dist)[1] = "saturation.dist"
## get V distances
row.names(res.V.temp) = res.V.temp[,1]
V.dist = get.dist.df(res.V.temp[,-1])
names(V.dist)[1] = "value.dist"
hours.HSV = join_all(list(dist.years.df, H.dist, S.dist, V.dist))
hours.HSV$category = categories[i]
hours.HSV.list[[i]] = hours.HSV
}
hours.HSV.res = do.call(rbind, hours.HSV.list)
#p = ggplot(hours.HSV.res, aes(x = hue.dist, y = years.dist, label = pairs)) + geom_text() + facet_wrap( ~ category, nrow = 2) + geom_text() + stat_smooth(method = lm)
p = ggplot(hours.HSV.res, aes(x = hue.dist, y = years.dist, label = pairs, colour = category)) + geom_text() + geom_text() + stat_smooth(method = lm)
ggsave(file = "./figures/Hue_time.pdf", p)
#p = ggplot(hours.HSV.res, aes(x = saturation.dist, y = years.dist, label = pairs)) + geom_text() + facet_wrap( ~ category, nrow = 2) + geom_text() + stat_smooth(method = lm)
p = ggplot(hours.HSV.res, aes(x = saturation.dist, y = years.dist, label = pairs, colour = category)) + geom_text() + geom_text() + stat_smooth(method = lm)
ggsave(file = "./figures/Saturation_time.pdf", p)
#p = ggplot(hours.HSV.res, aes(x = value.dist, y = years.dist, label = pairs)) + geom_text() + facet_wrap( ~ category, nrow = 2) + geom_text() + stat_smooth(method = lm)
p = ggplot(hours.HSV.res, aes(x = value.dist, y = years.dist, label = pairs, colour = category)) + geom_text() + geom_text() + stat_smooth(method = lm)
ggsave(file = "./figures/Value_time.pdf", p)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iris.R
\name{get_iris_raw_task}
\alias{get_iris_raw_task}
\title{Check task of IRIS RAW file}
\usage{
get_iris_raw_task(
file,
header_size = 50,
task = c("WIND", "SURVEILLANCE", "VOL_A", "VOL_B")
)
}
\arguments{
\item{file}{A string containing a file name.}
\item{header_size}{Number of header bytes to search}
\item{task}{task names to search for in the file header}
}
\value{
one of the \code{task} names found in the header, \code{NA} if none of the task names were found.
}
\description{
Checks which task (polar volume type) is contained in a IRIS RAW file
}
| /man/get_iris_raw_task.Rd | permissive | barthoekstra/bioRad | R | false | true | 650 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iris.R
\name{get_iris_raw_task}
\alias{get_iris_raw_task}
\title{Check task of IRIS RAW file}
\usage{
get_iris_raw_task(
file,
header_size = 50,
task = c("WIND", "SURVEILLANCE", "VOL_A", "VOL_B")
)
}
\arguments{
\item{file}{A string containing a file name.}
\item{header_size}{Number of header bytes to search}
\item{task}{task names to search for in the file header}
}
\value{
one of the \code{task} names found in the header, \code{NA} if none of the task names were found.
}
\description{
Checks which task (polar volume type) is contained in a IRIS RAW file
}
|
#install.packages (c("plyr","plotrix"))
library (plyr)
library (plotrix)
# NOTE(review): hard-coded setwd() to a personal directory makes the
# script non-portable; prefer relative paths or a project root.
setwd("C:\\Users\\Valentin\\Documents\\IP")
file = "pacients-ours.csv"
# Positional args: header = TRUE, sep = ",". The name `table` shadows
# base::table, which the script also calls -- intentional but fragile.
table = read.csv(file, TRUE, ",")
type = "BarPlot" #Pie or BarPlot
# Number of columns; used to validate column names inside the loop below.
contor = length(names(table))
# tip selects the chart family per outer-loop pass: 1 = Pie, 2 = BarPlot.
tip = 1
while(tip <= 2)
{
if(tip == 1)
type = "Pie"
if(tip == 2)
type = "BarPlot"
size = 2
while(size <= length(table))
{
var = colnames(table[size])
t = 1
ok2 = TRUE
while (t <= contor)
{
if (var == names (table[t]))
ok2 = FALSE
t = t + 1
}
w = FALSE
k = 1
while( k <= length(var))
{
if (grepl (var[k], "BestDoctor"))
w = TRUE
k = k + 1
}
if(file != "doctors.csv")
w = FALSE
if (ok2 == FALSE || w == TRUE)
{
if (w == TRUE)
{
a = count (table[9])
var = gsub (".", " ", var, fixed = TRUE)
copie = nrow (a)
maxim = a [copie, 1]
q = 1
while (table[q, 9] != maxim)
q = q + 1
nume = table[q,2]
nume = paste(nume, table[q, 3], sep = "")
z=paste(var,"_",type,".png",sep="")
png(filename=z)
pie (maxim, labels = maxim, col = "blue", main = var, radius = 1, xlab = nume, ylab = table[q, 6])
dev.off()
}
if (w != TRUE)
{
a = count(table, var)
var = gsub(".", " ", var, fixed=TRUE)
i = nrow(a)
colors = rainbow(i)
slices = NULL
lbls = NULL
while (i > 0)
{
slices = c(slices, a[i, ncol(a)])
j = ncol(a) - 1
k = 1
tmp = NULL
while (k <= j)
{
tmp = paste(tmp, substr(a[i,k], 1, 100),"\n")
k = k + 1
}
lbls = c(lbls, tmp)
i = i - 1
}
clbls = lbls
pct = round(slices / sum(slices) * 100)
lbls = paste(lbls, pct)
lbls = paste(lbls, "%", sep = "")
ok = FALSE
k = 1
while( k <= j)
{
if(grepl(var[k],"Age"))
ok = TRUE
k = k + 1
}
if(ok == TRUE && ncol(a) < 3 )
{
valmin = 1
valmax = 10
p = 1
year = NULL
ye = 0
while(p < nrow(a))
{
while(a[p,1] >= valmin && a[p,1] < valmax && p < nrow(a))
{
ye = ye + a[p,2]
p = p + 1
}
valmin = valmax
valmax = valmax + 10
year = c(year, ye)
ye = 0
}
maxim = max(year) + (10 - max(year) %% 10)
name = paste(seq(0, valmax - 20, by = 10), "-", seq(9, valmax - 10, by = 10), "\nyears ")
if(type == "Pie")
{
name = gsub("\n", " ", name, fixed=TRUE)
pct = round(year / sum(year) * 100)
name = paste(name, "\n=", pct)
name = paste(name, "%", sep = "")
j = 1
while (j < length(year))
{
if(year[j] == 0)
{
name = name[-j]
year = year[-j]
j = j - 1
}
j = j + 1
}
z=paste(var,"_",type,".png",sep="")
#png(filename=z)
pie(year, labels = name, col = rainbow(length(year)), main = "Age Range", radius = 1)
dev.off()
}
if(type == "BarPlot")
{
z=paste(var,"_",type,".png",sep="")
png(filename=z)
barplot(year, main = "Age Range", xlim = c(0, maxim), horiz = TRUE, col = rainbow(length(year)), las = 1, names.arg = name, cex.names = 0.8)
dev.off()
}
}
if(type == "Pie" && ok != TRUE)
{
if(nrow(a) <= 4)
{
z=paste(var,"_",type,".png",sep="")
png(filename=z)
pie3D(slices, explode = 0, labels = lbls, theta = pi / 3, col = colors, main = var, radius = 1, start = 1.5, labelcex = 1)
dev.off()
}
if(nrow(a) > 4)
{
z=paste(var,"_",type,".png",sep="")
png(filename=z)
pie(slices, labels = lbls, col = colors, main = var, radius = 1)
dev.off()
}
}
if(type == "BarPlot" && ok != TRUE)
{
z=paste(var,"_",type,".png",sep="")
png(filename=z)
barplot(slices, main = var, xlim = c(0, max(slices) + (10 - max(slices) %% 10)), col = colors, horiz = TRUE, las = 1, names.arg = clbls, cex.names = 0.8)
dev.off()
}
if(ok == TRUE && ncol(a) > 2)
print("Age must be called alone!")
}
if(type != "Pie" && type != "BarPlot")
print("Unknown Graphic Function!")
}
if(ok2 == TRUE && w != TRUE)
print("Wrong Column. Please Insert a correct one!")
size = size + 1
}
tip = tip + 1
} | /Cod R-PNG saving and Generate all - final.R | no_license | Hachiko1337/SEStatisticsProject | R | false | false | 5,327 | r | #install.packages (c("plyr","plotrix"))
# Reads a patients/doctors CSV and saves one PNG chart per column, first as
# pie charts and then as bar plots (outer loop over `tip`).
#
# NOTE(review): the working directory and input file name are hard-coded for
# the original author's Windows machine; adjust before running elsewhere.
library(plyr)
library(plotrix)

setwd("C:\\Users\\Valentin\\Documents\\IP")
file <- "pacients-ours.csv"
table <- read.csv(file, header = TRUE, sep = ",")
type <- "BarPlot" # Pie or BarPlot
contor <- length(names(table))

# Pass 1 (tip == 1) renders pie charts, pass 2 (tip == 2) renders bar plots.
tip <- 1
while (tip <= 2) {
  if (tip == 1)
    type <- "Pie"
  if (tip == 2)
    type <- "BarPlot"
  # Walk every column except the first (treated as an identifier column).
  size <- 2
  while (size <= length(table)) {
    var <- colnames(table[size])
    # ok2 becomes FALSE when `var` matches one of the table's column names.
    # Since `var` is itself taken from the table, this is always FALSE here,
    # so the charting branch below always runs; kept as originally designed.
    t <- 1
    ok2 <- TRUE
    while (t <= contor) {
      if (var == names(table[t]))
        ok2 <- FALSE
      t <- t + 1
    }
    # w flags the special "BestDoctor" column, only honoured for doctors.csv.
    # NOTE(review): grepl(pattern, x) is called as grepl(var[k], "BestDoctor"),
    # i.e. it tests whether "BestDoctor" CONTAINS the column name -- confirm
    # this reversed argument order is intended.
    w <- FALSE
    k <- 1
    while (k <= length(var)) {
      if (grepl(var[k], "BestDoctor"))
        w <- TRUE
      k <- k + 1
    }
    if (file != "doctors.csv")
      w <- FALSE
    if (ok2 == FALSE || w == TRUE) {
      if (w == TRUE) {
        # Special case: chart only the best doctor (maximum of column 9).
        a <- count(table[9])
        var <- gsub(".", " ", var, fixed = TRUE)
        copie <- nrow(a)
        maxim <- a[copie, 1]
        q <- 1
        while (table[q, 9] != maxim)
          q <- q + 1
        nume <- table[q, 2]
        nume <- paste(nume, table[q, 3], sep = "")
        z <- paste(var, "_", type, ".png", sep = "")
        png(filename = z)
        pie(maxim, labels = maxim, col = "blue", main = var, radius = 1, xlab = nume, ylab = table[q, 6])
        dev.off()
      }
      if (w != TRUE) {
        # Frequency table for the current column (plyr::count).
        a <- count(table, var)
        var <- gsub(".", " ", var, fixed = TRUE)
        i <- nrow(a)
        colors <- rainbow(i)
        # Build slice sizes and labels, walking the frequency rows in reverse.
        slices <- NULL
        lbls <- NULL
        while (i > 0) {
          slices <- c(slices, a[i, ncol(a)])
          j <- ncol(a) - 1
          k <- 1
          tmp <- NULL
          while (k <= j) {
            tmp <- paste(tmp, substr(a[i, k], 1, 100), "\n")
            k <- k + 1
          }
          lbls <- c(lbls, tmp)
          i <- i - 1
        }
        clbls <- lbls
        pct <- round(slices / sum(slices) * 100)
        lbls <- paste(lbls, pct)
        lbls <- paste(lbls, "%", sep = "")
        # ok marks the Age column, which gets bucketed into 10-year ranges.
        ok <- FALSE
        k <- 1
        while (k <= j) {
          if (grepl(var[k], "Age"))
            ok <- TRUE
          k <- k + 1
        }
        if (ok == TRUE && ncol(a) < 3) {
          # Accumulate counts per 10-year bucket: [1,10), [10,20), ...
          valmin <- 1
          valmax <- 10
          p <- 1
          year <- NULL
          ye <- 0
          while (p < nrow(a)) {
            while (a[p, 1] >= valmin && a[p, 1] < valmax && p < nrow(a)) {
              ye <- ye + a[p, 2]
              p <- p + 1
            }
            valmin <- valmax
            valmax <- valmax + 10
            year <- c(year, ye)
            ye <- 0
          }
          maxim <- max(year) + (10 - max(year) %% 10)
          name <- paste(seq(0, valmax - 20, by = 10), "-", seq(9, valmax - 10, by = 10), "\nyears ")
          if (type == "Pie") {
            name <- gsub("\n", " ", name, fixed = TRUE)
            pct <- round(year / sum(year) * 100)
            name <- paste(name, "\n=", pct)
            name <- paste(name, "%", sep = "")
            # Drop empty buckets so they do not clutter the pie chart.
            # FIX: the original condition was `j < length(year)`, which never
            # checked the LAST bucket for a zero count.
            j <- 1
            while (j <= length(year)) {
              if (year[j] == 0) {
                name <- name[-j]
                year <- year[-j]
                j <- j - 1
              }
              j <- j + 1
            }
            z <- paste(var, "_", type, ".png", sep = "")
            # FIX: png() was commented out here, so the chart went to the
            # default device and dev.off() closed that device instead of
            # writing the PNG.  Every other branch opens the device first.
            png(filename = z)
            pie(year, labels = name, col = rainbow(length(year)), main = "Age Range", radius = 1)
            dev.off()
          }
          if (type == "BarPlot") {
            z <- paste(var, "_", type, ".png", sep = "")
            png(filename = z)
            barplot(year, main = "Age Range", xlim = c(0, maxim), horiz = TRUE, col = rainbow(length(year)), las = 1, names.arg = name, cex.names = 0.8)
            dev.off()
          }
        }
        if (type == "Pie" && ok != TRUE) {
          # Few categories: a 3D pie (plotrix::pie3D) stays readable.
          if (nrow(a) <= 4) {
            z <- paste(var, "_", type, ".png", sep = "")
            png(filename = z)
            pie3D(slices, explode = 0, labels = lbls, theta = pi / 3, col = colors, main = var, radius = 1, start = 1.5, labelcex = 1)
            dev.off()
          }
          if (nrow(a) > 4) {
            z <- paste(var, "_", type, ".png", sep = "")
            png(filename = z)
            pie(slices, labels = lbls, col = colors, main = var, radius = 1)
            dev.off()
          }
        }
        if (type == "BarPlot" && ok != TRUE) {
          z <- paste(var, "_", type, ".png", sep = "")
          png(filename = z)
          barplot(slices, main = var, xlim = c(0, max(slices) + (10 - max(slices) %% 10)), col = colors, horiz = TRUE, las = 1, names.arg = clbls, cex.names = 0.8)
          dev.off()
        }
        if (ok == TRUE && ncol(a) > 2)
          print("Age must be called alone!")
      }
      if (type != "Pie" && type != "BarPlot")
        print("Unknown Graphic Function!")
    }
    if (ok2 == TRUE && w != TRUE)
      print("Wrong Column. Please Insert a correct one!")
    size <- size + 1
  }
  tip <- tip + 1
}
# Binomial-distribution exercises (35 trials).
library(ggplot2)
library(reshape2)
#1
# P(X = 10) successes out of 35, success probability 0.51.
dbinom(x = 10, size = 35, prob = 0.51)
#2
# P(X <= 10) with success probability 0.49.
pbinom(q = 10, size = 35, prob = 0.49)
#3
# Median (0.5 quantile) of the distribution.
qbinom(p = 0.5, size = 35, prob = 0.51)
#4
# Simulate 1000 draws and bar-plot their relative frequencies.
set.seed(4857)
muestra <- rbinom(n = 1000, size = 35, prob = 0.51)
df1 <- as.data.frame(table(muestra)/length(muestra))
names(df1) <- c("Exitos", "FR")
df2 <- melt(df1)
ggplot(df2, aes(x = Exitos, y = value, fill = variable)) +
geom_bar (stat="identity", position = "dodge")
# Normal-distribution exercises (mean 110, sd 7).
# NOTE(review): the x grid is built with `* 6` although the density below uses
# sd = 7, so the plot covers roughly +/- 3.4 sd rather than 4 -- confirm intended.
x <- seq(-4, 4, 0.01)*6 + 110
y <- dnorm(x, mean = 110, sd = 7)
#1
# Density curve.
plot(x, y, type = "l", xlab = "", ylab = "")
title(main = "Densidad de Probabilidad Normal", sub = expression(paste(mu == 110, " y ", sigma == 7)))
#2
# Upper-tail probability P(X > 140).
pnorm(q = 140, mean = 110, sd = 7, lower.tail = FALSE)
#3
# 95th percentile, then fed back into pnorm() as a check.
b <- qnorm(p = 0.95, mean = 110, sd = 7)
pnorm(b, 110, 7)
#4
# Simulate 1000 normal draws; histogram with a scaled density overlay.
set.seed(7563)
muestra <- rnorm(n = 1000, mean = 110, sd = 7)
mdf <- as.data.frame(muestra)
ggplot(mdf, aes(muestra)) +
geom_histogram(colour = 'red',
fill = 'blue',
alpha = 0.3,
binwidth = 3) +
geom_density(aes(y = 3*..count..))+
geom_vline(xintercept = mean(mdf$muestra), linetype="dashed", color = "black") +
ggtitle('Histograma para la muestra normal') +
labs(x = 'Valores obtenidos', y = 'Frecuencia')+
theme_dark() +
theme(plot.title = element_text(hjust = 0.5, size = 16))
| /sesion_4/retos.R | no_license | YaelRmz/bedu_fase_2_modulo_1 | R | false | false | 1,333 | r | library(ggplot2)
# Exercises on the binomial distribution (n = 35 trials).
library(reshape2)

# 1. P(X = 10) with success probability 0.51.
dbinom(x = 10, size = 35, prob = 0.51)
# 2. P(X <= 10) with success probability 0.49.
pbinom(q = 10, size = 35, prob = 0.49)
# 3. Median (0.5 quantile) of the distribution.
qbinom(p = 0.5, size = 35, prob = 0.51)
# 4. Simulate 1000 draws and bar-plot their relative frequencies.
set.seed(4857)
muestra <- rbinom(n = 1000, size = 35, prob = 0.51)
df1 <- as.data.frame(table(muestra) / length(muestra))
names(df1) <- c("Exitos", "FR")
df2 <- melt(df1)
ggplot(df2, aes(x = Exitos, y = value, fill = variable)) +
  geom_bar(stat = "identity", position = "dodge")

# Exercises on the normal distribution (mean 110, sd 7).
x <- seq(from = -4, to = 4, by = 0.01) * 6 + 110
y <- dnorm(x, mean = 110, sd = 7)
# 1. Density curve.
plot(x, y, type = "l", xlab = "", ylab = "")
title(main = "Densidad de Probabilidad Normal",
      sub = expression(paste(mu == 110, " y ", sigma == 7)))
# 2. Upper-tail probability P(X > 140).
pnorm(q = 140, mean = 110, sd = 7, lower.tail = FALSE)
# 3. The 95th percentile, checked by feeding it back into pnorm().
b <- qnorm(p = 0.95, mean = 110, sd = 7)
pnorm(q = b, mean = 110, sd = 7)
# 4. Simulate 1000 draws; histogram with a scaled density overlay.
set.seed(7563)
muestra <- rnorm(n = 1000, mean = 110, sd = 7)
mdf <- as.data.frame(muestra)
ggplot(mdf, aes(muestra)) +
  geom_histogram(colour = "red", fill = "blue", alpha = 0.3, binwidth = 3) +
  geom_density(aes(y = 3 * ..count..)) +
  geom_vline(xintercept = mean(mdf$muestra), linetype = "dashed", color = "black") +
  ggtitle("Histograma para la muestra normal") +
  labs(x = "Valores obtenidos", y = "Frecuencia") +
  theme_dark() +
  theme(plot.title = element_text(hjust = 0.5, size = 16))
### Export the `state` data to CSV.
### The object to convert is `state`; it is written to state.csv in the
### current working directory.
### NOTE(review): base R ships state.x77 / state.name / state.abb but no
### single object named `state` -- confirm `state` is defined before this runs.
write.csv(state,file = 'state.csv',row.names = F )
| /convert.R | no_license | moadams847/regression-analysis-murder | R | false | false | 201 | r | ### data to convert as cvs is state
### Write the `state` data to state.csv in the current working directory.
### NOTE(review): confirm `state` is defined earlier; base R has no single
### object named `state` (only state.x77, state.name, state.abb, ...).
# FIX: use FALSE instead of F -- `F` is an ordinary variable that can be
# reassigned, so relying on it is unsafe.
write.csv(state, file = "state.csv", row.names = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filterTS.r
\name{dataTS}
\alias{dataTS}
\title{dataTS}
\source{
wtss service
}
\description{
Example dataset containing one time series, used to run some functions of this package
}
| /man/dataTS.Rd | no_license | vishalbelsare/FeaturesTS | R | false | true | 246 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filterTS.r
\name{dataTS}
\alias{dataTS}
\title{dataTS}
\source{
wtss service
}
\description{
Example dataset containing one time series, used to run some functions of this package
}
|
plot.split.ocsvm <- function(species.name,color.choice="nuria",save.fig=F) {
# Plots OCSVM ensemble vote probabilities on a map, binned into ten classes
# with a colour legend, and optionally saves the figure as a PNG.
#
# NOTE(review): this function reads several free (global) variables that must
# exist in the calling environment and are NOT parameters: `res` (votes),
# `x` (coordinates), `border`, `plot.fig` ("world" or "nz") and `method.name`.
# NOTE(review): win.graph() is Windows-only; this will not run on Linux/macOS.
#######################################
#######################################
# MAKE SURE YOU LOAD WORLDCLIM FIRST !!
#######################################
#######################################
# res = 2 column matrix of Prediction and Probability from model result
# x = 2 column matrix with coordinates of predictions to be colored into map
# bool = boolean for edges
# legend.loc = length 2 vector with coordinates for legend
# color.choice = character, "nuria", "rainbow", "heat"
# save.fig = logical
# Load data
#setwd("R:\\Intelligent systems for biosecurity\\INVASIVE_SPP\\_dist_data&gbif")
#data1 <- read.table(paste(species.name,"_data_pres_abs.txt",sep=""), header=TRUE)
#setwd(paste("R:\\Intelligent systems for biosecurity\\INVASIVE_SPP\\_OCSVM\\",species.name,sep=""))
#res <- cbind(final.vote, final.vote/100)
#colnames(res) <- c("Vote","%")
## FILTER
#f <- function(x) sum(is.na(x))==0
#bool <- apply(worldclim[,colnames(data1)[1:(ncol(data1)-1)]],1,f)
#worldclim2 <- worldclim[bool,colnames(data1)[1:(ncol(data1)-1)]]
#bool2 <- apply(worldclim.orig[,c(59,60)],1,f)
#x <- worldclim[bool,c(2,3)]
# Splits
# Probability split points defining the ten bins: <0.1, (0.1,0.2], ..., >0.9.
# Note: values exactly equal to 0.1 satisfy neither ind1 (<0.1) nor
# ind2 (>0.1), so they keep the default "grey" colour.
spl <- c(0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9)
ind1 <- res[,2]<spl[1]
ind2 <- res[,2]>spl[1] & res[,2]<=spl[2]
ind3 <- res[,2]>spl[2] & res[,2]<=spl[3]
ind4 <- res[,2]>spl[3] & res[,2]<=spl[4]
ind5 <- res[,2]>spl[4] & res[,2]<=spl[5]
ind6 <- res[,2]>spl[5] & res[,2]<=spl[6]
ind7 <- res[,2]>spl[6] & res[,2]<=spl[7]
ind8 <- res[,2]>spl[7] & res[,2]<=spl[8]
ind9 <- res[,2]>spl[8] & res[,2]<=spl[9]
ind10 <- res[,2]>spl[9]
# One colour per prediction point; points in no bin stay grey (see above).
col <- rep("grey", dim(res)[1])
# Custom yellow-to-brown palette.
# NOTE(review): the legend's first colour (250,250,210) differs from the map
# colour used for bin 1 (207,207,207) -- confirm this mismatch is intended.
if (color.choice == "nuria") {
col[ind1] <- rgb(207, 207, 207, maxColorValue=255)
col[ind2] <- rgb(255, 215, 0, maxColorValue=255)
col[ind3] <- rgb(255, 193, 37, maxColorValue=255)
col[ind4] <- rgb(255, 180, 34, maxColorValue=255)
col[ind5] <- rgb(255, 185, 15, maxColorValue=255)
col[ind6] <- rgb(255, 165, 0, maxColorValue=255)
col[ind7] <- rgb(205, 104, 57, maxColorValue=255)
col[ind8] <- rgb(139, 76, 57, maxColorValue=255)
col[ind9] <- rgb(139, 54, 38, maxColorValue=255)
col[ind10] <- rgb(139, 37, 0, maxColorValue=255)
col.vec <- c(rgb(250, 250, 210, maxColorValue=255),
rgb(255, 215, 0, maxColorValue=255),
rgb(255, 193, 37, maxColorValue=255),
rgb(255, 180, 34, maxColorValue=255),
rgb(255, 185, 15, maxColorValue=255),
rgb(255, 165, 0, maxColorValue=255),
rgb(205, 104, 57, maxColorValue=255),
rgb(139, 76, 57, maxColorValue=255),
rgb(139, 54, 38, maxColorValue=255),
rgb(139, 37, 0, maxColorValue=255))
}
# Rainbow palette, reversed so high probabilities come out red.
if (color.choice == "rainbow") {
col[ind1] <- rainbow(12)[10]
col[ind2] <- rainbow(12)[9]
col[ind3] <- rainbow(12)[8]
col[ind4] <- rainbow(12)[7]
col[ind5] <- rainbow(12)[6]
col[ind6] <- rainbow(12)[5]
col[ind7] <- rainbow(12)[4]
col[ind8] <- rainbow(12)[3]
col[ind9] <- rainbow(12)[2]
col[ind10] <- rainbow(12)[1]
col.vec <- rev(rainbow(12)[1:10])
}
# Heat palette, reversed for the same reason.
if (color.choice == "heat") {
col[ind1] <- heat.colors(10)[10]
col[ind2] <- heat.colors(10)[9]
col[ind3] <- heat.colors(10)[8]
col[ind4] <- heat.colors(10)[7]
col[ind5] <- heat.colors(10)[6]
col[ind6] <- heat.colors(10)[5]
col[ind7] <- heat.colors(10)[4]
col[ind8] <- heat.colors(10)[3]
col[ind9] <- heat.colors(10)[2]
col[ind10] <- heat.colors(10)[1]
col.vec <- rev(heat.colors(10))
}
# NOTE(review): legend.loc is hard-coded here although the header comment
# above describes it as an input -- confirm whether it should be a parameter.
legend.loc <- c(-120,-10)
cex1 <- 0.2
pch1 <- 16
# Open a Windows graphics window and draw the coloured prediction points.
win.graph(width=70,height=40)
plot(x[,1],x[,2],cex=cex1,pch=pch1,col=col, xlab="Longitude", ylab="Latitude")
# Legend: one entry per probability bin.
legend(legend.loc[1],legend.loc[2], legend=c(paste("<",spl[1]),
paste(paste(spl[1],"-"),spl[2]),
paste(paste(spl[2],"-"),spl[3]),
paste(paste(spl[3],"-"),spl[4]),
paste(paste(spl[4],"-"),spl[5]),
paste(paste(spl[5],"-"),spl[6]),
paste(paste(spl[6],"-"),spl[7]),
paste(paste(spl[7],"-"),spl[8]),
paste(paste(spl[8],"-"),spl[9]),
paste(">",spl[9])),
col=col.vec,
pch=15,bg="white", cex=0.8)
# Overlay country/coastline borders, add a title, and optionally save.
if(plot.fig == "world") {
points(border[,2], border[,3], cex=0.2, xlab="Longitude", ylab="Latitude")
title(paste("Prediction for World",species.name,method.name,sep=","))
if(save.fig) savePlot(paste(species.name,"_plotWorld_prediction_",method.name,".png",sep=""), type="png")
}
if(plot.fig == "nz") {
points(border[,2], border[,3], cex=0.2, xlab="Longitude", ylab="Latitude")
# points(worldclim.orig[!bool2,2],worldclim.orig[!bool2,3],cex=1,pch=15,col="grey")
title(paste("Prediction for NZ",species.name,method.name,sep=","))
if(save.fig) savePlot(paste(species.name,"_plotNZ_prediction_",method.name,".png",sep=""), type="png")
}
}
| /assets/scripts/4_MMA/_functions/plot.split.ocsvm.R | permissive | BPRC-EcoInformatics/BPRC-EcoInformatics.github.io | R | false | false | 4,845 | r | plot.split.ocsvm <- function(species.name,color.choice="nuria",save.fig=F) {
#######################################
#######################################
# MAKE SURE YOU LOAD WORLDCLIM FIRST !!
#######################################
#######################################
# res = 2 column matrix of Prediction and Probability from model result
# x = 2 column matrix with coordinates of predictions to be colored into map
# bool = boolean for edges
# legend.loc = length 2 vector with coordinates for legend
# color.choice = character, "nuria", "rainbow", "heat"
# save.fig = logical
# Load data
#setwd("R:\\Intelligent systems for biosecurity\\INVASIVE_SPP\\_dist_data&gbif")
#data1 <- read.table(paste(species.name,"_data_pres_abs.txt",sep=""), header=TRUE)
#setwd(paste("R:\\Intelligent systems for biosecurity\\INVASIVE_SPP\\_OCSVM\\",species.name,sep=""))
#res <- cbind(final.vote, final.vote/100)
#colnames(res) <- c("Vote","%")
## FILTER
#f <- function(x) sum(is.na(x))==0
#bool <- apply(worldclim[,colnames(data1)[1:(ncol(data1)-1)]],1,f)
#worldclim2 <- worldclim[bool,colnames(data1)[1:(ncol(data1)-1)]]
#bool2 <- apply(worldclim.orig[,c(59,60)],1,f)
#x <- worldclim[bool,c(2,3)]
# Splits
spl <- c(0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9)
ind1 <- res[,2]<spl[1]
ind2 <- res[,2]>spl[1] & res[,2]<=spl[2]
ind3 <- res[,2]>spl[2] & res[,2]<=spl[3]
ind4 <- res[,2]>spl[3] & res[,2]<=spl[4]
ind5 <- res[,2]>spl[4] & res[,2]<=spl[5]
ind6 <- res[,2]>spl[5] & res[,2]<=spl[6]
ind7 <- res[,2]>spl[6] & res[,2]<=spl[7]
ind8 <- res[,2]>spl[7] & res[,2]<=spl[8]
ind9 <- res[,2]>spl[8] & res[,2]<=spl[9]
ind10 <- res[,2]>spl[9]
col <- rep("grey", dim(res)[1])
if (color.choice == "nuria") {
col[ind1] <- rgb(207, 207, 207, maxColorValue=255)
col[ind2] <- rgb(255, 215, 0, maxColorValue=255)
col[ind3] <- rgb(255, 193, 37, maxColorValue=255)
col[ind4] <- rgb(255, 180, 34, maxColorValue=255)
col[ind5] <- rgb(255, 185, 15, maxColorValue=255)
col[ind6] <- rgb(255, 165, 0, maxColorValue=255)
col[ind7] <- rgb(205, 104, 57, maxColorValue=255)
col[ind8] <- rgb(139, 76, 57, maxColorValue=255)
col[ind9] <- rgb(139, 54, 38, maxColorValue=255)
col[ind10] <- rgb(139, 37, 0, maxColorValue=255)
col.vec <- c(rgb(250, 250, 210, maxColorValue=255),
rgb(255, 215, 0, maxColorValue=255),
rgb(255, 193, 37, maxColorValue=255),
rgb(255, 180, 34, maxColorValue=255),
rgb(255, 185, 15, maxColorValue=255),
rgb(255, 165, 0, maxColorValue=255),
rgb(205, 104, 57, maxColorValue=255),
rgb(139, 76, 57, maxColorValue=255),
rgb(139, 54, 38, maxColorValue=255),
rgb(139, 37, 0, maxColorValue=255))
}
if (color.choice == "rainbow") {
col[ind1] <- rainbow(12)[10]
col[ind2] <- rainbow(12)[9]
col[ind3] <- rainbow(12)[8]
col[ind4] <- rainbow(12)[7]
col[ind5] <- rainbow(12)[6]
col[ind6] <- rainbow(12)[5]
col[ind7] <- rainbow(12)[4]
col[ind8] <- rainbow(12)[3]
col[ind9] <- rainbow(12)[2]
col[ind10] <- rainbow(12)[1]
col.vec <- rev(rainbow(12)[1:10])
}
if (color.choice == "heat") {
col[ind1] <- heat.colors(10)[10]
col[ind2] <- heat.colors(10)[9]
col[ind3] <- heat.colors(10)[8]
col[ind4] <- heat.colors(10)[7]
col[ind5] <- heat.colors(10)[6]
col[ind6] <- heat.colors(10)[5]
col[ind7] <- heat.colors(10)[4]
col[ind8] <- heat.colors(10)[3]
col[ind9] <- heat.colors(10)[2]
col[ind10] <- heat.colors(10)[1]
col.vec <- rev(heat.colors(10))
}
legend.loc <- c(-120,-10)
cex1 <- 0.2
pch1 <- 16
win.graph(width=70,height=40)
plot(x[,1],x[,2],cex=cex1,pch=pch1,col=col, xlab="Longitude", ylab="Latitude")
legend(legend.loc[1],legend.loc[2], legend=c(paste("<",spl[1]),
paste(paste(spl[1],"-"),spl[2]),
paste(paste(spl[2],"-"),spl[3]),
paste(paste(spl[3],"-"),spl[4]),
paste(paste(spl[4],"-"),spl[5]),
paste(paste(spl[5],"-"),spl[6]),
paste(paste(spl[6],"-"),spl[7]),
paste(paste(spl[7],"-"),spl[8]),
paste(paste(spl[8],"-"),spl[9]),
paste(">",spl[9])),
col=col.vec,
pch=15,bg="white", cex=0.8)
if(plot.fig == "world") {
points(border[,2], border[,3], cex=0.2, xlab="Longitude", ylab="Latitude")
title(paste("Prediction for World",species.name,method.name,sep=","))
if(save.fig) savePlot(paste(species.name,"_plotWorld_prediction_",method.name,".png",sep=""), type="png")
}
if(plot.fig == "nz") {
points(border[,2], border[,3], cex=0.2, xlab="Longitude", ylab="Latitude")
# points(worldclim.orig[!bool2,2],worldclim.orig[!bool2,3],cex=1,pch=15,col="grey")
title(paste("Prediction for NZ",species.name,method.name,sep=","))
if(save.fig) savePlot(paste(species.name,"_plotNZ_prediction_",method.name,".png",sep=""), type="png")
}
}
|
# Example usage script for geomedb::listMarkers(), extracted from the
# package's Rd examples (the "##D" lines appear to be the don't-run body).
library(geomedb)
### Name: listMarkers
### Title: get a list of markers to query against
### Aliases: listMarkers
### ** Examples
## Not run:
##D markers <- listMarkers()
## End(Not run)
| /data/genthat_extracted_code/geomedb/examples/listMarkers.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 195 | r | library(geomedb)
### Name: listMarkers
### Title: get a list of markers to query against
### Aliases: listMarkers
### ** Examples
## Not run:
##D markers <- listMarkers()
## End(Not run)
|
#' Google Partners API
#' Lets advertisers search certified companies and create contact leads with them, and also audits the usage of clients.
#'
#' Auto-generated code by googleAuthR::gar_create_api_skeleton
#' at 2017-03-05 19:57:25
#' filename: /Users/mark/dev/R/autoGoogleAPI/googlepartnersv2.auto/R/partners_functions.R
#' api_json: api_json
#'
#' @details
#' Authentication scopes used are:
#' \itemize{
#' \item
#' }
#'
#' @docType package
#' @name partners_googleAuthR
#'
NULL
## NULL
#' A helper function that tests whether an object is either NULL _or_
#' a list of NULLs
#'
#' @param x Object to test.
#' @return `TRUE` if `x` is `NULL` or every element of `x` is `NULL`,
#'   otherwise `FALSE`.  Note that an empty list yields `TRUE` because
#'   `all()` on a zero-length vector is `TRUE`.
#' @keywords internal
is.NullOb <- function(x) {
  # `||` short-circuits on the scalar NULL check; vapply() is type-stable,
  # unlike sapply(), whose return type depends on its input.
  is.null(x) || all(vapply(x, is.null, logical(1)))
}
#' Recursively remove NULL-like elements from a nested list
#'
#' Drops every element for which [is.NullOb()] is `TRUE`, descending into
#' sub-lists so nested NULL-only entries are pruned as well.
#'
#' @param x A (possibly nested) list.
#' @return The pruned list.
#' @keywords internal
rmNullObs <- function(x) {
  kept <- Filter(function(el) !is.NullOb(el), x)
  lapply(kept, function(el) {
    if (is.list(el)) rmNullObs(el) else el
  })
}
#' Logs a user event.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/partners/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c())}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param LogUserEventRequest The \link{LogUserEventRequest} object to pass to this method
#' @importFrom googleAuthR gar_api_generator
#' @family LogUserEventRequest functions
#' @export
userEvents.log <- function(LogUserEventRequest) {
    # FIX: validate the input before doing any work, so callers fail fast
    # with a clear error (the original validated after building the call).
    # The doubled "#' #'" roxygen prefix that broke @importFrom is also fixed.
    stopifnot(inherits(LogUserEventRequest, "gar_LogUserEventRequest"))
    url <- "https://partners.googleapis.com/v2/userEvents:log"
    # partners.userEvents.log
    f <- googleAuthR::gar_api_generator(url, "POST",
                                        data_parse_function = function(x) x)
    f(the_body = LogUserEventRequest)
}
#' Logs a generic message from the client, such as `Failed to render component`, `Profile page is running slow`, `More than 500 users have accessed this result.`, etc.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/partners/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c())}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param LogMessageRequest The \link{LogMessageRequest} object to pass to this method
#' @importFrom googleAuthR gar_api_generator
#' @family LogMessageRequest functions
#' @export
clientMessages.log <- function(LogMessageRequest) {
    # FIX: validate the input before doing any work, so callers fail fast
    # with a clear error (the original validated after building the call).
    # The doubled "#' #'" roxygen prefix that broke @importFrom is also fixed.
    stopifnot(inherits(LogMessageRequest, "gar_LogMessageRequest"))
    url <- "https://partners.googleapis.com/v2/clientMessages:log"
    # partners.clientMessages.log
    f <- googleAuthR::gar_api_generator(url, "POST",
                                        data_parse_function = function(x) x)
    f(the_body = LogMessageRequest)
}
#' Lists states for current user.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/partners/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c())}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param requestMetadata.userOverrides.ipAddress IP address to use instead of the user's geo-located IP address
#' @param requestMetadata.userOverrides.userId Logged-in user ID to impersonate instead of the user's ID
#' @param requestMetadata.locale Locale to use for the current request
#' @param requestMetadata.partnersSessionId Google Partners session ID
#' @param requestMetadata.experimentIds Experiment IDs the current request belongs to
#' @param requestMetadata.trafficSource.trafficSourceId Identifier to indicate where the traffic comes from
#' @param requestMetadata.trafficSource.trafficSubId Second level identifier to indicate where the traffic comes from
#' @importFrom googleAuthR gar_api_generator
#' @export
userStates.list <- function(requestMetadata.userOverrides.ipAddress = NULL,
                            requestMetadata.userOverrides.userId = NULL,
                            requestMetadata.locale = NULL,
                            requestMetadata.partnersSessionId = NULL,
                            requestMetadata.experimentIds = NULL,
                            requestMetadata.trafficSource.trafficSourceId = NULL,
                            requestMetadata.trafficSource.trafficSubId = NULL) {
    # Endpoint: partners.userStates.list
    url <- "https://partners.googleapis.com/v2/userStates"
    # Optional query parameters; unset (NULL) entries are stripped below.
    pars <- list(
        requestMetadata.userOverrides.ipAddress = requestMetadata.userOverrides.ipAddress,
        requestMetadata.userOverrides.userId = requestMetadata.userOverrides.userId,
        requestMetadata.locale = requestMetadata.locale,
        requestMetadata.partnersSessionId = requestMetadata.partnersSessionId,
        requestMetadata.experimentIds = requestMetadata.experimentIds,
        requestMetadata.trafficSource.trafficSourceId = requestMetadata.trafficSource.trafficSourceId,
        requestMetadata.trafficSource.trafficSubId = requestMetadata.trafficSource.trafficSubId
    )
    call_api <- googleAuthR::gar_api_generator(
        url, "GET",
        pars_args = rmNullObs(pars),
        data_parse_function = function(x) x
    )
    call_api()
}
#' Gets a company.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/partners/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c())}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param companyId The ID of the company to retrieve
#' @param requestMetadata.userOverrides.ipAddress IP address to use instead of the user's geo-located IP address
#' @param requestMetadata.userOverrides.userId Logged-in user ID to impersonate instead of the user's ID
#' @param requestMetadata.locale Locale to use for the current request
#' @param requestMetadata.partnersSessionId Google Partners session ID
#' @param requestMetadata.experimentIds Experiment IDs the current request belongs to
#' @param requestMetadata.trafficSource.trafficSourceId Identifier to indicate where the traffic comes from
#' @param requestMetadata.trafficSource.trafficSubId Second level identifier to indicate where the traffic comes from
#' @param view The view of `Company` resource to be returned
#' @param orderBy How to order addresses within the returned company
#' @param currencyCode If the company's budget is in a different currency code than this one, then the converted budget is converted to this currency code
#' @param address The address to use for sorting the company's addresses by proximity
#' @importFrom googleAuthR gar_api_generator
#' @export
companies.get <- function(companyId,
                          requestMetadata.userOverrides.ipAddress = NULL,
                          requestMetadata.userOverrides.userId = NULL,
                          requestMetadata.locale = NULL,
                          requestMetadata.partnersSessionId = NULL,
                          requestMetadata.experimentIds = NULL,
                          requestMetadata.trafficSource.trafficSourceId = NULL,
                          requestMetadata.trafficSource.trafficSubId = NULL,
                          view = NULL,
                          orderBy = NULL,
                          currencyCode = NULL,
                          address = NULL) {
    # Endpoint: partners.companies.get
    url <- sprintf("https://partners.googleapis.com/v2/companies/%s", companyId)
    # Optional query parameters; unset (NULL) entries are stripped below.
    pars <- list(
        requestMetadata.userOverrides.ipAddress = requestMetadata.userOverrides.ipAddress,
        requestMetadata.userOverrides.userId = requestMetadata.userOverrides.userId,
        requestMetadata.locale = requestMetadata.locale,
        requestMetadata.partnersSessionId = requestMetadata.partnersSessionId,
        requestMetadata.experimentIds = requestMetadata.experimentIds,
        requestMetadata.trafficSource.trafficSourceId = requestMetadata.trafficSource.trafficSourceId,
        requestMetadata.trafficSource.trafficSubId = requestMetadata.trafficSource.trafficSubId,
        view = view,
        orderBy = orderBy,
        currencyCode = currencyCode,
        address = address
    )
    call_api <- googleAuthR::gar_api_generator(
        url, "GET",
        pars_args = rmNullObs(pars),
        data_parse_function = function(x) x
    )
    call_api()
}
#' Lists companies.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/partners/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c())}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param requestMetadata.userOverrides.ipAddress IP address to use instead of the user's geo-located IP address
#' @param requestMetadata.userOverrides.userId Logged-in user ID to impersonate instead of the user's ID
#' @param requestMetadata.locale Locale to use for the current request
#' @param requestMetadata.partnersSessionId Google Partners session ID
#' @param requestMetadata.experimentIds Experiment IDs the current request belongs to
#' @param requestMetadata.trafficSource.trafficSourceId Identifier to indicate where the traffic comes from
#' @param requestMetadata.trafficSource.trafficSubId Second level identifier to indicate where the traffic comes from
#' @param pageSize Requested page size
#' @param pageToken A token identifying a page of results that the server returns
#' @param companyName Company name to search for
#' @param view The view of the `Company` resource to be returned
#' @param minMonthlyBudget.currencyCode The 3-letter currency code defined in ISO 4217
#' @param minMonthlyBudget.units The whole units of the amount
#' @param minMonthlyBudget.nanos Number of nano (10^-9) units of the amount
#' @param maxMonthlyBudget.currencyCode The 3-letter currency code defined in ISO 4217
#' @param maxMonthlyBudget.units The whole units of the amount
#' @param maxMonthlyBudget.nanos Number of nano (10^-9) units of the amount
#' @param industries List of industries the company can help with
#' @param services List of services the company can help with
#' @param languageCodes List of language codes that company can support
#' @param address The address to use when searching for companies
#' @param orderBy How to order addresses within the returned companies
#' @param gpsMotivations List of reasons for using Google Partner Search to get companies
#' @param websiteUrl Website URL that will help to find a better matched company
#' @importFrom googleAuthR gar_api_generator
#' @export
companies.list <- function(requestMetadata.userOverrides.ipAddress = NULL, requestMetadata.userOverrides.userId = NULL,
    requestMetadata.locale = NULL, requestMetadata.partnersSessionId = NULL, requestMetadata.experimentIds = NULL,
    requestMetadata.trafficSource.trafficSourceId = NULL, requestMetadata.trafficSource.trafficSubId = NULL,
    pageSize = NULL, pageToken = NULL, companyName = NULL, view = NULL, minMonthlyBudget.currencyCode = NULL,
    minMonthlyBudget.units = NULL, minMonthlyBudget.nanos = NULL, maxMonthlyBudget.currencyCode = NULL,
    maxMonthlyBudget.units = NULL, maxMonthlyBudget.nanos = NULL, industries = NULL,
    services = NULL, languageCodes = NULL, address = NULL, orderBy = NULL, gpsMotivations = NULL,
    websiteUrl = NULL) {
    url <- "https://partners.googleapis.com/v2/companies"
    # partners.companies.list
    # Collect every optional query parameter; rmNullObs() below strips the
    # entries left at their NULL default so they are not sent to the API.
    pars = list(requestMetadata.userOverrides.ipAddress = requestMetadata.userOverrides.ipAddress,
        requestMetadata.userOverrides.userId = requestMetadata.userOverrides.userId,
        requestMetadata.locale = requestMetadata.locale, requestMetadata.partnersSessionId = requestMetadata.partnersSessionId,
        requestMetadata.experimentIds = requestMetadata.experimentIds, requestMetadata.trafficSource.trafficSourceId = requestMetadata.trafficSource.trafficSourceId,
        requestMetadata.trafficSource.trafficSubId = requestMetadata.trafficSource.trafficSubId,
        pageSize = pageSize, pageToken = pageToken, companyName = companyName, view = view,
        minMonthlyBudget.currencyCode = minMonthlyBudget.currencyCode, minMonthlyBudget.units = minMonthlyBudget.units,
        minMonthlyBudget.nanos = minMonthlyBudget.nanos, maxMonthlyBudget.currencyCode = maxMonthlyBudget.currencyCode,
        maxMonthlyBudget.units = maxMonthlyBudget.units, maxMonthlyBudget.nanos = maxMonthlyBudget.nanos,
        industries = industries, services = services, languageCodes = languageCodes,
        address = address, orderBy = orderBy, gpsMotivations = gpsMotivations, websiteUrl = websiteUrl)
    # Build and immediately invoke the GET request (partners.companies.list).
    f <- googleAuthR::gar_api_generator(url, "GET", pars_args = rmNullObs(pars),
        data_parse_function = function(x) x)
    f()
}
| /B_analysts_sources_github/MarkEdmondson1234/autoGoogleAPI/partners_functions.R | no_license | Irbis3/crantasticScrapper | R | false | false | 13,057 | r | #' Google Partners API
#' Lets advertisers search certified companies and create contact leads with them, and also audits the usage of clients.
#'
#' Auto-generated code by googleAuthR::gar_create_api_skeleton
#' at 2017-03-05 19:57:25
#' filename: /Users/mark/dev/R/autoGoogleAPI/googlepartnersv2.auto/R/partners_functions.R
#' api_json: api_json
#'
#' @details
#' Authentication scopes used are:
#' \itemize{
#' \item
#' }
#'
#' @docType package
#' @name partners_googleAuthR
#'
NULL
## NULL
#' A helper function that tests whether an object is either NULL _or_
#' a list of NULLs
#'
#' @param x Object to test.
#' @return `TRUE` if `x` is `NULL` or every element of `x` is `NULL`,
#'   otherwise `FALSE`.  Note that an empty list yields `TRUE` because
#'   `all()` on a zero-length vector is `TRUE`.
#' @keywords internal
is.NullOb <- function(x) {
  # `||` short-circuits on the scalar NULL check; vapply() is type-stable,
  # unlike sapply(), whose return type depends on its input.
  is.null(x) || all(vapply(x, is.null, logical(1)))
}
#' Recursively remove NULL-like elements from a nested list
#'
#' Drops every element for which [is.NullOb()] is `TRUE`, descending into
#' sub-lists so nested NULL-only entries are pruned as well.
#'
#' @param x A (possibly nested) list.
#' @return The pruned list.
#' @keywords internal
rmNullObs <- function(x) {
  kept <- Filter(function(el) !is.NullOb(el), x)
  lapply(kept, function(el) {
    if (is.list(el)) rmNullObs(el) else el
  })
}
#' Logs a user event.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/partners/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c())}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param LogUserEventRequest The \link{LogUserEventRequest} object to pass to this method
#' @importFrom googleAuthR gar_api_generator
#' @family LogUserEventRequest functions
#' @export
userEvents.log <- function(LogUserEventRequest) {
    # FIX: validate the input before doing any work, so callers fail fast
    # with a clear error (the original validated after building the call).
    # The doubled "#' #'" roxygen prefix that broke @importFrom is also fixed.
    stopifnot(inherits(LogUserEventRequest, "gar_LogUserEventRequest"))
    url <- "https://partners.googleapis.com/v2/userEvents:log"
    # partners.userEvents.log
    f <- googleAuthR::gar_api_generator(url, "POST",
                                        data_parse_function = function(x) x)
    f(the_body = LogUserEventRequest)
}
#' Logs a generic message from the client, such as `Failed to render component`, `Profile page is running slow`, `More than 500 users have accessed this result.`, etc.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/partners/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c())}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param LogMessageRequest The \link{LogMessageRequest} object to pass to this method
#' @importFrom googleAuthR gar_api_generator
#' @family LogMessageRequest functions
#' @export
clientMessages.log <- function(LogMessageRequest) {
    # FIX: validate the input before doing any work, so callers fail fast
    # with a clear error (the original validated after building the call).
    # The doubled "#' #'" roxygen prefix that broke @importFrom is also fixed.
    stopifnot(inherits(LogMessageRequest, "gar_LogMessageRequest"))
    url <- "https://partners.googleapis.com/v2/clientMessages:log"
    # partners.clientMessages.log
    f <- googleAuthR::gar_api_generator(url, "POST",
                                        data_parse_function = function(x) x)
    f(the_body = LogMessageRequest)
}
#' Lists states for current user.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/partners/}{Google Documentation}
#'
#' @details
#' Set \code{options(googleAuthR.scopes.selected = c())} with the scopes
#' required, then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param requestMetadata.userOverrides.ipAddress IP address to use instead of the user's geo-located IP address
#' @param requestMetadata.userOverrides.userId Logged-in user ID to impersonate instead of the user's ID
#' @param requestMetadata.locale Locale to use for the current request
#' @param requestMetadata.partnersSessionId Google Partners session ID
#' @param requestMetadata.experimentIds Experiment IDs the current request belongs to
#' @param requestMetadata.trafficSource.trafficSourceId Identifier to indicate where the traffic comes from
#' @param requestMetadata.trafficSource.trafficSubId Second level identifier to indicate where the traffic comes from
#' @return The API response, parsed as-is (the identity \code{data_parse_function})
#' @importFrom googleAuthR gar_api_generator
#' @export
userStates.list <- function(requestMetadata.userOverrides.ipAddress = NULL, requestMetadata.userOverrides.userId = NULL,
    requestMetadata.locale = NULL, requestMetadata.partnersSessionId = NULL, requestMetadata.experimentIds = NULL,
    requestMetadata.trafficSource.trafficSourceId = NULL, requestMetadata.trafficSource.trafficSubId = NULL) {
  url <- "https://partners.googleapis.com/v2/userStates"
  # partners.userStates.list
  # Collect every argument; rmNullObs() drops the unset (NULL) ones so only
  # supplied values become query parameters.
  pars <- list(requestMetadata.userOverrides.ipAddress = requestMetadata.userOverrides.ipAddress,
    requestMetadata.userOverrides.userId = requestMetadata.userOverrides.userId,
    requestMetadata.locale = requestMetadata.locale, requestMetadata.partnersSessionId = requestMetadata.partnersSessionId,
    requestMetadata.experimentIds = requestMetadata.experimentIds, requestMetadata.trafficSource.trafficSourceId = requestMetadata.trafficSource.trafficSourceId,
    requestMetadata.trafficSource.trafficSubId = requestMetadata.trafficSource.trafficSubId)
  f <- googleAuthR::gar_api_generator(url, "GET", pars_args = rmNullObs(pars),
    data_parse_function = function(x) x)
  f()
}
#' Gets a company.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/partners/}{Google Documentation}
#'
#' @details
#' Set \code{options(googleAuthR.scopes.selected = c())} with the scopes
#' required, then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param companyId The ID of the company to retrieve
#' @param requestMetadata.userOverrides.ipAddress IP address to use instead of the user's geo-located IP address
#' @param requestMetadata.userOverrides.userId Logged-in user ID to impersonate instead of the user's ID
#' @param requestMetadata.locale Locale to use for the current request
#' @param requestMetadata.partnersSessionId Google Partners session ID
#' @param requestMetadata.experimentIds Experiment IDs the current request belongs to
#' @param requestMetadata.trafficSource.trafficSourceId Identifier to indicate where the traffic comes from
#' @param requestMetadata.trafficSource.trafficSubId Second level identifier to indicate where the traffic comes from
#' @param view The view of `Company` resource to be returned
#' @param orderBy How to order addresses within the returned company
#' @param currencyCode If the company's budget is in a different currency code than this one, then the converted budget is converted to this currency code
#' @param address The address to use for sorting the company's addresses by proximity
#' @return The API response, parsed as-is (the identity \code{data_parse_function})
#' @importFrom googleAuthR gar_api_generator
#' @export
companies.get <- function(companyId, requestMetadata.userOverrides.ipAddress = NULL,
    requestMetadata.userOverrides.userId = NULL, requestMetadata.locale = NULL, requestMetadata.partnersSessionId = NULL,
    requestMetadata.experimentIds = NULL, requestMetadata.trafficSource.trafficSourceId = NULL,
    requestMetadata.trafficSource.trafficSubId = NULL, view = NULL, orderBy = NULL,
    currencyCode = NULL, address = NULL) {
  # companyId is interpolated into the resource path; all other arguments
  # become query parameters.
  url <- sprintf("https://partners.googleapis.com/v2/companies/%s", companyId)
  # partners.companies.get
  # rmNullObs() drops unset (NULL) arguments so only supplied values are sent.
  pars <- list(requestMetadata.userOverrides.ipAddress = requestMetadata.userOverrides.ipAddress,
    requestMetadata.userOverrides.userId = requestMetadata.userOverrides.userId,
    requestMetadata.locale = requestMetadata.locale, requestMetadata.partnersSessionId = requestMetadata.partnersSessionId,
    requestMetadata.experimentIds = requestMetadata.experimentIds, requestMetadata.trafficSource.trafficSourceId = requestMetadata.trafficSource.trafficSourceId,
    requestMetadata.trafficSource.trafficSubId = requestMetadata.trafficSource.trafficSubId,
    view = view, orderBy = orderBy, currencyCode = currencyCode, address = address)
  f <- googleAuthR::gar_api_generator(url, "GET", pars_args = rmNullObs(pars),
    data_parse_function = function(x) x)
  f()
}
#' Lists companies.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/partners/}{Google Documentation}
#'
#' @details
#' Set \code{options(googleAuthR.scopes.selected = c())} with the scopes
#' required, then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param requestMetadata.userOverrides.ipAddress IP address to use instead of the user's geo-located IP address
#' @param requestMetadata.userOverrides.userId Logged-in user ID to impersonate instead of the user's ID
#' @param requestMetadata.locale Locale to use for the current request
#' @param requestMetadata.partnersSessionId Google Partners session ID
#' @param requestMetadata.experimentIds Experiment IDs the current request belongs to
#' @param requestMetadata.trafficSource.trafficSourceId Identifier to indicate where the traffic comes from
#' @param requestMetadata.trafficSource.trafficSubId Second level identifier to indicate where the traffic comes from
#' @param pageSize Requested page size
#' @param pageToken A token identifying a page of results that the server returns
#' @param companyName Company name to search for
#' @param view The view of the `Company` resource to be returned
#' @param minMonthlyBudget.currencyCode The 3-letter currency code defined in ISO 4217
#' @param minMonthlyBudget.units The whole units of the amount
#' @param minMonthlyBudget.nanos Number of nano (10^-9) units of the amount
#' @param maxMonthlyBudget.currencyCode The 3-letter currency code defined in ISO 4217
#' @param maxMonthlyBudget.units The whole units of the amount
#' @param maxMonthlyBudget.nanos Number of nano (10^-9) units of the amount
#' @param industries List of industries the company can help with
#' @param services List of services the company can help with
#' @param languageCodes List of language codes that company can support
#' @param address The address to use when searching for companies
#' @param orderBy How to order addresses within the returned companies
#' @param gpsMotivations List of reasons for using Google Partner Search to get companies
#' @param websiteUrl Website URL that will help to find a better matched company
#' @return The API response, parsed as-is (the identity \code{data_parse_function})
#' @importFrom googleAuthR gar_api_generator
#' @export
companies.list <- function(requestMetadata.userOverrides.ipAddress = NULL, requestMetadata.userOverrides.userId = NULL,
    requestMetadata.locale = NULL, requestMetadata.partnersSessionId = NULL, requestMetadata.experimentIds = NULL,
    requestMetadata.trafficSource.trafficSourceId = NULL, requestMetadata.trafficSource.trafficSubId = NULL,
    pageSize = NULL, pageToken = NULL, companyName = NULL, view = NULL, minMonthlyBudget.currencyCode = NULL,
    minMonthlyBudget.units = NULL, minMonthlyBudget.nanos = NULL, maxMonthlyBudget.currencyCode = NULL,
    maxMonthlyBudget.units = NULL, maxMonthlyBudget.nanos = NULL, industries = NULL,
    services = NULL, languageCodes = NULL, address = NULL, orderBy = NULL, gpsMotivations = NULL,
    websiteUrl = NULL) {
  url <- "https://partners.googleapis.com/v2/companies"
  # partners.companies.list
  # rmNullObs() drops unset (NULL) arguments so only supplied values become
  # query parameters.
  pars <- list(requestMetadata.userOverrides.ipAddress = requestMetadata.userOverrides.ipAddress,
    requestMetadata.userOverrides.userId = requestMetadata.userOverrides.userId,
    requestMetadata.locale = requestMetadata.locale, requestMetadata.partnersSessionId = requestMetadata.partnersSessionId,
    requestMetadata.experimentIds = requestMetadata.experimentIds, requestMetadata.trafficSource.trafficSourceId = requestMetadata.trafficSource.trafficSourceId,
    requestMetadata.trafficSource.trafficSubId = requestMetadata.trafficSource.trafficSubId,
    pageSize = pageSize, pageToken = pageToken, companyName = companyName, view = view,
    minMonthlyBudget.currencyCode = minMonthlyBudget.currencyCode, minMonthlyBudget.units = minMonthlyBudget.units,
    minMonthlyBudget.nanos = minMonthlyBudget.nanos, maxMonthlyBudget.currencyCode = maxMonthlyBudget.currencyCode,
    maxMonthlyBudget.units = maxMonthlyBudget.units, maxMonthlyBudget.nanos = maxMonthlyBudget.nanos,
    industries = industries, services = services, languageCodes = languageCodes,
    address = address, orderBy = orderBy, gpsMotivations = gpsMotivations, websiteUrl = websiteUrl)
  f <- googleAuthR::gar_api_generator(url, "GET", pars_args = rmNullObs(pars),
    data_parse_function = function(x) x)
  f()
}
|
# Preparation -------------------------------------------------------------
# load packages
library(tidyverse)
library(lingtypology)
# set working directory
# NOTE(review): setwd() makes the script machine-specific; prefer running it
# from the project directory (or using a project-relative path).
setwd("~/2018_labseminar")
# load data
# NOTE(review): the file has a .csv extension but is read as tab-separated;
# presumably it really is a TSV -- confirm.
map <- read_tsv("allev.csv")
# Evidential perfects -----------------------------------------------------
# subset glottolog points: keep only rows with a known evidentiality value
glot <- map[complete.cases(map$evidentiality),]
# re-order elements in legend
glot$evidentiality <- factor(glot$evidentiality, levels = c("+", "-", "~"))
# draw map
map.feature(glot$Language,
            latitude = glot$Lat,
            longitude = glot$Lon,
            color = glot$group_color,
            features = glot$Group,
            tile = c("Esri.WorldGrayCanvas"),
            stroke.features = glot$evidentiality,
            stroke.title = "Evidential perfect",
            stroke.color = c("black", "honeydew1", "gray"))
# Evidential perfects + all villages by affiliation -----------------------
map.feature(map$Language,
            latitude = map$Lat,
            longitude = map$Lon,
            color = map$group_color,
            opacity = 0.5,
            features = map$Group,
            legend = FALSE,  # was `F`: always spell out logical constants
            tile = c("Esri.WorldGrayCanvas"),
            stroke.features = map$evidentiality,
            stroke.title = "Evidential perfect",
            stroke.color = c("black", "honeydew1", "gray"))
| /code_maps.R | no_license | sverhees/2018_labseminar | R | false | false | 1,378 | r |
# Preparation -------------------------------------------------------------
# load packages
library(tidyverse)
library(lingtypology)
# set working directory
# NOTE(review): setwd() makes the script machine-specific; prefer running it
# from the project directory (or using a project-relative path).
setwd("~/2018_labseminar")
# load data
# NOTE(review): the file has a .csv extension but is read as tab-separated;
# presumably it really is a TSV -- confirm.
map <- read_tsv("allev.csv")
# Evidential perfects -----------------------------------------------------
# subset glottolog points: keep only rows with a known evidentiality value
glot <- map[complete.cases(map$evidentiality),]
# re-order elements in legend
glot$evidentiality <- factor(glot$evidentiality, levels = c("+", "-", "~"))
# draw map
map.feature(glot$Language,
            latitude = glot$Lat,
            longitude = glot$Lon,
            color = glot$group_color,
            features = glot$Group,
            tile = c("Esri.WorldGrayCanvas"),
            stroke.features = glot$evidentiality,
            stroke.title = "Evidential perfect",
            stroke.color = c("black", "honeydew1", "gray"))
# Evidential perfects + all villages by affiliation -----------------------
map.feature(map$Language,
            latitude = map$Lat,
            longitude = map$Lon,
            color = map$group_color,
            opacity = 0.5,
            features = map$Group,
            legend = FALSE,  # was `F`: always spell out logical constants
            tile = c("Esri.WorldGrayCanvas"),
            stroke.features = map$evidentiality,
            stroke.title = "Evidential perfect",
            stroke.color = c("black", "honeydew1", "gray"))
|
#' Inside the sp way
#'
#' Point-in-polygon test via the O'Rourke InPoly routine, invoked through
#' the R C API exactly as the sp package does.
#'
#' For each query point in \code{pts}, the code returned relative to the
#' polygon \code{coords} is one of:
#' 0 : strictly interior to P
#' 1 : strictly exterior to P
#' 3 : a vertex of P
#' 2 : lies on the relative interior of an edge of P
#'
#' @param pts two-column (x, y) matrix of query points
#' @param coords two-column (x, y) matrix describing the polygon ring
#'
#' @return integer vector of point-in-polygon codes, see Details
#' @export
#'
#' @examples
#' inside_sp(matrix(runif(10), ncol = 2), cbind(c(0, .5, .5, 0, 0), c(0, 0, .5, 0, 0)))
inside_sp <- function(pts, coords) {
  # Split the two-column matrices into the x/y vectors the C routine expects.
  query_x <- pts[, 1]
  query_y <- pts[, 2]
  ring_x <- coords[, 1]
  ring_y <- coords[, 2]
  .Call(point_in_polygon_old_sp, query_x, query_y, ring_x, ring_y,
    PACKAGE = "insidesp")
}
| /R/inside_sp.R | permissive | diminutive/insidesp | R | false | false | 777 | r | #' Inside the sp way
#'
#' Call the O'Rourke InPoly function with the R C API, as in the sp package.
#'
#'
#' For each query point 'pts', returns one of the following relative to P 'coords':
#' 0 : is strictly interior to P
#' 1 : is strictly exterior to P
#' 3 : is a vertex of P
#' 2 : lies on the relative interior of an edge of P
#'
#' @param pts matrix of points 2 columns x,y
#' @param coords matrix of polygon ring 2 columns x,y
#'
#' @return integer vector of point in polygon status, see Details
#' @export
#'
#' @examples
#' inside_sp(matrix(runif(10), ncol = 2), cbind(c(0, .5, .5, 0, 0), c(0, 0, .5, 0, 0)))
inside_sp <- function(pts, coords) {
  # Split the two-column matrices into the x/y vectors the C routine expects.
  px <- pts[, 1]
  py <- pts[, 2]
  rx <- coords[, 1]
  ry <- coords[, 2]
  .Call(point_in_polygon_old_sp, px, py, rx, ry, PACKAGE = "insidesp")
}
|
\name{disMat}
\alias{disMat}
\title{
Pairwise Dissimilarity Matrix of Stochastic Textured Surfaces
}
\description{
Compute KL and AKL dissimilarity matrices for the given stochastic textured surface images.
}
\usage{
disMat(imgs, nb, cp=1e-3, subsample = c(1, .5),
standardize = TRUE, keep.fits = FALSE, verbose=FALSE)
}
\arguments{
\item{imgs}{
a 3-dimensional array containing all images.
}
\item{nb}{
the size of the neighborhood. It must be a 1-length or 3-length vector of positive integer(s). If the former, it is the same with a 3-length vector with the same elements.
}
\item{cp}{
the minimal value for the \code{rpart} complexity parameter \code{cp}. The smaller \code{cp} is, the more complex the fitted \code{rpart} models are.
}
\item{subsample}{
the portion of pixels in the given image \code{img} to be used when fitting models (the first component) and computing dissimilarities (the second component). It takes values in (0, 1] (e.g., \code{subsample = c(1, .5)} means that the whole image is used when fitting models, and roughly a half of that is used when compute dissimilarities).
}
\item{standardize}{
if \code{TRUE}, standardize the given image \code{img <- (img - mean(img))/sd(img)}. This reduces the effect of different lighting conditions when images are taken.
}
\item{keep.fits}{
if \code{TRUE}, save all the fitted models in the "fits.Rdata" file under the working directory.
}
\item{verbose}{
if set to \code{TRUE}, output some computational time information.
}
}
\value{
the KL and AKL dissimilarity matrices.
}
\references{
Bui, A.T. and Apley, D.W. (2019b) "An exploratory analysis approach for understanding variation in stochastic textured surfaces", Computational Statistics & Data Analysis, 137, 33-50.
}
\author{
Anh Bui
}
\examples{
## generate images: the first two are similar, the third is different with the other two
phi1 <- c(.6, .6, .5)
phi2 <- c(.35, .35, .3)
imgs <- array(0, c(100,100,3))
for (j in 1:dim(imgs)[3])
imgs[,,j] <- sarGen(phi1 = phi1[j], phi2 = phi2[j], m = 100, n = 100, border = 50)
## compute KL and AKL dissimilarity matrices
disMat(imgs = imgs, nb = 1)
}
| /man/disMat.Rd | no_license | cran/spc4sts | R | false | false | 2,190 | rd | \name{disMat}
\alias{disMat}
\title{
Pairwise Dissimilarity Matrix of Stochastic Textured Surfaces
}
\description{
Compute KL and AKL dissimilarity matrices for the given stochastic textured surface images.
}
\usage{
disMat(imgs, nb, cp=1e-3, subsample = c(1, .5),
standardize = TRUE, keep.fits = FALSE, verbose=FALSE)
}
\arguments{
\item{imgs}{
a 3-dimensional array containing all images.
}
\item{nb}{
the size of the neighborhood. It must be a 1-length or 3-length vector of positive integer(s). If the former, it is the same with a 3-length vector with the same elements.
}
\item{cp}{
the minimal value for the \code{rpart} complexity parameter \code{cp}. The smaller \code{cp} is, the more complex the fitted \code{rpart} models are.
}
\item{subsample}{
the portion of pixels in the given image \code{img} to be used when fitting models (the first component) and computing dissimilarities (the second component). It takes values in (0, 1] (e.g., \code{subsample = c(1, .5)} means that the whole image is used when fitting models, and roughly a half of that is used when compute dissimilarities).
}
\item{standardize}{
if \code{TRUE}, standardize the given image \code{img <- (img - mean(img))/sd(img)}. This reduces the effect of different lighting conditions when images are taken.
}
\item{keep.fits}{
if \code{TRUE}, save all the fitted models in the "fits.Rdata" file under the working directory.
}
\item{verbose}{
if set to \code{TRUE}, output some computational time information.
}
}
\value{
the KL and AKL dissimilarity matrices.
}
\references{
Bui, A.T. and Apley, D.W. (2019b) "An exploratory analysis approach for understanding variation in stochastic textured surfaces", Computational Statistics & Data Analysis, 137, 33-50.
}
\author{
Anh Bui
}
\examples{
## generate images: the first two are similar, the third is different with the other two
phi1 <- c(.6, .6, .5)
phi2 <- c(.35, .35, .3)
imgs <- array(0, c(100,100,3))
for (j in 1:dim(imgs)[3])
imgs[,,j] <- sarGen(phi1 = phi1[j], phi2 = phi2[j], m = 100, n = 100, border = 50)
## compute KL and AKL dissimilarity matrices
disMat(imgs = imgs, nb = 1)
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/add_trees.R
\name{add_trees}
\alias{add_trees}
\title{add_trees}
\usage{
add_trees(phy, nexml = new("nexml"), append_to_existing_otus = FALSE)
}
\arguments{
\item{phy}{a phylo object, multiPhylo object, or list of
multiPhylo to be added to the nexml}
\item{nexml}{a nexml object to which we should append this phylo.
By default, a new nexml object will be created.}
\item{append_to_existing_otus}{logical, indicating if we should
make a new OTU block (default) or append to the existing one.}
}
\value{
a nexml object containing the phy in nexml format.
}
\description{
add_trees
}
\examples{
library("geiger")
data(geospiza)
geiger_nex <- add_trees(geospiza$phy)
}
| /man/add_trees.Rd | permissive | vanderphylum/RNeXML | R | false | false | 755 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/add_trees.R
\name{add_trees}
\alias{add_trees}
\title{add_trees}
\usage{
add_trees(phy, nexml = new("nexml"), append_to_existing_otus = FALSE)
}
\arguments{
\item{phy}{a phylo object, multiPhylo object, or list of
multiPhylo to be added to the nexml}
\item{nexml}{a nexml object to which we should append this phylo.
By default, a new nexml object will be created.}
\item{append_to_existing_otus}{logical, indicating if we should
make a new OTU block (default) or append to the existing one.}
}
\value{
a nexml object containing the phy in nexml format.
}
\description{
add_trees
}
\examples{
library("geiger")
data(geospiza)
geiger_nex <- add_trees(geospiza$phy)
}
|
# Clear the global environment before running.
# NOTE(review): rm(list = ls()) only clears globals (not packages/options)
# and is hostile to anyone sourcing this script; consider dropping it.
rm(list=ls())
library(tidycensus)
library(acs)
library(jsonlite)
library(tidyverse)
library(stringr)
library(DT)
library(sf)
library(plotly)
library(tigris)
library(leaflet)
library(mapview)
# The Census API key is kept out of the source in a local JSON file.
apiKey <- read_json("~/.keys.json")
census_api_key(apiKey$acs)
# ACS table B01001 variables _003-_025 and _027-_049 (presumably the male
# and female age brackets -- confirm against the ACS data dictionary).
ageVarsA1 <- paste0("B01001_0", sprintf("%02d", c(3:25, 27:49)))
# 1990 decennial SF1 table P011 variables P0110001-P0110031.
ageVarsD1 <- paste0("P01100", sprintf("%02d", 1:31))
#' Turn age-bracket labels into integer age ranges.
#'
#' For each label in `x`, pulls out every run of digits and returns the
#' integer sequence min:max of those numbers. A label with a single number
#' that contains "under" or "<" (e.g. "Under 5 years") is treated as the
#' 0-4 bracket; a single-number label without such a marker (e.g.
#' "85 years and over") yields a one-element range.
#'
#' @param x character vector of age-bracket labels.
#' @return list of integer vectors, one per element of `x`.
extractMidpoint <- function(x) {
  # Base-R regex (regmatches/gregexpr) instead of stringr::str_extract_all
  # removes the package dependency; results are identical for "\\d+" on
  # ordinary character input.
  nums <- lapply(regmatches(x, gregexpr("\\d+", x)), as.numeric)
  lapply(seq_along(nums), function(i) {
    z <- nums[[i]]
    # `&&` (scalar, short-circuiting) rather than `&`: this is an if()
    # condition, not a vector operation.
    if (length(z) == 1 && grepl("under|<", tolower(x[i]))) {
      z <- 0:4
    }
    min(z):max(z)
  })
}
# ACS age variables with their parsed age ranges plus an age-group id.
acsAgeDF <- load_variables(2016, "acs5") %>%
  filter(name %in% ageVarsA1) %>%
  mutate(ages=extractMidpoint(label)) %>%
  # Merge the ACS 65-66 and 67-69 brackets into a single 65-69 bracket.
  mutate(ages=lapply(ages, function(z){
    if(length(z) == 2){
      if(all(z == 65:66)){
        z <- 65:69
      }
    }
    if(length(z) == 3){
      if(all(z == 67:69)){
        z <- 65:69
      }
    }
    z
  })) %>%
  # Group id: consecutive integers ordered by the bracket's lower bound.
  mutate(group=as.numeric(as.factor(sapply(ages, min)))) %>%
  rename(variable=name)
# One row per age group, with the bracket's mean age (the upper bound + 1 is
# included in the mean, so e.g. the 5-9 bracket gives 7.5).
acsGroupsDF <- acsAgeDF %>%
  select(ages, group) %>%
  unique %>%
  mutate(ageMean=sapply(ages, function(x){
    mean(c(x, last(x)+1))
  }))
# 1990 census age variables mapped onto the same age groups: each census
# bracket is assigned the group whose range fully contains it.
censusAgeDF <- load_variables(1990, "sf1") %>%
  filter(name %in% ageVarsD1) %>%
  mutate(ages=extractMidpoint(label)) %>%
  mutate(group=sapply(ages, function(a){
    which(sapply(acsGroupsDF$ages, function(z){
      all(a %in% z)
    }))
  })) %>%
  select(-ages) %>%
  left_join(acsGroupsDF, by="group") %>%
  rename(variable=name)
# Download data. The capture-output stuff isn't necessary; it's just to get
# rid of the progress output.
ageCensusDF <- get_decennial(
  geography="county", # county level data
  variables=ageVarsD1, # the 1990 P011 age variables
  year=1990, # 1990 decennial census (earlier comment wrongly said 2014 ACS)
  geometry=FALSE) %>%
  left_join(select(censusAgeDF, -ages, -label, -concept), by="variable") %>%
  # Collapse the detailed brackets into the shared age groups.
  group_by(GEOID, NAME, group, ageMean) %>%
  summarize(estimate=sum(value)) %>%
  ungroup
ageAcsDF <- get_acs(
  geography="county", # county level data
  variables=ageVarsA1, # the B01001 age variables
  year=2016, # 2016 ACS (earlier comment wrongly said 2014 ACS)
  geometry=TRUE) %>%
  left_join(select(acsAgeDF, -ages, -label, -concept), by="variable") %>%
  left_join(select(acsGroupsDF, -ages), by="group") %>%
  group_by(GEOID, NAME, group, ageMean) %>%
  summarize(estimate=sum(estimate)) %>%
  ungroup
# County geometries (one sf row per county), reused for the join below.
geometryDF <- ageAcsDF %>%
  select(geometry, GEOID, NAME) %>%
  unique
# First difference of a length-2 vector; NA for any other length (a county
# present in only one period has no change to report).
diffNA <- function(x) {
  if (length(x) == 2) diff(x) else NA
}
# Population-weighted mean age per county in 1990 and 2014/2016, then the
# change between the two periods, joined back onto the county geometries.
agingDF <- left_join(geometryDF, rbind(
  ageAcsDF %>%
    as_tibble() %>%
    select(-geometry) %>%
    mutate(ageWeight=ageMean*estimate) %>%
    group_by(GEOID, NAME) %>%
    # Weighted mean age: sum(age * population) / sum(population).
    summarize(mAge=sum(ageWeight)/sum(estimate)) %>%
    ungroup() %>%
    mutate(year=2014),
  ageCensusDF %>%
    mutate(ageWeight=ageMean*estimate) %>%
    group_by(GEOID, NAME) %>%
    summarize(mAge=sum(ageWeight)/sum(estimate)) %>%
    ungroup() %>%
    mutate(year=1990)) %>%
  arrange(GEOID, year) %>%
  group_by(GEOID) %>%
  # diffNA() yields NA for counties present in only one of the two years.
  summarize(meanAgeDiff=diffNA(mAge)), by="GEOID") %>%
  mutate(meanAgeDiffQ=cut_number(meanAgeDiff, 6))
# Reversed Spectral palette over a fixed domain, evaluated at x points.
pal2 <- function(x, lo=-7, hi=18){
  pal <- colorNumeric(palette = "Spectral", domain=c(lo,hi), reverse=TRUE)
  pal(seq(lo, hi, length.out = x))
}
mapview(agingDF, zcol="meanAgeDiffQ", col.regions=pal2)
# National population-weighted mean age for each period, for comparison.
rbind(
  ageAcsDF %>%
    as_tibble() %>%
    select(-geometry) %>%
    mutate(ageWeight=ageMean*estimate) %>%
    summarize(mAge=sum(ageWeight)/sum(estimate)) %>%
    mutate(year=2014),
  ageCensusDF %>%
    mutate(ageWeight=ageMean*estimate) %>%
    summarize(mAge=sum(ageWeight)/sum(estimate)) %>%
    mutate(year=1990))
| /EDA/agingPopulation.R | no_license | nmmarquez/hispanicACS | R | false | false | 4,005 | r | rm(list=ls())
library(tidycensus)
library(acs)
library(jsonlite)
library(tidyverse)
library(stringr)
library(DT)
library(sf)
library(plotly)
library(tigris)
library(leaflet)
library(mapview)
# The Census API key is kept out of the source in a local JSON file.
apiKey <- read_json("~/.keys.json")
census_api_key(apiKey$acs)
# ACS table B01001 variables _003-_025 and _027-_049 (presumably the male
# and female age brackets -- confirm against the ACS data dictionary).
ageVarsA1 <- paste0("B01001_0", sprintf("%02d", c(3:25, 27:49)))
# 1990 decennial SF1 table P011 variables P0110001-P0110031.
ageVarsD1 <- paste0("P01100", sprintf("%02d", 1:31))
#' Turn age-bracket labels into integer age ranges.
#'
#' For each label in `x`, pulls out every run of digits and returns the
#' integer sequence min:max of those numbers. A label with a single number
#' that contains "under" or "<" (e.g. "Under 5 years") is treated as the
#' 0-4 bracket; a single-number label without such a marker (e.g.
#' "85 years and over") yields a one-element range.
#'
#' @param x character vector of age-bracket labels.
#' @return list of integer vectors, one per element of `x`.
extractMidpoint <- function(x) {
  # Base-R regex (regmatches/gregexpr) instead of stringr::str_extract_all
  # removes the package dependency; results are identical for "\\d+" on
  # ordinary character input.
  nums <- lapply(regmatches(x, gregexpr("\\d+", x)), as.numeric)
  lapply(seq_along(nums), function(i) {
    z <- nums[[i]]
    # `&&` (scalar, short-circuiting) rather than `&`: this is an if()
    # condition, not a vector operation.
    if (length(z) == 1 && grepl("under|<", tolower(x[i]))) {
      z <- 0:4
    }
    min(z):max(z)
  })
}
# ACS age variables with their parsed age ranges plus an age-group id.
acsAgeDF <- load_variables(2016, "acs5") %>%
  filter(name %in% ageVarsA1) %>%
  mutate(ages=extractMidpoint(label)) %>%
  # Merge the ACS 65-66 and 67-69 brackets into a single 65-69 bracket.
  mutate(ages=lapply(ages, function(z){
    if(length(z) == 2){
      if(all(z == 65:66)){
        z <- 65:69
      }
    }
    if(length(z) == 3){
      if(all(z == 67:69)){
        z <- 65:69
      }
    }
    z
  })) %>%
  # Group id: consecutive integers ordered by the bracket's lower bound.
  mutate(group=as.numeric(as.factor(sapply(ages, min)))) %>%
  rename(variable=name)
# One row per age group, with the bracket's mean age (the upper bound + 1 is
# included in the mean, so e.g. the 5-9 bracket gives 7.5).
acsGroupsDF <- acsAgeDF %>%
  select(ages, group) %>%
  unique %>%
  mutate(ageMean=sapply(ages, function(x){
    mean(c(x, last(x)+1))
  }))
# 1990 census age variables mapped onto the same age groups: each census
# bracket is assigned the group whose range fully contains it.
censusAgeDF <- load_variables(1990, "sf1") %>%
  filter(name %in% ageVarsD1) %>%
  mutate(ages=extractMidpoint(label)) %>%
  mutate(group=sapply(ages, function(a){
    which(sapply(acsGroupsDF$ages, function(z){
      all(a %in% z)
    }))
  })) %>%
  select(-ages) %>%
  left_join(acsGroupsDF, by="group") %>%
  rename(variable=name)
# Download data. The capture-output stuff isn't necessary; it's just to get
# rid of the progress output.
ageCensusDF <- get_decennial(
  geography="county", # county level data
  variables=ageVarsD1, # the 1990 P011 age variables
  year=1990, # 1990 decennial census (earlier comment wrongly said 2014 ACS)
  geometry=FALSE) %>%
  left_join(select(censusAgeDF, -ages, -label, -concept), by="variable") %>%
  # Collapse the detailed brackets into the shared age groups.
  group_by(GEOID, NAME, group, ageMean) %>%
  summarize(estimate=sum(value)) %>%
  ungroup
ageAcsDF <- get_acs(
  geography="county", # county level data
  variables=ageVarsA1, # the B01001 age variables
  year=2016, # 2016 ACS (earlier comment wrongly said 2014 ACS)
  geometry=TRUE) %>%
  left_join(select(acsAgeDF, -ages, -label, -concept), by="variable") %>%
  left_join(select(acsGroupsDF, -ages), by="group") %>%
  group_by(GEOID, NAME, group, ageMean) %>%
  summarize(estimate=sum(estimate)) %>%
  ungroup
# County geometries (one sf row per county), reused for the join below.
geometryDF <- ageAcsDF %>%
  select(geometry, GEOID, NAME) %>%
  unique
# First difference of a length-2 vector; NA for any other length (a county
# present in only one period has no change to report).
diffNA <- function(x) {
  if (length(x) == 2) diff(x) else NA
}
# Population-weighted mean age per county in 1990 and 2014/2016, then the
# change between the two periods, joined back onto the county geometries.
agingDF <- left_join(geometryDF, rbind(
  ageAcsDF %>%
    as_tibble() %>%
    select(-geometry) %>%
    mutate(ageWeight=ageMean*estimate) %>%
    group_by(GEOID, NAME) %>%
    # Weighted mean age: sum(age * population) / sum(population).
    summarize(mAge=sum(ageWeight)/sum(estimate)) %>%
    ungroup() %>%
    mutate(year=2014),
  ageCensusDF %>%
    mutate(ageWeight=ageMean*estimate) %>%
    group_by(GEOID, NAME) %>%
    summarize(mAge=sum(ageWeight)/sum(estimate)) %>%
    ungroup() %>%
    mutate(year=1990)) %>%
  arrange(GEOID, year) %>%
  group_by(GEOID) %>%
  # diffNA() yields NA for counties present in only one of the two years.
  summarize(meanAgeDiff=diffNA(mAge)), by="GEOID") %>%
  mutate(meanAgeDiffQ=cut_number(meanAgeDiff, 6))
# Reversed Spectral palette over a fixed domain, evaluated at x points.
pal2 <- function(x, lo=-7, hi=18){
  pal <- colorNumeric(palette = "Spectral", domain=c(lo,hi), reverse=TRUE)
  pal(seq(lo, hi, length.out = x))
}
mapview(agingDF, zcol="meanAgeDiffQ", col.regions=pal2)
# National population-weighted mean age for each period, for comparison.
rbind(
  ageAcsDF %>%
    as_tibble() %>%
    select(-geometry) %>%
    mutate(ageWeight=ageMean*estimate) %>%
    summarize(mAge=sum(ageWeight)/sum(estimate)) %>%
    mutate(year=2014),
  ageCensusDF %>%
    mutate(ageWeight=ageMean*estimate) %>%
    summarize(mAge=sum(ageWeight)/sum(estimate)) %>%
    mutate(year=1990))
|
##############################################
# Generation of cut-offs for the Unidos Strategy
##############################################
################## LIBRARIES
library(rgdal)
library(rgeos)
library(sp)
library(ggmap)
library(readr)
library(readxl)
library(dplyr)
library(stringi)
library(stringr)
library(ggplot2)
library(phonics)
library(foreign)
library(reader)
library(tidyr)
library(reshape2)
library(data.table)
library(sqldf)
library(eeptools)
library(summarytools)
options(scipen=999) ### Avoid scientific notation in printed output
# This code follows the instructions in the document "Step by step for the
# generation of Unidos Strategy cut-offs".
# DATA #
#######
Entradas="/Volumes/Macintosh HD/Users/andresromeroparra/Google Drive/DPS/2020/Generacion de cortes/Entradas" # Input directory holding the required files.
Salidas ="/Volumes/Macintosh HD/Users/andresromeroparra/Google Drive/DPS/2020/Generacion de cortes/Salidas" # Output directory for the generated files.
setwd(Entradas) # Work from the input directory.
# Files the input folder should contain (in parentheses, the names they take
# in this code):
# 1. Unidos form flat table (FORMULARIO)
# 2. Achievements computed per household (Logros_Hogar)
# 3. Achievements computed per household member (Logros_Integrantes)
# 4. Multidimensional Poverty Index computation (IPM)
# 5. Poverty-line computation (LP)
# 6. Municipalities of Colombia (MUNICIPIOS)
# 7. Accompaniment status (ESTADO)
# Import the files used to generate the official Unidos Strategy cut-offs.
Caracterizacion_DNP_20200211 = read_delim("/Volumes/Macintosh HD/Users/andresromeroparra/Google Drive/DPS/2020/Datos/UNIDOS_2019/Caracterizacion_DNP_20200211.txt","|", escape_double = FALSE, locale = locale(encoding = "ISO-8859-1"), trim_ws = TRUE)
FORMULARIO =read_delim("Caracterizacion_DNP_20200319.txt","|", escape_double = FALSE, locale = locale(encoding = "ISO-8859-1"),trim_ws = TRUE)
Logros_Hogar =read_excel("Calculos 20200319.xlsx", sheet = "LogrosHogar", skip = 1)
Logros_Integrantes=read_excel("Calculos 20200319.xlsx", sheet = "LogrosIntegrante", skip = 1)
IPM =read_excel("Calculos 20200319.xlsx", sheet = "IPM", skip = 0)
LP =read_excel("Calculos 20200319.xlsx", sheet = "LP", skip = 0)
#ESTADO =read_excel("Estado Hogares Piloto.xlsx")
ESTADO =read_excel("Estado Hogares Piloto_20200430.xlsx")
MUNICIPIOS =read_excel("MUNICIPIOS.xlsx", sheet = "Municipios", skip = 10)
# Keep only the first 1122 rows and the code/department/municipality columns.
MUNICIPIOS =MUNICIPIOS[1:1122,c("Código...3","Nombre...2","Nombre...4")]
# Rename variables #
#####################
setnames(FORMULARIO, old = c("A01","EdadCaracterizacion","A04"),
         new = c("idHogar","EdadActual","Zona")) # Column rename
# Suffix the member-level achievement/date columns with "_I" so they do not
# clash with the household-level columns after the merge.
setnames(Logros_Integrantes, old = grep("logro|fecha",names(Logros_Integrantes),value = TRUE),
         new = paste(grep("logro|fecha",names(Logros_Integrantes),value = TRUE),"I",sep = "_")) # Column rename
setnames(IPM, old = c("fechaCalculo"),
         new = c("fechaCalculo_IPM")) # Column rename
setnames(LP, old = c("fechaCalculo"),
         new = c("fechaCalculo_LP")) # Column rename
setnames(ESTADO, old = c("Estado","IdHogar"),
         new = c("EstadoHogar","idHogar")) # Column rename
setnames(MUNICIPIOS, old = c("Código...3","Nombre...2","Nombre...4"),
         new = c("CodigoMunicipio","Departamento","Municipio")) # Column rename
# Recode variables #
##########################
##########################
# Count how many of the E15 disability items (excluding the 8th) equal 1.
FORMULARIO$Discapacidad=rowSums(FORMULARIO[grep("E15", names(FORMULARIO), value = TRUE)][-8]==1)
FORMULARIO$Discapacidad=ifelse(FORMULARIO$Discapacidad>0,1,0) # Dichotomous disability flag (1 if any disability is reported, 0 otherwise)
# Life-cycle stage derived from current age.
# NOTE(review): the fall-through value is the literal string "NA", not a
# missing value -- presumably intentional; confirm.
FORMULARIO= FORMULARIO %>% mutate(CICLOVITAL = ifelse(EdadActual<=5, "1-PrimeraInfancia",
                                  ifelse((EdadActual>=6 & EdadActual<=11),"2-Ninez",
                                  ifelse((EdadActual>=12 & EdadActual<=17),"3-Adolescencia",
                                  ifelse((EdadActual>=18 & EdadActual<=24),"4-Juventud",
                                  ifelse((EdadActual>=25 & EdadActual<=59),"5-Adulto",
                                  ifelse(EdadActual>59,"6-AdultoMayor",
                                  "NA")))))))
Variables_add= c("E01_a","E01_b","E01_c","E01_d","E02","E03","E05","E06","E08","CICLOVITAL","Discapacidad") # These variables are used in later calculations
# Column list for the final database. Household and member ids, geography,
# household-level achievements (logro*), member-level achievements (*_I),
# IPM indicators, LP and accompaniment status.
# NOTE(review): logro12 and logro19 are absent from both lists -- presumably
# they are not computed; confirm.
Variables=c("idHogar",
            "idIntegranteHogar",
            "Departamento",
            "CodigoMunicipio",
            "Municipio",
            "Zona",
            "EdadActual",
            "E11",
            "logro01",
            "logro02",
            "logro03",
            "logro04",
            "logro05",
            "logro06",
            "logro07",
            "logro08",
            "logro09",
            "logro10",
            "logro11",
            "logro13",
            "logro14",
            "logro15",
            "logro16",
            "logro17",
            "logro18",
            "logro20",
            "logro21",
            "logro22",
            "logro23",
            "logro24",
            "logro25",
            "logro26",
            "logro27",
            "logro28",
            "fechaCalculo",
            "logro01_I",
            "logro02_I",
            "logro03_I",
            "logro04_I",
            "logro05_I",
            "logro06_I",
            "logro07_I",
            "logro08_I",
            "logro09_I",
            "logro10_I",
            "logro11_I",
            "logro13_I",
            "logro14_I",
            "logro15_I",
            "logro16_I",
            "logro17_I",
            "logro18_I",
            "logro20_I",
            "logro21_I",
            "logro22_I",
            "logro23_I",
            "logro24_I",
            "logro25_I",
            "logro26_I",
            "logro27_I",
            "logro28_I",
            "fechaCalculo_I",
            "indLogroEducativo",
            "indAlfabetismo",
            "indAsistenciaEscolar",
            "indRezagoEscolar",
            "indCuidadoInfancia",
            "indTrabajoInfantil",
            "indDesempleo",
            "indEmpleoInformal",
            "indAseguramientoSalud",
            "indAccesosalud",
            "indAccesoAgua",
            "indEliminacionExcretas",
            "indPisosVivienda",
            "indParedesExteriores",
            "indHacinamientoCritico",
            "fechaCalculo_IPM",
            "calculoIPM",
            "denominacionIPM",
            "denominacionLP",
            "fechaCalculo_LP",
            "EstadoHogar") # List of variables defining the final database
# Household-level merge: achievements, IPM, LP, accompaniment status and one
# form row per household (FORMULARIO de-duplicated on idHogar).
DATA_Hogares=Reduce(function(x,y) merge(x = x, y = y, by = c("idHogar"), all.x=TRUE), list(Logros_Hogar,IPM,LP,ESTADO[c("idHogar","EstadoHogar")],FORMULARIO[!duplicated(FORMULARIO$idHogar),])) # Merge of household data
DATA_Hogares = DATA_Hogares[intersect(Variables,names(DATA_Hogares))] # Keep only the columns in the variable list
DATA_Hogares = select(DATA_Hogares, -c(idIntegranteHogar, EdadActual, E11)) # Keep only the household-level columns
# Member-level merge. idHogar and Zona are dropped from FORMULARIO to avoid
# duplicating them.
DATA_Integrantes=Reduce(function(x,y) merge(x = x, y = y, by = c("idIntegranteHogar"), all.x=TRUE), list(Logros_Integrantes,select(FORMULARIO, -c(idHogar,Zona)))) # Merge of member data
DATA_Integrantes=DATA_Integrantes[intersect(c(Variables,Variables_add),names(DATA_Integrantes))] # Order the columns following the variable list
# Join household and member data.
DATA=merge(select(DATA_Integrantes, -c(CodigoMunicipio)),DATA_Hogares, by="idHogar", all.x=TRUE)
DATA=merge(DATA, MUNICIPIOS, by="CodigoMunicipio", all.x=TRUE) # Add department and municipality name columns
setdiff(Variables,names(DATA)) # Verify that every expected variable (column) is present in the data frame
DATA=DATA[c(Variables_add,intersect(Variables,names(DATA)))] # Keep the variables relevant for the calculations
rm(DATA_Hogares,DATA_Integrantes)
#1. Procedure: generation of the certified cut
#a) Members whose IPM or LP classification is not a valid denomination.
EXCLUSION_CALCULO_IPM_LP=DATA[!(DATA$denominacionIPM %in% c("NO POBRE","POBRE")) |
!(DATA$denominacionLP %in% c("NO POBRE","POBRE","POBRE EXTREMO")), c("idHogar","idIntegranteHogar","denominacionIPM","denominacionLP")]
List=grep("^logro",names(DATA),value = TRUE)#Columns scanned for rows without a valid logro value
#A row is excluded when any logro column holds a value outside the four valid
#states. mutate_each()/funs() are defunct in current dplyr; base lapply() over
#the selected columns is an exact drop-in for the same rowSums() test.
sin_calculo=rowSums(!as.data.frame(lapply(DATA[List], `%in%`, c("SIN DATO","POR ALCANZAR","ALCANZADO","NO APLICA")))) >= 1L
EXCLUSION_CALCULO_LOGROS=DATA[sin_calculo, c("idHogar","idIntegranteHogar",List)]
DATA$EXCLUSION_SINCALCULO_IPM_LP_LOGROS=ifelse((DATA$idIntegranteHogar %in% EXCLUSION_CALCULO_IPM_LP$idIntegranteHogar) |
(DATA$idIntegranteHogar %in% EXCLUSION_CALCULO_LOGROS$idIntegranteHogar),1,0)
#b) Household-level logros (all except logro 4) that answered "SIN DATO".
List=grep("^logro",names(DATA),value = TRUE)[c(1:3,5:26)]
sin_dato=rowSums(as.data.frame(lapply(DATA[List], `%in%`, "SIN DATO"))) >= 1L
EXCLUSION_SIN_DATO=DATA[sin_dato,]#Rows with at least one "SIN DATO" answer
EXCLUSION_SIN_DATO=EXCLUSION_SIN_DATO[,c("idHogar","idIntegranteHogar","logro27")]
DATA$EXCLUSION_SIN_DATO=ifelse((DATA$idIntegranteHogar %in% EXCLUSION_SIN_DATO$idIntegranteHogar),1,0)
#c)
#1.
#Builds a Soundex phonetic key from free-text name fields so near-identical
#spellings collapse to the same code before duplicate detection.
fonetico=function(text){
#Map ¥ -> Y and Ð -> D character by character. The previous
#gsub("¥|Ð","Y|D",text) replaced EITHER character with the literal
#three-character string "Y|D", corrupting the phonetic key.
text=chartr("¥Ð","YD",text)
text=str_replace_all(gsub("`|\\'", "", toupper(text)),"[[:punct:]]", "")#Uppercase, strip quotes and punctuation
text=str_replace_all(text,"[^[:graph:]]", " ")#Collapse non-printable characters to spaces
text=stri_trans_general(text,"Latin-ASCII")#Remove accents/diacritics
text=soundex(text, maxCodeLen = 4L, clean = FALSE)#4-character Soundex code (phonics pkg)
return(text)
}
#Phonetic duplicate detection: soundex of the four name fields plus birth
#date (E02). Compute the key once (the original built the identical paste()
#twice, calling fonetico() eight times) and flag ALL members of each
#duplicate group via duplicated() from both directions.
clave_fonetica=paste(fonetico(DATA$E01_a),
fonetico(DATA$E01_b),
fonetico(DATA$E01_c),
fonetico(DATA$E01_d),
DATA$E02)
EXCLUSION_UNICO_FONETICO=DATA[duplicated(clave_fonetica)|duplicated(clave_fonetica,fromLast=TRUE),]
DATA$EXCLUSION_DUPLICIDAD_FONETICO=ifelse((DATA$idIntegranteHogar %in% EXCLUSION_UNICO_FONETICO$idIntegranteHogar),1,0)
#2. Duplicate identity documents: same document number (E05) + type (E06).
EXCLUSION_SIN_DOCUMENTO_UNICO=DATA[duplicated(paste(DATA$E05,DATA$E06)),]
#Pairs E05==0 & E06==9 are exempt — presumably the "no document" placeholder;
#confirm against the form's codebook.
EXCLUSION_SIN_DOCUMENTO_UNICO=EXCLUSION_SIN_DOCUMENTO_UNICO[!(EXCLUSION_SIN_DOCUMENTO_UNICO$E05==0 & EXCLUSION_SIN_DOCUMENTO_UNICO$E06==9),]#Keep only real duplicated documents
EXCLUSION_SIN_DOCUMENTO_UNICO=EXCLUSION_SIN_DOCUMENTO_UNICO[c("idHogar","idIntegranteHogar","E01_a","E01_b","E01_c","E01_d","E02","E05","E06")]
DATA$EXCLUSION_DUPLICIDAD_DOCUMENTO=ifelse((DATA$idIntegranteHogar %in% EXCLUSION_SIN_DOCUMENTO_UNICO$idIntegranteHogar),1,0)
#d) Flag households where every member is under 14. NOTE: group_by() leaves
#DATA as a grouped tibble from here until the as.data.frame() below.
DATA= DATA %>% group_by(idHogar) %>% mutate(EXCLUSION_HOGAR_MENOR_14 = ifelse(all(EdadActual<14),1,0))#Households whose members are all younger than 14
#e) Drop households whose status is voluntary renunciation or not located.
EXCLUSION_ESTADO_E=DATA[DATA$EstadoHogar %in% c("Renuncia Voluntaria","No Localizado"),c("idHogar","idIntegranteHogar","EstadoHogar")]#Records with status "Renuncia Voluntaria" or "No Localizado"
DATA$EXCLUSION_ESTADO_RENUNCIA_NOLOCALIZADO=ifelse((DATA$idIntegranteHogar %in% EXCLUSION_ESTADO_E$idIntegranteHogar),1,0)
#f) Drop households whose accompaniment is suspended due to household merge.
EXCLUSION_ESTADO_F=DATA[DATA$EstadoHogar %in% c("Con suspensión del Acompañamiento Unión de Hogares"),]#Records with suspended-accompaniment status
DATA$EXCLUSION_ESTADO_SUSPENCION=ifelse((DATA$idIntegranteHogar %in% EXCLUSION_ESTADO_F$idIntegranteHogar),1,0)
#Provisional cut: members with no exclusion flag set so far (used below to
#scope the externally reported exclusions).
DATA=as.data.frame(DATA)
Corte_Unidos=DATA[DATA$EXCLUSION_SINCALCULO_IPM_LP_LOGROS %in% 0 & DATA$EXCLUSION_SIN_DATO %in% 0 & DATA$EXCLUSION_DUPLICIDAD_FONETICO %in% 0 & DATA$EXCLUSION_DUPLICIDAD_DOCUMENTO %in% 0 & DATA$EXCLUSION_HOGAR_MENOR_14 %in% 0 & DATA$EXCLUSION_ESTADO_RENUNCIA_NOLOCALIZADO %in% 0 & DATA$EXCLUSION_ESTADO_SUSPENCION %in% 0,]
#-- Externally reported exclusions -----------------------------------------
#The review workbook is read ONCE and filtered for each flag (the original
#re-read the same file before every filter, four reads in total).
#Colchon (buffer): members marked NUEVO in the review file.
diff_corte_unidos_12062020=read_excel("diff_corte_unidos_12062020.xlsx")
EXCLUSION_EXT_COLCHON=diff_corte_unidos_12062020[diff_corte_unidos_12062020$COLCHON %in% "NUEVO","idIntegranteHogar"]
DATA$EXCLUSION_EXT_COLCHON=ifelse((DATA$idIntegranteHogar %in% EXCLUSION_EXT_COLCHON$idIntegranteHogar),1,0)
#Non-NUEVO rows feed the remaining three flags.
diff_no_nuevo=diff_corte_unidos_12062020[!diff_corte_unidos_12062020$COLCHON %in% "NUEVO",]
#Externally reported phonetic duplicates, restricted to the provisional cut.
diff_fonetico=diff_no_nuevo[diff_no_nuevo$DUPLICADO %in% "FONETICO",]
EXCLUSION_EXT_FONETIC=DATA[DATA$idIntegranteHogar %in% intersect(diff_fonetico$idIntegranteHogar,Corte_Unidos$idIntegranteHogar),]
DATA$EXCLUSION_EXT_FONETIC=ifelse((DATA$idIntegranteHogar %in% EXCLUSION_EXT_FONETIC$idIntegranteHogar),1,0)
#TODO: the review of twins (gemelos y mellizos) is still undefined in the
#paso-a-paso document.
#Rows whose Exclusión column differs from "SI".
diff_no_fonetico=diff_no_nuevo[!diff_no_nuevo$DUPLICADO %in% "FONETICO",]
diff_excl_no=diff_no_fonetico[!diff_no_fonetico$Exclusión %in% "SI",]
EXCLUSION_EXT_EXCLUSION=DATA[DATA$idIntegranteHogar %in% intersect(diff_excl_no$idIntegranteHogar,Corte_Unidos$idIntegranteHogar),]
DATA$EXCLUSION_EXT_EXCLUSION=ifelse((DATA$idIntegranteHogar %in% EXCLUSION_EXT_EXCLUSION$idIntegranteHogar),1,0)
#Rows whose Exclusión column equals "SI".
diff_excl_si=diff_no_fonetico[diff_no_fonetico$Exclusión %in% "SI",]
EXCLUSION_EXT_EXCLUSIONSI=DATA[DATA$idIntegranteHogar %in% intersect(diff_excl_si$idIntegranteHogar,Corte_Unidos$idIntegranteHogar),]
DATA$EXCLUSION_EXT_EXCLUSIONSI=ifelse((DATA$idIntegranteHogar %in% EXCLUSION_EXT_EXCLUSIONSI$idIntegranteHogar),1,0)
#Preserve the final state the original code left behind (the "SI" subset).
diff_corte_unidos_12062020=diff_excl_si
#Phonetic duplicates validated against BDUA/registraduria: exclude any row
#whose validation column does not contain "Ok".
Revision_duplicados <- read_excel("Revision_duplicados.xlsx")
Revision_duplicados$Exclusion=ifelse(!grepl("Ok",Revision_duplicados$`Validación Reportado como duplicado`),1,0)
EXCLUSION_EXT_BDUA_registraduria=Revision_duplicados[Revision_duplicados$Exclusion %in% 1,c("idIntegranteHogar","Exclusion")]
DATA$EXCLUSION_EXT_BDUA_registraduria=ifelse((DATA$idIntegranteHogar %in% EXCLUSION_EXT_BDUA_registraduria$idIntegranteHogar),1,0)
#Variables according to the dictionary.
DATA=as.data.frame(DATA)
#Final cut: members with every exclusion flag at 0. NOTE: this version omits
#EXCLUSION_DUPLICIDAD_FONETICO; the internal phonetic flag is replaced by the
#externally validated EXT flags — confirm this is intentional.
Corte_Unidos=DATA[DATA$EXCLUSION_SINCALCULO_IPM_LP_LOGROS %in% 0 &
DATA$EXCLUSION_SIN_DATO %in% 0 &
DATA$EXCLUSION_DUPLICIDAD_DOCUMENTO %in% 0 &
DATA$EXCLUSION_HOGAR_MENOR_14 %in% 0 &
DATA$EXCLUSION_ESTADO_RENUNCIA_NOLOCALIZADO %in% 0 &
DATA$EXCLUSION_ESTADO_SUSPENCION %in% 0 &
DATA$EXCLUSION_EXT_COLCHON %in% 0 &
DATA$EXCLUSION_EXT_FONETIC %in% 0 &
DATA$EXCLUSION_EXT_EXCLUSION %in% 0 &
DATA$EXCLUSION_EXT_EXCLUSIONSI %in% 0 &
DATA$EXCLUSION_EXT_BDUA_registraduria %in% 0,]
#Alternative cut that ALSO applies the internal phonetic duplicate flag.
#NOTE(review): Corte_Unidos_1 is not used anywhere below — dead result?
Corte_Unidos_1=DATA[DATA$EXCLUSION_SINCALCULO_IPM_LP_LOGROS %in% 0 &
DATA$EXCLUSION_SIN_DATO %in% 0 &
DATA$EXCLUSION_DUPLICIDAD_FONETICO %in% 0 &
DATA$EXCLUSION_DUPLICIDAD_DOCUMENTO %in% 0 &
DATA$EXCLUSION_HOGAR_MENOR_14 %in% 0 &
DATA$EXCLUSION_ESTADO_RENUNCIA_NOLOCALIZADO %in% 0 &
DATA$EXCLUSION_ESTADO_SUSPENCION %in% 0 &
DATA$EXCLUSION_EXT_COLCHON %in% 0 &
DATA$EXCLUSION_EXT_FONETIC %in% 0 &
DATA$EXCLUSION_EXT_EXCLUSION %in% 0 &
DATA$EXCLUSION_EXT_EXCLUSIONSI %in% 0 &
DATA$EXCLUSION_EXT_BDUA_registraduria %in% 0,]
#Household size per idHogar. NOTE: Total_personas is not in Variables, so the
#next line immediately drops it — confirm whether it should be kept.
Corte_Unidos=Corte_Unidos %>% group_by(idHogar) %>% mutate(Total_personas=n())
Corte_Unidos=Corte_Unidos[Variables]
#Keep only the form rows of members that survived the final cut.
FORMULARIO <- FORMULARIO[FORMULARIO$idIntegranteHogar %in% Corte_Unidos$idIntegranteHogar, ]
setwd(Salidas)#Directory for the outputs generated by the code
#Date-stamped CSV file name, e.g. "DATA_24062020.csv".
nombre_csv <- function(prefijo) paste0(prefijo, "_", format(Sys.time(), "%d%m%Y"), ".csv")
write.csv(Corte_Unidos, file = nombre_csv("Corte_Unidos"), row.names = FALSE)
write.csv(DATA, file = nombre_csv("DATA"), row.names = FALSE)
write.csv(FORMULARIO, file = nombre_csv("SABANA_CORTE"), row.names = FALSE)
#After computing the exclusion flags, drop data frames that are empty or
#entirely NA from the global environment.
to.rm <- unlist(eapply(.GlobalEnv, function(x) is.data.frame(x) && (nrow(x) == 0 || all(is.na(x)))))
rm(list = names(to.rm)[to.rm], envir = .GlobalEnv)
#Export every remaining object whose name contains "EXCLUSION". Fixes vs the
#original: sep="" is removed (write.csv2 has a fixed separator and warns
#"attempt to set 'sep' ignored"); iterating over the vector itself avoids the
#1:m trap, which would error out when no EXCLUSION object remains.
exclusion_objs = grep("EXCLUSION", ls(), value = TRUE)
for (obj in exclusion_objs) {
write.csv2(
get(obj),
file = paste0(obj, "_", format(Sys.time(), "%d%m%Y"), ".csv"),
row.names = FALSE)
}
#2. Generation of descriptive statistics.
view(dfSummary(as.data.frame(Corte_Unidos)))#This line demands significant computing power
rm(list = ls()[!ls() %in% grep("Salidas|Entradas|DATA|Variables|MUNICIPIOS",ls(),value = TRUE)])#Remove objects not needed for later calculations (pattern match, so e.g. DATA_* also survive)
#3. Procedure: generation of frequency tables.
INTEGRANTES=DATA[c("CodigoMunicipio")] %>% group_by(CodigoMunicipio) %>% summarise(TOTALPERSONAS=n())
HOGARES=DATA[!duplicated(DATA$idHogar),c("idHogar","CodigoMunicipio")] %>% group_by(CodigoMunicipio) %>% summarise(TOTALHOGARES=n())
#Households per zone and municipality (one row per household).
ZONA= reshape2::dcast(data=DATA[!duplicated(DATA$idHogar),],
CodigoMunicipio ~ Zona,
fun.aggregate = length,
value.var = "Zona")#Generates column-wise frequencies of the given variable
setnames(ZONA, old = c("1","2","3"),
new = c("CABECERA MUNICIPAL","CENTRO POBLADO","RURAL DISPERSO"))
#Household-level logro frequencies: long format, count per (municipality,
#logro, state), then one column per state. gather()/spread() are superseded
#by pivot_longer()/pivot_wider() — consider migrating.
LOGROS=DATA[c("CodigoMunicipio", grep("^logro",names(DATA),value = TRUE)[1:26])] %>%
gather(category, val, -c(CodigoMunicipio)) %>%
na.omit() %>%
group_by(CodigoMunicipio, category, val) %>%
summarise(new = n()) %>%
spread(val, new, fill = 0)
ALCANZADO =LOGROS[c("CodigoMunicipio","category","ALCANZADO")] %>% spread(category, ALCANZADO)
POR_ALCANZAR =LOGROS[c("CodigoMunicipio","category","POR ALCANZAR")] %>% spread(category, `POR ALCANZAR`)
#Suffix _F_A = household ("Familia") achieved; _F_PA = household to-achieve.
setnames(ALCANZADO, old = names(ALCANZADO)[-1],
new = paste(toupper(names(ALCANZADO)[-1]),"F","A",sep = "_"))
setnames(POR_ALCANZAR, old = names(POR_ALCANZAR)[-1],
new = paste(toupper(names(POR_ALCANZAR)[-1]),"F","PA",sep = "_"))
DATA_Municipal_HOG=Reduce(function(x,y) merge(x = x, y = y, by = c("CodigoMunicipio"), all.x=TRUE), list(ZONA,POR_ALCANZAR,ALCANZADO))#Join the household-level frequency data frames
#Person-level frequencies per municipality.
DISCAPACIDAD= reshape2::dcast(data=DATA,
CodigoMunicipio ~ Discapacidad,
fun.aggregate = length,
value.var = "Discapacidad")#Generates column-wise frequencies of the given variable
setnames(DISCAPACIDAD, old = c("0","1"),
new = c("DISCAPACIDADNO","DISCAPACIDADSI"))
SEXO= reshape2::dcast(data=DATA,
CodigoMunicipio ~ E03,
fun.aggregate = length,
value.var = "E03")#Generates column-wise frequencies of the given variable
setnames(SEXO, old = c("1","2"),
new = c("SEXOHOMBRE","SEXOMUJER"))
SEXO$SEXOINTERSEXUAL=0#No intersex cases in the data; added to keep the output structure
GRUPOSETAREO = reshape2::dcast(data=DATA,
CodigoMunicipio ~ CICLOVITAL,
fun.aggregate = length,
value.var = "CICLOVITAL")#Generates column-wise frequencies of the given variable
GRUPOSETINICOS= reshape2::dcast(data=DATA,
CodigoMunicipio ~ E08,
fun.aggregate = length,
value.var = "E08")#Generates column-wise frequencies of the given variable
setnames(GRUPOSETINICOS, old = c("1","2","3","4","5","6"),
new = c("INDIGENA","ROM","RAIZAL","AFRODESCENDIENTE","PALENQUERO","SIN ETNIA"))
#Individual logro frequencies ("_I" columns). NOTE(review): grep("_I", ...)
#also matches names like fechaCalculo_IPM; confirm [-(27:28)] excludes
#exactly the unwanted matches.
LOGROS=DATA[c("CodigoMunicipio",grep("_I",names(DATA),value = TRUE)[-(27:28)])] %>%
gather(category, val, -c(CodigoMunicipio)) %>%
na.omit() %>%
group_by(CodigoMunicipio, category, val) %>%
summarise(new = n()) %>%
spread(val, new, fill = 0)
ALCANZADO =LOGROS[c("CodigoMunicipio","category","ALCANZADO")] %>% spread(category, ALCANZADO)
POR_ALCANZAR =LOGROS[c("CodigoMunicipio","category","POR ALCANZAR")] %>% spread(category, `POR ALCANZAR`)
setnames(ALCANZADO, old = names(ALCANZADO)[-1],
new = paste(toupper(names(ALCANZADO)[-1]),"A",sep = "_"))
setnames(POR_ALCANZAR, old = names(POR_ALCANZAR)[-1],
new = paste(toupper(names(POR_ALCANZAR)[-1]),"PA",sep = "_"))
DATA_Municipal_INT=Reduce(function(x,y) merge(x = x, y = y, by = c("CodigoMunicipio"), all.x=TRUE), list(INTEGRANTES,HOGARES,POR_ALCANZAR,ALCANZADO,SEXO,GRUPOSETAREO,DISCAPACIDAD,GRUPOSETINICOS))#Join the person-level frequency data frames
DATA_Municipal=merge(DATA_Municipal_HOG,DATA_Municipal_INT,by="CodigoMunicipio",all.x=TRUE)
DATA_Municipal=merge(MUNICIPIOS,DATA_Municipal, by="CodigoMunicipio", all.y=TRUE)#Produces the municipal frequency file
setwd(Entradas)
Frecuencias_Estructura = read_excel("Frecuencias_Estructura.xlsx")#Variable list with the names previously used for the frequency file
DATA_Municipal=DATA_Municipal[Frecuencias_Estructura$Variables]#Order the variables following the previous frequency-file layout
setwd(Salidas)
write.csv(DATA_Municipal, file = paste("Frecuencias_UNIDOS_Municipal","_",format(Sys.time(), "%d%m%Y"), ".csv", sep=""), row.names = FALSE)
rm(list = ls()[!ls() %in% grep("^DATA_Municipal",ls(),value = TRUE)])#Remove objects not needed for later calculations
| /Generacion de cortes UNIDOS.R | no_license | GITInformacion/Dps-Scripts | R | false | false | 23,373 | r | ##############################################
#Generation of cuts for the Estrategia Unidos#
##############################################
################## LIBRARIES
library(rgdal)
library(rgeos)
library(sp)
library(ggmap)
library(readr)
library(readxl)
library(dplyr)
library(stringi)
library(stringr)
library(ggplot2)
library(phonics)
library(foreign)
library(reader)
library(tidyr)
library(reshape2)
library(data.table)
library(sqldf)
library(eeptools)
library(summarytools)
options(scipen=999) ### Avoid scientific notation in printed output
#This code follows the instructions of the document "Paso a paso para la generación de cortes de la Estrategia Unidos".
#DATA#
######
#NOTE(review): absolute, machine-specific paths + setwd() make the script
#non-portable; consider here::here() or command-line arguments.
Entradas="/Volumes/Macintosh HD/Users/andresromeroparra/Google Drive/DPS/2020/Generacion de cortes/Entradas"#Input directory containing the required files
Salidas ="/Volumes/Macintosh HD/Users/andresromeroparra/Google Drive/DPS/2020/Generacion de cortes/Salidas"#Output directory for generated files
setwd(Entradas)#Switch to the input directory
#The folder should contain these files (names they take in the code in parentheses):
# 1.Unidos form flat file (FORMULARIO)
# 2.Achievement calculation per household (Logros_Hogar)
# 3.Achievement calculation per member (Logros_Integrantes)
# 4.Multidimensional Poverty Index calculation (IPM)
# 5.Poverty-line calculation (LP)
# 6.Municipalities of Colombia (MUNICIPIOS)
# 7.Accompaniment status (ESTADO)
#Import of the files used for the official Estrategia Unidos cuts.
Caracterizacion_DNP_20200211 = read_delim("/Volumes/Macintosh HD/Users/andresromeroparra/Google Drive/DPS/2020/Datos/UNIDOS_2019/Caracterizacion_DNP_20200211.txt","|", escape_double = FALSE, locale = locale(encoding = "ISO-8859-1"), trim_ws = TRUE)
FORMULARIO =read_delim("Caracterizacion_DNP_20200319.txt","|", escape_double = FALSE, locale = locale(encoding = "ISO-8859-1"),trim_ws = TRUE)
Logros_Hogar =read_excel("Calculos 20200319.xlsx", sheet = "LogrosHogar", skip = 1)
Logros_Integrantes=read_excel("Calculos 20200319.xlsx", sheet = "LogrosIntegrante", skip = 1)
IPM =read_excel("Calculos 20200319.xlsx", sheet = "IPM", skip = 0)
LP =read_excel("Calculos 20200319.xlsx", sheet = "LP", skip = 0)
#ESTADO =read_excel("Estado Hogares Piloto.xlsx")
ESTADO =read_excel("Estado Hogares Piloto_20200430.xlsx")
MUNICIPIOS =read_excel("MUNICIPIOS.xlsx", sheet = "Municipios", skip = 10)
MUNICIPIOS =MUNICIPIOS[1:1122,c("Código...3","Nombre...2","Nombre...4")]#Keep the 1122 municipality rows and the code/name columns
#Rename variables#
##################
setnames(FORMULARIO, old = c("A01","EdadCaracterizacion","A04"),
new = c("idHogar","EdadActual","Zona"))#Rename columns
setnames(Logros_Integrantes, old = grep("logro|fecha",names(Logros_Integrantes),value = TRUE),
new = paste(grep("logro|fecha",names(Logros_Integrantes),value = TRUE),"I",sep = "_"))#Suffix "_I" marks member-level (Integrante) logros
setnames(IPM, old = c("fechaCalculo"),
new = c("fechaCalculo_IPM"))#Rename columns
setnames(LP, old = c("fechaCalculo"),
new = c("fechaCalculo_LP"))#Rename columns
setnames(ESTADO, old = c("Estado","IdHogar"),
new = c("EstadoHogar","idHogar"))#Rename columns
setnames(MUNICIPIOS, old = c("Código...3","Nombre...2","Nombre...4"),
new = c("CodigoMunicipio","Departamento","Municipio"))#Rename columns
#Recode variables#
##################
#Disability indicator: 1 if any E15_* column (except the 8th) equals 1.
FORMULARIO$Discapacidad=rowSums(FORMULARIO[grep("E15", names(FORMULARIO), value = TRUE)][-8]==1)
FORMULARIO$Discapacidad=ifelse(FORMULARIO$Discapacidad>0,1,0)#Dichotomous disability variable (1 = any disability, 0 otherwise)
#Life-cycle group from current age; NA ages propagate NA through the chain.
FORMULARIO= FORMULARIO %>% mutate(CICLOVITAL = ifelse(EdadActual<=5, "1-PrimeraInfancia",
ifelse((EdadActual>=6 & EdadActual<=11),"2-Ninez",
ifelse((EdadActual>=12 & EdadActual<=17),"3-Adolescencia",
ifelse((EdadActual>=18 & EdadActual<=24),"4-Juventud",
ifelse((EdadActual>=25 & EdadActual<=59),"5-Adulto",
ifelse(EdadActual>59,"6-AdultoMayor",
"NA")))))))
Variables_add= c("E01_a","E01_b","E01_c","E01_d","E02","E03","E05","E06","E08","CICLOVITAL","Discapacidad")#These variables are used in later calculations
#Column list (and order) of the final database: ids, geography, household
#logros, member logros ("_I"), IPM indicators, IPM/LP results and status.
#Note: logro12/logro19 are intentionally absent from both series.
Variables=c("idHogar",
"idIntegranteHogar",
"Departamento",
"CodigoMunicipio",
"Municipio",
"Zona",
"EdadActual",
"E11",
"logro01",
"logro02",
"logro03",
"logro04",
"logro05",
"logro06",
"logro07",
"logro08",
"logro09",
"logro10",
"logro11",
"logro13",
"logro14",
"logro15",
"logro16",
"logro17",
"logro18",
"logro20",
"logro21",
"logro22",
"logro23",
"logro24",
"logro25",
"logro26",
"logro27",
"logro28",
"fechaCalculo",
"logro01_I",
"logro02_I",
"logro03_I",
"logro04_I",
"logro05_I",
"logro06_I",
"logro07_I",
"logro08_I",
"logro09_I",
"logro10_I",
"logro11_I",
"logro13_I",
"logro14_I",
"logro15_I",
"logro16_I",
"logro17_I",
"logro18_I",
"logro20_I",
"logro21_I",
"logro22_I",
"logro23_I",
"logro24_I",
"logro25_I",
"logro26_I",
"logro27_I",
"logro28_I",
"fechaCalculo_I",
"indLogroEducativo",
"indAlfabetismo",
"indAsistenciaEscolar",
"indRezagoEscolar",
"indCuidadoInfancia",
"indTrabajoInfantil",
"indDesempleo",
"indEmpleoInformal",
"indAseguramientoSalud",
"indAccesosalud",
"indAccesoAgua",
"indEliminacionExcretas",
"indPisosVivienda",
"indParedesExteriores",
"indHacinamientoCritico",
"fechaCalculo_IPM",
"calculoIPM",
"denominacionIPM",
"denominacionLP",
"fechaCalculo_LP",
"EstadoHogar")#Defines the variable list of the final database
#-- Assemble the analysis dataset ------------------------------------------
#Household-level table: achievements, IPM, LP, household status and one form
#row per household (first occurrence of each idHogar), left-joined on idHogar.
DATA_Hogares=Reduce(function(x,y) merge(x = x, y = y, by = c("idHogar"), all.x=TRUE), list(Logros_Hogar,IPM,LP,ESTADO[c("idHogar","EstadoHogar")],FORMULARIO[!duplicated(FORMULARIO$idHogar),]))#Join of household data
DATA_Hogares = DATA_Hogares[intersect(Variables,names(DATA_Hogares))]#Keep only the columns listed in Variables
DATA_Hogares = select(DATA_Hogares, -c(idIntegranteHogar, EdadActual, E11))#Drop person-level columns so only household-level variables remain
#Person-level table: individual achievements joined with the form. idHogar and
#Zona are dropped from the form side to avoid duplicating those columns.
DATA_Integrantes=Reduce(function(x,y) merge(x = x, y = y, by = c("idIntegranteHogar"), all.x=TRUE), list(Logros_Integrantes,select(FORMULARIO, -c(idHogar,Zona))))#Join of person data
DATA_Integrantes=DATA_Integrantes[intersect(c(Variables,Variables_add),names(DATA_Integrantes))]#Order the columns following the Variables list
#Join household and person data.
DATA=merge(select(DATA_Integrantes, -c(CodigoMunicipio)),DATA_Hogares, by="idHogar", all.x=TRUE)
DATA=merge(DATA, MUNICIPIOS, by="CodigoMunicipio", all.x=TRUE)#Add department and municipality name columns
setdiff(Variables,names(DATA))#Printed check: should be empty when every expected variable is present
DATA=DATA[c(Variables_add,intersect(Variables,names(DATA)))]#Keep the variables relevant for the calculations
rm(DATA_Hogares,DATA_Integrantes)
#1. Procedure: generation of the certified cut
#a) Members whose IPM or LP classification is not a valid denomination.
EXCLUSION_CALCULO_IPM_LP=DATA[!(DATA$denominacionIPM %in% c("NO POBRE","POBRE")) |
!(DATA$denominacionLP %in% c("NO POBRE","POBRE","POBRE EXTREMO")), c("idHogar","idIntegranteHogar","denominacionIPM","denominacionLP")]
List=grep("^logro",names(DATA),value = TRUE)#Columns scanned for rows without a valid logro value
#A row is excluded when any logro column holds a value outside the four valid
#states. mutate_each()/funs() are defunct in current dplyr; base lapply() over
#the selected columns is an exact drop-in for the same rowSums() test.
sin_calculo=rowSums(!as.data.frame(lapply(DATA[List], `%in%`, c("SIN DATO","POR ALCANZAR","ALCANZADO","NO APLICA")))) >= 1L
EXCLUSION_CALCULO_LOGROS=DATA[sin_calculo, c("idHogar","idIntegranteHogar",List)]
DATA$EXCLUSION_SINCALCULO_IPM_LP_LOGROS=ifelse((DATA$idIntegranteHogar %in% EXCLUSION_CALCULO_IPM_LP$idIntegranteHogar) |
(DATA$idIntegranteHogar %in% EXCLUSION_CALCULO_LOGROS$idIntegranteHogar),1,0)
#b) Household-level logros (all except logro 4) that answered "SIN DATO".
List=grep("^logro",names(DATA),value = TRUE)[c(1:3,5:26)]
sin_dato=rowSums(as.data.frame(lapply(DATA[List], `%in%`, "SIN DATO"))) >= 1L
EXCLUSION_SIN_DATO=DATA[sin_dato,]#Rows with at least one "SIN DATO" answer
EXCLUSION_SIN_DATO=EXCLUSION_SIN_DATO[,c("idHogar","idIntegranteHogar","logro27")]
DATA$EXCLUSION_SIN_DATO=ifelse((DATA$idIntegranteHogar %in% EXCLUSION_SIN_DATO$idIntegranteHogar),1,0)
#c)
#1.
#Builds a Soundex phonetic key from free-text name fields so near-identical
#spellings collapse to the same code before duplicate detection.
fonetico=function(text){
#Map ¥ -> Y and Ð -> D character by character. The previous
#gsub("¥|Ð","Y|D",text) replaced EITHER character with the literal
#three-character string "Y|D", corrupting the phonetic key.
text=chartr("¥Ð","YD",text)
text=str_replace_all(gsub("`|\\'", "", toupper(text)),"[[:punct:]]", "")#Uppercase, strip quotes and punctuation
text=str_replace_all(text,"[^[:graph:]]", " ")#Collapse non-printable characters to spaces
text=stri_trans_general(text,"Latin-ASCII")#Remove accents/diacritics
text=soundex(text, maxCodeLen = 4L, clean = FALSE)#4-character Soundex code (phonics pkg)
return(text)
}
#Phonetic duplicate detection: soundex of the four name fields plus birth
#date (E02). Compute the key once (the original built the identical paste()
#twice, calling fonetico() eight times) and flag ALL members of each
#duplicate group via duplicated() from both directions.
clave_fonetica=paste(fonetico(DATA$E01_a),
fonetico(DATA$E01_b),
fonetico(DATA$E01_c),
fonetico(DATA$E01_d),
DATA$E02)
EXCLUSION_UNICO_FONETICO=DATA[duplicated(clave_fonetica)|duplicated(clave_fonetica,fromLast=TRUE),]
DATA$EXCLUSION_DUPLICIDAD_FONETICO=ifelse((DATA$idIntegranteHogar %in% EXCLUSION_UNICO_FONETICO$idIntegranteHogar),1,0)
#2. Duplicate identity documents: same document number (E05) + type (E06).
EXCLUSION_SIN_DOCUMENTO_UNICO=DATA[duplicated(paste(DATA$E05,DATA$E06)),]
#Pairs E05==0 & E06==9 are exempt — presumably the "no document" placeholder;
#confirm against the form's codebook.
EXCLUSION_SIN_DOCUMENTO_UNICO=EXCLUSION_SIN_DOCUMENTO_UNICO[!(EXCLUSION_SIN_DOCUMENTO_UNICO$E05==0 & EXCLUSION_SIN_DOCUMENTO_UNICO$E06==9),]#Keep only real duplicated documents
EXCLUSION_SIN_DOCUMENTO_UNICO=EXCLUSION_SIN_DOCUMENTO_UNICO[c("idHogar","idIntegranteHogar","E01_a","E01_b","E01_c","E01_d","E02","E05","E06")]
DATA$EXCLUSION_DUPLICIDAD_DOCUMENTO=ifelse((DATA$idIntegranteHogar %in% EXCLUSION_SIN_DOCUMENTO_UNICO$idIntegranteHogar),1,0)
#d) Flag households where every member is under 14. NOTE: group_by() leaves
#DATA as a grouped tibble from here until the as.data.frame() below.
DATA= DATA %>% group_by(idHogar) %>% mutate(EXCLUSION_HOGAR_MENOR_14 = ifelse(all(EdadActual<14),1,0))#Households whose members are all younger than 14
#e) Drop households whose status is voluntary renunciation or not located.
EXCLUSION_ESTADO_E=DATA[DATA$EstadoHogar %in% c("Renuncia Voluntaria","No Localizado"),c("idHogar","idIntegranteHogar","EstadoHogar")]#Records with status "Renuncia Voluntaria" or "No Localizado"
DATA$EXCLUSION_ESTADO_RENUNCIA_NOLOCALIZADO=ifelse((DATA$idIntegranteHogar %in% EXCLUSION_ESTADO_E$idIntegranteHogar),1,0)
#f) Drop households whose accompaniment is suspended due to household merge.
EXCLUSION_ESTADO_F=DATA[DATA$EstadoHogar %in% c("Con suspensión del Acompañamiento Unión de Hogares"),]#Records with suspended-accompaniment status
DATA$EXCLUSION_ESTADO_SUSPENCION=ifelse((DATA$idIntegranteHogar %in% EXCLUSION_ESTADO_F$idIntegranteHogar),1,0)
#Provisional cut: members with no exclusion flag set so far (used below to
#scope the externally reported exclusions).
DATA=as.data.frame(DATA)
Corte_Unidos=DATA[DATA$EXCLUSION_SINCALCULO_IPM_LP_LOGROS %in% 0 & DATA$EXCLUSION_SIN_DATO %in% 0 & DATA$EXCLUSION_DUPLICIDAD_FONETICO %in% 0 & DATA$EXCLUSION_DUPLICIDAD_DOCUMENTO %in% 0 & DATA$EXCLUSION_HOGAR_MENOR_14 %in% 0 & DATA$EXCLUSION_ESTADO_RENUNCIA_NOLOCALIZADO %in% 0 & DATA$EXCLUSION_ESTADO_SUSPENCION %in% 0,]
#-- Externally reported exclusions -----------------------------------------
#The review workbook is read ONCE and filtered for each flag (the original
#re-read the same file before every filter, four reads in total).
#Colchon (buffer): members marked NUEVO in the review file.
diff_corte_unidos_12062020=read_excel("diff_corte_unidos_12062020.xlsx")
EXCLUSION_EXT_COLCHON=diff_corte_unidos_12062020[diff_corte_unidos_12062020$COLCHON %in% "NUEVO","idIntegranteHogar"]
DATA$EXCLUSION_EXT_COLCHON=ifelse((DATA$idIntegranteHogar %in% EXCLUSION_EXT_COLCHON$idIntegranteHogar),1,0)
#Non-NUEVO rows feed the remaining three flags.
diff_no_nuevo=diff_corte_unidos_12062020[!diff_corte_unidos_12062020$COLCHON %in% "NUEVO",]
#Externally reported phonetic duplicates, restricted to the provisional cut.
diff_fonetico=diff_no_nuevo[diff_no_nuevo$DUPLICADO %in% "FONETICO",]
EXCLUSION_EXT_FONETIC=DATA[DATA$idIntegranteHogar %in% intersect(diff_fonetico$idIntegranteHogar,Corte_Unidos$idIntegranteHogar),]
DATA$EXCLUSION_EXT_FONETIC=ifelse((DATA$idIntegranteHogar %in% EXCLUSION_EXT_FONETIC$idIntegranteHogar),1,0)
#TODO: the review of twins (gemelos y mellizos) is still undefined in the
#paso-a-paso document.
#Rows whose Exclusión column differs from "SI".
diff_no_fonetico=diff_no_nuevo[!diff_no_nuevo$DUPLICADO %in% "FONETICO",]
diff_excl_no=diff_no_fonetico[!diff_no_fonetico$Exclusión %in% "SI",]
EXCLUSION_EXT_EXCLUSION=DATA[DATA$idIntegranteHogar %in% intersect(diff_excl_no$idIntegranteHogar,Corte_Unidos$idIntegranteHogar),]
DATA$EXCLUSION_EXT_EXCLUSION=ifelse((DATA$idIntegranteHogar %in% EXCLUSION_EXT_EXCLUSION$idIntegranteHogar),1,0)
#Rows whose Exclusión column equals "SI".
diff_excl_si=diff_no_fonetico[diff_no_fonetico$Exclusión %in% "SI",]
EXCLUSION_EXT_EXCLUSIONSI=DATA[DATA$idIntegranteHogar %in% intersect(diff_excl_si$idIntegranteHogar,Corte_Unidos$idIntegranteHogar),]
DATA$EXCLUSION_EXT_EXCLUSIONSI=ifelse((DATA$idIntegranteHogar %in% EXCLUSION_EXT_EXCLUSIONSI$idIntegranteHogar),1,0)
#Preserve the final state the original code left behind (the "SI" subset).
diff_corte_unidos_12062020=diff_excl_si
#Phonetic duplicates validated against BDUA/registraduria: exclude any row
#whose validation column does not contain "Ok".
Revision_duplicados <- read_excel("Revision_duplicados.xlsx")
Revision_duplicados$Exclusion=ifelse(!grepl("Ok",Revision_duplicados$`Validación Reportado como duplicado`),1,0)
EXCLUSION_EXT_BDUA_registraduria=Revision_duplicados[Revision_duplicados$Exclusion %in% 1,c("idIntegranteHogar","Exclusion")]
DATA$EXCLUSION_EXT_BDUA_registraduria=ifelse((DATA$idIntegranteHogar %in% EXCLUSION_EXT_BDUA_registraduria$idIntegranteHogar),1,0)
#Variables according to the dictionary.
DATA=as.data.frame(DATA)
#Final cut: members with every exclusion flag at 0. NOTE: this version omits
#EXCLUSION_DUPLICIDAD_FONETICO; the internal phonetic flag is replaced by the
#externally validated EXT flags — confirm this is intentional.
Corte_Unidos=DATA[DATA$EXCLUSION_SINCALCULO_IPM_LP_LOGROS %in% 0 &
DATA$EXCLUSION_SIN_DATO %in% 0 &
DATA$EXCLUSION_DUPLICIDAD_DOCUMENTO %in% 0 &
DATA$EXCLUSION_HOGAR_MENOR_14 %in% 0 &
DATA$EXCLUSION_ESTADO_RENUNCIA_NOLOCALIZADO %in% 0 &
DATA$EXCLUSION_ESTADO_SUSPENCION %in% 0 &
DATA$EXCLUSION_EXT_COLCHON %in% 0 &
DATA$EXCLUSION_EXT_FONETIC %in% 0 &
DATA$EXCLUSION_EXT_EXCLUSION %in% 0 &
DATA$EXCLUSION_EXT_EXCLUSIONSI %in% 0 &
DATA$EXCLUSION_EXT_BDUA_registraduria %in% 0,]
#Alternative cut that ALSO applies the internal phonetic duplicate flag.
#NOTE(review): Corte_Unidos_1 is not used anywhere below — dead result?
Corte_Unidos_1=DATA[DATA$EXCLUSION_SINCALCULO_IPM_LP_LOGROS %in% 0 &
DATA$EXCLUSION_SIN_DATO %in% 0 &
DATA$EXCLUSION_DUPLICIDAD_FONETICO %in% 0 &
DATA$EXCLUSION_DUPLICIDAD_DOCUMENTO %in% 0 &
DATA$EXCLUSION_HOGAR_MENOR_14 %in% 0 &
DATA$EXCLUSION_ESTADO_RENUNCIA_NOLOCALIZADO %in% 0 &
DATA$EXCLUSION_ESTADO_SUSPENCION %in% 0 &
DATA$EXCLUSION_EXT_COLCHON %in% 0 &
DATA$EXCLUSION_EXT_FONETIC %in% 0 &
DATA$EXCLUSION_EXT_EXCLUSION %in% 0 &
DATA$EXCLUSION_EXT_EXCLUSIONSI %in% 0 &
DATA$EXCLUSION_EXT_BDUA_registraduria %in% 0,]
#Household size per idHogar. NOTE: Total_personas is not in Variables, so the
#next line immediately drops it — confirm whether it should be kept.
Corte_Unidos=Corte_Unidos %>% group_by(idHogar) %>% mutate(Total_personas=n())
Corte_Unidos=Corte_Unidos[Variables]
#Keep only the form rows of members that survived the final cut.
FORMULARIO <- FORMULARIO[FORMULARIO$idIntegranteHogar %in% Corte_Unidos$idIntegranteHogar, ]
setwd(Salidas)#Directory for the outputs generated by the code
#Date-stamped CSV file name, e.g. "DATA_24062020.csv".
nombre_csv <- function(prefijo) paste0(prefijo, "_", format(Sys.time(), "%d%m%Y"), ".csv")
write.csv(Corte_Unidos, file = nombre_csv("Corte_Unidos"), row.names = FALSE)
write.csv(DATA, file = nombre_csv("DATA"), row.names = FALSE)
write.csv(FORMULARIO, file = nombre_csv("SABANA_CORTE"), row.names = FALSE)
#After computing the exclusion flags, drop data frames that are empty or
#entirely NA from the global environment.
to.rm <- unlist(eapply(.GlobalEnv, function(x) is.data.frame(x) && (nrow(x) == 0 || all(is.na(x)))))
rm(list = names(to.rm)[to.rm], envir = .GlobalEnv)
#Export every remaining object whose name contains "EXCLUSION". Fixes vs the
#original: sep="" is removed (write.csv2 has a fixed separator and warns
#"attempt to set 'sep' ignored"); iterating over the vector itself avoids the
#1:m trap, which would error out when no EXCLUSION object remains.
exclusion_objs = grep("EXCLUSION", ls(), value = TRUE)
for (obj in exclusion_objs) {
write.csv2(
get(obj),
file = paste0(obj, "_", format(Sys.time(), "%d%m%Y"), ".csv"),
row.names = FALSE)
}
#2. Generaciones estadísticas descriptivas
view(dfSummary(as.data.frame(Corte_Unidos)))#Esta linea demanda una importante capacidad de computo.
rm(list = ls()[!ls() %in% grep("Salidas|Entradas|DATA|Variables|MUNICIPIOS",ls(),value = TRUE)])#Elimina objetos que no se requieren para los cálculos posteriores
#3. Procedure: generation of municipal frequency tables.
# Count of individuals per municipality.
INTEGRANTES=DATA[c("CodigoMunicipio")] %>% group_by(CodigoMunicipio) %>% summarise(TOTALPERSONAS=n())
# Count of (deduplicated) households per municipality.
HOGARES=DATA[!duplicated(DATA$idHogar),c("idHogar","CodigoMunicipio")] %>% group_by(CodigoMunicipio) %>% summarise(TOTALHOGARES=n())
# Household counts per zone code (one household counted once via !duplicated).
ZONA= reshape2::dcast(data=DATA[!duplicated(DATA$idHogar),],
CodigoMunicipio ~ Zona,
fun.aggregate = length,
value.var = "Zona")#Generates column-wise frequencies of the given variable
# Rename the numeric zone codes to descriptive labels (data.table::setnames).
setnames(ZONA, old = c("1","2","3"),
new = c("CABECERA MUNICIPAL","CENTRO POBLADO","RURAL DISPERSO"))
# Household-level achievement ("logro") counts: long format per municipality,
# category and status, then one column per status value.
# NOTE(review): gather()/spread() are superseded by pivot_longer()/pivot_wider()
# in tidyr >= 1.0; they still work but are no longer recommended.
LOGROS=DATA[c("CodigoMunicipio", grep("^logro",names(DATA),value = TRUE)[1:26])] %>%
gather(category, val, -c(CodigoMunicipio)) %>%
na.omit() %>%
group_by(CodigoMunicipio, category, val) %>%
summarise(new = n()) %>%
spread(val, new, fill = 0)
# Split into achieved ("ALCANZADO") and pending ("POR ALCANZAR") wide tables.
ALCANZADO =LOGROS[c("CodigoMunicipio","category","ALCANZADO")] %>% spread(category, ALCANZADO)
POR_ALCANZAR =LOGROS[c("CodigoMunicipio","category","POR ALCANZAR")] %>% spread(category, `POR ALCANZAR`)
# Suffix the columns: _F_A = household frequency achieved, _F_PA = pending.
setnames(ALCANZADO, old = names(ALCANZADO)[-1],
new = paste(toupper(names(ALCANZADO)[-1]),"F","A",sep = "_"))
setnames(POR_ALCANZAR, old = names(POR_ALCANZAR)[-1],
new = paste(toupper(names(POR_ALCANZAR)[-1]),"F","PA",sep = "_"))
DATA_Municipal_HOG=Reduce(function(x,y) merge(x = x, y = y, by = c("CodigoMunicipio"), all.x=TRUE), list(ZONA,POR_ALCANZAR,ALCANZADO))#Merge the household-level frequency data frames.
# Individual-level frequency tables: disability, sex, age group, ethnicity.
DISCAPACIDAD= reshape2::dcast(data=DATA,
CodigoMunicipio ~ Discapacidad,
fun.aggregate = length,
value.var = "Discapacidad")#Generates column-wise frequencies of the given variable
setnames(DISCAPACIDAD, old = c("0","1"),
new = c("DISCAPACIDADNO","DISCAPACIDADSI"))
SEXO= reshape2::dcast(data=DATA,
CodigoMunicipio ~ E03,
fun.aggregate = length,
value.var = "E03")#Generates column-wise frequencies of the given variable
setnames(SEXO, old = c("1","2"),
new = c("SEXOHOMBRE","SEXOMUJER"))
SEXO$SEXOINTERSEXUAL=0#No intersex cases in the data. Added to preserve the expected structure.
GRUPOSETAREO = reshape2::dcast(data=DATA,
CodigoMunicipio ~ CICLOVITAL,
fun.aggregate = length,
value.var = "CICLOVITAL")#Generates column-wise frequencies of the given variable
GRUPOSETINICOS= reshape2::dcast(data=DATA,
CodigoMunicipio ~ E08,
fun.aggregate = length,
value.var = "E08")#Generates column-wise frequencies of the given variable
setnames(GRUPOSETINICOS, old = c("1","2","3","4","5","6"),
new = c("INDIGENA","ROM","RAIZAL","AFRODESCENDIENTE","PALENQUERO","SIN ETNIA"))
# Individual-level achievement counts ("_I" columns, dropping columns 27:28
# of the match -- presumably non-achievement columns; verify against DATA).
LOGROS=DATA[c("CodigoMunicipio",grep("_I",names(DATA),value = TRUE)[-(27:28)])] %>%
gather(category, val, -c(CodigoMunicipio)) %>%
na.omit() %>%
group_by(CodigoMunicipio, category, val) %>%
summarise(new = n()) %>%
spread(val, new, fill = 0)
ALCANZADO =LOGROS[c("CodigoMunicipio","category","ALCANZADO")] %>% spread(category, ALCANZADO)
POR_ALCANZAR =LOGROS[c("CodigoMunicipio","category","POR ALCANZAR")] %>% spread(category, `POR ALCANZAR`)
# Suffix the columns: _A = achieved, _PA = pending (individual level).
setnames(ALCANZADO, old = names(ALCANZADO)[-1],
new = paste(toupper(names(ALCANZADO)[-1]),"A",sep = "_"))
setnames(POR_ALCANZAR, old = names(POR_ALCANZAR)[-1],
new = paste(toupper(names(POR_ALCANZAR)[-1]),"PA",sep = "_"))
DATA_Municipal_INT=Reduce(function(x,y) merge(x = x, y = y, by = c("CodigoMunicipio"), all.x=TRUE), list(INTEGRANTES,HOGARES,POR_ALCANZAR,ALCANZADO,SEXO,GRUPOSETAREO,DISCAPACIDAD,GRUPOSETINICOS))#Merge the individual-level frequency data frames.
# Combine household- and individual-level tables, then attach municipality
# metadata to produce the final municipal frequency file.
DATA_Municipal=merge(DATA_Municipal_HOG,DATA_Municipal_INT,by="CodigoMunicipio",all.x=TRUE)
DATA_Municipal=merge(MUNICIPIOS,DATA_Municipal, by="CodigoMunicipio", all.y=TRUE)#Builds the municipal frequencies file.
setwd(Entradas)
Frecuencias_Estructura = read_excel("Frecuencias_Estructura.xlsx")#Import the variable list with the names used previously when generating the frequencies file.
DATA_Municipal=DATA_Municipal[Frecuencias_Estructura$Variables]#Reorder the variables following the previous frequency-file column order.
setwd(Salidas)
write.csv(DATA_Municipal, file = paste("Frecuencias_UNIDOS_Municipal","_",format(Sys.time(), "%d%m%Y"), ".csv", sep=""), row.names = FALSE)
rm(list = ls()[!ls() %in% grep("^DATA_Municipal",ls(),value = TRUE)])#Remove objects not needed for later computations
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{dcook_gamma}
\alias{dcook_gamma}
\title{Gráfico da distância de Cook para o modelo Gamma.}
\usage{
dcook_gamma(modelo)
}
\arguments{
\item{modelo}{modelo ajustado}
}
\description{
Gráfico da distância de Cook para o modelo Gamma.
}
\details{
A linha tracejada representa o corte proposto por Bollen, Kenneth et al de
4/n em que n é o número de observações.
}
\examples{
clotting <- data.frame(
u = c(5,10,15,20,30,40,60,80,100),
lot1 = c(118,58,42,35,27,25,21,19,18),
lot2 = c(69,35,26,21,18,16,13,12,12))
modelo <- glm(lot1 ~ log(u), data = clotting, family = Gamma)
modelo \%>\% dcook_gamma()
}
| /man/dcook_gamma.Rd | no_license | dfalbel/diag | R | false | false | 674 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{dcook_gamma}
\alias{dcook_gamma}
\title{Gráfico da distância de Cook para o modelo Gamma.}
\usage{
dcook_gamma(modelo)
}
\arguments{
\item{modelo}{modelo ajustado}
}
\description{
Gráfico da distância de Cook para o modelo Gamma.
}
\details{
A linha tracejada representa o corte proposto por Bollen, Kenneth et al de
4/n em que n é o número de observações.
}
\examples{
clotting <- data.frame(
u = c(5,10,15,20,30,40,60,80,100),
lot1 = c(118,58,42,35,27,25,21,19,18),
lot2 = c(69,35,26,21,18,16,13,12,12))
modelo <- glm(lot1 ~ log(u), data = clotting, family = Gamma)
modelo \%>\% dcook_gamma()
}
|
# This script calculates all of the statistics reported in-text in the manuscript.
library(here)
library(tidyverse)
library(purrr) # NOTE(review): purrr is already attached by tidyverse; redundant but harmless
library(broom)
library(lme4) # NOTE(review): no lme4 model is fit in this chunk -- confirm it is used elsewhere
###################
# 1. Region and historical warming
###################
# Load pre-processed temperature products: SODA and HadISST summary stats,
# daily OISST for the study region, and HadISST isotherm positions over time.
soda.stats <- readRDS(here("processed-data","soda_stats.rds"))
hadisst.stats <- readRDS(here("processed-data","hadisst_stats.rds"))
oisst.neus <- readRDS(here("processed-data","oisst_neus.rds"))
hadisst.isotherms <- readRDS(here("processed-data","hadisst_isotherms_time.rds"))
# Linear trend in annual mean HadISST SST from 1968 onward.
hadisst.lm.mean <- hadisst.stats %>%
dplyr::select(year_measured, year.month.mean, year.month.max, year.month.sd, year.month.min) %>% # use year_measured which refers to the actual year measured not the edge year to match to
distinct() %>%
filter(year_measured >= 1968) %>%
lm(year.month.mean ~ year_measured, data=.) %>%
summary()
# Trend in the warm extreme: 99th percentile of region-mean daily OISST.
oisst.lm.high <- oisst.neus %>%
group_by(time) %>%
mutate(day.mean.sst = mean(sst)) %>%
ungroup() %>%
group_by(year) %>%
mutate(year.daily.99 = quantile(day.mean.sst, 0.99)) %>%
ungroup() %>%
dplyr::select(year, year.daily.99) %>%
distinct() %>%
lm(year.daily.99 ~ year, data=.) %>%
summary()
# Trend in the cold extreme: 1st percentile of region-mean daily OISST.
oisst.lm.low <- oisst.neus %>%
group_by(time) %>%
mutate(day.mean.sst = mean(sst)) %>%
ungroup() %>%
group_by(year) %>%
mutate(year.daily.01 = quantile(day.mean.sst, 0.01)) %>%
ungroup() %>%
dplyr::select(year, year.daily.01) %>%
distinct() %>%
lm(year.daily.01 ~ year, data=.) %>%
summary()
# Trend in annual mean SODA temperature.
soda.lm.mean <- soda.stats %>%
dplyr::select(year_measured, year.month.mean, year.month.max, year.month.sd, year.month.min) %>%
distinct() %>%
lm(year.month.mean ~ year_measured, data=.) %>%
summary()
# One linear shift rate per isotherm (nest by degrees, fit lm of position ~ year,
# keep only the year coefficient).
# NOTE(review): unnest(..., .drop = TRUE) is deprecated in tidyr >= 1.0; the
# explicit select(-data, -model) below already removes the list-columns.
isotherm.shifts <- hadisst.isotherms %>%
group_by(degrees) %>%
nest() %>%
mutate(
model = purrr::map(data, ~lm(est.iso.hadisst ~ year, data = .x)),
tidymodel = purrr::map(model, tidy)
) %>%
unnest(tidymodel, .drop=TRUE) %>%
filter(term=="year") %>%
dplyr::select(-data, -model)
###################
# 2. Survey data
###################
# Sample-size summaries for poleward-edge species: quantiles of the number of
# survey observations and survey years per species.
poldat.stats.samples <- readRDS(here("processed-data","poldat.stats.iso.rds")) %>%
dplyr::select(commonname, numyears, numobs) %>%
distinct()
quantile(poldat.stats.samples$numobs)
quantile(poldat.stats.samples$numyears)
# Same summaries for equatorward-edge species.
eqdat.stats.samples <- readRDS(here("processed-data","eqdat.stats.iso.rds")) %>%
dplyr::select(commonname, numyears, numobs) %>%
distinct()
quantile(eqdat.stats.samples$numobs)
quantile(eqdat.stats.samples$numyears)
###################
# 3. Range edge position analysis
###################
poldat.stats.iso <- readRDS(here("processed-data","poldat.stats.iso.rds")) %>%
mutate(year = as.numeric(year))
eqdat.stats.iso <- readRDS(here("processed-data","eqdat.stats.iso.rds")) %>%
mutate(year = as.numeric(year))
# Assemblage-level trend in edge position over time (95th percentile distance
# for the poleward assemblage, 5th percentile for the equatorward one).
poldat.assemblage.lm <- poldat.stats.iso %>%
dplyr::select(year, assemblage.dist95) %>%
distinct() %>%
lm(assemblage.dist95 ~ year, data = .) %>%
summary()
eqdat.assemblage.lm <- eqdat.stats.iso %>%
dplyr::select(year, assemblage.dist05) %>%
distinct() %>%
lm(assemblage.dist05 ~ year, data = .) %>%
summary()
###################
# 5. Changes in depth and biomass
###################
# Re-load the edge datasets (also loaded in section 3; kept so this section
# can run standalone).
poldat.stats.iso <- readRDS(here("processed-data","poldat.stats.iso.rds")) %>%
mutate(year = as.numeric(year))
eqdat.stats.iso <- readRDS(here("processed-data","eqdat.stats.iso.rds")) %>%
mutate(year = as.numeric(year))
#changes in depth over time
# One lm per species of biomass-weighted mean depth vs year; keep the year
# coefficient for each species.
poldat.depth.lm <- poldat.stats.iso %>%
dplyr::select(latinname, commonname, depth.mean.wt, year) %>%
distinct() %>%
group_by(commonname) %>%
nest() %>%
mutate(
model = purrr::map(data, ~lm(depth.mean.wt ~ year, data = .x)),
tidymodel = purrr::map(model, tidy)
) %>%
unnest(tidymodel) %>%
filter(term=="year")
eqdat.depth.lm <- eqdat.stats.iso %>%
dplyr::select(latinname, commonname, depth.mean.wt, year) %>%
distinct() %>%
group_by(commonname) %>%
nest() %>%
mutate(
model = purrr::map(data, ~lm(depth.mean.wt ~ year, data = .x)),
tidymodel = purrr::map(model, tidy)
) %>%
unnest(tidymodel) %>%
filter(term=="year")
# changes in biomass over time
# One lm per species of biomass (converted kg -> metric tons) vs year.
poldat.abund.lm <- poldat.stats.iso %>%
dplyr::select(latinname, commonname, biomass.correct.kg, year) %>%
distinct() %>%
mutate(biomass.correct.mt = biomass.correct.kg/1000) %>%
group_by(commonname) %>%
nest() %>%
mutate(
model = purrr::map(data, ~lm(biomass.correct.mt ~ year, data = .x)),
tidymodel = purrr::map(model, tidy)
) %>%
unnest(tidymodel) %>%
filter(term=="year")
eqdat.abund.lm <- eqdat.stats.iso %>%
dplyr::select(latinname, commonname, biomass.correct.kg, year) %>%
distinct() %>%
mutate(biomass.correct.mt = biomass.correct.kg/1000) %>%
group_by(commonname) %>%
nest() %>%
mutate(
model = purrr::map(data, ~lm(biomass.correct.mt ~ year, data = .x)),
tidymodel = purrr::map(model, tidy)
) %>%
unnest(tidymodel) %>%
filter(term=="year")
# changes in depth vs edge
# Per-species edge-position trends (year coefficient of edge distance vs year),
# to be correlated against the depth and biomass trends above.
# NOTE(review): these use bare map() and unnest(.drop=TRUE) while the depth /
# biomass fits above use purrr::map() without .drop -- functionally equivalent,
# but worth harmonizing; .drop is deprecated in tidyr >= 1.0.
poldat.lm <- poldat.stats.iso %>%
dplyr::select(latinname, commonname, spp.dist95, year) %>%
distinct() %>%
group_by(commonname) %>%
nest() %>%
mutate(
model = map(data, ~lm(spp.dist95 ~ year, data = .x)),
tidymodel = map(model, tidy)
) %>%
unnest(tidymodel, .drop=TRUE) %>%
filter(term=="year")
eqdat.lm <- eqdat.stats.iso %>%
dplyr::select(latinname, commonname, spp.dist05, year) %>%
distinct() %>%
group_by(commonname) %>%
nest() %>%
mutate(
model = map(data, ~lm(spp.dist05 ~ year, data = .x)),
tidymodel = map(model, tidy)
) %>%
unnest(tidymodel, .drop=TRUE) %>%
filter(term=="year")
# Spearman rank correlation between each species' depth trend and its edge
# trend (poleward, then equatorward edge).
poldat.edge.depth <- poldat.depth.lm %>%
rename(depth.coeff = estimate) %>%
left_join(poldat.lm %>% select(commonname, estimate), by="commonname") %>%
rename(edge.coeff = estimate) %>%
select(commonname, depth.coeff, edge.coeff)
cor.test(poldat.edge.depth$depth.coeff, poldat.edge.depth$edge.coeff, method="spearman")
eqdat.edge.depth <- eqdat.depth.lm %>%
rename(depth.coeff = estimate) %>%
left_join(eqdat.lm %>% select(commonname, estimate), by="commonname") %>%
rename(edge.coeff = estimate) %>%
select(commonname, depth.coeff, edge.coeff)
cor.test(eqdat.edge.depth$depth.coeff, eqdat.edge.depth$edge.coeff, method="spearman")
# Spearman rank correlation between each species' biomass trend and its edge
# trend (poleward, then equatorward edge).
poldat.edge.abund <- poldat.abund.lm %>%
rename(abund.coeff = estimate) %>%
left_join(poldat.lm %>% select(commonname, estimate), by="commonname") %>%
rename(edge.coeff = estimate) %>%
select(commonname, abund.coeff, edge.coeff)
cor.test(poldat.edge.abund$abund.coeff, poldat.edge.abund$edge.coeff, method="spearman")
eqdat.edge.abund <- eqdat.abund.lm %>%
rename(abund.coeff = estimate) %>%
left_join(eqdat.lm %>% select(commonname, estimate), by="commonname") %>%
rename(edge.coeff = estimate) %>%
select(commonname, abund.coeff, edge.coeff)
cor.test(eqdat.edge.abund$abund.coeff, eqdat.edge.abund$edge.coeff, method="spearman")
| /scripts/paper_stats.R | no_license | afredston/neus-range-edges | R | false | false | 7,040 | r | # this script calculates all of the statistics reported in-text in the manuscript
library(here)
library(tidyverse)
library(purrr)
library(broom)
library(lme4)
###################
# 1. Region and historical warming
###################
soda.stats <- readRDS(here("processed-data","soda_stats.rds"))
hadisst.stats <- readRDS(here("processed-data","hadisst_stats.rds"))
oisst.neus <- readRDS(here("processed-data","oisst_neus.rds"))
hadisst.isotherms <- readRDS(here("processed-data","hadisst_isotherms_time.rds"))
hadisst.lm.mean <- hadisst.stats %>%
dplyr::select(year_measured, year.month.mean, year.month.max, year.month.sd, year.month.min) %>% # use year_measured which refers to the actual year measured not the edge year to match to
distinct() %>%
filter(year_measured >= 1968) %>%
lm(year.month.mean ~ year_measured, data=.) %>%
summary()
oisst.lm.high <- oisst.neus %>%
group_by(time) %>%
mutate(day.mean.sst = mean(sst)) %>%
ungroup() %>%
group_by(year) %>%
mutate(year.daily.99 = quantile(day.mean.sst, 0.99)) %>%
ungroup() %>%
dplyr::select(year, year.daily.99) %>%
distinct() %>%
lm(year.daily.99 ~ year, data=.) %>%
summary()
oisst.lm.low <- oisst.neus %>%
group_by(time) %>%
mutate(day.mean.sst = mean(sst)) %>%
ungroup() %>%
group_by(year) %>%
mutate(year.daily.01 = quantile(day.mean.sst, 0.01)) %>%
ungroup() %>%
dplyr::select(year, year.daily.01) %>%
distinct() %>%
lm(year.daily.01 ~ year, data=.) %>%
summary()
soda.lm.mean <- soda.stats %>%
dplyr::select(year_measured, year.month.mean, year.month.max, year.month.sd, year.month.min) %>%
distinct() %>%
lm(year.month.mean ~ year_measured, data=.) %>%
summary()
isotherm.shifts <- hadisst.isotherms %>%
group_by(degrees) %>%
nest() %>%
mutate(
model = purrr::map(data, ~lm(est.iso.hadisst ~ year, data = .x)),
tidymodel = purrr::map(model, tidy)
) %>%
unnest(tidymodel, .drop=TRUE) %>%
filter(term=="year") %>%
dplyr::select(-data, -model)
###################
# 2. Survey data
###################
poldat.stats.samples <- readRDS(here("processed-data","poldat.stats.iso.rds")) %>%
dplyr::select(commonname, numyears, numobs) %>%
distinct()
quantile(poldat.stats.samples$numobs)
quantile(poldat.stats.samples$numyears)
eqdat.stats.samples <- readRDS(here("processed-data","eqdat.stats.iso.rds")) %>%
dplyr::select(commonname, numyears, numobs) %>%
distinct()
quantile(eqdat.stats.samples$numobs)
quantile(eqdat.stats.samples$numyears)
###################
# 3. Range edge position analysis
###################
poldat.stats.iso <- readRDS(here("processed-data","poldat.stats.iso.rds")) %>%
mutate(year = as.numeric(year))
eqdat.stats.iso <- readRDS(here("processed-data","eqdat.stats.iso.rds")) %>%
mutate(year = as.numeric(year))
poldat.assemblage.lm <- poldat.stats.iso %>%
dplyr::select(year, assemblage.dist95) %>%
distinct() %>%
lm(assemblage.dist95 ~ year, data = .) %>%
summary()
eqdat.assemblage.lm <- eqdat.stats.iso %>%
dplyr::select(year, assemblage.dist05) %>%
distinct() %>%
lm(assemblage.dist05 ~ year, data = .) %>%
summary()
###################
# 5. Changes in depth and biomass
###################
poldat.stats.iso <- readRDS(here("processed-data","poldat.stats.iso.rds")) %>%
mutate(year = as.numeric(year))
eqdat.stats.iso <- readRDS(here("processed-data","eqdat.stats.iso.rds")) %>%
mutate(year = as.numeric(year))
#changes in depth over time
poldat.depth.lm <- poldat.stats.iso %>%
dplyr::select(latinname, commonname, depth.mean.wt, year) %>%
distinct() %>%
group_by(commonname) %>%
nest() %>%
mutate(
model = purrr::map(data, ~lm(depth.mean.wt ~ year, data = .x)),
tidymodel = purrr::map(model, tidy)
) %>%
unnest(tidymodel) %>%
filter(term=="year")
eqdat.depth.lm <- eqdat.stats.iso %>%
dplyr::select(latinname, commonname, depth.mean.wt, year) %>%
distinct() %>%
group_by(commonname) %>%
nest() %>%
mutate(
model = purrr::map(data, ~lm(depth.mean.wt ~ year, data = .x)),
tidymodel = purrr::map(model, tidy)
) %>%
unnest(tidymodel) %>%
filter(term=="year")
# changes in biomass over time
poldat.abund.lm <- poldat.stats.iso %>%
dplyr::select(latinname, commonname, biomass.correct.kg, year) %>%
distinct() %>%
mutate(biomass.correct.mt = biomass.correct.kg/1000) %>%
group_by(commonname) %>%
nest() %>%
mutate(
model = purrr::map(data, ~lm(biomass.correct.mt ~ year, data = .x)),
tidymodel = purrr::map(model, tidy)
) %>%
unnest(tidymodel) %>%
filter(term=="year")
eqdat.abund.lm <- eqdat.stats.iso %>%
dplyr::select(latinname, commonname, biomass.correct.kg, year) %>%
distinct() %>%
mutate(biomass.correct.mt = biomass.correct.kg/1000) %>%
group_by(commonname) %>%
nest() %>%
mutate(
model = purrr::map(data, ~lm(biomass.correct.mt ~ year, data = .x)),
tidymodel = purrr::map(model, tidy)
) %>%
unnest(tidymodel) %>%
filter(term=="year")
# changes in depth vs edge
poldat.lm <- poldat.stats.iso %>%
dplyr::select(latinname, commonname, spp.dist95, year) %>%
distinct() %>%
group_by(commonname) %>%
nest() %>%
mutate(
model = map(data, ~lm(spp.dist95 ~ year, data = .x)),
tidymodel = map(model, tidy)
) %>%
unnest(tidymodel, .drop=TRUE) %>%
filter(term=="year")
eqdat.lm <- eqdat.stats.iso %>%
dplyr::select(latinname, commonname, spp.dist05, year) %>%
distinct() %>%
group_by(commonname) %>%
nest() %>%
mutate(
model = map(data, ~lm(spp.dist05 ~ year, data = .x)),
tidymodel = map(model, tidy)
) %>%
unnest(tidymodel, .drop=TRUE) %>%
filter(term=="year")
poldat.edge.depth <- poldat.depth.lm %>%
rename(depth.coeff = estimate) %>%
left_join(poldat.lm %>% select(commonname, estimate), by="commonname") %>%
rename(edge.coeff = estimate) %>%
select(commonname, depth.coeff, edge.coeff)
cor.test(poldat.edge.depth$depth.coeff, poldat.edge.depth$edge.coeff, method="spearman")
eqdat.edge.depth <- eqdat.depth.lm %>%
rename(depth.coeff = estimate) %>%
left_join(eqdat.lm %>% select(commonname, estimate), by="commonname") %>%
rename(edge.coeff = estimate) %>%
select(commonname, depth.coeff, edge.coeff)
cor.test(eqdat.edge.depth$depth.coeff, eqdat.edge.depth$edge.coeff, method="spearman")
poldat.edge.abund <- poldat.abund.lm %>%
rename(abund.coeff = estimate) %>%
left_join(poldat.lm %>% select(commonname, estimate), by="commonname") %>%
rename(edge.coeff = estimate) %>%
select(commonname, abund.coeff, edge.coeff)
cor.test(poldat.edge.abund$abund.coeff, poldat.edge.abund$edge.coeff, method="spearman")
eqdat.edge.abund <- eqdat.abund.lm %>%
rename(abund.coeff = estimate) %>%
left_join(eqdat.lm %>% select(commonname, estimate), by="commonname") %>%
rename(edge.coeff = estimate) %>%
select(commonname, abund.coeff, edge.coeff)
cor.test(eqdat.edge.abund$abund.coeff, eqdat.edge.abund$edge.coeff, method="spearman")
|
#Lab III
#Team 8
# Predict customer churn with four tree-based methods (boosting, bagging,
# random forest, single classification tree) and compare their error rates.
rm(list=ls())
library(tree)
library(gbm)
# NOTE(review): require() is meant for conditional use inside functions; for
# unconditional loading, library() is preferred because it errors on failure.
require(randomForest)
require(rpart)
require(rpart.plot)
require(rattle)
#Import and clean data
telecomData = read.csv("Lab3Data.csv")
#removes rows containing any NA
telecomData = telecomData[complete.cases(telecomData),]
#look at the telecomData dataframe
str(telecomData)
#drop the customerID column (an identifier, not a predictor)
telecomData<-subset(select=-c(customerID), telecomData)
#turn SeniorCitizen (coded 0/1) into a factor
telecomData$SeniorCitizen <- factor(telecomData$SeniorCitizen)
#create a results data frame: one error rate per method
final.results <- data.frame(matrix(ncol = 1, nrow = 4))
colnames(final.results) <- c("Error Rate")
rownames(final.results) <- c("Boosting", "Bagging", "Random Forest", "Classification Tree")
#############################################
################### Boosting ################
#############################################
set.seed(2016)  # reset for reproducible train/test splits
telecomData_Boosting <- telecomData
# gbm() needs a numeric 0/1 response, so recode Churn ("No"/"Yes") into a new
# numeric column, drop the original factor (column 20), and rename the recoded
# column back to "Churn".
telecomData_Boosting$Churn_b[telecomData_Boosting$Churn=="No"] <- 0
telecomData_Boosting$Churn_b[telecomData_Boosting$Churn=="Yes"] <- 1
telecomData_Boosting <- telecomData_Boosting[,-20]
colnames(telecomData_Boosting)[20] <- "Churn"
# 80/20 training/testing split.
train <- sample(1:nrow(telecomData_Boosting), nrow(telecomData)*0.8)
test <- telecomData_Boosting[-train,]
churn.test <- telecomData_Boosting$Churn[-train]
# Fit the boosted model: 5000 trees, depth-4 interactions, 5-fold CV.
boost.tel <- gbm(Churn~., data=telecomData_Boosting[train,], n.trees=5000, interaction.depth=4, n.minobsinnode=0, bag.fraction = .3, cv.folds = 5, shrinkage = 10^-3.1)
# Predict churn probabilities on the held-out set and threshold at 0.5.
yhat.boost <- predict(boost.tel, newdata=telecomData_Boosting[-train,], n.trees=5000, type="response")
yhat.boost <- ifelse(yhat.boost<.5, 0, 1)
# Confusion matrix: rows = actual, columns = predicted.
confusionMatrix <- table(churn.test, yhat.boost, dnn=c("Actual","Predicted"))
confusionMatrix
right <- confusionMatrix[1] + confusionMatrix[4]  # diagonal: correct predictions
wrong <- confusionMatrix[2] + confusionMatrix[3]  # off-diagonal: misclassifications
error_rate <- wrong/(right+wrong)*100
success_rate <- right/(right+wrong)*100
final.results[1,1] <- error_rate
###Additions to test new data from Murray###
newData <- read.csv("ChurnDataTest.csv")
#turn SeniorCitizen into a factor
# BUG FIX: the original converted telecomData$SeniorCitizen (the training
# data's column) instead of newData's own column, assigning the wrong values
# into newData (and a length mismatch unless both files happen to have the
# same number of rows).
newData$SeniorCitizen <- factor(newData$SeniorCitizen)
#############################################
################### Bagging #################
#############################################
set.seed(2016)#reset for testing
telecomData_Bagging <- telecomData
# Bagging = random forest with mtry equal to the number of predictors (19),
# so every split considers all variables.
bag.telecomData = randomForest(Churn~., data=telecomData_Bagging, mtry=19, importance =TRUE)
# Last row of err.rate[,1] is the out-of-bag (OOB) error after all trees.
# NOTE(review): boosting and the classification tree report a held-out test
# error, while bagging and the random forest report OOB error on the full
# data -- the four rows of final.results are not strictly comparable.
error_rate <- tail(bag.telecomData$err.rate[,1],1)*100
final.results[2,1] = round(error_rate, 2)
#############################################
################### Random Forest ###########
#############################################
set.seed(2016)#reset for testing
telecomData_RandomForest <- telecomData
#Grow a random forest by using a smaller value of the mtry argument
rf.churn = randomForest(Churn~., data=telecomData_RandomForest, mtry=2, ntree = 100, importance =TRUE)
# OOB error of the final forest, as a percentage.
error_rate <- tail(rf.churn$err.rate[,1],1)*100
final.results[3,1] = round(error_rate,2)
#############################################
################### Classification Tree #####
#############################################
set.seed(2016)#reset for testing
telecomData_ClassificationTree <- telecomData
#training and testing split
percTrain = 0.8 #specifies the proportion of the data to be withheld for training. The remainder is used for testing.
train = sample(1:nrow(telecomData_ClassificationTree), nrow(telecomData_ClassificationTree)*percTrain)
test = telecomData_ClassificationTree[-train,]
churn.test = telecomData_ClassificationTree$Churn[-train]
#build Rpart decision tree with training data
tree.trainChurn = rpart(Churn~., telecomData_ClassificationTree, subset = train, method = "class")
#plot the tree
#plot(tree.trainChurn, main="Base Train Churn Decision Tree")
#text(tree.trainChurn, pretty = 0)
#examine the tree cross validation
#printcp(tree.trainChurn) #examine cross validation error results
#plotcp(tree.trainChurn)#plot cross validation error results
#prune the tree by selecting the complexity parameter that minimizes cross-validation error
minCV = which.min(tree.trainChurn$cptable[,"xerror"])
tree.trainChurn = prune(tree.trainChurn, cp=tree.trainChurn$cptable[minCV, "CP"])
#plot the pruned tree both normally, and with the fancy colored plot
#plot(tree.trainChurn, main="Pruned Churn Decision Tree")
#text(tree.trainChurn, pretty = 0)
#fancyRpartPlot(tree.trainChurn)
#make predictions on the withheld data using the pruned tree
tree.pred = predict(tree.trainChurn, newdata=test, type="class")
#always want actuals as the rows, predicted in the columns ## table(rows,columns), with null hypothesis before the alt
confusionMatrix = table(churn.test, tree.pred, dnn=c("Actual","Predicted"))
percConfMatrx=round(100*confusionMatrix/length(tree.pred), digits = 2) #give the table in terms of the percentages to two decimal spots. Table should add up to 100
success_rate=percConfMatrx[1,1]+percConfMatrx[2,2]
#overall error rate = sum of the two off-diagonal percentages
error_rate=percConfMatrx[1,2]+percConfMatrx[2,1]
typeIErr=percConfMatrx[1,2]
typeIIErr=percConfMatrx[2,1]
final.results[4,1] = error_rate
###################################
###############FINAL RESULTS#######
###################################
final.results | /Machine Learning II/Lab III/Lab_3_Team_8.R | no_license | King-AK/MSBA | R | false | false | 5,472 | r | #Lab III
#Team 8
rm(list=ls())
library(tree)
library(gbm)
require(randomForest)
require(rpart)
require(rpart.plot)
require(rattle)
#Import and clean data
telecomData = read.csv("Lab3Data.csv")
#removes NAs
telecomData = telecomData[complete.cases(telecomData),]
#look at the telecomData dataframe
str(telecomData)
#drop the customerID column
telecomData<-subset(select=-c(customerID), telecomData)
#turn SeniorCitizen into a factor
telecomData$SeniorCitizen <- factor(telecomData$SeniorCitizen)
#create a results data frame
final.results <- data.frame(matrix(ncol = 1, nrow = 4))
colnames(final.results) <- c("Error Rate")
rownames(final.results) <- c("Boosting", "Bagging", "Random Forest", "Classification Tree")
#############################################
################### Boosting ################
#############################################
set.seed(2016)#reset for testing
telecomData_Boosting <- telecomData
#change Churn levels to 0 and 1
telecomData_Boosting$Churn_b[telecomData_Boosting$Churn=="No"] <- 0
telecomData_Boosting$Churn_b[telecomData_Boosting$Churn=="Yes"] <- 1
telecomData_Boosting<-telecomData_Boosting[,-20]
colnames(telecomData_Boosting)[20] <- "Churn"
#training and testing split
train = sample(1:nrow(telecomData_Boosting), nrow(telecomData)*0.8)
test = telecomData_Boosting[-train,]
churn.test = telecomData_Boosting$Churn[-train]
#create model
boost.tel=gbm(Churn~.,data=telecomData_Boosting[train,],n.trees=5000,interaction.depth=4, n.minobsinnode=0, bag.fraction = .3, cv.folds = 5, shrinkage = 10^-3.1)
#predict Churn on the test set
yhat.boost=predict(boost.tel,newdata=telecomData_Boosting[-train,],n.trees=5000, type="response")
yhat.boost = ifelse(yhat.boost<.5, 0, 1)
#Create confusion matrix
confusionMatrix = table(churn.test, yhat.boost, dnn=c("Actual","Predicted"))
confusionMatrix
right = confusionMatrix[1] + confusionMatrix[4]
wrong = confusionMatrix[2] + confusionMatrix[3]
error_rate = wrong/(right+wrong)*100
success_rate = right/(right+wrong)*100
final.results[1,1] = error_rate
###Additions to test new data from Murray###
newData<-read.csv("ChurnDataTest.csv")
#turn SeniorCitizen into a factor
# BUG FIX: the original converted telecomData$SeniorCitizen (training data)
# instead of newData's own column; newData must be converted in place.
newData$SeniorCitizen <- factor(newData$SeniorCitizen)
#############################################
################### Bagging #################
#############################################
set.seed(2016)#reset for testing
telecomData_Bagging <- telecomData
bag.telecomData = randomForest(Churn~., data=telecomData_Bagging, mtry=19, importance =TRUE)
error_rate <- tail(bag.telecomData$err.rate[,1],1)*100
# Tail of a churn-model comparison script: fits a random forest and a pruned
# classification tree on telecomData, recording each model's error rate (%)
# into rows 2-4 of the pre-allocated final.results matrix (built earlier).
final.results[2,1] = round(error_rate, 2)
#############################################
################### Random Forest ###########
#############################################
set.seed(2016)#reset for testing
telecomData_RandomForest <- telecomData
#Grow a random forest by using a smaller value of the mtry argument
# mtry=2 candidate predictors per split; forest of 100 trees.
rf.churn = randomForest(Churn~., data=telecomData_RandomForest, mtry=2, ntree = 100, importance =TRUE)
# Out-of-bag error rate of the full forest (last entry of err.rate), as a %.
error_rate <- tail(rf.churn$err.rate[,1],1)*100
final.results[3,1] = round(error_rate,2)
#############################################
################### Classification Tree #####
#############################################
set.seed(2016)#reset for testing
telecomData_ClassificationTree <- telecomData
#training and testing split
percTrain = 0.8 #specifies the proportion of the data to be withheld for training. The remainder is used for testing.
train = sample(1:nrow(telecomData_ClassificationTree), nrow(telecomData_ClassificationTree)*percTrain)
test = telecomData_ClassificationTree[-train,]
churn.test = telecomData_ClassificationTree$Churn[-train]
#build Rpart decision tree with training data
tree.trainChurn = rpart(Churn~., telecomData_ClassificationTree, subset = train, method = "class")
#plot the tree
#plot(tree.trainChurn, main="Base Train Churn Decision Tree")
#text(tree.trainChurn, pretty = 0)
#examine the tree cross validation
#printcp(tree.trainChurn) #examine cross validation error results
#plotcp(tree.trainChurn)#plot cross validation error results
#prune the tree by selecting the tree which mininizes cross validation error
minCV = which.min(tree.trainChurn$cptable[,"xerror"])
tree.trainChurn = prune(tree.trainChurn, cp=tree.trainChurn$cptable[minCV, "CP"])
#plot the pruned tree both normally, and with the fancy colored plot
#plot(tree.trainChurn, main="Pruned Churn Decision Tree")
#text(tree.trainChurn, pretty = 0)
#fancyRpartPlot(tree.trainChurn)
#make predictions on the withheld data using the pruned tree
tree.pred = predict(tree.trainChurn, newdata=test, type="class")
#always want actuals as the rows, predicted in the columns ## table(rows,columns), with null hypothesis before the alt
confusionMatrix = table(churn.test, tree.pred, dnn=c("Actual","Predicted"))
percConfMatrx=round(100*confusionMatrix/length(tree.pred), digits = 2) #give the table in terms of the percentages to two decimal spots. Table should add up to 100
success_rate=percConfMatrx[1,1]+percConfMatrx[2,2]
error_rate=percConfMatrx[1,2]+percConfMatrx[2,1]
# Off-diagonal percentages (computed but not written into final.results
# within this section).
typeIErr=percConfMatrx[1,2]
typeIIErr=percConfMatrx[2,1]
final.results[4,1] = error_rate
###################################
###############FINAL RESULTS#######
###################################
final.results |
# Command-line dispatcher for the imputation routines defined in new_imp.R.
# Usage: Rscript run_script.R <method> <csv> [<iterations>]
#   arg 1: imputation method ("mean.imp" or "mult.imp")
#   arg 2: path to the input csv
#   arg 3: number of multiple-imputation iterations (mult.imp only)
args <- commandArgs(TRUE)
source("new_imp.R")
# Fail fast with a usage message instead of dispatching on the string "NA"
# when arguments are missing.
if (length(args) < 2) {
  stop("Usage: Rscript run_script.R <method> <csv> [<iterations>]", call. = FALSE)
}
# Renamed from `switch`, which shadowed base::switch.
method <- as.character(args[1])
file <- as.character(args[2])
iters <- as.integer(args[3])  # NA when the third argument is absent
if (method == "mean.imp") {
  mean.imp(file)
#} else if (method == "regress.imp") {
#  regress.imp(file)
} else if (method == "mult.imp") {
  mult.imp(file, iters)
} else {
  stop("Imputation method does not exist!")
}
| /run_script.R | no_license | mjnichol/pssp_imputation | R | false | false | 445 | r | args<-commandArgs(TRUE)
# Dispatch to the requested imputation routine (see new_imp.R).
# `args` is set by the preceding commandArgs(TRUE) line.
source("new_imp.R")
# first argument is the method
# second argument is the csv
# third argument is the number of times to run
if (length(args) < 2) {
  stop("Usage: Rscript run_script.R <method> <csv> [<iterations>]", call. = FALSE)
}
# Renamed from `switch`, which shadowed base::switch.
method <- as.character(args[1])
file <- as.character(args[2])
iters <- as.integer(args[3])  # NA when the third argument is absent
if (method == "mean.imp") {
  mean.imp(file)
#} else if (method == "regress.imp") {
#  regress.imp(file)
} else if (method == "mult.imp") {
  mult.imp(file, iters)
} else {
  stop("Imputation method does not exist!")
}
|
# Pendulum.R
# S4 model of a simple nonlinear pendulum integrated by an Euler-Richardson
# ODE solver.  state = c(theta, dtheta/dt, t); omega0Squared is the squared
# natural frequency appearing in the restoring term -omega0^2 * sin(theta).
source("./R/ODE.R")
source("./R/EulerRichardson.R")
# setGeneric("setState", function(object, theta, thetaDot, ...)
# standardGeneric("setState"))
setClass("Pendulum", slots = c(
omega0Squared = "numeric",
state = "numeric",
odeSolver = "EulerRichardson"
),
prototype = prototype(
omega0Squared = 3,
state = c(0, 0, 0)
),
contains = c("ODE")
)
# Initialization hook: attach a solver wrapping this ODE instance.
setMethod("initialize", "Pendulum", function(.Object) {
.Object@odeSolver <- EulerRichardson(.Object)
return(.Object)
})
# Forward the step size to the wrapped solver; returns the modified copy
# (S4 value semantics -- callers must reassign).
setMethod("setStepSize", signature("Pendulum"), function(object, dt, ...) {
# use explicit parameter declaration
# setStepSize generic may use two different step parameters: stepSize and dt
object@odeSolver <- setStepSize(object@odeSolver, dt)
object
})
# Advance one integration step and mirror the solver's state/rate back onto
# the Pendulum object so both copies stay in sync.
setMethod("step", "Pendulum", function(object) {
object@odeSolver <- step(object@odeSolver)
object@rate <- object@odeSolver@ode@rate
object@state <- object@odeSolver@ode@state
object
})
# Set the initial angle and angular velocity; time (state[3]) is untouched.
setMethod("setState", signature("Pendulum"), function(object, theta, thetaDot, ...) {
object@state[1] <- theta # angle
object@state[2] <- thetaDot # derivative of angle
# state[3] is time
object@odeSolver@ode@state <- object@state
object
})
setMethod("getState", "Pendulum", function(object) {
object@state
})
# Rate function used by the solver: fills `rate` from the supplied `state`
# and caches both on the object and its nested solver copy.
setMethod("getRate", "Pendulum", function(object, state, rate) {
rate[1] <- state[2] # rate of change of angle
rate[2] <- -object@omega0Squared * sin(state[1]) # rate of change of dtheta
rate[3] <- 1 # rate of change of time, dt/dt
object@state <- object@odeSolver@ode@state <- state
object@rate <- object@odeSolver@ode@rate <- rate
# object@rate
invisible(object)
})
# constructor
Pendulum <- function() new("Pendulum")
| /R/Pendulum.R | no_license | stanleesocca/scientific-computing-r | R | false | false | 2,006 | r | # Pendulum.R
# NOTE(review): duplicate copy of Pendulum.R (dataset-extraction artifact);
# see the first copy above for full commentary.
source("./R/ODE.R")
source("./R/EulerRichardson.R")
# setGeneric("setState", function(object, theta, thetaDot, ...)
# standardGeneric("setState"))
setClass("Pendulum", slots = c(
omega0Squared = "numeric",
state = "numeric",
odeSolver = "EulerRichardson"
),
prototype = prototype(
omega0Squared = 3,
state = c(0, 0, 0)
),
contains = c("ODE")
)
setMethod("initialize", "Pendulum", function(.Object) {
.Object@odeSolver <- EulerRichardson(.Object)
return(.Object)
})
setMethod("setStepSize", signature("Pendulum"), function(object, dt, ...) {
# use explicit parameter declaration
# setStepSize generic may use two different step parameters: stepSize and dt
object@odeSolver <- setStepSize(object@odeSolver, dt)
object
})
setMethod("step", "Pendulum", function(object) {
object@odeSolver <- step(object@odeSolver)
object@rate <- object@odeSolver@ode@rate
object@state <- object@odeSolver@ode@state
object
})
setMethod("setState", signature("Pendulum"), function(object, theta, thetaDot, ...) {
object@state[1] <- theta # angle
object@state[2] <- thetaDot # derivative of angle
# state[3] is time
object@odeSolver@ode@state <- object@state
object
})
setMethod("getState", "Pendulum", function(object) {
object@state
})
setMethod("getRate", "Pendulum", function(object, state, rate) {
rate[1] <- state[2] # rate of change of angle
rate[2] <- -object@omega0Squared * sin(state[1]) # rate of change of dtheta
rate[3] <- 1 # rate of change of time, dt/dt
object@state <- object@odeSolver@ode@state <- state
object@rate <- object@odeSolver@ode@rate <- rate
# object@rate
invisible(object)
})
# constructor
Pendulum <- function() new("Pendulum")
|
# Extracted example from the ckanr package documentation for related_show()
# (display a "related item" attached to a CKAN dataset).  All example calls
# are inside "Not run" guards, so sourcing this file only attaches ckanr.
library(ckanr)
### Name: related_show
### Title: Show a related item
### Aliases: related_show
### ** Examples
## Not run:
##D # Setup
##D ckanr_setup(url = "http://demo.ckan.org/", key = getOption("ckan_demo_key"))
##D
##D # create a package and a related item
##D res <- package_create("hello-pluto2") %>%
##D related_create(title = "my resource",
##D type = "visualization")
##D
##D # show the related item
##D related_show(res)
##D related_show(res$id)
##D
##D # get data back in different formats
##D related_show(res, as = 'json')
##D related_show(res, as = 'table')
## End(Not run)
| /data/genthat_extracted_code/ckanr/examples/related_show.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 621 | r | library(ckanr)
# NOTE(review): duplicate copy of the related_show() example above
# (dataset-extraction artifact; its library(ckanr) line was fused into the
# preceding metadata row).
### Name: related_show
### Title: Show a related item
### Aliases: related_show
### ** Examples
## Not run:
##D # Setup
##D ckanr_setup(url = "http://demo.ckan.org/", key = getOption("ckan_demo_key"))
##D
##D # create a package and a related item
##D res <- package_create("hello-pluto2") %>%
##D related_create(title = "my resource",
##D type = "visualization")
##D
##D # show the related item
##D related_show(res)
##D related_show(res$id)
##D
##D # get data back in different formats
##D related_show(res, as = 'json')
##D related_show(res, as = 'table')
## End(Not run)
|
##################
# Load Libraries #
##################
# Install any missing analysis dependencies from CRAN, then attach them all.
gcinfo(FALSE)
packagelist <- c(
  "RNeo4j", "ggplot2", "wesanderson", "igraph", "visNetwork", "scales",
  "plyr", "cowplot", "vegan", "reshape2", "stringr"
)
# installed.packages() rownames are the package names.
missing_pkgs <- setdiff(packagelist, rownames(installed.packages()))
if (length(missing_pkgs) > 0) {
  install.packages(missing_pkgs, repos = "http://cran.us.r-project.org")
}
lapply(packagelist, library, character.only = TRUE)
##############
# Diet Graph #
##############
# Start the connection to the graph
# If you are getting a lack of permission, disable local permission on Neo4J
graph <- startGraph("http://localhost:7474/db/data/", "neo4j", "root")
# Get list of the sample IDs
# Cypher query: for study SRP002424, pull every phage--bacterial-host pair
# observed in the same patient/time point, with abundances, contig lengths,
# and the sample's Disease (diet) label.
sampleidquery <- "
MATCH
(x:SRP002424)-->(y)-[d]->(z:Phage)-->(a:Bacterial_Host)<-[e]-(b),
(b)<--(i:PatientID)-->(y),
(b)<--(t:TimePoint)-->(y),
(k:Disease)-->(y)
WHERE toInt(d.Abundance) > 0
OR toInt(e.Abundance) > 0
RETURN DISTINCT
z.Name AS from,
a.Name AS to,
i.Name AS PatientID,
t.Name AS TimePoint,
k.Name AS Diet,
toInt(d.Abundance) AS PhageAbundance,
toInt(z.Length) AS PhageLength,
toInt(e.Abundance) AS BacteriaAbundance,
toInt(a.Length) AS BacteriaLength;
"
sampletable <- as.data.frame(cypher(graph, sampleidquery))
# Correct the lengths
# Length-normalize abundances: reads per 1e7 bp of contig, rounded.
sampletable$PhageAbundance <- round(1e7 * sampletable$PhageAbundance / sampletable$PhageLength)
sampletable$BacteriaAbundance <- round(1e7 * sampletable$BacteriaAbundance / sampletable$BacteriaLength)
# Drop the two length columns (9 = BacteriaLength first, then 7 = PhageLength).
sampletable <- sampletable[,-9]
sampletable <- sampletable[,-7]
head(sampletable)
# get subsampling depth
# Rarefaction depth = smallest per-sample (patient x time point) total.
phageminseq <- min(ddply(sampletable, c("PatientID", "TimePoint"), summarize, sum = sum(PhageAbundance))$sum)
bacminseq <- min(ddply(sampletable, c("PatientID", "TimePoint"), summarize, sum = sum(BacteriaAbundance))$sum)
# Rarefy each sample using sequence counts
# vegan::rrarefy draws a fixed number of reads per patient/time point so
# samples are comparable; results depend on RNG state (no set.seed visible
# in this section -- TODO confirm one is set upstream for reproducibility).
rout <- lapply(unique(sampletable$PatientID), function(i) {
subsetdfout <- as.data.frame(sampletable[c(sampletable$PatientID %in% i),])
outputin <- lapply(unique(subsetdfout$TimePoint), function(j) {
subsetdfin <- subsetdfout[c(subsetdfout$TimePoint %in% j),]
subsetdfin$PhageAbundance <- c(rrarefy(subsetdfin$PhageAbundance, sample = phageminseq))
subsetdfin$BacteriaAbundance <- c(rrarefy(subsetdfin$BacteriaAbundance, sample = bacminseq))
return(subsetdfin)
})
forresult <- as.data.frame(do.call(rbind, outputin))
return(forresult)
})
# Finish making subsampled data frame
rdf <- as.data.frame(do.call(rbind, rout))
# Remove those without bacteria or phage nodes after subsampling
# Zero here means loss of the node
rdf <- rdf[!c(rdf$PhageAbundance == 0 | rdf$BacteriaAbundance == 0),]
# Calculate edge values from nodes
# Edge weight = log10 of the product of the two endpoint abundances.
rdf$edge <- log10(rdf$PhageAbundance * rdf$BacteriaAbundance)
# Make a list of subgraphs for each of the samples
# This will be used for diversity, centrality, etc
# One undirected weighted igraph per patient/time point, with sample
# metadata stored as vertex attributes.
routdiv <- lapply(unique(rdf$PatientID), function(i) {
subsetdfout <- as.data.frame(rdf[c(rdf$PatientID %in% i),])
outputin <- lapply(unique(subsetdfout$TimePoint), function(j) {
subsetdfin <- subsetdfout[c(subsetdfout$TimePoint %in% j),]
lapgraph <- graph_from_data_frame(subsetdfin[,c("to", "from")], directed = FALSE)
E(lapgraph)$weight <- subsetdfin[,c("edge")]
V(lapgraph)$timepoint <- j
V(lapgraph)$patientid <- i
diettype <- unique(subsetdfin$Diet)
V(lapgraph)$diet <- diettype
return(lapgraph)
})
return(outputin)
})
##### Eigen Vector Centrality #####
# Per-sample eigenvector centrality of every node, reshaped into a
# sample x node matrix, then Bray-Curtis distances between samples.
rcen <- lapply(c(1:length(routdiv)), function(i) {
listelement <- routdiv[[ i ]]
outputin <- lapply(c(1:length(listelement)), function(j) {
listgraph <- listelement[[ j ]]
centraldf <- as.data.frame(eigen_centrality(listgraph)$vector)
colnames(centraldf) <- "ecen"
centraldf$names <- row.names(centraldf)
centraldf$patient <- unique(V(listgraph)$patientid)
centraldf$tp <- unique(V(listgraph)$timepoint)
centraldf$diettype <- unique(V(listgraph)$diet)
return(centraldf)
})
forresult <- as.data.frame(do.call(rbind, outputin))
return(forresult)
})
rcdf <- as.data.frame(do.call(rbind, rcen))
rcast <- dcast(rcdf, patient + tp ~ names, value.var = "ecen")
rcast[is.na(rcast)] <- 0
rownames(rcast) <- paste(rcast$patient, rcast$tp, sep = "_")
# Subject 2012 is excluded -- reason not documented here; TODO confirm.
rcast <- rcast[!c(rcast$patient == 2012),]
rcast <- rcast[,-c(1:2)]
rdist <- vegdist(rcast, method = "bray")
# Long-format pairwise distances; NOTE the data frame is named `rm`,
# shadowing base::rm (function calls still resolve to the base function).
rdm <- melt(as.matrix(rdist))
rm <- cbind(rdm, as.data.frame(str_split_fixed(rdm$Var1, "_", 2)))
rm <- cbind(rm, as.data.frame(str_split_fixed(rm$Var2, "_", 2)))
rm <- rm[,-c(1:2)]
colnames(rm) <- c("ec", "patient1", "time1", "patient2", "time2")
# Drop self-comparisons (distance 0).
rm <- rm[!c(rm$ec == 0),]
rm$class <- ifelse(rm$patient1 == rm$patient2, "Intrapersonal", "Interpersonal")
# Mean intra-/interpersonal distance per subject, and the per-subject
# difference (negative y = intrapersonal closer than interpersonal).
ravg <- ddply(rm, c("patient1", "class"), summarize, avg = mean(ec))
ravgslope <- lapply(unique(ravg$patient1), function(i) {
y <- ravg[c(ravg$class %in% "Intrapersonal" & ravg$patient1 %in% i), "avg"] - ravg[c(ravg$class %in% "Interpersonal" & ravg$patient1 %in% i), "avg"]
return(data.frame(i, y))
})
ravgslope <- do.call(rbind, ravgslope)
# Fraction of subjects whose intrapersonal distance is the smaller one.
sum(c(ravgslope$y <= 0) + 0) / length(ravgslope$y)
# Statistical significance
chg <- ravgslope$y
# Kernel-density estimate; NOTE the variable name `pdf` shadows
# grDevices::pdf (later pdf(...) calls still find the device function).
pdf <- density(chg)
fx <- approxfun(pdf$x, pdf$y, yleft=0, yright=0)
# Density-based probability mass below zero.
cdfdiet <- integrate(fx, -Inf, 0)
cdfdiet
pdietpvalue <- wilcox.test(data = ravg, avg ~ class, paired = TRUE)$p.value
# Paired line plot (intra vs inter per subject) annotated with the p-value.
linediet <- ggplot(ravg, aes(x = class, y = avg, group = patient1)) +
theme_classic() +
theme(
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black")
) +
geom_line(colour = wes_palette("Royal1")[2]) +
geom_point(colour = "black") +
ylab("EV Centrality Distance") +
xlab("") +
ylim(0, NA) +
annotate("text", x = 1.5, y = 0, label = paste("p-value = ", signif(pdietpvalue, digits = 3), sep = ""))
# Density of the per-subject differences, dashed line at zero.
densitydiet <- ggplot(ravgslope, aes(y)) +
theme_classic() +
theme(
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black")
) +
geom_density() +
geom_vline(xintercept = 0, linetype = "dashed") +
ylab("Probability") +
xlab("Intrapersonal Change") +
xlim(range(pdf$x))
intrabetadiv <- plot_grid(linediet, densitydiet, rel_heights = c(4, 1), ncol = 1)
## Ordination ###
# NMDS (k = 2) on the centrality Bray-Curtis distances, colored by subject;
# ANOSIM tests whether samples cluster by subject.
ORD_NMDS <- metaMDS(comm = rdist, k=2)
ORD_FIT = data.frame(MDS1 = ORD_NMDS$points[,1], MDS2 = ORD_NMDS$points[,2])
ORD_FIT$SampleID <- rownames(ORD_FIT)
# Get metadata
# V1 = subject id, V2 = time point (parsed from "patient_timepoint" names).
ORD_FIT <- cbind(ORD_FIT, as.data.frame(str_split_fixed(ORD_FIT$SampleID, "_", 2)))
plotnmds_dietstudy <- ggplot(ORD_FIT, aes(x=MDS1, y=MDS2, colour=factor(V1))) +
theme_classic() +
theme(
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
legend.position = "bottom"
) +
geom_point() +
scale_colour_manual(values = wes_palette("Darjeeling"), name = "Subject")
plotnmds_dietstudy
anosimstat <- anosim(rdist, ORD_FIT$V1)
##############
# Skin Graph #
##############
# Import graphs into a list
# Per-site edge tables were precomputed and saved as Rdata files (each load
# restores an object named `sampletable`); concatenate across sites.
skinsites <- c("Ax", "Ac", "Pa", "Tw", "Um", "Fh", "Ra")
# Start list
graphdfTP2 <- data.frame()
graphdfTP3 <- data.frame()
for (i in skinsites) {
print(i)
filename <- paste("./data/skingraph-", i, ".Rdata", sep = "")
load(file = filename)
graphdfTP2 <- rbind(graphdfTP2, sampletable)
rm(sampletable)
}
rm(i)
for (i in skinsites) {
print(i)
filename <- paste("./data/skingraph-", i, "-TP3.Rdata", sep = "")
load(file = filename)
graphdfTP3 <- rbind(graphdfTP3, sampletable)
rm(sampletable)
}
rm(i)
totalgraph <- rbind(graphdfTP2, graphdfTP3)
# Correct the lengths
# Same length normalization as the diet section: reads per 1e7 bp of contig.
totalgraph$PhageAbundance <- round(1e7 * totalgraph$PhageAbundance / totalgraph$PhageLength)
totalgraph$BacteriaAbundance <- round(1e7 * totalgraph$BacteriaAbundance / totalgraph$BacteriaLength)
totalgraph <- totalgraph[,-9]
totalgraph <- totalgraph[,-7]
# See the object size
format(object.size(totalgraph), units = "MB")
# Run subsampling
# NOTE: phageminseq/bacminseq are reassigned here, overwriting the diet-study
# values; depth is the 5th percentile of per-sample totals rather than the min.
uniquephagegraph <- unique(totalgraph[-c(2,7)])
phageminseq <- quantile(ddply(uniquephagegraph, c("PatientID", "Location", "TimePoint"), summarize, sum = sum(as.numeric(PhageAbundance)))$sum, 0.05)
print(format(object.size(uniquephagegraph), units = "MB"))
uniquebacteriagraph <- unique(totalgraph[-c(1,6)])
bacminseq <- quantile(ddply(uniquebacteriagraph, c("PatientID", "Location", "TimePoint"), summarize, sum = sum(as.numeric(BacteriaAbundance)))$sum, 0.05)
# NOTE(review): this prints the phage table size a second time; probably
# meant uniquebacteriagraph -- diagnostic only.
print(format(object.size(uniquephagegraph), units = "MB"))
# Rarefy each sample using sequence counts
# Nested loops: patient -> time point -> skin site.  Samples whose total
# phage abundance falls below the 5th-percentile depth are dropped (NULL
# elements vanish in the rbind).
rout <- lapply(unique(uniquephagegraph$PatientID), function(i) {
outputout <- lapply(unique(uniquephagegraph$TimePoint), function(t) {
outputin <- lapply(unique(as.data.frame(uniquephagegraph[c(uniquephagegraph$PatientID %in% i & uniquephagegraph$TimePoint %in% t),])$Location), function(j) {
print(c(i, t, j))
subsetdfin <- as.data.frame(uniquephagegraph[c(uniquephagegraph$PatientID %in% i & uniquephagegraph$TimePoint %in% t & uniquephagegraph$Location %in% j),])
if (sum(subsetdfin$PhageAbundance) >= phageminseq) {
subsetdfin$PhageAbundance <- c(rrarefy(subsetdfin$PhageAbundance, sample = phageminseq))
return(subsetdfin)
} else {
NULL
}
})
forresult <- as.data.frame(do.call(rbind, outputin))
rm(outputin)
return(forresult)
})
outresult <- as.data.frame(do.call(rbind, outputout))
rm(outputout)
return(outresult)
})
rdfphage <- as.data.frame(do.call(rbind, rout))
# Check the results
ddply(rdfphage, c("PatientID", "Location", "TimePoint"), summarize, sum = sum(as.numeric(PhageAbundance)))
# Composite key (phage__patient__site__timepoint) for the later merge.
rdfphage$combophage <- paste(rdfphage$from, rdfphage$PatientID, rdfphage$Location, rdfphage$TimePoint, sep = "__")
rdfphage <- rdfphage[-c(1:4)]
# Rarefy the bacterial abundances per patient/time point/skin site,
# mirroring the phage rarefaction above.  Samples below the bacterial
# subsampling depth are dropped (NULL elements vanish in the rbind).
rout <- lapply(unique(uniquebacteriagraph$PatientID), function(i) {
  outputout <- lapply(unique(uniquebacteriagraph$TimePoint), function(t) {
    outputin <- lapply(unique(as.data.frame(uniquebacteriagraph[c(uniquebacteriagraph$PatientID %in% i & uniquebacteriagraph$TimePoint %in% t),])$Location), function(j) {
      print(c(i, t, j))
      subsetdfin <- as.data.frame(uniquebacteriagraph[c(uniquebacteriagraph$PatientID %in% i & uniquebacteriagraph$TimePoint %in% t & uniquebacteriagraph$Location %in% j),])
      # BUG FIX: the depth check previously compared against `phageminseq`
      # (copy-paste from the phage block) while rarefying to `bacminseq`,
      # so a sample could pass the check yet hold fewer bacterial reads
      # than the requested subsample size.
      if (sum(subsetdfin$BacteriaAbundance) >= bacminseq) {
        subsetdfin$BacteriaAbundance <- c(rrarefy(subsetdfin$BacteriaAbundance, sample = bacminseq))
        return(subsetdfin)
      } else {
        NULL
      }
    })
    forresult <- as.data.frame(do.call(rbind, outputin))
    rm(outputin)
    return(forresult)
  })
  outresult <- as.data.frame(do.call(rbind, outputout))
  rm(outputout)
  return(outresult)
})
rdfbacteria <- as.data.frame(do.call(rbind, rout))
# Sanity check: every retained sample should now sum to bacminseq.
ddply(rdfbacteria, c("PatientID", "Location", "TimePoint"), summarize, sum = sum(as.numeric(BacteriaAbundance)))
# Composite key (bacterium__patient__site__timepoint) for the later merge.
rdfbacteria$combobacteria <- paste(rdfbacteria$to, rdfbacteria$PatientID, rdfbacteria$Location, rdfbacteria$TimePoint, sep = "__")
rdfbacteria <- rdfbacteria[-c(1:4)]
# Merge the subsampled abundances back into the original file
totalgraphcombo <- totalgraph
totalgraphcombo$combophage <- paste(totalgraphcombo$from, totalgraphcombo$PatientID, totalgraphcombo$Location, totalgraphcombo$TimePoint, sep = "__")
totalgraphcombo$combobacteria <- paste(totalgraphcombo$to, totalgraphcombo$PatientID, totalgraphcombo$Location, totalgraphcombo$TimePoint, sep = "__")
totalgraphcombo <- totalgraphcombo[-c(1:7)]
format(object.size(totalgraphcombo), units = "MB")
format(object.size(rdfphage), units = "KB")
# Inner joins: an edge survives only if both endpoints survived rarefaction.
totalgraphmerge <- merge(totalgraphcombo, rdfphage, by = "combophage")
totalgraphmerge <- merge(totalgraphmerge, rdfbacteria, by = "combobacteria")
# Remove those without bacteria or phage nodes after subsampling
# Zero here means loss of the node
rdf <- totalgraphmerge[!c(totalgraphmerge$PhageAbundance == 0 | totalgraphmerge$BacteriaAbundance == 0),]
# Calculate edge values from nodes
# +0.0001 offset -- presumably to keep edge weights strictly positive
# (the diet section omits it); TODO confirm intent.
rdf$edge <- log10(rdf$PhageAbundance * rdf$BacteriaAbundance) + 0.0001
# Parse the values again
# Split the composite keys back into their component metadata columns.
rdf <- cbind(as.data.frame(str_split_fixed(rdf$combobacteria, "__", 4)), rdf)
rdf <- cbind(as.data.frame(str_split_fixed(rdf$combophage, "__", 4)), rdf)
rdf <- rdf[-c(2:4)]
rdf <- rdf[-c(6:7)]
colnames(rdf) <- c("from", "to", "PatientID", "Location", "TimePoint", "PhageAbundance", "BacteriaAbundance", "edge")
# Make a list of subgraphs for each of the samples
# This will be used for diversity, centrality, etc
# Triple-nested list: patient -> time point -> skin site, each leaf an
# undirected weighted igraph with sample metadata as vertex attributes.
routdiv <- lapply(unique(rdf$PatientID), function(i) {
print(i)
outtime <- lapply(unique(rdf$TimePoint), function(t) {
print(t)
subsetdfout <- as.data.frame(rdf[c(rdf$PatientID %in% i & rdf$TimePoint %in% t),])
outputin <- lapply(unique(subsetdfout$Location), function(j) {
print(j)
subsetdfin <- subsetdfout[c(subsetdfout$Location %in% j),]
lapgraph <- graph_from_data_frame(subsetdfin[,c("to", "from")], directed = FALSE)
E(lapgraph)$weight <- subsetdfin[,c("edge")]
print(as.character(j))
V(lapgraph)$location <- as.character(j)
V(lapgraph)$patientid <- as.character(i)
print(unique(V(lapgraph)$patientid))
V(lapgraph)$timepoint <- t
return(lapgraph)
})
return(outputin)
})
return(outtime)
})
# save(routdiv, file = "./data/quickskinplot.RData")
# NOTE(review): the load() below replaces the routdiv just computed with a
# cached copy -- apparently a development shortcut left enabled; verify the
# cache matches the current data before reusing these results.
load(file = "./data/quickskinplot.RData")
# Eigenvector centrality per skin subgraph, reshaped into a sample x node
# matrix for Bray-Curtis distances between samples.
rcen <- lapply(routdiv, function(i) {
outputout <- lapply(i, function(k) {
outputin <- lapply(k, function(j) {
centraldf <- as.data.frame(eigen_centrality(j)$vector)
colnames(centraldf) <- "ecen"
centraldf$names <- row.names(centraldf)
centraldf$patient <- unique(V(j)$patientid)
centraldf$Timepoint <- unique(V(j)$timepoint)
centraldf$location <- unique(V(j)$location)
print(c(unique(V(j)$patientid), unique(V(j)$timepoint), unique(V(j)$location)))
return(centraldf)
})
forresult <- as.data.frame(do.call(rbind, outputin))
return(forresult)
})
outresult <- as.data.frame(do.call(rbind, outputout))
return(outresult)
})
rcdf <- as.data.frame(do.call(rbind, rcen))
rcast <- dcast(rcdf, patient + Timepoint + location ~ names, value.var = "ecen")
rcast[is.na(rcast)] <- 0
rownames(rcast) <- paste(rcast$patient, rcast$Timepoint, rcast$location, sep = "_")
rcast <- rcast[,-c(1:3)]
rdistskin <- vegdist(rcast, method = "bray")
# Long-format distances with metadata parsed from the rownames;
# `rm` again shadows base::rm (function calls still resolve correctly).
rdm <- melt(as.matrix(rdistskin))
rm <- cbind(rdm, as.data.frame(str_split_fixed(rdm$Var1, "_", 4)))
rm <- cbind(rm, as.data.frame(str_split_fixed(rm$Var2, "_", 4)))
rm <- rm[,-c(1:2)]
rm <- rm[,-c(2,6)]
colnames(rm) <- c("ec", "patient1", "time1", "location1", "patient2", "time2", "location2")
rm <- rm[!c(rm$ec == 0),]
# Site metadata (moisture/occlusion); locationmetadata is not referenced
# again in this section.
moisture <- c("Moist", "IntMoist", "IntMoist", "Moist", "Moist", "Sebaceous", "Sebaceous")
occlusion <- c("Occluded", "IntOccluded", "Exposed", "Occluded", "Occluded", "Exposed", "Occluded")
locationmetadata <- data.frame(skinsites, moisture, occlusion)
# Interpersonal Differences
# Intrapersonal = same subject & site across time; Interpersonal = different
# subjects, same site and time.  Everything else is dropped (NA class).
rm[c(rm$patient1 == rm$patient2 & rm$location1 == rm$location2), "class"] <- "Intrapersonal"
rm[c(rm$patient1 != rm$patient2 & rm$time1 == rm$time2 & rm$location1 == rm$location2), "class"] <- "Interpersonal"
rm <- rm[complete.cases(rm),]
ravg <- ddply(rm, c("patient1", "class", "location1"), summarize, avg = mean(ec))
# Keep only patient/site pairs that have both an intra- and an inter- mean.
counta <- ddply(ravg, c("patient1", "location1"), summarize, count = length(unique(class)))
counta <- counta[c(counta$count == 2),]
ravg <- merge(ravg, counta, by = c("patient1", "location1"))
ravg$merged <- paste(ravg$patient1, ravg$location1, sep = "")
ravgslope <- lapply(unique(ravg$merged), function(i) {
y <- ravg[c(ravg$class %in% "Intrapersonal" & ravg$merged %in% i), "avg"] - ravg[c(ravg$class %in% "Interpersonal" & ravg$merged %in% i), "avg"]
return(data.frame(i, y))
})
ravgslope <- do.call(rbind, ravgslope)
chg <- ravgslope$y
# Density-based probability that intrapersonal < interpersonal distance.
pdf <- density(chg)
fx <- approxfun(pdf$x, pdf$y, yleft=0, yright=0)
cdfskin <- integrate(fx, -Inf, 0)
cdfskin
pskinpvalue <- wilcox.test(data = ravg, avg ~ class, paired = TRUE)$p.value
# Paired intra- vs interpersonal line plot across all skin sites combined.
skinline <- ggplot(ravg, aes(x = class, y = avg, group = merged)) +
theme_classic() +
theme(
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black")
) +
geom_line(colour = wes_palette("Royal1")[2]) +
geom_point(colour = "black") +
ylab("EV Centrality Distance") +
xlab("") +
ylim(0, NA) +
annotate("text", x = 1.5, y = 0, label = paste("p-value = ", signif(pskinpvalue, digits = 3), sep = ""))
# For supplemental, do by specific site
# (Recomputes the same ravg/ravgslope objects as above.)
ravg <- ddply(rm, c("patient1", "class", "location1"), summarize, avg = mean(ec))
counta <- ddply(ravg, c("patient1", "location1"), summarize, count = length(unique(class)))
counta <- counta[c(counta$count == 2),]
ravg <- merge(ravg, counta, by = c("patient1", "location1"))
ravg$merged <- paste(ravg$patient1, ravg$location1, sep = "")
ravgslope <- lapply(unique(ravg$merged), function(i) {
y <- ravg[c(ravg$class %in% "Intrapersonal" & ravg$merged %in% i), "avg"] - ravg[c(ravg$class %in% "Interpersonal" & ravg$merged %in% i), "avg"]
return(data.frame(i, y))
})
ravgslope <- do.call(rbind, ravgslope)
# Split the merged id back into site code (letters) and subject (digits).
ravgslope$location <- gsub("\\d+", "", ravgslope$i, perl = TRUE)
ravgslope$subject <- gsub("\\D+", "", ravgslope$i, perl = TRUE)
# Density-based probability for the forehead (Fh) site only.
chg <- ravgslope[c(ravgslope$location %in% "Fh"),"y"]
pdf <- density(chg)
fx <- approxfun(pdf$x, pdf$y, yleft=0, yright=0)
cdfskin <- integrate(fx, -Inf, 0)
cdfskin
# One paired intra-vs-inter plot per skin site, with its own paired
# Wilcoxon p-value annotation.
bylocation <- lapply(unique(ravg$location1), function(i) {
chg <- ravg[c(ravg$location1 %in% i),]
intsig <- wilcox.test(data = chg, avg ~ class, paired = TRUE)$p.value
a <- ggplot(ravg[c(ravg$location1 %in% i),], aes(x = class, y = avg, group = merged)) +
theme_classic() +
theme(
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black")
) +
geom_line(colour = wes_palette("Royal1")[2]) +
geom_point(colour = "black") +
ylab("EV Centrality Distance") +
xlab("") +
ggtitle(i) +
ylim(0, NA) +
annotate("text", x = 1.5, y = 0, label = paste("p-value = ", signif(intsig, digits = 3), sep = ""))
return(a)
})
supplocations <- plot_grid(plotlist = bylocation, nrow = 2)
##############
# Twin Graph #
##############
# Same pipeline for the twin/mother study (SRP002523); note this query does
# not return contig lengths, so abundances here are not length-normalized.
sampleidquery <- "
MATCH
(x:SRP002523)-->(y)-[d]->(z:Phage)-->(a:Bacterial_Host)<-[e]-(b),
(b)<--(i:PatientID)-->(y),
(b)<--(t:TimePoint)-->(y),
(k:Disease)-->(y)
WHERE toInt(d.Abundance) > 0
OR toInt(e.Abundance) > 0
RETURN DISTINCT
z.Name AS from,
a.Name AS to,
i.Name AS PatientID,
t.Name AS TimePoint,
k.Name AS Diet,
toInt(d.Abundance) AS PhageAbundance,
toInt(e.Abundance) AS BacteriaAbundance;
"
sampletable <- as.data.frame(cypher(graph, sampleidquery))
head(sampletable)
# get subsampling depth
# Depths are reassigned yet again: minimum per-sample totals for this study.
phageminseq <- min(ddply(sampletable, c("PatientID", "TimePoint"), summarize, sum = sum(PhageAbundance))$sum)
bacminseq <- min(ddply(sampletable, c("PatientID", "TimePoint"), summarize, sum = sum(BacteriaAbundance))$sum)
# Rarefy each sample using sequence counts
rout <- lapply(unique(sampletable$PatientID), function(i) {
subsetdfout <- as.data.frame(sampletable[c(sampletable$PatientID %in% i),])
outputin <- lapply(unique(subsetdfout$TimePoint), function(j) {
subsetdfin <- subsetdfout[c(subsetdfout$TimePoint %in% j),]
subsetdfin$PhageAbundance <- c(rrarefy(subsetdfin$PhageAbundance, sample = phageminseq))
subsetdfin$BacteriaAbundance <- c(rrarefy(subsetdfin$BacteriaAbundance, sample = bacminseq))
return(subsetdfin)
})
forresult <- as.data.frame(do.call(rbind, outputin))
return(forresult)
})
# Finish making subsampled data frame
rdf <- as.data.frame(do.call(rbind, rout))
# Remove those without bacteria or phage nodes after subsampling
# Zero here means loss of the node
rdf <- rdf[!c(rdf$PhageAbundance == 0 | rdf$BacteriaAbundance == 0),]
# Calculate edge values from nodes
rdf$edge <- log10(rdf$PhageAbundance * rdf$BacteriaAbundance)
# Make a list of subgraphs for each of the samples
# This will be used for diversity, centrality, etc
routdiv <- lapply(unique(rdf$PatientID), function(i) {
subsetdfout <- as.data.frame(rdf[c(rdf$PatientID %in% i),])
outputin <- lapply(unique(subsetdfout$TimePoint), function(j) {
subsetdfin <- subsetdfout[c(subsetdfout$TimePoint %in% j),]
lapgraph <- graph_from_data_frame(subsetdfin[,c("to", "from")], directed = FALSE)
E(lapgraph)$weight <- subsetdfin[,c("edge")]
V(lapgraph)$timepoint <- j
V(lapgraph)$patientid <- i
diettype <- unique(subsetdfin$Diet)
V(lapgraph)$diet <- diettype
return(lapgraph)
})
return(outputin)
})
# Eigenvector centrality per twin-study subgraph, then intra- vs
# interfamily Bray-Curtis comparisons, final figures, and stats export.
rcen <- lapply(routdiv, function(i) {
outputout <- lapply(i, function(k) {
centraldf <- as.data.frame(eigen_centrality(k)$vector)
colnames(centraldf) <- "ecen"
centraldf$names <- row.names(centraldf)
centraldf$patient <- unique(V(k)$patientid)
centraldf$Timepoint <- unique(V(k)$timepoint)
centraldf$diettype <- unique(V(k)$diet)
print(c(unique(V(k)$patientid), unique(V(k)$timepoint)))
return(centraldf)
})
outresult <- as.data.frame(do.call(rbind, outputout))
return(outresult)
})
rcdf <- as.data.frame(do.call(rbind, rcen))
rcast <- dcast(rcdf, patient + Timepoint + diettype ~ names, value.var = "ecen")
rcast[is.na(rcast)] <- 0
rownames(rcast) <- paste(rcast$patient, rcast$Timepoint, rcast$diettype, sep = "_")
rcast <- rcast[,-c(1:3)]
rdisttwin <- vegdist(rcast, method = "bray")
rdm <- melt(as.matrix(rdisttwin))
rm <- cbind(rdm, as.data.frame(str_split_fixed(rdm$Var1, "_", 3)))
rm <- cbind(rm, as.data.frame(str_split_fixed(rm$Var2, "_", 3)))
rm <- rm[,-c(1:2)]
colnames(rm) <- c("ec", "patient1", "time1", "diet1", "patient2", "time2", "diet2")
rm <- rm[!c(rm$ec == 0),]
# Parse family id (text before the T/M code) and person code (letters only)
# out of the patient identifiers.
rm$family1 <- gsub("[TM].*", "", rm$patient1, perl = TRUE)
rm$family2 <- gsub("[TM].*", "", rm$patient2, perl = TRUE)
rm$person1 <- gsub("F\\d", "", rm$patient1, perl = TRUE)
rm$person1 <- gsub("\\d", "", rm$person1, perl = TRUE)
rm$person2 <- gsub("F\\d", "", rm$patient2, perl = TRUE)
rm$person2 <- gsub("\\d", "", rm$person2, perl = TRUE)
rm$class <- ifelse(rm$family1 == rm$family2, "Intrafamily", "Interfamily")
ravg <- ddply(rm, c("patient1", "class"), summarize, avg = mean(ec))
ravgslope <- lapply(unique(ravg$patient1), function(i) {
y <- ravg[c(ravg$class %in% "Intrafamily" & ravg$patient1 %in% i), "avg"] - ravg[c(ravg$class %in% "Interfamily" & ravg$patient1 %in% i), "avg"]
return(data.frame(i, y))
})
ravgslope <- do.call(rbind, ravgslope)
chg <- ravgslope$y
# Density-based probability that intrafamily < interfamily distance.
pdf <- density(chg)
fx <- approxfun(pdf$x, pdf$y, yleft=0, yright=0)
cdftwins <- integrate(fx, -Inf, 0)
cdftwins
ptwinpvalue <- wilcox.test(data = ravg, avg ~ class, paired = TRUE)$p.value
twinline <- ggplot(ravg, aes(x = class, y = avg, group = patient1)) +
theme_classic() +
theme(
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black")
) +
geom_line(colour = wes_palette("Royal1")[2]) +
geom_point(colour = "black") +
ylab("EV Centrality Distance") +
xlab("") +
ylim(0, NA) +
annotate("text", x = 1.5, y = 0, label = paste("p-value = ", signif(ptwinpvalue, digits = 3), sep = ""))
twinden <- ggplot(ravgslope, aes(y)) +
theme_classic() +
theme(
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black")
) +
geom_density() +
geom_vline(xintercept = 0, linetype = "dashed") +
ylab("Probability") +
xlab("Intrapersonal Change") +
xlim(range(pdf$x))
intrabetadivwithmothers <- plot_grid(twinline, twinden, rel_heights = c(4, 1), ncol = 1)
###############
# Final Plots #
###############
# Assemble the NMDS panel (A) with the three paired line plots (B-D).
# The pdf(...) calls below still reach grDevices::pdf despite the `pdf`
# data object, because R looks up a *function* in call position.
boxplots <- plot_grid(
linediet,
twinline,
skinline,
labels = c("B", "C", "D"), ncol = 3)
finalplot <- plot_grid(plotnmds_dietstudy, boxplots, labels = c("A"), rel_widths = c(1, 1.5))
pdf("./figures/intrapersonal_diversity.pdf", width = 12, height = 5)
# NOTE(review): bare plot objects inside a device rely on auto-printing;
# wrap in print() if this script is ever source()d rather than Rscript-run.
finalplot
dev.off()
pdf("./figures/intraallskin.pdf", width = 10, height = 10)
supplocations
dev.off()
# Export the headline statistics (p-values plus ANOSIM signif/statistic).
probstats <- data.frame(
site = c("Diet", "Skin", "Twins", "DietAnosim", "AS"),
prob = c(pdietpvalue, pskinpvalue, ptwinpvalue, anosimstat$signif, anosimstat$statistic)
)
write.table(probstats, file = "./rtables/interstats.tsv", quote = FALSE, sep = "\t", row.names = FALSE, col.names = TRUE)
| /bin/interpersonaldiversity.R | no_license | DuhaimeLab/Hannigan_ConjunctisViribus_mSystems_2017 | R | false | false | 23,820 | r | ##################
# Load Libraries #
##################
gcinfo(FALSE)
packagelist <- c("RNeo4j", "ggplot2", "wesanderson", "igraph", "visNetwork", "scales", "plyr", "cowplot", "vegan", "reshape2", "stringr")
new.packages <- packagelist[!(packagelist %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages, repos='http://cran.us.r-project.org')
lapply(packagelist, library, character.only = TRUE)
##############
# Diet Graph #
##############
# Start the connection to the graph
# If you are getting a lack of permission, disable local permission on Neo4J
graph <- startGraph("http://localhost:7474/db/data/", "neo4j", "root")
# Get list of the sample IDs
# Cypher query: for study SRP002424, return phage/bacterial-host pairs with
# patient ID, time point, diet label, abundances, and genome lengths.
sampleidquery <- "
MATCH
(x:SRP002424)-->(y)-[d]->(z:Phage)-->(a:Bacterial_Host)<-[e]-(b),
(b)<--(i:PatientID)-->(y),
(b)<--(t:TimePoint)-->(y),
(k:Disease)-->(y)
WHERE toInt(d.Abundance) > 0
OR toInt(e.Abundance) > 0
RETURN DISTINCT
z.Name AS from,
a.Name AS to,
i.Name AS PatientID,
t.Name AS TimePoint,
k.Name AS Diet,
toInt(d.Abundance) AS PhageAbundance,
toInt(z.Length) AS PhageLength,
toInt(e.Abundance) AS BacteriaAbundance,
toInt(a.Length) AS BacteriaLength;
"
# Run the query (cypher() presumably from RNeo4j -- returns a data frame)
sampletable <- as.data.frame(cypher(graph, sampleidquery))
# Correct the lengths
# Length-normalize the abundances (counts scaled by 1e7 / genome length)
sampletable$PhageAbundance <- round(1e7 * sampletable$PhageAbundance / sampletable$PhageLength)
sampletable$BacteriaAbundance <- round(1e7 * sampletable$BacteriaAbundance / sampletable$BacteriaLength)
# Drop the now-unneeded length columns (col 9 = BacteriaLength, then col 7 = PhageLength)
sampletable <- sampletable[,-9]
sampletable <- sampletable[,-7]
head(sampletable)
# get subsampling depth
# Rarefaction depth = smallest per-sample (PatientID x TimePoint) total
phageminseq <- min(ddply(sampletable, c("PatientID", "TimePoint"), summarize, sum = sum(PhageAbundance))$sum)
bacminseq <- min(ddply(sampletable, c("PatientID", "TimePoint"), summarize, sum = sum(BacteriaAbundance))$sum)
# Rarefy each sample using sequence counts
rout <- lapply(unique(sampletable$PatientID), function(i) {
subsetdfout <- as.data.frame(sampletable[c(sampletable$PatientID %in% i),])
outputin <- lapply(unique(subsetdfout$TimePoint), function(j) {
subsetdfin <- subsetdfout[c(subsetdfout$TimePoint %in% j),]
# rrarefy (vegan) subsamples each abundance vector down to the common depth
subsetdfin$PhageAbundance <- c(rrarefy(subsetdfin$PhageAbundance, sample = phageminseq))
subsetdfin$BacteriaAbundance <- c(rrarefy(subsetdfin$BacteriaAbundance, sample = bacminseq))
return(subsetdfin)
})
forresult <- as.data.frame(do.call(rbind, outputin))
return(forresult)
})
# Finish making subsampled data frame
rdf <- as.data.frame(do.call(rbind, rout))
# Remove those without bacteria or phage nodes after subsampling
# Zero here means loss of the node
rdf <- rdf[!c(rdf$PhageAbundance == 0 | rdf$BacteriaAbundance == 0),]
# Calculate edge values from nodes
rdf$edge <- log10(rdf$PhageAbundance * rdf$BacteriaAbundance)
# Make a list of subgraphs for each of the samples
# This will be used for diversity, centrality, etc
routdiv <- lapply(unique(rdf$PatientID), function(i) {
subsetdfout <- as.data.frame(rdf[c(rdf$PatientID %in% i),])
outputin <- lapply(unique(subsetdfout$TimePoint), function(j) {
subsetdfin <- subsetdfout[c(subsetdfout$TimePoint %in% j),]
# One undirected, edge-weighted igraph per patient/time point
lapgraph <- graph_from_data_frame(subsetdfin[,c("to", "from")], directed = FALSE)
E(lapgraph)$weight <- subsetdfin[,c("edge")]
V(lapgraph)$timepoint <- j
V(lapgraph)$patientid <- i
diettype <- unique(subsetdfin$Diet)
V(lapgraph)$diet <- diettype
return(lapgraph)
})
return(outputin)
})
##### Eigen Vector Centrality #####
# Compute per-node eigenvector centrality for each patient/time-point subgraph
rcen <- lapply(c(1:length(routdiv)), function(i) {
listelement <- routdiv[[ i ]]
outputin <- lapply(c(1:length(listelement)), function(j) {
listgraph <- listelement[[ j ]]
centraldf <- as.data.frame(eigen_centrality(listgraph)$vector)
colnames(centraldf) <- "ecen"
centraldf$names <- row.names(centraldf)
centraldf$patient <- unique(V(listgraph)$patientid)
centraldf$tp <- unique(V(listgraph)$timepoint)
centraldf$diettype <- unique(V(listgraph)$diet)
return(centraldf)
})
forresult <- as.data.frame(do.call(rbind, outputin))
return(forresult)
})
rcdf <- as.data.frame(do.call(rbind, rcen))
# Wide matrix: one row per sample, one column per node; missing nodes -> 0
rcast <- dcast(rcdf, patient + tp ~ names, value.var = "ecen")
rcast[is.na(rcast)] <- 0
rownames(rcast) <- paste(rcast$patient, rcast$tp, sep = "_")
# Patient 2012 is excluded here -- reason not stated in this file; confirm
rcast <- rcast[!c(rcast$patient == 2012),]
rcast <- rcast[,-c(1:2)]
# Bray-Curtis distance between centrality profiles
rdist <- vegdist(rcast, method = "bray")
rdm <- melt(as.matrix(rdist))
# NOTE(review): `rm` shadows base::rm here (function lookup is unaffected)
rm <- cbind(rdm, as.data.frame(str_split_fixed(rdm$Var1, "_", 2)))
rm <- cbind(rm, as.data.frame(str_split_fixed(rm$Var2, "_", 2)))
rm <- rm[,-c(1:2)]
colnames(rm) <- c("ec", "patient1", "time1", "patient2", "time2")
# Drop self-comparisons (distance 0 on the diagonal)
rm <- rm[!c(rm$ec == 0),]
rm$class <- ifelse(rm$patient1 == rm$patient2, "Intrapersonal", "Interpersonal")
ravg <- ddply(rm, c("patient1", "class"), summarize, avg = mean(ec))
# Per-patient difference: intra - inter mean distance (negative = more self-similar)
ravgslope <- lapply(unique(ravg$patient1), function(i) {
y <- ravg[c(ravg$class %in% "Intrapersonal" & ravg$patient1 %in% i), "avg"] - ravg[c(ravg$class %in% "Interpersonal" & ravg$patient1 %in% i), "avg"]
return(data.frame(i, y))
})
ravgslope <- do.call(rbind, ravgslope)
# Fraction of patients with intra <= inter
sum(c(ravgslope$y <= 0) + 0) / length(ravgslope$y)
# Statistical significance
chg <- ravgslope$y
# NOTE(review): `pdf` shadows grDevices::pdf; pdf() calls later still resolve
# to the function because R skips non-function variables in call position
pdf <- density(chg)
fx <- approxfun(pdf$x, pdf$y, yleft=0, yright=0)
# Probability mass below zero under the kernel density estimate
cdfdiet <- integrate(fx, -Inf, 0)
cdfdiet
# Paired test of intra- vs interpersonal mean distance per patient
pdietpvalue <- wilcox.test(data = ravg, avg ~ class, paired = TRUE)$p.value
# Paired-line plot: each line is one patient across the two classes
linediet <- ggplot(ravg, aes(x = class, y = avg, group = patient1)) +
theme_classic() +
theme(
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black")
) +
geom_line(colour = wes_palette("Royal1")[2]) +
geom_point(colour = "black") +
ylab("EV Centrality Distance") +
xlab("") +
ylim(0, NA) +
annotate("text", x = 1.5, y = 0, label = paste("p-value = ", signif(pdietpvalue, digits = 3), sep = ""))
# Density of the per-patient intra-minus-inter differences
densitydiet <- ggplot(ravgslope, aes(y)) +
theme_classic() +
theme(
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black")
) +
geom_density() +
geom_vline(xintercept = 0, linetype = "dashed") +
ylab("Probability") +
xlab("Intrapersonal Change") +
xlim(range(pdf$x))
intrabetadiv <- plot_grid(linediet, densitydiet, rel_heights = c(4, 1), ncol = 1)
## Ordination ###
# Non-metric multidimensional scaling of the centrality distance matrix
ORD_NMDS <- metaMDS(comm = rdist, k=2)
ORD_FIT = data.frame(MDS1 = ORD_NMDS$points[,1], MDS2 = ORD_NMDS$points[,2])
ORD_FIT$SampleID <- rownames(ORD_FIT)
# Get metadata
# Split "patient_timepoint" rownames back into columns V1 (patient), V2 (time)
ORD_FIT <- cbind(ORD_FIT, as.data.frame(str_split_fixed(ORD_FIT$SampleID, "_", 2)))
plotnmds_dietstudy <- ggplot(ORD_FIT, aes(x=MDS1, y=MDS2, colour=factor(V1))) +
theme_classic() +
theme(
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
legend.position = "bottom"
) +
geom_point() +
scale_colour_manual(values = wes_palette("Darjeeling"), name = "Subject")
plotnmds_dietstudy
# ANOSIM: do samples cluster by subject?
anosimstat <- anosim(rdist, ORD_FIT$V1)
##############
# Skin Graph #
##############
# Import graphs into a list
# Skin site abbreviations (Ax, Ac, Pa, Tw, Um, Fh, Ra)
skinsites <- c("Ax", "Ac", "Pa", "Tw", "Um", "Fh", "Ra")
# Start list
graphdfTP2 <- data.frame()
graphdfTP3 <- data.frame()
# Each .Rdata file is expected to load an object named `sampletable`
for (i in skinsites) {
print(i)
filename <- paste("./data/skingraph-", i, ".Rdata", sep = "")
load(file = filename)
graphdfTP2 <- rbind(graphdfTP2, sampletable)
rm(sampletable)
}
rm(i)
# Same again for the TP3 time-point files
for (i in skinsites) {
print(i)
filename <- paste("./data/skingraph-", i, "-TP3.Rdata", sep = "")
load(file = filename)
graphdfTP3 <- rbind(graphdfTP3, sampletable)
rm(sampletable)
}
rm(i)
totalgraph <- rbind(graphdfTP2, graphdfTP3)
# Correct the lengths
# Length-normalize abundances as in the diet section above
totalgraph$PhageAbundance <- round(1e7 * totalgraph$PhageAbundance / totalgraph$PhageLength)
totalgraph$BacteriaAbundance <- round(1e7 * totalgraph$BacteriaAbundance / totalgraph$BacteriaLength)
totalgraph <- totalgraph[,-9]
totalgraph <- totalgraph[,-7]
# See the object size
format(object.size(totalgraph), units = "MB")
# Run subsampling
# Deduplicated phage-side table; depth = 5th percentile of per-sample totals
uniquephagegraph <- unique(totalgraph[-c(2,7)])
phageminseq <- quantile(ddply(uniquephagegraph, c("PatientID", "Location", "TimePoint"), summarize, sum = sum(as.numeric(PhageAbundance)))$sum, 0.05)
print(format(object.size(uniquephagegraph), units = "MB"))
uniquebacteriagraph <- unique(totalgraph[-c(1,6)])
bacminseq <- quantile(ddply(uniquebacteriagraph, c("PatientID", "Location", "TimePoint"), summarize, sum = sum(as.numeric(BacteriaAbundance)))$sum, 0.05)
# NOTE(review): this prints uniquephagegraph's size again -- probably intended
# to be uniquebacteriagraph; harmless (diagnostic only)
print(format(object.size(uniquephagegraph), units = "MB"))
# Rarefy each sample using sequence counts
# Triple-nested loop: patient -> time point -> location; samples below the
# rarefaction depth are dropped (NULL)
rout <- lapply(unique(uniquephagegraph$PatientID), function(i) {
outputout <- lapply(unique(uniquephagegraph$TimePoint), function(t) {
outputin <- lapply(unique(as.data.frame(uniquephagegraph[c(uniquephagegraph$PatientID %in% i & uniquephagegraph$TimePoint %in% t),])$Location), function(j) {
print(c(i, t, j))
subsetdfin <- as.data.frame(uniquephagegraph[c(uniquephagegraph$PatientID %in% i & uniquephagegraph$TimePoint %in% t & uniquephagegraph$Location %in% j),])
if (sum(subsetdfin$PhageAbundance) >= phageminseq) {
subsetdfin$PhageAbundance <- c(rrarefy(subsetdfin$PhageAbundance, sample = phageminseq))
return(subsetdfin)
} else {
NULL
}
})
forresult <- as.data.frame(do.call(rbind, outputin))
rm(outputin)
return(forresult)
})
outresult <- as.data.frame(do.call(rbind, outputout))
rm(outputout)
return(outresult)
})
rdfphage <- as.data.frame(do.call(rbind, rout))
# Check the results
ddply(rdfphage, c("PatientID", "Location", "TimePoint"), summarize, sum = sum(as.numeric(PhageAbundance)))
# Composite key used to merge the subsampled counts back in later
rdfphage$combophage <- paste(rdfphage$from, rdfphage$PatientID, rdfphage$Location, rdfphage$TimePoint, sep = "__")
rdfphage <- rdfphage[-c(1:4)]
# Rarefy bacterial abundances per patient/time point/location, mirroring the
# phage rarefaction loop above; samples below the depth are dropped (NULL).
rout <- lapply(unique(uniquebacteriagraph$PatientID), function(i) {
outputout <- lapply(unique(uniquebacteriagraph$TimePoint), function(t) {
outputin <- lapply(unique(as.data.frame(uniquebacteriagraph[c(uniquebacteriagraph$PatientID %in% i & uniquebacteriagraph$TimePoint %in% t),])$Location), function(j) {
print(c(i, t, j))
subsetdfin <- as.data.frame(uniquebacteriagraph[c(uniquebacteriagraph$PatientID %in% i & uniquebacteriagraph$TimePoint %in% t & uniquebacteriagraph$Location %in% j),])
# BUG FIX: the original compared against phageminseq (copy-paste from the
# phage loop) while subsampling to bacminseq. If bacminseq > phageminseq,
# rrarefy could be asked for more reads than the sample contains; if
# bacminseq < phageminseq, valid samples were dropped. Compare against the
# bacterial depth that is actually used for the subsampling.
if (sum(subsetdfin$BacteriaAbundance) >= bacminseq) {
subsetdfin$BacteriaAbundance <- c(rrarefy(subsetdfin$BacteriaAbundance, sample = bacminseq))
return(subsetdfin)
} else {
NULL
}
})
forresult <- as.data.frame(do.call(rbind, outputin))
rm(outputin)
return(forresult)
})
outresult <- as.data.frame(do.call(rbind, outputout))
rm(outputout)
return(outresult)
})
rdfbacteria <- as.data.frame(do.call(rbind, rout))
# Sanity check: per-sample totals should now all equal bacminseq
ddply(rdfbacteria, c("PatientID", "Location", "TimePoint"), summarize, sum = sum(as.numeric(BacteriaAbundance)))
# Composite key used to merge the subsampled counts back in later
rdfbacteria$combobacteria <- paste(rdfbacteria$to, rdfbacteria$PatientID, rdfbacteria$Location, rdfbacteria$TimePoint, sep = "__")
rdfbacteria <- rdfbacteria[-c(1:4)]
# Merge the subsampled abundances back into the original file
totalgraphcombo <- totalgraph
totalgraphcombo$combophage <- paste(totalgraphcombo$from, totalgraphcombo$PatientID, totalgraphcombo$Location, totalgraphcombo$TimePoint, sep = "__")
totalgraphcombo$combobacteria <- paste(totalgraphcombo$to, totalgraphcombo$PatientID, totalgraphcombo$Location, totalgraphcombo$TimePoint, sep = "__")
totalgraphcombo <- totalgraphcombo[-c(1:7)]
format(object.size(totalgraphcombo), units = "MB")
format(object.size(rdfphage), units = "KB")
# Inner joins on the composite keys; rows missing either side are dropped
totalgraphmerge <- merge(totalgraphcombo, rdfphage, by = "combophage")
totalgraphmerge <- merge(totalgraphmerge, rdfbacteria, by = "combobacteria")
# Remove those without bacteria or phage nodes after subsampling
# Zero here means loss of the node
rdf <- totalgraphmerge[!c(totalgraphmerge$PhageAbundance == 0 | totalgraphmerge$BacteriaAbundance == 0),]
# Calculate edge values from nodes
rdf$edge <- log10(rdf$PhageAbundance * rdf$BacteriaAbundance) + 0.0001
# Parse the values again
# Recover from/to/patient/location/time columns from the composite keys
rdf <- cbind(as.data.frame(str_split_fixed(rdf$combobacteria, "__", 4)), rdf)
rdf <- cbind(as.data.frame(str_split_fixed(rdf$combophage, "__", 4)), rdf)
rdf <- rdf[-c(2:4)]
rdf <- rdf[-c(6:7)]
colnames(rdf) <- c("from", "to", "PatientID", "Location", "TimePoint", "PhageAbundance", "BacteriaAbundance", "edge")
# Make a list of subgraphs for each of the samples
# This will be used for diversity, centrality, etc
routdiv <- lapply(unique(rdf$PatientID), function(i) {
print(i)
outtime <- lapply(unique(rdf$TimePoint), function(t) {
print(t)
subsetdfout <- as.data.frame(rdf[c(rdf$PatientID %in% i & rdf$TimePoint %in% t),])
outputin <- lapply(unique(subsetdfout$Location), function(j) {
print(j)
subsetdfin <- subsetdfout[c(subsetdfout$Location %in% j),]
lapgraph <- graph_from_data_frame(subsetdfin[,c("to", "from")], directed = FALSE)
E(lapgraph)$weight <- subsetdfin[,c("edge")]
print(as.character(j))
V(lapgraph)$location <- as.character(j)
V(lapgraph)$patientid <- as.character(i)
print(unique(V(lapgraph)$patientid))
V(lapgraph)$timepoint <- t
return(lapgraph)
})
return(outputin)
})
return(outtime)
})
# save(routdiv, file = "./data/quickskinplot.RData")
# Load a previously cached version of routdiv (overwrites the one built above)
load(file = "./data/quickskinplot.RData")
# Eigenvector centrality per node for each patient/time/location subgraph
rcen <- lapply(routdiv, function(i) {
outputout <- lapply(i, function(k) {
outputin <- lapply(k, function(j) {
centraldf <- as.data.frame(eigen_centrality(j)$vector)
colnames(centraldf) <- "ecen"
centraldf$names <- row.names(centraldf)
centraldf$patient <- unique(V(j)$patientid)
centraldf$Timepoint <- unique(V(j)$timepoint)
centraldf$location <- unique(V(j)$location)
print(c(unique(V(j)$patientid), unique(V(j)$timepoint), unique(V(j)$location)))
return(centraldf)
})
forresult <- as.data.frame(do.call(rbind, outputin))
return(forresult)
})
outresult <- as.data.frame(do.call(rbind, outputout))
return(outresult)
})
rcdf <- as.data.frame(do.call(rbind, rcen))
rcast <- dcast(rcdf, patient + Timepoint + location ~ names, value.var = "ecen")
rcast[is.na(rcast)] <- 0
rownames(rcast) <- paste(rcast$patient, rcast$Timepoint, rcast$location, sep = "_")
rcast <- rcast[,-c(1:3)]
rdistskin <- vegdist(rcast, method = "bray")
rdm <- melt(as.matrix(rdistskin))
# NOTE(review): rownames have 3 "_"-separated parts but are split into 4
# fields; the extra empty column is dropped two lines below via -c(2,6)
rm <- cbind(rdm, as.data.frame(str_split_fixed(rdm$Var1, "_", 4)))
rm <- cbind(rm, as.data.frame(str_split_fixed(rm$Var2, "_", 4)))
rm <- rm[,-c(1:2)]
rm <- rm[,-c(2,6)]
colnames(rm) <- c("ec", "patient1", "time1", "location1", "patient2", "time2", "location2")
rm <- rm[!c(rm$ec == 0),]
# Site metadata, ordered to match skinsites; defined here but not used below
moisture <- c("Moist", "IntMoist", "IntMoist", "Moist", "Moist", "Sebaceous", "Sebaceous")
occlusion <- c("Occluded", "IntOccluded", "Exposed", "Occluded", "Occluded", "Exposed", "Occluded")
locationmetadata <- data.frame(skinsites, moisture, occlusion)
# Interpersonal Differences
# Intrapersonal: same subject, same site; Interpersonal: different subject,
# same site and time point. Everything else becomes NA and is dropped.
rm[c(rm$patient1 == rm$patient2 & rm$location1 == rm$location2), "class"] <- "Intrapersonal"
rm[c(rm$patient1 != rm$patient2 & rm$time1 == rm$time2 & rm$location1 == rm$location2), "class"] <- "Interpersonal"
rm <- rm[complete.cases(rm),]
ravg <- ddply(rm, c("patient1", "class", "location1"), summarize, avg = mean(ec))
# Keep only subject/site combos that have both classes (needed for pairing)
counta <- ddply(ravg, c("patient1", "location1"), summarize, count = length(unique(class)))
counta <- counta[c(counta$count == 2),]
ravg <- merge(ravg, counta, by = c("patient1", "location1"))
ravg$merged <- paste(ravg$patient1, ravg$location1, sep = "")
# Intra - inter difference per subject/site pair
ravgslope <- lapply(unique(ravg$merged), function(i) {
y <- ravg[c(ravg$class %in% "Intrapersonal" & ravg$merged %in% i), "avg"] - ravg[c(ravg$class %in% "Interpersonal" & ravg$merged %in% i), "avg"]
return(data.frame(i, y))
})
ravgslope <- do.call(rbind, ravgslope)
chg <- ravgslope$y
pdf <- density(chg)
fx <- approxfun(pdf$x, pdf$y, yleft=0, yright=0)
# Probability mass below zero (intra more similar than inter)
cdfskin <- integrate(fx, -Inf, 0)
cdfskin
pskinpvalue <- wilcox.test(data = ravg, avg ~ class, paired = TRUE)$p.value
skinline <- ggplot(ravg, aes(x = class, y = avg, group = merged)) +
theme_classic() +
theme(
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black")
) +
geom_line(colour = wes_palette("Royal1")[2]) +
geom_point(colour = "black") +
ylab("EV Centrality Distance") +
xlab("") +
ylim(0, NA) +
annotate("text", x = 1.5, y = 0, label = paste("p-value = ", signif(pskinpvalue, digits = 3), sep = ""))
# For supplemental, do by specific site
ravg <- ddply(rm, c("patient1", "class", "location1"), summarize, avg = mean(ec))
counta <- ddply(ravg, c("patient1", "location1"), summarize, count = length(unique(class)))
counta <- counta[c(counta$count == 2),]
ravg <- merge(ravg, counta, by = c("patient1", "location1"))
ravg$merged <- paste(ravg$patient1, ravg$location1, sep = "")
ravgslope <- lapply(unique(ravg$merged), function(i) {
y <- ravg[c(ravg$class %in% "Intrapersonal" & ravg$merged %in% i), "avg"] - ravg[c(ravg$class %in% "Interpersonal" & ravg$merged %in% i), "avg"]
return(data.frame(i, y))
})
ravgslope <- do.call(rbind, ravgslope)
# Split the merged "subjectSite" key back into letters (site) and digits (subject)
ravgslope$location <- gsub("\\d+", "", ravgslope$i, perl = TRUE)
ravgslope$subject <- gsub("\\D+", "", ravgslope$i, perl = TRUE)
# Density CDF below zero for forehead (Fh) only
chg <- ravgslope[c(ravgslope$location %in% "Fh"),"y"]
pdf <- density(chg)
fx <- approxfun(pdf$x, pdf$y, yleft=0, yright=0)
cdfskin <- integrate(fx, -Inf, 0)
cdfskin
# One paired-line panel per skin site
bylocation <- lapply(unique(ravg$location1), function(i) {
chg <- ravg[c(ravg$location1 %in% i),]
intsig <- wilcox.test(data = chg, avg ~ class, paired = TRUE)$p.value
a <- ggplot(ravg[c(ravg$location1 %in% i),], aes(x = class, y = avg, group = merged)) +
theme_classic() +
theme(
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black")
) +
geom_line(colour = wes_palette("Royal1")[2]) +
geom_point(colour = "black") +
ylab("EV Centrality Distance") +
xlab("") +
ggtitle(i) +
ylim(0, NA) +
annotate("text", x = 1.5, y = 0, label = paste("p-value = ", signif(intsig, digits = 3), sep = ""))
return(a)
})
supplocations <- plot_grid(plotlist = bylocation, nrow = 2)
##############
# Twin Graph #
##############
# Same pipeline as the diet section, but for study SRP002523 (twins); note
# this query returns no genome lengths, so no length normalization below
sampleidquery <- "
MATCH
(x:SRP002523)-->(y)-[d]->(z:Phage)-->(a:Bacterial_Host)<-[e]-(b),
(b)<--(i:PatientID)-->(y),
(b)<--(t:TimePoint)-->(y),
(k:Disease)-->(y)
WHERE toInt(d.Abundance) > 0
OR toInt(e.Abundance) > 0
RETURN DISTINCT
z.Name AS from,
a.Name AS to,
i.Name AS PatientID,
t.Name AS TimePoint,
k.Name AS Diet,
toInt(d.Abundance) AS PhageAbundance,
toInt(e.Abundance) AS BacteriaAbundance;
"
sampletable <- as.data.frame(cypher(graph, sampleidquery))
head(sampletable)
# get subsampling depth
phageminseq <- min(ddply(sampletable, c("PatientID", "TimePoint"), summarize, sum = sum(PhageAbundance))$sum)
bacminseq <- min(ddply(sampletable, c("PatientID", "TimePoint"), summarize, sum = sum(BacteriaAbundance))$sum)
# Rarefy each sample using sequence counts
rout <- lapply(unique(sampletable$PatientID), function(i) {
subsetdfout <- as.data.frame(sampletable[c(sampletable$PatientID %in% i),])
outputin <- lapply(unique(subsetdfout$TimePoint), function(j) {
subsetdfin <- subsetdfout[c(subsetdfout$TimePoint %in% j),]
subsetdfin$PhageAbundance <- c(rrarefy(subsetdfin$PhageAbundance, sample = phageminseq))
subsetdfin$BacteriaAbundance <- c(rrarefy(subsetdfin$BacteriaAbundance, sample = bacminseq))
return(subsetdfin)
})
forresult <- as.data.frame(do.call(rbind, outputin))
return(forresult)
})
# Finish making subsampled data frame
rdf <- as.data.frame(do.call(rbind, rout))
# Remove those without bacteria or phage nodes after subsampling
# Zero here means loss of the node
rdf <- rdf[!c(rdf$PhageAbundance == 0 | rdf$BacteriaAbundance == 0),]
# Calculate edge values from nodes
rdf$edge <- log10(rdf$PhageAbundance * rdf$BacteriaAbundance)
# Make a list of subgraphs for each of the samples
# This will be used for diversity, centrality, etc
routdiv <- lapply(unique(rdf$PatientID), function(i) {
subsetdfout <- as.data.frame(rdf[c(rdf$PatientID %in% i),])
outputin <- lapply(unique(subsetdfout$TimePoint), function(j) {
subsetdfin <- subsetdfout[c(subsetdfout$TimePoint %in% j),]
lapgraph <- graph_from_data_frame(subsetdfin[,c("to", "from")], directed = FALSE)
E(lapgraph)$weight <- subsetdfin[,c("edge")]
V(lapgraph)$timepoint <- j
V(lapgraph)$patientid <- i
diettype <- unique(subsetdfin$Diet)
V(lapgraph)$diet <- diettype
return(lapgraph)
})
return(outputin)
})
# Eigenvector centrality per node for each twin-study subgraph
rcen <- lapply(routdiv, function(i) {
outputout <- lapply(i, function(k) {
centraldf <- as.data.frame(eigen_centrality(k)$vector)
colnames(centraldf) <- "ecen"
centraldf$names <- row.names(centraldf)
centraldf$patient <- unique(V(k)$patientid)
centraldf$Timepoint <- unique(V(k)$timepoint)
centraldf$diettype <- unique(V(k)$diet)
print(c(unique(V(k)$patientid), unique(V(k)$timepoint)))
return(centraldf)
})
outresult <- as.data.frame(do.call(rbind, outputout))
return(outresult)
})
rcdf <- as.data.frame(do.call(rbind, rcen))
rcast <- dcast(rcdf, patient + Timepoint + diettype ~ names, value.var = "ecen")
rcast[is.na(rcast)] <- 0
rownames(rcast) <- paste(rcast$patient, rcast$Timepoint, rcast$diettype, sep = "_")
rcast <- rcast[,-c(1:3)]
rdisttwin <- vegdist(rcast, method = "bray")
rdm <- melt(as.matrix(rdisttwin))
rm <- cbind(rdm, as.data.frame(str_split_fixed(rdm$Var1, "_", 3)))
rm <- cbind(rm, as.data.frame(str_split_fixed(rm$Var2, "_", 3)))
rm <- rm[,-c(1:2)]
colnames(rm) <- c("ec", "patient1", "time1", "diet1", "patient2", "time2", "diet2")
rm <- rm[!c(rm$ec == 0),]
# Family ID = patient ID up to the first T (twin) or M (mother) marker;
# person ID = the letter part (assumes IDs like "F1T1" -- TODO confirm format)
rm$family1 <- gsub("[TM].*", "", rm$patient1, perl = TRUE)
rm$family2 <- gsub("[TM].*", "", rm$patient2, perl = TRUE)
rm$person1 <- gsub("F\\d", "", rm$patient1, perl = TRUE)
rm$person1 <- gsub("\\d", "", rm$person1, perl = TRUE)
rm$person2 <- gsub("F\\d", "", rm$patient2, perl = TRUE)
rm$person2 <- gsub("\\d", "", rm$person2, perl = TRUE)
rm$class <- ifelse(rm$family1 == rm$family2, "Intrafamily", "Interfamily")
ravg <- ddply(rm, c("patient1", "class"), summarize, avg = mean(ec))
# Intra - inter family difference per subject
ravgslope <- lapply(unique(ravg$patient1), function(i) {
y <- ravg[c(ravg$class %in% "Intrafamily" & ravg$patient1 %in% i), "avg"] - ravg[c(ravg$class %in% "Interfamily" & ravg$patient1 %in% i), "avg"]
return(data.frame(i, y))
})
ravgslope <- do.call(rbind, ravgslope)
chg <- ravgslope$y
pdf <- density(chg)
fx <- approxfun(pdf$x, pdf$y, yleft=0, yright=0)
cdftwins <- integrate(fx, -Inf, 0)
cdftwins
ptwinpvalue <- wilcox.test(data = ravg, avg ~ class, paired = TRUE)$p.value
twinline <- ggplot(ravg, aes(x = class, y = avg, group = patient1)) +
theme_classic() +
theme(
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black")
) +
geom_line(colour = wes_palette("Royal1")[2]) +
geom_point(colour = "black") +
ylab("EV Centrality Distance") +
xlab("") +
ylim(0, NA) +
annotate("text", x = 1.5, y = 0, label = paste("p-value = ", signif(ptwinpvalue, digits = 3), sep = ""))
twinden <- ggplot(ravgslope, aes(y)) +
theme_classic() +
theme(
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black")
) +
geom_density() +
geom_vline(xintercept = 0, linetype = "dashed") +
ylab("Probability") +
xlab("Intrapersonal Change") +
xlim(range(pdf$x))
intrabetadivwithmothers <- plot_grid(twinline, twinden, rel_heights = c(4, 1), ncol = 1)
###############
# Final Plots #
###############
# Combined figure: NMDS panel (A) plus the three paired-line panels (B-D)
boxplots <- plot_grid(
linediet,
twinline,
skinline,
labels = c("B", "C", "D"), ncol = 3)
finalplot <- plot_grid(plotnmds_dietstudy, boxplots, labels = c("A"), rel_widths = c(1, 1.5))
# pdf() still resolves to grDevices::pdf despite the `pdf` variable defined
# earlier (R skips non-function bindings in call position)
pdf("./figures/intrapersonal_diversity.pdf", width = 12, height = 5)
finalplot
dev.off()
pdf("./figures/intraallskin.pdf", width = 10, height = 10)
supplocations
dev.off()
# Summary table of p-values plus the diet ANOSIM significance and statistic
probstats <- data.frame(
site = c("Diet", "Skin", "Twins", "DietAnosim", "AS"),
prob = c(pdietpvalue, pskinpvalue, ptwinpvalue, anosimstat$signif, anosimstat$statistic)
)
write.table(probstats, file = "./rtables/interstats.tsv", quote = FALSE, sep = "\t", row.names = FALSE, col.names = TRUE)
|
#' Calculate whether times belong to certain week days
#'
#' @param t An integer, seconds since origin date
#' @param origin_date A POSIXct, the origin date
#' @param wdays An array of integers (1 - 7, 1 = Monday)
#'
#' @return An array of 0 and 1
in_wdays <- function(t, origin_date, wdays) {
  # BUG FIX: the original overwrote the `wdays` argument with the computed
  # weekdays and then tested `wdays %in% wdays`, which is always TRUE, so the
  # function returned 1 for every time. Keep the computed weekday in a
  # separate variable, as in_months()/in_day_of_year() do.
  y <- as.integer(format(origin_date + t, "%u"))
  return(as.integer(y %in% wdays))
}
#' Calculate whether times belong to certain months
#'
#' @param t An integer, seconds since origin date
#' @param origin_date A POSIXct, the origin date
#' @param months An array of integers (1 - 12)
#'
#' @return An array of 0 and 1
in_months <- function(t, origin_date, months) {
  # Month number (1-12) of each time, relative to the origin date
  month_of_t <- as.integer(format(origin_date + t, "%m"))
  as.integer(month_of_t %in% months)
}
#' Calculate whether times belong to a certain day of the year
#'
#' @param t An integer, seconds since origin date
#' @param origin_date A POSIXct, the origin date
#' @param day_of_year An array of integers (1 - 366)
#'
#' @return An array of 0 and 1
in_day_of_year <- function(t, origin_date, day_of_year) {
  # Day-of-year (1-366) of each time, relative to the origin date
  doy <- as.integer(format(origin_date + t, "%j"))
  as.integer(doy %in% day_of_year)
}
#' Calculate times "on duty"
#'
#' Vectorized version, needed in planned_labor_vehicle_id()
#'
#' @param sim_vehicle A sim_vehicle
#' @param t An integer, the time since origin date. At present, minutes are used.
#' @param origin_date A POSIXct, the origin date
#'
#' @return An array of integers (0 or 1)
id_on_duty <- function(sim_vehicle, t, origin_date) {
# Operates on exactly one vehicle row; t may be a vector (vectorized over t)
assertthat::assert_that(nrow(sim_vehicle) == 1)
# cond1: t lies inside the shift window, by simulation date and time of day;
# simtimer::sim_date/sim_time split t accordingly (see the simtimer package
# for their exact semantics)
cond1 <- sim_vehicle$shift_from_simdate <= simtimer::sim_date(t) &
sim_vehicle$shift_to_simdate >= simtimer::sim_date(t) &
sim_vehicle$shift_from_simtime <= simtimer::sim_time(t) &
sim_vehicle$shift_to_simtime > simtimer::sim_time(t)
# cond2: the calendar weekday (1 = Monday) of t is one of the scheduled
# weekdays, stored as a comma-separated string (e.g. "1,2,3")
wdays <- as.integer(format(origin_date + t, "%u"))
wdays_schedule <- as.integer(unlist(strsplit(sim_vehicle$shift_weekday, ",")))
cond2 <- wdays %in% wdays_schedule
cond <- cond1 & cond2 # correct => the change from 0 => 1 or 1 => 0
return(as.integer(cond))
}
| /data/Roptimize/basic-scheduling.R | no_license | ims-fhs/badhacker | R | false | false | 2,093 | r | #' Calculate whether times belong to certain week days
#'
#' @param t An integer, seconds since origin date
#' @param origin_date A POSIXct, the origin date
#' @param wdays An array of integers (1 - 7, 1 = Monday)
#'
#' @return An array of 0 and 1
in_wdays <- function(t, origin_date, wdays) {
  # BUG FIX: the original overwrote the `wdays` argument with the computed
  # weekdays and then tested `wdays %in% wdays`, which is always TRUE, so the
  # function returned 1 for every time. Keep the computed weekday in a
  # separate variable, as in_months()/in_day_of_year() do.
  y <- as.integer(format(origin_date + t, "%u"))
  return(as.integer(y %in% wdays))
}
#' Calculate whether times belong to certain months
#'
#' @param t An integer, seconds since origin date
#' @param origin_date A POSIXct, the origin date
#' @param months An array of integers (1 - 12)
#'
#' @return An array of 0 and 1
in_months <- function(t, origin_date, months) {
# Month number (1-12) of each time, relative to the origin date
y <- as.integer(format(origin_date + t, "%m"))
return(as.integer(y %in% months))
}
#' Calculate whether times belong to a certain day of the year
#'
#' @param t An integer, seconds since origin date
#' @param origin_date A POSIXct, the origin date
#' @param day_of_year An array of integers (1 - 366)
#'
#' @return An array of 0 and 1
in_day_of_year <- function(t, origin_date, day_of_year) {
# Day-of-year (1-366) of each time, relative to the origin date
y <- as.integer(format(origin_date + t, "%j"))
return(as.integer(y %in% day_of_year))
}
#' Calculate times "on duty"
#'
#' Vectorized version, needed in planned_labor_vehicle_id()
#'
#' @param sim_vehicle A sim_vehicle
#' @param t An integer, the time since origin date. At present, minutes are used.
#' @param origin_date A POSIXct, the origin date
#'
#' @return An array of integers (0 or 1)
id_on_duty <- function(sim_vehicle, t, origin_date) {
# Operates on exactly one vehicle row; t may be a vector (vectorized over t)
assertthat::assert_that(nrow(sim_vehicle) == 1)
# cond1: t lies inside the shift window, by simulation date and time of day;
# simtimer::sim_date/sim_time split t accordingly (see the simtimer package
# for their exact semantics)
cond1 <- sim_vehicle$shift_from_simdate <= simtimer::sim_date(t) &
sim_vehicle$shift_to_simdate >= simtimer::sim_date(t) &
sim_vehicle$shift_from_simtime <= simtimer::sim_time(t) &
sim_vehicle$shift_to_simtime > simtimer::sim_time(t)
# cond2: the calendar weekday (1 = Monday) of t is one of the scheduled
# weekdays, stored as a comma-separated string (e.g. "1,2,3")
wdays <- as.integer(format(origin_date + t, "%u"))
wdays_schedule <- as.integer(unlist(strsplit(sim_vehicle$shift_weekday, ",")))
cond2 <- wdays %in% wdays_schedule
cond <- cond1 & cond2 # correct => the change from 0 => 1 or 1 => 0
return(as.integer(cond))
}
|
###########################################################################
### Check results
###########################################################################
# Combine spline estimates with the true parameters for plotting.
# BUG FIX: the original left `%>%` alone on the line after
# `mutate(line = "Estimate")`. In R a pipe must end the preceding line;
# a line starting with a binary operator after a completed expression is a
# syntax error and would stop the whole script from sourcing. The pipe has
# been moved onto the preceding line.
plot_line <- param_est$estimate$gamma %>%
  select(jdate, mean, shape, iteration) %>%
  mutate(line = "Estimate") %>%
  bind_rows(true_param_stat %>% mutate(line = "True")) %>%
  mutate(line = factor(line, levels = c("True", "Estimate")))
mle_synth_stat$estimate %>% filter(param == "mean" & ci == "estimate")
head(param_est$gamma)
### Quick check
# NOTE: the `%>% + geom_*()` pattern pipes the plot into `+`(plot, layer);
# unusual, but it works with the magrittr pipe, so it is kept as-is.
p <- ggplot(param_est$gamma, aes(x=jdate, y = mean)) %>%
+ geom_line(aes(group = iteration), alpha=0.1) %>%
+ geom_line(data = true_param_stat, aes(y=mean), colour = "red") %>%
#+ geom_line(data = init_est$gamma, aes(y=mean), colour = "blue") %>%
+ geom_line(data = mle_synth_stat$estimate %>% filter(param == "mean" & ci == "estimate"), aes(y=value), colour = "green") %>%
+ theme_classic()%>%
+ scale_x_continuous(name = "Julian Date", breaks=round(seq(1,365,length.out=20))) %>% ### This seems to break it, putting white lines , expand = c(0, 0)
+ scale_y_continuous(name="Mean") %>%
+ coord_cartesian(xlim=c(1,365))
### Plot
p
# Same comparison, but for the initial estimate (init_est) with a 95% CI
# ribbon built from the marginal mean and its standard error
plot_line <- init_est$estimate$gamma %>%
#select(jdate, mean) %>%
mutate(line = "Estimate") %>%
bind_rows( true_param_stat %>% mutate(line = "True")) %>%
mutate(line = factor(line, levels = c("True", "Estimate")))
plot_ribbon <- init_est$marginal$mean %>%
mutate(ymin = exp(mean - qnorm(0.975) * sigma_mean), ymax = exp(mean + qnorm(0.975)*sigma_mean)) %>%
mutate(fill = "95% CI")
p <- ggplot(plot_line, aes(x=jdate)) %>%
+ geom_ribbon(data = plot_ribbon, aes(ymin = ymin, ymax = ymax, fill = fill), alpha=0.2) %>%
+ geom_line(aes(y=mean, colour = line)) %>%
+ scale_x_continuous(name = "Julian Date", breaks=month_breaks, expand = c(0,0), sec.axis = sec_axis(~ . + 0, breaks = month_breaks, labels = month_labels)) %>% ### This seems to break it, putting white lines , expand = c(0, 0)
+ scale_colour_manual(name = "Mean", values = c("red", "black")) %>%
+ scale_fill_manual(name = NULL, values = c("grey70")) %>%
+ scale_y_continuous(name="Mean") %>%
+ coord_cartesian(xlim=c(1,365))
### Plot
p
### Quick plot to test
# Heatmap of the mean parameter over jdate x year for a single draw
p <- ggplot(filter(param_est, draw=="X1"), aes(x=jdate, y=year, fill=mean)) %>%
+ geom_tile() %>%
+ scale_fill_viridis(name = "Mean")%>%
+ theme_bw()%>%
+ scale_x_continuous(name = "Julian Date", breaks=round(seq(1,365,length.out=20))) %>% ### This seems to break it, putting white lines , expand = c(0, 0)
+ scale_y_continuous(name="Year", expand=c(0,0)) %>%
+ coord_cartesian(xlim=c(1,365), ylim=c(1900,2020))
### Plot
p
### Save the plot
ggsave("./output/tensor_plot.png", p, width =12.5, height = 8, dpi = 300)
### Quick plot of all draws
ggplot(param_est, aes(x=jdate)) + geom_line(aes(group=draw, y=mean), colour="grey40", alpha=0.2) + theme_classic() + facet_grid(year ~ . )
#geom_line(data= mle_fit, aes(y=mean), colour="red") +
ggplot(param_est %>% filter(jdate == unique(param_est$jdate)[seq(1,74, by =7)]), aes(x=year)) + geom_line(aes(group=draw, y=mean), colour="grey40", alpha=0.2) + theme_classic() + facet_grid(jdate ~ . )
### Convert to long format to plot all together
# param_long() is defined elsewhere in the project -- presumably reshapes the
# per-parameter columns into (param, value) pairs; confirm against its source
param_est_long <- param_long(param_est)
param_est_long <- param_est_long %>%
mutate(param = factor(param, levels = c("mean", "scale", "rate", "shape", "disp"), labels = c("Mean", "Scale", "Rate", "Shape", "Dispersion")))
head(param_est_long)
### Plot all draws and all parameters
p <- ggplot(filter(param_est_long, year == 1990), aes(x=jdate)) + geom_ribbon(data = mle_plot, aes(ymin = lower_ci, ymax = upper_ci), fill="grey60", alpha=0.2) + geom_line(aes(group=draw, y=value), colour="grey40", alpha=0.2) + theme_classic() + facet_grid( param ~ . , scales = "free_y") + geom_line(data= mle_plot, aes(y=estimate), colour="red")
p
### Save the plot
ggsave("./output/tensor_plot3.png", p, width =12.5, height = 8, dpi = 300)
### Plot all draws and all parameters
# Same figure for a different year (1970)
p <- ggplot(filter(param_est_long, year == 1970), aes(x=jdate)) + geom_ribbon(data = mle_plot, aes(ymin = lower_ci, ymax = upper_ci), fill="grey60", alpha=0.2) + geom_line(aes(group=draw, y=value), colour="grey40", alpha=0.2) + theme_classic() + facet_grid( param ~ . , scales = "free_y") + geom_line(data= mle_plot, aes(y=estimate), colour="red")
p
### Calculate some summary statistics (quantiles) to plot the spline results
param_summ <- param_summary(param_est)
param_summ <- param_summ %>%
mutate(param = factor(param, levels = c("mean", "scale", "rate", "shape", "disp"), labels = c("Mean", "Scale", "Rate", "Shape", "Dispersion")))
head(param_summ)
plot_df <- filter(param_summ, year == 1990)
### Create a plot comparing the true value, spline with uncertainty bounds, and the MLE estimate
p <- ggplot(plot_df, aes(x=jdate)) %>%
+ geom_ribbon(data = mle_plot, aes(ymin = lower_ci, ymax = upper_ci), fill="grey60", alpha=0.2) %>%
+ geom_line(data = mle_plot, aes(y=estimate), colour="#66c2a5") %>%
+ geom_ribbon(data = plot_df, aes(ymin = perc_50_lower, ymax = perc_95_upper), fill="grey70", alpha = 0.5) %>%
+ geom_line(aes(y = median), colour="#fc8d62", size=1) %>%
+ geom_line(data = filter(param_summ, year == 1960), aes(y = median), colour="black", size=1) %>%
+ theme_classic() %>%
+ scale_x_continuous(name = "Julian Date", breaks=round(seq(1,365,length.out=20)), expand = c(0, 0)) %>%
+ scale_y_continuous(name="Parameter Estimate") %>%
+ coord_cartesian(xlim=c(1,365)) %>%
+ facet_grid(param ~., scales="free_y")
### Plot
p
### Save the plot
ggsave("./output/tensor_plot2.png", p, width =12.5, height = 8, dpi = 300)
### Quick plot to test
# Heatmap of the median Mean parameter over jdate x year
p <- ggplot(param_summ %>% filter(param == "Mean"), aes(x=jdate, y=year, fill=median)) %>%
+ geom_tile() %>%
+ scale_fill_viridis()%>%
+ theme_bw()%>%
+ scale_x_continuous(name = "Julian Date", breaks=round(seq(1,365,length.out=20))) %>% ### This seems to break it, putting white lines , expand = c(0, 0)
+ scale_y_continuous(name="Year", expand=c(0,0)) %>%
+ coord_cartesian(xlim=c(1,365), ylim=c(1900,2020))
### Plot
p
### Save the plot
ggsave("./output/tensor_plot4.png", p, width =12.5, height = 8, dpi = 300)
# Prediction grid: 12 evenly spaced days across years 1950-2010
new_data <- expand.grid(jdate = round(seq(1,336,length.out = 12)), year = seq(1950,2010, 1))
### Estimate parameters for all draws using the demo (single year) basis
# extract_params() / param_summary() are project helpers defined elsewhere
param <- extract_params(model_fit = model_tens, basis = basis_full, newdata = new_data)
param_summ <- param_summary(param$param_est)
head(param_summ)
plot_df <- param_summ %>% filter(param == "shape")
mle_plot <- mle_plot %>%
right_join(expand.grid(jdate = unique(param_summ$jdate), year = c(1950,2020))) %>%
filter( param != "theta")
# Yearly trend per parameter, faceted by Julian date, with 50%/95% bands
p <- ggplot(param_summ, aes(x=year)) %>%
# + geom_line(data = mle_plot, aes( y=value), colour="#66c2a5") %>%
+ geom_ribbon(data = param_summ, aes(ymin = perc_95_lower, ymax = perc_95_upper), fill="grey70", alpha = 0.5) %>%
+ geom_ribbon(data = param_summ, aes(ymin = perc_50_lower, ymax = perc_50_upper), fill="grey50", alpha = 0.5) %>%
+ geom_line(aes(y = median), colour="#fc8d62", size=1) %>%
+ theme_classic() %>%
+ scale_x_continuous(name = "Year", breaks=round(seq(1900,2020,10)), expand = c(0, 0)) %>%
+ scale_y_continuous(name="Parameter Estimate") %>%
#+ coord_cartesian(xlim=c(1,365)) %>%
+ facet_grid(param ~ jdate, scales = "free_y")
### Plot
p
### Save the plot
ggsave("./output/tensor_plot5.png", p, width =12.5, height = 8, dpi = 300)
# Coarser grid over the full 1939-2020 span
new_data <- expand.grid(jdate = seq(1,366,60), year = seq(1939,2020, 1))
### Estimate parameters for all draws using the demo (single year) basis
param <- extract_params(model_fit = model_tens, basis = basis_full, newdata = new_data)
param_est <- param$param_est
### Calculate some summary statistics (quantiles) to plot the spline results
param_summ <- param_summary(param_est)
param_summ <- param_summ %>%
mutate(param = factor(param, levels = c("mean", "scale", "rate", "shape", "disp"), labels = c("Mean", "Scale", "Rate", "Shape", "Dispersion")))
head(param_summ)
### Create a plot comparing the true value, spline with uncertainty bounds, and the MLE estimate
p <- ggplot(param_summ, aes(x=year)) %>%
# + geom_ribbon(data = mle_plot, aes(ymin = lower_ci, ymax = upper_ci), fill="grey60", alpha=0.2) %>%
# + geom_line(data = mle_plot, aes(y=estimate), colour="#66c2a5") %>%
# + geom_ribbon(data = plot_df, aes(ymin = perc_50_lower, ymax = perc_95_upper), fill="grey70", alpha = 0.5) %>%
+ geom_line(aes(y = median, group = jdate), size=1) %>%
# + geom_line(data = filter(param_summ, year == 1960), aes(y = median), colour="black", size=1) %>%
+ theme_classic() %>%
+ scale_x_continuous(name = "Year", breaks=round(seq(1940,2020,by=10)), expand = c(0, 0)) %>%
+ scale_y_continuous(name="Parameter Estimate") %>%
+ coord_cartesian(xlim=c(1930,2020)) %>%
+ facet_grid(param ~jdate, scales="free_y") %>%
+ theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))
### Plot
p
# Base-graphics diagnostics of the Stan fit (fit_2): posterior medians of the
# exponentiated mean/scale splines against the true curves, plus prior vs
# posterior densities for the intercepts and smoothing parameters
mean_est <- exp(extract(fit_2, "mean_param")$mean_param)
ya <- apply(mean_est, 2, median)
plot(true_param$jdate, true_param$mean, type="l")
lines(fitting_df$jdate, apply(mean_est, 2, median), col="red")
#lines(fitting_df$jdate, apply(mean_est, 2, quantile, 0.975), col="blue")
#lines(fitting_df$jdate, apply(mean_est, 2, quantile, 0.025), col="blue")
# A single posterior draw, for reference
lines(fitting_df$jdate, mean_est[1,], col="grey")
scale_est <- exp(extract(fit_2, "scale_param")$scale_param)
ya <- apply(scale_est, 2, median)
plot(true_param$jdate, true_param$scale, type="l")
lines(fitting_df$jdate, apply(scale_est, 2, median), col="red")
lines(fitting_df$jdate, apply(scale_est, 2, quantile, 0.975), col="blue")
lines(fitting_df$jdate, apply(scale_est, 2, quantile, 0.025), col="blue")
# Intercept posteriors vs their known true values (red lines)
plot(density(exp(extract(fit_2, "b_0_mean")$b_0_mean)))
abline(v = 5, col="red")
plot(density(exp(extract(fit_2, "b_0_scale")$b_0_scale)))
abline(v = 5/0.5, col="red")
# Trace plots for the smoothing parameters
plot(fit_2, plotfun = "trace", pars = "lambda_mean", inc_warmup = TRUE)
plot(fit_2, plotfun = "trace", pars = "lambda_scale", inc_warmup = TRUE)
# lambda_mean posterior vs prior (blue), mgcv sp estimate (red), init (green)
plot(density(exp(extract(fit_2, "lambda_mean")$lambda_mean)))
lines(density(exp(rnorm(5000,log(22.8),5))), col="blue")
abline(v = mle_mean_gam$sp / mle_mean_gam$smooth[[1]]$S.scale, col="red")
abline(v = init_vals[[1]]$lambda_mean, col="green")
plot(density(extract(fit_2, "lambda_mean")$lambda_mean))
lines(density(rgamma(5000,1.5, rate=0.06579)), col="blue")
abline(v = mle_mean_gam$sp / mle_mean_gam$smooth[[1]]$S.scale, col="red")
abline(v = init_vals[[1]]$lambda_mean, col="green")
plot(density(extract(fit_2, "lambda_scale")$lambda_scale))
lines(density(rgamma(5000,10,0.002)), col="blue")
abline(v = mle_scale_gam$sp / mle_scale_gam$smooth[[1]]$S.scale, col="red")
###########################################################################
### Run with fixed lambda
###########################################################################
### Create the data to send to stan model
### NOTE(review): the smoothing penalties are fixed here (lambda_mean = 2000,
### lambda_scale = 4000) rather than sampled; S is the penalty matrix taken
### from the mgcv mean fit.
data_fitting <- list(N = length(fitting_df$precip),
basis_dim = basis_dim,
y=fitting_df$precip,
X = as.matrix(X_reparam),
S = as.matrix(mle_mean_gam$smooth[[1]]$S[[1]]),
b_0_mean_prior=b_0_mean_prior,
b_0_scale_prior=b_0_scale_prior,
lambda_mean=2000,
lambda_scale=4000)
str(data_fitting)
#init_vals <- list(list(), list(), list())
### Initial values for the single chain, taken from the MLE/mgcv fit
init_vals <- list(list(b_0_mean = b_0_mean_prior[1],
b_0_scale = b_0_scale_prior[1],
b_mean=b_mean_init,
b_scale=b_scale_init))
init_vals
### Fit the model (single chain, 500 iterations; exploratory settings)
fit_2 <- stan(file = "./stan_models/03-seasonal_spline/e-gammals_fixedlambda.stan",
data = data_fitting,
init = init_vals,
iter = 500, chains = 1, verbose = TRUE)
### Implied prior precision for the spline coefficients: penalty matrix S times
### the smoothing parameter. The second line applies the S.scale normalisation
### and overwrites the first (kept as in the original exploration).
precision_mat <- mle_mean_gam$smooth[[1]]$S[[1]] * (mle_mean_gam$sp[[1]])
precision_mat <- mle_mean_gam$smooth[[1]]$S[[1]] * (mle_mean_gam$sp[[1]] /mle_mean_gam$smooth[[1]]$S.scale)
### Covariance = inverse of precision; draw coefficients to eyeball their scale
sigma <- solve(precision_mat)
yoop <- mvrnorm(n=1000, rep(0, 28), Sigma = sigma)
plot(yoop[,1], yoop[,2])
plot(density(yoop[,1]))
sd(yoop[,1])
### Inspect the mgcv gammals fit: coefficients, smoothing params, penalties
gammals_fit
plot(coef(gammals_fit)[2:29], type="b")
lines(coef(gammals_fit)[31:60], col="red")
plot(coef(gammals_fit)[31:60], type="b")
coef(gammals_fit)
plot(coef(gammals_fit))
c(gammals_fit$sp)
gammals_fit$smooth[[1]]$S
gammals_fit$smooth[[2]]$S
spline_reparam[[1]]$S
gammals_fit$smooth[[1]]$S.scale
spline_reparam[[1]]$S.scale
library(MASS)  # was require(); library() fails loudly if MASS is unavailable
### `sigma` below is a *precision* matrix (sp * S.scale * S), so sampling needs
### its matrix inverse. FIX: the original passed Sigma = 1/sigma, an
### elementwise reciprocal that yields Inf wherever S has zeros.
sigma <- gammals_fit$sp[[1]] * gammals_fit$smooth[[1]]$S.scale * gammals_fit$smooth[[1]]$S[[1]]
yoop <- mvrnorm(n=1000, rep(0, 28), Sigma = solve(sigma))
plot(yoop[,1], yoop[,2])
### FIX: the original called var(mvrnorm(..., Sigma)) with `Sigma` undefined
### (pattern copied from ?mvrnorm). Use a 2x2 slice of the implied covariance
### so the sanity checks are runnable.
Sigma <- solve(sigma)[1:2, 1:2]
var(mvrnorm(n=1000, rep(0, 2), Sigma))
var(mvrnorm(n=1000, rep(0, 2), Sigma, empirical = TRUE))
### FIX: removed a dangling "gammals_fit$smooth[[1]]$S.scale *" fragment that
### turned the next assignment into a parse error.
precision_mat <- gammals_fit$smooth[[1]]$S[[1]] * gammals_fit$sp[[2]]
sigma <- solve(precision_mat)
yoop <- mvrnorm(n=1000, rep(0, 28), Sigma = sigma)
plot(yoop[,1], yoop[,2])
###########################################################################
### Calculate prior and initial values from MLE fit
###########################################################################
### Fit using mgcv directly
ctrl <- list(nthreads=4)
### Gamma location-scale model: one cyclic-cubic seasonal smooth for the
### (log) mean and one for the (log) scale
gammals_fit <- gam(list(precip
~ s(jdate, bs=c("cc"), k = c(n_knots_jdate) ) ,
~ s(jdate, bs=c("cc"), k = c(n_knots_jdate) )
),
data=fitting_df,
knots = list(jdate=knots_jdate),
family=gammals,
select=FALSE,
method="REML",
control= ctrl)
### Build the basis function again, either with or without the reparameterization
#spline_orig <- smoothCon(s(jdate, bs=c("cc"), k = c(n_knots_jdate)), data=fitting_df, knots = list(jdate=knots_jdate), absorb.cons=FALSE, null.space.penalty = TRUE, scale.penalty=TRUE)
#spline_reparam <- smoothCon(s(jdate, bs=c("cc"), k = c(n_knots_jdate)), data=fitting_df, knots = list(jdate=knots_jdate), absorb.cons=TRUE, null.space.penalty = FALSE, scale.penalty=TRUE)
###########################################################################
### Calculate prior and initial values from MLE fit
###########################################################################
### Create the prior for the mean intercept (estimate and std. error)
b_0_mean_prior <- c(summary(gammals_fit)$p.table[1,1], summary(gammals_fit)$p.table[1,2])
### Create a vector for intializing the mean
b_mean_init <- c(coef(gammals_fit)[2:c(n_knots_jdate-1)])
### Rescale the REML smoothing parameter by S.scale (penalty normalisation)
lambda_mean_init <- c(gammals_fit$sp)[[1]] / gammals_fit$smooth[[1]]$S.scale
### Create the prior for the scale intercept
### NOTE(review): assumes p.table[2,1] is reported on the response scale --
### the estimate and an approximate one-sd offset are converted to log scale.
b_0_scale_prior <- c(summary(gammals_fit)$p.table[2,1])
b_0_scale_prior[2] <- log(b_0_scale_prior[1] + summary(gammals_fit)$p.table[2,2]) - log(b_0_scale_prior[1])
b_0_scale_prior[1] <- log(b_0_scale_prior[1])
### Create a vector for intializing the scale
b_scale_init <- c(coef(gammals_fit)[c(n_knots_jdate+1):length(coef(gammals_fit))])
lambda_scale_init <- c(gammals_fit$sp)[[2]] / gammals_fit$smooth[[1]]$S.scale
### Cross-check the penalty/lambda setup against jagam (mgcv's JAGS export)
jagam <- jagam(precip ~ s(jdate, bs=c("cc"), k = c(n_knots_jdate) ),
data=fitting_df,
knots = list(jdate=knots_jdate),
family=gaussian(link = "log"),
file="test.jag")
jagam$pregam$S
jagam$jags.ini$lambda
### Precision implied by jagam's initial lambda; sample to compare scales
precision_mat <- jagam$pregam$S[[1]] * jagam$jags.ini$lambda
sigma <- solve(precision_mat)
yoop <- mvrnorm(n=1000, rep(0, 28), Sigma = sigma)
plot(yoop[,1], yoop[,2])
### NOTE(review): bare symbol below is the name of a Stan density function;
### evaluating it in R errors unless such an object exists in the session --
### looks like a scratch reminder rather than live code.
multi_normal_prec
### Build the basis function again, either with or without the reparameterization
spline_orig <- smoothCon(s(jdate, bs=c("cc"), k = c(n_knots_jdate)), data=fitting_df, knots = list(jdate=knots_jdate), null.space.penalty = FALSE)
spline_reparam <- smoothCon(s(jdate, bs=c("cc"), k = c(n_knots_jdate)), data=fitting_df, knots = list(jdate=knots_jdate), absorb.cons=TRUE, null.space.penalty = FALSE)
#spline_orig <- smoothCon(s(jdate, bs=c("cc"), k = c(n_knots_jdate)), data=fitting_df, knots = list(jdate=knots_jdate), null.space.penalty = TRUE)
#spline_reparam <- smoothCon(s(jdate, bs=c("cc"), k = c(n_knots_jdate)), data=fitting_df, knots = list(jdate=knots_jdate), absorb.cons=TRUE, null.space.penalty = TRUE)
### Extract the matrices for basis and penalty term
X_orig <- spline_orig[[1]]$X
s_orig <- spline_orig[[1]]$S
s_reparam <- spline_reparam[[1]]$S[[1]]
### Reparameterize both using the QR decomposition following Wood
### Where Z is the Q matrix without the first column, used to reparameterize
### (sum-to-zero identifiability constraint: C is the row of column sums of X)
C <- rep(1, nrow(X_orig)) %*% X_orig
qrc <- qr(t(C))
Z <- qr.Q(qrc,complete=TRUE)[,(nrow(C)+1):ncol(C)]
### Calculate reparameterized matrices for basis and penalty
X_reparam <- X_orig%*%Z
#X_reparam2 <- spline_reparam[[1]]$X
head(X_orig)
head(X_reparam)
head(s_orig)
head(s_reparam)
#b <- gam(log(rate_mle) ~ s(jdate, bs=c("cc"), k = c(n_knots_jdate)), data=fitting_df, knots = list(jdate=knots_jdate), select=TRUE)
### Create the data to be used for model
basis_dim <- dim(X_reparam)[2]
### Fit the model
### NOTE(review): data_fitting and init_vals are assembled in a *later*
### section of this scratch file -- interactive execution order matters here.
fit_2 <- stan(file = "./stan_models/03-seasonal_spline/e-gamma_hurdle_spline_loc_e.stan",
data = data_fitting,
init = init_vals,
iter = 800, chains = 1, verbose = FALSE)
###########################################################################
### Calculate prior and initial values from MLE fit
###########################################################################
### Fit a gam using the same basis (seasonal smooth on the log of the
### per-jdate MLE mean estimates)
mle_mean_gam <- gam(log(mean) ~ s(jdate, bs=c("cc"), k = c(n_knots_jdate)), data=mle_fit, knots = list(jdate=knots_jdate), select=FALSE)
summary(mle_mean_gam)
plot(mle_mean_gam)
### Create the prior for the mean intercept (estimate and std. error)
b_0_mean_prior <- c(summary(mle_mean_gam)$p.table[1], summary(mle_mean_gam)$p.table[2])
### Create a vector for intializing the mean
b_mean_init <- c(coef(mle_mean_gam)[2:length(coef(mle_mean_gam))])
lambda_mean_init <- c(mle_mean_gam$sp)
### Doublecheck: reconstruct the smooth from the reparameterized basis
b_init_test <- X_reparam %*% b_mean_init + b_0_mean_prior[1]
plot(fitting_df$jdate, exp(b_init_test), type="l")
rm(b_init_test)
### Fit a gam using the same basis (now for the log scale parameter)
mle_scale_gam <- gam(log(scale) ~ s(jdate, bs=c("cc"), k = c(n_knots_jdate)), data=mle_fit, knots = list(jdate=knots_jdate), select=FALSE)
summary(mle_scale_gam)
plot(mle_scale_gam)
### Create the prior for the scale intercept
b_0_scale_prior <- c(summary(mle_scale_gam)$p.table[1], summary(mle_scale_gam)$p.table[2])
### Create a vector for intializing the scale
b_scale_init <- c(coef(mle_scale_gam)[2:length(coef(mle_scale_gam))])
lambda_scale_init <- c(mle_scale_gam$sp)
### Doublecheck
b_init_test <- X_reparam %*% b_scale_init + b_0_scale_prior[1]
plot(fitting_df$jdate, exp(b_init_test), type="l")
rm(b_init_test)
###########################################################################
### Run the model
###########################################################################
### Create the data to send to stan model
data_fitting <- list(N = length(fitting_df$precip), basis_dim = basis_dim, y=fitting_df$precip, X = X_reparam, S = s_reparam, b_0_mean_prior=b_0_mean_prior, b_0_scale_prior=b_0_scale_prior, lambda_mean_init=lambda_mean_init, lambda_scale_init=lambda_scale_init)
str(data_fitting)
#init_vals <- list(list(), list(), list())
### Initial values for the single chain (from the MLE gam fits above)
init_vals <- list(list(b_0_mean = b_0_mean_prior[1], b_0_scale = b_0_scale_prior[1], b_mean=b_mean_init, b_scale=b_scale_init, lambda_mean_init=lambda_mean_init, lambda_scale_init=lambda_scale_init))
init_vals
### Fit the model (short exploratory run)
fit_3 <- stan(file = "./stan_models/03-seasonal_spline/e-gamma_mean_scaleersion.stan",
data = data_fitting,
init = init_vals,
iter = 200, chains = 1, verbose = FALSE)
### So fast
### Fit a gam using the same basis
### FIX: the original line called gammals(...) directly and had an unbalanced
### ")" (parse error). gammals is an mgcv *family*, not a fitting function,
### so the model is fit with gam() and a two-formula list (mean and scale
### smooths), matching the other gammals fits in this file.
ya <- gam(list(precip ~ s(jdate, bs=c("cc"), k = c(n_knots_jdate)),
               ~ s(jdate, bs=c("cc"), k = c(n_knots_jdate))),
          data=fitting_df,
          knots = list(jdate=knots_jdate),
          family=gammals,
          select=TRUE)
summary(ya)
plot(ya)
### Fit using mgcv directly (gamma location-scale with seasonal smooths)
ctrl <- list(nthreads=4)
fit_3 <- gam(list(precip
~ s(jdate, bs=c("cc"), k = c(n_knots_jdate) ) ,
~ s(jdate, bs=c("cc"), k = c(n_knots_jdate) )
),
data=fitting_df,
knots = list(jdate=knots_jdate),
family=gammals,
select=TRUE,
method="REML",
control= ctrl)
### You can't put the scale.penalty option in here
c(coef(fit_3)[2:length(coef(fit_3))])
c(fit_3$sp)
summary(fit_3)
plot(fit_3)
smoothCon
### Reference line from mgcv's smoothCon source -- S and X are not defined at
### this point in the script, so it is kept only as a comment (FIX: the live
### line was an error on undefined objects):
# maS <- norm(S) / norm(X, type = "I")^2 ### Scaling factor for S
sapply(fit_3$smooth, "[[", "S.scale") / fit_3$sp
### Reproduce the S.scale factor by hand from this fit's penalty and basis
S_gammals <- fit_3$smooth[[1]]$S
S_gammals <- S_gammals[[1]]
x_mat <- model.matrix(fit_3)
X_gammals <- x_mat[,seq(2,29)]
maS <- norm(S_gammals) / norm(X_gammals, type = "I")^2
### Penalty with and without the scale.penalty normalisation
ya <- smoothCon(s(jdate, bs=c("cc"), k = c(n_knots_jdate)), data = fitting_df, knots = list(jdate=knots_jdate), absorb.cons=TRUE, scale.penalty=FALSE)
ya[[1]]$S
### FIX: k used `n_knots` here, which is not defined anywhere visible in this
### file -- every sibling smooth uses n_knots_jdate, so that is assumed to be
### the intent (confirm against the full script).
ya <- smoothCon(s(jdate, bs=c("cc"), k = c(n_knots_jdate)), data = fitting_df, knots = list(jdate=knots_jdate), absorb.cons=TRUE, scale.penalty=TRUE)
ya[[1]]$S
###########################################################################
### Check results
###########################################################################
### Posterior summaries of the two intercepts
print(fit_2, pars = c("b_0_mean", "b_0_scale"))
#
### Back-transform representative posterior means to the response scale
exp(1.59)
exp(2.28)
### True values are 5 and 5/0.5 or 10
### Check the trace plots to confirm the chains converge
plot(fit_2, plotfun = "trace", pars = "b_0_mean", inc_warmup = TRUE)
plot(density(exp(extract(fit_2, "b_0_mean")$b_0_mean)))
abline(v = 5, col="red")
plot(density(exp(extract(fit_2, "b_0_scale")$b_0_scale)))
abline(v = 5/0.5, col="red")
plot(fit_2, plotfun = "trace", pars = "lambda_mean", inc_warmup = TRUE)
plot(fit_2, plotfun = "trace", pars = "lambda_scale", inc_warmup = TRUE)
### Check chains without warmup
### NOTE(review): inc_warmup = TRUE below still includes warmup, despite the
### comment above -- confirm which was intended.
plot(fit_2, plotfun = "trace", pars = "b_mean", inc_warmup = TRUE)
plot(fit_2, plotfun = "trace", pars = "b_scale", inc_warmup = TRUE)
### Check the distributions of beta values
plot(fit_2, show_density = TRUE, ci_level = 0.5, pars = "b_mean", fill_color = "lightblue") + theme_classic()
plot(fit_2, show_density = TRUE, ci_level = 0.5, pars = "b_scale", fill_color = "lightblue") + theme_classic()
### Posterior medians and 95% intervals for the mean parameter vs truth
mean_est <- extract(fit_2, "mean_est")$mean_est
ya <- apply(mean_est, 2, median)
plot(true_param$jdate, true_param$mean, type="l")
lines(fitting_df$jdate, apply(mean_est, 2, median), col="red")
lines(fitting_df$jdate, apply(mean_est, 2, quantile, 0.975), col="blue")
lines(fitting_df$jdate, apply(mean_est, 2, quantile, 0.025), col="blue")
### Same check for scale and shape (medians only)
scale_est <- extract(fit_2, "scale_est")$scale_est
ya <- apply(scale_est, 2, median)
plot(true_param$jdate, true_param$scale, type="l")
lines(fitting_df$jdate, ya, col="red")
shape_est <- extract(fit_2, "shape_est")$shape_est
ya <- apply(shape_est, 2, median)
plot(true_param$jdate, true_param$shape, type="l")
lines(fitting_df$jdate, ya, col="red")
### Posterior densities of the sampled smoothing penalties
lambda_mean <- extract(fit_2, "lambda_mean")$lambda_mean
plot(density(lambda_mean))
lambda_scale <- extract(fit_2, "lambda_scale")$lambda_scale
plot(density(lambda_scale))
###########################################################################
### Check results
###########################################################################
#print(fit_2)
### Create the full basis matrix by adding the intercept column
X_full_reparam <- cbind(rep(1,dim(X_reparam)[1]), X_reparam)
### NOTE(review): this line is not idempotent -- re-running it prepends
### another intercept column to demo_basis_reparam each time.
demo_basis_reparam <- cbind(rep(1,dim(demo_basis_reparam)[1]), demo_basis_reparam)
### Extract the spline coefficients and intercept for mean
b_mean <- extract(fit_2, "b_mean")$b_mean
b_0_mean <- extract(fit_2, "b_0_mean")$b_0_mean
### Combine the intercept and spline coefficients into a single matrix
b_full_mean <- cbind(matrix(b_0_mean, dim(b_mean)[1], 1), b_mean)
### Extract the spline coefficients and intercept for rate
b_scale <- extract(fit_2, "b_scale")$b_scale
b_0_scale <- extract(fit_2, "b_0_scale")$b_0_scale
### Combine the intercept and spline coefficients into a single matrix
b_full_scale <- cbind(matrix(b_0_scale, dim(b_scale)[1], 1), b_scale)
### Calculate the estimate of rate based on the jdate 1 to 365 dataframe
### (one column per posterior draw)
mean_est_jdate <- exp(demo_basis_reparam %*% t(b_full_mean))
### NOTE(review): the columns are named "rate" here but hold the mean draws
mean_est <- data.frame(jdate_demo, rate=mean_est_jdate)
### Gather the results into a long format
mean_est_long <- mean_est %>%
gather("draw", "mean", -jdate)
### Quick plot of all draws
ggplot(mean_est_long, aes(x=jdate, y=mean, group=draw)) + geom_line(colour="grey40", alpha=0.2) + theme_classic()
### Calculate some summary statistics (quantiles) to plot the spline results
### NOTE(review): perc_50_upper is assigned the 0.25 quantile and
### perc_50_lower the 0.75 quantile -- the labels look swapped; confirm.
mean_est_summary <- mean_est_long %>%
group_by(jdate) %>%
summarise(median = median(mean), perc_95_lower = quantile(mean, 0.025), perc_95_upper = quantile(mean, 0.975), perc_50_upper = quantile(mean, 0.25), perc_50_lower = quantile(mean, 0.75))
### Create a plot comparing the true value, spline with uncertainty bounds, and the MLE estimate
### (layers chained with the magrittr "%>% +" pattern used throughout this file)
p <- ggplot(mean_est_summary, aes(x=jdate)) %>%
+ geom_line(data = mle_fit, aes(y=mean), colour="#66c2a5") %>%
+ geom_ribbon(data = mean_est_summary, aes(ymin = perc_95_lower, ymax = perc_95_upper), fill="grey70", alpha = 0.5) %>%
+ geom_ribbon(data = mean_est_summary, aes(ymin = perc_50_lower, ymax = perc_50_upper), fill="grey50", alpha = 0.5) %>%
+ geom_line(data = true_param, aes(y=mean), size=1) %>%
+ geom_line(aes(y = median), colour="#fc8d62", size=1) %>%
+ theme_classic() %>%
+ scale_x_continuous(name = "Julian Date", breaks=round(seq(1,365,length.out=20)), expand = c(0, 0)) %>%
+ scale_y_continuous(name="Mean Parameter") %>%
+ coord_cartesian(xlim=c(1,365))
### Plot
p
### Save the plot
ggsave("../output/model2_rate_spline_v_mle.png", p, width =12.5, height = 8, dpi = 300)
#### Repeat all of this for shape parameter
### Calculate the estimate of shape based on the jdate 1 to 365 dataframe
### NOTE(review): b_full_shape is never defined above (only b_full_mean and
### b_full_scale are built) -- possibly b_full_scale was intended, or this
### section belongs to a model variant with a shape smooth; confirm.
shape_est_jdate <- exp(demo_basis_reparam %*% t(b_full_shape))
shape_est <- data.frame(jdate_demo, rate=shape_est_jdate)
### Gather the results into a long format
shape_est_long <- shape_est %>%
gather("draw", "shape", -jdate)
### Calculate summary statistics
### NOTE(review): as with the mean summary, perc_50_upper/lower look swapped
### (0.25 labeled upper, 0.75 labeled lower).
shape_est_summary <- shape_est_long %>%
group_by(jdate) %>%
summarise(median = median(shape), perc_95_lower = quantile(shape, 0.025), perc_95_upper = quantile(shape, 0.975), perc_50_upper = quantile(shape, 0.25), perc_50_lower = quantile(shape, 0.75))
### Quick plot of all draws
ggplot(shape_est_long, aes(x=jdate, y=shape, group=draw)) + geom_line(colour="grey40", alpha=0.2) + theme_classic()
### Create a plot comparing the true value, spline with uncertainty bounds, and the MLE estimate
p <- ggplot(shape_est_summary, aes(x=jdate)) %>%
+ geom_line(data = mle_fit, aes(y=shape), colour="#66c2a5") %>%
+ geom_ribbon(data = shape_est_summary, aes(ymin = perc_95_lower, ymax = perc_95_upper), fill="grey70", alpha = 0.5) %>%
+ geom_ribbon(data = shape_est_summary, aes(ymin = perc_50_lower, ymax = perc_50_upper), fill="grey50", alpha = 0.5) %>%
+ geom_line(data = true_param, aes(y=shape), size=1) %>%
+ geom_line(aes(y = median), colour="#fc8d62", size=1) %>%
+ theme_classic() %>%
+ scale_x_continuous(name = "Julian Date", breaks=round(seq(1,365,length.out=20)), expand = c(0, 0)) %>%
+ scale_y_continuous(name="Shape Parameter") %>%
+ coord_cartesian(xlim=c(1,365))
### Plot
p
### Save the plot
ggsave("../output/model2_shape_spline_v_mle.png", p, width =12.5, height = 8, dpi = 300)
###########################################################################
### Convert coeficients back to original basis (before reparam)
###########################################################################
### Need to go row by row of betas
### (Z is the QR null-space matrix built in the reparameterization section)
b_mean_orig <- Z %*% b_mean[1,]
### NOTE(review): n_knots and demo_basis_x are not defined in this file's
### visible portion -- presumably created elsewhere in the session; confirm.
plot(jdate_demo$jdate, exp(demo_basis_x %*% b_mean_orig + b_0_mean[1]), type="l"); for(j in seq(1,n_knots-1)){lines(jdate_demo$jdate, exp(demo_basis_x[,j] * b_mean_orig[j] + b_0_mean[1]), col= rainbow(n_knots)[j])}
# [extraction artifact -- dataset metadata row, not R code]: | /leftover.R | no_license | jstagge/spibayes_paper | R | false | false | 27,746 | r |
###########################################################################
### Check results
###########################################################################
### Build a plotting data frame stacking the estimated parameters against the
### true values, with a factor distinguishing the two.
### FIX: the original chain had "%>%" alone on the line *after*
### mutate(line = "Estimate"); that ends the expression, making the dangling
### pipe a parse error. The pipe now terminates the preceding line.
plot_line <- param_est$estimate$gamma %>%
select(jdate, mean, shape, iteration) %>%
mutate(line = "Estimate") %>%
bind_rows( true_param_stat %>% mutate(line = "True")) %>%
mutate(line = factor(line, levels = c("True", "Estimate")))
### Spot-check the MLE summary and the parameter draws
mle_synth_stat$estimate %>% filter(param == "mean" & ci == "estimate")
head(param_est$gamma)
### Quick check
### (layers chained with the magrittr "%>% +" pattern: each line pipes the
### plot object into the binary "+")
p <- ggplot(param_est$gamma, aes(x=jdate, y = mean)) %>%
+ geom_line(aes(group = iteration), alpha=0.1) %>%
+ geom_line(data = true_param_stat, aes(y=mean), colour = "red") %>%
#+ geom_line(data = init_est$gamma, aes(y=mean), colour = "blue") %>%
+ geom_line(data = mle_synth_stat$estimate %>% filter(param == "mean" & ci == "estimate"), aes(y=value), colour = "green") %>%
+ theme_classic()%>%
+ scale_x_continuous(name = "Julian Date", breaks=round(seq(1,365,length.out=20))) %>% ### This seems to break it, putting white lines , expand = c(0, 0)
+ scale_y_continuous(name="Mean") %>%
+ coord_cartesian(xlim=c(1,365))
### Plot
p
### Stack the initial-value estimate against truth for a marginal comparison
plot_line <- init_est$estimate$gamma %>%
#select(jdate, mean) %>%
mutate(line = "Estimate") %>%
bind_rows( true_param_stat %>% mutate(line = "True")) %>%
mutate(line = factor(line, levels = c("True", "Estimate")))
### 95% ribbon from the marginal mean and its standard deviation (log scale)
plot_ribbon <- init_est$marginal$mean %>%
mutate(ymin = exp(mean - qnorm(0.975) * sigma_mean), ymax = exp(mean + qnorm(0.975)*sigma_mean)) %>%
mutate(fill = "95% CI")
p <- ggplot(plot_line, aes(x=jdate)) %>%
+ geom_ribbon(data = plot_ribbon, aes(ymin = ymin, ymax = ymax, fill = fill), alpha=0.2) %>%
+ geom_line(aes(y=mean, colour = line)) %>%
+ scale_x_continuous(name = "Julian Date", breaks=month_breaks, expand = c(0,0), sec.axis = sec_axis(~ . + 0, breaks = month_breaks, labels = month_labels)) %>% ### This seems to break it, putting white lines , expand = c(0, 0)
+ scale_colour_manual(name = "Mean", values = c("red", "black")) %>%
+ scale_fill_manual(name = NULL, values = c("grey70")) %>%
+ scale_y_continuous(name="Mean") %>%
+ coord_cartesian(xlim=c(1,365))
### Plot
p
### Quick plot to test
### Heatmap of the mean parameter over jdate x year for a single draw
p <- ggplot(filter(param_est, draw=="X1"), aes(x=jdate, y=year, fill=mean)) %>%
+ geom_tile() %>%
+ scale_fill_viridis(name = "Mean")%>%
+ theme_bw()%>%
+ scale_x_continuous(name = "Julian Date", breaks=round(seq(1,365,length.out=20))) %>% ### This seems to break it, putting white lines , expand = c(0, 0)
+ scale_y_continuous(name="Year", expand=c(0,0)) %>%
+ coord_cartesian(xlim=c(1,365), ylim=c(1900,2020))
### Plot
p
### Save the plot
ggsave("./output/tensor_plot.png", p, width =12.5, height = 8, dpi = 300)
### Quick plot of all draws
ggplot(param_est, aes(x=jdate)) + geom_line(aes(group=draw, y=mean), colour="grey40", alpha=0.2) + theme_classic() + facet_grid(year ~ . )
#geom_line(data= mle_fit, aes(y=mean), colour="red") +
### NOTE(review): "==" against a vector of jdates recycles element-wise and
### silently drops rows -- %in% is almost certainly what was intended here.
ggplot(param_est %>% filter(jdate == unique(param_est$jdate)[seq(1,74, by =7)]), aes(x=year)) + geom_line(aes(group=draw, y=mean), colour="grey40", alpha=0.2) + theme_classic() + facet_grid(jdate ~ . )
### Convert to long format to plot all together
### (param_long() is a project helper defined elsewhere in the repo)
param_est_long <- param_long(param_est)
param_est_long <- param_est_long %>%
mutate(param = factor(param, levels = c("mean", "scale", "rate", "shape", "disp"), labels = c("Mean", "Scale", "Rate", "Shape", "Dispersion")))
head(param_est_long)
### Plot all draws and all parameters
p <- ggplot(filter(param_est_long, year == 1990), aes(x=jdate)) + geom_ribbon(data = mle_plot, aes(ymin = lower_ci, ymax = upper_ci), fill="grey60", alpha=0.2) + geom_line(aes(group=draw, y=value), colour="grey40", alpha=0.2) + theme_classic() + facet_grid( param ~ . , scales = "free_y") + geom_line(data= mle_plot, aes(y=estimate), colour="red")
p
### Save the plot
ggsave("./output/tensor_plot3.png", p, width =12.5, height = 8, dpi = 300)
### Plot all draws and all parameters
p <- ggplot(filter(param_est_long, year == 1970), aes(x=jdate)) + geom_ribbon(data = mle_plot, aes(ymin = lower_ci, ymax = upper_ci), fill="grey60", alpha=0.2) + geom_line(aes(group=draw, y=value), colour="grey40", alpha=0.2) + theme_classic() + facet_grid( param ~ . , scales = "free_y") + geom_line(data= mle_plot, aes(y=estimate), colour="red")
p
### Calculate some summary statistics (quantiles) to plot the spline results
param_summ <- param_summary(param_est)
param_summ <- param_summ %>%
mutate(param = factor(param, levels = c("mean", "scale", "rate", "shape", "disp"), labels = c("Mean", "Scale", "Rate", "Shape", "Dispersion")))
head(param_summ)
plot_df <- filter(param_summ, year == 1990)
### Create a plot comparing the true value, spline with uncertainty bounds, and the MLE estimate
p <- ggplot(plot_df, aes(x=jdate)) %>%
+ geom_ribbon(data = mle_plot, aes(ymin = lower_ci, ymax = upper_ci), fill="grey60", alpha=0.2) %>%
+ geom_line(data = mle_plot, aes(y=estimate), colour="#66c2a5") %>%
+ geom_ribbon(data = plot_df, aes(ymin = perc_50_lower, ymax = perc_95_upper), fill="grey70", alpha = 0.5) %>%
+ geom_line(aes(y = median), colour="#fc8d62", size=1) %>%
+ geom_line(data = filter(param_summ, year == 1960), aes(y = median), colour="black", size=1) %>%
+ theme_classic() %>%
+ scale_x_continuous(name = "Julian Date", breaks=round(seq(1,365,length.out=20)), expand = c(0, 0)) %>%
+ scale_y_continuous(name="Parameter Estimate") %>%
+ coord_cartesian(xlim=c(1,365)) %>%
+ facet_grid(param ~., scales="free_y")
### Plot
p
### Save the plot
ggsave("./output/tensor_plot2.png", p, width =12.5, height = 8, dpi = 300)
### Quick plot to test
### Heatmap of the posterior-median mean over jdate x year
p <- ggplot(param_summ %>% filter(param == "Mean"), aes(x=jdate, y=year, fill=median)) %>%
+ geom_tile() %>%
+ scale_fill_viridis()%>%
+ theme_bw()%>%
+ scale_x_continuous(name = "Julian Date", breaks=round(seq(1,365,length.out=20))) %>% ### This seems to break it, putting white lines , expand = c(0, 0)
+ scale_y_continuous(name="Year", expand=c(0,0)) %>%
+ coord_cartesian(xlim=c(1,365), ylim=c(1900,2020))
### Plot
p
### Save the plot
ggsave("./output/tensor_plot4.png", p, width =12.5, height = 8, dpi = 300)
### Coarser grid: 12 jdates across the year, 1950-2010
new_data <- expand.grid(jdate = round(seq(1,336,length.out = 12)), year = seq(1950,2010, 1))
### Estimate parameters for all draws using the demo (single year) basis
param <- extract_params(model_fit = model_tens, basis = basis_full, newdata = new_data)
param_summ <- param_summary(param$param_est)
head(param_summ)
plot_df <- param_summ %>% filter(param == "shape")
### Expand the MLE results to match the new jdate grid and drop theta
mle_plot <- mle_plot %>%
right_join(expand.grid(jdate = unique(param_summ$jdate), year = c(1950,2020))) %>%
filter( param != "theta")
### Parameter trajectories over year, faceted by parameter and jdate
### (magrittr "%>% +" layer chaining, as elsewhere in this file)
p <- ggplot(param_summ, aes(x=year)) %>%
# + geom_line(data = mle_plot, aes( y=value), colour="#66c2a5") %>%
+ geom_ribbon(data = param_summ, aes(ymin = perc_95_lower, ymax = perc_95_upper), fill="grey70", alpha = 0.5) %>%
+ geom_ribbon(data = param_summ, aes(ymin = perc_50_lower, ymax = perc_50_upper), fill="grey50", alpha = 0.5) %>%
+ geom_line(aes(y = median), colour="#fc8d62", size=1) %>%
+ theme_classic() %>%
+ scale_x_continuous(name = "Year", breaks=round(seq(1900,2020,10)), expand = c(0, 0)) %>%
+ scale_y_continuous(name="Parameter Estimate") %>%
#+ coord_cartesian(xlim=c(1,365)) %>%
+ facet_grid(param ~ jdate, scales = "free_y")
### Plot
p
### Save the plot
ggsave("./output/tensor_plot5.png", p, width =12.5, height = 8, dpi = 300)
### Rebuild the prediction grid: every 60th Julian day, years 1939-2020
new_data <- expand.grid(jdate = seq(1,366,60), year = seq(1939,2020, 1))
### Estimate parameters for all draws using the demo (single year) basis
param <- extract_params(model_fit = model_tens, basis = basis_full, newdata = new_data)
param_est <- param$param_est
### Calculate some summary statistics (quantiles) to plot the spline results
param_summ <- param_summary(param_est)
param_summ <- param_summ %>%
mutate(param = factor(param, levels = c("mean", "scale", "rate", "shape", "disp"), labels = c("Mean", "Scale", "Rate", "Shape", "Dispersion")))
head(param_summ)
### Create a plot comparing the true value, spline with uncertainty bounds, and the MLE estimate
p <- ggplot(param_summ, aes(x=year)) %>%
# + geom_ribbon(data = mle_plot, aes(ymin = lower_ci, ymax = upper_ci), fill="grey60", alpha=0.2) %>%
# + geom_line(data = mle_plot, aes(y=estimate), colour="#66c2a5") %>%
# + geom_ribbon(data = plot_df, aes(ymin = perc_50_lower, ymax = perc_95_upper), fill="grey70", alpha = 0.5) %>%
+ geom_line(aes(y = median, group = jdate), size=1) %>%
# + geom_line(data = filter(param_summ, year == 1960), aes(y = median), colour="black", size=1) %>%
+ theme_classic() %>%
+ scale_x_continuous(name = "Year", breaks=round(seq(1940,2020,by=10)), expand = c(0, 0)) %>%
+ scale_y_continuous(name="Parameter Estimate") %>%
+ coord_cartesian(xlim=c(1930,2020)) %>%
+ facet_grid(param ~jdate, scales="free_y") %>%
+ theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))
### Plot
p
### (Duplicate diagnostic section) posterior log-mean draws, back-transformed
mean_est <- exp(extract(fit_2, "mean_param")$mean_param)
ya <- apply(mean_est, 2, median)
### Truth (black) vs posterior median (red) vs a single draw (grey)
plot(true_param$jdate, true_param$mean, type="l")
lines(fitting_df$jdate, apply(mean_est, 2, median), col="red")
#lines(fitting_df$jdate, apply(mean_est, 2, quantile, 0.975), col="blue")
#lines(fitting_df$jdate, apply(mean_est, 2, quantile, 0.025), col="blue")
lines(fitting_df$jdate, mean_est[1,], col="grey")
### Scale parameter with 95% posterior interval (blue)
scale_est <- exp(extract(fit_2, "scale_param")$scale_param)
ya <- apply(scale_est, 2, median)
plot(true_param$jdate, true_param$scale, type="l")
lines(fitting_df$jdate, apply(scale_est, 2, median), col="red")
lines(fitting_df$jdate, apply(scale_est, 2, quantile, 0.975), col="blue")
lines(fitting_df$jdate, apply(scale_est, 2, quantile, 0.025), col="blue")
### Intercept posteriors vs synthetic truth (5 and 5/0.5 = 10)
plot(density(exp(extract(fit_2, "b_0_mean")$b_0_mean)))
abline(v = 5, col="red")
plot(density(exp(extract(fit_2, "b_0_scale")$b_0_scale)))
abline(v = 5/0.5, col="red")
### Lambda trace plots and prior/posterior comparisons
plot(fit_2, plotfun = "trace", pars = "lambda_mean", inc_warmup = TRUE)
plot(fit_2, plotfun = "trace", pars = "lambda_scale", inc_warmup = TRUE)
plot(density(exp(extract(fit_2, "lambda_mean")$lambda_mean)))
lines(density(exp(rnorm(5000,log(22.8),5))), col="blue")
abline(v = mle_mean_gam$sp / mle_mean_gam$smooth[[1]]$S.scale, col="red")
abline(v = init_vals[[1]]$lambda_mean, col="green")
plot(density(extract(fit_2, "lambda_mean")$lambda_mean))
lines(density(rgamma(5000,1.5, rate=0.06579)), col="blue")
abline(v = mle_mean_gam$sp / mle_mean_gam$smooth[[1]]$S.scale, col="red")
abline(v = init_vals[[1]]$lambda_mean, col="green")
plot(density(extract(fit_2, "lambda_scale")$lambda_scale))
lines(density(rgamma(5000,10,0.002)), col="blue")
abline(v = mle_scale_gam$sp / mle_scale_gam$smooth[[1]]$S.scale, col="red")
###########################################################################
### Run with fixed lambda
###########################################################################
### Create the data to send to stan model
### (duplicate of the earlier fixed-lambda section: penalties held at 2000/4000)
data_fitting <- list(N = length(fitting_df$precip),
basis_dim = basis_dim,
y=fitting_df$precip,
X = as.matrix(X_reparam),
S = as.matrix(mle_mean_gam$smooth[[1]]$S[[1]]),
b_0_mean_prior=b_0_mean_prior,
b_0_scale_prior=b_0_scale_prior,
lambda_mean=2000,
lambda_scale=4000)
str(data_fitting)
#init_vals <- list(list(), list(), list())
### Initial values for the single chain, from the MLE/mgcv fit
init_vals <- list(list(b_0_mean = b_0_mean_prior[1],
b_0_scale = b_0_scale_prior[1],
b_mean=b_mean_init,
b_scale=b_scale_init))
init_vals
### Fit the model
fit_2 <- stan(file = "./stan_models/03-seasonal_spline/e-gammals_fixedlambda.stan",
data = data_fitting,
init = init_vals,
iter = 500, chains = 1, verbose = TRUE)
### (Duplicate of the earlier precision-matrix exploration; same fixes applied.)
### Prior precision = penalty S times smoothing parameter; second line applies
### the S.scale normalisation and overwrites the first (kept as in original).
precision_mat <- mle_mean_gam$smooth[[1]]$S[[1]] * (mle_mean_gam$sp[[1]])
precision_mat <- mle_mean_gam$smooth[[1]]$S[[1]] * (mle_mean_gam$sp[[1]] /mle_mean_gam$smooth[[1]]$S.scale)
sigma <- solve(precision_mat)
yoop <- mvrnorm(n=1000, rep(0, 28), Sigma = sigma)
plot(yoop[,1], yoop[,2])
plot(density(yoop[,1]))
sd(yoop[,1])
### Inspect the mgcv gammals fit
gammals_fit
plot(coef(gammals_fit)[2:29], type="b")
lines(coef(gammals_fit)[31:60], col="red")
plot(coef(gammals_fit)[31:60], type="b")
coef(gammals_fit)
plot(coef(gammals_fit))
c(gammals_fit$sp)
gammals_fit$smooth[[1]]$S
gammals_fit$smooth[[2]]$S
spline_reparam[[1]]$S
gammals_fit$smooth[[1]]$S.scale
spline_reparam[[1]]$S.scale
library(MASS)  # was require(); library() fails loudly if MASS is unavailable
### FIX: `sigma` is a precision matrix, so the draw needs solve(sigma); the
### original's Sigma = 1/sigma is an elementwise reciprocal (Inf on zeros).
sigma <- gammals_fit$sp[[1]] * gammals_fit$smooth[[1]]$S.scale * gammals_fit$smooth[[1]]$S[[1]]
yoop <- mvrnorm(n=1000, rep(0, 28), Sigma = solve(sigma))
plot(yoop[,1], yoop[,2])
### FIX: `Sigma` was undefined (copied from ?mvrnorm); use a 2x2 slice of the
### implied covariance so the checks run.
Sigma <- solve(sigma)[1:2, 1:2]
var(mvrnorm(n=1000, rep(0, 2), Sigma))
var(mvrnorm(n=1000, rep(0, 2), Sigma, empirical = TRUE))
### FIX: removed a dangling "gammals_fit$smooth[[1]]$S.scale *" fragment that
### made the next assignment a parse error.
precision_mat <- gammals_fit$smooth[[1]]$S[[1]] * gammals_fit$sp[[2]]
sigma <- solve(precision_mat)
yoop <- mvrnorm(n=1000, rep(0, 28), Sigma = sigma)
plot(yoop[,1], yoop[,2])
###########################################################################
### Calculate prior and initial values from MLE fit
###########################################################################
### Fit using mgcv directly
ctrl <- list(nthreads=4)
### Gamma location-scale model: cyclic-cubic seasonal smooths for the (log)
### mean and (log) scale (duplicate of the earlier section)
gammals_fit <- gam(list(precip
~ s(jdate, bs=c("cc"), k = c(n_knots_jdate) ) ,
~ s(jdate, bs=c("cc"), k = c(n_knots_jdate) )
),
data=fitting_df,
knots = list(jdate=knots_jdate),
family=gammals,
select=FALSE,
method="REML",
control= ctrl)
### Build the basis function again, either with or without the reparameterization
#spline_orig <- smoothCon(s(jdate, bs=c("cc"), k = c(n_knots_jdate)), data=fitting_df, knots = list(jdate=knots_jdate), absorb.cons=FALSE, null.space.penalty = TRUE, scale.penalty=TRUE)
#spline_reparam <- smoothCon(s(jdate, bs=c("cc"), k = c(n_knots_jdate)), data=fitting_df, knots = list(jdate=knots_jdate), absorb.cons=TRUE, null.space.penalty = FALSE, scale.penalty=TRUE)
###########################################################################
### Calculate prior and initial values from MLE fit
###########################################################################
### Create the prior for the mean intercept (estimate and std. error)
b_0_mean_prior <- c(summary(gammals_fit)$p.table[1,1], summary(gammals_fit)$p.table[1,2])
### Create a vector for intializing the mean
b_mean_init <- c(coef(gammals_fit)[2:c(n_knots_jdate-1)])
lambda_mean_init <- c(gammals_fit$sp)[[1]] / gammals_fit$smooth[[1]]$S.scale
### Create the prior for the scale intercept
### NOTE(review): assumes p.table[2,1] is on the response scale -- the
### estimate and an approximate one-sd offset are converted to log scale.
b_0_scale_prior <- c(summary(gammals_fit)$p.table[2,1])
b_0_scale_prior[2] <- log(b_0_scale_prior[1] + summary(gammals_fit)$p.table[2,2]) - log(b_0_scale_prior[1])
b_0_scale_prior[1] <- log(b_0_scale_prior[1])
### Create a vector for intializing the scale
b_scale_init <- c(coef(gammals_fit)[c(n_knots_jdate+1):length(coef(gammals_fit))])
lambda_scale_init <- c(gammals_fit$sp)[[2]] / gammals_fit$smooth[[1]]$S.scale
### Cross-check the penalty/lambda setup against what jagam (JAGS export)
### produces for an equivalent single-predictor model.
jagam <- jagam(precip ~ s(jdate, bs=c("cc"), k = c(n_knots_jdate) ),
data=fitting_df,
knots = list(jdate=knots_jdate),
family=gaussian(link = "log"),
file="test.jag")
jagam$pregam$S
jagam$jags.ini$lambda
### Sample from the prior implied by jagam's penalty and initial lambda.
precision_mat <- jagam$pregam$S[[1]] * jagam$jags.ini$lambda
sigma <- solve(precision_mat)
yoop <- mvrnorm(n=1000, rep(0, 28), Sigma = sigma)
plot(yoop[,1], yoop[,2])
### NOTE(review): bare symbol below is a Stan function name, not an R object;
### evaluating it will raise "object not found".
multi_normal_prec
### Build the basis function again, either with or without the reparameterization
spline_orig <- smoothCon(s(jdate, bs=c("cc"), k = c(n_knots_jdate)), data=fitting_df, knots = list(jdate=knots_jdate), null.space.penalty = FALSE)
spline_reparam <- smoothCon(s(jdate, bs=c("cc"), k = c(n_knots_jdate)), data=fitting_df, knots = list(jdate=knots_jdate), absorb.cons=TRUE, null.space.penalty = FALSE)
#spline_orig <- smoothCon(s(jdate, bs=c("cc"), k = c(n_knots_jdate)), data=fitting_df, knots = list(jdate=knots_jdate), null.space.penalty = TRUE)
#spline_reparam <- smoothCon(s(jdate, bs=c("cc"), k = c(n_knots_jdate)), data=fitting_df, knots = list(jdate=knots_jdate), absorb.cons=TRUE, null.space.penalty = TRUE)
### Extract the matrices for basis and penalty term
X_orig <- spline_orig[[1]]$X
s_orig <- spline_orig[[1]]$S
s_reparam <- spline_reparam[[1]]$S[[1]]
### Reparameterize both using the QR decomposition following Wood
### Where Z is the Q matrix without the first column, used to reparameterize
### C is the 1 x k row of column sums of the basis (the sum-to-zero constraint).
C <- rep(1, nrow(X_orig)) %*% X_orig
qrc <- qr(t(C))
Z <- qr.Q(qrc,complete=TRUE)[,(nrow(C)+1):ncol(C)]
### Calculate reparameterized matrices for basis and penalty
X_reparam <- X_orig%*%Z
#X_reparam2 <- spline_reparam[[1]]$X
### Visually compare the hand-rolled reparameterization with smoothCon's.
head(X_orig)
head(X_reparam)
head(s_orig)
head(s_reparam)
#b <- gam(log(rate_mle) ~ s(jdate, bs=c("cc"), k = c(n_knots_jdate)), data=fitting_df, knots = list(jdate=knots_jdate), select=TRUE)
### Create the data to be used for model
basis_dim <- dim(X_reparam)[2]
### Fit the model
fit_2 <- stan(file = "./stan_models/03-seasonal_spline/e-gamma_hurdle_spline_loc_e.stan",
data = data_fitting,
init = init_vals,
iter = 800, chains = 1, verbose = FALSE)
###########################################################################
### Calculate prior and initial values from MLE fit
###########################################################################
### Fit a gam using the same basis to the daily MLE means (log link via
### log-transforming the response).
mle_mean_gam <- gam(log(mean) ~ s(jdate, bs=c("cc"), k = c(n_knots_jdate)), data=mle_fit, knots = list(jdate=knots_jdate), select=FALSE)
summary(mle_mean_gam)
plot(mle_mean_gam)
### Create the prior for the mean intercept: (estimate, SE).
b_0_mean_prior <- c(summary(mle_mean_gam)$p.table[1], summary(mle_mean_gam)$p.table[2])
### Create a vector for initializing the mean (all coefficients but intercept).
b_mean_init <- c(coef(mle_mean_gam)[2:length(coef(mle_mean_gam))])
lambda_mean_init <- c(mle_mean_gam$sp)
### Double-check: reconstruct the fitted curve from the reparameterized basis.
b_init_test <- X_reparam %*% b_mean_init + b_0_mean_prior[1]
plot(fitting_df$jdate, exp(b_init_test), type="l")
rm(b_init_test)
### Fit a gam using the same basis to the daily MLE scale parameter.
mle_scale_gam <- gam(log(scale) ~ s(jdate, bs=c("cc"), k = c(n_knots_jdate)), data=mle_fit, knots = list(jdate=knots_jdate), select=FALSE)
summary(mle_scale_gam)
plot(mle_scale_gam)
### Create the prior for the scale intercept
b_0_scale_prior <- c(summary(mle_scale_gam)$p.table[1], summary(mle_scale_gam)$p.table[2])
### Create a vector for initializing the scale
b_scale_init <- c(coef(mle_scale_gam)[2:length(coef(mle_scale_gam))])
lambda_scale_init <- c(mle_scale_gam$sp)
### Double-check the scale curve as well.
b_init_test <- X_reparam %*% b_scale_init + b_0_scale_prior[1]
plot(fitting_df$jdate, exp(b_init_test), type="l")
rm(b_init_test)
###########################################################################
### Run the model
###########################################################################
### Create the data to send to stan model
data_fitting <- list(N = length(fitting_df$precip), basis_dim = basis_dim, y=fitting_df$precip, X = X_reparam, S = s_reparam, b_0_mean_prior=b_0_mean_prior, b_0_scale_prior=b_0_scale_prior, lambda_mean_init=lambda_mean_init, lambda_scale_init=lambda_scale_init)
str(data_fitting)
#init_vals <- list(list(), list(), list())
### One init list per chain (single chain here).
init_vals <- list(list(b_0_mean = b_0_mean_prior[1], b_0_scale = b_0_scale_prior[1], b_mean=b_mean_init, b_scale=b_scale_init, lambda_mean_init=lambda_mean_init, lambda_scale_init=lambda_scale_init))
init_vals
### Fit the model
fit_3 <- stan(file = "./stan_models/03-seasonal_spline/e-gamma_mean_scaleersion.stan",
data = data_fitting,
init = init_vals,
iter = 200, chains = 1, verbose = FALSE)
### So fast
### Fit a gamma location-scale GAM using the same cyclic basis.
### NOTE: mgcv's gammals() is a *family* constructor, not a fitting function,
### and the original call also had an unbalanced ")". Fit via gam() with a
### two-formula list instead, matching the fit_3 call below.
ya <- gam(list(precip ~ s(jdate, bs = c("cc"), k = c(n_knots_jdate)),
               ~ s(jdate, bs = c("cc"), k = c(n_knots_jdate))),
          data = fitting_df, knots = list(jdate = knots_jdate),
          family = gammals, select = TRUE)
summary(ya)
plot(ya)
### Fit using mgcv directly
ctrl <- list(nthreads=4)
fit_3 <- gam(list(precip
~ s(jdate, bs=c("cc"), k = c(n_knots_jdate) ) ,
~ s(jdate, bs=c("cc"), k = c(n_knots_jdate) )
),
data=fitting_df,
knots = list(jdate=knots_jdate),
family=gammals,
select=TRUE,
method="REML",
control= ctrl)
### You can't put the scale.penalty option in here
c(coef(fit_3)[2:length(coef(fit_3))])
c(fit_3$sp)
summary(fit_3)
plot(fit_3)
### Print smoothCon's source to inspect how it rescales the penalty.
smoothCon
### NOTE(review): `S` and `X` are not defined at this point — this line was
### copied from smoothCon's internals and errors if run as-is; the working
### version using S_gammals/X_gammals follows below.
maS <- norm(S) / norm(X, type = "I")^2 ### Scaling factor for S
sapply(fit_3$smooth, "[[", "S.scale") / fit_3$sp
### Recompute mgcv's penalty scaling factor from the fitted model's own
### penalty matrix and model-matrix columns (columns 2:29 = first smooth).
S_gammals <- fit_3$smooth[[1]]$S
S_gammals <- S_gammals[[1]]
x_mat <- model.matrix(fit_3)
X_gammals <- x_mat[,seq(2,29)]
maS <- norm(S_gammals) / norm(X_gammals, type = "I")^2
### Compare the penalty with and without smoothCon's scale.penalty rescaling.
ya <- smoothCon(s(jdate, bs=c("cc"), k = c(n_knots_jdate)), data = fitting_df, knots = list(jdate=knots_jdate), absorb.cons=TRUE, scale.penalty=FALSE)
ya[[1]]$S
### NOTE(review): this call uses `n_knots`, not `n_knots_jdate` — possibly a typo.
ya <- smoothCon(s(jdate, bs=c("cc"), k = c(n_knots)), data = fitting_df, knots = list(jdate=knots_jdate), absorb.cons=TRUE, scale.penalty=TRUE)
ya[[1]]$S
###########################################################################
### Check results
###########################################################################
print(fit_2, pars = c("b_0_mean", "b_0_scale"))
#
### Back-transform the (log-scale) intercepts by hand for a quick sanity check.
exp(1.59)
exp(2.28)
### True values are 5 and 5/0.5 or 10
### Check the trace plots to confirm the chains converge
plot(fit_2, plotfun = "trace", pars = "b_0_mean", inc_warmup = TRUE)
plot(density(exp(extract(fit_2, "b_0_mean")$b_0_mean)))
abline(v = 5, col="red")
plot(density(exp(extract(fit_2, "b_0_scale")$b_0_scale)))
abline(v = 5/0.5, col="red")
plot(fit_2, plotfun = "trace", pars = "lambda_mean", inc_warmup = TRUE)
plot(fit_2, plotfun = "trace", pars = "lambda_scale", inc_warmup = TRUE)
### Check chains without warmup
### NOTE(review): the comment above says "without warmup" but inc_warmup=TRUE
### includes the warmup iterations — one of the two is wrong.
plot(fit_2, plotfun = "trace", pars = "b_mean", inc_warmup = TRUE)
plot(fit_2, plotfun = "trace", pars = "b_scale", inc_warmup = TRUE)
### Check the distributions of beta values
plot(fit_2, show_density = TRUE, ci_level = 0.5, pars = "b_mean", fill_color = "lightblue") + theme_classic()
plot(fit_2, show_density = TRUE, ci_level = 0.5, pars = "b_scale", fill_color = "lightblue") + theme_classic()
### Posterior medians / 95% bands of the mean curve vs the true parameter.
mean_est <- extract(fit_2, "mean_est")$mean_est
ya <- apply(mean_est, 2, median)
plot(true_param$jdate, true_param$mean, type="l")
lines(fitting_df$jdate, apply(mean_est, 2, median), col="red")
lines(fitting_df$jdate, apply(mean_est, 2, quantile, 0.975), col="blue")
lines(fitting_df$jdate, apply(mean_est, 2, quantile, 0.025), col="blue")
### Same comparison for the scale curve.
scale_est <- extract(fit_2, "scale_est")$scale_est
ya <- apply(scale_est, 2, median)
plot(true_param$jdate, true_param$scale, type="l")
lines(fitting_df$jdate, ya, col="red")
### And for the derived shape curve.
shape_est <- extract(fit_2, "shape_est")$shape_est
ya <- apply(shape_est, 2, median)
plot(true_param$jdate, true_param$shape, type="l")
lines(fitting_df$jdate, ya, col="red")
### Posterior densities of the smoothing parameters.
lambda_mean <- extract(fit_2, "lambda_mean")$lambda_mean
plot(density(lambda_mean))
lambda_scale <- extract(fit_2, "lambda_scale")$lambda_scale
plot(density(lambda_scale))
###########################################################################
### Check results
###########################################################################
#print(fit_2)
### Create the full basis matrix by adding the intercept column
X_full_reparam <- cbind(rep(1,dim(X_reparam)[1]), X_reparam)
### NOTE(review): demo_basis_reparam must already exist (defined outside this
### chunk), and this line is not idempotent — re-running it prepends another
### intercept column each time.
demo_basis_reparam <- cbind(rep(1,dim(demo_basis_reparam)[1]), demo_basis_reparam)
### Extract the spline coefficients and intercept for mean
b_mean <- extract(fit_2, "b_mean")$b_mean
b_0_mean <- extract(fit_2, "b_0_mean")$b_0_mean
### Combine the intercept and spline coefficients into a single matrix
b_full_mean <- cbind(matrix(b_0_mean, dim(b_mean)[1], 1), b_mean)
### Extract the spline coefficients and intercept for rate
b_scale <- extract(fit_2, "b_scale")$b_scale
b_0_scale <- extract(fit_2, "b_0_scale")$b_0_scale
### Combine the intercept and spline coefficients into a single matrix
b_full_scale <- cbind(matrix(b_0_scale, dim(b_scale)[1], 1), b_scale)
### Calculate the estimate of rate based on the jdate 1 to 365 dataframe
### NOTE(review): the column is named `rate` but holds the mean estimate.
mean_est_jdate <- exp(demo_basis_reparam %*% t(b_full_mean))
mean_est <- data.frame(jdate_demo, rate=mean_est_jdate)
### Gather the results into a long format
mean_est_long <- mean_est %>%
gather("draw", "mean", -jdate)
### Quick plot of all draws
ggplot(mean_est_long, aes(x=jdate, y=mean, group=draw)) + geom_line(colour="grey40", alpha=0.2) + theme_classic()
### Calculate some summary statistics (quantiles) to plot the spline results.
### FIX: the 50% interval labels were swapped in the original —
### perc_50_upper held the 25th percentile and perc_50_lower the 75th,
### which inverted the inner ribbon in the plots below.
mean_est_summary <- mean_est_long %>%
group_by(jdate) %>%
summarise(median = median(mean), perc_95_lower = quantile(mean, 0.025), perc_95_upper = quantile(mean, 0.975), perc_50_lower = quantile(mean, 0.25), perc_50_upper = quantile(mean, 0.75))
### Create a plot comparing the true value, spline with uncertainty bounds, and the MLE estimate.
### FIX: ggplot2 layers are combined with "+". The original chained them with
### "%>%" followed by a unary "+", which only works by accident through
### magrittr's call rewriting and is fragile.
p <- ggplot(mean_est_summary, aes(x=jdate)) +
  geom_line(data = mle_fit, aes(y=mean), colour="#66c2a5") +
  geom_ribbon(data = mean_est_summary, aes(ymin = perc_95_lower, ymax = perc_95_upper), fill="grey70", alpha = 0.5) +
  geom_ribbon(data = mean_est_summary, aes(ymin = perc_50_lower, ymax = perc_50_upper), fill="grey50", alpha = 0.5) +
  geom_line(data = true_param, aes(y=mean), size=1) +
  geom_line(aes(y = median), colour="#fc8d62", size=1) +
  theme_classic() +
  scale_x_continuous(name = "Julian Date", breaks=round(seq(1,365,length.out=20)), expand = c(0, 0)) +
  scale_y_continuous(name="Mean Parameter") +
  coord_cartesian(xlim=c(1,365))
### Plot
p
### Save the plot
ggsave("../output/model2_rate_spline_v_mle.png", p, width =12.5, height = 8, dpi = 300)
#### Repeat all of this for shape parameter
### Calculate the estimate of shape based on the jdate 1 to 365 dataframe
### NOTE(review): b_full_shape is never defined in this chunk — only
### b_full_mean and b_full_scale are built above. This likely should be
### b_full_scale (or shape derived from mean/scale); confirm before running.
shape_est_jdate <- exp(demo_basis_reparam %*% t(b_full_shape))
shape_est <- data.frame(jdate_demo, rate=shape_est_jdate)
### Gather the results into a long format
shape_est_long <- shape_est %>%
gather("draw", "shape", -jdate)
### Calculate summary statistics.
### FIX: as with the mean summary, the 50% interval labels were swapped
### (perc_50_upper held the 25th percentile); corrected here.
shape_est_summary <- shape_est_long %>%
group_by(jdate) %>%
summarise(median = median(shape), perc_95_lower = quantile(shape, 0.025), perc_95_upper = quantile(shape, 0.975), perc_50_lower = quantile(shape, 0.25), perc_50_upper = quantile(shape, 0.75))
### Quick plot of all draws
### (one grey line per posterior draw of the shape curve)
ggplot(shape_est_long, aes(x=jdate, y=shape, group=draw)) + geom_line(colour="grey40", alpha=0.2) + theme_classic()
### Create a plot comparing the true value, spline with uncertainty bounds, and the MLE estimate.
### FIX: layers are combined with ggplot2's "+" (the original "%>% +"
### chaining is fragile and non-idiomatic).
p <- ggplot(shape_est_summary, aes(x=jdate)) +
  geom_line(data = mle_fit, aes(y=shape), colour="#66c2a5") +
  geom_ribbon(data = shape_est_summary, aes(ymin = perc_95_lower, ymax = perc_95_upper), fill="grey70", alpha = 0.5) +
  geom_ribbon(data = shape_est_summary, aes(ymin = perc_50_lower, ymax = perc_50_upper), fill="grey50", alpha = 0.5) +
  geom_line(data = true_param, aes(y=shape), size=1) +
  geom_line(aes(y = median), colour="#fc8d62", size=1) +
  theme_classic() +
  scale_x_continuous(name = "Julian Date", breaks=round(seq(1,365,length.out=20)), expand = c(0, 0)) +
  scale_y_continuous(name="Shape Parameter") +
  coord_cartesian(xlim=c(1,365))
### Plot
p
### Save the plot
ggsave("../output/model2_shape_spline_v_mle.png", p, width =12.5, height = 8, dpi = 300)
###########################################################################
### Convert coefficients back to original basis (before reparam)
###########################################################################
### Need to go row by row of betas (here only the first posterior draw).
### NOTE(review): demo_basis_x and n_knots are defined outside this chunk —
### confirm n_knots equals n_knots_jdate used elsewhere.
b_mean_orig <- Z %*% b_mean[1,]
plot(jdate_demo$jdate, exp(demo_basis_x %*% b_mean_orig + b_0_mean[1]), type="l"); for(j in seq(1,n_knots-1)){lines(jdate_demo$jdate, exp(demo_basis_x[,j] * b_mean_orig[j] + b_0_mean[1]), col= rainbow(n_knots)[j])}
|
### Inverse-logit (logistic) transform: maps the real line to (0, 1).
### Vectorized over x.
expit <- function(x) {
  ex <- exp(x)
  ex/(1 + ex)
}
### Logit transform: maps (0, 1) to the real line. Inverse of expit().
logit <- function(x) {
  odds <- x/(1 - x)
  log(odds)
}
#ep = -1.7
#-1.29
#-1.1
#-0.9
#-0.6
#-0.2
#0.2
### Candidate logit-scale intercepts; per the comment below they were
### calibrated (for l = 1) to give average capture probabilities 0.2-0.9.
ep_vec = c(-3.015, -2.513, -2.118, -1.758, -1.42, -1.065, -0.66, -0.098)
#for l = 1, capture probabilities are 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9
### Global intercept used by pi1()/pi2(); second element -> capture prob ~0.3.
ep = ep_vec[2]
### Capture probability for list 1 given covariate vector x.
### Uses the global intercept `ep` and slope 0.4 on each covariate.
pi1 <- function(x) {
  expit(ep + sum(0.4 * x))
}
### Capture probability for list 2 given covariate vector x.
### Uses the global intercept `ep` and slope 0.3 on each covariate.
pi2 <- function(x) {
  expit(ep + sum(0.3 * x))
}
### Simulate a two-list capture-recapture data set.
###   n: number of individuals; l: number of covariates.
### Covariates x ~ Uniform(2, 3); capture indicators y1, y2 are Bernoulli
### with probabilities pi1(x), pi2(x). xp holds nonlinear transforms of x
### (cycled by column index mod 4). Returns the raw and transformed list
### matrices plus psi0, the true probability of being captured at least once.
dat_p = function(n, l){
x = matrix(runif(n*l, 0, 1) + 2,
#rnorm(n*l, 0, 1)+2,
nrow = n, ncol = l)
y1 = unlist(apply(x, 1, function(xi) {sample(c(0, 1), 1, replace = TRUE, prob = c( 1 - pi1(xi), pi1(xi)))}))
y2 = unlist(apply(x, 1, function(xi) {sample(c(0, 1), 1, replace = TRUE, prob = c( 1 - pi2(xi), pi2(xi)))}))
### Distorted covariates x* — one nonlinear transform per column.
xp = do.call("cbind", lapply(1:ncol(x),
function(li){
if(li%%4 == 1){
return(exp(x[,li]/2))
}else if(li%%4 == 2){
return(x[,li]/(1 + exp(x[,li -1])) + 10)
}else if(li%%4 == 3){
return((x[,li]*x[,li-2]/25 + 0.6)^3)
}else{
return((x[,li -2] + x[,li] + 20)^2)
}
}))
List_matrix = cbind(y1, y2, x)
List_matrix_xstar = cbind(y1, y2, xp)
### Conditional capture probabilities given capture on at least one list.
### NOTE(review): q1, q2, q12 are computed but not returned — apparently
### only used by the commented-out estimator below this function.
p1 = unlist(apply(x, 1, pi1))
p2 = unlist(apply(x, 1, pi2))
q1 = p1/(1 - (1-p1)*(1-p2))
q2 = p2/(1 - (1-p1)*(1-p2))
q12 = p2*p1/(1 - (1-p1)*(1-p2))
### psi0 = 1 - P(missed by both lists), averaged over the simulated x.
return(list(List_matrix = List_matrix, List_matrix_xstar = List_matrix_xstar,
psi0 = 1 - mean(apply(x, 1, function(xx){return((1 - pi1(xx))*(1 - pi2(xx)))}))
))
}
### Monte-Carlo check of psi0: repeat the simulation 100 times (n = 1000,
### one covariate) and summarise the distribution of the true capture
### probability across replicates.
dam <- vapply(seq_len(100), function(rep_i) dat_p(1000, 1)$psi0, numeric(1))
summary(dam)
# Qnphi = mean(sapply(1:1, function(i) {
# x = matrix(
# rnorm(n*l, 0, 1),
# nrow = n, ncol = l)
# y1 = unlist(apply(x, 1, function(xi) {sample(c(0, 1), 1, replace = TRUE, prob = c( 1 - pi1(xi), pi1(xi)))}))
# y2 = unlist(apply(x, 1, function(xi) {sample(c(0, 1), 1, replace = TRUE, prob = c( 1 - pi2(xi), pi2(xi)))}))
# mean((q1*q2/q12 *(y1/q1 + y2/q2 - y1*y2/q12 - 1))[pmax(y1, y2) > 0])
# }))
| /data/simulated/indep_cov_Tilling_simulation.R | no_license | mqnjqrid/capture_recapture | R | false | false | 2,312 | r | expit = function(x) {
exp(x)/(1 + exp(x))
}
logit = function(x) {
log(x/(1 - x))
}
#ep = -1.7
#-1.29
#-1.1
#-0.9
#-0.6
#-0.2
#0.2
ep_vec = c(-3.015, -2.513, -2.118, -1.758, -1.42, -1.065, -0.66, -0.098)
#for l = 1, capture probabilities are 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9
ep = ep_vec[2]
### List-1 capture probability for covariates x (global intercept `ep`, slope 0.4).
pi1 <- function(x) {
  lin_pred <- ep + sum(0.4 * x)
  expit(lin_pred)
}
### List-2 capture probability for covariates x (global intercept `ep`, slope 0.3).
pi2 <- function(x) {
  lin_pred <- ep + sum(0.3 * x)
  expit(lin_pred)
}
### Simulate two-list capture-recapture data (n individuals, l covariates).
### Returns the raw list matrix, a matrix with nonlinearly transformed
### covariates, and psi0 = true P(captured by at least one list).
dat_p = function(n, l){
x = matrix(runif(n*l, 0, 1) + 2,
#rnorm(n*l, 0, 1)+2,
nrow = n, ncol = l)
y1 = unlist(apply(x, 1, function(xi) {sample(c(0, 1), 1, replace = TRUE, prob = c( 1 - pi1(xi), pi1(xi)))}))
y2 = unlist(apply(x, 1, function(xi) {sample(c(0, 1), 1, replace = TRUE, prob = c( 1 - pi2(xi), pi2(xi)))}))
### Column-wise nonlinear distortions of x (pattern cycles with index mod 4).
xp = do.call("cbind", lapply(1:ncol(x),
function(li){
if(li%%4 == 1){
return(exp(x[,li]/2))
}else if(li%%4 == 2){
return(x[,li]/(1 + exp(x[,li -1])) + 10)
}else if(li%%4 == 3){
return((x[,li]*x[,li-2]/25 + 0.6)^3)
}else{
return((x[,li -2] + x[,li] + 20)^2)
}
}))
List_matrix = cbind(y1, y2, x)
List_matrix_xstar = cbind(y1, y2, xp)
### NOTE(review): q1/q2/q12 are computed but never returned (dead code here;
### referenced only by the commented-out estimator at the end of the script).
p1 = unlist(apply(x, 1, pi1))
p2 = unlist(apply(x, 1, pi2))
q1 = p1/(1 - (1-p1)*(1-p2))
q2 = p2/(1 - (1-p1)*(1-p2))
q12 = p2*p1/(1 - (1-p1)*(1-p2))
return(list(List_matrix = List_matrix, List_matrix_xstar = List_matrix_xstar,
psi0 = 1 - mean(apply(x, 1, function(xx){return((1 - pi1(xx))*(1 - pi2(xx)))}))
))
}
### Monte-Carlo check: distribution of the true capture probability psi0
### over 100 simulated data sets (n = 1000, one covariate).
dam = numeric(length = 100)
for(i in 1:100){
dam[i] = dat_p(1000, 1)$psi0
}
summary(dam)
# Qnphi = mean(sapply(1:1, function(i) {
# x = matrix(
# rnorm(n*l, 0, 1),
# nrow = n, ncol = l)
# y1 = unlist(apply(x, 1, function(xi) {sample(c(0, 1), 1, replace = TRUE, prob = c( 1 - pi1(xi), pi1(xi)))}))
# y2 = unlist(apply(x, 1, function(xi) {sample(c(0, 1), 1, replace = TRUE, prob = c( 1 - pi2(xi), pi2(xi)))}))
# mean((q1*q2/q12 *(y1/q1 + y2/q2 - y1*y2/q12 - 1))[pmax(y1, y2) > 0])
# }))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bb2bbox.R
\name{bb2bbox}
\alias{bb2bbox}
\title{Convert a bb specification to a bbox specification}
\usage{
bb2bbox(bb)
}
\arguments{
\item{bb}{a bounding box in bb format (see examples)}
}
\value{
a bounding box in bbox format (see examples)
}
\description{
In ggmap, all maps (class ggmap) have the bb attribute, a data
frame bounding box specification in terms of the bottom left and
top right points of the spatial extent. This function converts
this specification to a named double vector (with names left,
bottom, right, top) specification that is used in some querying
functions (e.g. get_stamenmap).
}
\examples{
\dontrun{# cut down on R CMD check time
# grab a center/zoom map and compute its bounding box
gc <- geocode("white house, washington dc")
map <- get_map(gc)
(bb <- attr(map, "bb"))
(bbox <- bb2bbox(bb))
# use the bounding box to get a stamen map
stamMap <- get_stamenmap(bbox)
ggmap(map) +
geom_point(
aes(x = lon, y = lat),
data = gc, colour = "red", size = 3
)
ggmap(stamMap) +
geom_point(
aes(x = lon, y = lat),
data = gc, colour = "red", size = 3
)
}
}
\author{
David Kahle \email{david@kahle.io}
}
| /man/bb2bbox.Rd | permissive | dkahle/ggmap | R | false | true | 1,235 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bb2bbox.R
\name{bb2bbox}
\alias{bb2bbox}
\title{Convert a bb specification to a bbox specification}
\usage{
bb2bbox(bb)
}
\arguments{
\item{bb}{a bounding box in bb format (see examples)}
}
\value{
a bounding box in bbox format (see examples)
}
\description{
In ggmap, all maps (class ggmap) have the bb attribute, a data
frame bounding box specification in terms of the bottom left and
top right points of the spatial extent. This function converts
this specification to a named double vector (with names left,
bottom, right, top) specification that is used in some querying
functions (e.g. get_stamenmap).
}
\examples{
\dontrun{# cut down on R CMD check time
# grab a center/zoom map and compute its bounding box
gc <- geocode("white house, washington dc")
map <- get_map(gc)
(bb <- attr(map, "bb"))
(bbox <- bb2bbox(bb))
# use the bounding box to get a stamen map
stamMap <- get_stamenmap(bbox)
ggmap(map) +
geom_point(
aes(x = lon, y = lat),
data = gc, colour = "red", size = 3
)
ggmap(stamMap) +
geom_point(
aes(x = lon, y = lat),
data = gc, colour = "red", size = 3
)
}
}
\author{
David Kahle \email{david@kahle.io}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{reais}
\alias{reais}
\title{Wrapper around brformat}
\usage{
reais(prefix = "R$", ...)
}
\arguments{
\item{prefix}{currency units. Defaults for brazilian reais.}
\item{\ldots}{further arguments to be passed to \link{brformat}.}
}
\description{
This is a wrapper around \link{brformat}.
}
| /man/reais.Rd | permissive | droubi-org/appraiseR | R | false | true | 383 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{reais}
\alias{reais}
\title{Wrapper around brformat}
\usage{
reais(prefix = "R$", ...)
}
\arguments{
\item{prefix}{currency units. Defaults for brazilian reais.}
\item{\ldots}{further arguments to be passed to \link{brformat}.}
}
\description{
This is a wrapper around \link{brformat}.
}
|
#' ---
#' title: "ANOVA test for `dejection,cheerfulness,agitation,quiescence`~`testType`*`gender`"
#' author: Geiser C. Challco <geiser@alumni.usp.br>
#' comment: This file is automatically generated by Shiny-Statistic app (https://statistic.geiser.tech/)
#' Author - Geiser C. Challco <geiser@alumni.usp.br>
#'
#' Shiny-Statistic is distributed in the hope that it will be useful,
#' but WITHOUT ANY WARRANTY; without even the implied warranty of
#' MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#' GNU General Public License for more details.
#'
#' You should have received a copy of the GNU General Public License.
#' If not, see <https://www.gnu.org/licenses/>.
#' output:
#'   github_document:
#'     toc: true
#'   word_document:
#'     toc: true
#'   html_document:
#'     toc: true
#' fontsize: 10pt
#' ---
#'
## ----setup, include=FALSE-----------------------------------------------------------------------------------------------------------------------
## Install and Load Packages
## (rshinystatistics is installed/updated from GitHub; the rest from CRAN)
if (!'remotes' %in% rownames(installed.packages())) install.packages('remotes')
if (!"rshinystatistics" %in% rownames(installed.packages())) {
remotes::install_github("geiser/rshinystatistics")
} else if (packageVersion("rshinystatistics") < "0.0.0.9300") {
remotes::install_github("geiser/rshinystatistics")
}
wants <- c('ggplot2','ggpubr','rshinystatistics','utils')
has <- wants %in% rownames(installed.packages())
if (any(!has)) install.packages(wants[!has])
library(utils)
library(ggpubr)
library(ggplot2)
library(rshinystatistics)
#'
#'
#' ## Initial Variables and Data
#'
#' * R-script file: [../code/anova.R](../code/anova.R)
#' * Initial table file: [../data/initial-table.csv](../data/initial-table.csv)
#' * Data for dejection [../data/table-for-dejection.csv](../data/table-for-dejection.csv)
#' * Data for cheerfulness [../data/table-for-cheerfulness.csv](../data/table-for-cheerfulness.csv)
#' * Data for agitation [../data/table-for-agitation.csv](../data/table-for-agitation.csv)
#' * Data for quiescence [../data/table-for-quiescence.csv](../data/table-for-quiescence.csv)
#' * Table without outliers and normal distribution of data: [../data/table-with-normal-distribution.csv](../data/table-with-normal-distribution.csv)
#' * Other data files: [../data/](../data/)
#' * Files related to the presented results: [../results/](../results/)
#'
#'
## ---- include=FALSE-----------------------------------------------------------------------------------------------------------------------------
## wid = subject identifier column; between = between-subject factors;
## dvs = the four dependent variables, one CSV per DV keyed by responseId.
wid <- "responseId"
between <- c("testType","gender")
dvs <- c("dejection","cheerfulness","agitation","quiescence")
names(dvs) <- dvs
dat <- lapply(dvs, FUN = function(dv) {
data <- read.csv(paste0("../data/table-for-",dv,".csv"))
rownames(data) <- data[["responseId"]]
return(data)
})
## rdat = working copy for transformations/outlier treatment;
## sdat = copy that will hold the final (normal-distribution) data.
rdat <- dat
sdat <- dat
#'
#' ### Descriptive statistics of initial data
#'
## ---- include=FALSE-----------------------------------------------------------------------------------------------------------------------------
(df <- get.descriptives(dat, dvs, between, include.global = T, symmetry.test = T))
#'
#'
## ---- echo=FALSE--------------------------------------------------------------------------------------------------------------------------------
car::Boxplot(`dejection` ~ `testType`*`gender`, data = dat[["dejection"]], id = list(n = Inf))
car::Boxplot(`cheerfulness` ~ `testType`*`gender`, data = dat[["cheerfulness"]], id = list(n = Inf))
car::Boxplot(`agitation` ~ `testType`*`gender`, data = dat[["agitation"]], id = list(n = Inf))
car::Boxplot(`quiescence` ~ `testType`*`gender`, data = dat[["quiescence"]], id = list(n = Inf))
#'
#' ## Checking of Assumptions
#'
#' ### Assumption: Symmetry and treatment of outliers
#'
#' #### Applying transformation for skewness data when normality is not achieved
#'
#'
#' Applying transformation in "dejection" to reduce skewness
#'
## -----------------------------------------------------------------------------------------------------------------------------------------------
## log10 transform of dejection (before/after residual-density plots).
density.plot.by.residual(rdat[["dejection"]],"dejection",between)
rdat[["dejection"]][["dejection"]] <- log10(dat[["dejection"]][["dejection"]])
density.plot.by.residual(rdat[["dejection"]],"dejection",between)
#'
#'
#'
#'
#' Applying transformation in "agitation" to reduce skewness
#'
## -----------------------------------------------------------------------------------------------------------------------------------------------
## sqrt transform of agitation (before/after residual-density plots).
density.plot.by.residual(rdat[["agitation"]],"agitation",between)
rdat[["agitation"]][["agitation"]] <- sqrt(dat[["agitation"]][["agitation"]])
density.plot.by.residual(rdat[["agitation"]],"agitation",between)
#'
#'
#'
#'
#' #### Dealing with outliers (performing treatment of outliers)
#'
## -----------------------------------------------------------------------------------------------------------------------------------------------
## Winsorize each DV within the testType x gender cells.
rdat[["dejection"]] <- winzorize(rdat[["dejection"]],"dejection", c("testType","gender"))
rdat[["cheerfulness"]] <- winzorize(rdat[["cheerfulness"]],"cheerfulness", c("testType","gender"))
rdat[["agitation"]] <- winzorize(rdat[["agitation"]],"agitation", c("testType","gender"))
rdat[["quiescence"]] <- winzorize(rdat[["quiescence"]],"quiescence", c("testType","gender"))
#'
#' ### Assumption: Normality distribution of data
#'
#' #### Removing data that affect normality (extreme values)
#'
## -----------------------------------------------------------------------------------------------------------------------------------------------
## responseIds flagged as extreme values per DV; removed from the working data.
non.normal <- list(
"dejection" = c("d23aa670-9d47-11eb-9b7e-0daf340a71ab","09bbd000-9e39-11eb-9b7e-0daf340a71ab","eb0ea380-aeb9-11eb-8cbb-599e427a3fce","d79c21e0-b1db-11eb-b944-15c8c1c6ce71","8a1d24d0-b183-11eb-b944-15c8c1c6ce71","d7a07110-df61-11eb-bf23-972ef7bdc96c","18379910-9df4-11eb-9b7e-0daf340a71ab","4b9fd020-9e02-11eb-9b7e-0daf340a71ab","a9467110-a853-11eb-8cbb-599e427a3fce","e7bb7ec0-b057-11eb-b944-15c8c1c6ce71","3cbe7e50-b25a-11eb-b944-15c8c1c6ce71","108b6210-b649-11eb-ad27-3593da35795f","369d0020-df62-11eb-bf23-972ef7bdc96c","3ee27670-df62-11eb-bf23-972ef7bdc96c","9bf2ecf0-df62-11eb-bf23-972ef7bdc96c"),
"cheerfulness" = c("d7a07110-df61-11eb-bf23-972ef7bdc96c"),
"quiescence" = c("369d0020-df62-11eb-bf23-972ef7bdc96c","2145a1e0-df63-11eb-bf23-972ef7bdc96c","46f906c0-9e36-11eb-9b7e-0daf340a71ab")
)
sdat <- removeFromDataTable(rdat, non.normal, wid)
#'
#' #### Result of normality test in the residual model
#'
## ---- include=FALSE-----------------------------------------------------------------------------------------------------------------------------
(df <- normality.test.by.residual(sdat, dvs, between))
#'
#'
#' #### Result of normality test in each group
#'
#' This is an optional validation and only valid for groups with number greater than 30 observations
#'
## ---- include=FALSE-----------------------------------------------------------------------------------------------------------------------------
(df <- get.descriptives(sdat, dvs, between, include.global = F, normality.test = T))
#'
#'
#' **Observation**:
#'
#' As sample sizes increase, parametric tests remain valid even with the violation of normality [[1](#references)].
#' According to the central limit theorem, the sampling distribution tends to be normal if the sample is large, more than (`n > 30`) observations.
#' Therefore, we performed parametric tests with large samples as described as follows:
#'
#' - In cases with the sample size greater than 100 (`n > 100`), we adopted a significance level of `p < 0.01`
#'
#' - For samples with `n > 50` observation, we adopted D'Agostino-Pearson test
#' that offers better accuracy for larger samples [[2](#references)].
#'
#' - For samples' size between `n > 100` and `n <= 200`, we ignored the normality test,
#' and our decision of validating normality was based only in the interpretation of QQ-plots
#' and histograms because the Shapiro-Wilk and D'Agostino-Pearson tests tend to be too sensitive
#' with values greater than 200 observation [[3](#references)].
#'
#' - For samples with `n > 200` observation, we ignore the normality assumption based on the central theorem limit.
#'
#'
#'
#'
#'
#' ### Assumption: Homogeneity of data distribution
#'
## ---- include=FALSE-----------------------------------------------------------------------------------------------------------------------------
(df <- homogeneity.test(sdat, dvs, between))
#'
#'
#' ## Saving the Data with Normal Distribution Used for Performing ANOVA test
#'
## -----------------------------------------------------------------------------------------------------------------------------------------------
## Merge the per-DV tables (sharing the id/factor columns) into one wide table.
ndat <- sdat[[1]]
for (dv in names(sdat)[-1]) ndat <- merge(ndat, sdat[[dv]])
write.csv(ndat, paste0("../data/table-with-normal-distribution.csv"))
#'
#' Descriptive statistics of data with normal distribution
#'
## ---- include=FALSE-----------------------------------------------------------------------------------------------------------------------------
(df <- get.descriptives(sdat, dvs, between))
#'
#'
## ---- echo=FALSE--------------------------------------------------------------------------------------------------------------------------------
## One boxplot per DV; the DV column is duplicated under the literal name `dv`
## so a single formula works for all four variables.
for (dv in dvs) {
car::Boxplot(`dv` ~ `testType`*`gender`, data = sdat[[dv]] %>% cbind(dv=sdat[[dv]][[dv]]), id = list(n = Inf))
}
#'
#' ## Computation of ANOVA test and Pairwise Comparison
#'
#' ### ANOVA test
#'
## ---- include=FALSE-----------------------------------------------------------------------------------------------------------------------------
aov <- anova.test(sdat, dvs, between, type=2, effect.size="ges")
(adf <- get.anova.table(aov))
#'
#'
#' ### Pairwise comparison
#'
## ---- include=FALSE-----------------------------------------------------------------------------------------------------------------------------
pwc <- anova.pwc(sdat, dvs, between, p.adjust.method = "bonferroni")
(pdf <- get.anova.pwc.table(pwc, only.sig = F))
#'
#'
#' ### Descriptive Statistic of Estimated Marginal Means
#'
## ---- include=FALSE-----------------------------------------------------------------------------------------------------------------------------
(emms <- get.anova.emmeans.with.ds(pwc, sdat, dvs, between, "common"))
#'
#'
#'
#' ### Anova plots for the dependent variable "dejection"
## -----------------------------------------------------------------------------------------------------------------------------------------------
# One figure pair per dependent variable: twoWayAnovaPlots() draws jittered
# interaction plots annotated with the ANOVA and pairwise results above.
plots <- twoWayAnovaPlots(sdat[["dejection"]], "dejection", between, aov[["dejection"]], pwc[["dejection"]], c("jitter"), font.label.size=14, step.increase=0.25)
#'
#'
#' #### Plot of "dejection" based on "testType" (color: gender)
## ---- fig.width=7, fig.height=7-----------------------------------------------------------------------------------------------------------------
plots[["testType"]]
#'
#'
#' #### Plot of "dejection" based on "gender" (color: testType)
## ---- fig.width=7, fig.height=7-----------------------------------------------------------------------------------------------------------------
plots[["gender"]]
#'
#'
#'
#' ### Anova plots for the dependent variable "cheerfulness"
## -----------------------------------------------------------------------------------------------------------------------------------------------
plots <- twoWayAnovaPlots(sdat[["cheerfulness"]], "cheerfulness", between, aov[["cheerfulness"]], pwc[["cheerfulness"]], c("jitter"), font.label.size=14, step.increase=0.25)
#'
#'
#' #### Plot of "cheerfulness" based on "testType" (color: gender)
## ---- fig.width=7, fig.height=7-----------------------------------------------------------------------------------------------------------------
plots[["testType"]]
#'
#'
#' #### Plot of "cheerfulness" based on "gender" (color: testType)
## ---- fig.width=7, fig.height=7-----------------------------------------------------------------------------------------------------------------
plots[["gender"]]
#'
#'
#'
#' ### Anova plots for the dependent variable "agitation"
## -----------------------------------------------------------------------------------------------------------------------------------------------
plots <- twoWayAnovaPlots(sdat[["agitation"]], "agitation", between, aov[["agitation"]], pwc[["agitation"]], c("jitter"), font.label.size=14, step.increase=0.25)
#'
#'
#' #### Plot of "agitation" based on "testType" (color: gender)
## ---- fig.width=7, fig.height=7-----------------------------------------------------------------------------------------------------------------
plots[["testType"]]
#'
#'
#' #### Plot of "agitation" based on "gender" (color: testType)
## ---- fig.width=7, fig.height=7-----------------------------------------------------------------------------------------------------------------
plots[["gender"]]
#'
#'
#'
#' ### Anova plots for the dependent variable "quiescence"
## -----------------------------------------------------------------------------------------------------------------------------------------------
plots <- twoWayAnovaPlots(sdat[["quiescence"]], "quiescence", between, aov[["quiescence"]], pwc[["quiescence"]], c("jitter"), font.label.size=14, step.increase=0.25)
#'
#'
#' #### Plot of "quiescence" based on "testType" (color: gender)
## ---- fig.width=7, fig.height=7-----------------------------------------------------------------------------------------------------------------
plots[["testType"]]
#'
#'
#' #### Plot of "quiescence" based on "gender" (color: testType)
## ---- fig.width=7, fig.height=7-----------------------------------------------------------------------------------------------------------------
plots[["gender"]]
#'
#'
#'
#' ### Textual Report
#'
#' ANOVA tests with independent between-subjects variables "testType" (default, stFemale, stMale) and "gender" (Feminino, Masculino) were performed to determine statistically significant differences in the dependent variables "dejection", "cheerfulness", "agitation", "quiescence". For the dependent variable "dejection", there was a statistically significant effect of the factor "gender" with F(1,86)=6.402, p=0.013 and ges=0.069 (effect size).
#' For the dependent variable "cheerfulness", there was a statistically significant effect of the factor "gender" with F(1,100)=10.362, p=0.002 and ges=0.094 (effect size).
#' For the dependent variable "agitation", there was no statistically significant effect.
#' For the dependent variable "quiescence", there was a statistically significant effect of the factor "gender" with F(1,98)=6.34, p=0.013 and ges=0.061 (effect size).
#'
#'
#'
#' Pairwise comparisons using the Estimated Marginal Means (EMMs) were computed to find statistically significant differences among the groups defined by the independent variables, with the p-values adjusted by the method "bonferroni". For the dependent variable "quiescence", the mean in the gender="Feminino" (adj M=3.278 and SD=1.381) was significantly different from the mean in the gender="Masculino" (adj M=4.584 and SD=1.256) with p-adj=0.002; the mean in the gender="Feminino" (adj M=3.948 and SD=1.39) was significantly different from the mean in the gender="Masculino" (adj M=4.936 and SD=1.026) with p-adj=0.028; the mean in the gender="Feminino" (adj M=4.199 and SD=1.581) was significantly different from the mean in the gender="Masculino" (adj M=5.214 and SD=0.963) with p-adj=0.028.
#'
#'
#'
#' ## Tips and References
#'
#' - Use the site [https://www.tablesgenerator.com](https://www.tablesgenerator.com) to convert the HTML tables into Latex format
#'
#' - [2]: Miot, H. A. (2017). Assessing normality of data in clinical and experimental trials. J Vasc Bras, 16(2), 88-91.
#'
#' - [3]: Bárány, Imre; Vu, Van (2007). "Central limit theorems for Gaussian polytopes". Annals of Probability. Institute of Mathematical Statistics. 35 (4): 1593–1621.
#'
| /Results/H4-emotions-env_gender/code/anova.R | permissive | KamilaBenevides/gender-st-experiment | R | false | false | 16,144 | r | #' ---
#' title: "ANOVA test for `dejection,cheerfulness,agitation,quiescence`~`testType`*`gender`"
#' author: Geiser C. Challco <geiser@alumni.usp.br>
#' comment: This file is automatically generate by Shiny-Statistic app (https://statistic.geiser.tech/)
#' Author - Geiser C. Challco <geiser@alumni.usp.br>
#'
#' Shiny-Statistic is distributed in the hope that it will be useful,
#' but WITHOUT ANY WARRANTY; without even the implied warranty of
#' MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#' GNU General Public License for more details.
#'
#' You should have received a copy of the GNU General Public License.
#' If not, see <https://www.gnu.org/licenses/>.
#' output:
#' github_document:
#' toc: true
#' word_document:
#' toc: true
#' html_document:
#' toc: true
#' fontsize: 10pt
#' ---
#'
## ----setup, include=FALSE-----------------------------------------------------------------------------------------------------------------------
## Install and Load Packages
# Bootstrap 'remotes' so the GitHub-only analysis package can be installed.
if (!'remotes' %in% rownames(installed.packages())) install.packages('remotes')
# Install rshinystatistics from GitHub, or reinstall when the local version is
# older than the one this report was generated against.
if (!"rshinystatistics" %in% rownames(installed.packages())) {
remotes::install_github("geiser/rshinystatistics")
} else if (packageVersion("rshinystatistics") < "0.0.0.9300") {
remotes::install_github("geiser/rshinystatistics")
}
# Install any missing CRAN dependencies, then attach everything.
wants <- c('ggplot2','ggpubr','rshinystatistics','utils')
has <- wants %in% rownames(installed.packages())
if (any(!has)) install.packages(wants[!has])
library(utils)
library(ggpubr)
library(ggplot2)
library(rshinystatistics)
#'
#'
#' ## Initial Variables and Data
#'
#' * R-script file: [../code/anova.R](../code/anova.R)
#' * Initial table file: [../data/initial-table.csv](../data/initial-table.csv)
#' * Data for dejection [../data/table-for-dejection.csv](../data/table-for-dejection.csv)
#' * Data for cheerfulness [../data/table-for-cheerfulness.csv](../data/table-for-cheerfulness.csv)
#' * Data for agitation [../data/table-for-agitation.csv](../data/table-for-agitation.csv)
#' * Data for quiescence [../data/table-for-quiescence.csv](../data/table-for-quiescence.csv)
#' * Table without outliers and normal distribution of data: [../data/table-with-normal-distribution.csv](../data/table-with-normal-distribution.csv)
#' * Other data files: [../data/](../data/)
#' * Files related to the presented results: [../results/](../results/)
#'
#'
## ---- include=FALSE-----------------------------------------------------------------------------------------------------------------------------
# Identifier column, between-subject factors and the four dependent variables.
wid <- "responseId"
between <- c("testType", "gender")
dvs <- c("dejection", "cheerfulness", "agitation", "quiescence")
names(dvs) <- dvs
# One data frame per dependent variable, rows keyed by participant id.
dat <- lapply(dvs, function(dv) {
  tbl <- read.csv(paste0("../data/table-for-", dv, ".csv"))
  rownames(tbl) <- tbl[["responseId"]]
  tbl
})
rdat <- dat  # will hold transformed / winsorized values
sdat <- dat  # will hold the outlier-free (normal) subset
#'
#' ### Descriptive statistics of initial data
#'
## ---- include=FALSE-----------------------------------------------------------------------------------------------------------------------------
# Cell and global descriptive statistics plus a symmetry (skewness) check,
# used to pick the transformations applied below. `T` spelled out as TRUE
# (T is an ordinary, reassignable binding in R, not a keyword).
(df <- get.descriptives(dat, dvs, between, include.global = TRUE, symmetry.test = TRUE))
#'
#'
## ---- echo=FALSE--------------------------------------------------------------------------------------------------------------------------------
# Raw-data boxplots per testType x gender cell; id = list(n = Inf) labels
# every outlier row so candidates for removal can be identified below.
car::Boxplot(`dejection` ~ `testType`*`gender`, data = dat[["dejection"]], id = list(n = Inf))
car::Boxplot(`cheerfulness` ~ `testType`*`gender`, data = dat[["cheerfulness"]], id = list(n = Inf))
car::Boxplot(`agitation` ~ `testType`*`gender`, data = dat[["agitation"]], id = list(n = Inf))
car::Boxplot(`quiescence` ~ `testType`*`gender`, data = dat[["quiescence"]], id = list(n = Inf))
#'
#' ## Checking of Assumptions
#'
#' ### Assumption: Symmetry and treatment of outliers
#'
#' #### Applying transformation for skewness data when normality is not achieved
#'
#'
#' Applying transformation in "dejection" to reduce skewness
#'
## -----------------------------------------------------------------------------------------------------------------------------------------------
# Residual density before and after a log10 transform of "dejection",
# applied to reduce skewness. Note the transform reads from the untouched
# `dat` copy, so re-running this chunk is idempotent.
density.plot.by.residual(rdat[["dejection"]],"dejection",between)
rdat[["dejection"]][["dejection"]] <- log10(dat[["dejection"]][["dejection"]])
density.plot.by.residual(rdat[["dejection"]],"dejection",between)
#'
#'
#'
#'
#' Applying transformation in "agitation" to reduce skewness
#'
## -----------------------------------------------------------------------------------------------------------------------------------------------
# Residual density before and after a square-root transform of "agitation",
# applied to reduce skewness (reads from the untouched `dat` copy).
density.plot.by.residual(rdat[["agitation"]],"agitation",between)
rdat[["agitation"]][["agitation"]] <- sqrt(dat[["agitation"]][["agitation"]])
density.plot.by.residual(rdat[["agitation"]],"agitation",between)
#'
#'
#'
#'
#' #### Dealing with outliers (performing treatment of outliers)
#'
## -----------------------------------------------------------------------------------------------------------------------------------------------
# Winsorize each dependent variable within testType x gender cells to tame
# the remaining outliers. dvs iterates dejection, cheerfulness, agitation,
# quiescence in that order, matching the original four explicit calls.
for (dv in dvs) {
  rdat[[dv]] <- winzorize(rdat[[dv]], dv, c("testType", "gender"))
}
#'
#' ### Assumption: Normality distribution of data
#'
#' #### Removing data that affect normality (extreme values)
#'
## -----------------------------------------------------------------------------------------------------------------------------------------------
# Response ids whose extreme values prevent an acceptably normal residual
# distribution; each id is dropped from the affected DV's table only.
non.normal <- list(
"dejection" = c("d23aa670-9d47-11eb-9b7e-0daf340a71ab","09bbd000-9e39-11eb-9b7e-0daf340a71ab","eb0ea380-aeb9-11eb-8cbb-599e427a3fce","d79c21e0-b1db-11eb-b944-15c8c1c6ce71","8a1d24d0-b183-11eb-b944-15c8c1c6ce71","d7a07110-df61-11eb-bf23-972ef7bdc96c","18379910-9df4-11eb-9b7e-0daf340a71ab","4b9fd020-9e02-11eb-9b7e-0daf340a71ab","a9467110-a853-11eb-8cbb-599e427a3fce","e7bb7ec0-b057-11eb-b944-15c8c1c6ce71","3cbe7e50-b25a-11eb-b944-15c8c1c6ce71","108b6210-b649-11eb-ad27-3593da35795f","369d0020-df62-11eb-bf23-972ef7bdc96c","3ee27670-df62-11eb-bf23-972ef7bdc96c","9bf2ecf0-df62-11eb-bf23-972ef7bdc96c"),
"cheerfulness" = c("d7a07110-df61-11eb-bf23-972ef7bdc96c"),
"quiescence" = c("369d0020-df62-11eb-bf23-972ef7bdc96c","2145a1e0-df63-11eb-bf23-972ef7bdc96c","46f906c0-9e36-11eb-9b7e-0daf340a71ab")
)
sdat <- removeFromDataTable(rdat, non.normal, wid)
#'
#' #### Result of normality test in the residual model
#'
## ---- include=FALSE-----------------------------------------------------------------------------------------------------------------------------
# Normality test on the residuals of each DV's two-way model.
(df <- normality.test.by.residual(sdat, dvs, between))
#'
#'
#' #### Result of normality test in each group
#'
#' This is an optional validation and only valid for groups with number greater than 30 observations
#'
## ---- include=FALSE-----------------------------------------------------------------------------------------------------------------------------
# Optional per-group normality check (meaningful only for n > 30 cells, per
# the narrative below). `T`/`F` spelled out as TRUE/FALSE (T and F are
# reassignable bindings, not keywords).
(df <- get.descriptives(sdat, dvs, between, include.global = FALSE, normality.test = TRUE))
#'
#'
#' **Observation**:
#'
#' As sample sizes increase, parametric tests remain valid even with the violation of normality [[1](#references)].
#' According to the central limit theorem, the sampling distribution tends to be normal if the sample is large, more than (`n > 30`) observations.
#' Therefore, we performed parametric tests with large samples as described as follows:
#'
#' - In cases with the sample size greater than 100 (`n > 100`), we adopted a significance level of `p < 0.01`
#'
#' - For samples with `n > 50` observation, we adopted D'Agostino-Pearson test
#' that offers better accuracy for larger samples [[2](#references)].
#'
#' - For samples' size between `n > 100` and `n <= 200`, we ignored the normality test,
#' and our decision of validating normality was based only in the interpretation of QQ-plots
#' and histograms because the Shapiro-Wilk and D'Agostino-Pearson tests tend to be too sensitive
#' with values greater than 200 observation [[3](#references)].
#'
#' - For samples with `n > 200` observation, we ignore the normality assumption based on the central theorem limit.
#'
#'
#'
#'
#'
#' ### Assumption: Homogeneity of data distribution
#'
## ---- include=FALSE-----------------------------------------------------------------------------------------------------------------------------
# Homogeneity-of-variance check across the testType x gender cells.
(df <- homogeneity.test(sdat, dvs, between))
#'
#'
#' ## Saving the Data with Normal Distribution Used for Performing ANOVA test
#'
## -----------------------------------------------------------------------------------------------------------------------------------------------
# Merge the per-DV tables (joined on their shared id/factor columns) into one
# wide table and persist it next to the other derived data files.
ndat <- Reduce(merge, sdat[-1], sdat[[1]])
# Dropped a pointless single-argument paste0() around the literal path.
write.csv(ndat, "../data/table-with-normal-distribution.csv")
#'
#' Descriptive statistics of data with normal distribution
#'
## ---- include=FALSE-----------------------------------------------------------------------------------------------------------------------------
# Descriptive statistics of the cleaned, normally distributed data.
(df <- get.descriptives(sdat, dvs, between))
#'
#'
## ---- echo=FALSE--------------------------------------------------------------------------------------------------------------------------------
# Per-DV boxplots of the cleaned (normal) data, labelling every outlier row.
for (dv in dvs) {
  # Append the current response as a literal `dv` column so a single formula
  # works for every dependent variable.
  plot_dat <- cbind(sdat[[dv]], dv = sdat[[dv]][[dv]])
  car::Boxplot(dv ~ testType * gender, data = plot_dat, id = list(n = Inf))
}
#'
#' ## Computation of ANOVA test and Pairwise Comparison
#'
#' ### ANOVA test
#'
## ---- include=FALSE-----------------------------------------------------------------------------------------------------------------------------
# Type-II two-way ANOVA per dependent variable; generalized eta squared (ges)
# as effect size. NOTE(review): the name `aov` masks stats::aov() here.
aov <- anova.test(sdat, dvs, between, type=2, effect.size="ges")
# Outer parentheses force the assigned table to print in the spun report.
(adf <- get.anova.table(aov))
#'
#'
#' ### Pairwise comparison
#'
## ---- include=FALSE-----------------------------------------------------------------------------------------------------------------------------
# Bonferroni-adjusted pairwise comparisons per dependent variable.
pwc <- anova.pwc(sdat, dvs, between, p.adjust.method = "bonferroni")
# FALSE spelled out (was `F`, a reassignable binding, not a keyword);
# non-significant contrasts are intentionally kept in the report table.
(pdf <- get.anova.pwc.table(pwc, only.sig = FALSE))
#'
#'
#' ### Descriptive Statistic of Estimated Marginal Means
#'
## ---- include=FALSE-----------------------------------------------------------------------------------------------------------------------------
# Estimated marginal means with descriptive statistics ("common" layout);
# parentheses make the result print in the report.
(emms <- get.anova.emmeans.with.ds(pwc, sdat, dvs, between, "common"))
#'
#'
#'
#' ### Anova plots for the dependent variable "dejection"
## -----------------------------------------------------------------------------------------------------------------------------------------------
# One figure pair per dependent variable: twoWayAnovaPlots() draws jittered
# interaction plots annotated with the ANOVA and pairwise results above.
plots <- twoWayAnovaPlots(sdat[["dejection"]], "dejection", between, aov[["dejection"]], pwc[["dejection"]], c("jitter"), font.label.size=14, step.increase=0.25)
#'
#'
#' #### Plot of "dejection" based on "testType" (color: gender)
## ---- fig.width=7, fig.height=7-----------------------------------------------------------------------------------------------------------------
plots[["testType"]]
#'
#'
#' #### Plot of "dejection" based on "gender" (color: testType)
## ---- fig.width=7, fig.height=7-----------------------------------------------------------------------------------------------------------------
plots[["gender"]]
#'
#'
#'
#' ### Anova plots for the dependent variable "cheerfulness"
## -----------------------------------------------------------------------------------------------------------------------------------------------
plots <- twoWayAnovaPlots(sdat[["cheerfulness"]], "cheerfulness", between, aov[["cheerfulness"]], pwc[["cheerfulness"]], c("jitter"), font.label.size=14, step.increase=0.25)
#'
#'
#' #### Plot of "cheerfulness" based on "testType" (color: gender)
## ---- fig.width=7, fig.height=7-----------------------------------------------------------------------------------------------------------------
plots[["testType"]]
#'
#'
#' #### Plot of "cheerfulness" based on "gender" (color: testType)
## ---- fig.width=7, fig.height=7-----------------------------------------------------------------------------------------------------------------
plots[["gender"]]
#'
#'
#'
#' ### Anova plots for the dependent variable "agitation"
## -----------------------------------------------------------------------------------------------------------------------------------------------
plots <- twoWayAnovaPlots(sdat[["agitation"]], "agitation", between, aov[["agitation"]], pwc[["agitation"]], c("jitter"), font.label.size=14, step.increase=0.25)
#'
#'
#' #### Plot of "agitation" based on "testType" (color: gender)
## ---- fig.width=7, fig.height=7-----------------------------------------------------------------------------------------------------------------
plots[["testType"]]
#'
#'
#' #### Plot of "agitation" based on "gender" (color: testType)
## ---- fig.width=7, fig.height=7-----------------------------------------------------------------------------------------------------------------
plots[["gender"]]
#'
#'
#'
#' ### Anova plots for the dependent variable "quiescence"
## -----------------------------------------------------------------------------------------------------------------------------------------------
plots <- twoWayAnovaPlots(sdat[["quiescence"]], "quiescence", between, aov[["quiescence"]], pwc[["quiescence"]], c("jitter"), font.label.size=14, step.increase=0.25)
#'
#'
#' #### Plot of "quiescence" based on "testType" (color: gender)
## ---- fig.width=7, fig.height=7-----------------------------------------------------------------------------------------------------------------
plots[["testType"]]
#'
#'
#' #### Plot of "quiescence" based on "gender" (color: testType)
## ---- fig.width=7, fig.height=7-----------------------------------------------------------------------------------------------------------------
plots[["gender"]]
#'
#'
#'
#' ### Textual Report
#'
#' ANOVA tests with independent between-subjects variables "testType" (default, stFemale, stMale) and "gender" (Feminino, Masculino) were performed to determine statistically significant differences in the dependent variables "dejection", "cheerfulness", "agitation", "quiescence". For the dependent variable "dejection", there was a statistically significant effect of the factor "gender" with F(1,86)=6.402, p=0.013 and ges=0.069 (effect size).
#' For the dependent variable "cheerfulness", there was a statistically significant effect of the factor "gender" with F(1,100)=10.362, p=0.002 and ges=0.094 (effect size).
#' For the dependent variable "agitation", there was no statistically significant effect.
#' For the dependent variable "quiescence", there was a statistically significant effect of the factor "gender" with F(1,98)=6.34, p=0.013 and ges=0.061 (effect size).
#'
#'
#'
#' Pairwise comparisons using the Estimated Marginal Means (EMMs) were computed to find statistically significant differences among the groups defined by the independent variables, with the p-values adjusted by the method "bonferroni". For the dependent variable "quiescence", the mean in the gender="Feminino" (adj M=3.278 and SD=1.381) was significantly different from the mean in the gender="Masculino" (adj M=4.584 and SD=1.256) with p-adj=0.002; the mean in the gender="Feminino" (adj M=3.948 and SD=1.39) was significantly different from the mean in the gender="Masculino" (adj M=4.936 and SD=1.026) with p-adj=0.028; the mean in the gender="Feminino" (adj M=4.199 and SD=1.581) was significantly different from the mean in the gender="Masculino" (adj M=5.214 and SD=0.963) with p-adj=0.028.
#'
#'
#'
#' ## Tips and References
#'
#' - Use the site [https://www.tablesgenerator.com](https://www.tablesgenerator.com) to convert the HTML tables into Latex format
#'
#' - [2]: Miot, H. A. (2017). Assessing normality of data in clinical and experimental trials. J Vasc Bras, 16(2), 88-91.
#'
#' - [3]: Bárány, Imre; Vu, Van (2007). "Central limit theorems for Gaussian polytopes". Annals of Probability. Institute of Mathematical Statistics. 35 (4): 1593–1621.
#'
|
#
# This test file has been generated by kwb.test::create_test_files()
#
test_that("myggtheme() works", {
  # The generated test only called the function; make the "does not error"
  # expectation explicit so testthat does not flag an expectation-free test.
  expect_error(kwb.qmra:::myggtheme(), regexp = NA)
})
| /tests/testthat/test-function-myggtheme.R | permissive | KWB-R/kwb.qmra | R | false | false | 137 | r | #
# This test file has been generated by kwb.test::create_test_files()
#
test_that("myggtheme() works", {
kwb.qmra:::myggtheme()
})
|
library(LncPath)
### Name: geneSetDetail
### Title: Gain insight into the detail of the genes in a certain pathway
### Aliases: geneSetDetail
### Keywords: ~kwd1 ~kwd2
### ** Examples
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
# Load the packaged example result object.
Result <- getExampleData("Result")
# Detail of the genes in the KEGG_RIBOSOME pathway for that result.
Detail <- geneSetDetail(Result, "KEGG_RIBOSOME")
head(Detail)
| /data/genthat_extracted_code/LncPath/examples/geneSetDetail.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 428 | r | library(LncPath)
### Name: geneSetDetail
### Title: Gain insight into the detail of the genes in a certain pathway
### Aliases: geneSetDetail
### Keywords: ~kwd1 ~kwd2
### ** Examples
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
# Load the packaged example result object.
Result <- getExampleData("Result")
# Detail of the genes in the KEGG_RIBOSOME pathway for that result.
Detail <- geneSetDetail(Result, "KEGG_RIBOSOME")
head(Detail)
|
# Show real error messages in the browser instead of Shiny's generic message.
# BUG FIX: `(shiny.sanitize.errors = FALSE)` only created a global variable of
# that name; the option must be set via options().
options(shiny.sanitize.errors = FALSE)
# Pre-aggregated PISA results bundled with the app.
load("data/pisaData.rda")
# One-off refresh of the dictionary from Google Sheets; kept for reference.
#download.file(url = "https://docs.google.com/spreadsheets/d/1LYmlzL14xQlF-nRen9a6morTmU0FsULSoee8xWEw9fA/pub?gid=1417149183&single=true&output=csv", destfile="data/pisaDictionary.csv", 'curl')
pisaDictionary<-read.csv("data/pisaDictionary.csv", header = TRUE, sep=",")
#load("data/pisaDictionary.rda")
# pisadb<-src_bigquery("r-shiny-1141", "pisa")
# pisa2012<- tbl(pisadb, "pisa2012")
# pisa2009<- tbl(pisadb, "pisa2009")
# pisa2006<- tbl(pisadb, "pisa2006")
# Load the (large) student-level PISA waves; the prints give startup feedback
# in the server log. NOTE(review): message() is conventional for this.
print("loading pisa 2015")
load("../pisa2015.rda")
print("loading pisa 2012")
load("../pisa2012b.rda")
Countries<-read.csv("data/countries.csv", header = TRUE, sep=",")
# Named vector for UI selectors: values are CNT codes, names are labels.
countriesList<-Countries$CNT
names(countriesList)<-Countries$Country
ExpertiseLevels<-read.csv("data/ExpertiseLevels.csv", header = TRUE, sep=",")
ExpertiseLevelsLimits<-read.csv("data/expertiseLevelsLimits.csv", header = TRUE, sep=",")
#download.file(url = "https://docs.google.com/spreadsheets/d/15WPWh9Ir-61449iZ3P4vxKZlVP4GXqC-MCX6FFhfooU/pub?gid=439183945&single=true&output=csv", destfile="data/LevelExplenation.csv", 'curl')
# Explanatory text per proficiency level (file name keeps its historic typo).
LevelExplenation<-read.csv("data/LevelExplenation.csv", header = TRUE, sep=",")
# Fixed hex colours keyed by group; the "<Group><Level>" entries are the
# Low/Medium/High variants used when splitting a group by level.
groupColours<- c(
General="#b276b2",
Male="#5da5da",
Female="#f17cb0",
GeneralLow="#bc99c7",
GeneralMedium="#b276b2",
GeneralHigh="#7b3a96",
MaleHigh="#265dab",
MaleLow="#88bde6",
MaleMedium="#5da5da",
FemaleHigh="#e5126f",
FemaleLow="#f6aac9",
FemaleMedium="#f17cb0"
)
# Server entry point. Each helper is sourced with local=TRUE so the outputs
# and observers it defines live in this server function's scope (one set per
# session).
shinyServer(function(input, output, session) {
source('pisa.scores.R', local=TRUE)
source('pisa.expertise.R', local=TRUE)
source('urlSearch.R', local=TRUE)
source('pisa.survey.R', local=TRUE)
source('pisa.analyze.R', local=TRUE)
# output$output <- renderPrint({
# input$eval
# return(isolate(eval(parse(text=input$code))))
# })
# Re-knit the user-supplied R Markdown when the "eval" button fires.
# SECURITY NOTE(review): knit2html() executes arbitrary R code from
# input$rmd -- acceptable only if this app is not publicly exposed.
output$knitDoc <- renderUI({
input$eval
return(isolate(HTML(knit2html(text = input$rmd, fragment.only = TRUE, quiet = TRUE))))
})
})
| /server.R | no_license | avnerkantor/openpisa | R | false | false | 2,013 | r | (shiny.sanitize.errors = FALSE)
load("data/pisaData.rda")
#download.file(url = "https://docs.google.com/spreadsheets/d/1LYmlzL14xQlF-nRen9a6morTmU0FsULSoee8xWEw9fA/pub?gid=1417149183&single=true&output=csv", destfile="data/pisaDictionary.csv", 'curl')
pisaDictionary<-read.csv("data/pisaDictionary.csv", header = TRUE, sep=",")
#load("data/pisaDictionary.rda")
# pisadb<-src_bigquery("r-shiny-1141", "pisa")
# pisa2012<- tbl(pisadb, "pisa2012")
# pisa2009<- tbl(pisadb, "pisa2009")
# pisa2006<- tbl(pisadb, "pisa2006")
print("loading pisa 2015")
load("../pisa2015.rda")
print("loading pisa 2012")
load("../pisa2012b.rda")
Countries<-read.csv("data/countries.csv", header = TRUE, sep=",")
countriesList<-Countries$CNT
names(countriesList)<-Countries$Country
ExpertiseLevels<-read.csv("data/ExpertiseLevels.csv", header = TRUE, sep=",")
ExpertiseLevelsLimits<-read.csv("data/expertiseLevelsLimits.csv", header = TRUE, sep=",")
#download.file(url = "https://docs.google.com/spreadsheets/d/15WPWh9Ir-61449iZ3P4vxKZlVP4GXqC-MCX6FFhfooU/pub?gid=439183945&single=true&output=csv", destfile="data/LevelExplenation.csv", 'curl')
LevelExplenation<-read.csv("data/LevelExplenation.csv", header = TRUE, sep=",")
groupColours<- c(
General="#b276b2",
Male="#5da5da",
Female="#f17cb0",
GeneralLow="#bc99c7",
GeneralMedium="#b276b2",
GeneralHigh="#7b3a96",
MaleHigh="#265dab",
MaleLow="#88bde6",
MaleMedium="#5da5da",
FemaleHigh="#e5126f",
FemaleLow="#f6aac9",
FemaleMedium="#f17cb0"
)
# Server entry point. Each helper is sourced with local=TRUE so the outputs
# and observers it defines live in this server function's scope (one set per
# session).
shinyServer(function(input, output, session) {
source('pisa.scores.R', local=TRUE)
source('pisa.expertise.R', local=TRUE)
source('urlSearch.R', local=TRUE)
source('pisa.survey.R', local=TRUE)
source('pisa.analyze.R', local=TRUE)
# output$output <- renderPrint({
# input$eval
# return(isolate(eval(parse(text=input$code))))
# })
# Re-knit the user-supplied R Markdown when the "eval" button fires.
# SECURITY NOTE(review): knit2html() executes arbitrary R code from
# input$rmd -- acceptable only if this app is not publicly exposed.
output$knitDoc <- renderUI({
input$eval
return(isolate(HTML(knit2html(text = input$rmd, fragment.only = TRUE, quiet = TRUE))))
})
})
|
library(tidyverse)
library(cowplot)
library(grid)
library(extrafont)
library(Cairo)
loadfonts()
source("graphs.R")
### Data stuff
# Aggregate death counts and crude-rate figures (point estimate plus 95% CI
# bounds) for one year/state group; used with dplyr::do() below.
#
# NOTE(review): rates are summed, not averaged -- this matches the original
# behaviour but only makes sense if each group's rows are disjoint
# subcategories; confirm against the CDC extract.
gun.stats.all <- function(deaths, cr, cr.upper, cr.lower) {
  deaths <- sum(deaths)
  # `T` spelled out as TRUE (T is a reassignable binding, not a keyword).
  crude.rate <- sum(cr, na.rm = TRUE)
  crude.rate.upper <- sum(cr.upper, na.rm = TRUE)
  crude.rate.lower <- sum(cr.lower, na.rm = TRUE)
  data.frame(deaths, crude.rate, crude.rate.upper, crude.rate.lower)
}
# Wrap the background-check response column in a one-column data frame so it
# can be returned from dplyr::do().
background.check.stats <- function(bg.check) {
  data.frame(bg.check)
}
# National crude-rate aggregates (point estimate plus 95% CI bounds) per
# background-check group; used with dplyr::do() below.
# NOTE(review): rates are summed, matching the original behaviour.
nation.deaths.bg.stats <- function(cr, cr.upper, cr.lower) {
  # `T` spelled out as TRUE (T is a reassignable binding, not a keyword).
  crude.rate <- sum(cr, na.rm = TRUE)
  crude.rate.upper <- sum(cr.upper, na.rm = TRUE)
  crude.rate.lower <- sum(cr.lower, na.rm = TRUE)
  data.frame(crude.rate, crude.rate.upper, crude.rate.lower)
}
# CDC-style deaths per year/state plus Everytown law data.
death.data <- read.csv("gun_deaths_year_state_clean.csv")
# Convert full state names to 2-letter abbreviations.
# NOTE(review): names absent from state.name (e.g. District of Columbia)
# become NA here -- confirm that is intended.
death.data$state <- state.abb[match(death.data$state, state.name)]
law.data <- read.csv("Everytown_research_master.csv")
# Per-year/state death statistics.
gun.stats.year.state <- death.data %>%
group_by(year, state) %>%
do(gun.stats.all(.$deaths, .$crude.rate, .$crude.rate.upper.95.ci, .$crude.rate.lower.95.ci))
# Per-year/state background-check status (data available from 1999 on).
bg.checks.year.state <- filter(law.data, year >= 1999) %>%
group_by(year, state) %>%
do(background.check.stats(.$response))
all.data.year.state <- merge(gun.stats.year.state, bg.checks.year.state, by=c("state", "year"))
# National rates per year, split by background-check status.
all.data.year <- all.data.year.state %>%
group_by(year, bg.check) %>%
do(nation.deaths.bg.stats(.$crude.rate, .$crude.rate.upper, .$crude.rate.lower))
### Grid layouts
# Viewport addressing row(s) x / column(s) y of the active grid layout.
vplayout <- function(x, y) {
  viewport(layout.pos.row = x, layout.pos.col = y)
}
# Full-page "Gun Deaths in Missouri" graphic: the state plot fills a 9x12
# grid layout with a title and a three-line annotation drawn on top.
# Relies on globals from graphs.R (state.plot, text.fontfam, title.col,
# text.col) -- TODO confirm they are defined there.
grid_mo <- function(dataset) {
mo.p <- state.plot(dataset, "MO", 2007)
### Grid layout stuff
grid.newpage()
pushViewport(viewport(layout = grid.layout(9, 12)))
# Graphs
print(mo.p, vp = vplayout(1:9, 1:12))
# Title
grid.text("Gun Deaths in Missouri",
vp=vplayout(1, 1:12),
y=unit(0.4, "npc"),
gp=gpar(fontfamily=text.fontfam, fontface="bold", col=title.col, cex=2))
## MAIN TEXT
xval <- 0.95
yval <- 0.5
line.gap <- 0.33
# y position of annotation line `line` (0-based), stepping down by `gap`.
newline <- function(y, line, gap=line.gap) {
y - (line * gap)
}
block.cex <- 1.3
block.vp <- vplayout(2, 3)
block.hjust <- 0
# Three left-aligned annotation lines sharing the same viewport/x position.
grid.text(expression("In 2007, Missouri repealed laws requiring criminal background"),
vp=block.vp,
x=unit(xval, "npc"),
y=unit(newline(yval, 0), "npc"),
hjust=block.hjust,
gp=gpar(fontfamily=text.fontfam, col=text.col, cex=block.cex))
grid.text(expression("checks for gun sales by unlicensed sellers, and "),
vp=block.vp,
x=unit(xval, "npc"),
y=unit(newline(yval, 1), "npc"),
hjust=block.hjust,
gp=gpar(fontfamily=text.fontfam, col=text.col, cex=block.cex))
grid.text(expression("requiring record of the sale to be kept."),
vp=block.vp,
x=unit(xval, "npc"),
y=unit(newline(yval, 2), "npc"),
hjust=block.hjust,
gp=gpar(fontfamily=text.fontfam, col=text.col, cex=block.cex))
}
# Full-page national comparison: the by-state plot plus its density panel
# fill rows 3-9 of a 9x12 layout, with a title, the highlighted year, and a
# multi-colour annotation drawn on top.
#
# Colouring trick: each annotation line is drawn several times at the SAME
# position; phantom() reserves (invisible) space for all words except the
# ones a given pass colours, so "with"/"without" get their own colours while
# the rest of the sentence stays in text.col.
grid_national <- function(dataset, yr) {
nat.p <- national.plot(dataset, hlt.year=yr)
nat.p.density <- national.plot.density(dataset, hlt.year=yr)
natp_with_den <- national_plot_with_density(nat.p, nat.p.density)
### Grid layout stuff
grid.newpage()
pushViewport(viewport(layout = grid.layout(9, 12)))
# Graphs
print(natp_with_den, vp = vplayout(3:9, 1:12))
# Title
grid.text("Gun Deaths by State",
vp=vplayout(1, 1:12),
y=unit(0.4, "npc"),
gp=gpar(fontfamily=text.fontfam, fontface="bold", col=title.col, cex=2))
# Year badge above the plot area.
grid.text(yr,
vp=vplayout(3, 11),
x=unit(0.7, "npc"),
y=unit(1.3, "npc"),
gp=gpar(fontfamily=fontfam, fontface="bold", col=title.col, cex=2))
## MAIN TEXT
xval <- 0
yval <- 0.65
line.gap <- 0.33
# y position of annotation line `line` (0-based), stepping down by `gap`.
newline <- function(y, line, gap=line.gap) {
y - (line * gap)
}
block.cex <- 1.3
block.vp <- vplayout(2, 2)
block.hjust <- 0
grid.text(expression("Over time, gun death rates have stayed the same in states " *
phantom("with") *
" and " *
phantom("without") *
" laws"),
vp=block.vp,
x=unit(xval, "npc"),
y=unit(newline(yval, 0), "npc"),
hjust=block.hjust,
gp=gpar(fontfamily=text.fontfam, col=text.col, cex=block.cex))
grid.text(expression(phantom("Over time, gun death rates have stayed the same in states ") *
"with" *
phantom(" and ") *
phantom("without") *
phantom(" laws")),
vp=block.vp,
x=unit(xval, "npc"),
y=unit(newline(yval, 0), "npc"),
hjust=block.hjust,
gp=gpar(fontfamily=text.fontfam, col=good.col, cex=block.cex))
grid.text(expression(phantom("Over time, gun death rates have stayed the same in states ") *
phantom("with") *
phantom(" and ") *
"without" *
phantom(" laws")),
vp=block.vp,
x=unit(xval, "npc"),
y=unit(newline(yval, 0), "npc"),
hjust=block.hjust,
gp=gpar(fontfamily=text.fontfam, col=bad.col, cex=block.cex))
# NOTE(review): the base string below ends "...but states" with no trailing
# space, while its coloured overlay phantoms "...but states " WITH one --
# the "without" overlay may be offset by one space; verify visually.
grid.text(expression("requiring background checks for gun sales by unlicensed sellers, but states" *
phantom("without")),
vp=block.vp,
x=unit(xval, "npc"),
y=unit(newline(yval, 1), "npc"),
hjust=block.hjust,
gp=gpar(fontfamily=text.fontfam, col=text.col, cex=block.cex))
grid.text(expression(phantom("requiring background checks for gun sales by unlicensed sellers, but states ") *
"without"),
vp=block.vp,
x=unit(xval, "npc"),
y=unit(newline(yval, 1), "npc"),
hjust=block.hjust,
gp=gpar(fontfamily=text.fontfam, col=bad.col, cex=block.cex))
grid.text(expression("those laws have always had significantly more deaths than those " *
phantom("with") *
"."),
vp=block.vp,
x=unit(xval, "npc"),
y=unit(newline(yval, 2), "npc"),
hjust=block.hjust,
gp=gpar(fontfamily=text.fontfam, col=text.col, cex=block.cex))
grid.text(expression(phantom("those laws have always had significantly more deaths than those ") *
"with" *
phantom(".")),
vp=block.vp,
x=unit(xval, "npc"),
y=unit(newline(yval, 2), "npc"),
hjust=block.hjust,
gp=gpar(fontfamily=text.fontfam, col=good.col, cex=block.cex))
}
# Render the annotated Missouri graphic to missouri_gun_deaths.svg.
#
# BUG FIX: `dataset` was accepted but ignored -- the body always used the
# global gun.stats.year.state. The argument is now honoured; callers that
# passed that same global are unaffected.
make_mo_svg <- function(dataset) {
  svg("missouri_gun_deaths.svg", family = fontfam, width = 12, height = 8)
  # Guarantee the device is closed even if plotting fails.
  on.exit(dev.off(), add = TRUE)
  grid_mo(dataset)
}
# Render the national background-check comparison to an SVG file; a positive
# `year` is highlighted and becomes part of the file name, year == 0 renders
# the un-highlighted overview.
make_national_svg <- function(year = 0) {
  # paste0() replaces the needlessly indirect paste(c(...), collapse = "").
  if (year > 0) {
    fname <- paste0("national_gun_deaths_bg_checks_", year, ".svg")
  } else {
    fname <- "national_gun_deaths_bg_checks.svg"
  }
  svg(fname, family = fontfam, width = 12, height = 8)
  # Guarantee the device is closed even if plotting fails.
  on.exit(dev.off(), add = TRUE)
  # BUG FIX: previously called grid_hectic(), which is not defined anywhere
  # in this file; grid_national() is the national renderer defined above.
  grid_national(all.data.year.state, year)
}
# Write one national background-check SVG for every year in the data
# range (1999-2015), delegating to make_national_svg().
make_all_national_svgs <- function() {
  for (yr in seq(1999, 2015)) {
    make_national_svg(year = yr)
  }
}
| /gun_stats.R | no_license | slantedlabs/gun_violence_data | R | false | false | 7,638 | r | library(tidyverse)
library(cowplot)
library(grid)
library(extrafont)
library(Cairo)
loadfonts()
source("graphs.R")
### Data stuff
# Aggregate one (year, state) group of gun-death records: total deaths plus
# the summed crude death rate and its 95% CI bounds, returned as a one-row
# data frame (the shape the plyr-style do() pipeline below expects).
#
# NAs are dropped from the three rate columns.
# NOTE(review): `deaths` is summed WITHOUT na.rm, so an NA count propagates
# to the total -- confirm that asymmetry is intended.
gun.stats.all <- function(deaths, cr, cr.upper, cr.lower) {
  deaths <- sum(deaths)
  crude.rate <- sum(cr, na.rm = TRUE)  # TRUE, not T: T can be reassigned
  crude.rate.upper <- sum(cr.upper, na.rm = TRUE)
  crude.rate.lower <- sum(cr.lower, na.rm = TRUE)
  data.frame(deaths, crude.rate, crude.rate.upper, crude.rate.lower)
}
# Wrap the raw background-check response vector in a single-column data
# frame named `bg.check` -- the shape expected by the do() pipeline below.
background.check.stats <- function(bg.check) {
  data.frame(bg.check)
}
# National aggregate for one (year, background-check status) group: summed
# crude death rate plus its 95% CI bounds, NAs dropped, as a one-row data
# frame for the do() pipeline.
nation.deaths.bg.stats <- function(cr, cr.upper, cr.lower) {
  crude.rate <- sum(cr, na.rm = TRUE)  # TRUE, not T: T can be reassigned
  crude.rate.upper <- sum(cr.upper, na.rm = TRUE)
  crude.rate.lower <- sum(cr.lower, na.rm = TRUE)
  data.frame(crude.rate, crude.rate.upper, crude.rate.lower)
}
# Per-year, per-state gun-death data; convert full state names to 2-letter
# abbreviations so they merge with the law data below.
death.data <- read.csv("gun_deaths_year_state_clean.csv")
death.data$state <- state.abb[match(death.data$state, state.name)]
# Everytown background-check-law data.
law.data <- read.csv("Everytown_research_master.csv")
# Deaths and crude rates aggregated per year x state.
gun.stats.year.state <- death.data %>%
group_by(year, state) %>%
do(gun.stats.all(.$deaths, .$crude.rate, .$crude.rate.upper.95.ci, .$crude.rate.lower.95.ci))
# Background-check responses per year x state, restricted to 1999 onward
# (presumably to line up with the span of the death data -- TODO confirm).
bg.checks.year.state <- filter(law.data, year >= 1999) %>%
group_by(year, state) %>%
do(background.check.stats(.$response))
# Join death rates with law status, then aggregate nationally per
# year x background-check status.
all.data.year.state <- merge(gun.stats.year.state, bg.checks.year.state, by=c("state", "year"))
all.data.year <- all.data.year.state %>%
group_by(year, bg.check) %>%
do(nation.deaths.bg.stats(.$crude.rate, .$crude.rate.upper, .$crude.rate.lower))
### Grid layouts
# Helper: a grid viewport positioned at row(s) x, column(s) y of the
# current grid.layout().
vplayout <- function(x, y) viewport(layout.pos.row = x, layout.pos.col = y)
# Draw the full Missouri figure: the state time-series plot from
# state.plot() plus a grid-based title and a three-line caption.
# Relies on helpers/globals defined elsewhere in this script or sourced
# from graphs.R: state.plot(), vplayout(), text.fontfam, text.col,
# title.col.
grid_mo <- function(dataset) {
# Time-series plot for Missouri; 2007 is highlighted (year of the repeal
# described in the caption).
mo.p <- state.plot(dataset, "MO", 2007)
### Grid layout stuff
grid.newpage()
# 9 x 12 cell layout: the plot fills the page, text is placed into cells.
pushViewport(viewport(layout = grid.layout(9, 12)))
# Graphs
print(mo.p, vp = vplayout(1:9, 1:12))
# Title
grid.text("Gun Deaths in Missouri",
vp=vplayout(1, 1:12),
y=unit(0.4, "npc"),
gp=gpar(fontfamily=text.fontfam, fontface="bold", col=title.col, cex=2))
## MAIN TEXT
# Anchor position of the caption block, in npc units within block.vp.
xval <- 0.95
yval <- 0.5
line.gap <- 0.33
# y-coordinate of the `line`-th caption line below the anchor.
newline <- function(y, line, gap=line.gap) {
y - (line * gap)
}
block.cex <- 1.3
block.vp <- vplayout(2, 3)
block.hjust <- 0
# Caption: one grid.text() call per rendered line.
grid.text(expression("In 2007, Missouri repealed laws requiring criminal background"),
vp=block.vp,
x=unit(xval, "npc"),
y=unit(newline(yval, 0), "npc"),
hjust=block.hjust,
gp=gpar(fontfamily=text.fontfam, col=text.col, cex=block.cex))
grid.text(expression("checks for gun sales by unlicensed sellers, and "),
vp=block.vp,
x=unit(xval, "npc"),
y=unit(newline(yval, 1), "npc"),
hjust=block.hjust,
gp=gpar(fontfamily=text.fontfam, col=text.col, cex=block.cex))
grid.text(expression("requiring record of the sale to be kept."),
vp=block.vp,
x=unit(xval, "npc"),
y=unit(newline(yval, 2), "npc"),
hjust=block.hjust,
gp=gpar(fontfamily=text.fontfam, col=text.col, cex=block.cex))
}
# Compose the national figure: per-state time-series plot plus a density
# side panel, a title, the highlighted year, and a multicolour caption.
#
# The caption uses the grid/plotmath phantom() trick: the same sentence is
# drawn several times, each call colouring one word while phantom()
# reserves invisible space for the rest, so differently coloured words sit
# on one baseline. For the overlays to align, each literal string must
# match its phantom() twin character-for-character (including spaces).
#
# Relies on helpers/globals from this script / graphs.R: national.plot(),
# national.plot.density(), national_plot_with_density(), vplayout(),
# fontfam, text.fontfam, text.col, title.col, good.col, bad.col.
grid_national <- function(dataset, yr) {
nat.p <- national.plot(dataset, hlt.year=yr)
nat.p.density <- national.plot.density(dataset, hlt.year=yr)
natp_with_den <- national_plot_with_density(nat.p, nat.p.density)
### Grid layout stuff
grid.newpage()
pushViewport(viewport(layout = grid.layout(9, 12)))
# Graphs
print(natp_with_den, vp = vplayout(3:9, 1:12))
# Title
grid.text("Gun Deaths by State",
vp=vplayout(1, 1:12),
y=unit(0.4, "npc"),
gp=gpar(fontfamily=text.fontfam, fontface="bold", col=title.col, cex=2))
grid.text(yr,
vp=vplayout(3, 11),
x=unit(0.7, "npc"),
y=unit(1.3, "npc"),
gp=gpar(fontfamily=fontfam, fontface="bold", col=title.col, cex=2))
## MAIN TEXT
xval <- 0
yval <- 0.65
line.gap <- 0.33
# y-position of the `line`-th caption line below the block anchor.
newline <- function(y, line, gap=line.gap) {
y - (line * gap)
}
block.cex <- 1.3
block.vp <- vplayout(2, 2)
block.hjust <- 0
# Caption line 1 -- base (black) text with phantoms for the coloured words.
grid.text(expression("Over time, gun death rates have stayed the same in states " *
phantom("with") *
" and " *
phantom("without") *
" laws"),
vp=block.vp,
x=unit(xval, "npc"),
y=unit(newline(yval, 0), "npc"),
hjust=block.hjust,
gp=gpar(fontfamily=text.fontfam, col=text.col, cex=block.cex))
# Overlay: only the word "with", in good.col.
grid.text(expression(phantom("Over time, gun death rates have stayed the same in states ") *
"with" *
phantom(" and ") *
phantom("without") *
phantom(" laws")),
vp=block.vp,
x=unit(xval, "npc"),
y=unit(newline(yval, 0), "npc"),
hjust=block.hjust,
gp=gpar(fontfamily=text.fontfam, col=good.col, cex=block.cex))
# Overlay: only the word "without", in bad.col.
grid.text(expression(phantom("Over time, gun death rates have stayed the same in states ") *
phantom("with") *
phantom(" and ") *
"without" *
phantom(" laws")),
vp=block.vp,
x=unit(xval, "npc"),
y=unit(newline(yval, 0), "npc"),
hjust=block.hjust,
gp=gpar(fontfamily=text.fontfam, col=bad.col, cex=block.cex))
# Caption line 2. BUG FIX: the base string previously lacked the trailing
# space present in its phantom() twin below, nudging the red "without"
# overlay out of alignment; the space is restored here.
grid.text(expression("requiring background checks for gun sales by unlicensed sellers, but states " *
phantom("without")),
vp=block.vp,
x=unit(xval, "npc"),
y=unit(newline(yval, 1), "npc"),
hjust=block.hjust,
gp=gpar(fontfamily=text.fontfam, col=text.col, cex=block.cex))
grid.text(expression(phantom("requiring background checks for gun sales by unlicensed sellers, but states ") *
"without"),
vp=block.vp,
x=unit(xval, "npc"),
y=unit(newline(yval, 1), "npc"),
hjust=block.hjust,
gp=gpar(fontfamily=text.fontfam, col=bad.col, cex=block.cex))
# Caption line 3 plus its "with" overlay.
grid.text(expression("those laws have always had significantly more deaths than those " *
phantom("with") *
"."),
vp=block.vp,
x=unit(xval, "npc"),
y=unit(newline(yval, 2), "npc"),
hjust=block.hjust,
gp=gpar(fontfamily=text.fontfam, col=text.col, cex=block.cex))
grid.text(expression(phantom("those laws have always had significantly more deaths than those ") *
"with" *
phantom(".")),
vp=block.vp,
x=unit(xval, "npc"),
y=unit(newline(yval, 2), "npc"),
hjust=block.hjust,
gp=gpar(fontfamily=text.fontfam, col=good.col, cex=block.cex))
}
# Render the Missouri gun-deaths figure to "missouri_gun_deaths.svg".
# BUG FIX: the `dataset` argument was previously ignored in favour of the
# global `gun.stats.year.state`; it is now actually used, with that global
# as the default so existing callers behave exactly as before.
# `fontfam` and grid_mo() are defined elsewhere in this script / graphs.R.
make_mo_svg <- function(dataset = gun.stats.year.state) {
  svg("missouri_gun_deaths.svg", family = fontfam, width = 12, height = 8)
  # Guarantee the SVG device is closed even if plotting fails.
  on.exit(dev.off(), add = TRUE)
  grid_mo(dataset)
}
# Render the national gun-deaths / background-check figure to an SVG file.
#
#   year > 0  -> per-year file "national_gun_deaths_bg_checks_<year>.svg"
#   year == 0 -> summary file  "national_gun_deaths_bg_checks.svg"
#
# `fontfam`, `all.data.year.state` and grid_hectic() come from the
# surrounding script / sourced "graphs.R".
make_national_svg <- function(year = 0) {
  # paste0() replaces the old paste(c(...), sep = "", collapse = "") idiom.
  fname <- if (year > 0) {
    paste0("national_gun_deaths_bg_checks_", year, ".svg")
  } else {
    "national_gun_deaths_bg_checks.svg"
  }
  svg(fname, family = fontfam, width = 12, height = 8)
  # Close the device even if plotting errors out, so later plots are not
  # written into a dangling SVG device.
  on.exit(dev.off(), add = TRUE)
  grid_hectic(all.data.year.state, year)
}
# Write one national background-check SVG for every year in the data
# range (1999-2015), delegating to make_national_svg().
make_all_national_svgs <- function() {
  for (yr in seq(1999, 2015)) {
    make_national_svg(year = yr)
  }
}
|
testlist <- list(genotype = c(-2114850319L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
# Exercise the unexported C++ routine detectRUNS:::genoConvertCpp with the
# fuzzer-generated argument list; do.call() splats `testlist` as arguments.
result <- do.call(detectRUNS:::genoConvertCpp,testlist)
str(result) | /detectRUNS/inst/testfiles/genoConvertCpp/libFuzzer_genoConvertCpp/genoConvertCpp_valgrind_files/1609875038-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 372 | r | testlist <- list(genotype = c(-2114850319L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
# Exercise the unexported C++ routine detectRUNS:::genoConvertCpp with the
# fuzzer-generated argument list; do.call() splats `testlist` as arguments.
result <- do.call(detectRUNS:::genoConvertCpp,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RSProjection.R
\name{RSProjection}
\alias{PlotRSprojection}
\alias{RSProjection}
\title{Random Skewers projection}
\usage{
RSProjection(cov.matrix.array, p = 0.95, num.vectors = 1000)
PlotRSprojection(rs_proj, cov.matrix.array, p = 0.95, ncols = 5)
}
\arguments{
\item{cov.matrix.array}{Array with dimensions traits x traits x populations x MCMCsamples}
\item{p}{significance threshold for comparison of variation in each random direction}
\item{num.vectors}{number of random vectors}
\item{rs_proj}{output from RSProjection}
\item{ncols}{number of columns in plot}
}
\value{
projection of all matrices in all random vectors
set of random vectors and confidence intervals for the projections
eigen decomposition of the random vectors in directions with significant differences of variations
}
\description{
Not tested!
Uses MCMC Bayesian posterior samples of a set of covariance matrices to identify
directions of the morphospace in which these matrices differ in their amount of genetic variance.
}
\examples{
library(magrittr)
# small MCMCsample to reduce run time, actual sample should be larger
data(dentus)
cov.matrices = dlply(dentus, .(species), function(x) lm(as.matrix(x[,1:4])~1)) \%>\%
laply(function(x) BayesianCalculateMatrix(x, samples = 50)$Ps)
cov.matrices = aperm(cov.matrices, c(3, 4, 1, 2))
rs_proj = RSProjection(cov.matrices, p = 0.8)
PlotRSprojection(rs_proj, cov.matrices, ncol = 5)
}
\references{
Aguirre, J. D., E. Hine, K. McGuigan, and M. W. Blows. "Comparing G: multivariate analysis of genetic variation in multiple populations." Heredity 112, no. 1 (2014): 21-29.
}
| /man/RSProjection.Rd | no_license | wgar84/evolqg | R | false | true | 1,706 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RSProjection.R
\name{RSProjection}
\alias{PlotRSprojection}
\alias{RSProjection}
\title{Random Skewers projection}
\usage{
RSProjection(cov.matrix.array, p = 0.95, num.vectors = 1000)
PlotRSprojection(rs_proj, cov.matrix.array, p = 0.95, ncols = 5)
}
\arguments{
\item{cov.matrix.array}{Array with dimensions traits x traits x populations x MCMCsamples}
\item{p}{significance threshold for comparison of variation in each random direction}
\item{num.vectors}{number of random vectors}
\item{rs_proj}{output from RSProjection}
\item{ncols}{number of columns in plot}
}
\value{
projection of all matrices in all random vectors
set of random vectors and confidence intervals for the projections
eigen decomposition of the random vectors in directions with significant differences of variations
}
\description{
Not tested!
Uses MCMC Bayesian posterior samples of a set of covariance matrices to identify
directions of the morphospace in which these matrices differ in their amount of genetic variance.
}
\examples{
library(magrittr)
# small MCMCsample to reduce run time, actual sample should be larger
data(dentus)
cov.matrices = dlply(dentus, .(species), function(x) lm(as.matrix(x[,1:4])~1)) \%>\%
laply(function(x) BayesianCalculateMatrix(x, samples = 50)$Ps)
cov.matrices = aperm(cov.matrices, c(3, 4, 1, 2))
rs_proj = RSProjection(cov.matrices, p = 0.8)
PlotRSprojection(rs_proj, cov.matrices, ncol = 5)
}
\references{
Aguirre, J. D., E. Hine, K. McGuigan, and M. W. Blows. "Comparing G: multivariate analysis of genetic variation in multiple populations." Heredity 112, no. 1 (2014): 21-29.
}
|
# load cached data
load('S:/NRCS/430 SOI Soil Survey/430-13 Investigations/Lab_Data/cached-data/kssl-SPC.Rda')
#### subset Sonora Office stuff here:
# keep only those lab data that are within MLRA 17, 18, 22A
# by filtering on MLRA code derived from the spatial layer
lab <- lab[which(lab$mlra %in% c('17', '18', '22A')), ]
lab$mlra <- factor(lab$mlra)
# cleanup
gc(reset = TRUE)
## fill some missing data if possible
## BS at pH 8.2
# (p1 <- xyplot(bs82 ~ bs7, data=horizons(lab), col='black', type=c('p','smooth','g')))
# (p2 <- xyplot(bs82 ~ estimated_ph_h2o, data=horizons(lab), col='black', type=c('p','smooth','g')))
png(file='figures/bs82-vs-bs7.png', width=600, height=600)
print(hexbinplot(bs82 ~ bs7, data=horizons(lab), colramp=viridis, colorkey=FALSE, xbins=30, main='MLRAs: 17, 18, 22A', ylab='Base Saturation (NH4-Ac, pH 7)', xlab='Base Saturation (sum of bases, pH 8.2)', trans=log, inv=exp, subset=bs82 < 100 & bs7 < 100, asp=1) + latticeExtra::layer(panel.abline(0, 1, col='red', lwd=2, lty=2)))
dev.off()
png(file='figures/bs82-vs-ph_h2o.png', width=600, height=600)
print(hexbinplot(bs82 ~ estimated_ph_h2o, data=horizons(lab), colramp=viridis, colorkey=FALSE, xbins=50, main='MLRAs: 17, 18, 22A', xlab='pH 1:1 H2O', ylab='Base Saturation (sum of bases, pH 8.2)', trans=log, inv=exp, subset=bs82 < 100, asp=1))
dev.off()
# model bs82 from bs7, truncate to less than 100%
# for now, two possible models
(l.bs <- ols(bs82 ~ rcs(bs7), data=horizons(lab), subset=bs7 < 100 & bs82 < 100, x=TRUE, y=TRUE))
# (l.bs <- ols(bs82 ~ rcs(bs7) + rcs(estimated_ph_h2o), data=horizons(lab), subset=bs7 < 100 & bs82 < 100, x=TRUE, y=TRUE))
# check predictions
png(file='figures/predicted-bs82-vs-measured-bs82.png', width=600, height=600)
print(hexbinplot(lab$bs82 ~ predict(l.bs, horizons(lab)), colramp=viridis, colorkey=FALSE, xbins=30, main='MLRAs: 17, 18, 22A', ylab='Predicted Base Saturation (sum of bases, pH 8.2)', xlab='Measured Base Saturation (sum of bases, pH 8.2)', trans=log, inv=exp, asp=1) + latticeExtra::layer(panel.abline(0, 1, col='red', lwd=2, lty=2)))
dev.off()
# RMSE: ~ 12% base saturation
sqrt(mean((predict(l.bs, horizons(lab)) - lab$bs82)^2, na.rm = TRUE))
# save model for others... could probably use some work
save(l.bs, file='S:/NRCS/430 SOI Soil Survey/430-13 Investigations/Lab_Data/mlra-17-18-22A-BS82-model.Rda')
# re-index missing values, that CAN BE predicted from BS7
missing.bs82 <- which(is.na(lab$bs82) & !is.na(lab$bs7) & lab$bs7 < 100)
# predict bs82 from bs7 when missing:
lab$bs82[missing.bs82] <- predict(l.bs, data.frame(bs7=lab$bs7[missing.bs82]))
# make note of estimated bs82
lab$bs82.method <- rep('measured', times=nrow(lab))
lab$bs82.method[missing.bs82] <- 'estimated'
# check: ok
(p3 <- xyplot(bs82 ~ bs7 | bs82.method, data=horizons(lab), type=c('p','smooth','g')))
## save to CSV file for others
write.csv(as(lab, 'data.frame'), file='S:/NRCS/430 SOI Soil Survey/430-13 Investigations/Lab_Data/kssl-ca-september-2017.csv', row.names=FALSE)
# init coordinates
coordinates(lab) <- ~ x + y
proj4string(lab) <- '+proj=longlat +datum=NAD83 +no_defs +ellps=GRS80 +towgs84=0,0,0'
## save result to Rda object for later
save(lab, file='S:/NRCS/430 SOI Soil Survey/430-13 Investigations/Lab_Data/kssl-ca-september-2017.Rda')
## graphical check: OK
png(file='S:/NRCS/430 SOI Soil Survey/430-13 Investigations/Lab_Data/sample-locations.png', width=600, height=800, antialias = 'cleartype')
par(mar=c(0,0,3,0))
map('county', 'California')
plot(mlra[mlra$MLRARSYM %in% c('17', '18', '22A'), ], border='blue', add=TRUE)
plot(as(lab, 'SpatialPoints'), add=TRUE, col='red', cex=0.25)
title('September 2017')
dev.off()
## save select attributes to SHP
writeOGR(as(lab, 'SpatialPointsDataFrame')[, c('pedon_id', 'taxonname')], dsn='L:/NRCS/MLRAShared/Geodata/UCD_NCSS', layer='mlra_17_18_22-lab_data', driver='ESRI Shapefile', overwrite_layer=TRUE)
## aggregate some soil properties for all profiles by MLRA, along 1 cm slices
a <- slab(lab, mlra ~ clay + ex_k_saturation + estimated_ph_h2o + bs82 + estimated_om)
# adjust factor labels for MLRA to include number of pedons
pedons.per.mlra <- tapply(site(lab)$mlra, site(lab)$mlra, length)
a$mlra <- factor(a$mlra, levels=names(pedons.per.mlra), labels=paste(names(pedons.per.mlra), ' (', pedons.per.mlra, ' profiles)', sep=''))
# re-name variables
a$variable <- factor(a$variable, labels=c('Clay %', 'Ex-K Saturation', 'pH 1:1 Water', 'Base Sat. pH 8.2', 'O.M. %'))
# make some nice colors
cols <- brewer.pal('Set1', n=3)
# plot: nice
png(file='S:/NRCS/430 SOI Soil Survey/430-13 Investigations/Lab_Data/properties_by_mlra.png', width=1400, height=700, antialias = 'cleartype')
print(xyplot(
top ~ p.q50 | variable, groups=mlra, data=a, lower=a$p.q25, upper=a$p.q75, layout=c(5, 1),
ylim=c(170,-5), alpha=0.25, scales=list(y=list(tick.num=7, alternating=3), x=list(relation='free',alternating=1)),
panel=panel.depth_function, prepanel=prepanel.depth_function, sync.colors=TRUE, asp=1.5,
ylab='Depth (cm)', xlab='median bounded by 25th and 75th percentiles', strip=strip.custom(bg=grey(0.85)), cf=a$contributing_fraction,
par.settings=list(superpose.line=list(col=cols, lty=c(1,2,3), lwd=2)),
auto.key=list(columns=3, title='MLRA', points=FALSE, lines=TRUE),
sub=paste(length(lab), 'profiles')
))
dev.off()
| /sonora-office-specific-stuff.R | no_license | dylanbeaudette/process-kssl-snapshot | R | false | false | 5,347 | r |
# load cached data
load('S:/NRCS/430 SOI Soil Survey/430-13 Investigations/Lab_Data/cached-data/kssl-SPC.Rda')
#### subset Sonora Office stuff here:
# keep only those lab data that are within MLRA 17, 18, 22A
# by filtering on MLRA code derived from the spatial layer
lab <- lab[which(lab$mlra %in% c('17', '18', '22A')), ]
lab$mlra <- factor(lab$mlra)
# cleanup
gc(reset = TRUE)
## fill some missing data if possible
## BS at pH 8.2
# (p1 <- xyplot(bs82 ~ bs7, data=horizons(lab), col='black', type=c('p','smooth','g')))
# (p2 <- xyplot(bs82 ~ estimated_ph_h2o, data=horizons(lab), col='black', type=c('p','smooth','g')))
png(file='figures/bs82-vs-bs7.png', width=600, height=600)
print(hexbinplot(bs82 ~ bs7, data=horizons(lab), colramp=viridis, colorkey=FALSE, xbins=30, main='MLRAs: 17, 18, 22A', ylab='Base Saturation (NH4-Ac, pH 7)', xlab='Base Saturation (sum of bases, pH 8.2)', trans=log, inv=exp, subset=bs82 < 100 & bs7 < 100, asp=1) + latticeExtra::layer(panel.abline(0, 1, col='red', lwd=2, lty=2)))
dev.off()
png(file='figures/bs82-vs-ph_h2o.png', width=600, height=600)
print(hexbinplot(bs82 ~ estimated_ph_h2o, data=horizons(lab), colramp=viridis, colorkey=FALSE, xbins=50, main='MLRAs: 17, 18, 22A', xlab='pH 1:1 H2O', ylab='Base Saturation (sum of bases, pH 8.2)', trans=log, inv=exp, subset=bs82 < 100, asp=1))
dev.off()
# model bs82 from bs7, truncate to less than 100%
# for now, two possible models
(l.bs <- ols(bs82 ~ rcs(bs7), data=horizons(lab), subset=bs7 < 100 & bs82 < 100, x=TRUE, y=TRUE))
# (l.bs <- ols(bs82 ~ rcs(bs7) + rcs(estimated_ph_h2o), data=horizons(lab), subset=bs7 < 100 & bs82 < 100, x=TRUE, y=TRUE))
# check predictions
png(file='figures/predicted-bs82-vs-measured-bs82.png', width=600, height=600)
print(hexbinplot(lab$bs82 ~ predict(l.bs, horizons(lab)), colramp=viridis, colorkey=FALSE, xbins=30, main='MLRAs: 17, 18, 22A', ylab='Predicted Base Saturation (sum of bases, pH 8.2)', xlab='Measured Base Saturation (sum of bases, pH 8.2)', trans=log, inv=exp, asp=1) + latticeExtra::layer(panel.abline(0, 1, col='red', lwd=2, lty=2)))
dev.off()
# RMSE: ~ 12% base saturation
sqrt(mean((predict(l.bs, horizons(lab)) - lab$bs82)^2, na.rm = TRUE))
# save model for others... could probably use some work
save(l.bs, file='S:/NRCS/430 SOI Soil Survey/430-13 Investigations/Lab_Data/mlra-17-18-22A-BS82-model.Rda')
# re-index missing values, that CAN BE predicted from BS7
missing.bs82 <- which(is.na(lab$bs82) & !is.na(lab$bs7) & lab$bs7 < 100)
# predict bs82 from bs7 when missing:
lab$bs82[missing.bs82] <- predict(l.bs, data.frame(bs7=lab$bs7[missing.bs82]))
# make note of estimated bs82
lab$bs82.method <- rep('measured', times=nrow(lab))
lab$bs82.method[missing.bs82] <- 'estimated'
# check: ok
(p3 <- xyplot(bs82 ~ bs7 | bs82.method, data=horizons(lab), type=c('p','smooth','g')))
## save to CSV file for others
write.csv(as(lab, 'data.frame'), file='S:/NRCS/430 SOI Soil Survey/430-13 Investigations/Lab_Data/kssl-ca-september-2017.csv', row.names=FALSE)
# init coordinates
coordinates(lab) <- ~ x + y
proj4string(lab) <- '+proj=longlat +datum=NAD83 +no_defs +ellps=GRS80 +towgs84=0,0,0'
## save result to Rda object for later
save(lab, file='S:/NRCS/430 SOI Soil Survey/430-13 Investigations/Lab_Data/kssl-ca-september-2017.Rda')
## graphical check: OK
png(file='S:/NRCS/430 SOI Soil Survey/430-13 Investigations/Lab_Data/sample-locations.png', width=600, height=800, antialias = 'cleartype')
par(mar=c(0,0,3,0))
map('county', 'California')
plot(mlra[mlra$MLRARSYM %in% c('17', '18', '22A'), ], border='blue', add=TRUE)
plot(as(lab, 'SpatialPoints'), add=TRUE, col='red', cex=0.25)
title('September 2017')
dev.off()
## save select attributes to SHP
writeOGR(as(lab, 'SpatialPointsDataFrame')[, c('pedon_id', 'taxonname')], dsn='L:/NRCS/MLRAShared/Geodata/UCD_NCSS', layer='mlra_17_18_22-lab_data', driver='ESRI Shapefile', overwrite_layer=TRUE)
## aggregate some soil properties for all profiles by MLRA, along 1 cm slices
a <- slab(lab, mlra ~ clay + ex_k_saturation + estimated_ph_h2o + bs82 + estimated_om)
# adjust factor labels for MLRA to include number of pedons
pedons.per.mlra <- tapply(site(lab)$mlra, site(lab)$mlra, length)
a$mlra <- factor(a$mlra, levels=names(pedons.per.mlra), labels=paste(names(pedons.per.mlra), ' (', pedons.per.mlra, ' profiles)', sep=''))
# re-name variables
a$variable <- factor(a$variable, labels=c('Clay %', 'Ex-K Saturation', 'pH 1:1 Water', 'Base Sat. pH 8.2', 'O.M. %'))
# make some nice colors
cols <- brewer.pal('Set1', n=3)
# plot: nice
png(file='S:/NRCS/430 SOI Soil Survey/430-13 Investigations/Lab_Data/properties_by_mlra.png', width=1400, height=700, antialias = 'cleartype')
print(xyplot(
top ~ p.q50 | variable, groups=mlra, data=a, lower=a$p.q25, upper=a$p.q75, layout=c(5, 1),
ylim=c(170,-5), alpha=0.25, scales=list(y=list(tick.num=7, alternating=3), x=list(relation='free',alternating=1)),
panel=panel.depth_function, prepanel=prepanel.depth_function, sync.colors=TRUE, asp=1.5,
ylab='Depth (cm)', xlab='median bounded by 25th and 75th percentiles', strip=strip.custom(bg=grey(0.85)), cf=a$contributing_fraction,
par.settings=list(superpose.line=list(col=cols, lty=c(1,2,3), lwd=2)),
auto.key=list(columns=3, title='MLRA', points=FALSE, lines=TRUE),
sub=paste(length(lab), 'profiles')
))
dev.off()
|
#############################################################################
#
# XLConnect
# Copyright (C) 2010-2017 Mirai Solutions GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#############################################################################
#
# Queries the last available (non-empty) row in a worksheet
#
# Author: Martin Studer, Mirai Solutions GmbH
#
#############################################################################
# Generic: query the index of the last available (non-empty) row of a
# worksheet in a workbook, 1-based as is conventional in R.
setGeneric("getLastRow",
function(object, sheet) standardGeneric("getLastRow"))
# Method for a numeric sheet index. The backing implementation (reached
# via xlcCall) uses 0-based indices -- hence the -1 on the sheet index
# going in and the +1 on the row index coming back.
setMethod("getLastRow",
signature(object = "workbook", sheet = "numeric"),
function(object, sheet) {
xlcCall(object, "getLastRow", as.integer(sheet-1)) + 1
}
)
# Method for a sheet referenced by name; only the returned 0-based row
# index needs the +1 adjustment.
setMethod("getLastRow",
signature(object = "workbook", sheet = "character"),
function(object, sheet) {
xlcCall(object, "getLastRow", sheet) + 1
}
)
| /R/workbook.getLastRow.R | no_license | GSuvorov/xlconnect | R | false | false | 1,584 | r | #############################################################################
#
# XLConnect
# Copyright (C) 2010-2017 Mirai Solutions GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#############################################################################
#
# Queries the last available (non-empty) row in a worksheet
#
# Author: Martin Studer, Mirai Solutions GmbH
#
#############################################################################
# Generic: query the index of the last available (non-empty) row of a
# worksheet in a workbook, 1-based as is conventional in R.
setGeneric("getLastRow",
function(object, sheet) standardGeneric("getLastRow"))
# Method for a numeric sheet index. The backing implementation (reached
# via xlcCall) uses 0-based indices -- hence the -1 on the sheet index
# going in and the +1 on the row index coming back.
setMethod("getLastRow",
signature(object = "workbook", sheet = "numeric"),
function(object, sheet) {
xlcCall(object, "getLastRow", as.integer(sheet-1)) + 1
}
)
# Method for a sheet referenced by name; only the returned 0-based row
# index needs the +1 adjustment.
setMethod("getLastRow",
signature(object = "workbook", sheet = "character"),
function(object, sheet) {
xlcCall(object, "getLastRow", sheet) + 1
}
)
|
# OpenSilex API
#
# No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
#
# OpenAPI spec version: 1.0.0-rc+2
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' PositionCreationDTO Class
#'
#' @field point
#' @field x
#' @field y
#' @field z
#' @field text
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
PositionCreationDTO <- R6::R6Class(
'PositionCreationDTO',
public = list(
`point` = NULL,
`x` = NULL,
`y` = NULL,
`z` = NULL,
`text` = NULL,
initialize = function(`point`, `x`, `y`, `z`, `text`){
if (!missing(`point`)) {
stopifnot(R6::is.R6(`point`))
self$`point` <- `point`
}
if (!missing(`x`)) {
stopifnot(is.numeric(`x`), length(`x`) == 1)
self$`x` <- `x`
}
if (!missing(`y`)) {
stopifnot(is.numeric(`y`), length(`y`) == 1)
self$`y` <- `y`
}
if (!missing(`z`)) {
stopifnot(is.numeric(`z`), length(`z`) == 1)
self$`z` <- `z`
}
if (!missing(`text`)) {
stopifnot(is.character(`text`), length(`text`) == 1)
self$`text` <- `text`
}
},
toJSON = function() {
PositionCreationDTOObject <- list()
if (!is.null(self$`point`)) {
PositionCreationDTOObject[['point']] <- self$`point`$toJSON()
}
if (!is.null(self$`x`)) {
PositionCreationDTOObject[['x']] <- self$`x`
}
if (!is.null(self$`y`)) {
PositionCreationDTOObject[['y']] <- self$`y`
}
if (!is.null(self$`z`)) {
PositionCreationDTOObject[['z']] <- self$`z`
}
if (!is.null(self$`text`)) {
PositionCreationDTOObject[['text']] <- self$`text`
}
PositionCreationDTOObject
},
fromJSON = function(PositionCreationDTOJson) {
PositionCreationDTOObject <- jsonlite::fromJSON(PositionCreationDTOJson)
if (!is.null(PositionCreationDTOObject$`point`)) {
pointObject <- Point$new()
pointObject$fromJSON(jsonlite::toJSON(PositionCreationDTOObject$point, auto_unbox = TRUE, null = "null"))
self$`point` <- pointObject
}
if (!is.null(PositionCreationDTOObject$`x`)) {
self$`x` <- PositionCreationDTOObject$`x`
}
if (!is.null(PositionCreationDTOObject$`y`)) {
self$`y` <- PositionCreationDTOObject$`y`
}
if (!is.null(PositionCreationDTOObject$`z`)) {
self$`z` <- PositionCreationDTOObject$`z`
}
if (!is.null(PositionCreationDTOObject$`text`)) {
self$`text` <- PositionCreationDTOObject$`text`
}
},
fromJSONObject = function(PositionCreationDTOObject) {
if (!is.null(PositionCreationDTOObject$`point`)) {
pointObject <- Point$new()
pointObject$fromJSON(jsonlite::toJSON(PositionCreationDTOObject$point, auto_unbox = TRUE, null = "null"))
self$`point` <- pointObject
}
if (!is.null(PositionCreationDTOObject$`x`)) {
self$`x` <- PositionCreationDTOObject$`x`
}
if (!is.null(PositionCreationDTOObject$`y`)) {
self$`y` <- PositionCreationDTOObject$`y`
}
if (!is.null(PositionCreationDTOObject$`z`)) {
self$`z` <- PositionCreationDTOObject$`z`
}
if (!is.null(PositionCreationDTOObject$`text`)) {
self$`text` <- PositionCreationDTOObject$`text`
}
},
toJSONString = function() {
sprintf(
'{
"point": %s,
"x": %s,
"y": %s,
"z": %s,
"text": %s
}',
jsonlite::toJSON(self$`point`$toJSON(),auto_unbox=TRUE, null = "null"),
ifelse(is.null(self$`x`), "null",as.numeric(jsonlite::toJSON(self$`x`,auto_unbox=TRUE, null = "null"))),
ifelse(is.null(self$`y`), "null",as.numeric(jsonlite::toJSON(self$`y`,auto_unbox=TRUE, null = "null"))),
ifelse(is.null(self$`z`), "null",as.numeric(jsonlite::toJSON(self$`z`,auto_unbox=TRUE, null = "null"))),
ifelse(is.null(self$`text`), "null",jsonlite::toJSON(self$`text`,auto_unbox=TRUE, null = "null"))
)
},
fromJSONString = function(PositionCreationDTOJson) {
PositionCreationDTOObject <- jsonlite::fromJSON(PositionCreationDTOJson)
PointObject <- Point$new()
self$`point` <- PointObject$fromJSON(jsonlite::toJSON(PositionCreationDTOObject$point, auto_unbox = TRUE))
self$`x` <- PositionCreationDTOObject$`x`
self$`y` <- PositionCreationDTOObject$`y`
self$`z` <- PositionCreationDTOObject$`z`
self$`text` <- PositionCreationDTOObject$`text`
}
)
)
| /R/PositionCreationDTO.r | no_license | OpenSILEX/opensilexClientToolsR | R | false | false | 4,641 | r | # OpenSilex API
#
# No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
#
# OpenAPI spec version: 1.0.0-rc+2
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' PositionCreationDTO Class
#'
#' @field point
#' @field x
#' @field y
#' @field z
#' @field text
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
PositionCreationDTO <- R6::R6Class(
'PositionCreationDTO',
public = list(
`point` = NULL,
`x` = NULL,
`y` = NULL,
`z` = NULL,
`text` = NULL,
initialize = function(`point`, `x`, `y`, `z`, `text`){
if (!missing(`point`)) {
stopifnot(R6::is.R6(`point`))
self$`point` <- `point`
}
if (!missing(`x`)) {
stopifnot(is.numeric(`x`), length(`x`) == 1)
self$`x` <- `x`
}
if (!missing(`y`)) {
stopifnot(is.numeric(`y`), length(`y`) == 1)
self$`y` <- `y`
}
if (!missing(`z`)) {
stopifnot(is.numeric(`z`), length(`z`) == 1)
self$`z` <- `z`
}
if (!missing(`text`)) {
stopifnot(is.character(`text`), length(`text`) == 1)
self$`text` <- `text`
}
},
toJSON = function() {
PositionCreationDTOObject <- list()
if (!is.null(self$`point`)) {
PositionCreationDTOObject[['point']] <- self$`point`$toJSON()
}
if (!is.null(self$`x`)) {
PositionCreationDTOObject[['x']] <- self$`x`
}
if (!is.null(self$`y`)) {
PositionCreationDTOObject[['y']] <- self$`y`
}
if (!is.null(self$`z`)) {
PositionCreationDTOObject[['z']] <- self$`z`
}
if (!is.null(self$`text`)) {
PositionCreationDTOObject[['text']] <- self$`text`
}
PositionCreationDTOObject
},
fromJSON = function(PositionCreationDTOJson) {
PositionCreationDTOObject <- jsonlite::fromJSON(PositionCreationDTOJson)
if (!is.null(PositionCreationDTOObject$`point`)) {
pointObject <- Point$new()
pointObject$fromJSON(jsonlite::toJSON(PositionCreationDTOObject$point, auto_unbox = TRUE, null = "null"))
self$`point` <- pointObject
}
if (!is.null(PositionCreationDTOObject$`x`)) {
self$`x` <- PositionCreationDTOObject$`x`
}
if (!is.null(PositionCreationDTOObject$`y`)) {
self$`y` <- PositionCreationDTOObject$`y`
}
if (!is.null(PositionCreationDTOObject$`z`)) {
self$`z` <- PositionCreationDTOObject$`z`
}
if (!is.null(PositionCreationDTOObject$`text`)) {
self$`text` <- PositionCreationDTOObject$`text`
}
},
fromJSONObject = function(PositionCreationDTOObject) {
if (!is.null(PositionCreationDTOObject$`point`)) {
pointObject <- Point$new()
pointObject$fromJSON(jsonlite::toJSON(PositionCreationDTOObject$point, auto_unbox = TRUE, null = "null"))
self$`point` <- pointObject
}
if (!is.null(PositionCreationDTOObject$`x`)) {
self$`x` <- PositionCreationDTOObject$`x`
}
if (!is.null(PositionCreationDTOObject$`y`)) {
self$`y` <- PositionCreationDTOObject$`y`
}
if (!is.null(PositionCreationDTOObject$`z`)) {
self$`z` <- PositionCreationDTOObject$`z`
}
if (!is.null(PositionCreationDTOObject$`text`)) {
self$`text` <- PositionCreationDTOObject$`text`
}
},
toJSONString = function() {
sprintf(
'{
"point": %s,
"x": %s,
"y": %s,
"z": %s,
"text": %s
}',
jsonlite::toJSON(self$`point`$toJSON(),auto_unbox=TRUE, null = "null"),
ifelse(is.null(self$`x`), "null",as.numeric(jsonlite::toJSON(self$`x`,auto_unbox=TRUE, null = "null"))),
ifelse(is.null(self$`y`), "null",as.numeric(jsonlite::toJSON(self$`y`,auto_unbox=TRUE, null = "null"))),
ifelse(is.null(self$`z`), "null",as.numeric(jsonlite::toJSON(self$`z`,auto_unbox=TRUE, null = "null"))),
ifelse(is.null(self$`text`), "null",jsonlite::toJSON(self$`text`,auto_unbox=TRUE, null = "null"))
)
},
fromJSONString = function(PositionCreationDTOJson) {
PositionCreationDTOObject <- jsonlite::fromJSON(PositionCreationDTOJson)
PointObject <- Point$new()
self$`point` <- PointObject$fromJSON(jsonlite::toJSON(PositionCreationDTOObject$point, auto_unbox = TRUE))
self$`x` <- PositionCreationDTOObject$`x`
self$`y` <- PositionCreationDTOObject$`y`
self$`z` <- PositionCreationDTOObject$`z`
self$`text` <- PositionCreationDTOObject$`text`
}
)
)
|
library(tidyverse)
library(brms)
path <- '/gpfs1/data/idiv_chase/emmala/NutNet'
p.all <- read.csv(paste0(path, '/nutnet_cumulative_time.csv'), header=T,fill=TRUE,sep=",",na.strings=c(""," ","NA","NA ","na"))
p.all$site_code<-as.factor(p.all$site_code)
p.all$site.year.id<-as.factor(p.all$site.year.id)
p.all$block<-as.factor(p.all$block)
p.all$plot<-as.factor(p.all$plot)
#
p.all <- p.all %>% group_by(site_code) %>% filter(year_max >= 3) %>%
ungroup()
pp.multi_all <- brm( mvbind(SL,SG,CDE,s.loss.n,s.gain) ~ trt.y * year.y.m + (trt.y * year.y.m | p | site_code),
data = p.all,
family=student(),
cores = 4, iter = 6000, warmup = 1000, chains = 4,
control = list(adapt_delta = 0.99) )
save(pp.multi_all,
file=Sys.getenv('OFILE'))
| /cluster/multivariate_models/multi_price_all.R | no_license | emma-ladouceur/NutNet-CAFE | R | false | false | 817 | r |
library(tidyverse)
library(brms)
path <- '/gpfs1/data/idiv_chase/emmala/NutNet'
p.all <- read.csv(paste0(path, '/nutnet_cumulative_time.csv'), header=T,fill=TRUE,sep=",",na.strings=c(""," ","NA","NA ","na"))
p.all$site_code<-as.factor(p.all$site_code)
p.all$site.year.id<-as.factor(p.all$site.year.id)
p.all$block<-as.factor(p.all$block)
p.all$plot<-as.factor(p.all$plot)
#
p.all <- p.all %>% group_by(site_code) %>% filter(year_max >= 3) %>%
ungroup()
pp.multi_all <- brm( mvbind(SL,SG,CDE,s.loss.n,s.gain) ~ trt.y * year.y.m + (trt.y * year.y.m | p | site_code),
data = p.all,
family=student(),
cores = 4, iter = 6000, warmup = 1000, chains = 4,
control = list(adapt_delta = 0.99) )
save(pp.multi_all,
file=Sys.getenv('OFILE'))
|
# Produce plot 4: a 2x2 panel of line plots of the power variables over time.
source("GetData.R")
power.sample <- GetData()

# FIX: png() arguments spelled out -- h=/w= relied on partial argument matching.
png("plot4.png", height = 480, width = 480)
par(mfrow = c(2, 2))  # 2x2 grid, panels filled row-wise

# Panel 1 (top-left): nearly identical to plot 2.
plot( Global_active_power ~ Date, power.sample, type="l",col="black",
      xlab="",ylab="Global Active Power")
# Panel 2 (top-right).
plot( Voltage ~ Date, power.sample, type="l",col="black",
      xlab="datetime",ylab="Voltage")

# Panel 3 (bottom-left): nearly identical to plot 3.
plot(Sub_metering_1 ~ Date, power.sample, type="l",col="black",
     ylab="Energy sub metering", xlab="")
lines(Sub_metering_2 ~ Date, power.sample, col="red")   # lines() defaults to type="l"
lines(Sub_metering_3 ~ Date, power.sample, col="blue")
# FIX: paste0() instead of paste(..., sep="").
legend("topright", paste0("Sub_metering_", 1:3),
       col=c("black","red","blue"), lwd=1, bty="n")

# Panel 4 (bottom-right).
plot(Global_reactive_power ~ Date, power.sample, type="l",col="black",
     ylab="Global_reactive_power",xlab="datetime")
dev.off()
| /plot4.R | no_license | vpipkt/ExData_Plotting1 | R | false | false | 928 | r | #Produce plot 4 - line plots of numeric variables over time.
# Produce plot 4: a 2x2 panel of line plots of the power variables over time.
source("GetData.R")
power.sample <- GetData()

# FIX: png() arguments spelled out -- h=/w= relied on partial argument matching.
png("plot4.png", height = 480, width = 480)
par(mfrow = c(2, 2))  # 2x2 grid, panels filled row-wise

# Panel 1 (top-left): nearly identical to plot 2.
plot( Global_active_power ~ Date, power.sample, type="l",col="black",
      xlab="",ylab="Global Active Power")
# Panel 2 (top-right).
plot( Voltage ~ Date, power.sample, type="l",col="black",
      xlab="datetime",ylab="Voltage")

# Panel 3 (bottom-left): nearly identical to plot 3.
plot(Sub_metering_1 ~ Date, power.sample, type="l",col="black",
     ylab="Energy sub metering", xlab="")
lines(Sub_metering_2 ~ Date, power.sample, col="red")   # lines() defaults to type="l"
lines(Sub_metering_3 ~ Date, power.sample, col="blue")
# FIX: paste0() instead of paste(..., sep="").
legend("topright", paste0("Sub_metering_", 1:3),
       col=c("black","red","blue"), lwd=1, bty="n")

# Panel 4 (bottom-right).
plot(Global_reactive_power ~ Date, power.sample, type="l",col="black",
     ylab="Global_reactive_power",xlab="datetime")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createTsneSlides.R
\name{createTsneSlides}
\alias{createTsneSlides}
\title{Create slides of tsne process for use in animations}
\usage{
createTsneSlides(df, gowerDiss, id, pam_fit, iterations)
}
\arguments{
\item{df}{Data frame from which dissimilarities were calculated}
\item{gowerDiss}{gower dissimilarities to use}
\item{id}{ids from original data}
\item{pam_fit}{PAM object from building a PAM clustering model}
\item{iterations}{Number of iterations for tsne}
}
\description{
This function allows the user to plot cluster results from a PAM object using
t-distributed stochastic neighbor embedding (t-SNE). Several slides are created
based on the number of iterations specified. E.g. If you specify 1000
iterations, the slides created will be tsne results from 1, 10, 20, 30, 40,
and so on up to 1000 iterations.
}
\keyword{clustering,}
\keyword{dimensionality}
\keyword{plot}
\keyword{reduction}
\keyword{t-SNE,}
| /man/createTsneSlides.Rd | no_license | EthanTaft/clusteringTables | R | false | true | 994 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createTsneSlides.R
\name{createTsneSlides}
\alias{createTsneSlides}
\title{Create slides of tsne process for use in animations}
\usage{
createTsneSlides(df, gowerDiss, id, pam_fit, iterations)
}
\arguments{
\item{df}{Data frame from which dissimilarities were calculated}
\item{gowerDiss}{gower dissimilarities to use}
\item{id}{ids from original data}
\item{pam_fit}{PAM object from building a PAM clustering model}
\item{iterations}{Number of iterations for tsne}
}
\description{
This function allows the user to plot cluster results from a PAM object using
t-distributed stochastic neighbor embedding (t-SNE). Several slides are created
based on the number of iterations specified. E.g. If you specify 1000
iterations, the slides created will be tsne results from 1, 10, 20, 30, 40,
and so on up to 1000 iterations.
}
\keyword{clustering,}
\keyword{dimensionality}
\keyword{plot}
\keyword{reduction}
\keyword{t-SNE,}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_table.R
\name{split.read_table}
\alias{split.read_table}
\title{Split a read table into two read tables.}
\usage{
\method{split}{read_table}(df)
}
\value{
A list of two read tables labeled df1 and df2. If the read table
has only 1 position, then NULL is returned.
}
\description{
Split a read table into two read tables.
}
| /man/split.read_table.Rd | no_license | stephenshank/RegressHaplo | R | false | true | 408 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_table.R
\name{split.read_table}
\alias{split.read_table}
\title{Split a read table into two read tables.}
\usage{
\method{split}{read_table}(df)
}
\value{
A list of two read tables labeled df1 and df2. If the read table
has only 1 position, then NULL is returned.
}
\description{
Split a read table into two read tables.
}
|
# Part 1 ----
# Read the plant-type data.
final <- read.csv("Final Project_Part 3_planttype.csv")
# Inspect structure and first rows.
str(final)
head(final)
# Scatterplot matrix of the eight predictors.
# NOTE(review): attach() is discouraged (it masks names on the search path);
# kept because the pairs() col= and points(type, ...) calls below rely on it.
attach(final)
pairs(final[,1:8], col = c(x1,x2,x3,x4,x5,x6,x7,x8))

# Part 2 ----
# Fit a logistic regression of plant type on x1-x2, x4-x8 (x3 excluded).
lfit <- glm(type ~ x1 + x2 + x4 + x5 + x6 + x7 + x8, family = binomial, data = final)
# Add a fitted logistic regression probability curve to a plot of plant type
# versus predicted probabilities.
predicted <- predict(lfit, final, type = "response")
plot(final$type, predicted)
points(type, fitted(lfit), pch = 20)
library(ggplot2)
# FIX: modern ggplot2 ignores a bare family= argument to stat_smooth (the
# smoother silently falls back to a gaussian GLM); pass it via method.args.
ggplot(final, aes(x = type, y = predicted)) + geom_point() +
  stat_smooth(method = "glm", method.args = list(family = "binomial"), se = FALSE)

# Model summary (coefficients, deviance).
summary(lfit)
# Use the (hand-copied) coefficients to compute a prediction on the
# linear-predictor (logit) scale.
est <- round(12.3-(6*1.2)+(1.4*7)-(0.8*7)-(0.6*6)-(0.7*6)-(1*6)+(0.8*7), 5)
# Using the inverse of the logit transformation, convert this prediction to a
# probability of plant type 1 on the original scale.
library(boot)
inv.logit(est)
# Likelihood-based 95% confidence intervals for the coefficients,
# exponentiated onto the odds-ratio scale.
exp(confint(lfit, level = 0.95))
# Reclassify observations at a 0.5 probability cutoff.
round(predict(lfit, final, type = "response"), 5)
# FIX: renamed from `predict` -- that name masked base::predict().
pred_class <- ifelse(predict(lfit, type = "response") > .5, 1, 0)
xtabs(~ pred_class + final$type)
| /GLM.R | no_license | Abinah/logistic-linear-regression | R | false | false | 1,237 | r | #part 1
# Read the plant-type data.
final <- read.csv("Final Project_Part 3_planttype.csv")
# Inspect structure and first rows.
str(final)
head(final)
# Scatterplot matrix of the eight predictors.
# NOTE(review): attach() is discouraged (it masks names on the search path);
# kept because the pairs() col= and points(type, ...) calls below rely on it.
attach(final)
pairs(final[,1:8], col = c(x1,x2,x3,x4,x5,x6,x7,x8))

# Part 2 ----
# Fit a logistic regression of plant type on x1-x2, x4-x8 (x3 excluded).
lfit <- glm(type ~ x1 + x2 + x4 + x5 + x6 + x7 + x8, family = binomial, data = final)
# Add a fitted logistic regression probability curve to a plot of plant type
# versus predicted probabilities.
predicted <- predict(lfit, final, type = "response")
plot(final$type, predicted)
points(type, fitted(lfit), pch = 20)
library(ggplot2)
# FIX: modern ggplot2 ignores a bare family= argument to stat_smooth (the
# smoother silently falls back to a gaussian GLM); pass it via method.args.
ggplot(final, aes(x = type, y = predicted)) + geom_point() +
  stat_smooth(method = "glm", method.args = list(family = "binomial"), se = FALSE)

# Model summary (coefficients, deviance).
summary(lfit)
# Use the (hand-copied) coefficients to compute a prediction on the
# linear-predictor (logit) scale.
est <- round(12.3-(6*1.2)+(1.4*7)-(0.8*7)-(0.6*6)-(0.7*6)-(1*6)+(0.8*7), 5)
# Using the inverse of the logit transformation, convert this prediction to a
# probability of plant type 1 on the original scale.
library(boot)
inv.logit(est)
# Likelihood-based 95% confidence intervals for the coefficients,
# exponentiated onto the odds-ratio scale.
exp(confint(lfit, level = 0.95))
# Reclassify observations at a 0.5 probability cutoff.
round(predict(lfit, final, type = "response"), 5)
# FIX: renamed from `predict` -- that name masked base::predict().
pred_class <- ifelse(predict(lfit, type = "response") > .5, 1, 0)
xtabs(~ pred_class + final$type)
|
# -------------------------------------- #
# Spat21/Mozzie Study #
# Code exposure and outcome #
# Aim 1A #
# Human Data #
# Mozzie Phase 3 #
# K. Sumner #
# August 18, 2020 #
# -------------------------------------- #
#### -------- load packages ------------ ####
# load in the packages of interest
library(dplyr)
library(readr)
library(tidyr)
#### ------- read in the data sets -------- ####
# read in the full data set (censored, visit-level human data, phase 3)
final_data = read_rds("Desktop/Dissertation Materials/SpatialR21 Grant/Final Dissertation Materials/Final Data Sets/De-identified Phase II_v13/final_merged_data/phase3_spat21_human_final_censored_data_for_dissertation_18AUG2020.rds")
# read in the consecutive monthly follow-up data set
# NOTE(review): followup_data is not referenced in the visible remainder of
# this script -- confirm it is actually needed here
followup_data = read_csv("Desktop/Dissertation Materials/SpatialR21 Grant/Final Dissertation Materials/Aim 1A/Consecutive Follow-up Tables/phase3_aim1a_consecutive_follow_up_order_df_after_censoring_18AUG2020.csv")
#### -------- code the three main exposures ------ ####
# Add an indicator for whether the participant reported at least one symptom
# at a visit with a sick component ("sick visit" or "monthly and sick visit").
# FIX: vectorized -- replaces a row-by-row for loop over nrow(final_data).
# NOTE(review): a sick-component row whose symptom columns are all NA yields
# NA here (the original scalar if() would have errored on such a row); the
# useNA="always" tally below surfaces any such rows.
sick_component <- final_data$visit_type %in% c("monthly and sick visit", "sick visit")
any_symptom <- final_data$fever == "yes" | final_data$Aches == "yes" | final_data$Vomiting == "yes" |
  final_data$Diarrhea == "yes" | final_data$Chills == "yes" | final_data$congestion == "yes" |
  final_data$Cough == "yes" | final_data$Other == "yes"
had_at_least_1_symptom <- ifelse(sick_component & any_symptom, "yes", "no")
# check the output
table(had_at_least_1_symptom, useNA="always")
length(which((final_data$visit_type == "monthly and sick visit" | final_data$visit_type=="sick visit") &
               (final_data$fever == "yes" | final_data$Aches == "yes" | final_data$Vomiting == "yes" | final_data$Diarrhea == "yes" | final_data$Chills == "yes" | final_data$congestion =="yes" |
                  final_data$Cough =="yes" | final_data$Other == "yes"))) # 414 that meet this
final_data$had_at_least_1_symptom <- had_at_least_1_symptom
# Main exposure, primary case definition: among MONTHLY visits only, a
# qPCR-positive visit is an asymptomatic infection and a qPCR-negative visit
# is no infection; all other visit types remain NA.
# FIX: vectorized -- replaces a row-by-row for loop over nrow(final_data).
# %in% keeps an NA qPCR result out of the index (the row stays NA) instead
# of crashing a scalar if(); the useNA="always" tallies below surface any
# such rows.
monthly_visit <- final_data$visit_type %in% "monthly visit"
main_exposure_primary_case_def <- rep(NA_character_, nrow(final_data))
main_exposure_primary_case_def[monthly_visit & final_data$pf_pcr_infection_status %in% "positive"] <- "asymptomatic infection"
main_exposure_primary_case_def[monthly_visit & final_data$pf_pcr_infection_status %in% "negative"] <- "no infection"
# check the output
table(main_exposure_primary_case_def, useNA="always")
table(main_exposure_primary_case_def,final_data$pf_pcr_infection_status,useNA="always")
table(main_exposure_primary_case_def,final_data$rdt_rst,useNA="always")
length(which(final_data$visit_type == "monthly and sick visit" & final_data$pf_pcr_infection_status == "positive" & final_data$rdt_rst == "positive" &
               !(final_data$pf_pcr_infection_status == "positive" & final_data$rdt_rst == "positive" &
                   (final_data$fever == "yes" | final_data$Aches == "yes" | final_data$Vomiting == "yes" | final_data$Diarrhea == "yes" | final_data$Chills == "yes" | final_data$congestion =="yes" |
                      final_data$Cough =="yes" | final_data$Other == "yes")))) # 13 that meet this
table(main_exposure_primary_case_def,final_data$visit_type, useNA="always")
length(which(final_data$visit_type == "monthly and sick visit" & final_data$pf_pcr_infection_status == "positive" & final_data$rdt_rst == "positive" &
               (final_data$fever == "yes" | final_data$Aches == "yes" | final_data$Vomiting == "yes" | final_data$Diarrhea == "yes" | final_data$Chills == "yes" | final_data$congestion =="yes" |
                  final_data$Cough =="yes" | final_data$Other == "yes"))) # 89 meet this
table(main_exposure_primary_case_def,final_data$had_at_least_1_symptom, useNA="always")
# looks good, add to the data set
final_data$main_exposure_primary_case_def <- main_exposure_primary_case_def
# Main exposure, secondary stringent case definition: the coding rule is the
# same as the primary definition (monthly visits: qPCR positive = asymptomatic
# infection, qPCR negative = no infection); only the variable name differs.
# FIX: vectorized -- replaces a row-by-row for loop over nrow(final_data).
# %in% keeps an NA qPCR result out of the index (the row stays NA) instead
# of crashing a scalar if().
monthly_visit <- final_data$visit_type %in% "monthly visit"
main_exposure_secondary_stringent_case_def <- rep(NA_character_, nrow(final_data))
main_exposure_secondary_stringent_case_def[monthly_visit & final_data$pf_pcr_infection_status %in% "positive"] <- "asymptomatic infection"
main_exposure_secondary_stringent_case_def[monthly_visit & final_data$pf_pcr_infection_status %in% "negative"] <- "no infection"
# check the output
table(main_exposure_secondary_stringent_case_def, useNA="always")
table(main_exposure_secondary_stringent_case_def,final_data$pf_pcr_infection_status,useNA="always")
table(main_exposure_secondary_stringent_case_def,final_data$rdt_rst,useNA="always")
table(main_exposure_secondary_stringent_case_def,final_data$fever,useNA="always")
length(which(final_data$visit_type == "monthly and sick visit" & final_data$pf_pcr_infection_status == "positive" & final_data$rdt_rst == "positive" &
               !(final_data$pf_pcr_infection_status == "positive" & final_data$rdt_rst == "positive" &
                   final_data$fever == "yes" ))) # 34 that meet this
table(main_exposure_secondary_stringent_case_def,final_data$visit_type, useNA="always")
length(which(final_data$visit_type == "monthly and sick visit" & (final_data$pf_pcr_infection_status == "positive" & final_data$rdt_rst == "positive" &
                                                                    final_data$fever == "yes"))) # 68 meet this
table(main_exposure_secondary_stringent_case_def,final_data$had_at_least_1_symptom, useNA="always")
# looks good, add to the data set
final_data$main_exposure_secondary_stringent_case_def <- main_exposure_secondary_stringent_case_def
# Main exposure, secondary permissive case definition: the coding rule is the
# same as the primary definition (monthly visits: qPCR positive = asymptomatic
# infection, qPCR negative = no infection); only the variable name differs.
# FIX: vectorized -- replaces a row-by-row for loop over nrow(final_data).
# %in% keeps an NA qPCR result out of the index (the row stays NA) instead
# of crashing a scalar if().
monthly_visit <- final_data$visit_type %in% "monthly visit"
main_exposure_secondary_permissive_case_def <- rep(NA_character_, nrow(final_data))
main_exposure_secondary_permissive_case_def[monthly_visit & final_data$pf_pcr_infection_status %in% "positive"] <- "asymptomatic infection"
main_exposure_secondary_permissive_case_def[monthly_visit & final_data$pf_pcr_infection_status %in% "negative"] <- "no infection"
# check the output
table(main_exposure_secondary_permissive_case_def, useNA="always")
table(main_exposure_secondary_permissive_case_def,final_data$pf_pcr_infection_status,useNA="always")
table(main_exposure_secondary_permissive_case_def,final_data$rdt_rst,useNA="always")
table(main_exposure_secondary_permissive_case_def,final_data$fever,useNA="always")
length(which(final_data$visit_type == "monthly and sick visit" & final_data$pf_pcr_infection_status == "positive" &
               !(final_data$pf_pcr_infection_status == "positive" &
                   (final_data$fever == "yes" | final_data$Aches == "yes" | final_data$Vomiting == "yes" | final_data$Diarrhea == "yes" | final_data$Chills == "yes" | final_data$congestion =="yes" |
                      final_data$Cough =="yes" | final_data$Other == "yes")))) # 32 that meet this
table(main_exposure_secondary_permissive_case_def,final_data$visit_type, useNA="always")
length(which(final_data$visit_type == "monthly and sick visit" & final_data$pf_pcr_infection_status == "positive" &
               (final_data$fever == "yes" | final_data$Aches == "yes" | final_data$Vomiting == "yes" | final_data$Diarrhea == "yes" | final_data$Chills == "yes" | final_data$congestion =="yes" |
                  final_data$Cough =="yes" | final_data$Other == "yes"))) # 146 meet this
table(main_exposure_secondary_permissive_case_def,final_data$had_at_least_1_symptom, useNA="always")
# looks good, add to the data set
final_data$main_exposure_secondary_permissive_case_def <- main_exposure_secondary_permissive_case_def
#### --------- code the three main outcomes -------- ####
# Main outcome, primary case definition: symptomatic malaria infection =
# a visit with a sick component AND Pf qPCR positive AND RDT positive AND
# at least one reported symptom; everything else remains NA.
# FIX: vectorized -- the two original loop branches ("sick visit" vs
# "monthly and sick visit") differed only in visit_type, so they collapse
# into one %in% condition. %in% keeps NA lab/symptom values out of the
# index (such rows stay NA) instead of crashing a scalar if().
sick_component <- final_data$visit_type %in% c("sick visit", "monthly and sick visit")
pcr_and_rdt_pos <- final_data$pf_pcr_infection_status %in% "positive" & final_data$rdt_rst %in% "positive"
any_symptom_reported <- final_data$fever %in% "yes" | final_data$Aches %in% "yes" | final_data$Vomiting %in% "yes" |
  final_data$Diarrhea %in% "yes" | final_data$Chills %in% "yes" | final_data$congestion %in% "yes" |
  final_data$Cough %in% "yes" | final_data$Other %in% "yes"
main_outcome_primary_case_def <- rep(NA_character_, nrow(final_data))
main_outcome_primary_case_def[sick_component & pcr_and_rdt_pos & any_symptom_reported] <- "symptomatic infection"
# check the output
table(main_outcome_primary_case_def, useNA="always")
table(main_outcome_primary_case_def,final_data$pf_pcr_infection_status,useNA="always")
table(main_outcome_primary_case_def,final_data$rdt_rst,useNA="always")
length(which((final_data$visit_type == "monthly and sick visit" | final_data$visit_type=="sick visit") & final_data$pf_pcr_infection_status == "positive" & final_data$rdt_rst == "positive" &
               (final_data$fever == "yes" | final_data$Aches == "yes" | final_data$Vomiting == "yes" | final_data$Diarrhea == "yes" | final_data$Chills == "yes" | final_data$congestion =="yes" |
                  final_data$Cough =="yes" | final_data$Other == "yes"))) # 310 that meet this
length(which((final_data$visit_type == "monthly and sick visit" | final_data$visit_type=="sick visit") & final_data$pf_pcr_infection_status == "positive" & final_data$rdt_rst == "positive"))
length(which((final_data$visit_type == "monthly and sick visit" | final_data$visit_type=="sick visit") &
               (final_data$fever == "yes" | final_data$Aches == "yes" | final_data$Vomiting == "yes" | final_data$Diarrhea == "yes" | final_data$Chills == "yes" | final_data$congestion =="yes" |
                  final_data$Cough =="yes" | final_data$Other == "yes"))) # 814 that meet this
table(final_data$visit_type, useNA="always")
table(main_outcome_primary_case_def,final_data$visit_type, useNA="always")
table(main_outcome_primary_case_def,final_data$had_at_least_1_symptom, useNA="always")
length(which(final_data$had_at_least_1_symptom=="yes" & final_data$pf_pcr_infection_status=="positive" & final_data$rdt_rst=="positive"))
length(which(final_data$had_at_least_1_symptom=="yes" & !(final_data$pf_pcr_infection_status=="positive" & final_data$rdt_rst=="positive")))
# looks good, add to the data set
final_data$main_outcome_primary_case_def <- main_outcome_primary_case_def
# Main outcome, secondary stringent case definition: symptomatic infection
# = sick-component visit AND qPCR positive AND RDT positive AND fever
# specifically (not just any symptom); everything else remains NA.
# FIX: vectorized -- replaces a row-by-row for loop; %in% keeps NA values
# out of the index (such rows stay NA) instead of crashing a scalar if().
stringent_sick <- final_data$visit_type %in% c("sick visit", "monthly and sick visit")
stringent_pos <- final_data$pf_pcr_infection_status %in% "positive" & final_data$rdt_rst %in% "positive"
main_outcome_secondary_stringent_case_def <- rep(NA_character_, nrow(final_data))
main_outcome_secondary_stringent_case_def[stringent_sick & stringent_pos & final_data$fever %in% "yes"] <- "symptomatic infection"
# check the output
table(main_outcome_secondary_stringent_case_def, useNA="always")
table(main_outcome_secondary_stringent_case_def,final_data$pf_pcr_infection_status,useNA="always")
table(main_outcome_secondary_stringent_case_def,final_data$rdt_rst,useNA="always")
table(main_outcome_secondary_stringent_case_def,final_data$fever,useNA="always")
table(main_outcome_secondary_stringent_case_def,final_data$visit_type, useNA="always")
length(which(final_data$pf_pcr_infection_status == "positive" & final_data$rdt_rst == "positive" &
               final_data$fever == "yes")) # 247 meet this
table(main_outcome_secondary_stringent_case_def,final_data$had_at_least_1_symptom, useNA="always")
# looks good, add to the data set
final_data$main_outcome_secondary_stringent_case_def <- main_outcome_secondary_stringent_case_def
# Main outcome, secondary permissive case definition: symptomatic infection
# = sick-component visit AND qPCR positive AND at least one reported symptom
# (no RDT requirement, unlike the primary definition); everything else NA.
# FIX: vectorized -- replaces a row-by-row for loop; %in% keeps NA values
# out of the index (such rows stay NA) instead of crashing a scalar if().
permissive_sick <- final_data$visit_type %in% c("sick visit", "monthly and sick visit")
permissive_symptom <- final_data$fever %in% "yes" | final_data$Aches %in% "yes" | final_data$Vomiting %in% "yes" |
  final_data$Diarrhea %in% "yes" | final_data$Chills %in% "yes" | final_data$congestion %in% "yes" |
  final_data$Cough %in% "yes" | final_data$Other %in% "yes"
main_outcome_secondary_permissive_case_def <- rep(NA_character_, nrow(final_data))
main_outcome_secondary_permissive_case_def[permissive_sick & final_data$pf_pcr_infection_status %in% "positive" & permissive_symptom] <- "symptomatic infection"
# check the output
table(main_outcome_secondary_permissive_case_def, useNA="always")
table(main_outcome_secondary_permissive_case_def,final_data$pf_pcr_infection_status,useNA="always")
table(main_outcome_secondary_permissive_case_def,final_data$rdt_rst,useNA="always")
length(which((final_data$visit_type == "monthly and sick visit" | final_data$visit_type=="sick visit") & final_data$pf_pcr_infection_status == "positive" & final_data$rdt_rst == "positive" &
               (final_data$fever == "yes" | final_data$Aches == "yes" | final_data$Vomiting == "yes" | final_data$Diarrhea == "yes" | final_data$Chills == "yes" | final_data$congestion =="yes" |
                  final_data$Cough =="yes" | final_data$Other == "yes"))) # 310 that meet this
length(which((final_data$visit_type == "monthly and sick visit" | final_data$visit_type=="sick visit") & final_data$pf_pcr_infection_status == "positive" & final_data$rdt_rst == "positive"))
length(which((final_data$visit_type == "monthly and sick visit" | final_data$visit_type=="sick visit") &
               (final_data$fever == "yes" | final_data$Aches == "yes" | final_data$Vomiting == "yes" | final_data$Diarrhea == "yes" | final_data$Chills == "yes" | final_data$congestion =="yes" |
                  final_data$Cough =="yes" | final_data$Other == "yes"))) # 814 that meet this
table(final_data$visit_type, useNA="always")
table(main_outcome_secondary_permissive_case_def,final_data$visit_type, useNA="always")
table(main_outcome_secondary_permissive_case_def,final_data$had_at_least_1_symptom, useNA="always")
length(which(final_data$had_at_least_1_symptom=="yes" & final_data$pf_pcr_infection_status=="positive"))
length(which(final_data$had_at_least_1_symptom=="yes" & !(final_data$pf_pcr_infection_status=="positive")))
table(main_outcome_secondary_permissive_case_def,final_data$pf_pcr_infection_status, useNA="always")
# looks good, add to the data set
final_data$main_outcome_secondary_permissive_case_def <- main_outcome_secondary_permissive_case_def
# check the exposure and outcomes coding: cross-tabulate each exposure
# against its matching outcome (useNA="always" so uncoded rows are visible)
# for the primary coding
table(final_data$main_exposure_primary_case_def,final_data$main_outcome_primary_case_def,useNA="always")
# for the secondary stringent coding
table(final_data$main_exposure_secondary_stringent_case_def,final_data$main_outcome_secondary_stringent_case_def,useNA="always")
# for the secondary permissive coding
table(final_data$main_exposure_secondary_permissive_case_def,final_data$main_outcome_secondary_permissive_case_def,useNA="always")
# all looks good
# export data sets (both CSV and RDS copies of the coded data)
write_csv(final_data,"Desktop/phase3_spat21_human_final_censored_data_for_dissertation_with_exposure_outcome_18AUG2020.csv")
write_rds(final_data,"Desktop/phase3_spat21_human_final_censored_data_for_dissertation_with_exposure_outcome_18AUG2020.rds")
| /SpatialR21_project/code/aim1a_analyses/spat21_aim1a_code_exposure_outcome_phase3.R | no_license | kelseysumner/taylorlab | R | false | false | 16,830 | r | # -------------------------------------- #
# Spat21/Mozzie Study #
# Code exposure and outcome #
# Aim 1A #
# Human Data #
# Mozzie Phase 3 #
# K. Sumner #
# August 18, 2020 #
# -------------------------------------- #
#### -------- load packages ------------ ####
# load in the packages of interest
library(dplyr)
library(readr)
library(tidyr)
#### ------- read in the data sets -------- ####
# read in the full data set (censored, visit-level human data, phase 3)
final_data = read_rds("Desktop/Dissertation Materials/SpatialR21 Grant/Final Dissertation Materials/Final Data Sets/De-identified Phase II_v13/final_merged_data/phase3_spat21_human_final_censored_data_for_dissertation_18AUG2020.rds")
# read in the consecutive monthly follow-up data set
# NOTE(review): followup_data is not referenced in the visible remainder of
# this script -- confirm it is actually needed here
followup_data = read_csv("Desktop/Dissertation Materials/SpatialR21 Grant/Final Dissertation Materials/Aim 1A/Consecutive Follow-up Tables/phase3_aim1a_consecutive_follow_up_order_df_after_censoring_18AUG2020.csv")
#### -------- code the three main exposures ------ ####
# Add an indicator for whether the participant reported at least one symptom
# at a visit with a sick component ("sick visit" or "monthly and sick visit").
# FIX: vectorized -- replaces a row-by-row for loop over nrow(final_data).
# NOTE(review): a sick-component row whose symptom columns are all NA yields
# NA here (the original scalar if() would have errored on such a row); the
# useNA="always" tally below surfaces any such rows.
sick_component <- final_data$visit_type %in% c("monthly and sick visit", "sick visit")
any_symptom <- final_data$fever == "yes" | final_data$Aches == "yes" | final_data$Vomiting == "yes" |
  final_data$Diarrhea == "yes" | final_data$Chills == "yes" | final_data$congestion == "yes" |
  final_data$Cough == "yes" | final_data$Other == "yes"
had_at_least_1_symptom <- ifelse(sick_component & any_symptom, "yes", "no")
# check the output
table(had_at_least_1_symptom, useNA="always")
length(which((final_data$visit_type == "monthly and sick visit" | final_data$visit_type=="sick visit") &
               (final_data$fever == "yes" | final_data$Aches == "yes" | final_data$Vomiting == "yes" | final_data$Diarrhea == "yes" | final_data$Chills == "yes" | final_data$congestion =="yes" |
                  final_data$Cough =="yes" | final_data$Other == "yes"))) # 414 that meet this
final_data$had_at_least_1_symptom <- had_at_least_1_symptom
# Main exposure, primary case definition: among MONTHLY visits only, a
# qPCR-positive visit is an asymptomatic infection and a qPCR-negative visit
# is no infection; all other visit types remain NA.
# FIX: vectorized -- replaces a row-by-row for loop over nrow(final_data).
# %in% keeps an NA qPCR result out of the index (the row stays NA) instead
# of crashing a scalar if(); the useNA="always" tallies below surface any
# such rows.
monthly_visit <- final_data$visit_type %in% "monthly visit"
main_exposure_primary_case_def <- rep(NA_character_, nrow(final_data))
main_exposure_primary_case_def[monthly_visit & final_data$pf_pcr_infection_status %in% "positive"] <- "asymptomatic infection"
main_exposure_primary_case_def[monthly_visit & final_data$pf_pcr_infection_status %in% "negative"] <- "no infection"
# check the output
table(main_exposure_primary_case_def, useNA="always")
table(main_exposure_primary_case_def,final_data$pf_pcr_infection_status,useNA="always")
table(main_exposure_primary_case_def,final_data$rdt_rst,useNA="always")
length(which(final_data$visit_type == "monthly and sick visit" & final_data$pf_pcr_infection_status == "positive" & final_data$rdt_rst == "positive" &
               !(final_data$pf_pcr_infection_status == "positive" & final_data$rdt_rst == "positive" &
                   (final_data$fever == "yes" | final_data$Aches == "yes" | final_data$Vomiting == "yes" | final_data$Diarrhea == "yes" | final_data$Chills == "yes" | final_data$congestion =="yes" |
                      final_data$Cough =="yes" | final_data$Other == "yes")))) # 13 that meet this
table(main_exposure_primary_case_def,final_data$visit_type, useNA="always")
length(which(final_data$visit_type == "monthly and sick visit" & final_data$pf_pcr_infection_status == "positive" & final_data$rdt_rst == "positive" &
               (final_data$fever == "yes" | final_data$Aches == "yes" | final_data$Vomiting == "yes" | final_data$Diarrhea == "yes" | final_data$Chills == "yes" | final_data$congestion =="yes" |
                  final_data$Cough =="yes" | final_data$Other == "yes"))) # 89 meet this
table(main_exposure_primary_case_def,final_data$had_at_least_1_symptom, useNA="always")
# looks good, add to the data set
final_data$main_exposure_primary_case_def <- main_exposure_primary_case_def
# Main exposure, secondary stringent case definition: the coding rule is the
# same as the primary definition (monthly visits: qPCR positive = asymptomatic
# infection, qPCR negative = no infection); only the variable name differs.
# FIX: vectorized -- replaces a row-by-row for loop over nrow(final_data).
# %in% keeps an NA qPCR result out of the index (the row stays NA) instead
# of crashing a scalar if().
monthly_visit <- final_data$visit_type %in% "monthly visit"
main_exposure_secondary_stringent_case_def <- rep(NA_character_, nrow(final_data))
main_exposure_secondary_stringent_case_def[monthly_visit & final_data$pf_pcr_infection_status %in% "positive"] <- "asymptomatic infection"
main_exposure_secondary_stringent_case_def[monthly_visit & final_data$pf_pcr_infection_status %in% "negative"] <- "no infection"
# check the output
table(main_exposure_secondary_stringent_case_def, useNA="always")
table(main_exposure_secondary_stringent_case_def,final_data$pf_pcr_infection_status,useNA="always")
table(main_exposure_secondary_stringent_case_def,final_data$rdt_rst,useNA="always")
table(main_exposure_secondary_stringent_case_def,final_data$fever,useNA="always")
length(which(final_data$visit_type == "monthly and sick visit" & final_data$pf_pcr_infection_status == "positive" & final_data$rdt_rst == "positive" &
               !(final_data$pf_pcr_infection_status == "positive" & final_data$rdt_rst == "positive" &
                   final_data$fever == "yes" ))) # 34 that meet this
table(main_exposure_secondary_stringent_case_def,final_data$visit_type, useNA="always")
length(which(final_data$visit_type == "monthly and sick visit" & (final_data$pf_pcr_infection_status == "positive" & final_data$rdt_rst == "positive" &
                                                                    final_data$fever == "yes"))) # 68 meet this
table(main_exposure_secondary_stringent_case_def,final_data$had_at_least_1_symptom, useNA="always")
# looks good, add to the data set
final_data$main_exposure_secondary_stringent_case_def <- main_exposure_secondary_stringent_case_def
# Main exposure, secondary permissive case definition: the coding rule is the
# same as the primary definition (monthly visits: qPCR positive = asymptomatic
# infection, qPCR negative = no infection); only the variable name differs.
# FIX: vectorized -- replaces a row-by-row for loop over nrow(final_data).
# %in% keeps an NA qPCR result out of the index (the row stays NA) instead
# of crashing a scalar if().
monthly_visit <- final_data$visit_type %in% "monthly visit"
main_exposure_secondary_permissive_case_def <- rep(NA_character_, nrow(final_data))
main_exposure_secondary_permissive_case_def[monthly_visit & final_data$pf_pcr_infection_status %in% "positive"] <- "asymptomatic infection"
main_exposure_secondary_permissive_case_def[monthly_visit & final_data$pf_pcr_infection_status %in% "negative"] <- "no infection"
# check the output
table(main_exposure_secondary_permissive_case_def, useNA="always")
table(main_exposure_secondary_permissive_case_def,final_data$pf_pcr_infection_status,useNA="always")
table(main_exposure_secondary_permissive_case_def,final_data$rdt_rst,useNA="always")
table(main_exposure_secondary_permissive_case_def,final_data$fever,useNA="always")
length(which(final_data$visit_type == "monthly and sick visit" & final_data$pf_pcr_infection_status == "positive" &
               !(final_data$pf_pcr_infection_status == "positive" &
                   (final_data$fever == "yes" | final_data$Aches == "yes" | final_data$Vomiting == "yes" | final_data$Diarrhea == "yes" | final_data$Chills == "yes" | final_data$congestion =="yes" |
                      final_data$Cough =="yes" | final_data$Other == "yes")))) # 32 that meet this
table(main_exposure_secondary_permissive_case_def,final_data$visit_type, useNA="always")
length(which(final_data$visit_type == "monthly and sick visit" & final_data$pf_pcr_infection_status == "positive" &
               (final_data$fever == "yes" | final_data$Aches == "yes" | final_data$Vomiting == "yes" | final_data$Diarrhea == "yes" | final_data$Chills == "yes" | final_data$congestion =="yes" |
                  final_data$Cough =="yes" | final_data$Other == "yes"))) # 146 meet this
table(main_exposure_secondary_permissive_case_def,final_data$had_at_least_1_symptom, useNA="always")
# looks good, add to the data set
final_data$main_exposure_secondary_permissive_case_def <- main_exposure_secondary_permissive_case_def
#### --------- code the three main outcomes -------- ####
# Main outcome, primary case definition: symptomatic malaria infection =
# a visit with a sick component AND Pf qPCR positive AND RDT positive AND
# at least one reported symptom; everything else remains NA.
# FIX: vectorized -- the two original loop branches ("sick visit" vs
# "monthly and sick visit") differed only in visit_type, so they collapse
# into one %in% condition. %in% keeps NA lab/symptom values out of the
# index (such rows stay NA) instead of crashing a scalar if().
sick_component <- final_data$visit_type %in% c("sick visit", "monthly and sick visit")
pcr_and_rdt_pos <- final_data$pf_pcr_infection_status %in% "positive" & final_data$rdt_rst %in% "positive"
any_symptom_reported <- final_data$fever %in% "yes" | final_data$Aches %in% "yes" | final_data$Vomiting %in% "yes" |
  final_data$Diarrhea %in% "yes" | final_data$Chills %in% "yes" | final_data$congestion %in% "yes" |
  final_data$Cough %in% "yes" | final_data$Other %in% "yes"
main_outcome_primary_case_def <- rep(NA_character_, nrow(final_data))
main_outcome_primary_case_def[sick_component & pcr_and_rdt_pos & any_symptom_reported] <- "symptomatic infection"
# check the output
table(main_outcome_primary_case_def, useNA="always")
table(main_outcome_primary_case_def,final_data$pf_pcr_infection_status,useNA="always")
table(main_outcome_primary_case_def,final_data$rdt_rst,useNA="always")
length(which((final_data$visit_type == "monthly and sick visit" | final_data$visit_type=="sick visit") & final_data$pf_pcr_infection_status == "positive" & final_data$rdt_rst == "positive" &
               (final_data$fever == "yes" | final_data$Aches == "yes" | final_data$Vomiting == "yes" | final_data$Diarrhea == "yes" | final_data$Chills == "yes" | final_data$congestion =="yes" |
                  final_data$Cough =="yes" | final_data$Other == "yes"))) # 310 that meet this
length(which((final_data$visit_type == "monthly and sick visit" | final_data$visit_type=="sick visit") & final_data$pf_pcr_infection_status == "positive" & final_data$rdt_rst == "positive"))
length(which((final_data$visit_type == "monthly and sick visit" | final_data$visit_type=="sick visit") &
               (final_data$fever == "yes" | final_data$Aches == "yes" | final_data$Vomiting == "yes" | final_data$Diarrhea == "yes" | final_data$Chills == "yes" | final_data$congestion =="yes" |
                  final_data$Cough =="yes" | final_data$Other == "yes"))) # 814 that meet this
table(final_data$visit_type, useNA="always")
table(main_outcome_primary_case_def,final_data$visit_type, useNA="always")
table(main_outcome_primary_case_def,final_data$had_at_least_1_symptom, useNA="always")
length(which(final_data$had_at_least_1_symptom=="yes" & final_data$pf_pcr_infection_status=="positive" & final_data$rdt_rst=="positive"))
length(which(final_data$had_at_least_1_symptom=="yes" & !(final_data$pf_pcr_infection_status=="positive" & final_data$rdt_rst=="positive")))
# looks good, add to the data set
final_data$main_outcome_primary_case_def <- main_outcome_primary_case_def
# code the main outcome for the secondary stringent
main_outcome_secondary_stringent_case_def = rep(NA,nrow(final_data))
for (i in 1:nrow(final_data)){
if (final_data$visit_type[i] == "sick visit" & final_data$pf_pcr_infection_status[i] == "positive" & final_data$rdt_rst[i] == "positive" &
(final_data$fever[i] == "yes")){
main_outcome_secondary_stringent_case_def[i] = "symptomatic infection"
} else if (final_data$visit_type[i] == "monthly and sick visit" & final_data$pf_pcr_infection_status[i] == "positive" & final_data$rdt_rst[i] == "positive" &
(final_data$fever[i] == "yes")) {
main_outcome_secondary_stringent_case_def[i] = "symptomatic infection"
}
}
# check the output
table(main_outcome_secondary_stringent_case_def, useNA="always")
table(main_outcome_secondary_stringent_case_def,final_data$pf_pcr_infection_status,useNA="always")
table(main_outcome_secondary_stringent_case_def,final_data$rdt_rst,useNA="always")
table(main_outcome_secondary_stringent_case_def,final_data$fever,useNA="always")
table(main_outcome_secondary_stringent_case_def,final_data$visit_type, useNA="always")
length(which(final_data$pf_pcr_infection_status == "positive" & final_data$rdt_rst == "positive" &
final_data$fever == "yes")) # 247 meet this
table(main_outcome_secondary_stringent_case_def,final_data$had_at_least_1_symptom, useNA="always")
# looks good, add to the data set
final_data$main_outcome_secondary_stringent_case_def = main_outcome_secondary_stringent_case_def
# code the main outcome for the secondary permissive case definition
main_outcome_secondary_permissive_case_def = rep(NA,nrow(final_data))
for (i in 1:nrow(final_data)){
if (final_data$visit_type[i] == "sick visit" & final_data$pf_pcr_infection_status[i] == "positive" &
(final_data$fever[i] == "yes" | final_data$Aches[i] == "yes" | final_data$Vomiting[i] == "yes" | final_data$Diarrhea[i] == "yes" | final_data$Chills[i] == "yes" | final_data$congestion[i] =="yes" |
final_data$Cough[i] =="yes" | final_data$Other[i] == "yes")){
main_outcome_secondary_permissive_case_def[i] = "symptomatic infection"
} else if (final_data$visit_type[i] == "monthly and sick visit" & final_data$pf_pcr_infection_status[i] == "positive" &
(final_data$fever[i] == "yes" | final_data$Aches[i] == "yes" | final_data$Vomiting[i] == "yes" | final_data$Diarrhea[i] == "yes" | final_data$Chills[i] == "yes" | final_data$congestion[i] =="yes" |
final_data$Cough[i] =="yes" | final_data$Other[i] == "yes")) {
main_outcome_secondary_permissive_case_def[i] = "symptomatic infection"
}
}
# check the output
table(main_outcome_secondary_permissive_case_def, useNA="always")
table(main_outcome_secondary_permissive_case_def,final_data$pf_pcr_infection_status,useNA="always")
table(main_outcome_secondary_permissive_case_def,final_data$rdt_rst,useNA="always")
length(which((final_data$visit_type == "monthly and sick visit" | final_data$visit_type=="sick visit") & final_data$pf_pcr_infection_status == "positive" & final_data$rdt_rst == "positive" &
(final_data$fever == "yes" | final_data$Aches == "yes" | final_data$Vomiting == "yes" | final_data$Diarrhea == "yes" | final_data$Chills == "yes" | final_data$congestion =="yes" |
final_data$Cough =="yes" | final_data$Other == "yes"))) # 310 that meet this
length(which((final_data$visit_type == "monthly and sick visit" | final_data$visit_type=="sick visit") & final_data$pf_pcr_infection_status == "positive" & final_data$rdt_rst == "positive"))
length(which((final_data$visit_type == "monthly and sick visit" | final_data$visit_type=="sick visit") &
(final_data$fever == "yes" | final_data$Aches == "yes" | final_data$Vomiting == "yes" | final_data$Diarrhea == "yes" | final_data$Chills == "yes" | final_data$congestion =="yes" |
final_data$Cough =="yes" | final_data$Other == "yes"))) # 814 that meet this
table(final_data$visit_type, useNA="always")
table(main_outcome_secondary_permissive_case_def,final_data$visit_type, useNA="always")
table(main_outcome_secondary_permissive_case_def,final_data$had_at_least_1_symptom, useNA="always")
length(which(final_data$had_at_least_1_symptom=="yes" & final_data$pf_pcr_infection_status=="positive"))
length(which(final_data$had_at_least_1_symptom=="yes" & !(final_data$pf_pcr_infection_status=="positive")))
table(main_outcome_secondary_permissive_case_def,final_data$pf_pcr_infection_status, useNA="always")
# looks good, add to the data set
final_data$main_outcome_secondary_permissive_case_def = main_outcome_secondary_permissive_case_def
# check the exposure and outcomes coding
# for the primary coding
table(final_data$main_exposure_primary_case_def,final_data$main_outcome_primary_case_def,useNA="always")
# for the secondary stringent coding
table(final_data$main_exposure_secondary_stringent_case_def,final_data$main_outcome_secondary_stringent_case_def,useNA="always")
# for the secondary permissive coding
table(final_data$main_exposure_secondary_permissive_case_def,final_data$main_outcome_secondary_permissive_case_def,useNA="always")
# all looks good
# export data sets
write_csv(final_data,"Desktop/phase3_spat21_human_final_censored_data_for_dissertation_with_exposure_outcome_18AUG2020.csv")
write_rds(final_data,"Desktop/phase3_spat21_human_final_censored_data_for_dissertation_with_exposure_outcome_18AUG2020.rds")
|
# R codes below are used for climate reconstruction of past climate using Hinoki cypress tree ring width:
# Tree ring ID: '100$19M', Period: 1734-2006
# Climate Records:
# Site: Matsumoto
# Load R packages:
library(RMySQL)
library(dplyr)
library(tseries)
library(bootRes)
library(reshape)
library(reshape2)
library(dplR)
# clear workspace
rm(list=ls())
# Tree ring data:
con=dbConnect(RMySQL::MySQL(), host = "localhost",user = "root", password = "timberland",dbname="HinokiCypress")
dbListTables(con)
dbListFields(con,'treeringsource')
dbListFields(con,'treeringwidthdata')
rs=dbSendQuery(con,"SELECT Years,RawWidth,TreeRingName FROM treeringsource, treeringwidthdata WHERE TreeRingID=ringID AND TreeRingName='100$19M' ORDER BY Years;")
ring=dbFetch(rs,n=-1); ring=ring[,c(1,2)]; colnames(ring)=c("year","width")
dbClearResult(rs)
dbDisconnect(con)
# Climate Data:
con=dbConnect(RMySQL::MySQL(),host='localhost',user='root',password='timberland',dbname='HinokiCypress')
dbListTables(con)
dbListFields(con,'climatedata'); dbListFields(con,'climatestation'); dbListFields(con,'climatevariable')
rs=dbSendQuery(con,"SELECT Years,Months,variable,yvalue,SiteNames FROM climatedata,climatestation,climatevariable
WHERE sitid=SiteID AND varid=VariableID AND SiteNames='Matsumoto' ORDER BY Years;")
clim=dbFetch(rs,n=-1)
dbClearResult(rs)
dbDisconnect(con)
# Ring width:----
# Detrend a raw ring width by Ar and get residual chronology
year=ring[,"year"]
width=ring[,"width"]
series=data.frame(width,row.names=year)
names(width)=rownames(series)
#series.rwi=detrend.series(y=width,y.name="100-19M",verbose=TRUE,nyrs=20)
pdf(file="detrend_width.pdf",width=12,height=9)
series.rwi=detrend.series(y=width,y.name="100-19M",verbose=TRUE,nyrs=20)
dev.off()
gg=data.frame(year=rownames(series.rwi),series.rwi,row.names=NULL)
ring=merge(ring,gg,by="year") # Use this ring data for all analyses
# Fit AR1 for Residual Series
AR=select.list(names(ring[,2:ncol(ring)]),multiple=TRUE,title="Select Type of Rind Width Series For Residual Series:",graphics=TRUE)
x=ring[,names(ring) %in% AR]
M=arma(x,order=c(1,0))
acf(residuals(M),na.action=na.remove)
X=data.frame(ring,ar1=residuals(M)) # Use "residual chronology" derived from selected type of ring width series
colnames(X)[c(2:ncol(X))]=c("raw","spline","modnegexp","means","ar","res")
# Climate Data:----
# NDM0 = the number of days of daily min. temp. below 0 Celsius (excluding 0) (日最低気温0度未満の日数)
# NDA0 = the number of days of average daily temp. below 0 Celsius (日平均気温0度未満の日数)
# ADMM = Monthly average daily min. temp. (日最低気温の月平均)
# ADMX = Monthly average daily max. temp. (日最高気温の月平均)
# ADMMX = ADMX - ADMM
# ADTM = Average temp. (平均気温)
# MTPP = Monthly total precipitation
# Delete missing values (i.e., -999.0):
Y=clim
Y$yvalue[Y$yvalue==-999.0]=NA
# Choose Climate Variables:----
colnames(Y)[1:2]=c("year","month");Y=Y[,c(1:4)]
Y=dcast(Y,year+month~variable)
# BootRes:
# Choose only up to 2 cliamte variables:
Clim=select.list(names(Y[,3:ncol(Y)]),multiple=TRUE,title="Select Climate Variables of Your interest:",graphics=TRUE)
Y1=Y[,names(Y) %in% c("year","month",Clim)]
# Select Type of Ring Width (Choose only one)
Ring.list=select.list(names(X[,2:ncol(X)]),multiple=TRUE,title="Select Rind Width Series of Xour Interest:",graphics=TRUE)
X1=X[,names(X) %in% c("year",Ring.list)]
# Identify Strength of Monthly Signals with Tree Ring----
X1=data.frame(X1[,1:2],row.names="year")
# View Important Climate Variables
op=par(mar=c(5,5,6,3))
dc.corr <- dcc(X1,Y1,method = "corr")
dcplot(dc.corr)
par(op)
## Correlation and Linear Regression
# Reshape Climte dataset
## Use only one climate variable:
Clim=select.list(names(Y[,3:ncol(Y)]),multiple=TRUE,title="Select only one climate variable:",graphics=TRUE)
Y1=Y[,names(Y) %in% c("year","month",Clim)]
Y1=recast(Y1,year~variable+month,id.var=c("year","month"),na.rm=TRUE)
# Convert to time series object
Y1=ts(Y1,frequency=1,start=min(Y1$year),end=max(Y1$year))
Y1=cbind(p=Y1,c=lag(Y1))
Y1=as.data.frame(Y1)
# Rename variables for ease of interpretation
colnames(Y1)=c("year","Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec",
"p.year","p.Jan","p.Feb","p.Mar","p.Apr","p.May","p.Jun","p.Jul","p.Aug","p.Sep","p.Oct","p.Nov","p.Dec")
ind=which(colnames(Y1)=="p.year")
Y1=Y1[-1,c(1:(ind-1),(ind+1):ncol(Y1))]
# Select Months to be Averaged
Month.list=select.list(names(Y1[,c(2:ncol(Y1))]),multiple=TRUE,title="Choose Months to be Averaged:",graphics=TRUE)
if(length(Month.list)==1) {Y2=transform(Y1,means=Y1[,c(Month.list)])} else{Y2=transform(Y1,means=rowMeans(Y1[,c(Month.list)]))}
# ID time period
time=paste(min(Y2$year),max(Y2$year),sep="-")
timePeriod=matrix(unlist(strsplit(time,"-")),length(time),2,byrow=TRUE)
# Ring width do not seem reliable after 1998 based on large residuals after linear fit. so remove
# ring width after 1998
YearCut=1998
# Delete rows where missing values are observed
Y2=na.omit(Y2)
Y2=subset(Y2,year>=as.numeric(timePeriod[1,1]) & year<YearCut)
# Select Ring Width during the specified Period
#X1=subset(X,year>=as.numeric(timePeriod[1,1]) & year<=as.numeric(timePeriod[1,2]))
X1=subset(X,year>=as.numeric(timePeriod[1,1]) & year<YearCut)
Ring.list=select.list(names(X1[,2:ncol(X1)]),multiple=TRUE,title="Select Rind Width Series of Xour Interest:",graphics=TRUE)
X1=X1[,names(X1) %in% c("year",Ring.list)]
# Plot
RingName=names(X1)[2]
op=par(mar=c(5,5,4,5))
c=paste(Month.list,collapse=", ")
plot(Y2$year,Y2$means,type="l",xlab="Year",ylab=Clim,main=paste("Months: ",c,sep=""))
par(new=TRUE)
plot(X$year,X[,2],col="blue",type="l",axes = FALSE, bty = "n", xlab = "", ylab = "") # Tree ring
axis(side=4, at = pretty(range(X[,2])))
mtext(RingName, side=4, line=3)
par(op)
# Run correlation and linear regression
XX=merge(X1,Y2,by="year")
cor(XX[,2],XX$means,method="pearson")
M0=lm(XX$means~XX[,2])
summary(M0)
op=par(mfrow=c(2,2))
plot(M0)
par(op)
####################################################
## 1B-1: When You Chose "Daily Min. Temp. Matsumoto 1898-2006.csv-----
# Note: Minimum temperatures above 0 are already omitted from the dataset.
# Convert date to Date class
head(Climate)
Climate$date=as.Date(Climate$date)
Climate$time=as.Date(Climate$date,"%Y-%m-%d")
Climate$week=1
Climate$week[Climate$day>=15 & Climate$day<=31]=2
# Convert min. temp to numeric variable
Climate$min.temp=as.numeric(Climate$min.temp)
###########################################
## 2A: Daily Minimum Temperature Matsumoto 1898-2006:----
## 2A-1: Sum of Daily Minimum Temperature:----
library(reshape2)
X1=Climate[,c("min.temp","year","month")]
X1$min.temp[X1$min.temp>=0]=0
X1=recast(X1,year~variable+month,sum,id.var=c("year","month"),na.rm=FALSE) # Sum daily min temp
colnames(X1)=c("year","Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
# Select Type of Ring Width
Y=ring
Ring.list=select.list(names(Y[,2:ncol(Y)]),multiple=TRUE,title="Select Rind Width Series of Your Interest:",graphics=TRUE)
List="year"
Y=Y[,names(Y) %in% c(List,Ring.list)]
Y=data.frame(Y[,1:2],row.names="year")
## Prepare a table descring monthly sum of daily min. temp.
### convert to time series object
X=X1
X=ts(X,frequency=1,start=min(X$year),end=max(X$year))
X=cbind(p=X,c=lag(X))
# Convert back to data.frame
X=as.data.frame(X)
# rename variables for ease of interpretation
colnames(X)=c("p.year","p.Jan","p.Feb","p.Mar","p.Apr","p.May","p.Jun","p.Jul","p.Aug","p.Sep","p.Oct","p.Nov","p.Dec",
"year","Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
ind=which(colnames(X)=="year")
X=X[,c(ind,2:(ind-1),(ind+1):ncol(X))]
# Select Months to be Summed or averaged
ind=which(colnames(X)=="year")
Month.list=select.list(names(X[,c(2:ncol(X))]),multiple=TRUE,title="Choose Months to be Summed:",graphics=TRUE)
Month.list
X=transform(X,y=rowSums(X[,c(Month.list)])) # Sum
X=transform(X,y=abs(rowSums(X[,c(Month.list)]))) # Absolute Sum
X=transform(X,y=rowMeans(X[,c(Month.list)])) # Average
# Delete rows where missing values are observed
X=na.omit(X)
# ID time period
time=paste(min(X$year),max(X$year),sep="-")
timePeriod=matrix(unlist(strsplit(time,"-")),length(time),2,byrow=TRUE)
timePeriod
# Response to be used for correlation and linear regression
# this is wrong. how to exlude "0" from calcuation? mean(sum) shoud not include "0"
X=transform(X,y1=y) # Raw: 1
X=transform(X,y1=mean(y)/y) # 2
X=transform(X,y1=y/mean(y)) # 3
X=transform(X,y1=scale(y)) # standardize: 4
# Select Type of Ring Width
Y=ring
Y=subset(ring,year>=as.numeric(timePeriod[1,1]) & year<=as.numeric(timePeriod[1,2]))
Ring.list=select.list(names(Y[,2:ncol(Y)]),multiple=TRUE,title="Select Rind Width Series of Your Interest:",graphics=TRUE)
List="year"
Y=Y[,names(Y) %in% c(List,Ring.list)]
names(Y)
# Plot
op=par(mar=c(5,5,4,5))
plot(X$year,X$y1,type="l")
par(new=TRUE)
plot(Y$year,Y[,2],col="blue",type="l",axes = FALSE, bty = "n", xlab = "", ylab = "")
axis(side=4, at = pretty(range(Y[,2])))
mtext("Y$width", side=4, line=3)
# Run correlation and linear regression
cor(X$y1,Y[,2],method="spearman") # Spearman should be used when variables are not normally distributed
M0=lm(X$y1~Y[,2])
summary(M0)
############################
## PCA Analysis
X=X1
X=ts(X,frequency=1,start=min(X$year),end=max(X$year))
X=cbind(p=X,c=lag(X))
# Convert back to data.frame
X=as.data.frame(X)
# rename variables for ease of interpretation
colnames(X)=c("p.year","p.Jan","p.Feb","p.Mar","p.Apr","p.May","p.Jun","p.Jul","p.Aug","p.Sep","p.Oct","p.Nov","p.Dec",
"year","Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
ind=which(colnames(X)=="year")
X=X[,c(ind,2:(ind-1),(ind+1):ncol(X))]
X=na.omit(X)
# Select Months for PCA
Month.list=select.list(names(X[,c(2:ncol(X))]),multiple=TRUE,title="Choose Months Used for PCA:",graphics=TRUE)
List="year"
X=X[,names(X) %in% c(List,Month.list)]
Month.list
head(X)
# Select Type of Ring Width
Y=ring
Ring.list=select.list(names(Y[,2:ncol(Y)]),multiple=TRUE,title="Select Rind Width Series of Your Interest:",graphics=TRUE)
List="year"
Y=Y[,names(Y) %in% c(List,Ring.list)]
#Y=data.frame(Y[,1:2],row.names="year")
head(X);head(Y)
X2=merge(Y,X,by="year")
# fit PCA
library(FactoMineR)
RES=PCA(X2[,3:ncol(X2)],quanti.sup=2,scale.unit=TRUE,graph=FALSE,ncp=4)
RES$eig
RES$var$cor
Score=RES$ind$coord
X3=cbind(X2[,1:2],Score)
X3=X3[,-1]
M=lm(ModNegExp~., data=X3)
summary(M)
M1=update(M,.~.-Dim.3)
summary(M1)
M2=update(M1,.~.-Dim.4)
summary(M2)
### 2A-2-II. Analyze Bi-weekly----
# Sum daily min. temp. by year and month
library(reshape2)
X=Climate
X=X[,-c(1,5,6)]
X$min.temp[X$min.temp>=0]=0
X=recast(X,year~variable+month+week,sum,id.var=c("year","month","week"),na.rm=FALSE)
head(X)
## Prepare a table descring monthly sum of daily min. temp.
### convert to time series object
X=ts(X,frequency=1,start=min(X$year),end=max(X$year))
X=cbind(X,lag(X))
### Convert back to data.frame
X=as.data.frame(X)
names(X)
### rename variables for ease of interpretation
colnames(X)=c("lag.year","l.Jan.1","l.Jan.2","l.Feb.1","l.Feb.2","l.Mar.1","l.Mar.2","l.Apr.1","l.Apr.2",
"l.May.1","l.May.2","l.Jun.1","l.Jun.2","l.Jul.1","l.Jul.2","l.Aug.1","l.Aug.2","l.Sep.1","l.Sep.2",
"l.Oct.1","l.Oct.2","l.Nov.1","l.Nov.2","l.Dec.1","l.Dec.2","year",
"Jan.1","Jan.2","Feb.1","Feb.2","Mar.1","Mar.2","Apr.1","Apr.2","May.1","May.2",
"Jun.1","Jun.2","Ju1","Ju2","Aug.1","Aug.2","Sep.1","Sep.2","Oct.1","Oct.2","Nov.1","Nov.2","Dec.1","Dec.2")
ind=which(colnames(X)=="year")
X=X[,c(ind,2:(ind-1),(ind+1):ncol(X))]
# Select Months to be Summed
ind=which(colnames(X)=="year")
Month.list=select.list(names(X[,c(2:ncol(X))]),multiple=TRUE,title="Choose Months to be Summed:",graphics=TRUE)
if(length(Month.list)==1) {
X=transform(X,sum=abs(X[,c(Month.list)]))
} else{
X=transform(X,sum=abs(rowSums(X[,c(Month.list)])))
}
# Delete rows where missing values are observed
X=na.omit(X)
# ID time period
time=paste(min(X$year),max(X$year),sep="-")
timePeriod=matrix(unlist(strsplit(time,"-")),length(time),2,byrow=TRUE)
# Ceter sum with a column mean
X=transform(X,c.sum=mean(sum)/sum)
X=transform(X,c.sum=sum/mean(sum))
# Select Type of Ring Width
Y=subset(ring,year>=as.numeric(timePeriod[1,1]) & year<=as.numeric(timePeriod[1,2]))
Ring.list=select.list(names(Y[,2:ncol(Y)]),multiple=TRUE,title="Select Rind Width Series of Your Interest:",graphics=TRUE)
List="year"
Y=Y[,names(Y) %in% c(List,Ring.list)]
# Plot
head(X);head(Y)
X
op=par(mar=c(5,5,4,5))
plot(X$year,X$c.sum,type="l")
par(new=TRUE)
plot(Y$year,Y[,2],col="blue",type="l",axes = FALSE, bty = "n", xlab = "", ylab = "")
axis(side=4, at = pretty(range(Y[,2])))
mtext("Y$width", side=4, line=3)
# Run correlation and linear regression
cor(X$c.sum,Y[,2])
M0=lm(X$c.sum~Y[,2])
summary(M0)
# # # # # # # # # # # # # # # # # # # # # # # #
## PCA and Random Forest
# ID important monthly climate variables
## PCA Analysis
X=Climate[,-1] # drop date
# Choose only up to 2 cliamte variables:
Clim=select.list(names(X[,3:ncol(X)]),multiple=TRUE,title="Select Climate Variables of Your interest:",graphics=TRUE)
List=c("year","month")
X=X[,names(X) %in% c(List,Clim)]
X1=recast(X,year~variable+month,id.var=c("year","month"),na.rm=TRUE)
# Convert to time series object
X1=ts(X1,frequency=1,start=min(X1$year),end=max(X1$year))
X1=cbind(p=X1,c=lag(X1))
# Convert back to data.frame
X1=as.data.frame(X1)
names(X1)
ind=which(colnames(X1)=="c.year")
X1=X1[,c(ind,2:(ind-1),(ind+1):ncol(X1))]
X1=na.omit(X1)
colnames(X1)[1]="year"
# Fit Random Forest to identify and reduce monthly variables prior to PCA
library(randomForest)
## Select Type of Ring Width
Y=ring
Ring.list=select.list(names(Y[,2:ncol(Y)]),multiple=TRUE,title="Select Rind Width Series of Your Interest:",graphics=TRUE)
List="year"
Y=Y[,names(Y) %in% c(List,Ring.list)]
#Y=data.frame(Y[,1:2],row.names="year")
## Merge ring width & Climate variables
head(X1);head(Y)
X2=merge(Y,X1,by="year")
X3=X2[,-1]
## Run RF
fit=randomForest(ModNegExp~.,data=X3,confusion=TRUE,ntree=5000,proximity=TRUE,importance=TRUE,na.action=na.omit)
print(fit)
# Variable importance
varImpPlot(fit,cex=0.7,main="Variable Importance")
box(which = "outer", lty = "solid")
# Choose top 10 important variable
rf=data.frame(importance(fit))
rf=data.frame(Var=rownames(rf),rf);rownames(rf)=NULL
rf1=rf[order(-rf$X.IncMSE),,drop=FALSE]
a=rf1[1:30,]
VarNames=as.character(a$Var)
# If you want to Select monthly Climate variables based on RandomForest:
Ring=colnames(X2[2])
Year="year"
X2=X2[,names(X2) %in% c(Year,Ring,VarNames)]
# fit PCA
library(FactoMineR)
RES=PCA(X2[,3:ncol(X2)],quanti.sup=2,scale.unit=TRUE,graph=FALSE,ncp=33)
RES$eig
RES$var$cor
Score=RES$ind$coord
X3=cbind(X2[,1:2],Score)
X3=X3[,-1]
library(RcmdrMisc)
M=lm(ModNegExp~., data=X3)
step=stepwise(M,criterion="AIC")
summary(step)
| /ClimateReconstruction.R | no_license | EijiGorilla/Hinoki-Cypress | R | false | false | 15,118 | r | # R codes below are used for climate reconstruction of past climate using Hinoki cypress tree ring width:
# Tree ring ID: '100$19M', Period: 1734-2006
# Climate Records:
# Site: Matsumoto
# Load R packages:
library(RMySQL)
library(dplyr)
library(tseries)
library(bootRes)
library(reshape)
library(reshape2)
library(dplR)
# clear workspace
rm(list=ls())
# Tree ring data:
con=dbConnect(RMySQL::MySQL(), host = "localhost",user = "root", password = "timberland",dbname="HinokiCypress")
dbListTables(con)
dbListFields(con,'treeringsource')
dbListFields(con,'treeringwidthdata')
rs=dbSendQuery(con,"SELECT Years,RawWidth,TreeRingName FROM treeringsource, treeringwidthdata WHERE TreeRingID=ringID AND TreeRingName='100$19M' ORDER BY Years;")
ring=dbFetch(rs,n=-1); ring=ring[,c(1,2)]; colnames(ring)=c("year","width")
dbClearResult(rs)
dbDisconnect(con)
# Climate Data:
con=dbConnect(RMySQL::MySQL(),host='localhost',user='root',password='timberland',dbname='HinokiCypress')
dbListTables(con)
dbListFields(con,'climatedata'); dbListFields(con,'climatestation'); dbListFields(con,'climatevariable')
rs=dbSendQuery(con,"SELECT Years,Months,variable,yvalue,SiteNames FROM climatedata,climatestation,climatevariable
WHERE sitid=SiteID AND varid=VariableID AND SiteNames='Matsumoto' ORDER BY Years;")
clim=dbFetch(rs,n=-1)
dbClearResult(rs)
dbDisconnect(con)
# Ring width:----
# Detrend a raw ring width by Ar and get residual chronology
year=ring[,"year"]
width=ring[,"width"]
series=data.frame(width,row.names=year)
names(width)=rownames(series)
#series.rwi=detrend.series(y=width,y.name="100-19M",verbose=TRUE,nyrs=20)
pdf(file="detrend_width.pdf",width=12,height=9)
series.rwi=detrend.series(y=width,y.name="100-19M",verbose=TRUE,nyrs=20)
dev.off()
gg=data.frame(year=rownames(series.rwi),series.rwi,row.names=NULL)
ring=merge(ring,gg,by="year") # Use this ring data for all analyses
# Fit AR1 for Residual Series
AR=select.list(names(ring[,2:ncol(ring)]),multiple=TRUE,title="Select Type of Rind Width Series For Residual Series:",graphics=TRUE)
x=ring[,names(ring) %in% AR]
M=arma(x,order=c(1,0))
acf(residuals(M),na.action=na.remove)
X=data.frame(ring,ar1=residuals(M)) # Use "residual chronology" derived from selected type of ring width series
colnames(X)[c(2:ncol(X))]=c("raw","spline","modnegexp","means","ar","res")
# Climate Data:----
# NDM0 = the number of days of daily min. temp. below 0 Celsius (excluding 0) (日最低気温0度未満の日数)
# NDA0 = the number of days of average daily temp. below 0 Celsius (日平均気温0度未満の日数)
# ADMM = Monthly average daily min. temp. (日最低気温の月平均)
# ADMX = Monthly average daily max. temp. (日最高気温の月平均)
# ADMMX = ADMX - ADMM
# ADTM = Average temp. (平均気温)
# MTPP = Monthly total precipitation
# Delete missing values (i.e., -999.0):
Y=clim
Y$yvalue[Y$yvalue==-999.0]=NA
# Choose Climate Variables:----
colnames(Y)[1:2]=c("year","month");Y=Y[,c(1:4)]
Y=dcast(Y,year+month~variable)
# BootRes:
# Choose only up to 2 cliamte variables:
Clim=select.list(names(Y[,3:ncol(Y)]),multiple=TRUE,title="Select Climate Variables of Your interest:",graphics=TRUE)
Y1=Y[,names(Y) %in% c("year","month",Clim)]
# Select Type of Ring Width (Choose only one)
Ring.list=select.list(names(X[,2:ncol(X)]),multiple=TRUE,title="Select Rind Width Series of Xour Interest:",graphics=TRUE)
X1=X[,names(X) %in% c("year",Ring.list)]
# Identify Strength of Monthly Signals with Tree Ring----
X1=data.frame(X1[,1:2],row.names="year")
# View Important Climate Variables
op=par(mar=c(5,5,6,3))
dc.corr <- dcc(X1,Y1,method = "corr")
dcplot(dc.corr)
par(op)
## Correlation and Linear Regression
# Reshape Climte dataset
## Use only one climate variable:
Clim=select.list(names(Y[,3:ncol(Y)]),multiple=TRUE,title="Select only one climate variable:",graphics=TRUE)
Y1=Y[,names(Y) %in% c("year","month",Clim)]
Y1=recast(Y1,year~variable+month,id.var=c("year","month"),na.rm=TRUE)
# Convert to time series object
Y1=ts(Y1,frequency=1,start=min(Y1$year),end=max(Y1$year))
Y1=cbind(p=Y1,c=lag(Y1))
Y1=as.data.frame(Y1)
# Rename variables for ease of interpretation
colnames(Y1)=c("year","Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec",
"p.year","p.Jan","p.Feb","p.Mar","p.Apr","p.May","p.Jun","p.Jul","p.Aug","p.Sep","p.Oct","p.Nov","p.Dec")
ind=which(colnames(Y1)=="p.year")
Y1=Y1[-1,c(1:(ind-1),(ind+1):ncol(Y1))]
# Select Months to be Averaged
Month.list=select.list(names(Y1[,c(2:ncol(Y1))]),multiple=TRUE,title="Choose Months to be Averaged:",graphics=TRUE)
if(length(Month.list)==1) {Y2=transform(Y1,means=Y1[,c(Month.list)])} else{Y2=transform(Y1,means=rowMeans(Y1[,c(Month.list)]))}
# ID time period
time=paste(min(Y2$year),max(Y2$year),sep="-")
timePeriod=matrix(unlist(strsplit(time,"-")),length(time),2,byrow=TRUE)
# Ring width do not seem reliable after 1998 based on large residuals after linear fit. so remove
# ring width after 1998
YearCut=1998
# Delete rows where missing values are observed
Y2=na.omit(Y2)
Y2=subset(Y2,year>=as.numeric(timePeriod[1,1]) & year<YearCut)
# Select Ring Width during the specified Period
#X1=subset(X,year>=as.numeric(timePeriod[1,1]) & year<=as.numeric(timePeriod[1,2]))
X1=subset(X,year>=as.numeric(timePeriod[1,1]) & year<YearCut)
Ring.list=select.list(names(X1[,2:ncol(X1)]),multiple=TRUE,title="Select Rind Width Series of Xour Interest:",graphics=TRUE)
X1=X1[,names(X1) %in% c("year",Ring.list)]
# Plot
RingName=names(X1)[2]
op=par(mar=c(5,5,4,5))
c=paste(Month.list,collapse=", ")
plot(Y2$year,Y2$means,type="l",xlab="Year",ylab=Clim,main=paste("Months: ",c,sep=""))
par(new=TRUE)
plot(X$year,X[,2],col="blue",type="l",axes = FALSE, bty = "n", xlab = "", ylab = "") # Tree ring
axis(side=4, at = pretty(range(X[,2])))
mtext(RingName, side=4, line=3)
par(op)
# Run correlation and linear regression
XX=merge(X1,Y2,by="year")
cor(XX[,2],XX$means,method="pearson")
M0=lm(XX$means~XX[,2])
summary(M0)
op=par(mfrow=c(2,2))
plot(M0)
par(op)
####################################################
## 1B-1: When You Chose "Daily Min. Temp. Matsumoto 1898-2006.csv-----
# Note: Minimum temperatures above 0 are already omitted from the dataset.
# Convert date to Date class
head(Climate)
Climate$date=as.Date(Climate$date)
Climate$time=as.Date(Climate$date,"%Y-%m-%d")
Climate$week=1
Climate$week[Climate$day>=15 & Climate$day<=31]=2
# Convert min. temp to numeric variable
Climate$min.temp=as.numeric(Climate$min.temp)
###########################################
## 2A: Daily Minimum Temperature Matsumoto 1898-2006:----
## 2A-1: Sum of Daily Minimum Temperature:----
library(reshape2)
X1=Climate[,c("min.temp","year","month")]
X1$min.temp[X1$min.temp>=0]=0
X1=recast(X1,year~variable+month,sum,id.var=c("year","month"),na.rm=FALSE) # Sum daily min temp
colnames(X1)=c("year","Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
# Select Type of Ring Width
Y=ring
Ring.list=select.list(names(Y[,2:ncol(Y)]),multiple=TRUE,title="Select Rind Width Series of Your Interest:",graphics=TRUE)
List="year"
Y=Y[,names(Y) %in% c(List,Ring.list)]
Y=data.frame(Y[,1:2],row.names="year")
## Prepare a table descring monthly sum of daily min. temp.
### convert to time series object
X=X1
X=ts(X,frequency=1,start=min(X$year),end=max(X$year))
X=cbind(p=X,c=lag(X))
# Convert back to data.frame
X=as.data.frame(X)
# rename variables for ease of interpretation
colnames(X)=c("p.year","p.Jan","p.Feb","p.Mar","p.Apr","p.May","p.Jun","p.Jul","p.Aug","p.Sep","p.Oct","p.Nov","p.Dec",
"year","Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
ind=which(colnames(X)=="year")
X=X[,c(ind,2:(ind-1),(ind+1):ncol(X))]
# Select Months to be Summed or averaged
ind=which(colnames(X)=="year")
Month.list=select.list(names(X[,c(2:ncol(X))]),multiple=TRUE,title="Choose Months to be Summed:",graphics=TRUE)
Month.list
X=transform(X,y=rowSums(X[,c(Month.list)])) # Sum
X=transform(X,y=abs(rowSums(X[,c(Month.list)]))) # Absolute Sum
X=transform(X,y=rowMeans(X[,c(Month.list)])) # Average
# Delete rows where missing values are observed
X=na.omit(X)
# ID time period
time=paste(min(X$year),max(X$year),sep="-")
timePeriod=matrix(unlist(strsplit(time,"-")),length(time),2,byrow=TRUE)
timePeriod
# Response to be used for correlation and linear regression
# this is wrong. how to exlude "0" from calcuation? mean(sum) shoud not include "0"
X=transform(X,y1=y) # Raw: 1
X=transform(X,y1=mean(y)/y) # 2
X=transform(X,y1=y/mean(y)) # 3
X=transform(X,y1=scale(y)) # standardize: 4
# Select Type of Ring Width
Y=ring
Y=subset(ring,year>=as.numeric(timePeriod[1,1]) & year<=as.numeric(timePeriod[1,2]))
Ring.list=select.list(names(Y[,2:ncol(Y)]),multiple=TRUE,title="Select Rind Width Series of Your Interest:",graphics=TRUE)
List="year"
Y=Y[,names(Y) %in% c(List,Ring.list)]
names(Y)
# Plot
op=par(mar=c(5,5,4,5))
plot(X$year,X$y1,type="l")
par(new=TRUE)
plot(Y$year,Y[,2],col="blue",type="l",axes = FALSE, bty = "n", xlab = "", ylab = "")
axis(side=4, at = pretty(range(Y[,2])))
mtext("Y$width", side=4, line=3)
# Run correlation and linear regression
cor(X$y1,Y[,2],method="spearman") # Spearman should be used when variables are not normally distributed
M0=lm(X$y1~Y[,2])
summary(M0)
############################
## PCA Analysis
# Start from monthly climate table X1; build current-year + previous-year
# ("lagged") monthly columns side by side
X=X1
X=ts(X,frequency=1,start=min(X$year),end=max(X$year))
X=cbind(p=X,c=lag(X))
# Convert back to data.frame
X=as.data.frame(X)
# rename variables for ease of interpretation ("p." prefix = prior-year columns)
colnames(X)=c("p.year","p.Jan","p.Feb","p.Mar","p.Apr","p.May","p.Jun","p.Jul","p.Aug","p.Sep","p.Oct","p.Nov","p.Dec",
"year","Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
# Move the unlagged "year" column to the front, drop the duplicate lag-year column
ind=which(colnames(X)=="year")
X=X[,c(ind,2:(ind-1),(ind+1):ncol(X))]
X=na.omit(X)
# Select Months for PCA (interactive)
Month.list=select.list(names(X[,c(2:ncol(X))]),multiple=TRUE,title="Choose Months Used for PCA:",graphics=TRUE)
List="year"
X=X[,names(X) %in% c(List,Month.list)]
Month.list
head(X)
# Select Type of Ring Width (interactive)
Y=ring
Ring.list=select.list(names(Y[,2:ncol(Y)]),multiple=TRUE,title="Select Rind Width Series of Your Interest:",graphics=TRUE)
List="year"
Y=Y[,names(Y) %in% c(List,Ring.list)]
#Y=data.frame(Y[,1:2],row.names="year")
head(X);head(Y)
# Align ring width and climate predictors on common years
X2=merge(Y,X,by="year")
# fit PCA on the climate columns (quanti.sup indexes columns of the data
# actually passed to PCA, i.e. of X2[,3:ncol(X2)] -- TODO confirm intended column)
library(FactoMineR)
RES=PCA(X2[,3:ncol(X2)],quanti.sup=2,scale.unit=TRUE,graph=FALSE,ncp=4)
RES$eig
RES$var$cor
Score=RES$ind$coord
# Regress detrended ring width (ModNegExp) on the PC scores,
# then drop non-significant dimensions one at a time
X3=cbind(X2[,1:2],Score)
X3=X3[,-1]
M=lm(ModNegExp~., data=X3)
summary(M)
M1=update(M,.~.-Dim.3)
summary(M1)
M2=update(M1,.~.-Dim.4)
summary(M2)
### 2A-2-II. Analyze Bi-weekly----
# Sum daily min. temp. by year, month, and half-month ("week")
library(reshape2)
X=Climate
X=X[,-c(1,5,6)]
# Keep only freezing degrees: clamp non-negative minimum temps to zero
X$min.temp[X$min.temp>=0]=0
X=recast(X,year~variable+month+week,sum,id.var=c("year","month","week"),na.rm=FALSE)
head(X)
## Prepare a table describing bi-weekly sums of daily min. temp.
### convert to time series object, pairing each year with the previous year
X=ts(X,frequency=1,start=min(X$year),end=max(X$year))
X=cbind(X,lag(X))
### Convert back to data.frame
X=as.data.frame(X)
names(X)
### rename variables for ease of interpretation ("l." prefix = lagged columns)
### FIX: "Ju1"/"Ju2" were typos for "Jul.1"/"Jul.2" and broke the
### otherwise-consistent <Month>.<half> naming scheme
colnames(X)=c("lag.year","l.Jan.1","l.Jan.2","l.Feb.1","l.Feb.2","l.Mar.1","l.Mar.2","l.Apr.1","l.Apr.2",
"l.May.1","l.May.2","l.Jun.1","l.Jun.2","l.Jul.1","l.Jul.2","l.Aug.1","l.Aug.2","l.Sep.1","l.Sep.2",
"l.Oct.1","l.Oct.2","l.Nov.1","l.Nov.2","l.Dec.1","l.Dec.2","year",
"Jan.1","Jan.2","Feb.1","Feb.2","Mar.1","Mar.2","Apr.1","Apr.2","May.1","May.2",
"Jun.1","Jun.2","Jul.1","Jul.2","Aug.1","Aug.2","Sep.1","Sep.2","Oct.1","Oct.2","Nov.1","Nov.2","Dec.1","Dec.2")
# Move the unlagged "year" column to the front, drop the duplicate lag-year column
ind=which(colnames(X)=="year")
X=X[,c(ind,2:(ind-1),(ind+1):ncol(X))]
# Select Months to be Summed (interactive)
ind=which(colnames(X)=="year")
Month.list=select.list(names(X[,c(2:ncol(X))]),multiple=TRUE,title="Choose Months to be Summed:",graphics=TRUE)
if(length(Month.list)==1) {
X=transform(X,sum=abs(X[,c(Month.list)]))
} else{
X=transform(X,sum=abs(rowSums(X[,c(Month.list)])))
}
# Delete rows where missing values are observed
X=na.omit(X)
# ID time period
time=paste(min(X$year),max(X$year),sep="-")
timePeriod=matrix(unlist(strsplit(time,"-")),length(time),2,byrow=TRUE)
# Center sum with the column mean -- the second transform() overwrites the
# first, so only the LAST one executed takes effect; run the ONE you want
X=transform(X,c.sum=mean(sum)/sum)
X=transform(X,c.sum=sum/mean(sum))
# Select Type of Ring Width (interactive), restricted to the climate period
Y=subset(ring,year>=as.numeric(timePeriod[1,1]) & year<=as.numeric(timePeriod[1,2]))
Ring.list=select.list(names(Y[,2:ncol(Y)]),multiple=TRUE,title="Select Rind Width Series of Your Interest:",graphics=TRUE)
List="year"
Y=Y[,names(Y) %in% c(List,Ring.list)]
# Plot: centered sum on the left axis, ring width overlaid on a right-hand axis
head(X);head(Y)
X
op=par(mar=c(5,5,4,5))
plot(X$year,X$c.sum,type="l")
par(new=TRUE)
plot(Y$year,Y[,2],col="blue",type="l",axes = FALSE, bty = "n", xlab = "", ylab = "")
axis(side=4, at = pretty(range(Y[,2])))
mtext("Y$width", side=4, line=3)
# Run correlation and linear regression
cor(X$c.sum,Y[,2])
M0=lm(X$c.sum~Y[,2])
summary(M0)
# # # # # # # # # # # # # # # # # # # # # # # #
## PCA and Random Forest
# ID important monthly climate variables
## PCA Analysis
X=Climate[,-1] # drop date
# Choose only up to 2 climate variables (interactive):
Clim=select.list(names(X[,3:ncol(X)]),multiple=TRUE,title="Select Climate Variables of Your interest:",graphics=TRUE)
List=c("year","month")
X=X[,names(X) %in% c(List,Clim)]
# Reshape long -> wide: one row per year, one column per variable*month
X1=recast(X,year~variable+month,id.var=c("year","month"),na.rm=TRUE)
# Convert to time series object, pairing each year with the previous year
X1=ts(X1,frequency=1,start=min(X1$year),end=max(X1$year))
X1=cbind(p=X1,c=lag(X1))
# Convert back to data.frame
X1=as.data.frame(X1)
names(X1)
# Move the current-year ("c.year") column to the front, drop the duplicate
ind=which(colnames(X1)=="c.year")
X1=X1[,c(ind,2:(ind-1),(ind+1):ncol(X1))]
X1=na.omit(X1)
colnames(X1)[1]="year"
# Fit Random Forest to identify and reduce monthly variables prior to PCA
library(randomForest)
## Select Type of Ring Width (interactive)
Y=ring
Ring.list=select.list(names(Y[,2:ncol(Y)]),multiple=TRUE,title="Select Rind Width Series of Your Interest:",graphics=TRUE)
List="year"
Y=Y[,names(Y) %in% c(List,Ring.list)]
#Y=data.frame(Y[,1:2],row.names="year")
## Merge ring width & Climate variables
head(X1);head(Y)
X2=merge(Y,X1,by="year")
X3=X2[,-1]
## Run RF (regression forest; response is detrended ring width ModNegExp)
fit=randomForest(ModNegExp~.,data=X3,confusion=TRUE,ntree=5000,proximity=TRUE,importance=TRUE,na.action=na.omit)
print(fit)
# Variable importance
varImpPlot(fit,cex=0.7,main="Variable Importance")
box(which = "outer", lty = "solid")
# Choose the top 30 most important variables by %IncMSE
# (importance() column "%IncMSE" becomes "X.IncMSE" after data.frame())
rf=data.frame(importance(fit))
rf=data.frame(Var=rownames(rf),rf);rownames(rf)=NULL
rf1=rf[order(-rf$X.IncMSE),,drop=FALSE]
a=rf1[1:30,]
VarNames=as.character(a$Var)
# If you want to Select monthly Climate variables based on RandomForest:
Ring=colnames(X2[2])
Year="year"
X2=X2[,names(X2) %in% c(Year,Ring,VarNames)]
# fit PCA on the RF-selected climate columns (quanti.sup indexes columns of
# the data actually passed to PCA -- TODO confirm intended column)
library(FactoMineR)
RES=PCA(X2[,3:ncol(X2)],quanti.sup=2,scale.unit=TRUE,graph=FALSE,ncp=33)
RES$eig
RES$var$cor
Score=RES$ind$coord
# Regress ring width on PC scores; stepwise-select dimensions by AIC
X3=cbind(X2[,1:2],Score)
X3=X3[,-1]
library(RcmdrMisc)
M=lm(ModNegExp~., data=X3)
step=stepwise(M,criterion="AIC")
summary(step)
## ----------------------------------------------------------------------------
## https://shenbaba.weebly.com/blog/how-to-use-the-pac-measure-in-consensus-clustering
## https://dx.doi.org/10.13140/RG.2.1.5075.8480
#' Estimate the optimal number of clusters via consensus clustering + PAC
#'
#' Runs 'ConsensusClusterPlus' over K = 2..max_k, then scores each K by the
#' PAC (proportion of ambiguous clustering) of its consensus matrix and
#' returns the K with minimal ambiguity. Returns 'NA_real_' when consensus
#' clustering fails.
#'
#' @export
get_optimal_cluster_count <- function(
  x, # Matrix, usu. "pmm" object from 'get_expression_subset()'
  channels = TRUE, # Vector of column names if not TRUE
  max_k = 28,
  seed = 666,
  ConsensusClusterPlus... = list()
)
{
  ## Restrict to the channels of interest before clustering
  x <- x[, channels, drop = FALSE]

  ## Default consensus-clustering arguments; caller overrides win via
  ## 'ConsensusClusterPlus...'
  ccpArgs <- utils::modifyList(
    list(
      d = as.matrix(x),
      maxK = max_k, # Max limit for 'clusterAlg = "hc"' is 28
      reps = 100,
      pItem = 0.8,
      pFeature = 1,
      clusterAlg = "hc",
      distance = "pearson",
      title = "consensus-clusters",
      innerLinkage = "complete",
      seed = seed,
      plot = NULL
    ),
    ConsensusClusterPlus..., keep.null = TRUE)

  ccpRes <- tryCatch({
    do.call(ConsensusClusterPlus::ConsensusClusterPlus, ccpArgs)
  }, error = function(e) { message("\nError: ", e$message); flush.console(); return (NULL) })
  if (is.null(ccpRes))
    return (NA_real_)

  ## PAC (proportion of ambiguous clustering): for each K, the mass of the
  ## consensus-value ECDF falling in the intermediate interval (0.1, 0.9]
  kValues <- 2:max_k
  lowerCut <- 0.1; upperCut <- 0.9 # Threshold defining intermediate sub-interval
  pac <- vapply(kValues,
    function(k)
    {
      cm <- ccpRes[[k]]$consensusMatrix
      cdf <- stats::ecdf(cm[lower.tri(cm)])
      cdf(upperCut) - cdf(lowerCut)
    }, numeric(1))
  names(pac) <- paste0("K = ", kValues)

  ## Optimal K minimizes ambiguity
  kValues[which.min(pac)]
}
## Heavily borrowed from 'cytofkit:::FlowSOM_integrate2cytofkit()'
#' Cluster events with FlowSOM, then consensus-metacluster to k clusters
#'
#' Builds a self-organizing map over 'xdata', metaclusters the SOM codes to
#' 'k' clusters, and maps each event to the metacluster of its SOM node.
#' Returns the per-event cluster vector, or NULL on failure.
#'
#' @export
simple_FlowSOM <- function(
  xdata,
  k,
  flow_seed = NULL,
  ...
)
{
  xdata <- as.matrix(xdata)

  cat("Building SOM..."); utils::flush.console()
  assignments <- tryCatch({
    som <- FlowSOM::SOM(xdata, silent = TRUE, ...)
    cat(". Done.", fill = TRUE)

    cat("Metaclustering to", k, "clusters..."); utils::flush.console()
    metaClusters <- suppressMessages(FlowSOM::metaClustering_consensus(som$codes,
      k = k, seed = flow_seed))
    cat(". Done.", fill = TRUE)

    ## Each event inherits the metacluster of the SOM node it mapped to
    metaClusters[som$mapping[, 1]]
  }, error = function(e) { message("\nError: ", e$message); flush.console(); return (NULL) })

  if (is.null(assignments))
    return (NULL)

  ## Sanity check: exactly one cluster label per event
  if (length(assignments) != NROW(xdata)) {
    message("\nError: FlowSOM failed.")
    return (NULL)
  }

  return (assignments)
}
#' Cluster an expression matrix with one of several algorithms
#'
#' Dispatches to FlowSOM, standalone X-shift (external Java JAR), or a
#' cytofkit2 method ("Rphenograph", "ClusterX", "DensVM"). Returns a
#' per-event cluster-ID vector (for X-shift, with attributes "cluster_graph"
#' and "XshiftOutput" attached).
#'
#' BUG FIX: in the X-shift branch, 'cluster_graph' was previously assigned
#' only on success, so the failure branch's 'structure(...)' referenced an
#' undefined variable and errored instead of returning the single-cluster
#' fallback; it is now initialized to NULL up front. Also, the working
#' directory is now restored via 'on.exit()' even if 'system()' errors.
#'
#' @export
make_clusters <- function(
  x, # Matrix, usu. "pmm" object from 'get_expression_subset()'
  method = c(
    "Rphenograph",
    "FlowSOM",
    "Xshift",
    "ClusterX",
    "DensVM"
  ),
  channels = TRUE, # Vector of column names if not TRUE
  seed = 666,
  ## cytofkit
  cytof_cluster... = list(), Rphenograph_k = 50,
  ## FlowSOM
  FlowSOM_k = 40, estimate_cluster_count = TRUE,
  ## X-shift
  VorteX_path = "./VorteX.jar",
  num_nearest_neighbors = 40,
  Xshift_command = "java -Xmx64G -cp \"%s\" standalone.Xshift -NUM_NEAREST_NEIGHBORS=%d",
  importConfig... = list(),
  tol = 1e-5
)
{
  ## This is necessary for using 'keystone::cordon()', because 1-arg version of 'match.arg()' fails -- TODO
  method <- match.arg(method, formals(make_clusters)$method %>% eval)

  ## Restrict to the channels of interest before clustering
  x <- x[, channels, drop = FALSE]

  cluster_id <- switch(method,
    `FlowSOM` = (function() {
      ### Do FlowSOM clustering
      if (estimate_cluster_count) {
        opt_k <- get_optimal_cluster_count(x)
        ## 'get_optimal_cluster_count()' tends to fail for small data sets, so use minimum 'FlowSOM_k'
        if (is.na(opt_k) || opt_k < FlowSOM_k)
          FlowSOM_k <- max(opt_k, 3, na.rm = TRUE)
      }

      simple_FlowSOM(xdata = x, k = FlowSOM_k, flow_seed = seed)
    })(),

    `Xshift` = (function() {
      set.seed(seed)

      ### Do X-shift clustering
      ## For details v. file "X-shift_standalone_README.txt"

      ## X-shift default arguments; caller overrides via 'importConfig...'
      importConfigArgs <- list(
        CLUSTERING_COLUMNS = paste(seq(NCOL(x)), collapse = ","),
        LIMIT_EVENTS_PER_FILE = -1,
        TRANSFORMATION = "NONE",
        SCALING_FACTOR = 1,
        NOISE_THRESHOLD = 1.0,
        EUCLIDIAN_LENGTH_THRESHOLD = 0.0,
        RESCALE = "NONE",
        QUANTILE = 0.95,
        RESCALE_SEPARATELY = "false"
      )
      importConfigArgs <- utils::modifyList(importConfigArgs, importConfig..., keep.null = TRUE)

      ## Fill the "%NAME%" placeholders in the importConfig template
      ## N.B. Change this to a package file in directory "extdata" when the time comes:
      ic <- readLines(system.file("inst/templates/importConfig.txt", package = "flowpipe"))
      plyr::l_ply(names(importConfigArgs),
        function(a)
        {
          ic <<- stringr::str_replace_all(ic, paste0("%", a, "%"), importConfigArgs[[a]] %>% as.character)
        })

      ## Set up a temporary workspace
      d <- tempdir(check = TRUE) %>% normalizePath(winslash = "/", mustWork = FALSE)
      p <- tempfile(tmpdir = d, fileext = ".fcs") %>% normalizePath(winslash = "/", mustWork = FALSE)
      ## N.B. "Output is written into an automatically created subdir within the current directory named 'out'."
      o <- paste(d, "out", sep = "/")

      ## Create temporary files: FCS data, filled-in config, file list
      p1 <- flowCore::write.FCS(flowCore::flowFrame(as.matrix(x)), p)
      writeLines(ic, paste(d, "importConfig.txt", sep = "/"))
      writeLines(normalizePath(p1, mustWork = FALSE), paste(d, "fcsFileList.txt", sep = "/"))

      XshiftCommand <- sprintf(Xshift_command, normalizePath(VorteX_path, mustWork = FALSE), num_nearest_neighbors)

      ## Run X-shift from the temp dir; restore the working directory even if
      ## 'system()' errors (previously a failure here leaked the cwd change)
      currentWorkingDir <- getwd()
      on.exit(setwd(currentWorkingDir), add = TRUE)
      setwd(d)
      #if (!dir.exists(o)) dir.create(o, recursive = TRUE) # Make sure output directory exists
      XshiftOutput <- system(XshiftCommand, intern = TRUE)
      setwd(currentWorkingDir)

      ## On clustering failure, return single cluster
      #if (!file.exists(paste(o, basename(p1), sep = "/"))) browser()
      cluster_graph <- NULL # Must exist on the failure path below
      if (is.null(attr(XshiftOutput, "status"))) {
        xx <- flowCore::read.FCS(paste(o, basename(p1), sep = "/"))
        ## Are the original & X-shift expression matrices the same except for some tolerance?
        if (!(dplyr::near(flowCore::exprs(xx)[, seq(NCOL(xx) - 1)], x, tol = tol) %>% all))
          warning("Input & X-Shift expression matrices don't match within tolerance")
        cluster_id <- flowCore::exprs(xx)[, "cluster_id"]
        ## This can be plotted;
        ## Examples here: rstudio-pubs-static.s3.amazonaws.com/362044_903076131972463e8fdfcc00885fc9a6.html
        cluster_graph <- igraph::read.graph(paste(o, "mst.gml", sep = "/"), format = c("gml"))
      } else { # Clustering failed
        cluster_id <- rep(0, NROW(x))
      }

      return (structure(cluster_id, cluster_graph = cluster_graph, XshiftOutput = XshiftOutput))
    })(),

    (function() {
      ## N.B. This list is for 'cytofkit2::cytof_cluster()', which provides additional clustering methods:
      cytof_clusterArgs <- list(
        xdata = x,
        method = method,
        Rphenograph_k = Rphenograph_k
      )
      ## N.B. This list is for 'Rphenograph::Rphenograph()', with only the one method:
      # cytof_clusterArgs <- list(
      #   data = x,
      #   k = Rphenograph_k
      # )
      cytof_clusterArgs <- utils::modifyList(cytof_clusterArgs, cytof_cluster..., keep.null = TRUE)

      tryCatch({
        do.call(cytofkit2::cytof_cluster, cytof_clusterArgs)
        # do.call(Rphenograph::Rphenograph, cytof_clusterArgs)
      }, error = function(e) { message("\nError: ", e$message); flush.console(); return (NULL) })
    })()
  )

  cluster_id
}
## Also see:
# clusters_pg <- cytofkit2::cytof_cluster(xdata = e[, channels, drop = FALSE], method = "Rphenograph")
## N.B. Might need to remove duplicate rows beforehand!
#' Cluster each sample separately, then metacluster the cluster centroids
#'
#' Per-sample events (grouped by column "id") are clustered with
#' 'make_clusters()'; each per-sample cluster is reduced to a centroid
#' ('centroid_fun' per channel), the centroids are themselves clustered, and
#' each event inherits its centroid's metacluster. Returns the per-event
#' metacluster vector with attribute "cluster_centroids".
#'
#' @export
make_metaclusters <- function(
  x, # "pmm" object from 'get_expression_subset()'
  channels = TRUE, # Vector of column names if not TRUE
  make_clusters... = list(),
  # make_clusters... = list( # Some useful defaults
  #   Rphenograph_k = 50,
  #   FlowSOM_k = 40, estimate_cluster_count = FALSE,
  #   num_nearest_neighbors = 30
  # ),
  make_metaclusters... = list(), # Passed to 'make_clusters()' for metaclustering step
  centroid_fun = median
)
{
  make_clustersArgs <- list(
    channels = channels
  )

  ## Step 1: cluster each sample's events independently; 'l' is a list of
  ## per-sample cluster-ID vectors, each tagged with its sample ID
  l <- x %>% as.data.frame %>%
    dplyr::group_by(id) %>%
    dplyr::group_map(
      .f = ~ (function(x, y)
      {
        ## Also see 'rlist::list.append()'
        mica <- utils::modifyList(c(make_clustersArgs, list(x = x %>% data.matrix)), make_clusters...,
          keep.null = TRUE)
        ## If 'make_clusters' has been memoised, call the underlying function
        ## directly (the memoised wrapper stores it as '_f' in its environment)
        if (memoise::is.memoised(make_clusters))
          cluster_ids <- do.call(environment(make_clusters)$`_f`, mica)
        else
          cluster_ids <- do.call(make_clusters, mica)
        ## If 'make_clusters()' fails, assign all events to single cluster
        if (is.null(cluster_ids))
          cluster_ids <- rep(1, NROW(x))
        cluster_ids %>% table(dnn = "sample " %_% y) %>% print; utils::flush.console()
        structure(cluster_ids, sample_id = dplyr::pull(y, id))
      })(.x, .y), .keep = TRUE)

  ## Step 2: reduce each (sample, cluster) group to a per-channel centroid
  centroids <- x %>% as.data.frame %>%
    dplyr::select(all_of(c("id", channels %>% as.vector))) %>%
    dplyr::rename(sample_id = id) %>%
    dplyr::mutate(cluster_id = unlist(l)) %>%
    dplyr::relocate(cluster_id, .after = sample_id) %>%
    dplyr::group_by(sample_id, cluster_id) %>%
    dplyr::group_modify(
      .f = ~ (function(x, y)
      {
        d <- x %>% dplyr::select(-c("sample_id", "cluster_id"))
        ## Find centroid for each group
        dplyr::summarize(d, across(everything(), ~ centroid_fun(.x, na.rm = TRUE)))
      })(.x, .y), .keep = TRUE)

  ## Step 3: cluster the centroids themselves (the "metaclustering" step)
  make_metaclustersArgs <- make_clustersArgs
  mica <- utils::modifyList(c(make_metaclustersArgs,
    list(x = centroids %>% data.matrix)), make_metaclusters..., keep.null = TRUE)
  if (memoise::is.memoised(make_clusters))
    centroid_cluster_id <- do.call(environment(make_clusters)$`_f`, mica)
  else
    centroid_cluster_id <- do.call(make_clusters, mica)
  centroid_cluster_id %>% table(dnn = "metaclusters") %>% print

  ## Match metaclusters back to individual events via a join on
  ## (sample_id, cluster_id)
  centroids_clustered <- centroids %>%
    dplyr::ungroup() %>%
    dplyr::mutate(centroid_cluster_id = centroid_cluster_id) %>%
    dplyr::relocate(centroid_cluster_id, .after = cluster_id)

  centroid_cluster_df <- sapply(l,
    function(a) keystone::dataframe(sample_id = attr(a, "sample_id"), cluster_id = a),
    simplify = FALSE) %>%
    purrr::reduce(dplyr::bind_rows) %>%
    dplyr::left_join(centroids_clustered %>% dplyr::select(sample_id, cluster_id, centroid_cluster_id),
      by = c("sample_id", "cluster_id"))

  event_metacluster_id <- centroid_cluster_df$centroid_cluster_id
  if (is.na(event_metacluster_id) %>% any)
    warning("Some events are incorrectly unmatched to centroid clusters")
  event_metacluster_id %>%
    table(useNA = "always", dnn = "event metaclusters") %>% print

  #event_metacluster_id
  ## [11 Jan 2023] Make single relevant return value to move away from 'keystone::cordon()'.
  structure(event_metacluster_id, cluster_centroids = centroids_clustered)
}
#' Summarize per-cluster (or per-event) channel phenotypes of a "pmm" object
#'
#' For each cluster (or each event when 'n = FALSE'), reports which phenotype
#' labels ("-", "d", "+", "++", plus the 'merged_labels' combinations) meet
#' 'label_threshold' for each channel. See the comment block before the
#' return value for the exact shapes of the result.
#'
#' NOTE(review): if 'overall_label_threshold' is NULL, 'comp' is never
#' assigned and the final 'structure(r, comp = comp)' would error -- verify
#' callers always pass a non-NULL value (default is Inf).
#'
#' @export
summary.pmm <- function(
  x, # "pmm" object from 'get_expression_subset()'
  n = NULL, # Cluster numbers: NULL or TRUE for all, FALSE for every event
  which_cluster_set = 1, # If 'attr(x, "cluster_id")' is matrix, pick a column by name or number
  channels = colnames(x),
  merged_labels = list(
    `-/d` = c("-", "d"),
    `+/++` = c("+", "++"),
    `d/+` = c("d", "+"),
    all = c("-", "d", "+", "++")
  ),
  overall_label_threshold = Inf,
  label_threshold = 0.90,
  collapse = ";", expression_level_sep = ",",
  element_names = TRUE,
  as_list = TRUE
)
{
  clusterId <- attr(x, "cluster_id")
  byEvent <- FALSE
  ## 'n = FALSE' switches to per-event mode; TRUE/FALSE both fall through to
  ## "all clusters" (n <- NULL)
  if (is.logical(n)) {
    if (!n) {
      byEvent <- TRUE
    }
    n <- NULL
  }
  if (is.null(attr(x, "cluster_id"))) {
    ## NOTE(review): this assignment is dead code -- 'stop()' on the next
    ## line aborts before 'clusterId' is ever used
    clusterId <- sprintf("%d", seq(NROW(x)))
    stop("PMM object 'x' has no 'cluster_id' attribute")
  }
  if (is.matrix(clusterId))
    clusterId <- clusterId[, which_cluster_set] %>% drop
  if (is.null(n)) {
    n <- clusterId %>% unique # But don't sort, because character-numbers don't stay in numeric order
    if (!byEvent)
      n <- n %>% sort
  }

  pmm <- attr(x, "plus_minus_matrix")[, channels, drop = FALSE]

  ## Compute the overall (all-events) label composition per channel, and drop
  ## channels whose any label exceeds 'overall_label_threshold'
  if (!is.null(overall_label_threshold)) {
    # comp <- (plyr::aaply(pmm, 2, table)/NROW(pmm)) %>%
    #   as.data.frame %>% tibble::rownames_to_column()
    comp <- sapply(pmm, table, simplify = FALSE) %>% purrr::compact() %>%
      { structure(dplyr::bind_rows(.) %>% as.data.frame, row.names = names(.)) } %>%
      data.matrix %>% `/`(NROW(pmm)) %>%
      as.data.frame %>% tibble::rownames_to_column()
    ## Append one column per merged label (e.g. "-/d" = "-" + "d")
    plyr::l_ply(names(merged_labels),
      function(a)
      {
        comp <<- comp %@>% dplyr::rowwise() %@>% dplyr::mutate(
          !!a := sum(!!!rlang::syms(merged_labels[[a]]))
        )
      })
    comp <- comp %>% `rownames<-`(NULL) %>% tibble::column_to_rownames() %>% data.matrix
    overall_channels <- (comp > overall_label_threshold) %>% apply(1, any) %>% `!`
    channels <- names(overall_channels)[overall_channels]
    if (any(!overall_channels))
      warning(sprintf("The following channels are overrepresented in all cells: %s",
        paste(names(overall_channels)[!overall_channels], collapse = " ")))
  }
  ## Here, 'comp' should look something like this:
  #                   -          d          +          ++        -/d      +/++
  # IL-23_p19 0.8236050 0.10851854 0.05529584 0.012580632 0.9321235 0.06787647
  # CD69      0.8493953 0.07843002 0.04751813 0.024656565 0.9278253 0.07217470
  # TGFb      0.8752095 0.08020844 0.02647643 0.018105611 0.9554180 0.04458204
  # IL-17A    0.8639330 0.07175749 0.04820795 0.016101551 0.9356905 0.06430950
  # IL-10     0.8733086 0.07338119 0.04515121 0.008158991 0.9466898 0.05331020
  # CCR7      0.8402868 0.09671154 0.04927721 0.013724493 0.9369983 0.06300171
  # [...]
  ##
  ## The non-'merged_labels' columns should add to 1, i.e. '(rowSums(comp[, 1:4]) == 1) %>% all' is TRUE.
  ## The 'merged_labels' columns should add to their component non-merged columns, e.g. "-/d" = "-" + "d".
  ## 'comp' summarizes the phenotypic composition of all clusters at once as the proportion of each label count
  ## relative to all the events.

  if (byEvent) {
    ## Per-event mode: each event's labels (raw + merged) are gathered per
    ## channel via matrix manipulation rather than per-cluster tabulation
    e <- x[, channels, drop = FALSE]
    pmm <- attr(e, "plus_minus_matrix")
    mpmm <- as.matrix(pmm)
    merges <- sapply(names(merged_labels),
      function(a)
      {
        mpmm %in% merged_labels[[a]] %>% `dim<-`(dim(mpmm)) %>%
          `is.na<-`(. == FALSE) %>% `[<-`(., ., a)
      }, simplify = FALSE)
    allLabels <- c(list(as.list(t(mpmm))), sapply(merges, function(a) as.list(t(a))
      %>% `[<-`(is.na(.), list(NULL)), simplify = FALSE))
    r <- purrr::pmap(allLabels,
      function(...) { as.vector(c(...)) }) %>%
      `names<-`(rep(colnames(pmm), length.out = length(.))) %>%
      keystone::chunk(NCOL(pmm))
    if (!as_list) {
      r <- sapply(r,
        function(l) { sapply(names(l), function(a) a %_% paste(l[[a]], collapse = expression_level_sep)) %>%
          paste(collapse = collapse) }, simplify = FALSE)
    }
  } else {
    ## Per-cluster mode: tabulate label proportions within each cluster and
    ## keep labels meeting 'label_threshold'
    r <- sapply(n, # This doesn't appear to benefit if 'keystone::psapply()' is dropped in here -- it's worse, in fact!
      function(i)
      {
        e <- x[clusterId %in% i, channels, drop = FALSE]
        pmm <- attr(e, "plus_minus_matrix")
        l <- sapply(colnames(pmm),
          function(a)
          {
            comp <- (table(pmm[, a])/NROW(pmm)) %>%
              data.matrix %>% t %>% as.data.frame %>%
              `rownames<-`(a) %>% tibble::rownames_to_column()
            plyr::l_ply(names(merged_labels),
              function(a)
              {
                comp <<- comp %@>% dplyr::rowwise() %@>% dplyr::mutate(
                  !!a := sum(!!!rlang::syms(merged_labels[[a]]))
                )
              })
            comp <- comp %>% keystone::dataframe() %>% tibble::column_to_rownames() %>%
              data.matrix
            ## 'comp' should look something like this:
            #                   -          d         +         ++       -/d       +/++
            # IL-23_p19 0.9394749 0.03492733 0.0209564 0.00464135 0.9744023 0.02559775
            ##
            ## The names of all columns meeting 'label_threshold' (see below) are returned.
            rr <- NULL # Default for channels that meet *none* of the label thresholds
            if (any(comp >= label_threshold)) {
              rr <- colnames(comp)[comp >= label_threshold]
            }
            rr
          }, simplify = FALSE) %>%
          purrr::compact() # Remove any channels that meet *none* of the label thresholds
        if (as_list)
          l
        else
          sapply(names(l), function(a) a %_% paste(l[[a]], collapse = expression_level_sep)) %>%
            paste(collapse = collapse)
      }, simplify = ifelse(as_list, FALSE, TRUE))
  }

  ## Name result elements after clusters (or with caller-supplied names)
  if (!byEvent) {
    if (is.logical(element_names)) {
      if (element_names)
        names(r) <- as.character(n)
    } else if (is.character(element_names)) {
      names(r) <- element_names
    }
  }

  ## If 'as_list = TRUE', 'r' is a list the length of the unique cluster names in the current cluster set,
  ## w/ elements named after the clusters; each element is a sub-list named after the channels/columns of 'x',
  ## whose elements contain all the phenotype names (e.g. "-", "+", "+/++", &c) meeting
  ## the proportion threshold for that channel & cluster. If no phenotype meets the threshold, "" is returned.
  ## If 'as_list = FALSE', 'r' is a list the length of the unique cluster names in the current cluster set,
  ## each of whose elements is a single string displaying a full set of channel phenotypes separated
  ## according to 'collapse' & 'expression_level_sep'.
  structure(r, comp = comp) %>% keystone::add_class("summaryPmm")
}
## usage:
# summary(e[, -1], label_threshold = 0.90, as_list = TRUE)
## S3 generic for phenotype searches. N.B. this masks 'base::search()' (the
## search-path lister); 'search.default' provides the fallback for non-"pmm"
## input, and 'search.pmm' is the real worker.
#' @export
search <- function(x, ...)
  UseMethod("search")
#' Default method for the 'search' generic
#'
#' Falls back to the base function this generic masks, so 'search()' remains
#' usable for listing the attached-package search path.
#'
#' BUG FIX: the previous implementation called 'search(x, ...)', which
#' dispatches right back to this method for any non-"pmm" object and recurses
#' infinitely; it now delegates to 'base::search()' (which takes no
#' arguments, so 'x' and '...' are intentionally ignored).
#'
#' @export
search.default <- function(x, ...)
{
  base::search()
}
## Search plus-minus matrix of "pmm" expression object for channel phenotypes given in 'query', e.g.
## r <- search(e[, analysis_channels], query = c("cd45+/++", "cd3-/d"), summary... = list(which_cluster_set = 1, label_threshold = 0.55))
## Return Value: A vector of names of clusters whose 'query' channels all meet/exceed their 'label_threshold's,
## i.e. each cluster returned is a hit for all the 'query' phenotypes.
#' Search a "pmm" object's cluster phenotypes for a query
#'
#' Each query element is a channel phenotype string such as "cd45+/++";
#' elements may contain "||" for OR-alternatives. A cluster is a hit when it
#' matches ALL query elements (ANDed across elements, ORed across "||"
#' alternatives). Returns cluster names ("character"), a logical vector
#' ("logical"), or the full event-state grid with gates attached ("grid").
#'
#' @export
search.pmm <- function(
  x, # "pmm" object from 'get_expression_subset()'
  query, # Vector of search terms based on channel names
  summary... = list(), # Additional arguments to 'summary.pmm()' or a "summaryPmm" object
  return_type = c("character", "logical", "grid")
)
{
  return_type <- match.arg(return_type)

  ## Reuse a precomputed summary if one was passed in; otherwise compute it
  if (inherits(summary..., "summaryPmm")) {
    sm <- summary...
  } else {
    summaryArgs <- list(
      x = x,
      as_list = TRUE
    )
    summaryArgs <- utils::modifyList(summaryArgs, summary..., keep.null = TRUE)
    sm <- do.call(summary, summaryArgs)
  }
  comp <- attr(sm, "comp")

  ## Enumerate all possible event states as a named logical vector
  ## (names are every channel x label combination, e.g. "CD45+/++")
  template <- expand.grid(rownames(comp), colnames(comp), stringsAsFactors = FALSE) %>%
    plyr::alply(1, function(a) { unlist(a, use.names = FALSE) %>%
      paste(collapse = "") }) %>% unlist(use.names = FALSE) %>%
    { structure(rep(FALSE, length(.)), .Names = .) }

  ## Multiple OR-conditional gates lead to multiple queries that need testing;
  ## find all possible combinations and OR them to test for a hit.
  baseQuery <- stringr::str_split(query, "\\s*\\|\\|\\s*") ## Split query elements by "||"
  allQueries <- expand.grid(baseQuery, stringsAsFactors = FALSE) %>%
    plyr::alply(1, unlist, use.names = FALSE)

  ## Map each query term to the nearest template name by edit distance
  ## ('adist' + 'which.min'). NOTE(review): this is fuzzy matching -- a typo
  ## or unknown term silently matches its closest template entry; confirm
  ## queries against 'names(template)' when results look wrong.
  mm <- lapply(allQueries,
    function(a)
    {
      sapply(a,
        function(b) { adist(b, names(template), fixed = TRUE) %>% which.min }, simplify = TRUE) %>%
        { names(template)[.] }
    })
  ## One logical mask per alternative query, TRUE at the queried states
  tests <- lapply(mm,
    function(a)
    {
      test <- template
      test[a] <- TRUE
      test
    })

  if (return_type == "grid") {
    ## Return the full cluster x state logical matrix, with the gates &
    ## original query attached for downstream gating/plotting
    r <- sapply(sm,
      function(a) {
        event <- template
        event[unlist(lapply(names(a), function(b) paste0(b, a[[b]])))] <- TRUE
        event
      }, simplify = TRUE)

    return (structure(t(r), gates = mm, query = query))
  }

  r <- lapply(sm,
    function(a)
    {
      event <- template
      event[unlist(lapply(names(a), function(b) paste0(b, a[[b]])))] <- TRUE
      ## Does this event/cluster include the same phenotypes as the query?
      Reduce(`||`, sapply(tests, function(b) sum(b & event) == sum(b)))
    }) %>% unlist(use.names = FALSE)

  if (is_invalid(r))
    return (NULL)

  if (return_type == "character")
    return (which(r) %>% as.character)

  r
}
## usage:
# r <- search(e[, analysis_channels], c("cd45+/++", "cd3-/d"), summary... = list(overall_label_threshold = Inf, label_threshold = 0.90))
#' Original regex-based phenotype search (legacy; superseded by 'search.pmm')
#'
#' Builds one case-insensitive regex per query element ("||" alternatives
#' become regex alternation) and keeps clusters where every query element
#' matched at least one channel phenotype. Returns matched cluster names
#' ('ids_only = TRUE') or the matched terms per cluster.
#'
#' @export
search_orig.pmm <- function(
  x, # "pmm" object from 'get_expression_subset()'
  query, # Vector of search terms based on channel names
  query_re = "^(%s)$", # RegEx template for search
  summary... = list(), # Additional arguments to 'summary.pmm()'
  ids_only = TRUE
)
{
  summaryArgs <- list(
    x = x,
    as_list = TRUE
  )
  summaryArgs <- utils::modifyList(summaryArgs, summary..., keep.null = TRUE)
  sm <- do.call(summary, summaryArgs)

  r <- sapply(sm,
    function(a)
    {
      ## Flatten this cluster's channel phenotypes to strings like "CD45+/++"
      test <- sapply(names(a),
        function(b)
        {
          if (all(a[[b]] == "")) return (NULL); paste0(b, a[[b]])
        }, simplify = FALSE) %>% unlist(use.names = FALSE)
      ## This produces a list whose elements have >1-length vectors for each either-or query:
      baseQuery <- stringr::str_split(query, stringr::fixed("||", TRUE))
      ## One anchored, case-insensitive regex per query element; "||"
      ## alternatives collapse into regex alternation
      re <- sapply(baseQuery,
        function(b)
        {
          stringr::regex(sprintf(query_re, paste(rex::escape(b %>% unlist), collapse = "|")),
            ignore_case = TRUE)
        }, simplify = FALSE)
      d <- sapply(re, function(b) stringr::str_subset(test, b), simplify = FALSE)
      ## Were all the query terms found?
      if (length(sapply(d, table, simplify = FALSE) %>% purrr::compact()) == length(baseQuery))
        ## If yes, return all those query terms that were found
        { return (d %>% unlist) }
      NULL
    }, simplify = FALSE) %>% purrr::compact()

  if (is_invalid(r))
    return (NULL)

  if (ids_only)
    return (names(r))

  r
}
## usage:
# r <- search(e[, analysis_channels], c("cd45+/++", "cd3-/d"), summary... = list(overall_label_threshold = Inf, label_threshold = 0.90))
#' @export
merge_clusters <- function(
x, # "pmm" object from 'get_expression_subset()'
clusters, # Named list of cell subsets
channels,
label_threshold,
which_cluster_set = 1, # Column no. or name; NULL or FALSE to set off by-event search
search... = list(),
verbose = TRUE,
leftover_clusters = NULL,
make_gating_poster = FALSE, # Logical, or character path to directory for individual plots
visualize_channels... = list(),
devices = flowpipe:::graphics_devices,
#save_plot_fun = grDevices::pdf, save_plot... = list(compress = FALSE)
save_plot_fun = grDevices::cairo_pdf, save_plot... = list(onefile = TRUE)
)
{
origClusterId <- attr(x, "cluster_id")
byEvent <- FALSE
if (is.null(which_cluster_set) || (is.logical(which_cluster_set) && !which_cluster_set)) {
## N.B. The "event" clusters must be run though 'sprintf()' to prevent exponentiation > 399999.
attr(x, "cluster_id") <- sprintf("%d", seq(NROW(x)))
which_cluster_set <- 1
byEvent <- TRUE
} else if (is.logical(which_cluster_set) && which_cluster_set) {
which_cluster_set <- 1
}
searchArgs <- list(
x = x,
summary... = list(which_cluster_set = which_cluster_set)
)
if (!missing(channels))
searchArgs$summary...$channels <- channels
searchArgs <- utils::modifyList(searchArgs, search..., keep.null = TRUE)
if (byEvent)
searchArgs$summary... <- utils::modifyList(searchArgs$summary..., list(n = FALSE), keep.null = TRUE)
label_thresholds <- structure(
rep(formals(summary.pmm)$label_threshold, length(clusters)),
.Names = names(clusters)
)
if (!missing(label_threshold)) {
if (is_invalid(names(label_threshold)))
names(label_threshold) <- rep("", length(label_threshold))
namedThresholds <- label_threshold[names(label_threshold) != ""]
if (!is_invalid(namedThresholds))
label_thresholds <-
replace(label_thresholds, names(namedThresholds), namedThresholds)
unnamedThresholds <- label_threshold[names(label_threshold) == ""]
if (!is_invalid(unnamedThresholds)) {
indices <- names(label_thresholds) %nin% names(namedThresholds)
label_thresholds[indices] <-
rep(unnamedThresholds, length.out = length(label_thresholds[indices]))
}
}
### Create plots to visually follow a sequence of predefined gates
gating_poster_dir <- NULL
if (is.character(make_gating_poster)) {
gating_poster_dir <- make_gating_poster
make_gating_poster <- TRUE
if (!dir.exists(gating_poster_dir))
dir.create(gating_poster_dir, recursive = TRUE)
}
tictoc::tic("Search clusters")
cc <- NULL
if (make_gating_poster && byEvent) {
## This probably doesn't dispatch on 'summary' alone because of the name/position of the 1st argument
sm <- do.call(summary.pmm, utils::modifyList(searchArgs$summary...,
list(x = x), keep.null = TRUE))
## Prepare data set to proceed through & plot predefined gating sequences
## N.B. For size considerations, I might want to plot inside 'sapply()' & return NULL
cc_grid <- keystone::psapply(names(clusters),
function(a)
{
searchArgs$query <- clusters[[a]]
searchArgs$summary...$label_threshold <- label_thresholds[a]
searchArgs$return_type <- "grid"
searchArgs$summary... <- sm
if (verbose) {
cat(sprintf("Querying for '%s' clusters at event level...", a))
utils::flush.console()
}
r <- do.call(search, searchArgs)
if (verbose) {
cat(". Done.", fill = TRUE); utils::flush.console()
}
r
}, simplify = FALSE)
## Ordering the colnames by decreasing length will prevent e.g. a match between
## "CD4" & "CD45" before the regex search has gotten to "CD45".
re <- stringr::regex(stringr::str_flatten(rex::escape(colnames(x)[colnames(x)
%>% nchar %>% order(decreasing = TRUE)]), "|"))
`cc+grobs` <- keystone::psapply(seq_along(cc_grid), # So 'a' can be used for numbering plots
function(a)
{
chunks <- sapply(attr(cc_grid[[a]], "gates"), function(b) keystone::chunk(b, 2), simplify = FALSE)
## The first "chunk" will have the same no. of elements as all the others:
tests <- sapply(seq_along(chunks[[1]]),
function(b) sapply(chunks, function(g) g[[b]], simplify = FALSE) %>%
unique, simplify = FALSE)
query <- attr(cc_grid[[a]], "query")
query_chunks <- keystone::chunk(query, 2)
cat(sprintf("%s:", names(cc_grid)[a]), fill = TRUE); print(query); utils::flush.console()
gated_events <- rep(TRUE, NROW(cc_grid[[a]]))
flit <- sapply(seq_along(tests), # So 'b' can be used for numbering plots
function(b)
{
## 'NCOL(.)' handles the case where the test matrix has only one column:
r <- Reduce(`|`, sapply(tests[[b]], function(g) { cc_grid[[a]][, g, drop = FALSE] %>% { `&`(.[, 1], .[, NCOL(.)]) } },
simplify = FALSE), accumulate = TRUE)
## For each list element, create a biaxial plot
grobs <- mapply(function(k, l)
{
plot_channels <- stringr::str_match_all(paste(k, collapse = " "), re)[[1]] %>% drop %>% unique
## N.B. Uncomment 'event_mask' just below to plot only events selected by the previous gate:
visualize_channelsArgs <- list(
x = x,
channels = list(gated_events & r[[l]]),
event_mask = gated_events,
extract_gating_channels = function(...) plot_channels,
points... = list(col = scales::alpha("red", 0.5)),
plot_end_callback = function(...) { # A function will carry its environment along w/ itself
graphics::title(main = sprintf("Gate: %s", paste(query_chunks[[b]], collapse = " & ")), cex.main = 0.9, ...)
if (l > 1) graphics::mtext("(OR'd with previous gate)", cex = 0.9)
graphics::mtext(sprintf("Events: %d/%d", sum(gated_events & r[[l]]), sum(gated_events)),
side = 1, line = -1, cex = 0.8)
}
)
visualize_channelsArgs <-
utils::modifyList(visualize_channelsArgs, visualize_channels..., keep.null = TRUE)
grobs <- list()
if (!is.null(gating_poster_dir)) {
# plyr::l_ply(seq_along(devices),
# function(d)
# {
# ext <- devices[[d]]$ext; devices[[d]]$ext <- NULL
# ## Reduce resolution for 'png()' etc. to a manageable value:
# if ("res" %in% names(formals(eval(parse(text = names(devices)[d]))))) devices[[d]]$res <- 150
# do.call(eval(parse(text = names(devices)[d])),
# modifyList(devices[[d]],
# list(
# width = 5, height = 5,
# file = sprintf("%s/%03d-%03d%s_gate-%s",
# gating_poster_dir, a, b, letters[l], paste(plot_channels, collapse = "&")) %_% paste0(".", ext)
# )
# )
# )
# dev.control(displaylist = "enable")
# do.call(visualize_channels, visualize_channelsArgs)
# if (d == length(devices)) {
# grobs <<- append(grobs, list(grDevices::recordPlot()))
# }
# dev.off()
# })
gatePlotPath <- tempfile()
grDevices::png(file = gatePlotPath, bg = "transparent")
dev.control(displaylist = "enable")
do.call(visualize_channels, visualize_channelsArgs)
gatePlot <- grDevices::recordPlot()
invisible(dev.off())
unlink(gatePlotPath)
grobs <- append(grobs, list(gatePlot))
} else {
do.call(visualize_channels, visualize_channelsArgs)
}
grobs
}, tests[[b]], seq_along(r), USE.NAMES = TRUE, SIMPLIFY = FALSE)
gated_events <<- gated_events & r[[length(r)]]
print(table(gated_events)); utils::flush.console()
grobs
}, simplify = FALSE)
list(gated_events = gated_events, grobs = flit %>% purrr::flatten())
}, simplify = FALSE)
grobs <- NULL
if (!is.null(gating_poster_dir)) {
grobs <- sapply(`cc+grobs`, function(a) a$grobs, simplify = FALSE) %>% `names<-`(names(cc_grid))
## Keep list of grobs for e.g. single plots, different image types:
saveRDS(object = grobs, file = paste(data_dir, "gated-clusters-poster.rds", sep = "/"))
}
cc <- sapply(`cc+grobs`, function(a) a$gated_events, simplify = FALSE) %>%
sapply(function(a) { as.vector(a) %>% which %>% as.character }, simplify = FALSE) %>% `names<-`(names(cc_grid))
rm(`cc+grobs`)
## Finally, create full gating poster
if (!is.null(grobs)) {
max_gates <- sapply(grobs, length) %>% max
grobs <- sapply(grobs, `length<-`, value = max_gates, simplify = FALSE)
save_plotArgs <- list(
width = min(5.0 * max_gates + 1, 200), # 200 in. is PDF maximum
height = min(5.0 * length(grobs) + 1, 200), # 200 in. is PDF maximum
#file = paste(gating_poster_dir, "gated-clusters-poster.pdf", sep = "/")
filename = paste(gating_poster_dir, "gated-clusters-poster.pdf", sep = "/") # For 'grDevices::cairo_pdf()'
)
save_plotArgs <- utils::modifyList(save_plotArgs, save_plot..., keep.null = TRUE)
do.call(save_plot_fun, save_plotArgs)
## Create a blank plot for empty grid cells (but not needed for 'cowplot::plot_grid()')
if (FALSE) {
blankPath <- tempfile()
grDevices::png(file = blankPath, bg = "transparent")
dev.control(displaylist = "enable")
plot.new()
blank <- grDevices::recordPlot()
invisible(dev.off())
unlink(blankPath)
}
cowplot::plot_grid(
## This creates a list of "recordedplot" objects:
#plotlist = sapply(grobs %>% purrr::flatten(), function(a) if (is.null(a)) list(blank) else a),
plotlist = sapply(grobs %>% purrr::flatten(), function(a) if (is.null(a)) list(NULL) else a),
ncol = max_gates,
hjust = 0, label_x = 0.01,
labels = rep("", max_gates * length(grobs)) %>%
`[<-`(seq(from = 1, by = max_gates, length.out = length(grobs)), names(grobs)),
#label_colour = "darkgreen",
label_size = 16
) %>% print
dev.off()
## Convert PDF to PNG
suppressWarnings(pdftools::pdf_convert(
pdf = save_plotArgs$file,
format = "png",
dpi = 100,
filenames = sprintf("%s.png", tools::file_path_sans_ext(save_plotArgs$file))
))
}
}
if (is.null(cc)) {
cc <- keystone::psapply(names(clusters),
function(a)
{
searchArgs$query <- clusters[[a]]
searchArgs$summary...$label_threshold <- label_thresholds[a]
if (verbose) {
if (!byEvent)
cat(sprintf("Querying for '%s' clusters at %0.2f threshold...", a,
searchArgs$summary...$label_threshold))
else
cat(sprintf("Querying for '%s' clusters at event level...", a))
utils::flush.console()
}
if (byEvent)
searchArgs$summary... <- sm
r <- do.call(search, searchArgs)
if (verbose) {
cat(". Done.", fill = TRUE); utils::flush.console()
}
r
}, simplify = FALSE)
}
tictoc::toc()
cc0 <- cc[sapply(cc, is.null)]
if (length(cc0) > 0)
warning(sprintf("Clusters %s were not found", cc0 %>% names %>% sQuote %>% paste(collapse = ", ")))
cc1 <- cc %>% purrr::compact()
clusterId <- attr(x, "cluster_id")
if (is.matrix(clusterId))
clusterId <- clusterId[, which_cluster_set]
merged_clusters <- list(
new_cluster_id = clusterId,
orig_cluster_id = origClusterId
)
if (is_invalid(cc1)) { # No new clusters found
if (byEvent)
merged_clusters <- list(new_cluster_id = origClusterId, orig_cluster_id = origClusterId)
return (merged_clusters)
}
## Create 'replace()' arguments
replaceArgss <- sapply(names(cc1),
function(a)
{
list(
list = clusterId %in% cc1[[a]],
value = a
)
}, simplify = FALSE)
newClusterId <- sapply(replaceArgss,
function(a)
{
r <- replace(merged_clusters$new_cluster_id, a$list %>% which, a$value) %>%
replace((!a$list) %>% which, NA_character_)
r
}, simplify = TRUE)
## Now collapse all the mutually exclusive columns together
newMergedClusterId <- merge_mutually_exclusive_cols(newClusterId) %>%
cbind(orig = merged_clusters$orig_cluster_id, .)
## N.B. If 'merged_clusters$orig_cluster_id' is already a matrix, the name "orig" is unused.
merged_clusters$new_cluster_id <- newMergedClusterId
merged_clusters
}
#' Merge mutually exclusive columns of a character matrix
#'
#' Repeatedly scans all column pairs of the input & merges any pair whose
#' non-NA entries never occur in the same row (i.e. the columns are mutually
#' exclusive), until no further merges are possible. A merged column's name is
#' the 'collapse'-separated combination of its source columns' names.
#'
#' Fixes vs. previous version: removed leftover debug 'print(a)'; removed the
#' unnecessary 'rlang::duplicate()' deep copy ('d0' was never reused, & R's
#' copy-on-modify already protects the inputs); guarded against the crash when
#' all columns merge into one ('combn(1, 2)' errored on loop re-entry); kept
#' 'drop = FALSE' so the final merged column retains its name.
#'
#' @param ... Combination of matrices/vectors having the same no. rows/length.
#' @param collapse Separator used to combine the names of merged columns.
#' @return A matrix whose mutually exclusive columns have been collapsed
#'   together. Inputs with fewer than 3 columns are returned unchanged
#'   (apparent design choice, preserved here).
merge_mutually_exclusive_cols <- function(
  ..., # Combination of matrices/vectors having the same no. rows/length
  collapse = "|"
)
{
  d <- cbind(...)
  ## By design, 1- & 2-column inputs are returned unmerged
  if (NCOL(d) < 3)
    return (d)

  repeat {
    ## Nothing left to pair up; also prevents 'combn(1, 2)' erroring after
    ## every column has been merged into a single one
    if (NCOL(d) < 2)
      break

    didMerge <- FALSE
    startNcol <- NCOL(d)
    for (a in utils::combn(seq_len(NCOL(d)), 2, simplify = FALSE)) {
      ## Columns are mutually exclusive iff no row has non-NA values in both
      bothPresent <- rowSums(!is.na(d[, a, drop = FALSE])) == 2
      if (!any(bothPresent)) {
        ## Merge the pair into column a[1], filling in from column a[2]
        src <- d[, a[2]]
        merged <- d[, a[1]]
        merged[!is.na(src)] <- src[!is.na(src)]
        d[, a[1]] <- merged
        ## Name of new column becomes a combination of both starting columns
        colnames(d)[a[1]] <- paste(colnames(d)[a], collapse = collapse)
        ## 'drop = FALSE' keeps a (named) matrix even at a single column
        d <- d[, -a[2], drop = FALSE]
        ## Don't ever finish right after a merge; check for mergeable columns
        ## at least once more.
        didMerge <- TRUE

        break
      }
    }

    ## Finish if no. cols are at minimum or are unchanged, & no merge just happened
    if ((NCOL(d) < 3 || NCOL(d) == startNcol) && !didMerge)
      break
  }

  d
}
#merge_mutually_exclusive_cols(orig = cluster_id, new_cluster)
#merge_mutually_exclusive_cols(new_cluster)
## https://shenbaba.weebly.com/blog/how-to-use-the-pac-measure-in-consensus-clustering
## https://dx.doi.org/10.13140/RG.2.1.5075.8480
#' Estimate the optimal cluster count via consensus clustering & PAC
#'
#' Runs 'ConsensusClusterPlus' over k = 2..max_k, then computes the PAC
#' (proportion of ambiguous clustering) score for each k & returns the k that
#' minimizes it.
#'
#' @param x Matrix, usu. "pmm" object from 'get_expression_subset()'.
#' @param channels Vector of column names to use, or TRUE for all.
#' @param max_k Largest cluster count to evaluate (28 is the maximum for
#'   'clusterAlg = "hc"').
#' @param seed Random seed passed to the consensus-clustering run.
#' @param ConsensusClusterPlus... List of overrides for the default
#'   'ConsensusClusterPlus::ConsensusClusterPlus()' arguments.
#' @return The optimal k as a scalar, or NA_real_ if clustering failed.
#' @export
get_optimal_cluster_count <- function(
  x, # Matrix, usu. "pmm" object from 'get_expression_subset()'
  channels = TRUE, # Vector of column names if not TRUE
  max_k = 28,
  seed = 666,
  ConsensusClusterPlus... = list()
)
{
  x <- x[, channels, drop = FALSE]

  ## Default consensus-clustering arguments, overridable via 'ConsensusClusterPlus...'
  ccpArgs <- utils::modifyList(
    list(
      d = as.matrix(x),
      maxK = max_k, # Max limit for 'clusterAlg = "hc"' is 28
      reps = 100,
      pItem = 0.8,
      pFeature = 1,
      clusterAlg = "hc",
      distance = "pearson",
      title = "consensus-clusters",
      innerLinkage = "complete",
      seed = seed,
      plot = NULL
    ),
    ConsensusClusterPlus..., keep.null = TRUE)

  ccpRes <- tryCatch({
    do.call(ConsensusClusterPlus::ConsensusClusterPlus, ccpArgs)
  }, error = function(e) { message("\nError: ", e$message); flush.console(); return (NULL) })

  if (is.null(ccpRes))
    return (NA_real_)

  ## PAC (proportion of ambiguous clustering) implementation
  ks <- 2:max_k
  lowerCut <- 0.1; upperCut <- 0.9 # Threshold defining intermediate sub-interval
  pac <- structure(rep(NA, length(ks)), .Names = paste0("K = ", ks))
  for (k in ks) {
    consensusMat <- ccpRes[[k]]$consensusMatrix
    Fn <- stats::ecdf(consensusMat[lower.tri(consensusMat)])
    ## PAC = mass of the consensus CDF falling in the "ambiguous" interval
    pac[k - 1] <- Fn(upperCut) - Fn(lowerCut)
  }

  ## Optimal K value: the k minimizing PAC
  ks[which.min(pac)]
}
## Heavily borrowed from 'cytofkit:::FlowSOM_integrate2cytofkit()'
#' Cluster an expression matrix with FlowSOM + consensus metaclustering
#'
#' Builds a self-organizing map over 'xdata', consensus-metaclusters the SOM
#' codes into 'k' groups, & maps every event to its metacluster.
#'
#' @param xdata Numeric matrix (or coercible) of events x channels.
#' @param k Number of metaclusters.
#' @param flow_seed Seed for the consensus metaclustering (NULL for none).
#' @param ... Further arguments passed to 'FlowSOM::SOM()'.
#' @return Integer-like vector of cluster assignments (one per row of
#'   'xdata'), or NULL if the SOM/metaclustering step failed.
#' @export
simple_FlowSOM <- function(
  xdata,
  k,
  flow_seed = NULL,
  ...
)
{
  xdata <- as.matrix(xdata)

  cat("Building SOM..."); utils::flush.console()
  assignments <- tryCatch({
    som <- FlowSOM::SOM(xdata, silent = TRUE, ...)
    cat(". Done.", fill = TRUE)

    cat("Metaclustering to", k, "clusters..."); utils::flush.console()
    metaIds <- suppressMessages(FlowSOM::metaClustering_consensus(som$codes,
      k = k, seed = flow_seed))
    cat(". Done.", fill = TRUE)

    ## Map each event through its SOM node to that node's metacluster
    metaIds[som$mapping[, 1]]
  }, error = function(e) { message("\nError: ", e$message); flush.console(); NULL })

  ## Sanity check: one assignment per event, else report failure
  if (!is.null(assignments) && length(assignments) != NROW(xdata)) {
    message("\nError: FlowSOM failed.")
    return (NULL)
  }

  return (assignments)
}
#' Cluster the events of an expression matrix
#'
#' Dispatches to one of several clustering back ends: FlowSOM (with optional
#' PAC-based cluster-count estimation), the external X-shift/VorteX Java tool,
#' or the cytofkit2 methods (Rphenograph, ClusterX, DensVM).
#'
#' Fix vs. previous version: in the X-shift branch, 'cluster_graph' was only
#' assigned on the success path but referenced unconditionally in the return
#' value, so a failed clustering run raised "object 'cluster_graph' not found"
#' instead of returning the all-zero cluster vector. It is now initialized to
#' NULL. The working-directory change around the external call is also now
#' protected with 'on.exit()'.
#'
#' @param x Matrix, usu. "pmm" object from 'get_expression_subset()'.
#' @param method Clustering method name (see choices below).
#' @param channels Vector of column names to cluster on, or TRUE for all.
#' @param seed Random seed (FlowSOM & X-shift branches).
#' @param cytof_cluster... Extra arguments to 'cytofkit2::cytof_cluster()'.
#' @param Rphenograph_k Nearest-neighbor count for Rphenograph.
#' @param FlowSOM_k Metacluster count for FlowSOM.
#' @param estimate_cluster_count Estimate k via 'get_optimal_cluster_count()'?
#' @param VorteX_path,num_nearest_neighbors,Xshift_command,importConfig...
#'   X-shift configuration (external Java tool).
#' @param tol Numeric tolerance for the X-shift round-trip sanity check.
#' @return Vector of per-event cluster IDs; for X-shift, with attributes
#'   "cluster_graph" (NULL on failure) & "XshiftOutput". NULL if the chosen
#'   back end failed.
#' @export
make_clusters <- function(
  x, # Matrix, usu. "pmm" object from 'get_expression_subset()'
  method = c(
    "Rphenograph",
    "FlowSOM",
    "Xshift",
    "ClusterX",
    "DensVM"
  ),
  channels = TRUE, # Vector of column names if not TRUE
  seed = 666,
  ## cytofkit
  cytof_cluster... = list(), Rphenograph_k = 50,
  ## FlowSOM
  FlowSOM_k = 40, estimate_cluster_count = TRUE,
  ## X-shift
  VorteX_path = "./VorteX.jar",
  num_nearest_neighbors = 40,
  Xshift_command = "java -Xmx64G -cp \"%s\" standalone.Xshift -NUM_NEAREST_NEIGHBORS=%d",
  importConfig... = list(),
  tol = 1e-5
)
{
  ## This is necessary for using 'keystone::cordon()', because 1-arg version of 'match.arg()' fails -- TODO
  method <- match.arg(method, formals(make_clusters)$method %>% eval)
  x <- x[, channels, drop = FALSE]

  cluster_id <- switch(method,
    `FlowSOM` = (function() {
      ### Do FlowSOM clustering
      if (estimate_cluster_count) {
        opt_k <- get_optimal_cluster_count(x)
        ## 'get_optimal_cluster_count()' tends to fail for small data sets, so use minimum 'FlowSOM_k'
        ## NOTE(review): estimates >= 'FlowSOM_k' are ignored & the default kept -- confirm intended.
        if (is.na(opt_k) || opt_k < FlowSOM_k)
          FlowSOM_k <- max(opt_k, 3, na.rm = TRUE)
      }

      simple_FlowSOM(xdata = x, k = FlowSOM_k, flow_seed = seed)
    })(),

    `Xshift` = (function() {
      set.seed(seed)

      ### Do X-shift clustering
      ## For details v. file "X-shift_standalone_README.txt"

      ## X-shift default arguments
      importConfigArgs <- list(
        CLUSTERING_COLUMNS = paste(seq(NCOL(x)), collapse = ","),
        LIMIT_EVENTS_PER_FILE = -1,
        TRANSFORMATION = "NONE",
        SCALING_FACTOR = 1,
        NOISE_THRESHOLD = 1.0,
        EUCLIDIAN_LENGTH_THRESHOLD = 0.0,
        RESCALE = "NONE",
        QUANTILE = 0.95,
        RESCALE_SEPARATELY = "false"
      )
      importConfigArgs <- utils::modifyList(importConfigArgs, importConfig..., keep.null = TRUE)

      ## N.B. Change this to a package file in directory "extdata" when the time comes:
      ## NOTE(review): "inst/" paths only resolve in a dev checkout; for an installed
      ## package 'system.file("templates/...")' would be needed -- confirm deployment.
      ic <- readLines(system.file("inst/templates/importConfig.txt", package = "flowpipe"))
      ## Fill each '%NAME%' placeholder in the template with its configured value
      plyr::l_ply(names(importConfigArgs),
        function(a)
        {
          ic <<- stringr::str_replace_all(ic, paste0("%", a, "%"), importConfigArgs[[a]] %>% as.character)
        })

      ## Set up a temporary workspace
      d <- tempdir(check = TRUE) %>% normalizePath(winslash = "/", mustWork = FALSE)
      p <- tempfile(tmpdir = d, fileext = ".fcs") %>% normalizePath(winslash = "/", mustWork = FALSE)
      ## N.B. "Output is written into an automatically created subdir within the current directory named 'out'."
      o <- paste(d, "out", sep = "/")

      ## Create temporary files
      p1 <- flowCore::write.FCS(flowCore::flowFrame(as.matrix(x)), p)
      writeLines(ic, paste(d, "importConfig.txt", sep = "/"))
      writeLines(normalizePath(p1, mustWork = FALSE), paste(d, "fcsFileList.txt", sep = "/"))

      XshiftCommand <- sprintf(Xshift_command, normalizePath(VorteX_path, mustWork = FALSE), num_nearest_neighbors)

      ## Run the external X-shift tool from the temp dir; restore the working
      ## directory even if the call errors
      currentWorkingDir <- getwd()
      setwd(d)
      on.exit(setwd(currentWorkingDir), add = TRUE)
      #if (!dir.exists(o)) dir.create(o, recursive = TRUE) # Make sure output directory exists
      XshiftOutput <- system(XshiftCommand, intern = TRUE)
      setwd(currentWorkingDir)

      ## On clustering failure, return single cluster
      #if (!file.exists(paste(o, basename(p1), sep = "/"))) browser()
      cluster_graph <- NULL # Default when clustering fails (was previously undefined on that path)
      if (is.null(attr(XshiftOutput, "status"))) {
        xx <- flowCore::read.FCS(paste(o, basename(p1), sep = "/"))
        ## Are the original & X-shift expression matrices the same except for some tolerance?
        if (!(dplyr::near(flowCore::exprs(xx)[, seq(NCOL(xx) - 1)], x, tol = tol) %>% all))
          warning("Input & X-Shift expression matrices don't match within tolerance")
        cluster_id <- flowCore::exprs(xx)[, "cluster_id"]
        ## This can be plotted;
        ## Examples here: rstudio-pubs-static.s3.amazonaws.com/362044_903076131972463e8fdfcc00885fc9a6.html
        cluster_graph <- igraph::read.graph(paste(o, "mst.gml", sep = "/"), format = c("gml"))
      } else { # Clustering failed
        cluster_id <- rep(0, NROW(x))
      }

      return (structure(cluster_id, cluster_graph = cluster_graph, XshiftOutput = XshiftOutput))
    })(),

    (function() {
      ## N.B. This list is for 'cytofkit2::cytof_cluster()', which provides additional clustering methods:
      cytof_clusterArgs <- list(
        xdata = x,
        method = method,
        Rphenograph_k = Rphenograph_k
      )
      cytof_clusterArgs <- utils::modifyList(cytof_clusterArgs, cytof_cluster..., keep.null = TRUE)

      tryCatch({
        do.call(cytofkit2::cytof_cluster, cytof_clusterArgs)
      }, error = function(e) { message("\nError: ", e$message); flush.console(); return (NULL) })
    })()
  )

  cluster_id
}
## Also see:
# clusters_pg <- cytofkit2::cytof_cluster(xdata = e[, channels, drop = FALSE], method = "Rphenograph")
## N.B. Might need to remove duplicate rows beforehand!
#' Cluster each sample separately, then metacluster the per-sample centroids
#'
#' Events are first clustered within each sample (grouped by the 'id' column of
#' 'x') via 'make_clusters()'. Each sample-level cluster is reduced to a
#' per-channel centroid ('centroid_fun'), the centroids are clustered again
#' ("metaclustered"), & every event inherits the metacluster ID of its
#' sample-level cluster's centroid.
#'
#' @param x "pmm" object from 'get_expression_subset()'; must have an 'id'
#'   column identifying each event's source sample.
#' @param channels Vector of column names to cluster on, or TRUE for all.
#' @param make_clusters... Arguments passed to 'make_clusters()' for the
#'   per-sample clustering step.
#' @param make_metaclusters... Arguments passed to 'make_clusters()' for the
#'   centroid-metaclustering step.
#' @param centroid_fun Summary function (must accept 'na.rm') used to compute
#'   each cluster's per-channel centroid; default 'median'.
#' @return Vector of metacluster IDs, one per event/row of 'x', with the
#'   clustered centroid table attached as attribute "cluster_centroids".
#' @export
make_metaclusters <- function(
  x, # "pmm" object from 'get_expression_subset()'
  channels = TRUE, # Vector of column names if not TRUE
  make_clusters... = list(),
  # make_clusters... = list( # Some useful defaults
  #   Rphenograph_k = 50,
  #   FlowSOM_k = 40, estimate_cluster_count = FALSE,
  #   num_nearest_neighbors = 30
  # ),
  make_metaclusters... = list(), # Passed to 'make_clusters()' for metaclustering step
  centroid_fun = median
)
{
  make_clustersArgs <- list(
    channels = channels
  )

  ## Step 1: cluster each sample independently. 'l' is a list w/ one element
  ## per sample: a per-event cluster-ID vector tagged w/ its sample ID.
  l <- x %>% as.data.frame %>%
    dplyr::group_by(id) %>%
    dplyr::group_map(
      .f = ~ (function(x, y) # 'x': one sample's events; 'y': one-row group key w/ 'id'
      {
        ## Also see 'rlist::list.append()'
        mica <- utils::modifyList(c(make_clustersArgs, list(x = x %>% data.matrix)), make_clusters...,
          keep.null = TRUE)
        ## If 'make_clusters' has been memoised, call the underlying function
        ## directly via its '_f' binding (bypassing the cache layer)
        if (memoise::is.memoised(make_clusters))
          cluster_ids <- do.call(environment(make_clusters)$`_f`, mica)
        else
          cluster_ids <- do.call(make_clusters, mica)
        ## If 'make_clusters()' fails, assign all events to single cluster
        if (is.null(cluster_ids))
          cluster_ids <- rep(1, NROW(x))
        ## Progress report: per-sample cluster sizes
        cluster_ids %>% table(dnn = "sample " %_% y) %>% print; utils::flush.console()
        structure(cluster_ids, sample_id = dplyr::pull(y, id))
      })(.x, .y), .keep = TRUE)

  ## Step 2: compute per-channel centroids for every (sample, cluster) pair
  centroids <- x %>% as.data.frame %>%
    dplyr::select(all_of(c("id", channels %>% as.vector))) %>%
    dplyr::rename(sample_id = id) %>%
    dplyr::mutate(cluster_id = unlist(l)) %>%
    dplyr::relocate(cluster_id, .after = sample_id) %>%
    dplyr::group_by(sample_id, cluster_id) %>%
    dplyr::group_modify(
      .f = ~ (function(x, y)
      {
        d <- x %>% dplyr::select(-c("sample_id", "cluster_id"))
        ## Find centroid for each group
        dplyr::summarize(d, across(everything(), ~ centroid_fun(.x, na.rm = TRUE)))
      })(.x, .y), .keep = TRUE)

  ## Step 3: cluster the centroids themselves ("metaclustering")
  make_metaclustersArgs <- make_clustersArgs
  mica <- utils::modifyList(c(make_metaclustersArgs,
    list(x = centroids %>% data.matrix)), make_metaclusters..., keep.null = TRUE)
  if (memoise::is.memoised(make_clusters))
    centroid_cluster_id <- do.call(environment(make_clusters)$`_f`, mica)
  else
    centroid_cluster_id <- do.call(make_clusters, mica)
  centroid_cluster_id %>% table(dnn = "metaclusters") %>% print

  ## Match metaclusters back to individual events
  centroids_clustered <- centroids %>%
    dplyr::ungroup() %>%
    dplyr::mutate(centroid_cluster_id = centroid_cluster_id) %>%
    dplyr::relocate(centroid_cluster_id, .after = cluster_id)

  ## Step 4: join each event's (sample, cluster) pair to its centroid's
  ## metacluster ID; 'bind_rows' over 'l' preserves the original event order
  centroid_cluster_df <- sapply(l,
    function(a) keystone::dataframe(sample_id = attr(a, "sample_id"), cluster_id = a),
    simplify = FALSE) %>%
    purrr::reduce(dplyr::bind_rows) %>%
    dplyr::left_join(centroids_clustered %>% dplyr::select(sample_id, cluster_id, centroid_cluster_id),
      by = c("sample_id", "cluster_id"))

  event_metacluster_id <- centroid_cluster_df$centroid_cluster_id
  ## Every event should have matched a centroid; NAs indicate a join problem
  if (is.na(event_metacluster_id) %>% any)
    warning("Some events are incorrectly unmatched to centroid clusters")
  event_metacluster_id %>%
    table(useNA = "always", dnn = "event metaclusters") %>% print
  #event_metacluster_id

  ## [11 Jan 2023] Make single relevant return value to move away from 'keystone::cordon()'.
  structure(event_metacluster_id, cluster_centroids = centroids_clustered)
}
#' Summarize the phenotypic composition of clusters (or single events)
#'
#' For each requested cluster (or for every event when 'n = FALSE'), determine
#' which expression-level labels ("-", "d", "+", "++", plus the combinations in
#' 'merged_labels') each channel carries, based on the "plus_minus_matrix"
#' attribute of 'x'.
#'
#' @param x "pmm" object from 'get_expression_subset()'; must carry
#'   "cluster_id" & "plus_minus_matrix" attributes.
#' @param n Cluster numbers: NULL or TRUE for all clusters, FALSE to summarize
#'   every event individually.
#' @param which_cluster_set If 'attr(x, "cluster_id")' is a matrix, pick a
#'   column by name or number.
#' @param channels Channels (columns) to summarize.
#' @param merged_labels Named list of label combinations whose proportions are
#'   summed, e.g. "-/d" = "-" + "d".
#' @param overall_label_threshold Channels whose label proportion exceeds this
#'   value across *all* events are dropped with a warning; the default 'Inf'
#'   disables dropping but still computes the "comp" matrix.
#'   NOTE(review): if set to NULL, 'comp' is never created & the final
#'   'structure(r, comp = comp)' would fail -- confirm intended usage.
#' @param label_threshold Minimum within-cluster proportion for a label to be
#'   reported for a channel.
#' @param collapse,expression_level_sep Separators used when 'as_list = FALSE'.
#' @param element_names TRUE to name the result by cluster number, or a
#'   character vector of replacement names.
#' @param as_list Return a nested list (TRUE) or collapsed label strings.
#' @return "summaryPmm" object (see the comments near the end of the body for
#'   the exact shape), with the label-proportion matrix as attribute "comp".
#' @export
summary.pmm <- function(
  x, # "pmm" object from 'get_expression_subset()'
  n = NULL, # Cluster numbers: NULL or TRUE for all, FALSE for every event
  which_cluster_set = 1, # If 'attr(x, "cluster_id")' is matrix, pick a column by name or number
  channels = colnames(x),
  merged_labels = list(
    `-/d` = c("-", "d"),
    `+/++` = c("+", "++"),
    `d/+` = c("d", "+"),
    all = c("-", "d", "+", "++")
  ),
  overall_label_threshold = Inf,
  label_threshold = 0.90,
  collapse = ";", expression_level_sep = ",",
  element_names = TRUE,
  as_list = TRUE
)
{
  clusterId <- attr(x, "cluster_id")
  byEvent <- FALSE
  ## Logical 'n' is a mode switch, not a cluster selection:
  ## FALSE => per-event summaries; TRUE => all clusters (same as NULL)
  if (is.logical(n)) {
    if (!n) {
      byEvent <- TRUE
    }
    n <- NULL
  }
  if (is.null(attr(x, "cluster_id"))) {
    ## NOTE(review): this assignment has no effect -- 'stop()' aborts immediately
    clusterId <- sprintf("%d", seq(NROW(x)))
    stop("PMM object 'x' has no 'cluster_id' attribute")
  }
  if (is.matrix(clusterId))
    clusterId <- clusterId[, which_cluster_set] %>% drop
  if (is.null(n)) {
    n <- clusterId %>% unique # But don't sort, because character-numbers don't stay in numeric order
    if (!byEvent)
      n <- n %>% sort
  }

  pmm <- attr(x, "plus_minus_matrix")[, channels, drop = FALSE]

  ## Build 'comp', the overall label-proportion matrix (channels x labels),
  ## & optionally drop channels overrepresented across all events
  if (!is.null(overall_label_threshold)) {
    # comp <- (plyr::aaply(pmm, 2, table)/NROW(pmm)) %>%
    #   as.data.frame %>% tibble::rownames_to_column()
    ## Per-channel label counts -> proportions.
    ## NOTE(review): 'sapply(pmm, table)' assumes 'pmm' is a data frame
    ## (iterating columns) -- confirm "plus_minus_matrix" is never a matrix here.
    comp <- sapply(pmm, table, simplify = FALSE) %>% purrr::compact() %>%
      { structure(dplyr::bind_rows(.) %>% as.data.frame, row.names = names(.)) } %>%
      data.matrix %>% `/`(NROW(pmm)) %>%
      as.data.frame %>% tibble::rownames_to_column()
    ## Append one summed column per merged label (e.g. "-/d" = "-" + "d")
    plyr::l_ply(names(merged_labels),
      function(a)
      {
        comp <<- comp %@>% dplyr::rowwise() %@>% dplyr::mutate(
          !!a := sum(!!!rlang::syms(merged_labels[[a]]))
        )
      })
    comp <- comp %>% `rownames<-`(NULL) %>% tibble::column_to_rownames() %>% data.matrix

    ## Keep only channels where no label exceeds the overall threshold
    overall_channels <- (comp > overall_label_threshold) %>% apply(1, any) %>% `!`
    channels <- names(overall_channels)[overall_channels]
    if (any(!overall_channels))
      warning(sprintf("The following channels are overrepresented in all cells: %s",
        paste(names(overall_channels)[!overall_channels], collapse = " ")))
  }
  ## Here, 'comp' should look something like this:
  #                   -          d          +           ++       -/d       +/++
  # IL-23_p19 0.8236050 0.10851854 0.05529584 0.012580632 0.9321235 0.06787647
  # CD69      0.8493953 0.07843002 0.04751813 0.024656565 0.9278253 0.07217470
  # TGFb      0.8752095 0.08020844 0.02647643 0.018105611 0.9554180 0.04458204
  # IL-17A    0.8639330 0.07175749 0.04820795 0.016101551 0.9356905 0.06430950
  # IL-10     0.8733086 0.07338119 0.04515121 0.008158991 0.9466898 0.05331020
  # CCR7      0.8402868 0.09671154 0.04927721 0.013724493 0.9369983 0.06300171
  # [...]
  ##
  ## The non-'merged_labels' columns should add to 1, i.e. '(rowSums(comp[, 1:4]) == 1) %>% all' is TRUE.
  ## The 'merged_labels' columns should add to their component non-merged columns, e.g. "-/d" = "-" + "d".
  ## 'comp' summarizes the phenotypic composition of all clusters at once as the proportion of each label count
  ## relative to all the events.

  if (byEvent) {
    ## Per-event mode: report every event's labels (raw + merged) per channel
    e <- x[, channels, drop = FALSE]
    pmm <- attr(e, "plus_minus_matrix")
    mpmm <- as.matrix(pmm)
    ## For each merged label, a matrix the shape of 'mpmm' holding the merged
    ## label name wherever the event's raw label belongs to that combination
    merges <- sapply(names(merged_labels),
      function(a)
      {
        mpmm %in% merged_labels[[a]] %>% `dim<-`(dim(mpmm)) %>%
          `is.na<-`(. == FALSE) %>% `[<-`(., ., a)
      }, simplify = FALSE)
    ## Zip raw & merged labels cell-by-cell, then regroup per event
    allLabels <- c(list(as.list(t(mpmm))), sapply(merges, function(a) as.list(t(a))
      %>% `[<-`(is.na(.), list(NULL)), simplify = FALSE))
    r <- purrr::pmap(allLabels,
      function(...) { as.vector(c(...)) }) %>%
      `names<-`(rep(colnames(pmm), length.out = length(.))) %>%
      keystone::chunk(NCOL(pmm))
    if (!as_list) {
      r <- sapply(r,
        function(l) { sapply(names(l), function(a) a %_% paste(l[[a]], collapse = expression_level_sep)) %>%
          paste(collapse = collapse) }, simplify = FALSE)
    }
  } else {
    ## Per-cluster mode: for each cluster, report the labels meeting
    ## 'label_threshold' for each channel
    r <- sapply(n, # This doesn't appear to benefit if 'keystone::psapply()' is dropped in here -- it's worse, in fact!
      function(i)
      {
        e <- x[clusterId %in% i, channels, drop = FALSE]
        pmm <- attr(e, "plus_minus_matrix")
        l <- sapply(colnames(pmm),
          function(a)
          {
            ## Within-cluster label proportions for this channel ('comp' here
            ## shadows the overall 'comp' computed above)
            comp <- (table(pmm[, a])/NROW(pmm)) %>%
              data.matrix %>% t %>% as.data.frame %>%
              `rownames<-`(a) %>% tibble::rownames_to_column()
            plyr::l_ply(names(merged_labels),
              function(a)
              {
                comp <<- comp %@>% dplyr::rowwise() %@>% dplyr::mutate(
                  !!a := sum(!!!rlang::syms(merged_labels[[a]]))
                )
              })
            comp <- comp %>% keystone::dataframe() %>% tibble::column_to_rownames() %>%
              data.matrix
            ## 'comp' should look something like this:
            #                   -          d         +         ++       -/d       +/++
            # IL-23_p19 0.9394749 0.03492733 0.0209564 0.00464135 0.9744023 0.02559775
            ##
            ## The names of all columns meeting 'label_threshold' (see below) are returned.
            rr <- NULL # Default for channels that meet *none* of the label thresholds
            if (any(comp >= label_threshold)) {
              rr <- colnames(comp)[comp >= label_threshold]
            }

            rr
          }, simplify = FALSE) %>%
          purrr::compact() # Remove any channels that meet *none* of the label thresholds

        if (as_list)
          l
        else
          sapply(names(l), function(a) a %_% paste(l[[a]], collapse = expression_level_sep)) %>%
            paste(collapse = collapse)
      }, simplify = ifelse(as_list, FALSE, TRUE))
  }

  ## Name the per-cluster results (per-event results stay unnamed)
  if (!byEvent) {
    if (is.logical(element_names)) {
      if (element_names)
        names(r) <- as.character(n)
    } else if (is.character(element_names)) {
      names(r) <- element_names
    }
  }

  ## If 'as_list = TRUE', 'r' is a list the length of the unique cluster names in the current cluster set,
  ## w/ elements named after the clusters; each element is a sub-list named after the channels/columns of 'x',
  ## whose elements contain all the phenotype names (e.g. "-", "+", "+/++", &c) meeting
  ## the proportion threshold for that channel & cluster. If no phenotype meets the threshold, "" is returned.
  ## If 'as_list = FALSE', 'r' is a list the length of the unique cluster names in the current cluster set,
  ## each of whose elements is a single string displaying a full set of channel phenotypes separated
  ## according to 'collapse' & 'expression_level_sep'.
  structure(r, comp = comp) %>% keystone::add_class("summaryPmm")
}
## usage:
# summary(e[, -1], label_threshold = 0.90, as_list = TRUE)
#' S3 generic for phenotype searches on flowpipe objects
#'
#' NOTE(review): when this package is attached, this generic masks
#' 'base::search()'; use 'base::search()' explicitly for the R search path.
#' @export
search <- function(x, ...)
  UseMethod("search")
#' Default method for 'search()'
#'
#' Fix: the previous implementation re-invoked the generic 'search(x, ...)',
#' which for any object lacking a class-specific method dispatched straight
#' back to this default method, recursing until the C stack overflowed.
#' Fail fast with an informative error instead.
#'
#' @param x An object with no class-specific 'search' method.
#' @param ... Ignored.
#' @export
search.default <- function(x, ...)
{
  stop(sprintf("No applicable 'search' method for an object of class %s",
    paste(sQuote(class(x)), collapse = ", ")), call. = FALSE)
}
## Search plus-minus matrix of "pmm" expression object for channel phenotypes given in 'query', e.g.
## r <- search(e[, analysis_channels], query = c("cd45+/++", "cd3-/d"), summary... = list(which_cluster_set = 1, label_threshold = 0.55))
## Return Value: A vector of names of clusters whose 'query' channels all meet/exceed their 'label_threshold's,
## i.e. each cluster returned is a hit for all the 'query' phenotypes.
#' Search a "pmm" object's clusters for channel phenotypes
#'
#' @param x "pmm" object from 'get_expression_subset()'.
#' @param query Vector of search terms based on channel names, e.g.
#'   c("cd45+/++", "cd3-/d"); "||" within a term expresses OR-alternatives.
#' @param summary... Additional arguments to 'summary.pmm()', or a
#'   precomputed "summaryPmm" object (reused as-is).
#' @param return_type "character" for the names of matching clusters,
#'   "logical" for a per-cluster hit vector, or "grid" for the full
#'   cluster x phenotype-state matrix (with "gates" & "query" attributes).
#' @return See 'return_type'; NULL if nothing matches (non-"grid" modes).
#' @export
search.pmm <- function(
  x, # "pmm" object from 'get_expression_subset()'
  query, # Vector of search terms based on channel names
  summary... = list(), # Additional arguments to 'summary.pmm()' or a "summaryPmm" object
  return_type = c("character", "logical", "grid")
)
{
  return_type <- match.arg(return_type)

  ## Reuse a precomputed summary if one was passed in; otherwise build one
  if (inherits(summary..., "summaryPmm")) {
    sm <- summary...
  } else {
    summaryArgs <- list(
      x = x,
      as_list = TRUE
    )
    summaryArgs <- utils::modifyList(summaryArgs, summary..., keep.null = TRUE)

    sm <- do.call(summary, summaryArgs)
  }
  comp <- attr(sm, "comp")

  ## Enumerate all possible event states as a named logical vector
  ## (names are every channel x label combination, e.g. "CD3-", "CD3+/++", ...)
  template <- expand.grid(rownames(comp), colnames(comp), stringsAsFactors = FALSE) %>%
    plyr::alply(1, function(a) { unlist(a, use.names = FALSE) %>%
      paste(collapse = "") }) %>% unlist(use.names = FALSE) %>%
    { structure(rep(FALSE, length(.)), .Names = .) }

  ## Multiple OR-conditional gates lead to multiple queries that need testing;
  ## find all possible combinations and OR them to test for a hit.
  baseQuery <- stringr::str_split(query, "\\s*\\|\\|\\s*") ## Split query elements by "||"
  allQueries <- expand.grid(baseQuery, stringsAsFactors = FALSE) %>%
    plyr::alply(1, unlist, use.names = FALSE)
  ## Map each query term to its *nearest* template name by edit distance.
  ## NOTE(review): 'adist' + 'which.min' means a misspelled/unknown term
  ## silently matches the closest channel+label -- confirm this is intended.
  mm <- lapply(allQueries,
    function(a)
    {
      sapply(a,
        function(b) { adist(b, names(template), fixed = TRUE) %>% which.min }, simplify = TRUE) %>%
        { names(template)[.] }
    })
  ## One logical "test" vector per query combination, TRUE at queried states
  tests <- lapply(mm,
    function(a)
    {
      test <- template
      test[a] <- TRUE

      test
    })

  if (return_type == "grid") {
    ## Full cluster x phenotype-state matrix, no thresholding against 'tests'
    r <- sapply(sm,
      function(a) {
        event <- template
        event[unlist(lapply(names(a), function(b) paste0(b, a[[b]])))] <- TRUE
        event
      }, simplify = TRUE)

    return (structure(t(r), gates = mm, query = query))
  }

  ## For each cluster, mark its observed states & check whether any query
  ## combination is fully contained in them
  r <- lapply(sm,
    function(a)
    {
      event <- template
      event[unlist(lapply(names(a), function(b) paste0(b, a[[b]])))] <- TRUE
      ## Does this event/cluster include the same phenotypes as the query?
      Reduce(`||`, sapply(tests, function(b) sum(b & event) == sum(b)))
    }) %>% unlist(use.names = FALSE)

  if (is_invalid(r))
    return (NULL)

  if (return_type == "character")
    return (which(r) %>% as.character)

  r
}
## usage:
# r <- search(e[, analysis_channels], c("cd45+/++", "cd3-/d"), summary... = list(overall_label_threshold = Inf, label_threshold = 0.90))
#' Search a "pmm" object's clusters for channel phenotypes (legacy version)
#'
#' Regex-based predecessor of 'search.pmm()'; kept for reference. Each query
#' term (with "||" OR-alternatives) is turned into a case-insensitive regex &
#' matched against every cluster's channel+label strings.
#'
#' @param x "pmm" object from 'get_expression_subset()'.
#' @param query Vector of search terms based on channel names.
#' @param query_re RegEx template the escaped alternatives are spliced into.
#' @param summary... Additional arguments to 'summary.pmm()'.
#' @param ids_only Return only the names of matching clusters (TRUE), or the
#'   matched query terms per cluster (FALSE).
#' @return Cluster names (or per-cluster match details); NULL if no cluster
#'   matches every query term.
#' @export
search_orig.pmm <- function(
  x, # "pmm" object from 'get_expression_subset()'
  query, # Vector of search terms based on channel names
  query_re = "^(%s)$", # RegEx template for search
  summary... = list(), # Additional arguments to 'summary.pmm()'
  ids_only = TRUE
)
{
  summaryArgs <- list(
    x = x,
    as_list = TRUE
  )
  summaryArgs <- utils::modifyList(summaryArgs, summary..., keep.null = TRUE)

  sm <- do.call(summary, summaryArgs)

  r <- sapply(sm,
    function(a)
    {
      ## Flatten this cluster's summary into "channel+label" strings,
      ## skipping channels with no label
      test <- sapply(names(a),
        function(b)
        {
          if (all(a[[b]] == "")) return (NULL); paste0(b, a[[b]])
        }, simplify = FALSE) %>% unlist(use.names = FALSE)

      ## This produces a list whose elements have >1-length vectors for each either-or query:
      baseQuery <- stringr::str_split(query, stringr::fixed("||", TRUE))
      ## One case-insensitive regex per query term, its OR-alternatives joined by "|"
      re <- sapply(baseQuery,
        function(b)
        {
          stringr::regex(sprintf(query_re, paste(rex::escape(b %>% unlist), collapse = "|")),
            ignore_case = TRUE)
        }, simplify = FALSE)

      d <- sapply(re, function(b) stringr::str_subset(test, b), simplify = FALSE)
      ## Were all the query terms found?
      if (length(sapply(d, table, simplify = FALSE) %>% purrr::compact()) == length(baseQuery))
        ## If yes, return all those query terms that were found
        { return (d %>% unlist) }

      NULL
    }, simplify = FALSE) %>% purrr::compact()

  if (is_invalid(r))
    return (NULL)

  if (ids_only)
    return (names(r))

  r
}
## usage:
# r <- search(e[, analysis_channels], c("cd45+/++", "cd3-/d"), summary... = list(overall_label_threshold = Inf, label_threshold = 0.90))
#' @export
merge_clusters <- function(
x, # "pmm" object from 'get_expression_subset()'
clusters, # Named list of cell subsets
channels,
label_threshold,
which_cluster_set = 1, # Column no. or name; NULL or FALSE to set off by-event search
search... = list(),
verbose = TRUE,
leftover_clusters = NULL,
make_gating_poster = FALSE, # Logical, or character path to directory for individual plots
visualize_channels... = list(),
devices = flowpipe:::graphics_devices,
#save_plot_fun = grDevices::pdf, save_plot... = list(compress = FALSE)
save_plot_fun = grDevices::cairo_pdf, save_plot... = list(onefile = TRUE)
)
{
origClusterId <- attr(x, "cluster_id")
byEvent <- FALSE
if (is.null(which_cluster_set) || (is.logical(which_cluster_set) && !which_cluster_set)) {
## N.B. The "event" clusters must be run though 'sprintf()' to prevent exponentiation > 399999.
attr(x, "cluster_id") <- sprintf("%d", seq(NROW(x)))
which_cluster_set <- 1
byEvent <- TRUE
} else if (is.logical(which_cluster_set) && which_cluster_set) {
which_cluster_set <- 1
}
searchArgs <- list(
x = x,
summary... = list(which_cluster_set = which_cluster_set)
)
if (!missing(channels))
searchArgs$summary...$channels <- channels
searchArgs <- utils::modifyList(searchArgs, search..., keep.null = TRUE)
if (byEvent)
searchArgs$summary... <- utils::modifyList(searchArgs$summary..., list(n = FALSE), keep.null = TRUE)
label_thresholds <- structure(
rep(formals(summary.pmm)$label_threshold, length(clusters)),
.Names = names(clusters)
)
if (!missing(label_threshold)) {
if (is_invalid(names(label_threshold)))
names(label_threshold) <- rep("", length(label_threshold))
namedThresholds <- label_threshold[names(label_threshold) != ""]
if (!is_invalid(namedThresholds))
label_thresholds <-
replace(label_thresholds, names(namedThresholds), namedThresholds)
unnamedThresholds <- label_threshold[names(label_threshold) == ""]
if (!is_invalid(unnamedThresholds)) {
indices <- names(label_thresholds) %nin% names(namedThresholds)
label_thresholds[indices] <-
rep(unnamedThresholds, length.out = length(label_thresholds[indices]))
}
}
### Create plots to visually follow a sequence of predefined gates
gating_poster_dir <- NULL
if (is.character(make_gating_poster)) {
gating_poster_dir <- make_gating_poster
make_gating_poster <- TRUE
if (!dir.exists(gating_poster_dir))
dir.create(gating_poster_dir, recursive = TRUE)
}
tictoc::tic("Search clusters")
cc <- NULL
if (make_gating_poster && byEvent) {
## This probably doesn't dispatch on 'summary' alone because of the name/position of the 1st argument
sm <- do.call(summary.pmm, utils::modifyList(searchArgs$summary...,
list(x = x), keep.null = TRUE))
## Prepare data set to proceed through & plot predefined gating sequences
## N.B. For size considerations, I might want to plot inside 'sapply()' & return NULL
cc_grid <- keystone::psapply(names(clusters),
function(a)
{
searchArgs$query <- clusters[[a]]
searchArgs$summary...$label_threshold <- label_thresholds[a]
searchArgs$return_type <- "grid"
searchArgs$summary... <- sm
if (verbose) {
cat(sprintf("Querying for '%s' clusters at event level...", a))
utils::flush.console()
}
r <- do.call(search, searchArgs)
if (verbose) {
cat(". Done.", fill = TRUE); utils::flush.console()
}
r
}, simplify = FALSE)
## Ordering the colnames by decreasing length will prevent e.g. a match between
## "CD4" & "CD45" before the regex search has gotten to "CD45".
re <- stringr::regex(stringr::str_flatten(rex::escape(colnames(x)[colnames(x)
%>% nchar %>% order(decreasing = TRUE)]), "|"))
`cc+grobs` <- keystone::psapply(seq_along(cc_grid), # So 'a' can be used for numbering plots
function(a)
{
chunks <- sapply(attr(cc_grid[[a]], "gates"), function(b) keystone::chunk(b, 2), simplify = FALSE)
## The first "chunk" will have the same no. of elements as all the others:
tests <- sapply(seq_along(chunks[[1]]),
function(b) sapply(chunks, function(g) g[[b]], simplify = FALSE) %>%
unique, simplify = FALSE)
query <- attr(cc_grid[[a]], "query")
query_chunks <- keystone::chunk(query, 2)
cat(sprintf("%s:", names(cc_grid)[a]), fill = TRUE); print(query); utils::flush.console()
gated_events <- rep(TRUE, NROW(cc_grid[[a]]))
flit <- sapply(seq_along(tests), # So 'b' can be used for numbering plots
function(b)
{
## 'NCOL(.)' handles the case where the test matrix has only one column:
r <- Reduce(`|`, sapply(tests[[b]], function(g) { cc_grid[[a]][, g, drop = FALSE] %>% { `&`(.[, 1], .[, NCOL(.)]) } },
simplify = FALSE), accumulate = TRUE)
## For each list element, create a biaxial plot
grobs <- mapply(function(k, l)
{
plot_channels <- stringr::str_match_all(paste(k, collapse = " "), re)[[1]] %>% drop %>% unique
## N.B. Uncomment 'event_mask' just below to plot only events selected by the previous gate:
visualize_channelsArgs <- list(
x = x,
channels = list(gated_events & r[[l]]),
event_mask = gated_events,
extract_gating_channels = function(...) plot_channels,
points... = list(col = scales::alpha("red", 0.5)),
plot_end_callback = function(...) { # A function will carry its environment along w/ itself
graphics::title(main = sprintf("Gate: %s", paste(query_chunks[[b]], collapse = " & ")), cex.main = 0.9, ...)
if (l > 1) graphics::mtext("(OR'd with previous gate)", cex = 0.9)
graphics::mtext(sprintf("Events: %d/%d", sum(gated_events & r[[l]]), sum(gated_events)),
side = 1, line = -1, cex = 0.8)
}
)
visualize_channelsArgs <-
utils::modifyList(visualize_channelsArgs, visualize_channels..., keep.null = TRUE)
grobs <- list()
if (!is.null(gating_poster_dir)) {
# plyr::l_ply(seq_along(devices),
# function(d)
# {
# ext <- devices[[d]]$ext; devices[[d]]$ext <- NULL
# ## Reduce resolution for 'png()' etc. to a manageable value:
# if ("res" %in% names(formals(eval(parse(text = names(devices)[d]))))) devices[[d]]$res <- 150
# do.call(eval(parse(text = names(devices)[d])),
# modifyList(devices[[d]],
# list(
# width = 5, height = 5,
# file = sprintf("%s/%03d-%03d%s_gate-%s",
# gating_poster_dir, a, b, letters[l], paste(plot_channels, collapse = "&")) %_% paste0(".", ext)
# )
# )
# )
# dev.control(displaylist = "enable")
# do.call(visualize_channels, visualize_channelsArgs)
# if (d == length(devices)) {
# grobs <<- append(grobs, list(grDevices::recordPlot()))
# }
# dev.off()
# })
gatePlotPath <- tempfile()
grDevices::png(file = gatePlotPath, bg = "transparent")
dev.control(displaylist = "enable")
do.call(visualize_channels, visualize_channelsArgs)
gatePlot <- grDevices::recordPlot()
invisible(dev.off())
unlink(gatePlotPath)
grobs <- append(grobs, list(gatePlot))
} else {
do.call(visualize_channels, visualize_channelsArgs)
}
grobs
}, tests[[b]], seq_along(r), USE.NAMES = TRUE, SIMPLIFY = FALSE)
gated_events <<- gated_events & r[[length(r)]]
print(table(gated_events)); utils::flush.console()
grobs
}, simplify = FALSE)
list(gated_events = gated_events, grobs = flit %>% purrr::flatten())
}, simplify = FALSE)
grobs <- NULL
if (!is.null(gating_poster_dir)) {
grobs <- sapply(`cc+grobs`, function(a) a$grobs, simplify = FALSE) %>% `names<-`(names(cc_grid))
## Keep list of grobs for e.g. single plots, different image types:
saveRDS(object = grobs, file = paste(data_dir, "gated-clusters-poster.rds", sep = "/"))
}
cc <- sapply(`cc+grobs`, function(a) a$gated_events, simplify = FALSE) %>%
sapply(function(a) { as.vector(a) %>% which %>% as.character }, simplify = FALSE) %>% `names<-`(names(cc_grid))
rm(`cc+grobs`)
## Finally, create full gating poster
if (!is.null(grobs)) {
max_gates <- sapply(grobs, length) %>% max
grobs <- sapply(grobs, `length<-`, value = max_gates, simplify = FALSE)
save_plotArgs <- list(
width = min(5.0 * max_gates + 1, 200), # 200 in. is PDF maximum
height = min(5.0 * length(grobs) + 1, 200), # 200 in. is PDF maximum
#file = paste(gating_poster_dir, "gated-clusters-poster.pdf", sep = "/")
filename = paste(gating_poster_dir, "gated-clusters-poster.pdf", sep = "/") # For 'grDevices::cairo_pdf()'
)
save_plotArgs <- utils::modifyList(save_plotArgs, save_plot..., keep.null = TRUE)
do.call(save_plot_fun, save_plotArgs)
## Create a blank plot for empty grid cells (but not needed for 'cowplot::plot_grid()')
if (FALSE) {
blankPath <- tempfile()
grDevices::png(file = blankPath, bg = "transparent")
dev.control(displaylist = "enable")
plot.new()
blank <- grDevices::recordPlot()
invisible(dev.off())
unlink(blankPath)
}
cowplot::plot_grid(
## This creates a list of "recordedplot" objects:
#plotlist = sapply(grobs %>% purrr::flatten(), function(a) if (is.null(a)) list(blank) else a),
plotlist = sapply(grobs %>% purrr::flatten(), function(a) if (is.null(a)) list(NULL) else a),
ncol = max_gates,
hjust = 0, label_x = 0.01,
labels = rep("", max_gates * length(grobs)) %>%
`[<-`(seq(from = 1, by = max_gates, length.out = length(grobs)), names(grobs)),
#label_colour = "darkgreen",
label_size = 16
) %>% print
dev.off()
## Convert PDF to PNG
suppressWarnings(pdftools::pdf_convert(
pdf = save_plotArgs$file,
format = "png",
dpi = 100,
filenames = sprintf("%s.png", tools::file_path_sans_ext(save_plotArgs$file))
))
}
}
if (is.null(cc)) {
cc <- keystone::psapply(names(clusters),
function(a)
{
searchArgs$query <- clusters[[a]]
searchArgs$summary...$label_threshold <- label_thresholds[a]
if (verbose) {
if (!byEvent)
cat(sprintf("Querying for '%s' clusters at %0.2f threshold...", a,
searchArgs$summary...$label_threshold))
else
cat(sprintf("Querying for '%s' clusters at event level...", a))
utils::flush.console()
}
if (byEvent)
searchArgs$summary... <- sm
r <- do.call(search, searchArgs)
if (verbose) {
cat(". Done.", fill = TRUE); utils::flush.console()
}
r
}, simplify = FALSE)
}
tictoc::toc()
cc0 <- cc[sapply(cc, is.null)]
if (length(cc0) > 0)
warning(sprintf("Clusters %s were not found", cc0 %>% names %>% sQuote %>% paste(collapse = ", ")))
cc1 <- cc %>% purrr::compact()
clusterId <- attr(x, "cluster_id")
if (is.matrix(clusterId))
clusterId <- clusterId[, which_cluster_set]
merged_clusters <- list(
new_cluster_id = clusterId,
orig_cluster_id = origClusterId
)
if (is_invalid(cc1)) { # No new clusters found
if (byEvent)
merged_clusters <- list(new_cluster_id = origClusterId, orig_cluster_id = origClusterId)
return (merged_clusters)
}
## Create 'replace()' arguments
replaceArgss <- sapply(names(cc1),
function(a)
{
list(
list = clusterId %in% cc1[[a]],
value = a
)
}, simplify = FALSE)
newClusterId <- sapply(replaceArgss,
function(a)
{
r <- replace(merged_clusters$new_cluster_id, a$list %>% which, a$value) %>%
replace((!a$list) %>% which, NA_character_)
r
}, simplify = TRUE)
## Now collapse all the mutually exclusive columns together
newMergedClusterId <- merge_mutually_exclusive_cols(newClusterId) %>%
cbind(orig = merged_clusters$orig_cluster_id, .)
## N.B. If 'merged_clusters$orig_cluster_id' is already a matrix, the name "orig" is unused.
merged_clusters$new_cluster_id <- newMergedClusterId
merged_clusters
}
merge_mutually_exclusive_cols <- function(
  ..., # Combination of matrices/vectors having the same no. rows/length
  collapse = "|"
)
{
  ## Repeatedly merge any pair of columns that are never simultaneously
  ## non-NA in the same row ("mutually exclusive") into a single column whose
  ## name is the two source names joined by 'collapse'. Inputs with fewer than
  ## 3 columns are returned unchanged.
  d <- cbind(...)
  if (NCOL(d) < 3)
    return (d)
  repeat {
    didMerge <- FALSE
    ## Guard: with < 2 columns there is no pair left to test, and
    ## utils::combn(seq(1), 2) would error
    if (NCOL(d) < 2)
      break
    for (a in utils::combn(seq_len(NCOL(d)), 2, simplify = FALSE)) {
      ## Mutually exclusive iff no row has both entries non-NA
      colsAreMutuallyExclusive <-
        !any(rowSums(!is.na(d[, a, drop = FALSE])) == 2)
      if (colsAreMutuallyExclusive) {
        ## Merge pair into the first column: take col a[1], overwriting with
        ## col a[2]'s values wherever a[2] is non-NA
        fill <- d[, a[2]]
        temp <- d[, a[1]]
        temp[!is.na(fill)] <- fill[!is.na(fill)]
        d[, a[1]] <- temp
        ## Name of new column becomes a combination of both starting columns
        colnames(d)[a[1]] <- paste(colnames(d)[a], collapse = collapse)
        ## BUG FIX: drop = FALSE keeps 'd' a matrix even when a single column
        ## remains; drop = TRUE collapsed it to a bare vector and the next
        ## combn()/colnames() calls then errored
        d <- d[, -a[2], drop = FALSE]
        ## Don't ever finish right after a merge; check for mergeable columns
        ## at least once more.
        didMerge <- TRUE
        break
      }
    }
    ## Finish once a full pass found nothing to merge
    if (!didMerge)
      break
  }
  d
}
#merge_mutually_exclusive_cols(orig = cluster_id, new_cluster)
#merge_mutually_exclusive_cols(new_cluster)
|
rm(list=ls())
# environment ====
# One-time setup for the GitHub-only dependencies (uncomment to install):
#if (!requireNamespace("remotes", quietly = TRUE)) install.packages("remotes")
#remotes::install_github("MRCIEU/genetics.binaRies", force = F)
#remotes::install_github("explodecomputer/plinkbinr", force = F)
#remotes::install_github("chr1swallace/coloc@main", force = F)
#remotes::install_github("sjmgarnier/viridis", force = F)
library(genetics.binaRies)
library(plinkbinr)
library(coloc)
library(viridis)
library(data.table)
library(ieugwasr)
library(dplyr)
library(TwoSampleMR)
library(tidyverse)
source("scripts/011_colocalisation/functions/my_coloc_chriswallace.R")
# data ====
# MVMR results: keep female colon-cancer rows for the protein exposures only
# (i.e. drop the adiposity exposures BMI / WHR / WHRadjBMI)
mvmr <- read.table("analysis/008_mvmr/mvmr_results.txt", header = TRUE, sep = "\t")
mvmr <- subset(mvmr,
               group == "Female" &
                 exposure != "BMI" &
                 exposure != "WHR" &
                 exposure != "WHRadjBMI" &
                 cancer == "colon")
# Expected cis-pQTL file for each retained exposure, restricted to files that exist
filenames_mvmr <- paste0("/data/protein_GWAS_ferkingstad_EU_2021/files/cis_snps_1mb//", mvmr$exposure, "_", mvmr$gene, "_", mvmr$protein, ".txt.gz.annotated.gz.exclusions.gz.alleles.gz.unzipped.cis.txt")
filenames_all <- dir("/data/protein_GWAS_ferkingstad_EU_2021/files/cis_snps_1mb/", recursive = TRUE, full.names = TRUE, pattern = ".cis.txt")
filenames <- intersect(filenames_mvmr, filenames_all)
# exposure ====
# Read each cis-pQTL file (one per protein)
exposure_list <- lapply(filenames, fread, col.names = c("CHR", "POS", "SNPID", "SNP", "EA", "OA",
                                                        "beta.exposure", "pval.exposure", "minus_log10_pval", "se.exposure", "samplesize.exposure",
                                                        "EAF", "exposure", "effect_allele.exposure", "other_allele.exposure", "eaf.exposure"))
# Name each element by its protein label so names travel with the list through
# the filtering below (fixed = TRUE: the patterns are literal path fragments,
# not regular expressions)
names(exposure_list) <- gsub("/data/protein_GWAS_ferkingstad_EU_2021/files/cis_snps_1mb//", "", filenames, fixed = TRUE)
names(exposure_list) <- gsub(".txt.gz.annotated.gz.exclusions.gz.alleles.gz.unzipped.cis.txt", "", names(exposure_list), fixed = TRUE)
length(exposure_list)
exposure_list <- exposure_list[sapply(exposure_list, nrow) > 0] # drop proteins with no cis SNPs
length(exposure_list)
exposure_list <- purrr::discard(exposure_list, ~any(.x$CHR == "chrX")) # X CHR not available in outcome
length(exposure_list)
# format exposure ====
# BUG FIX: 'exposure_filenames' was previously derived from the unfiltered
# 'filenames', so whenever the two filtering steps above removed elements,
# exposure_filenames[[i]] labelled the wrong protein (wrong id.exposure /
# id.outcome downstream). Deriving it from the name-carrying filtered list
# keeps labels and data aligned.
exposure_filenames <- names(exposure_list)
for (i in seq_along(exposure_list)){
  exposure_list[[i]]$exposure <- gsub("/data/protein_GWAS_ferkingstad_EU_2021/files/", "", exposure_list[[i]]$exposure, fixed = TRUE)
  exposure_list[[i]]$exposure <- gsub(".txt.gz.unzipped", "", exposure_list[[i]]$exposure, fixed = TRUE)
  exposure_list[[i]]$id.exposure <- paste0(exposure_filenames[[i]], "_", "joint_colon_female")
}
# outcome data ====
# Outcome GWAS: colorectal (colon) cancer in females, Huyghe et al. 2018.
# Use a distinct name rather than clobbering 'filenames' from the data step.
outcome_filename <- "joint_colon_Female_wald_MAC50_1.TBL.annotated.txt"
# Preallocate, and use seq_along() so an empty exposure list is a no-op
# (1:length() would iterate over c(1, 0) and error)
outcome_list <- vector("list", length(exposure_list))
for (i in seq_along(exposure_list)){
  # Extract only the SNPs present in this exposure's cis window
  outcome_list[[i]] <- read_outcome_data(
    paste0("/data/GWAS_data/files/huyghe_2018_PMID30510241/processed/", outcome_filename),
    snps = exposure_list[[i]]$SNP,
    sep = " ",
    snp_col = "SNP",
    beta_col = "Effect",
    se_col = "StdErr",
    eaf_col = "Freq1",
    effect_allele_col = "Allele1",
    other_allele_col = "Allele2",
    pval_col = "P.value",
    min_pval = 1e-200,
    log_pval = FALSE,
    chr_col = "Chr",
    pos_col = "Position",
    phenotype_col = "phenotype")
  outcome_list[[i]]$outcome <- "joint_colon_female"
  outcome_list[[i]]$id.outcome <- paste0(exposure_filenames[[i]], "_", outcome_list[[i]]$outcome)
}
# harmonise ====
exposure <- bind_rows(exposure_list)
outcome <- bind_rows(outcome_list)
# NOTE: assign to 'harmonised', not 'harmonise_data' -- the latter masked the
# TwoSampleMR function of the same name
harmonised <- harmonise_data(exposure, outcome, action = 2)
# Keep one row per SNP-per-exposure (no need to store the key as a column)
harmonised <- harmonised[!duplicated(paste0(harmonised$SNP, "_", harmonised$id.exposure)), ]
harmonise_data_list <- split(harmonised, harmonised$id.exposure)
# loop over all harmonised data and run ld matrix, formatting, coloc, save ====
table_master <- data.frame() # accumulates one summary row per protein/outcome pair
for (i in seq_along(harmonise_data_list)){
  label_exposure <- unique(harmonise_data_list[[i]]$exposure)
  label_outcome <- "joint_colon_female"
  label <- paste0(label_exposure, "_", label_outcome)
  # make ld matrix ====
  # LD between the instrument SNPs, from the 1000G phase 3 EUR reference panel
  ld <- ld_matrix_local(
    harmonise_data_list[[i]]$SNP,
    with_alleles = FALSE,
    bfile = "/data/GWAS_data/files/references/1kG_v3/EUR/EUR",
    plink_bin = get_plink_exe())
  # format LD matrix and harmonised list ====
  # Restrict both objects to their common SNPs, then put them in the same order
  # (coloc requires beta/varbeta and LD rows/cols to line up exactly)
  ld <- ld[which(rownames(ld) %in% harmonise_data_list[[i]]$SNP), which(colnames(ld) %in% harmonise_data_list[[i]]$SNP)]
  harmonise_data_list[[i]] <- harmonise_data_list[[i]][which(harmonise_data_list[[i]]$SNP %in% rownames(ld)),]
  ld <- ld[match(harmonise_data_list[[i]]$SNP,rownames(ld)),]
  ld <- ld[,match(harmonise_data_list[[i]]$SNP, colnames(ld))]
  harmonise_data_list[[i]] <- harmonise_data_list[[i]][match(rownames(ld), harmonise_data_list[[i]]$SNP),]
  # make lists for coloc ====
  # N = 35559: pQTL GWAS sample size; N = 120328: outcome GWAS sample size
  coloc_data_exposure <- list(beta = harmonise_data_list[[i]]$beta.exposure, varbeta = harmonise_data_list[[i]]$se.exposure^2, MAF = harmonise_data_list[[i]]$eaf.exposure, type = "quant", N = 35559, snp = rownames(ld), LD = ld, position = harmonise_data_list[[i]]$POS)
  coloc_data_outcome <- list(beta = harmonise_data_list[[i]]$beta.outcome, varbeta = harmonise_data_list[[i]]$se.outcome^2, MAF = harmonise_data_list[[i]]$eaf.outcome, type = "cc", N = 120328, snp = rownames(ld), LD = ld, position = harmonise_data_list[[i]]$POS)
  # coloc ====
  coloc_results <- coloc.abf(dataset1 = coloc_data_exposure, dataset2 = coloc_data_outcome)
  # One sensitivity figure per pair; dev.off() closes the pdf device
  pdf(paste0("analysis/009_colocalisation/results/joint_colon_female/figures/", label, ".pdf"),
      height = 10, width = 10)
  coloc_sensitivity <- my_sensitivity(coloc_results, "H4 > 0.9",
                                      trait1_title = label_exposure, trait2_title = label_outcome)
  dev.off()
  # save ====
  saveRDS(coloc_results, paste0("analysis/009_colocalisation/results/joint_colon_female/", label, ".RData"))
  # make table ====
  # 'result_row' (was 'table') avoids masking base::table(); extract the
  # posterior summary by name rather than by position
  result_row <- data.frame(
    exposure = label_exposure,
    outcome = label_outcome,
    id = label,
    nsnps = coloc_results$summary[["nsnps"]],
    h0 = coloc_results$summary[["PP.H0.abf"]],
    h1 = coloc_results$summary[["PP.H1.abf"]],
    h2 = coloc_results$summary[["PP.H2.abf"]],
    h3 = coloc_results$summary[["PP.H3.abf"]],
    h4 = coloc_results$summary[["PP.H4.abf"]])
  table_master <- rbind(table_master, result_row)
}
write.table(table_master, "analysis/009_colocalisation/results/joint_colon_female/001_coloc_results.txt",
            row.names = FALSE, col.names = TRUE, quote = FALSE, sep = "\t")
| /scripts/011_colocalisation/002_coloc/002_coloc_joint_colon_female.R | no_license | mattlee821/adiposity_proteins_colorectal_cancer | R | false | false | 6,955 | r | rm(list=ls())
# environment ====
# One-time setup for the GitHub-only dependencies (uncomment to install):
#if (!requireNamespace("remotes", quietly = TRUE)) install.packages("remotes")
#remotes::install_github("MRCIEU/genetics.binaRies", force = F)
#remotes::install_github("explodecomputer/plinkbinr", force = F)
#remotes::install_github("chr1swallace/coloc@main", force = F)
#remotes::install_github("sjmgarnier/viridis", force = F)
library(genetics.binaRies)
library(plinkbinr)
library(coloc)
library(viridis)
library(data.table)
library(ieugwasr)
library(dplyr)
library(TwoSampleMR)
library(tidyverse)
source("scripts/011_colocalisation/functions/my_coloc_chriswallace.R")
# data ====
# MVMR results: keep female colon-cancer rows for the protein exposures only
# (i.e. drop the adiposity exposures BMI / WHR / WHRadjBMI)
mvmr <- read.table("analysis/008_mvmr/mvmr_results.txt", header = TRUE, sep = "\t")
mvmr <- subset(mvmr,
               group == "Female" &
                 exposure != "BMI" &
                 exposure != "WHR" &
                 exposure != "WHRadjBMI" &
                 cancer == "colon")
# Expected cis-pQTL file for each retained exposure, restricted to files that exist
filenames_mvmr <- paste0("/data/protein_GWAS_ferkingstad_EU_2021/files/cis_snps_1mb//", mvmr$exposure, "_", mvmr$gene, "_", mvmr$protein, ".txt.gz.annotated.gz.exclusions.gz.alleles.gz.unzipped.cis.txt")
filenames_all <- dir("/data/protein_GWAS_ferkingstad_EU_2021/files/cis_snps_1mb/", recursive = TRUE, full.names = TRUE, pattern = ".cis.txt")
filenames <- intersect(filenames_mvmr, filenames_all)
# exposure ====
# Read each cis-pQTL file (one per protein)
exposure_list <- lapply(filenames, fread, col.names = c("CHR", "POS", "SNPID", "SNP", "EA", "OA",
                                                        "beta.exposure", "pval.exposure", "minus_log10_pval", "se.exposure", "samplesize.exposure",
                                                        "EAF", "exposure", "effect_allele.exposure", "other_allele.exposure", "eaf.exposure"))
# Name each element by its protein label so names travel with the list through
# the filtering below (fixed = TRUE: the patterns are literal path fragments,
# not regular expressions)
names(exposure_list) <- gsub("/data/protein_GWAS_ferkingstad_EU_2021/files/cis_snps_1mb//", "", filenames, fixed = TRUE)
names(exposure_list) <- gsub(".txt.gz.annotated.gz.exclusions.gz.alleles.gz.unzipped.cis.txt", "", names(exposure_list), fixed = TRUE)
length(exposure_list)
exposure_list <- exposure_list[sapply(exposure_list, nrow) > 0] # drop proteins with no cis SNPs
length(exposure_list)
exposure_list <- purrr::discard(exposure_list, ~any(.x$CHR == "chrX")) # X CHR not available in outcome
length(exposure_list)
# format exposure ====
# BUG FIX: 'exposure_filenames' was previously derived from the unfiltered
# 'filenames', so whenever the two filtering steps above removed elements,
# exposure_filenames[[i]] labelled the wrong protein (wrong id.exposure /
# id.outcome downstream). Deriving it from the name-carrying filtered list
# keeps labels and data aligned.
exposure_filenames <- names(exposure_list)
for (i in seq_along(exposure_list)){
  exposure_list[[i]]$exposure <- gsub("/data/protein_GWAS_ferkingstad_EU_2021/files/", "", exposure_list[[i]]$exposure, fixed = TRUE)
  exposure_list[[i]]$exposure <- gsub(".txt.gz.unzipped", "", exposure_list[[i]]$exposure, fixed = TRUE)
  exposure_list[[i]]$id.exposure <- paste0(exposure_filenames[[i]], "_", "joint_colon_female")
}
# outcome data ====
# Outcome GWAS: colorectal (colon) cancer in females, Huyghe et al. 2018.
# Use a distinct name rather than clobbering 'filenames' from the data step.
outcome_filename <- "joint_colon_Female_wald_MAC50_1.TBL.annotated.txt"
# Preallocate, and use seq_along() so an empty exposure list is a no-op
# (1:length() would iterate over c(1, 0) and error)
outcome_list <- vector("list", length(exposure_list))
for (i in seq_along(exposure_list)){
  # Extract only the SNPs present in this exposure's cis window
  outcome_list[[i]] <- read_outcome_data(
    paste0("/data/GWAS_data/files/huyghe_2018_PMID30510241/processed/", outcome_filename),
    snps = exposure_list[[i]]$SNP,
    sep = " ",
    snp_col = "SNP",
    beta_col = "Effect",
    se_col = "StdErr",
    eaf_col = "Freq1",
    effect_allele_col = "Allele1",
    other_allele_col = "Allele2",
    pval_col = "P.value",
    min_pval = 1e-200,
    log_pval = FALSE,
    chr_col = "Chr",
    pos_col = "Position",
    phenotype_col = "phenotype")
  outcome_list[[i]]$outcome <- "joint_colon_female"
  outcome_list[[i]]$id.outcome <- paste0(exposure_filenames[[i]], "_", outcome_list[[i]]$outcome)
}
# harmonise ====
exposure <- bind_rows(exposure_list)
outcome <- bind_rows(outcome_list)
# NOTE: assign to 'harmonised', not 'harmonise_data' -- the latter masked the
# TwoSampleMR function of the same name
harmonised <- harmonise_data(exposure, outcome, action = 2)
# Keep one row per SNP-per-exposure (no need to store the key as a column)
harmonised <- harmonised[!duplicated(paste0(harmonised$SNP, "_", harmonised$id.exposure)), ]
harmonise_data_list <- split(harmonised, harmonised$id.exposure)
# loop over all harmonised data and run ld matrix, formatting, coloc, save ====
table_master <- data.frame() # accumulates one summary row per protein/outcome pair
for (i in seq_along(harmonise_data_list)){
  label_exposure <- unique(harmonise_data_list[[i]]$exposure)
  label_outcome <- "joint_colon_female"
  label <- paste0(label_exposure, "_", label_outcome)
  # make ld matrix ====
  # LD between the instrument SNPs, from the 1000G phase 3 EUR reference panel
  ld <- ld_matrix_local(
    harmonise_data_list[[i]]$SNP,
    with_alleles = FALSE,
    bfile = "/data/GWAS_data/files/references/1kG_v3/EUR/EUR",
    plink_bin = get_plink_exe())
  # format LD matrix and harmonised list ====
  # Restrict both objects to their common SNPs, then put them in the same order
  # (coloc requires beta/varbeta and LD rows/cols to line up exactly)
  ld <- ld[which(rownames(ld) %in% harmonise_data_list[[i]]$SNP), which(colnames(ld) %in% harmonise_data_list[[i]]$SNP)]
  harmonise_data_list[[i]] <- harmonise_data_list[[i]][which(harmonise_data_list[[i]]$SNP %in% rownames(ld)),]
  ld <- ld[match(harmonise_data_list[[i]]$SNP,rownames(ld)),]
  ld <- ld[,match(harmonise_data_list[[i]]$SNP, colnames(ld))]
  harmonise_data_list[[i]] <- harmonise_data_list[[i]][match(rownames(ld), harmonise_data_list[[i]]$SNP),]
  # make lists for coloc ====
  # N = 35559: pQTL GWAS sample size; N = 120328: outcome GWAS sample size
  coloc_data_exposure <- list(beta = harmonise_data_list[[i]]$beta.exposure, varbeta = harmonise_data_list[[i]]$se.exposure^2, MAF = harmonise_data_list[[i]]$eaf.exposure, type = "quant", N = 35559, snp = rownames(ld), LD = ld, position = harmonise_data_list[[i]]$POS)
  coloc_data_outcome <- list(beta = harmonise_data_list[[i]]$beta.outcome, varbeta = harmonise_data_list[[i]]$se.outcome^2, MAF = harmonise_data_list[[i]]$eaf.outcome, type = "cc", N = 120328, snp = rownames(ld), LD = ld, position = harmonise_data_list[[i]]$POS)
  # coloc ====
  coloc_results <- coloc.abf(dataset1 = coloc_data_exposure, dataset2 = coloc_data_outcome)
  # One sensitivity figure per pair; dev.off() closes the pdf device
  pdf(paste0("analysis/009_colocalisation/results/joint_colon_female/figures/", label, ".pdf"),
      height = 10, width = 10)
  coloc_sensitivity <- my_sensitivity(coloc_results, "H4 > 0.9",
                                      trait1_title = label_exposure, trait2_title = label_outcome)
  dev.off()
  # save ====
  saveRDS(coloc_results, paste0("analysis/009_colocalisation/results/joint_colon_female/", label, ".RData"))
  # make table ====
  # 'result_row' (was 'table') avoids masking base::table(); extract the
  # posterior summary by name rather than by position
  result_row <- data.frame(
    exposure = label_exposure,
    outcome = label_outcome,
    id = label,
    nsnps = coloc_results$summary[["nsnps"]],
    h0 = coloc_results$summary[["PP.H0.abf"]],
    h1 = coloc_results$summary[["PP.H1.abf"]],
    h2 = coloc_results$summary[["PP.H2.abf"]],
    h3 = coloc_results$summary[["PP.H3.abf"]],
    h4 = coloc_results$summary[["PP.H4.abf"]])
  table_master <- rbind(table_master, result_row)
}
write.table(table_master, "analysis/009_colocalisation/results/joint_colon_female/001_coloc_results.txt",
            row.names = FALSE, col.names = TRUE, quote = FALSE, sep = "\t")
|
#' @title predict() for spatial use of BART models
#'
#' @description
#' A predict() wrapper for combining BART models with spatial input data, to generate a Raster or RasterStack of predicted outputs. This now includes the ability to predict from random intercept models, which can be used to deal with clustering in space and time of outcome variables!
#'
#' @param object A BART model object or riBART model object generated by the dbarts package
#' @param x.layers An object of class RasterStack
#' @param quantiles Include the extraction of quantiles (e.g. 5\% and 95\% credible interval) from the posterior
#' @param ri.data If 'object' is a riBART model, this gives either one consistent value (e.g. a prediction year) or a RasterLayer for the random effect
#' @param ri.name The name of the random intercept in the riBART model
#' @param ri.pred Should the random intercept be *included* in the prediction value or dropped? Defaults to FALSE (treats the random intercept as noise to be excluded)
#' @param splitby If set to a value higher than 1, will split your dataset into approximately n divisible chunks
#' @param quiet No progress bars
#'
#' @return A RasterStack: posterior mean in the first layer, plus one layer per
#' requested quantile.
#'
#' @export
#'
############################
predict2.bart <- function(object,
                          x.layers,
                          quantiles=c(),
                          ri.data=NULL,
                          ri.name=NULL,
                          ri.pred=FALSE,
                          splitby=1,
                          quiet=FALSE) {

  ## inherits() is safer than class(x) == "...": class() may return a vector,
  ## which errors inside if() on R >= 4.2
  if (inherits(object, "rbart")) {
    if (is.null(ri.data)) {stop('ERROR: Input either a value or a raster in ri.data')}
    if (is.null(ri.name)) {stop('ERROR: Input the correct random effect variable name in the model object in ri.name')}
    xnames <- attr(object$fit[[1]]$data@x, "term.labels")
    if (all(xnames %in% c(names(x.layers), ri.name))) {
      ## Keep (and order) only the layers the model was trained on, minus the
      ## random-intercept term, which is handled separately via ri.data
      x.layers <- x.layers[[xnames[!(xnames == ri.name)]]]
    } else {
      stop("Variable names of RasterStack don't match the requested names")
    }
  }
  if (inherits(object, "bart")) {
    xnames <- attr(object$fit$data@x, "term.labels")
    if (all(xnames %in% names(x.layers))) {
      x.layers <- x.layers[[xnames]]
    } else {
      stop("Variable names of RasterStack don't match the requested names")
    }
  }

  ## Posterior draws for one chunk of cells; shared by the split and unsplit
  ## paths. 'newdata' may be a matrix (unsplit) or a data.frame (split).
  predict_chunk <- function(newdata) {
    if (inherits(object, "bart")) {
      dbarts:::predict.bart(object, newdata)
    } else {
      ## value = 'bart' excludes the random intercept (treated as noise);
      ## value = 'ppd' includes it in the posterior predictive draw
      dbarts:::predict.rbart(object,
                             newdata[, !(colnames(newdata) == ri.name)],
                             group.by = newdata[, ri.name],
                             value = if (ri.pred == FALSE) 'bart' else 'ppd')
    }
  }

  input.matrix <- as.matrix(getValues(x.layers))
  ## Template sized to the full raster; predictions are written back into the
  ## complete-case cells only, leaving NA elsewhere
  blankout <- data.frame(matrix(ncol = (1 + length(quantiles)),
                                nrow = ncell(x.layers[[1]])))
  whichvals <- which(complete.cases(input.matrix))
  input.matrix <- input.matrix[complete.cases(input.matrix), ]
  if (inherits(object, "rbart")) {
    if (inherits(ri.data, "RasterLayer")) {
      ## BUG FIX: input.matrix has already been subset to complete cases, so
      ## the raster's values must be subset the same way before binding
      ## (assumes ri.data has no extra NA cells of its own -- TODO confirm)
      input.matrix <- cbind(input.matrix, values(ri.data)[whichvals])
    } else {
      input.matrix <- cbind(input.matrix, rep(ri.data, nrow(input.matrix)))
    }
    colnames(input.matrix)[ncol(input.matrix)] <- ri.name
  }

  if (splitby == 1) {
    pred.summary <- dfextract(predict_chunk(input.matrix), quant = quantiles)
  } else {
    ## 'chunk_size' (was 'split') no longer shadows base::split() below
    chunk_size <- floor(nrow(input.matrix) / splitby)
    input.df <- data.frame(input.matrix)
    input.str <- split(input.df, (seq_len(nrow(input.df)) - 1) %/% chunk_size)
    for (i in seq_along(input.str)) {
      if (i == 1) {start_time <- Sys.time()}
      ## Replace each raw chunk with its posterior summary as we go
      input.str[[i]] <- dfextract(predict_chunk(input.str[[i]]), quant = quantiles)
      if (i == 1) {
        ## Time the first chunk to give the user an overall estimate
        end_time <- Sys.time()
        cat('Estimated time to total prediction (mins):\n')
        cat(length(input.str) * as.numeric(end_time - start_time) / 60)
        cat('\n')
        if (!quiet) {pb <- txtProgressBar(min = 0, max = length(input.str), style = 3)}
      }
      if (!quiet) {setTxtProgressBar(pb, i)}
    }
    if (length(quantiles) == 0) {
      pred.summary <- data.frame(means = unlist(input.str))
    } else {
      pred.summary <- rbindlist(input.str)
    }
  }

  ## Slot the predicted summaries back into the full-raster template
  output <- blankout
  output[whichvals, ] <- as.matrix(pred.summary)
  ## Reshape each summary column back into raster geometry
  outlist <- lapply(seq_len(ncol(output)), function(x) {
    output.m <- t(matrix(output[, x],
                         nrow = ncol(x.layers),
                         ncol = nrow(x.layers)))
    raster(output.m,
           xmn = xmin(x.layers[[1]]), xmx = xmax(x.layers[[1]]),
           ymn = ymin(x.layers[[1]]), ymx = ymax(x.layers[[1]]),
           crs = x.layers[[1]]@crs)
  })
  stack(outlist)
}
dfextract <- function(df, quant) {
  ## Summarise a posterior-draws matrix (draws x cells) column-wise: per-cell
  ## posterior mean, plus the requested posterior quantiles when 'quant' is
  ## non-empty.
  if (length(quant) == 0) {
    return(colMeans(df))
  }
  ## BUG FIX: colQuantiles() lives in matrixStats, which this file never
  ## attaches; the explicit namespace makes the call work whenever the package
  ## is installed (previously it failed unless the caller had run
  ## library(matrixStats))
  cbind(data.frame(colMeans(df)),
        matrixStats::colQuantiles(df, probs = quant))
}
| /R/predict.R | no_license | yangxhcaf/embarcadero | R | false | false | 6,132 | r |
#' @title predict() for spatial use of BART models
#'
#' @description
#' A predict() wrapper for combining BART models with spatial input data, to generate a Raster or RasterStack of predicted outputs. This now includes the ability to predict from random intercept models, which can be used to deal with clustering in space and time of outcome variables!
#'
#' @param object A BART model object or riBART model object generated by the dbarts package
#' @param x.layers An object of class RasterStack
#' @param quantiles Include the extraction of quantiles (e.g. 5\% and 95\% credible interval) from the posterior
#' @param ri.data If 'object' is a riBART model, this gives either one consistent value (e.g. a prediction year) or a RasterLayer for the random effect
#' @param ri.name The name of the random intercept in the riBART model
#' @param ri.pred Should the random intercept be *included* in the prediction value or dropped? Defaults to FALSE (treats the random intercept as noise to be excluded)
#' @param splitby If set to a value higher than 1, will split your dataset into approximately n divisible chunks
#' @param quiet No progress bars
#'
#' @return A RasterStack: posterior mean in the first layer, plus one layer per
#' requested quantile.
#'
#' @export
#'
############################
predict2.bart <- function(object,
                          x.layers,
                          quantiles=c(),
                          ri.data=NULL,
                          ri.name=NULL,
                          ri.pred=FALSE,
                          splitby=1,
                          quiet=FALSE) {

  ## inherits() is safer than class(x) == "...": class() may return a vector,
  ## which errors inside if() on R >= 4.2
  if (inherits(object, "rbart")) {
    if (is.null(ri.data)) {stop('ERROR: Input either a value or a raster in ri.data')}
    if (is.null(ri.name)) {stop('ERROR: Input the correct random effect variable name in the model object in ri.name')}
    xnames <- attr(object$fit[[1]]$data@x, "term.labels")
    if (all(xnames %in% c(names(x.layers), ri.name))) {
      ## Keep (and order) only the layers the model was trained on, minus the
      ## random-intercept term, which is handled separately via ri.data
      x.layers <- x.layers[[xnames[!(xnames == ri.name)]]]
    } else {
      stop("Variable names of RasterStack don't match the requested names")
    }
  }
  if (inherits(object, "bart")) {
    xnames <- attr(object$fit$data@x, "term.labels")
    if (all(xnames %in% names(x.layers))) {
      x.layers <- x.layers[[xnames]]
    } else {
      stop("Variable names of RasterStack don't match the requested names")
    }
  }

  ## Posterior draws for one chunk of cells; shared by the split and unsplit
  ## paths. 'newdata' may be a matrix (unsplit) or a data.frame (split).
  predict_chunk <- function(newdata) {
    if (inherits(object, "bart")) {
      dbarts:::predict.bart(object, newdata)
    } else {
      ## value = 'bart' excludes the random intercept (treated as noise);
      ## value = 'ppd' includes it in the posterior predictive draw
      dbarts:::predict.rbart(object,
                             newdata[, !(colnames(newdata) == ri.name)],
                             group.by = newdata[, ri.name],
                             value = if (ri.pred == FALSE) 'bart' else 'ppd')
    }
  }

  input.matrix <- as.matrix(getValues(x.layers))
  ## Template sized to the full raster; predictions are written back into the
  ## complete-case cells only, leaving NA elsewhere
  blankout <- data.frame(matrix(ncol = (1 + length(quantiles)),
                                nrow = ncell(x.layers[[1]])))
  whichvals <- which(complete.cases(input.matrix))
  input.matrix <- input.matrix[complete.cases(input.matrix), ]
  if (inherits(object, "rbart")) {
    if (inherits(ri.data, "RasterLayer")) {
      ## BUG FIX: input.matrix has already been subset to complete cases, so
      ## the raster's values must be subset the same way before binding
      ## (assumes ri.data has no extra NA cells of its own -- TODO confirm)
      input.matrix <- cbind(input.matrix, values(ri.data)[whichvals])
    } else {
      input.matrix <- cbind(input.matrix, rep(ri.data, nrow(input.matrix)))
    }
    colnames(input.matrix)[ncol(input.matrix)] <- ri.name
  }

  if (splitby == 1) {
    pred.summary <- dfextract(predict_chunk(input.matrix), quant = quantiles)
  } else {
    ## 'chunk_size' (was 'split') no longer shadows base::split() below
    chunk_size <- floor(nrow(input.matrix) / splitby)
    input.df <- data.frame(input.matrix)
    input.str <- split(input.df, (seq_len(nrow(input.df)) - 1) %/% chunk_size)
    for (i in seq_along(input.str)) {
      if (i == 1) {start_time <- Sys.time()}
      ## Replace each raw chunk with its posterior summary as we go
      input.str[[i]] <- dfextract(predict_chunk(input.str[[i]]), quant = quantiles)
      if (i == 1) {
        ## Time the first chunk to give the user an overall estimate
        end_time <- Sys.time()
        cat('Estimated time to total prediction (mins):\n')
        cat(length(input.str) * as.numeric(end_time - start_time) / 60)
        cat('\n')
        if (!quiet) {pb <- txtProgressBar(min = 0, max = length(input.str), style = 3)}
      }
      if (!quiet) {setTxtProgressBar(pb, i)}
    }
    if (length(quantiles) == 0) {
      pred.summary <- data.frame(means = unlist(input.str))
    } else {
      pred.summary <- rbindlist(input.str)
    }
  }

  ## Slot the predicted summaries back into the full-raster template
  output <- blankout
  output[whichvals, ] <- as.matrix(pred.summary)
  ## Reshape each summary column back into raster geometry
  outlist <- lapply(seq_len(ncol(output)), function(x) {
    output.m <- t(matrix(output[, x],
                         nrow = ncol(x.layers),
                         ncol = nrow(x.layers)))
    raster(output.m,
           xmn = xmin(x.layers[[1]]), xmx = xmax(x.layers[[1]]),
           ymn = ymin(x.layers[[1]]), ymx = ymax(x.layers[[1]]),
           crs = x.layers[[1]]@crs)
  })
  stack(outlist)
}
# Summarise posterior prediction draws column-wise.
#
# df:    matrix/data frame of draws (rows = draws, columns = cells).
# quant: numeric vector of quantile probabilities; when empty, only the
#        column means are returned, otherwise means are combined with the
#        requested column quantiles (via matrixStats::colQuantiles).
dfextract <- function(df, quant) {
  if (length(quant) == 0) {
    return(colMeans(df))
  }
  cbind(data.frame(colMeans(df)),
        colQuantiles(df, probs = quant))
}
|
# Workflows examples
# Jeffrey C. Oliver
# jcoliver@email.arizona.edu
# 2021-03-01
rm(list = ls())
################################################################################
# Workflows: preprocessing + modeling
library(tidymodels)
# Loads in data with log-transformed Sale_Price and testing/training split
load(file = "ames.RData")
# Create just the model
lm_model <- linear_reg() %>% # linear_reg() is a parsnip object
set_engine("lm")
# Create a workflow object, adding the model
lm_workflow <- workflow() %>% # from the workflows package
add_model(lm_model)
# Add a pre-processing step
lm_workflow <- lm_workflow %>%
add_formula(Sale_Price ~ Longitude + Latitude)
# Workflow objects can use fit() with data
lm_fit <- fit(lm_workflow, ames_train)
# Predict the first five values on the testing data
predict(lm_fit, ames_test %>% slice(1:5))
# Models and preprocessors can be updated with update_* functions:
lm_fit_long <- lm_fit %>%
update_formula(Sale_Price ~ Longitude)
# Instead of formula, can use parsnip recipe (but for this example, need to
# remove the formula first)
# Start by re-creating the recipe from ames-predictions.R:
# includes predictor transformations and the shape of the model
ames_recipe <- ames_train %>%
recipe(Sale_Price ~ Neighborhood + Gr_Liv_Area + Year_Built + Bldg_Type +
Latitude + Longitude) %>%
step_log(Gr_Liv_Area, base = 10) %>%
  # If there are Neighborhoods only represented by a few rows, collapse them
# all into an "Other" category
step_other(Neighborhood, threshold = 0.01) %>%
# Set up dummy variables for any categorical predictors; in base R, this
# would happen in the background, but here we do it explicitly
step_dummy(all_nominal()) %>%
  # Add interaction term to the recipe; since the categorical Bldg_Type has
# been transformed to a series of dummy variables, we use the selector
# function starts_with to help out
step_interact(~ Gr_Liv_Area:starts_with("Bldg_Type_")) %>%
# Predictors may not have linear relationship with response. For example,
# latitude is better fit by a spline (a "natural" spline, hence ns), so we
# can add that relationship. We'll use a 20-step spline
step_ns(Latitude, Longitude, deg_free = 20)
# Now use this recipe in the workflow (removing old model first)
lm_workflow <- lm_workflow %>%
remove_formula() %>%
add_recipe(ames_recipe)
# Previously, (ames-predictions.R), we used prep, bake, fit/predict, but when
# using a workflow, we can run fit() (actually, this will be fit-workflow()),
# which does prep + bake + fit
lm_fit <- fit(lm_workflow, data = ames_train)
# For predicting, we also do not need to run bake & predict separately, the
# bake is implied when running predict() on a workflow (actually ends up running
# predict-workflow())
predict(lm_fit, new_data = ames_test %>% slice(1:5))
# pull_* functions can extract particular elements from the fit object:
lm_fit %>%
pull_workflow_fit() %>%
broom::tidy() %>% # Clean up model fit info
slice(1:10)
# Assess model performance (of the large model, not the one based solely on
# Longitude & Latitude)
# Note: not clear why we have to drop the Sale_Price in tibble passed to
# new_data (excluding the select step does not appear to influence results)
ames_test_predict <- predict(lm_fit,
new_data = ames_test %>% select(-Sale_Price))
# Combine these predictions with observed values
ames_test_predict <- bind_cols(ames_test_predict,
ames_test %>% select(Sale_Price))
# Plot predicted vs. observed
ggplot(data = ames_test_predict, mapping = aes(x = Sale_Price, y = .pred)) +
geom_abline(lty = 2) +
geom_point(alpha = 0.5) +
labs(x = "Observed Sale Price (log10)", y = "Predicted Sale Price (log10)") +
tune::coord_obs_pred() # Sets x & y limits to be the same
# Calculate root mean squared error (RMSE)
rmse(ames_test_predict, truth = Sale_Price, estimate = .pred)
# To calculate multiple metrics, create a metric set via yardstick::metric_set
# Add RMSE, R^2, and mean absolute error
ames_metrics <- metric_set(rmse, rsq, mae)
ames_metrics(ames_test_predict, truth = Sale_Price, estimate = .pred)
| /ames-workflows.R | no_license | jcoliver/tmwr | R | false | false | 4,212 | r | # Workflows examples
# Jeffrey C. Oliver
# jcoliver@email.arizona.edu
# 2021-03-01
rm(list = ls())
################################################################################
# Workflows: preprocessing + modeling
library(tidymodels)
# Loads in data with log-transformed Sale_Price and testing/training split
load(file = "ames.RData")
# Create just the model
lm_model <- linear_reg() %>% # linear_reg() is a parsnip object
set_engine("lm")
# Create a workflow object, adding the model
lm_workflow <- workflow() %>% # from the workflows package
add_model(lm_model)
# Add a pre-processing step
lm_workflow <- lm_workflow %>%
add_formula(Sale_Price ~ Longitude + Latitude)
# Workflow objects can use fit() with data
lm_fit <- fit(lm_workflow, ames_train)
# Predict the first five values on the testing data
predict(lm_fit, ames_test %>% slice(1:5))
# Models and preprocessors can be updated with update_* functions:
lm_fit_long <- lm_fit %>%
update_formula(Sale_Price ~ Longitude)
# Instead of formula, can use parsnip recipe (but for this example, need to
# remove the formula first)
# Start by re-creating the recipe from ames-predictions.R:
# includes predictor transformations and the shape of the model
ames_recipe <- ames_train %>%
recipe(Sale_Price ~ Neighborhood + Gr_Liv_Area + Year_Built + Bldg_Type +
Latitude + Longitude) %>%
step_log(Gr_Liv_Area, base = 10) %>%
  # If there are Neighborhoods only represented by a few rows, collapse them
# all into an "Other" category
step_other(Neighborhood, threshold = 0.01) %>%
# Set up dummy variables for any categorical predictors; in base R, this
# would happen in the background, but here we do it explicitly
step_dummy(all_nominal()) %>%
  # Add interaction term to the recipe; since the categorical Bldg_Type has
# been transformed to a series of dummy variables, we use the selector
# function starts_with to help out
step_interact(~ Gr_Liv_Area:starts_with("Bldg_Type_")) %>%
# Predictors may not have linear relationship with response. For example,
# latitude is better fit by a spline (a "natural" spline, hence ns), so we
# can add that relationship. We'll use a 20-step spline
step_ns(Latitude, Longitude, deg_free = 20)
# Now use this recipe in the workflow (removing old model first)
lm_workflow <- lm_workflow %>%
remove_formula() %>%
add_recipe(ames_recipe)
# Previously, (ames-predictions.R), we used prep, bake, fit/predict, but when
# using a workflow, we can run fit() (actually, this will be fit-workflow()),
# which does prep + bake + fit
lm_fit <- fit(lm_workflow, data = ames_train)
# For predicting, we also do not need to run bake & predict separately, the
# bake is implied when running predict() on a workflow (actually ends up running
# predict-workflow())
predict(lm_fit, new_data = ames_test %>% slice(1:5))
# pull_* functions can extract particular elements from the fit object:
lm_fit %>%
pull_workflow_fit() %>%
broom::tidy() %>% # Clean up model fit info
slice(1:10)
# Assess model performance (of the large model, not the one based solely on
# Longitude & Latitude)
# Note: not clear why we have to drop the Sale_Price in tibble passed to
# new_data (excluding the select step does not appear to influence results)
ames_test_predict <- predict(lm_fit,
new_data = ames_test %>% select(-Sale_Price))
# Combine these predictions with observed values
ames_test_predict <- bind_cols(ames_test_predict,
ames_test %>% select(Sale_Price))
# Plot predicted vs. observed
ggplot(data = ames_test_predict, mapping = aes(x = Sale_Price, y = .pred)) +
geom_abline(lty = 2) +
geom_point(alpha = 0.5) +
labs(x = "Observed Sale Price (log10)", y = "Predicted Sale Price (log10)") +
tune::coord_obs_pred() # Sets x & y limits to be the same
# Calculate root mean squared error (RMSE)
rmse(ames_test_predict, truth = Sale_Price, estimate = .pred)
# To calculate multiple metrics, create a metric set via yardstick::metric_set
# Add RMSE, R^2, and mean absolute error
ames_metrics <- metric_set(rmse, rsq, mae)
ames_metrics(ames_test_predict, truth = Sale_Price, estimate = .pred)
|
#' @title
#' Is Satan frightening or not
#'
#' @description
#' This function returns a funny answer depending on whether
#' you choose 'yes' or 'no' to the famous question:
#' Is Satan frightening?
#'
#' @param x 'yes'/'no'
#'
#' @usage
#' is_Satan_frightening(x)
#'
#' @return a string depending on whether you choose 'yes' or 'no'
#'
#' @examples
#' is_Satan_frightening('yes')
#' is_Satan_frightening('no')
#'
#' @export
is_Satan_frightening <- function(x) {
  # Return a joke answer to the question "Is Satan frightening?".
  #
  # x: either "yes" or "no"; any other value yields a usage hint.
  # Returns a single character string.
  if (x == "yes") {
    # Fixed typo in the user-facing message ("Belive" -> "Believe").
    return("Believe me, you will get to see Satan's funny nature too, once you work harder")
  }
  if (x == "no") {
    return("You haven't yet started following Satan's footsteps, right?")
  }
  "enter either 'yes' or 'no'"
}
| /R/is_Satan_frightening.R | no_license | indrag49/demopackage | R | false | false | 729 | r | #' @title
#' Is Satan frightening or not
#'
#' @description
#' This function returns a funny answer depending on whether
#' you choose 'yes' or 'no' to the famous question:
#' Is Satan frightening?
#'
#' @param x 'yes'/'no'
#'
#' @usage
#' is_Satan_frightening(x)
#'
#' @return a string depending on whether you choose 'yes' or 'no'
#'
#' @examples
#' is_Satan_frightening('yes')
#' is_Satan_frightening('no')
#'
#' @export
is_Satan_frightening <- function(x){
if(x=="yes") return("Belive me, you will get to see Satan's funny nature too, once you work harder")
else if (x=="no") return("You haven't yet started following Satan's footsteps, right?")
else return("enter either 'yes' or 'no'")
}
|
# Length of the Koch curve after a number of iterations, starting from an
# initial length: each iteration multiplies the length by 4/3.
CalcularLongitud <- function(iteraciones, longitudI) {
  longitudI * (4 / 3)^(iteraciones - 1)
}
# Absolute error between the theoretical Koch perimeter, 3 * (4/3)^(n - 1),
# and an approximated value.
errorPerimetro <- function(n, aprox) {
  teorico <- 3 * (4 / 3)^(n - 1)
  abs(teorico - aprox)
}
# Theoretical perimeter of the Koch curve after n iterations (unit side,
# three sides): 3 * (4/3)^(n - 1).
valorTeorico <- function(n) {
  3 * (4 / 3)^(n - 1)
}
# Euclidean distance between the points (x1, y1) and (x2, y2).
distancia <- function(x1, y1, x2, y2) {
  dx <- x2 - x1
  dy <- y2 - y1
  sqrt(dx^2 + dy^2)
}
# Plot the Koch curve with n iterations.
# Relies on koch() (defined elsewhere / external package) to generate the
# vertex matrix -- assumes it returns a two-column matrix of (x, y)
# coordinates; TODO confirm against the koch() source.
curvaKoch<-function(n){
# Vertices of the unit-side Koch curve after n iterations
vertices <- koch(side = 1, niter = n)
# Bold plot title, e.g. "Curva de Koch con n = 4"
text <- bquote(bold(paste("Curva de Koch con n = ", .(n))))
# Draw the open polyline with a 1:1 aspect ratio
plot(vertices[, 1], vertices[, 2], type = "l", asp = TRUE, main = text, xlab = "x", ylab = "y", col = "black")
# Close the curve: join the last vertex back to the first
segments(vertices[nrow(vertices), 1], vertices[nrow(vertices), 2], vertices[1, 1], vertices[1, 2], col = "black")
}
# Plot the Koch curve with n iterations and print its computed perimeter
# alongside the theoretical value 3 * (4/3)^(n - 1) and the absolute error.
graficar <- function(n) {
  curvaKoch(n)
  total <- 0
  # Vertex matrix of the curve. Use the full argument name `niter`:
  # the original call wrote `n = n`, which only worked through R's
  # partial argument matching and is inconsistent with curvaKoch().
  x <- koch(side = 1, niter = n)
  # Sum the edge lengths between consecutive vertices
  for (i in 2:nrow(x)) {
    total <- total + distancia(x[i - 1, 1], x[i - 1, 2], x[i, 1], x[i, 2])
  }
  # Closing edge: last vertex back to the first
  total <- total + distancia(x[nrow(x), 1], x[nrow(x), 2], x[1, 1], x[1, 2])
  cat("El perimetro de la curva de Koch con n = ", n, " es: ",
      format(round(total, 4), nsmall = 4),
      "y el valor teorico es ",
      format(round(valorTeorico(n), 4), nsmall = 4),
      "(Error de", format(round(errorPerimetro(n, total), 4)), " )")
}
graficar(4)
| /curvaKoch.R | no_license | dleonardo-gomez/AnalisisNumerico | R | false | false | 1,270 | r | CalcularLongitud <- function(iteraciones, longitudI){
longitud <- longitudI*((4/3)^(iteraciones-1))
return(longitud)
}
errorPerimetro<-function (n,aprox){
real= 3*(4/3)^(n-1)
error = abs(real-aprox)
return (error)
}
valorTeorico<-function (n){
return (3*(4/3)^(n-1))
}
distancia<-function(x1, y1, x2, y2){
return (sqrt ( (x2-x1)^2 + (y2-y1)^2 ) )
}
curvaKoch<-function(n){
vertices <- koch(side = 1, niter = n)
text <- bquote(bold(paste("Curva de Koch con n = ", .(n))))
plot(vertices[, 1], vertices[, 2], type = "l", asp = TRUE, main = text, xlab = "x", ylab = "y", col = "black")
segments(vertices[nrow(vertices), 1], vertices[nrow(vertices), 2], vertices[1, 1], vertices[1, 2], col = "black")
}
graficar<-function(n){
curvaKoch(n)
total=0
x=koch(side = 1, n = n)
for(i in 2:nrow(x)){
x1=x[i-1,1]
y1=x[i-1,2]
x2=x[i,1]
y2=x[i,2]
total=total+distancia (x1, y1, x2, y2)
}
x1=x[nrow(x),1]
y1=x[nrow(x),2]
x2=x[1,1]
y2= x[1,2]
total=total+distancia (x1, y1, x2, y2)
cat("El perimetro de la curva de Koch con n = ", n, " es: ", format(round(total,4), nsmall=4), "y el valor teorico es ",format(round(valorTeorico(n),4), nsmall=4), "(Error de",format(round(errorPerimetro(n,total),4))," )")
}
graficar(4)
|
library(latex2exp)
source("nicefigs.R")
# Polynomials
# Build a 100 x 6 design matrix of polynomial basis functions 1, x, ..., x^5
# evaluated on an equally spaced grid over [-1, 1].
X <- matrix(1, ncol=6, nrow=100)
X[,2] <- x <- seq(-1,1,l=100)
X[,3] <- x^2
X[,4] <- x^3
X[,5] <- x^4
X[,6] <- x^5
# Scale each column by its maximum so every basis function peaks at 1
X <- sweep(X, 2, FUN="/", STATS=apply(X,2,max))
# savepdf()/endpdf() come from nicefigs.R (sourced above)
savepdf("../figures/polybasis")
# One line per basis function, with LaTeX-rendered axis label and legend
matplot(x, X, lty=1, type="l", main="Polynomial basis functions",
ylab=latex2exp("b_i(x)"))
legend("bottomright",col=1:6,lty=1,
legend=c(latex2exp("b_1(x)=1"),
latex2exp("b_2(x)=x"),
latex2exp("b_3(x)=x^2"),
latex2exp("b_4(x)=x^3"),
latex2exp("b_5(x)=x^4"),
latex2exp("b_6(x)=x^5")))
endpdf()
# Truncated power terms for cubic splines
X <- matrix(1, ncol=6, nrow=100)
X[,2] <- x <- seq(-1,1,l=100)
X[,3] <- x^2
X[,4] <- x^3
X[,5] <- pmax((x+0.5)^3,0)
X[,6] <- pmax((x-0.5)^3,0)
X <- sweep(X, 2, FUN="/", STATS=apply(X,2,max))
savepdf("../figures/truncpolybasis")
matplot(x, X, lty=1, type="l", main="Truncated power cubic spline basis functions",
ylab=latex2exp("b_i(x)"))
legend("bottomright",col=1:6,lty=1,
legend=c(latex2exp("b_1(x)=1"),
latex2exp("b_2(x)=x"),
latex2exp("b_3(x)=x^2"),
latex2exp("b_4(x)=x^3"),
latex2exp("b_5(x)=(x+0.5)^3_+"),
latex2exp("b_6(x)=(x-0.5)^3_+")))
endpdf()
# Natural splines
library(splines)
X <- matrix(1, ncol=6, nrow=100)
x <- seq(-1,1,l=100)
X[,2:6] <- ns(x, df=5)
X <- sweep(X, 2, FUN="/", STATS=apply(X,2,max))
savepdf("../figures/nsbasis")
matplot(x, X, lty=1, type="l", main="Natural cubic spline basis functions",
ylab=latex2exp("b_i(x)"))
legend("bottomright",col=1:6,lty=1,
legend=c(latex2exp("b_1(x)"),
latex2exp("b_2(x)"),
latex2exp("b_3(x)"),
latex2exp("b_4(x)"),
latex2exp("b_5(x)"),
latex2exp("b_6(x)")))
endpdf()
# B splines
library(splines)
X <- matrix(1, ncol=6, nrow=100)
x <- seq(-1,1,l=100)
X[,2:6] <- bs(x, df=5)
X <- sweep(X, 2, FUN="/", STATS=apply(X,2,max))
savepdf("../figures/bsbasis")
matplot(x, X, lty=1, type="l", main="Cubic B-spline basis functions",
ylab=latex2exp("b_i(x)"))
legend("bottomright",col=1:6,lty=1,
legend=c(latex2exp("b_1(x)"),
latex2exp("b_2(x)"),
latex2exp("b_3(x)"),
latex2exp("b_4(x)"),
latex2exp("b_5(x)"),
latex2exp("b_6(x)")))
endpdf()
| /slides/3-basisfunctions.R | no_license | pasteur90/BusinessAnalytics | R | false | false | 2,471 | r | library(latex2exp)
source("nicefigs.R")
# Polynomials
X <- matrix(1, ncol=6, nrow=100)
X[,2] <- x <- seq(-1,1,l=100)
X[,3] <- x^2
X[,4] <- x^3
X[,5] <- x^4
X[,6] <- x^5
X <- sweep(X, 2, FUN="/", STATS=apply(X,2,max))
savepdf("../figures/polybasis")
matplot(x, X, lty=1, type="l", main="Polynomial basis functions",
ylab=latex2exp("b_i(x)"))
legend("bottomright",col=1:6,lty=1,
legend=c(latex2exp("b_1(x)=1"),
latex2exp("b_2(x)=x"),
latex2exp("b_3(x)=x^2"),
latex2exp("b_4(x)=x^3"),
latex2exp("b_5(x)=x^4"),
latex2exp("b_6(x)=x^5")))
endpdf()
# Truncated power terms for cubic splines
X <- matrix(1, ncol=6, nrow=100)
X[,2] <- x <- seq(-1,1,l=100)
X[,3] <- x^2
X[,4] <- x^3
X[,5] <- pmax((x+0.5)^3,0)
X[,6] <- pmax((x-0.5)^3,0)
X <- sweep(X, 2, FUN="/", STATS=apply(X,2,max))
savepdf("../figures/truncpolybasis")
matplot(x, X, lty=1, type="l", main="Truncated power cubic spline basis functions",
ylab=latex2exp("b_i(x)"))
legend("bottomright",col=1:6,lty=1,
legend=c(latex2exp("b_1(x)=1"),
latex2exp("b_2(x)=x"),
latex2exp("b_3(x)=x^2"),
latex2exp("b_4(x)=x^3"),
latex2exp("b_5(x)=(x+0.5)^3_+"),
latex2exp("b_6(x)=(x-0.5)^3_+")))
endpdf()
# Natural splines
library(splines)
X <- matrix(1, ncol=6, nrow=100)
x <- seq(-1,1,l=100)
X[,2:6] <- ns(x, df=5)
X <- sweep(X, 2, FUN="/", STATS=apply(X,2,max))
savepdf("../figures/nsbasis")
matplot(x, X, lty=1, type="l", main="Natural cubic spline basis functions",
ylab=latex2exp("b_i(x)"))
legend("bottomright",col=1:6,lty=1,
legend=c(latex2exp("b_1(x)"),
latex2exp("b_2(x)"),
latex2exp("b_3(x)"),
latex2exp("b_4(x)"),
latex2exp("b_5(x)"),
latex2exp("b_6(x)")))
endpdf()
# B splines
library(splines)
X <- matrix(1, ncol=6, nrow=100)
x <- seq(-1,1,l=100)
X[,2:6] <- bs(x, df=5)
X <- sweep(X, 2, FUN="/", STATS=apply(X,2,max))
savepdf("../figures/bsbasis")
matplot(x, X, lty=1, type="l", main="Cubic B-spline basis functions",
ylab=latex2exp("b_i(x)"))
legend("bottomright",col=1:6,lty=1,
legend=c(latex2exp("b_1(x)"),
latex2exp("b_2(x)"),
latex2exp("b_3(x)"),
latex2exp("b_4(x)"),
latex2exp("b_5(x)"),
latex2exp("b_6(x)")))
endpdf()
|
library(tidyverse)
downloads <- read_csv(here::here("data", "our-monthly-downloads.csv"))
# crude approximation of our CRAN packages
on_cran <- downloads %>%
filter(downloads > 0)
# Fetch every pull request (open, closed, and merged) for owner/repo via the
# GitHub REST API and return a tibble with one row per PR, with the *_at
# timestamp columns parsed as Date. Returns NULL when the repo has no PRs.
get_pull_requests <- function(owner, repo) {
prs <- gh::gh(
"GET /repos/:owner/:repo/pulls",
owner = owner,
repo = repo,
state = "all",
.limit = Inf
)
# Strip gh's response attributes so `prs` is a plain list
attributes(prs) <- NULL
# Guard against an empty API response (gh appears to yield "" in that
# case -- NOTE(review): confirm against the gh package docs)
if (identical(prs, "")) {
return(NULL)
}
# Progress line: which repo we are on and how many PRs it has
cat(owner, "/", repo, " ", length(prs), " PRs\n", sep = "")
# One row per PR; hoist() extracts the named fields from each PR list
prs <- tibble(owner = owner, repo = repo, pr = prs) %>%
hoist(pr,
title = "title",
number = "number",
created_at = "created_at",
updated_at = "updated_at",
closed_at = "closed_at",
merged_at = "merged_at"
)
# Convert the ISO timestamp strings to Date
prs %>%
mutate_at(vars(ends_with("_at")), as.Date)
}
prs <- map2_dfr(on_cran$owner, on_cran$package, get_pull_requests)
in_play <- function(d) d >= as.Date("2018-07-01")
prs_in_play <- prs %>%
filter(in_play(created_at) | in_play(updated_at) |
in_play(closed_at) | in_play(merged_at))
nrow(prs_in_play) # 5953
| /R/zz-pull-requests.R | no_license | rserran/2019-07_useR-toulouse-usethis | R | false | false | 1,110 | r | library(tidyverse)
downloads <- read_csv(here::here("data", "our-monthly-downloads.csv"))
# crude approximation of our CRAN packages
on_cran <- downloads %>%
filter(downloads > 0)
get_pull_requests <- function(owner, repo) {
prs <- gh::gh(
"GET /repos/:owner/:repo/pulls",
owner = owner,
repo = repo,
state = "all",
.limit = Inf
)
attributes(prs) <- NULL
if (identical(prs, "")) {
return(NULL)
}
cat(owner, "/", repo, " ", length(prs), " PRs\n", sep = "")
prs <- tibble(owner = owner, repo = repo, pr = prs) %>%
hoist(pr,
title = "title",
number = "number",
created_at = "created_at",
updated_at = "updated_at",
closed_at = "closed_at",
merged_at = "merged_at"
)
prs %>%
mutate_at(vars(ends_with("_at")), as.Date)
}
prs <- map2_dfr(on_cran$owner, on_cran$package, get_pull_requests)
in_play <- function(d) d >= as.Date("2018-07-01")
prs_in_play <- prs %>%
filter(in_play(created_at) | in_play(updated_at) |
in_play(closed_at) | in_play(merged_at))
nrow(prs_in_play) # 5953
|
################################################################################
context("AUC")
set.seed(SEED)
################################################################################
N <- 100
x0 <- rnorm(N, mean = runif(1))
x1 <- rnorm(N, mean = 2 * runif(1))
x <- c(x0, x1)
y <- c(rep(-1, N), rep(1, N))
################################################################################
auc.conf <- AUCBoot(x, y, seed = 1)
auc.conf2 <- AUCBoot(x, y, seed = 1)
test_that("Same results of AUC with seed", {
expect_equal(auc.conf, auc.conf2)
})
################################################################################
# Degenerate inputs: fully tied scores must give AUC = 0.5 and perfectly
# separating scores must give AUC = 1. AUCBoot() should warn in both cases
# and return a length-4 vector -- presumably (estimate, CI lower, CI upper,
# sd); confirm against the AUCBoot documentation.
test_that("Same results of AUC in particular cases", {
expect_equal(AUC(c(0, 0), 0:1), 0.5) # Equality of scores
expect_equal(AUC(c(0.2, 0.1, 1), c(-1, -1, 1)), 1) # Perfect AUC
expect_warning(auc1 <- AUCBoot(c(0, 0), 0:1))
# All bootstrap summaries collapse to 0.5 with zero spread
expect_equivalent(auc1, c(rep(0.5, 3), 0))
expect_warning(auc2 <- AUCBoot(c(0.2, 0.1, 1), c(-1, -1, 1)))
expect_equivalent(auc2, c(rep(1, 3), 0))
})
################################################################################
test_that("Same as wilcox test", {
expect_equivalent(AUC(x, y), wilcox.test(x1, x0)$statistic / N^2)
})
################################################################################
test_that("Same as package ModelMetrics (AUC < 0.5)", {
for (i in 1:5) {
N <- 10^i
x4 <- c(sample(10, size = N, replace = TRUE),
sample(5, size = N, replace = TRUE))
y4 <- rep(0:1, each = N)
expect_equivalent(AUC(x4, y4), ModelMetrics::auc(y4, x4))
}
})
################################################################################
| /tests/testthat/test-AUC.R | no_license | gridl/bigstatsr | R | false | false | 1,669 | r | ################################################################################
context("AUC")
set.seed(SEED)
################################################################################
N <- 100
x0 <- rnorm(N, mean = runif(1))
x1 <- rnorm(N, mean = 2 * runif(1))
x <- c(x0, x1)
y <- c(rep(-1, N), rep(1, N))
################################################################################
auc.conf <- AUCBoot(x, y, seed = 1)
auc.conf2 <- AUCBoot(x, y, seed = 1)
test_that("Same results of AUC with seed", {
expect_equal(auc.conf, auc.conf2)
})
################################################################################
test_that("Same results of AUC in particular cases", {
expect_equal(AUC(c(0, 0), 0:1), 0.5) # Equality of scores
expect_equal(AUC(c(0.2, 0.1, 1), c(-1, -1, 1)), 1) # Perfect AUC
expect_warning(auc1 <- AUCBoot(c(0, 0), 0:1))
expect_equivalent(auc1, c(rep(0.5, 3), 0))
expect_warning(auc2 <- AUCBoot(c(0.2, 0.1, 1), c(-1, -1, 1)))
expect_equivalent(auc2, c(rep(1, 3), 0))
})
################################################################################
test_that("Same as wilcox test", {
expect_equivalent(AUC(x, y), wilcox.test(x1, x0)$statistic / N^2)
})
################################################################################
test_that("Same as package ModelMetrics (AUC < 0.5)", {
for (i in 1:5) {
N <- 10^i
x4 <- c(sample(10, size = N, replace = TRUE),
sample(5, size = N, replace = TRUE))
y4 <- rep(0:1, each = N)
expect_equivalent(AUC(x4, y4), ModelMetrics::auc(y4, x4))
}
})
################################################################################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mSpatial.R
\name{yoverx}
\alias{yoverx}
\title{Height / width ratio}
\usage{
yoverx(geom, osm = T)
}
\arguments{
\item{geom}{an \code{sp} geometry object. Since map objects have varying
aspect ratios, saving them to files results in lots of whitespace; this
function enables you to specify the output dimensions in the same ratio as
the \code{sp} object.}
}
\description{
Quick function for getting the 1/aspect ratio of a geometry
}
\examples{
x = 800 # width in pixels
y = yoverx(basemap)
png(filename, width = x, height = y)
}
| /man/yoverx.Rd | no_license | townleym/mSpatial | R | false | true | 608 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mSpatial.R
\name{yoverx}
\alias{yoverx}
\title{Height / width ratio}
\usage{
yoverx(geom, osm = T)
}
\arguments{
\item{geom}{an \code{sp} geometry object. Since map objects have varying
aspect ratios, saving them to files results in lots of whitespace; this
function enables you to specify the output dimensions in the same ratio as
the \code{sp} object.}
}
\description{
Quick function for getting the 1/aspect ratio of a geometry
}
\examples{
x = 800 # width in pixels
y = yoverx(basemap)
png(filename, width = x, height = y)
}
|
# Using ggmap
# If you haven't installed ggmap yet, then uncomment the below line or just remove # from the next line and run it
#install.packages("ggmap")
library(ggmap)
#Coordinates of Dhaka
code = geocode("Dhaka")
str(code)
code
# we will uninstall and install old ggmap
#remove.packages("ggplot2")
library(devtools)
#install_github("hadley/ggplot2@v2.2.0")
dhaka_location = c(lon = code[1][,], lat = code[2][,])
#Download the map of Dhaka
dhaka_map = get_map(location = dhaka_location, zoom = 7)
#Displaying the map of Dhaka
ggmap(dhaka_map)
# zoomed in map
dhaka_map2 = get_map(location = dhaka_location, zoom = 13)
#Displaying the map of Dhaka
ggmap(dhaka_map2)
# zoomed out map
dhaka_map3 = get_map(location = dhaka_location, zoom = 5)
#Displaying the map of Dhaka
ggmap(dhaka_map3)
# Importing an excel file
bd_val = read.csv("F:/Hands-on Geospatial Analysis Using R and QGIS/Chapter 2/Data/r_val.csv", stringsAsFactors = FALSE)
str(bd_val)
bd_val$ind = factor(bd_val$ind)
# Plotting point data
ggmap(dhaka_map) +
geom_point(aes(lon, lat), data = bd_val)
# Coloring point data
ggmap(dhaka_map) +
geom_point(aes(lon, lat, color = value), data = bd_val)
# change ind column to factor type
bd_val$ind = factor(bd_val$ind)
# Coloring point data according to factor data
ggmap(dhaka_map) +
geom_point(aes(lon, lat, color = ind), data = bd_val)
# Map size to value
ggmap(dhaka_map) +
geom_point(aes(lon, lat, size = value), data = bd_val)
# Different options for get_map()
?get_map()
# toner-background
dhaka_map_toner = get_map(location = dhaka_location, zoom = 7, maptype="toner-background")
ggmap(dhaka_map_toner)
# satellite-background
dhaka_map_satellite = get_map(location = dhaka_location, zoom = 7, maptype="satellite")
ggmap(dhaka_map_satellite)
# Facetting
ggmap(dhaka_map, base_layer=
ggplot(bd_val, aes(lon, lat, color = value))) +
geom_point() + facet_wrap(~ind)
# Adding color argument
ggmap(dhaka_map, base_layer=
ggplot(bd_val, aes(lon, lat, color = ind))) +
geom_point() + facet_wrap(~ind)
# qmplot
qmplot(lon, lat, data = bd_val, geom = "point", color = ind) + facet_wrap(~ind)
# SpatialPoints
library(sp)
library(rgdal)
library(maptools)
map = readOGR("F:/Hands-on Geospatial Analysis Using R and QGIS/Chapter 2/Data","indicator_point")
plot(map)
class(map)
# SpatialLines
highway = readOGR("F:/Hands-on Geospatial Analysis Using R and QGIS/Chapter 2/Data","dhaka_highway_polylines")
plot(highway)
# Let's add background to this
install.packages("OpenStreetMap", depend = T)
#install.packages("rJava")
install.packages("PBSmapping")
#library(rJava)
library(RgoogleMaps)
library(PBSmapping)
map_dhaka = readOGR("F:/Hands-on Geospatial Analysis Using R and QGIS/Chapter 2/Data","dhaka")
plot(map_dhaka)
# convert to polyset
polyset = SpatialPolygons2PolySet(map_dhaka)
head(polyset)
# Compute the bounding box for longitude, latitude points
bounding_box = qbbox(lat = polyset[, "Y"], lon = polyset[, "X"])
# download background map
background_map = GetMap.bbox(bounding_box$lonR, bounding_box$latR)
# Overlaying polygons on a map
PlotPolysOnStaticMap(background_map, polyset, lwd = 3, col = rgb(0.3, 0.6, 0.3, 0.05), add = F)
# we use max.level = 2 to reduce displaying nested structure
str(map_dhaka, max.level = 2)
# load another map
map_bd = readOGR("F:/Hands-on Geospatial Analysis Using R and QGIS/Chapter 2/Data","BGD_adm3_data_re")
head(map_bd@data)
str(map_bd@polygons, max.level = 2)
#7th element in the Polygons slot of map_bd
seventh_element = map_bd@polygons[[6]]
# make it succinct with max.level = 2 in str() for the 7th element of the bd@Polygons
str(seventh_element, max.level = 2)
# Structure of the 2nd polygon inside seventh_element
str(seventh_element@Polygons[[2]], max.level = 2)
# plot() the coords slot of the 2nd element of the Polygons slot.
plot(seventh_element@Polygons[[2]]@coords)
# To access a column
map_bd$NAME_3
# or
map_bd[["NAME_3"]]
# Reproject the shapefile to longitude/latitude WGS84
map_bd = spTransform(map_bd, CRS("+proj=longlat +datum=WGS84"))
# Inspect the class of the reprojected object. The original wrote
# `class(bd)`, but no object named `bd` is defined in this script; the
# object in scope is `map_bd`.
class(map_bd)
# plot quantitative data
library(GISTools)
choropleth(map_bd, map_bd$value4)
# Plot qualitative data
#install.packages("RColorBrewer")
library(RColorBrewer)
dhaka_div = readOGR("F:/Hands-on Geospatial Analysis Using R and QGIS/Chapter 2/Data","dhaka_div")
# check how many unique elements map_bd$NAME_3 has by writing unique(dhaka_div$NAME_3)
unique(dhaka_div$NAME_3)
# There are 7 unique districts and so pick 7 colors
colors = colorRampPalette(brewer.pal(12, "Set3"))(7)
dhaka_div$NAME_3 = as.factor(as.character(dhaka_div$NAME_3))
spplot(dhaka_div, "NAME_3", main = "Coloring different districts of Dhaka division", col.regions = colors, col = "white")
# Using tmap
install.packages("tmap")
library(tmap)
# load a map
map_bd = readOGR("F:/Hands-on Geospatial Analysis Using R and QGIS/Chapter 2/Data","BGD_adm3_data_re")
#head(map_bd@data)
str(map_bd@data)
map_bd$value1 = as.numeric(map_bd$value1)
#str(map_bd@data)
qtm(shp = map_bd, fill = "value1")
# Using fill to have a choropleth map
tm_shape(map_bd) +
tm_borders() + # This adds a border
tm_fill(col="value1") +
tm_compass() + # This puts a compass on the bottom left of the map
tmap_style("cobalt")
# Using bubbles
tm_shape(map_bd) +
tm_bubbles(size = "value1", style = "quantile") +
tm_borders(col="orange3") # Add a colorful border
# labeling
tm_shape(map_bd) +
tm_fill(col = "value1", style = "quantile") +
tm_borders() +
tm_text(text = "NAME_3", size = 0.5)
# More stylized map
tm_shape(map_bd) +
tm_fill(col = "value1", style = "quantile", title = "Value of quantitative indicator", palette = "Blues") +
tm_borders(col = "grey30", lwd = 0.6) +
tm_text(text = "NAME_3", size = 0.5) +
tm_credits("Source: Author", position = c("right", "top")) | /chapter2.R | permissive | snowdj/Hands-On-Geospatial-Analysis-with-R-and-QGIS | R | false | false | 5,972 | r | # Using ggmap
# If you haven't installed ggmap yet, then uncomment the below line or just remove # from the next line and run it
#install.packages("ggmap")
library(ggmap)
#Coordinates of Dhaka
code = geocode("Dhaka")
str(code)
code
# we will uninstall and install old ggmap
#remove.packages("ggplot2")
library(devtools)
#install_github("hadley/ggplot2@v2.2.0")
dhaka_location = c(lon = code[1][,], lat = code[2][,])
#Download the map of Dhaka
dhaka_map = get_map(location = dhaka_location, zoom = 7)
#Displaying the map of Dhaka
ggmap(dhaka_map)
# zoomed in map
dhaka_map2 = get_map(location = dhaka_location, zoom = 13)
#Displaying the map of Dhaka
ggmap(dhaka_map2)
# zoomed out map
dhaka_map3 = get_map(location = dhaka_location, zoom = 5)
#Displaying the map of Dhaka
ggmap(dhaka_map3)
# Importing an excel file
bd_val = read.csv("F:/Hands-on Geospatial Analysis Using R and QGIS/Chapter 2/Data/r_val.csv", stringsAsFactors = FALSE)
str(bd_val)
bd_val$ind = factor(bd_val$ind)
# Plotting point data
ggmap(dhaka_map) +
geom_point(aes(lon, lat), data = bd_val)
# Coloring point data
ggmap(dhaka_map) +
geom_point(aes(lon, lat, color = value), data = bd_val)
# change ind column to factor type
bd_val$ind = factor(bd_val$ind)
# Coloring point data according to factor data
ggmap(dhaka_map) +
geom_point(aes(lon, lat, color = ind), data = bd_val)
# Map size to value
ggmap(dhaka_map) +
geom_point(aes(lon, lat, size = value), data = bd_val)
# Different options for get_map()
?get_map()
# toner-background
dhaka_map_toner = get_map(location = dhaka_location, zoom = 7, maptype="toner-background")
ggmap(dhaka_map_toner)
# satellite-background
dhaka_map_satellite = get_map(location = dhaka_location, zoom = 7, maptype="satellite")
ggmap(dhaka_map_satellite)
# Facetting
ggmap(dhaka_map, base_layer=
ggplot(bd_val, aes(lon, lat, color = value))) +
geom_point() + facet_wrap(~ind)
# Adding color argument
ggmap(dhaka_map, base_layer=
ggplot(bd_val, aes(lon, lat, color = ind))) +
geom_point() + facet_wrap(~ind)
# qmplot
qmplot(lon, lat, data = bd_val, geom = "point", color = ind) + facet_wrap(~ind)
# SpatialPoints
library(sp)
library(rgdal)
library(maptools)
map = readOGR("F:/Hands-on Geospatial Analysis Using R and QGIS/Chapter 2/Data","indicator_point")
plot(map)
class(map)
# SpatialLines
highway = readOGR("F:/Hands-on Geospatial Analysis Using R and QGIS/Chapter 2/Data","dhaka_highway_polylines")
plot(highway)
# Add a basemap background behind the polygons using RgoogleMaps/PBSmapping
install.packages("OpenStreetMap", dependencies = TRUE)
#install.packages("rJava")
install.packages("PBSmapping")
#library(rJava)
library(RgoogleMaps)
library(PBSmapping)
map_dhaka <- readOGR("F:/Hands-on Geospatial Analysis Using R and QGIS/Chapter 2/Data", "dhaka")
plot(map_dhaka)
# Convert the SpatialPolygons object to a PBSmapping PolySet
polyset <- SpatialPolygons2PolySet(map_dhaka)
head(polyset)
# Compute a bounding box from the PolySet's longitude/latitude columns
bounding_box <- qbbox(lat = polyset[, "Y"], lon = polyset[, "X"])
# Download a static background map covering that bounding box
background_map <- GetMap.bbox(bounding_box$lonR, bounding_box$latR)
# Draw the polygons (semi-transparent green) on top of the static map
PlotPolysOnStaticMap(background_map, polyset, lwd = 3, col = rgb(0.3, 0.6, 0.3, 0.05), add = F)
# Use max.level = 2 to avoid printing the deeply nested structure
str(map_dhaka, max.level = 2)
# Load the Bangladesh admin-level-3 map and explore its slot structure
map_bd = readOGR("F:/Hands-on Geospatial Analysis Using R and QGIS/Chapter 2/Data","BGD_adm3_data_re")
head(map_bd@data)
str(map_bd@polygons, max.level = 2)
# NOTE(review): [[6]] picks the 6th element (R is 1-indexed), although the
# variable name and original comment say "7th" — kept as-is to preserve the
# tutorial's output; confirm whether [[7]] was intended.
seventh_element = map_bd@polygons[[6]]
# make it succinct with max.level = 2 in str() for this element of map_bd@polygons
str(seventh_element, max.level = 2)
# Structure of the 2nd polygon inside seventh_element
str(seventh_element@Polygons[[2]], max.level = 2)
# plot() the coords slot of the 2nd element of the Polygons slot.
plot(seventh_element@Polygons[[2]]@coords)
# Two equivalent ways to access an attribute column of the data slot
map_bd$NAME_3
# or
map_bd[["NAME_3"]]
# Reproject to geographic coordinates (WGS84 longitude/latitude)
map_bd = spTransform(map_bd, CRS("+proj=longlat +datum=WGS84"))
# BUG FIX: original called class(bd) on an undefined object; the transformed
# object is map_bd.
class(map_bd)
# Choropleth of a quantitative column with GISTools
library(GISTools)
choropleth(map_bd, map_bd$value4)
# Now plot a qualitative (categorical) column
#install.packages("RColorBrewer")
library(RColorBrewer)
dhaka_div <- readOGR("F:/Hands-on Geospatial Analysis Using R and QGIS/Chapter 2/Data", "dhaka_div")
# Inspect the distinct district names in dhaka_div$NAME_3
unique(dhaka_div$NAME_3)
# Seven unique districts, so build a palette of seven colors
colors <- colorRampPalette(brewer.pal(12, "Set3"))(7)
# Drop unused factor levels by round-tripping through character
dhaka_div$NAME_3 <- as.factor(as.character(dhaka_div$NAME_3))
spplot(dhaka_div, "NAME_3", main = "Coloring different districts of Dhaka division", col.regions = colors, col = "white")
# Thematic mapping with the tmap package
install.packages("tmap")
library(tmap)
# Reload the admin-level-3 map so this section is self-contained
map_bd = readOGR("F:/Hands-on Geospatial Analysis Using R and QGIS/Chapter 2/Data","BGD_adm3_data_re")
#head(map_bd@data)
str(map_bd@data)
# value1 must be numeric for a graduated fill
map_bd$value1 = as.numeric(map_bd$value1)
#str(map_bd@data)
# Quick thematic map: one-liner choropleth of value1
qtm(shp = map_bd, fill = "value1")
# Layered choropleth built up with tm_* components
tm_shape(map_bd) +
  tm_borders() + # This adds a border
  tm_fill(col="value1") +
  tm_compass() + # This puts a compass on the bottom left of the map
  tmap_style("cobalt")
# Proportional-symbol (bubble) map, classed by quantiles
tm_shape(map_bd) +
  tm_bubbles(size = "value1", style = "quantile") +
  tm_borders(col="orange3") # Add a colorful border
# Choropleth with region labels
tm_shape(map_bd) +
  tm_fill(col = "value1", style = "quantile") +
  tm_borders() +
  tm_text(text = "NAME_3", size = 0.5)
# More stylized map: titled legend, Blues palette, credits annotation
# FIX: removed a stray trailing "|" (markdown-table extraction residue) that
# made the final expression a syntax error.
tm_shape(map_bd) +
  tm_fill(col = "value1", style = "quantile", title = "Value of quantitative indicator", palette = "Blues") +
  tm_borders(col = "grey30", lwd = 0.6) +
  tm_text(text = "NAME_3", size = 0.5) +
  tm_credits("Source: Author", position = c("right", "top"))
# --- Extraction residue below (dataset-viewer UI text, not part of the R script) ---
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.