content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
library(mosaic)
library(tidyverse)
library(data.table)
library(tree)
library(randomForest)
library(rpart)
library(gbm)
library(pdp)
# unfiltered
# Keep an untouched copy of the raw data (no NA filtering, original types).
greenbuildingsuf = read.csv("./DataScienceCourseHomework/exercises-3/data/greenbuildings.csv", header=TRUE)
# get the data
# Cleaned copy: drop rows with any NA, then convert categorical columns to
# factors.  `class` collapses the class_a/class_b dummies into one numeric
# code (2 = class A, 1 = class B, 0 = class C) before factor conversion.
greenbuildings = read.csv("./DataScienceCourseHomework/exercises-3/data/greenbuildings.csv", header=TRUE) %>%
na.omit() %>%
mutate(
cluster = as.factor(cluster),
green_rating = as.factor(green_rating),
class = as.factor(2*class_a + 1*class_b),
renovated = as.factor(renovated),
net = as.factor(net),
amenities = as.factor(amenities)
)
# Fill factor levels
# Relabel levels for readable legends.  NOTE(review): this relies on the
# default numeric level ordering (0 < 1 < 2), so c('C','B','A') maps
# 0->C, 1->B, 2->A -- confirm every level actually occurs in the data,
# otherwise labels shift onto the wrong categories.
levels(greenbuildings$green_rating) = c('Non-Green','Green')
levels(greenbuildings$class) = c('C','B','A')
levels(greenbuildings$renovated) = c('No','Yes')
levels(greenbuildings$net) = c('No','Yes')
levels(greenbuildings$amenities) = c('No','Yes')
# Drop identifiers and columns made redundant by the derived factors
# (class_a/class_b are encoded in `class`; LEED/Energystar presumably
# overlap green_rating -- TODO confirm).
exclude_vars <- c('CS_PropertyID','cluster','class_a','class_b','LEED','Energystar')
gb_clean <- greenbuildings[ , !(names(greenbuildings) %in% exclude_vars)]
# split into a training and testing set
# Reproducible 80/20 split; set.rseed() is mosaic's RNG-seeding wrapper.
set.rseed(63)
N = nrow(gb_clean)
train_frac = 0.8
N_train = floor(train_frac*N)
N_test = N - N_train
# Sample row indices without replacement; sort is cosmetic only.
train_ind = sample.int(N, N_train, replace=FALSE) %>% sort
gb_clean_train = gb_clean[train_ind,]
gb_clean_test = gb_clean[-train_ind,]
#### Basic Tree ####
#fit a big tree using rpart.control
# Deliberately overgrown tree (tiny cp, minsplit=5), to be pruned back
# using the cross-validation results stored in the cp table.
gb_bigtree = rpart(Rent ~ ., data=gb_clean_train, method="anova",
control=rpart.control(minsplit=5,cp=.00005))
# Leaf count = number of distinct terminal nodes the training rows land in.
nbig = length(unique(gb_bigtree$where))
cat('size of big tree: ',nbig,'\n')
#look at cross-validation
plotcp(gb_bigtree)
#plot best tree
# Choose the cp value with minimum cross-validated error, then prune to it.
bestcp=gb_bigtree$cptable[which.min(gb_bigtree$cptable[,"xerror"]),"CP"]
cat('bestcp: ',bestcp,'\n')
gb_besttree = prune(gb_bigtree,cp=bestcp)
nbest = length(unique(gb_besttree$where))
cat('size of best tree: ',nbest,'\n')
# Held-out RMSE of the pruned tree.
yhat_gb_besttree = predict(gb_besttree, gb_clean_test)
rmse_tree = mean((gb_clean_test$Rent - yhat_gb_besttree)^2) %>% sqrt
rmse_tree
#### Bagging ####
# Bagging = random forest that considers all predictors (mtry = 17) at
# every split.
# BUG FIX: the original passed `nTree=500`, but randomForest's argument is
# `ntree` (argument matching is case-sensitive), so `nTree` was silently
# absorbed by `...` and ignored; corrected to `ntree`.
gb_bag = randomForest(Rent ~ ., mtry=17, ntree=500, data=gb_clean_train)
yhat_gb_bag = predict(gb_bag, gb_clean_test)
# Held-out RMSE of the bagged ensemble.
rmse_bag = mean((gb_clean_test$Rent - yhat_gb_bag)^2) %>% sqrt
rmse_bag
#### Random Forest ####
# Random forest proper: mtry = 7 predictors considered at each split.
# BUG FIX: `nTree=500` is not a randomForest argument (it is `ntree`,
# case-sensitive), so it was silently ignored via `...`; corrected.
gb_forest = randomForest(Rent ~ ., mtry=7, ntree=500, data=gb_clean_train)
yhat_gb_forest = predict(gb_forest, gb_clean_test)
# Held-out RMSE of the random forest.
rmse_forest = mean((gb_clean_test$Rent - yhat_gb_forest)^2) %>% sqrt
rmse_forest
#### Boosting ####
# Gradient boosting: 5000 depth-4 trees.  NOTE(review): shrinkage=.1 with
# n.trees=5000 may overfit; gbm.perf() below estimates the optimal
# iteration count -- consider predicting with that instead of all 5000.
gb_boost = gbm(Rent ~ ., data=gb_clean_train, distribution = 'gaussian',
interaction.depth=4, n.trees=5000, shrinkage=.1)
yhat_gb_boost = predict(gb_boost, gb_clean_test, n.trees=5000)
# Held-out RMSE of the boosted model.
rmse_boost = mean((gb_clean_test$Rent - yhat_gb_boost)^2) %>% sqrt
rmse_boost
# Estimate the optimal number of boosting iterations.
gbm.perf(gb_boost)
#### Performance Comparison ####
# One predicted-vs-actual scatter per model, each with a 45-degree
# reference line (perfect prediction) and its test RMSE annotated.
# NOTE(review): the annotation position (x=25, y=160) is hard-coded and
# assumes Rent falls roughly in [0, 200] -- confirm against the data.
max_rent <- max(gb_clean_test$Rent)
min_rent <- min(gb_clean_test$Rent)
min_max <- c(min_rent,max_rent)
# Pruned single tree.
tree_plot <- ggplot() +
geom_point(aes(x=gb_clean_test$Rent, y = yhat_gb_besttree), alpha = .3, size = 2, shape = 16) +
geom_line(aes(x=min_max, y=min_max), color = 'blue', size = 1) +
labs(title="Out Of Sample Predicted vs. Actual Rent - Simple Tree",
y="Prediction",
x = "Rent",
fill="") +
annotate("label", x = 25, y = 160, label = paste0('RMSE: ',as.character(round(rmse_tree,4))), color = 'red', size = 5) +
theme_minimal()
tree_plot
# Bagged ensemble.
bag_plot <- ggplot() +
geom_point(aes(x=gb_clean_test$Rent, y = yhat_gb_bag), alpha = .3, size = 2, shape = 16) +
geom_line(aes(x=min_max, y=min_max), color = 'blue', size = 1) +
labs(title="Out Of Sample Predicted vs. Actual Rent - Bagging",
y="Prediction",
x = "Rent",
fill="") +
annotate("label", x = 25, y = 160, label = paste0('RMSE: ',as.character(round(rmse_bag,4))), color = 'red', size = 5) +
theme_minimal()
bag_plot
# Random forest.
forest_plot <- ggplot() +
geom_point(aes(x=gb_clean_test$Rent, y = yhat_gb_forest), alpha = .3, size = 2, shape = 16) +
geom_line(aes(x=min_max, y=min_max), color = 'blue', size = 1) +
labs(title="Out Of Sample Predicted vs. Actual Rent - Random Forest",
y="Prediction",
x = "Rent",
fill="") +
annotate("label", x = 25, y = 160, label = paste0('RMSE: ',as.character(round(rmse_forest,4))), color = 'red', size = 5) +
theme_minimal()
forest_plot
# Boosted model.
boost_plot <- ggplot() +
geom_point(aes(x=gb_clean_test$Rent, y = yhat_gb_boost), alpha = .3, size = 2, shape = 16) +
geom_line(aes(x=min_max, y=min_max), color = 'blue', size = 1) +
labs(title="Out Of Sample Predicted vs. Actual Rent - Boosting",
y="Prediction",
x = "Rent",
fill="") +
annotate("label", x = 25, y = 160, label = paste0('RMSE: ',as.character(round(rmse_boost,4))), color = 'red', size = 5) +
theme_minimal()
boost_plot
#### Partial Dependence ####
# Partial dependence of predicted Rent on green_rating, averaged over the
# distribution of the other predictors in gb_clean.
forest_p <- partial(gb_forest, pred.var = c('green_rating'), train = gb_clean)
forest_p$green_rating
forest_p$yhat
# Bar chart of average predicted rent by green rating, value labels above
# each bar.
pd_plot <- ggplot() +
geom_bar(mapping = aes(x=forest_p$green_rating, y=forest_p$yhat, fill = forest_p$green_rating), stat='identity') +
labs(title="Partial Dependence Plot - Random Forest",
y="Average Predicted Rent",
x = "Green Rating",
fill="") +
theme_minimal() +
geom_text(aes(x=forest_p$green_rating, y=forest_p$yhat+1, label = round(forest_p$yhat,2)), size = 4) +
scale_fill_manual(values=c("#FC6767", "#57D06B")) +
# FIX: guides(fill=FALSE) is deprecated (ggplot2 >= 3.3.4); use "none".
guides(fill="none")
pd_plot
varImpPlot(gb_forest, main = 'Variable Importance Plot - Random Forest')
|
/exercises-3/scripts/exercises3_problem1.R
|
no_license
|
nfra/DataScienceCourseHomework
|
R
| false
| false
| 5,527
|
r
|
library(mosaic)
library(tidyverse)
library(data.table)
library(tree)
library(randomForest)
library(rpart)
library(gbm)
library(pdp)
# unfiltered
greenbuildingsuf = read.csv("./DataScienceCourseHomework/exercises-3/data/greenbuildings.csv", header=TRUE)
# get the data
greenbuildings = read.csv("./DataScienceCourseHomework/exercises-3/data/greenbuildings.csv", header=TRUE) %>%
na.omit() %>%
mutate(
cluster = as.factor(cluster),
green_rating = as.factor(green_rating),
class = as.factor(2*class_a + 1*class_b),
renovated = as.factor(renovated),
net = as.factor(net),
amenities = as.factor(amenities)
)
# Fill factor levels
levels(greenbuildings$green_rating) = c('Non-Green','Green')
levels(greenbuildings$class) = c('C','B','A')
levels(greenbuildings$renovated) = c('No','Yes')
levels(greenbuildings$net) = c('No','Yes')
levels(greenbuildings$amenities) = c('No','Yes')
exclude_vars <- c('CS_PropertyID','cluster','class_a','class_b','LEED','Energystar')
gb_clean <- greenbuildings[ , !(names(greenbuildings) %in% exclude_vars)]
# split into a training and testing set
set.rseed(63)
N = nrow(gb_clean)
train_frac = 0.8
N_train = floor(train_frac*N)
N_test = N - N_train
train_ind = sample.int(N, N_train, replace=FALSE) %>% sort
gb_clean_train = gb_clean[train_ind,]
gb_clean_test = gb_clean[-train_ind,]
#### Basic Tree ####
#fit a big tree using rpart.control
gb_bigtree = rpart(Rent ~ ., data=gb_clean_train, method="anova",
control=rpart.control(minsplit=5,cp=.00005))
nbig = length(unique(gb_bigtree$where))
cat('size of big tree: ',nbig,'\n')
#look at cross-validation
plotcp(gb_bigtree)
#plot best tree
bestcp=gb_bigtree$cptable[which.min(gb_bigtree$cptable[,"xerror"]),"CP"]
cat('bestcp: ',bestcp,'\n')
gb_besttree = prune(gb_bigtree,cp=bestcp)
nbest = length(unique(gb_besttree$where))
cat('size of best tree: ',nbest,'\n')
yhat_gb_besttree = predict(gb_besttree, gb_clean_test)
rmse_tree = mean((gb_clean_test$Rent - yhat_gb_besttree)^2) %>% sqrt
rmse_tree
#### Bagging ####
# Bagging = random forest that considers all predictors (mtry = 17).
# BUG FIX: `nTree=500` is not a randomForest argument (it is `ntree`,
# case-sensitive), so it was silently ignored via `...`; corrected.
gb_bag = randomForest(Rent ~ ., mtry=17, ntree=500, data=gb_clean_train)
yhat_gb_bag = predict(gb_bag, gb_clean_test)
rmse_bag = mean((gb_clean_test$Rent - yhat_gb_bag)^2) %>% sqrt
rmse_bag
#### Random Forest ####
# BUG FIX: `nTree=500` is not a randomForest argument (it is `ntree`,
# case-sensitive), so it was silently ignored via `...`; corrected.
gb_forest = randomForest(Rent ~ ., mtry=7, ntree=500, data=gb_clean_train)
yhat_gb_forest = predict(gb_forest, gb_clean_test)
rmse_forest = mean((gb_clean_test$Rent - yhat_gb_forest)^2) %>% sqrt
rmse_forest
#### Boosting ####
gb_boost = gbm(Rent ~ ., data=gb_clean_train, distribution = 'gaussian',
interaction.depth=4, n.trees=5000, shrinkage=.1)
yhat_gb_boost = predict(gb_boost, gb_clean_test, n.trees=5000)
rmse_boost = mean((gb_clean_test$Rent - yhat_gb_boost)^2) %>% sqrt
rmse_boost
gbm.perf(gb_boost)
#### Performance Comparison ####
max_rent <- max(gb_clean_test$Rent)
min_rent <- min(gb_clean_test$Rent)
min_max <- c(min_rent,max_rent)
tree_plot <- ggplot() +
geom_point(aes(x=gb_clean_test$Rent, y = yhat_gb_besttree), alpha = .3, size = 2, shape = 16) +
geom_line(aes(x=min_max, y=min_max), color = 'blue', size = 1) +
labs(title="Out Of Sample Predicted vs. Actual Rent - Simple Tree",
y="Prediction",
x = "Rent",
fill="") +
annotate("label", x = 25, y = 160, label = paste0('RMSE: ',as.character(round(rmse_tree,4))), color = 'red', size = 5) +
theme_minimal()
tree_plot
bag_plot <- ggplot() +
geom_point(aes(x=gb_clean_test$Rent, y = yhat_gb_bag), alpha = .3, size = 2, shape = 16) +
geom_line(aes(x=min_max, y=min_max), color = 'blue', size = 1) +
labs(title="Out Of Sample Predicted vs. Actual Rent - Bagging",
y="Prediction",
x = "Rent",
fill="") +
annotate("label", x = 25, y = 160, label = paste0('RMSE: ',as.character(round(rmse_bag,4))), color = 'red', size = 5) +
theme_minimal()
bag_plot
forest_plot <- ggplot() +
geom_point(aes(x=gb_clean_test$Rent, y = yhat_gb_forest), alpha = .3, size = 2, shape = 16) +
geom_line(aes(x=min_max, y=min_max), color = 'blue', size = 1) +
labs(title="Out Of Sample Predicted vs. Actual Rent - Random Forest",
y="Prediction",
x = "Rent",
fill="") +
annotate("label", x = 25, y = 160, label = paste0('RMSE: ',as.character(round(rmse_forest,4))), color = 'red', size = 5) +
theme_minimal()
forest_plot
boost_plot <- ggplot() +
geom_point(aes(x=gb_clean_test$Rent, y = yhat_gb_boost), alpha = .3, size = 2, shape = 16) +
geom_line(aes(x=min_max, y=min_max), color = 'blue', size = 1) +
labs(title="Out Of Sample Predicted vs. Actual Rent - Boosting",
y="Prediction",
x = "Rent",
fill="") +
annotate("label", x = 25, y = 160, label = paste0('RMSE: ',as.character(round(rmse_boost,4))), color = 'red', size = 5) +
theme_minimal()
boost_plot
#### Partial Dependence ####
# Partial dependence of predicted Rent on green_rating.
forest_p <- partial(gb_forest, pred.var = c('green_rating'), train = gb_clean)
forest_p$green_rating
forest_p$yhat
pd_plot <- ggplot() +
geom_bar(mapping = aes(x=forest_p$green_rating, y=forest_p$yhat, fill = forest_p$green_rating), stat='identity') +
labs(title="Partial Dependence Plot - Random Forest",
y="Average Predicted Rent",
x = "Green Rating",
fill="") +
theme_minimal() +
geom_text(aes(x=forest_p$green_rating, y=forest_p$yhat+1, label = round(forest_p$yhat,2)), size = 4) +
scale_fill_manual(values=c("#FC6767", "#57D06B")) +
# FIX: guides(fill=FALSE) is deprecated (ggplot2 >= 3.3.4); use "none".
guides(fill="none")
pd_plot
varImpPlot(gb_forest, main = 'Variable Importance Plot - Random Forest')
|
# data
# Box-Jenkins ARIMA modelling of daily new COVID-19 cases in Wales.
# NOTE(review): prefer library() over require() for hard dependencies;
# require() only returns FALSE (no error) when a package is missing.
require(tseries)
require(forecast)
wales <- read.csv("WalesCases.csv")
df4 <- ts(wales$newCasesByPublishDate)
# Source data is newest-first; reverse into chronological order.
# NOTE(review): rev() returns a plain vector (ts attributes are dropped) --
# harmless with frequency 1, but confirm.
df4 <- rev(df4)
# Step 1: check whether a variance-stabilising transformation is needed
# plot the data
plot(df4, type = "l")
# plot the mean
# Split the series into 217 consecutive pairs and plot per-pair mean vs sd
# to see whether the spread grows with the level.
# NOTE(review): the 217x2 dimensions are hard-coded to the series length --
# update if the data changes.
Mt <- matrix(df4, 217, 2, byrow = T)
mt <- apply(Mt, 1, mean)
sdt <- apply(Mt,1, sd)
plot(mt, sdt, cex = 1.2, xlab = "Mean", ylab = "Sd")
## Suggested we do need log transformation to stabilise the variance
# log(x + 1) so zero-count days remain finite.
df4 <- log(df4 + 1)
# Step 2: check if the original series is weakly stationary (plot of data, ACF and ADF test)
acf(df4)
adf.test(df4, k = 17)
## ACF suggested that it is not weakly stationary and ADF is larger than 0.05 as well
## This indicated that we need to check for differencing
# Step 3: differencing -- compare the raw series against 1st/2nd/3rd
# differences side by side.
par(mfrow = c(2, 2))
par(mai = c(0.8, 0.8, 0.8, 0.7))
plot(df4, type = "l", ylab = "Original series", xlab = "Time")
plot(diff(df4), type = "l", ylab = "First differences", xlab = "Time")
plot(diff(df4, differences = 2), type = "l", ylab = "Second differences", xlab = "Time")
# BUG FIX: this panel plots THIRD differences but was labelled "Second
# differences"; label corrected.
plot(diff(df4, differences = 3), type = "l", ylab = "Third differences", xlab = "Time")
## It seems that the first differencing is better
ndiffs(df4)
# Step 4: plot acf and pacf and check unit root test again for first differencing
# FIX: the original assigned to a variable named `diff`, shadowing
# base::diff(); renamed to df4_d1 to avoid confusion.
df4_d1 <- diff(df4)
par(mfrow = c(1, 2))
acf(df4_d1, main = "ACF of differenced data")
pacf(df4_d1, main = "PACF of differenced data")
adf.test(df4_d1, k = 17)
## Suggested first differencing is better
### Step 1-4: Model Identification (finding d)
# Step 5: get model using auto.arima function
require(forecast)
auto.arima(df4, trace = TRUE)
## Suggested (4,1,5), aicc = 1017.09
# Step 6: model diagnostics to check correlation, residual acf and normality
m1 <- arima(df4, order = c(4,1,5))
checkresiduals(m1)
# NOTE(review): `resid` shadows the stats::resid() accessor; consider a
# different name.
resid <- residuals(m1)
Box.test(resid, lag = 6, type = "Ljung-Box")
# Large p-value: fail to reject H0, i.e. no evidence of residual
# autocorrelation.
acf(resid)
jarque.bera.test(resid)
qqnorm(resid)
# NOTE(review): rejecting H0 of the Jarque-Bera test means residuals are
# NOT normally distributed -- the original comment ("reject h0 thus
# normally distributed") had the interpretation backwards.
## Not perfect fit
# second round: Step 5 -- exhaustive grid search over ARIMA(p,1,q),
# p,q in 0..8, comparing corrected AIC (AICc).
model <- matrix(NA, 9, 9)
for (i in 0:8) {
for (j in 0:8) {
fit <- Arima(df4, order = c(i, 1, j), include.mean = TRUE)
model[(i+1), (j+1)] <- fit$aicc
}
}
# Render the AICc grid as a p-by-q table (rows = p, columns = q).
knitr::kable(
cbind(0:8, model), booktabs = TRUE, col.names = c("p/q", 0, 1, 2, 3, 4,5,6,7,8)
)
# p = 7, q = 7, AICC = 998.999
# Second Round: Step 6 -- diagnostics for the grid-search winner (7,1,7).
m1 <- arima(df4, order = c(7,1,7))
checkresiduals(m1)
resid <- residuals(m1)
Box.test(resid, lag = 6, type = "Ljung-Box")
# Large p-value: fail to reject H0 of no residual autocorrelation.
acf(resid)
pacf(resid)
jarque.bera.test(resid)
qqnorm(resid)
# NOTE(review): rejecting the Jarque-Bera H0 indicates residuals are NOT
# normal -- original comment had the interpretation backwards.
## not perfect fit
### Thus the final model is ARIMA(4,1,5)
# NOTE(review): the grid search preferred (7,1,7) on AICc, yet (4,1,5) is
# chosen here -- presumably for parsimony; confirm and document why.
# Step 7: Forecasting -- 20-step-ahead forecast on the log scale.
model_w <- arima(df4, order = c(4,1,5))
forecast(model_w, h = 20)
autoplot(forecast(model_w, h = 20))
|
/src/Wales.R
|
no_license
|
Eleanorkong/Predicting-Covid-19-UK-Cases
|
R
| false
| false
| 2,926
|
r
|
# data
require(tseries)
require(forecast)
wales <- read.csv("WalesCases.csv")
df4 <- ts(wales$newCasesByPublishDate)
df4 <- rev(df4)
# Step 1: check if we need take transformation (qualtratics or exponential)
# plot the data
plot(df4, type = "l")
# plot the mean
Mt <- matrix(df4, 217, 2, byrow = T)
mt <- apply(Mt, 1, mean)
sdt <- apply(Mt,1, sd)
plot(mt, sdt, cex = 1.2, xlab = "Mean", ylab = "Sd")
## Suggested we do need log transformation to stablise the variance
df4 <- log(df4 + 1)
# Step 2: check if the original series is weakly stationary (plot of data, ACF and ADF test)
acf(df4)
adf.test(df4, k = 17)
## ACF suggested that it is not weakly stationary and ADF is larger than 0.05 as well
## This indicated that we need to check for differencing
# Step 3: differencing
par(mfrow = c(2, 2))
par(mai = c(0.8, 0.8, 0.8, 0.7))
plot(df4, type = "l", ylab = "Original series", xlab = "Time")
plot(diff(df4), type = "l", ylab = "First differences", xlab = "Time")
plot(diff(df4, differences = 2), type = "l", ylab = "Second differences", xlab = "Time")
# BUG FIX: this panel plots third differences; label corrected from
# "Second differences".
plot(diff(df4, differences = 3), type = "l", ylab = "Third differences", xlab = "Time")
## It seems that the first differencing is better
ndiffs(df4)
# Step 4: plot acf and pacf and check unit root test again for first differencing
diff <- diff(df4)
par(mfrow = c(1, 2))
acf(diff, main = "ACF of differenced data")
pacf(diff, main = "PACF of differenced data")
adf.test(diff, k = 17)
## Suggested first differencing is better
### Step 1-4: Model Identification (finding d)
# Step 5: get model using auto.arima function
require(forecast)
auto.arima(df4, trace = TRUE)
## Suggested (4,1,5), aicc = 1017.09
# Step 6: model diagnositc to check correlation, residual acf and normality
m1 <- arima(df4, order = c(4,1,5))
checkresiduals(m1)
resid <- residuals(m1)
Box.test(resid, lag = 6, type = "Ljung-Box")
# not reject h0 thus not correlated
acf(resid)
jarque.bera.test(resid)
qqnorm(resid)
# reject h0 thus normally distirbuted
## Not perfect fit
# second round: Step 5 get model using arima function to get smallest
model <- matrix(NA, 9, 9)
for (i in 0:8) {
for (j in 0:8) {
fit <- Arima(df4, order = c(i, 1, j), include.mean = TRUE)
model[(i+1), (j+1)] <- fit$aicc
}
}
knitr::kable(
cbind(0:8, model), booktabs = TRUE, col.names = c("p/q", 0, 1, 2, 3, 4,5,6,7,8)
)
# p = 7, q = 7, AICC = 998.999
# Second Round: Step 6 model diagnositc to check correlation, residual acf and normality
m1 <- arima(df4, order = c(7,1,7))
checkresiduals(m1)
resid <- residuals(m1)
Box.test(resid, lag = 6, type = "Ljung-Box")
# not reject h0 thus not correlated
acf(resid)
pacf(resid)
jarque.bera.test(resid)
qqnorm(resid)
# reject h0 thus normally distirbuted
## not perfect fit
### Thus the final model is ARIMA(4,1,5)
# Step 7: Forcasting
model_w <- arima(df4, order = c(4,1,5))
forecast(model_w, h = 20)
autoplot(forecast(model_w, h = 20))
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/plot.plot.R
\name{plot1DNumeric}
\alias{plot1DNumeric}
\title{Plot a one-dimensional function.}
\usage{
plot1DNumeric(x, show.optimum = FALSE, n.samples = 500L, ...)
}
\arguments{
\item{x}{[\code{smoof_function}]\cr
Function.}
\item{show.optimum}{[\code{logical(1)}]\cr
If the function has a known global optimum, should its location be
plotted by a point or multiple points in case of multiple global optima?
Default is \code{FALSE}.}
\item{n.samples}{[\code{integer(1)}]\cr
Number of locations to be sampled. Default is 500.}
\item{...}{[any]\cr
Further parameters passed to the plot function.}
}
\value{
Nothing
}
\description{
Plot a one-dimensional function.
}
|
/man/plot1DNumeric.Rd
|
no_license
|
mhils/smoof
|
R
| false
| false
| 755
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/plot.plot.R
\name{plot1DNumeric}
\alias{plot1DNumeric}
\title{Plot a one-dimensional function.}
\usage{
plot1DNumeric(x, show.optimum = FALSE, n.samples = 500L, ...)
}
\arguments{
\item{x}{[\code{smoof_function}]\cr
Function.}
\item{show.optimum}{[\code{logical(1)}]\cr
If the function has a known global optimum, should its location be
plotted by a point or multiple points in case of multiple global optima?
Default is \code{FALSE}.}
\item{n.samples}{[\code{integer(1)}]\cr
Number of locations to be sampled. Default is 500.}
\item{...}{[any]\cr
Further parameters passed to the plot function.}
}
\value{
Nothing
}
\description{
Plot a one-dimensional function.
}
|
library(dplyr)
# Simulation setup: Wald CI, scenario 24, MICE analysis, 20% dropout.
ss.bounds <- readRDS("ss.bounds.rds")
alpha <- 0.025
method <- 'wald'
scenario <- 24
# NOTE(review): `param` appears unused in this script -- confirm or remove.
param <- 1
anal_type <- "mice"
# Pull the design row for this scenario.  Inside filter(), `method` and
# `scenario.id` resolve to data columns (data masking), not the globals
# above -- hence the literal "wald" on the left-hand side.
ss <- ss.bounds%>%
dplyr::filter(method == "wald", scenario.id == scenario)
do_val <- 0.2
# 10,000 Monte Carlo replicates, forked across all-but-one core.  Each
# replicate: simulate a full trial, compute the full-data CI, then impose
# two MNAR missingness patterns (differing only in mu_T) and re-analyse
# with multiple imputation.
x1 <- parallel::mclapply(X = 1:10000,
mc.cores = parallel::detectCores() - 1,
FUN= function(x) {
library(tidyr, warn.conflicts = F, quietly = T)
library(dplyr, warn.conflicts = F, quietly = T)
library(purrr, warn.conflicts = F, quietly = T)
library(reshape2, warn.conflicts = F, quietly = T)
library(MASS, warn.conflicts = F, quietly = T)
library(nibinom)
# Seed depends on both scenario and replicate so runs are reproducible
# and scenarios do not share RNG streams.
set.seed(10000*scenario + x)
#generate full data with desired correlation structure
dt0 <- sim_cont(p_C = ss$p_C, p_T = ss$p_C - ss$M2, n_arm = ss$n.arm,
mu1 = 4, mu2 = 100, sigma1 = 1, sigma2 = 20, r12 = -0.3, b1 = 0.1, b2 = -0.01)
# Benchmark CI from the complete (pre-missingness) data.
ci.full <- dt0%>%wald_ci(ss$M2,'y', alpha)
#define missingness parameters and do rates
m_param <- mpars(do = do_val, atype = anal_type)
#impose missing values and perform analysis
# NOTE(review): the two blocks below are identical except for slice(1)
# vs slice(2) and mu_T (0.6 vs 1.9) -- a small helper would remove the
# duplication.
ci.miss.mnar1 <- m_param%>%
slice(1)%>%
dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
miss_gen_an, dt = dt0, do = do_val,
ci_method = wald_ci,
sing_anal = F,
mice_anal = T,
m2 = ss$M2, seed = 10000*scenario + x,
seed_mice = 10000*scenario + x,
method = method,
alpha = alpha,
n_mi = 2,
m_mi = 100,
mu_T = 0.6, sd_T = 0.05))%>%
dplyr::select(missing, results)
ci.miss.mnar2 <- m_param%>%
slice(2)%>%
dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
miss_gen_an, dt = dt0, do = do_val,
ci_method = wald_ci,
sing_anal = F,
mice_anal = T,
m2 = ss$M2, seed = 10000*scenario + x,
seed_mice = 10000*scenario + x,
method = method,
alpha = alpha,
n_mi = 2,
m_mi = 100,
mu_T = 1.9, sd_T = 0.05))%>%
dplyr::select(missing, results)
# Stack both MNAR analyses and tag with scenario/replicate metadata.
ci.miss <- bind_rows(ci.miss.mnar1, ci.miss.mnar2)%>%
dplyr::mutate(scenario.id = ss$scenario.id,
p_C = ss$p_C,
M2 = ss$M2,
type = 't.H0',
do = do_val,
sim.id = x)
# Return full-data and missing-data results for this replicate.
ci.all <- list(ci.full, ci.miss)%>%purrr::set_names(c("ci.full","ci.miss"))
return(ci.all)
})
#to summarize type-I error and mean relative bias from the simulated data
source('funs/h0.mice.sum.R')
h0.mice.sum(x1, method = 'wald')
|
/sim_pgms/wald/do20/2xcontH0_sc24_do20_mice.R
|
no_license
|
yuliasidi/nibinom_apply
|
R
| false
| false
| 3,328
|
r
|
library(dplyr)
ss.bounds <- readRDS("ss.bounds.rds")
alpha <- 0.025
method <- 'wald'
scenario <- 24
param <- 1
anal_type <- "mice"
ss <- ss.bounds%>%
dplyr::filter(method == "wald", scenario.id == scenario)
do_val <- 0.2
x1 <- parallel::mclapply(X = 1:10000,
mc.cores = parallel::detectCores() - 1,
FUN= function(x) {
library(tidyr, warn.conflicts = F, quietly = T)
library(dplyr, warn.conflicts = F, quietly = T)
library(purrr, warn.conflicts = F, quietly = T)
library(reshape2, warn.conflicts = F, quietly = T)
library(MASS, warn.conflicts = F, quietly = T)
library(nibinom)
set.seed(10000*scenario + x)
#generate full data with desired correlation structure
dt0 <- sim_cont(p_C = ss$p_C, p_T = ss$p_C - ss$M2, n_arm = ss$n.arm,
mu1 = 4, mu2 = 100, sigma1 = 1, sigma2 = 20, r12 = -0.3, b1 = 0.1, b2 = -0.01)
ci.full <- dt0%>%wald_ci(ss$M2,'y', alpha)
#define missingness parameters and do rates
m_param <- mpars(do = do_val, atype = anal_type)
#impose missing values and perform analysis
ci.miss.mnar1 <- m_param%>%
slice(1)%>%
dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
miss_gen_an, dt = dt0, do = do_val,
ci_method = wald_ci,
sing_anal = F,
mice_anal = T,
m2 = ss$M2, seed = 10000*scenario + x,
seed_mice = 10000*scenario + x,
method = method,
alpha = alpha,
n_mi = 2,
m_mi = 100,
mu_T = 0.6, sd_T = 0.05))%>%
dplyr::select(missing, results)
ci.miss.mnar2 <- m_param%>%
slice(2)%>%
dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
miss_gen_an, dt = dt0, do = do_val,
ci_method = wald_ci,
sing_anal = F,
mice_anal = T,
m2 = ss$M2, seed = 10000*scenario + x,
seed_mice = 10000*scenario + x,
method = method,
alpha = alpha,
n_mi = 2,
m_mi = 100,
mu_T = 1.9, sd_T = 0.05))%>%
dplyr::select(missing, results)
ci.miss <- bind_rows(ci.miss.mnar1, ci.miss.mnar2)%>%
dplyr::mutate(scenario.id = ss$scenario.id,
p_C = ss$p_C,
M2 = ss$M2,
type = 't.H0',
do = do_val,
sim.id = x)
ci.all <- list(ci.full, ci.miss)%>%purrr::set_names(c("ci.full","ci.miss"))
return(ci.all)
})
#to summarize type-I error and mean relative bias from the simulated data
source('funs/h0.mice.sum.R')
h0.mice.sum(x1, method = 'wald')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fct_reshaping.R
\name{matrix_graphe}
\alias{matrix_graphe}
\title{Construire la matrice d'adjacence du graphe}
\usage{
matrix_graphe(m_crois, multi = TRUE)
}
\arguments{
\item{m_crois}{Matrice de croisement.}
\item{multi}{Booléen indiquant s'il faut considérer les zones de z2
recouvrant trois zones ou plus de z1.}
}
\value{
En sortie on obtient une matrice carrée d'adjacence.
}
\description{
Fonction permettant à partir de la matrice de croisement
de déterminer la matrice d'adjacence du graphe. Cette matrice
de graphe est pondérée et non symétrique (ce qui correspond
à un graphe orienté).
}
\details{
L'option \code{multi} permet de choisir si on prend en compte
ou non les zones de z2 recouvrant 3 zones de z1 ou plus. En effet
si on les prend en compte, alors certaines observations sont comptées
plusieurs fois dans le graphe, ce qui peut conduire à de mauvaises
interprétations.
}
|
/fuzzedpackages/diffman/man/matrix_graphe.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false
| true
| 983
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fct_reshaping.R
\name{matrix_graphe}
\alias{matrix_graphe}
\title{Construire la matrice d'adjacence du graphe}
\usage{
matrix_graphe(m_crois, multi = TRUE)
}
\arguments{
\item{m_crois}{Matrice de croisement.}
\item{multi}{Booléen indiquant s'il faut considérer les zones de z2
recouvrant trois zones ou plus de z1.}
}
\value{
En sortie on obtient une matrice carrée d'adjacence.
}
\description{
Fonction permettant à partir de la matrice de croisement
de déterminer la matrice d'adjacence du graphe. Cette matrice
de graphe est pondérée et non symétrique (ce qui correspond
à un graphe orienté).
}
\details{
L'option \code{multi} permet de choisir si on prend en compte
ou non les zones de z2 recouvrant 3 zones de z1 ou plus. En effet
si on les prend en compte, alors certaines observations sont comptées
plusieurs fois dans le graphe, ce qui peut conduire à de mauvaises
interprétations.
}
|
# Parallel binary models ####
# Run source code
# NOTE(review): rm(list = ls()) wipes the calling environment -- acceptable
# for a standalone batch script, but hostile if this file is source()d
# from an interactive session.
rm(list = ls())
source("R Code/00_Master Code.R")
# Run parallel
library(MCMCglmm); library(ggregplot); library(INLA); library(parallel); library(dplyr)
# Residual prior for the binary model: residual variance fixed at 1
# (fix = 1), as is standard for MCMCglmm "categorical" responses.
prior.bin2 <- list(R = list(V = diag(1), nu = 0.002, fix = 1))
# Modelling all mammal-mammal pairs ####
# Multiplier applied to nitt/thin/burnin to lengthen the MCMC run.
mf = 15
# Trying a Binomial model ####
# Fit 20 chains across 10 cores; each chain is written straight to its own
# .Rdata file (saveRDS side effect), nothing is returned.
# FIX: `trunc = T` replaced with `trunc = TRUE` -- T is a reassignable
# variable, not a reserved word.
parallel::mclapply(1:20, function(i) {
saveRDS(MCMCglmm(
data = FinalHostMatrix,
VirusBinary ~ Space + Phylo2 + Space:Phylo2 + MinDCites + DomDom,
prior = prior.bin2,
family = "categorical",
pr = TRUE,
nitt = 13000*mf, # REMEMBER YOU'VE DONE THIS
thin = 10*mf, burnin=8000*mf, trunc = TRUE), file = paste0("Binomial Model ",i, ".Rdata"))
}, mc.cores = 10)
|
/R Code/HP3 Code/1_Sharing Models/Defunct Sharing Models/Parallel Binomial Model 2 (No G).R
|
no_license
|
gfalbery/Helmneth
|
R
| false
| false
| 742
|
r
|
# Parallel binary models ####
# Run source code
rm(list = ls())
source("R Code/00_Master Code.R")
# Run parallel
library(MCMCglmm); library(ggregplot); library(INLA); library(parallel); library(dplyr)
prior.bin2 <- list(R = list(V = diag(1), nu = 0.002, fix = 1))
# Modelling all mammal-mammal pairs ####
mf = 15
# Trying a Binomial model ####
parallel::mclapply(1:20, function(i) {
saveRDS(MCMCglmm(
data = FinalHostMatrix,
VirusBinary ~ Space + Phylo2 + Space:Phylo2 + MinDCites + DomDom,
prior = prior.bin2,
family = "categorical",
pr = TRUE,
nitt = 13000*mf, # REMEMBER YOU'VE DONE THIS
thin = 10*mf, burnin=8000*mf, trunc = T), file = paste0("Binomial Model ",i, ".Rdata"))
}, mc.cores = 10)
|
suppressPackageStartupMessages(library(tidyverse))
library(glue)
library(ggrepel)
library(here)
# Site list from the Korean Archaeological Sites (KAS) spreadsheet.
# NOTE(review): korean_archaeological_sites is not referenced again in
# this script -- confirm it is needed.
korean_archaeological_sites <- readxl::read_excel(here("analysis/data/raw_data/korean-archaeologica-sites.xlsx"))
# data from PhD data sheet, not KAS sheet.
mydata <- read.csv(here("analysis/data/raw_data/General_info.csv"))
# raw material data from KAS data sheet.
kasr <- read.csv(here("analysis/data/raw_data/Rawmaterial_info.csv"))
# assemblage composition data from KAS data sheet.
kasa <- read.csv(here("analysis/data/raw_data/Assemblage_info.csv"))
#volume of the cultural layer from KAS data sheet.
kasv <- read.csv(here("analysis/data/raw_data/Dating_info.csv"))
# join artefact type freqs with site data
# NOTE(review): the result of this pipeline is not assigned -- it prints
# and is discarded.  Either assign it or drop the block if it was only an
# interactive check.
kasa %>%
pivot_longer(-X1,
names_to = "site_name",
values_to = "count") %>%
pivot_wider(names_from = "X1",
values_from = "count") %>%
left_join(mydata)
# Transpose kasv so sites become rows; the first original row supplies
# column names (magrittr `.` placeholder), then everything is coerced to
# numeric and artefact density (count per m^3) is derived.
kasv_tidy <-
kasv %>%
t %>%
as_tibble() %>%
setNames(as.character(.[1,])) %>%
.[-1,] %>%
mutate_all(parse_number) %>%
mutate(artefact_density = total_artifacts / volume,
sites = names(kasv)[-1]) %>%
left_join(mydata, by = c('sites' = 'site_name' )) %>%
mutate(has_sp = ifelse(is.na(SP.), "no", "yes"))
# Split "age ± error" radiocarbon strings into numeric columns (ka).
# NOTE(review): mydata_ages is not used later in this chunk -- presumably
# consumed elsewhere in the paper; confirm.
mydata_ages <-
mydata %>%
separate(C14.BP., into = c('age', 'error'),
sep = "±") %>%
mutate(age_ka = parse_number(age) / 1000,
error = parse_number(error)) %>%
mutate(has_sp = ifelse(!is.na(SP.), "yes", "no"))
# compute t-test
# Welch two-sample t-test (t.test default, var.equal = FALSE) comparing
# artefact density between sites with and without stemmed points.
den_sp_ttest <-
t.test(artefact_density ~ has_sp, data = kasv_tidy)
# extract elements from the t-test output
den_sp_ttest_t <- round(unname(den_sp_ttest$statistic), 3)
den_sp_ttest_p <- round(unname(den_sp_ttest$p.value ), 3)
den_sp_ttest_df <- round(unname(den_sp_ttest$parameter ), 3)
# t(degrees of freedom) = the t statistic, p = p value.
den_sp_ttest_str <-
paste0("t(", den_sp_ttest_df, ") = ", den_sp_ttest_t, ", p = ", den_sp_ttest_p)
#Volume and artefact counts to get density over time.
# Inset boxplot: density by stemmed-point presence, annotated with the
# t-test summary string built above.
density_sp_sub_plot <-
ggplot(kasv_tidy,
aes(has_sp,
artefact_density)) +
geom_boxplot(lwd = 0.1) +
annotate("text",
x = 1.5,
y = 9,
label = den_sp_ttest_str,
size = 1.5) +
theme_bw(base_size = 6) +
labs(x = "Stemmed points present?",
y = "Artifact density")
# Main scatter: density vs assemblage age (ka), point size = assemblage
# size, colour = stemmed-point presence.
density_sp_main_plot <-
ggplot(kasv_tidy,
aes(date_age / 1000,
artefact_density)) +
geom_point(aes(size = total_artifacts,
colour = has_sp)) +
ylab(bquote('Artifact density'~(n/m^3))) +
xlab("Age of assemblage (ka)") +
scale_size_continuous(name = "Total number\nof artifacts") +
scale_color_viridis_d(name = "Stemmed\npoints\npresent?") +
theme_minimal(base_size = 8)
# https://wilkelab.org/cowplot/articles/drawing_with_on_plots.html
library(cowplot)
# Overlay the inset on the main plot; the draw_plot fractions place the
# inset in the upper-middle of the canvas.
ggdraw(density_sp_main_plot) +
draw_plot(density_sp_sub_plot,
.37, .62,
.32, .33) +
theme(panel.background = element_rect(fill='white', colour="white"),
plot.background = element_rect(fill='white', colour="white"))
# ggsave() with no plot argument saves the last plot displayed.
ggsave(here::here("analysis/figures/002-age-by-density.png"),
width = 4.45,
height = 4.45,
units = "in")
|
/analysis/paper/002-artifact-volumetric-density.R
|
permissive
|
parkgayoung/koreapaleolithicmobilityoccupation
|
R
| false
| false
| 3,271
|
r
|
# Artifact volumetric density analysis: combine assemblage counts,
# cultural-layer volumes and site metadata, t-test density by stemmed-point
# presence, and draw the age-by-density figure with an inset boxplot.
suppressPackageStartupMessages(library(tidyverse))
library(glue)
library(ggrepel)
library(here)
korean_archaeological_sites <- readxl::read_excel(here("analysis/data/raw_data/korean-archaeologica-sites.xlsx"))
# data from PhD data sheet, not KAS sheet.
mydata <- read.csv(here("analysis/data/raw_data/General_info.csv"))
# raw material data from KAS data sheet.
kasr <- read.csv(here("analysis/data/raw_data/Rawmaterial_info.csv"))
# assemblage composition data from KAS data sheet.
kasa <- read.csv(here("analysis/data/raw_data/Assemblage_info.csv"))
# volume of the cultural layer from KAS data sheet.
kasv <- read.csv(here("analysis/data/raw_data/Dating_info.csv"))
# join artefact type freqs with site data
# NOTE(review): the result of this pipeline is not assigned, so it is only
# printed and then discarded — confirm whether an assignment was intended.
kasa %>%
pivot_longer(-X1,
names_to = "site_name",
values_to = "count") %>%
pivot_wider(names_from = "X1",
values_from = "count") %>%
left_join(mydata)
# Transpose kasv so sites become rows: the first row (the original first
# column) supplies the new column names, then every column is parsed to a
# number and artifact density is computed as artifacts per unit volume.
# NOTE(review): mutate_all() is superseded by across() in newer dplyr.
kasv_tidy <-
kasv %>%
t %>%
as_tibble() %>%
setNames(as.character(.[1,])) %>%
.[-1,] %>%
mutate_all(parse_number) %>%
mutate(artefact_density = total_artifacts / volume,
sites = names(kasv)[-1]) %>%
left_join(mydata, by = c('sites' = 'site_name' )) %>%
mutate(has_sp = ifelse(is.na(SP.), "no", "yes"))
# Parse radiocarbon ages of the form "<age>±<error>" into numeric age (ka)
# and error columns, and flag stemmed-point presence.
# NOTE(review): mydata_ages is not used later in this script — confirm it
# is consumed elsewhere (e.g. by the paper's Rmd).
mydata_ages <-
mydata %>%
separate(C14.BP., into = c('age', 'error'),
sep = "±") %>%
mutate(age_ka = parse_number(age) / 1000,
error = parse_number(error)) %>%
mutate(has_sp = ifelse(!is.na(SP.), "yes", "no"))
# compute t-test: does artifact density differ by stemmed-point presence?
den_sp_ttest <-
t.test(artefact_density ~ has_sp, data = kasv_tidy)
# extract elements from the t-test output
den_sp_ttest_t <- round(unname(den_sp_ttest$statistic), 3)
den_sp_ttest_p <- round(unname(den_sp_ttest$p.value ), 3)
den_sp_ttest_df <- round(unname(den_sp_ttest$parameter ), 3)
# t(degrees of freedom) = the t statistic, p = p value.
den_sp_ttest_str <-
paste0("t(", den_sp_ttest_df, ") = ", den_sp_ttest_t, ", p = ", den_sp_ttest_p)
# Volume and artefact counts to get density over time.
# Inset boxplot: density by stemmed-point presence, annotated with the
# t-test summary string built above.
density_sp_sub_plot <-
ggplot(kasv_tidy,
aes(has_sp,
artefact_density)) +
geom_boxplot(lwd = 0.1) +
annotate("text",
x = 1.5,
y = 9,
label = den_sp_ttest_str,
size = 1.5) +
theme_bw(base_size = 6) +
labs(x = "Stemmed points present?",
y = "Artifact density")
# Main panel: density against assemblage age, point size = assemblage size,
# colour = stemmed-point presence.
density_sp_main_plot <-
ggplot(kasv_tidy,
aes(date_age / 1000,
artefact_density)) +
geom_point(aes(size = total_artifacts,
colour = has_sp)) +
ylab(bquote('Artifact density'~(n/m^3))) +
xlab("Age of assemblage (ka)") +
scale_size_continuous(name = "Total number\nof artifacts") +
scale_color_viridis_d(name = "Stemmed\npoints\npresent?") +
theme_minimal(base_size = 8)
# https://wilkelab.org/cowplot/articles/drawing_with_on_plots.html
library(cowplot)
# Overlay the inset at fractional position (x, y, width, height) and force a
# white background so the saved PNG has no transparency.
ggdraw(density_sp_main_plot) +
draw_plot(density_sp_sub_plot,
.37, .62,
.32, .33) +
theme(panel.background = element_rect(fill='white', colour="white"),
plot.background = element_rect(fill='white', colour="white"))
ggsave(here::here("analysis/figures/002-age-by-density.png"),
width = 4.45,
height = 4.45,
units = "in")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Cicero.R
\name{featureToGR}
\alias{featureToGR}
\title{feature to GRanges}
\usage{
featureToGR(feature)
}
\description{
feature to GRanges
}
|
/man/featureToGR.Rd
|
no_license
|
xuzhougeng/scatacr
|
R
| false
| true
| 219
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Cicero.R
\name{featureToGR}
\alias{featureToGR}
\title{feature to GRanges}
\usage{
featureToGR(feature)
}
\description{
feature to GRanges
}
|
# Assemble the master plotting dataset for the half-life comparison.
#
# half_life, dose, blq: per-profile tables sharing common key columns
#   (full_join with no `by` joins on all shared column names).
# loq00, loq20, noerror: estimator outputs for three scenarios; every column
#   except the first (presumably the key — confirm) is suffixed with the
#   scenario tag before joining so the scenarios coexist in one wide table.
#
# Returns the joined table with extra columns:
#   *_same            - TRUE when the estimate is within 1e-5 relative
#                       difference of the theoretical half-life `thalf`.
#                       NOTE(review): the names suggest a loq20-vs-loq00
#                       comparison, but the code compares against thalf —
#                       confirm which is intended.
#   *_noneg           - negative half-life estimates clamped to zero
#   *_slight_positive - negative estimates replaced by half the smallest
#                       positive estimate, so they remain visible on a
#                       log-scaled axis
make_d_plot <- function(half_life, dose, blq, loq00, loq20, noerror) {
result <-
half_life %>%
full_join(dose) %>%
full_join(blq) %>%
full_join(
loq00 %>%
# suffix all non-key columns with the scenario tag
rename_at(.vars=-1, .funs=function(x) paste0(x, "_loq00"))
) %>%
full_join(
loq20 %>%
rename_at(.vars=-1, .funs=function(x) paste0(x, "_loq20"))
) %>%
full_join(
noerror %>%
rename_at(.vars=-1, .funs=function(x) paste0(x, "_noerror"))
) %>%
mutate(
# flag estimates that effectively equal the theoretical value
tobit_20_00_same=abs((half.life_tobit_loq20 - thalf)/thalf) < 0.00001,
std_20_00_same=abs((half.life_std_loq20 - thalf)/thalf) < 0.00001
)
# half of the smallest positive estimate across both methods; used as a
# stand-in for negative estimates on log-scale plots
almost_zero <- c(result$half.life_tobit_loq20, result$half.life_std_loq20)
almost_zero <- min(almost_zero[!is.na(almost_zero) & almost_zero > 0])/2
ret <-
result %>%
mutate(
half.life_tobit_loq20_noneg=
case_when(
half.life_tobit_loq20 < 0~0,
TRUE~half.life_tobit_loq20
),
half.life_std_loq20_noneg=
case_when(
half.life_std_loq20 < 0~0,
TRUE~half.life_std_loq20
),
half.life_tobit_loq20_slight_positive=
case_when(
half.life_tobit_loq20 < 0~almost_zero,
TRUE~half.life_tobit_loq20
),
half.life_std_loq20_slight_positive=
case_when(
half.life_std_loq20 < 0~almost_zero,
TRUE~half.life_std_loq20
)
)
ret
}
# Sanity-check the missing-value pattern in the plotting dataset, then return
# the number of profiles with no tobit half-life estimate.
#
# d_plot: output of make_d_plot().
# Returns: integer count of NA tobit (loq20) half-life estimates.
verify_and_count_missing <- function(d_plot) {
  d_plot %>%
    # Both or neither method is missing results on the same rows.
    # (Bug fix: the original xor() compared half.life_std_loq20 to itself,
    # which is always FALSE, so the check was vacuous; compare the std and
    # tobit columns instead.)
    verify(!xor(is.na(half.life_std_loq20), is.na(half.life_tobit_loq20))) %>%
    # NA values are only when there are <= 2 points available
    verify(xor(n_above_loq > 2, is.na(half.life_std_loq20)))
  sum(is.na(d_plot$half.life_tobit_loq20))
}
# Summarise how often each method produces a (nonsensical) negative
# half-life estimate.
#
# d_plot: output of make_d_plot().
# Returns: one-row tibble with counts, the total row count, and percentages.
make_count_negative <- function(d_plot) {
  # na.rm = TRUE excludes profiles with no estimate; without it a single NA
  # (which verify_and_count_missing shows can occur) makes the whole count NA.
  tibble(
    tobit_count = sum(d_plot$half.life_tobit_loq20 < 0, na.rm = TRUE),
    std_count = sum(d_plot$half.life_std_loq20 < 0, na.rm = TRUE),
    n = nrow(d_plot),
    tobit_percent = 100 * tobit_count / n,
    std_percent = 100 * std_count / n
  )
}
# ECDF of the estimated/theoretical half-life ratio for the tobit and
# least-squares methods. Negative estimates were remapped to a tiny positive
# value upstream (make_d_plot) so they survive the log10 x scale.
make_figure1 <- function(d_plot) {
d_plot_mod <-
d_plot %>%
mutate(
# Facet labels written in plotmath form (e.g. "`2 BLQ before t`[last]");
# the faceting helpers below pass them through label_parsed so "last"
# renders as a subscript.
n_below_loq_before_or_at_tlast_Text=
paste0("`", n_below_loq_before_or_at_tlast, " BLQ before t`[last]"),
n_below_loq_after_tlast_Text=
paste0("`", n_below_loq_after_tlast, " BLQ after t`[last]")
)
ggplot(d_plot_mod) +
# Reference lines: ratio of exactly 1 (solid) and 2-fold error bounds
# (dashed; "63" is a custom dash pattern string).
geom_vline(xintercept=1, colour="gray", size=1) +
geom_vline(xintercept=c(0.5, 2), colour="gray", size=1, linetype="63") +
stat_ecdf(aes(x=half.life_tobit_loq20_slight_positive/thalf, colour="tobit"), size=1) +
stat_ecdf(aes(x=half.life_std_loq20_slight_positive/thalf, colour="least-squares"), size=1) +
scale_x_log10(breaks=c(0.1, 0.5, 1, 2, 10)) +
coord_cartesian(xlim=c(0.1, 10)) +
labs(
x="Estimated/Theoretical Half-Life Ratio",
y="Cumulative Distribution of Ratios",
colour=NULL
) +
# Legend anchored inside the lower-right corner of the panel.
theme(
legend.position=c(0.95, 0.05),
legend.justification=c(1, 0)
)
}
# Facet the ratio-ECDF figure by TMDD status and move the legend below the
# panels (clearing the in-panel justification set by make_figure1).
make_figure1_tmdd <- function(p) {
  faceted <- p + facet_grid(~TMDD_Text)
  faceted +
    theme(
      legend.position = "bottom",
      legend.justification = NULL
    )
}
# Facet the ratio-ECDF figure by the number of BLQ samples at/before tlast,
# with a 50th-percentile reference line drawn beneath the ECDF curves.
make_figure1_early_blq <- function(p) {
p_mod <- p
# Insert the hline under all the other elements so that it doesn't obscure the
# ecdf lines.
p_mod$layers <-
c(
geom_hline(yintercept=0.5, colour="gray"),
p_mod$layers
)
p_mod +
facet_grid(
n_below_loq_before_or_at_tlast_Text~.,
# label_parsed renders the plotmath facet labels built by make_figure1
labeller=label_parsed
) +
theme(
legend.position="bottom",
legend.justification=NULL
)
}
# Facet the ratio-ECDF figure by the number of BLQ samples after tlast,
# with a 50th-percentile reference line drawn beneath the ECDF curves.
make_figure1_late_blq <- function(p) {
p_mod <- p
# Insert the hline under all the other elements so that it doesn't obscure the
# ecdf lines.
p_mod$layers <-
c(
geom_hline(yintercept=0.5, colour="gray"),
p_mod$layers
)
p_mod +
facet_grid(
n_below_loq_after_tlast_Text~.,
# label_parsed renders the plotmath facet labels built by make_figure1
labeller=label_parsed
) +
theme(
legend.position="bottom",
legend.justification=NULL
)
}
# Bar chart of the number of points used in the terminal-slope fit, as a
# percent of profiles, with the three methods side by side (each series is
# nudged horizontally so the narrow bars do not overlap).
# NOTE(review): `..count..` is the legacy after-stat spelling; newer ggplot2
# prefers after_stat(count), though the old form still works.
make_figure2 <- function(d_plot) {
ggplot(d_plot) +
geom_bar(
aes(
x=lambda.z.n.points_std_loq20,
y = 100*(..count..)/sum(..count..),
#colour="least-squares",
fill="least-squares"
),
colour=NA,
width=0.2, position=position_nudge(x=0.2)
) +
geom_bar(
aes(
x=lambda.z.n.points_tobit_loq20,
y = 100*(..count..)/sum(..count..),
#colour="tobit (above LOQ)",
fill="tobit (above LOQ)"
),
colour=NA,
width=0.2, position=position_nudge(x=0)
) +
geom_bar(
aes(
x=lambda.z.n.points_all_loq20,
y = 100*(..count..)/sum(..count..),
#colour="tobit (total)",
fill="tobit (total)"
),
colour=NA,
width=0.2, position=position_nudge(x=-0.2)
) +
# Tick the x axis at every point count that actually occurs in any series.
scale_x_continuous(
breaks=
unique(c(
d_plot$lambda.z.n.points_std_loq20,
d_plot$lambda.z.n.points_all_loq20,
d_plot$lambda.z.n.points_tobit_loq20
))
) +
labs(
#colour=NULL,
fill=NULL,
x="Number of Points",
y="Percent of Profiles"
) +
theme(legend.position="bottom")
}
# Split the points-per-profile bar chart into one panel per TMDD status,
# letting each panel scale its own y axis.
make_figure2_tmdd <- function(p) {
  p + facet_wrap(~TMDD_Text, nrow = 1, scales = "free_y")
}
|
/_drake_functions_figures.R
|
no_license
|
billdenney/tobit-half-life
|
R
| false
| false
| 5,333
|
r
|
make_d_plot <- function(half_life, dose, blq, loq00, loq20, noerror) {
result <-
half_life %>%
full_join(dose) %>%
full_join(blq) %>%
full_join(
loq00 %>%
rename_at(.vars=-1, .funs=function(x) paste0(x, "_loq00"))
) %>%
full_join(
loq20 %>%
rename_at(.vars=-1, .funs=function(x) paste0(x, "_loq20"))
) %>%
full_join(
noerror %>%
rename_at(.vars=-1, .funs=function(x) paste0(x, "_noerror"))
) %>%
mutate(
tobit_20_00_same=abs((half.life_tobit_loq20 - thalf)/thalf) < 0.00001,
std_20_00_same=abs((half.life_std_loq20 - thalf)/thalf) < 0.00001
)
almost_zero <- c(result$half.life_tobit_loq20, result$half.life_std_loq20)
almost_zero <- min(almost_zero[!is.na(almost_zero) & almost_zero > 0])/2
ret <-
result %>%
mutate(
half.life_tobit_loq20_noneg=
case_when(
half.life_tobit_loq20 < 0~0,
TRUE~half.life_tobit_loq20
),
half.life_std_loq20_noneg=
case_when(
half.life_std_loq20 < 0~0,
TRUE~half.life_std_loq20
),
half.life_tobit_loq20_slight_positive=
case_when(
half.life_tobit_loq20 < 0~almost_zero,
TRUE~half.life_tobit_loq20
),
half.life_std_loq20_slight_positive=
case_when(
half.life_std_loq20 < 0~almost_zero,
TRUE~half.life_std_loq20
)
)
ret
}
# Sanity-check the missing-value pattern in the plotting dataset, then return
# the number of profiles with no tobit half-life estimate.
#
# d_plot: output of make_d_plot().
# Returns: integer count of NA tobit (loq20) half-life estimates.
verify_and_count_missing <- function(d_plot) {
  d_plot %>%
    # Both or neither method is missing results on the same rows.
    # (Bug fix: the original xor() compared half.life_std_loq20 to itself,
    # which is always FALSE, so the check was vacuous; compare the std and
    # tobit columns instead.)
    verify(!xor(is.na(half.life_std_loq20), is.na(half.life_tobit_loq20))) %>%
    # NA values are only when there are <= 2 points available
    verify(xor(n_above_loq > 2, is.na(half.life_std_loq20)))
  sum(is.na(d_plot$half.life_tobit_loq20))
}
make_count_negative <- function(d_plot) {
tibble(
tobit_count=sum(d_plot$half.life_tobit_loq20 < 0),
std_count=sum(d_plot$half.life_std_loq20 < 0),
n=nrow(d_plot),
tobit_percent=100*tobit_count/n,
std_percent=100*std_count/n
)
}
make_figure1 <- function(d_plot) {
d_plot_mod <-
d_plot %>%
mutate(
n_below_loq_before_or_at_tlast_Text=
paste0("`", n_below_loq_before_or_at_tlast, " BLQ before t`[last]"),
n_below_loq_after_tlast_Text=
paste0("`", n_below_loq_after_tlast, " BLQ after t`[last]")
)
ggplot(d_plot_mod) +
geom_vline(xintercept=1, colour="gray", size=1) +
geom_vline(xintercept=c(0.5, 2), colour="gray", size=1, linetype="63") +
stat_ecdf(aes(x=half.life_tobit_loq20_slight_positive/thalf, colour="tobit"), size=1) +
stat_ecdf(aes(x=half.life_std_loq20_slight_positive/thalf, colour="least-squares"), size=1) +
scale_x_log10(breaks=c(0.1, 0.5, 1, 2, 10)) +
coord_cartesian(xlim=c(0.1, 10)) +
labs(
x="Estimated/Theoretical Half-Life Ratio",
y="Cumulative Distribution of Ratios",
colour=NULL
) +
theme(
legend.position=c(0.95, 0.05),
legend.justification=c(1, 0)
)
}
make_figure1_tmdd <- function(p) {
p +
facet_grid(~TMDD_Text) +
theme(
legend.position="bottom",
legend.justification=NULL
)
}
make_figure1_early_blq <- function(p) {
p_mod <- p
# Insert the hline under all the other elements so that it doesn't obscure the
# ecdf lines.
p_mod$layers <-
c(
geom_hline(yintercept=0.5, colour="gray"),
p_mod$layers
)
p_mod +
facet_grid(
n_below_loq_before_or_at_tlast_Text~.,
labeller=label_parsed
) +
theme(
legend.position="bottom",
legend.justification=NULL
)
}
make_figure1_late_blq <- function(p) {
p_mod <- p
# Insert the hline under all the other elements so that it doesn't obscure the
# ecdf lines.
p_mod$layers <-
c(
geom_hline(yintercept=0.5, colour="gray"),
p_mod$layers
)
p_mod +
facet_grid(
n_below_loq_after_tlast_Text~.,
labeller=label_parsed
) +
theme(
legend.position="bottom",
legend.justification=NULL
)
}
make_figure2 <- function(d_plot) {
ggplot(d_plot) +
geom_bar(
aes(
x=lambda.z.n.points_std_loq20,
y = 100*(..count..)/sum(..count..),
#colour="least-squares",
fill="least-squares"
),
colour=NA,
width=0.2, position=position_nudge(x=0.2)
) +
geom_bar(
aes(
x=lambda.z.n.points_tobit_loq20,
y = 100*(..count..)/sum(..count..),
#colour="tobit (above LOQ)",
fill="tobit (above LOQ)"
),
colour=NA,
width=0.2, position=position_nudge(x=0)
) +
geom_bar(
aes(
x=lambda.z.n.points_all_loq20,
y = 100*(..count..)/sum(..count..),
#colour="tobit (total)",
fill="tobit (total)"
),
colour=NA,
width=0.2, position=position_nudge(x=-0.2)
) +
scale_x_continuous(
breaks=
unique(c(
d_plot$lambda.z.n.points_std_loq20,
d_plot$lambda.z.n.points_all_loq20,
d_plot$lambda.z.n.points_tobit_loq20
))
) +
labs(
#colour=NULL,
fill=NULL,
x="Number of Points",
y="Percent of Profiles"
) +
theme(legend.position="bottom")
}
make_figure2_tmdd <- function(p) {
p +
facet_wrap(~TMDD_Text, scales="free_y", nrow=1)
}
|
### Pass test ------------------------------------------------------------------
# type
expect_silent(check_class("chr", type = "character"))
expect_silent(check_class(2, type = "numeric"))
expect_silent(check_class(-1.4, type = "numeric"))
expect_silent(check_class(2L, type = "integer"))
expect_silent(check_class(TRUE, type = "logical"))
expect_silent(check_class(FALSE, type = "logical"))
expect_silent(check_class(NULL, type = "NULL"))
expect_silent(check_class(data.frame(A = c(1, 2)), type = "data.frame"))
# n
expect_silent(check_class("chr", type = "character", n = 1))
expect_silent(check_class(c("chr", "chr2"), type = "character", n = 2))
expect_silent(check_class("chr", type = "character", n = 1L))
# allowNULL (Exception from e.g. character)
expect_silent(check_class(NULL, type = "character", allowNULL = TRUE))
expect_silent(check_class(NULL, type = "character", allowNULL = TRUE, n = 1))
expect_silent(check_class(NULL, type = "numeric", allowNULL = TRUE, n = 2))
### Errors ---------------------------------------------------------------------
# var
expect_error(check_class(type = "character"))
# type
expect_error(check_class(var = "character"))
expect_error(check_class(id, type = 12), class = "check_class_type_error")
# n
expect_error(
check_class(var = "chr", type = "character", n = TRUE),
class = "check_class_n_error"
)
expect_error(
check_class(var = "chr", type = "character", n = -1),
class = "check_class_n_error",
pattern = "^`n` must be not negative numeric\\(1\\) or integer\\(1\\)\\.$"
)
expect_error(
check_class(var = "chr", type = "character", n = c(1, 2)),
class = "check_class_n_error",
pattern = paste("^`n` must be numeric\\(1\\) or integer\\(1\\),",
"not of class \"numeric\\(2\\)\"\\.$")
)
# allowNULL
expect_error(
check_class(id, type = "numeric", allowNULL = "x"),
class = "check_class_allowNULL_error"
)
expect_error(
check_class(id, type = "numeric", allowNULL = NULL),
class = "check_class_allowNULL_error"
)
expect_error(
check_class(id, type = "numeric", allowNULL = 12),
class = "check_class_allowNULL_error"
)
# individual error generated by function
expect_error(check_class(2, "character"), class = "eval_2_error")
expect_error(check_class(2, "character"), class = "rlang_error")
expect_error(check_class(2, "data.frame"), class = "eval_2_error")
expect_error(check_class(2, "data.frame"), class = "rlang_error")
expect_error(check_class(TRUE, "data.frame"), class = "eval_TRUE_error")
expect_error(check_class(TRUE, "data.frame"), class = "rlang_error")
expect_error(
check_class(NULL, "character", allowNULL = FALSE),
class = "eval_NULL_error"
)
expect_error(
check_class(NULL, "character", allowNULL = FALSE),
class = "rlang_error"
)
id <- 1
err <- tryCatch(
check_class(id, "character", allowNULL = FALSE),
error = function(err) err
)
expect_true(rlang::inherits_all(err, c("eval_id_error", "rlang_error")))
expect_equal(err$value, 1)
expect_equal(err$current_class, "numeric")
# check typical use in function
fun <- function(x, n = NULL) {
testr::check_class(x, "numeric", allowNULL = TRUE, n = n)
TRUE
}
expect_true(fun(1))
expect_true(fun(1, n = 1))
expect_true(fun(NULL))
expect_error(
fun("1"),
class = "fun_x_error",
pattern = "`x` must be numeric, not of class \"character\"\\."
)
expect_error(
fun(1L),
class = "fun_x_error",
pattern = "`x` must be numeric, not of class \"integer\"\\."
)
# Name `class =` explicitly, matching every other expect_error call in this
# file; previously "fun_x_error" was passed positionally and only landed on
# `class` because `pattern` was already supplied by name.
expect_error(
  fun(1, n = 2),
  class = "fun_x_error",
  pattern = "`x` must be numeric\\(2\\), not of class \"numeric\\(1\\)\"\\."
)
expect_error(
  fun(1, n = 0),
  class = "fun_x_error",
  pattern = "`x` must be numeric\\(0\\), not of class \"numeric\\(1\\)\"\\."
)
|
/inst/tinytest/test_check_class.R
|
no_license
|
thfuchs/testr
|
R
| false
| false
| 3,697
|
r
|
### Pass test ------------------------------------------------------------------
# type
expect_silent(check_class("chr", type = "character"))
expect_silent(check_class(2, type = "numeric"))
expect_silent(check_class(-1.4, type = "numeric"))
expect_silent(check_class(2L, type = "integer"))
expect_silent(check_class(TRUE, type = "logical"))
expect_silent(check_class(FALSE, type = "logical"))
expect_silent(check_class(NULL, type = "NULL"))
expect_silent(check_class(data.frame(A = c(1, 2)), type = "data.frame"))
# n
expect_silent(check_class("chr", type = "character", n = 1))
expect_silent(check_class(c("chr", "chr2"), type = "character", n = 2))
expect_silent(check_class("chr", type = "character", n = 1L))
# allowNULL (Exception from e.g. character)
expect_silent(check_class(NULL, type = "character", allowNULL = TRUE))
expect_silent(check_class(NULL, type = "character", allowNULL = TRUE, n = 1))
expect_silent(check_class(NULL, type = "numeric", allowNULL = TRUE, n = 2))
### Errors ---------------------------------------------------------------------
# var
expect_error(check_class(type = "character"))
# type
expect_error(check_class(var = "character"))
expect_error(check_class(id, type = 12), class = "check_class_type_error")
# n
expect_error(
check_class(var = "chr", type = "character", n = TRUE),
class = "check_class_n_error"
)
expect_error(
check_class(var = "chr", type = "character", n = -1),
class = "check_class_n_error",
pattern = "^`n` must be not negative numeric\\(1\\) or integer\\(1\\)\\.$"
)
expect_error(
check_class(var = "chr", type = "character", n = c(1, 2)),
class = "check_class_n_error",
pattern = paste("^`n` must be numeric\\(1\\) or integer\\(1\\),",
"not of class \"numeric\\(2\\)\"\\.$")
)
# allowNULL
expect_error(
check_class(id, type = "numeric", allowNULL = "x"),
class = "check_class_allowNULL_error"
)
expect_error(
check_class(id, type = "numeric", allowNULL = NULL),
class = "check_class_allowNULL_error"
)
expect_error(
check_class(id, type = "numeric", allowNULL = 12),
class = "check_class_allowNULL_error"
)
# individual error generated by function
expect_error(check_class(2, "character"), class = "eval_2_error")
expect_error(check_class(2, "character"), class = "rlang_error")
expect_error(check_class(2, "data.frame"), class = "eval_2_error")
expect_error(check_class(2, "data.frame"), class = "rlang_error")
expect_error(check_class(TRUE, "data.frame"), class = "eval_TRUE_error")
expect_error(check_class(TRUE, "data.frame"), class = "rlang_error")
expect_error(
check_class(NULL, "character", allowNULL = FALSE),
class = "eval_NULL_error"
)
expect_error(
check_class(NULL, "character", allowNULL = FALSE),
class = "rlang_error"
)
id <- 1
err <- tryCatch(
check_class(id, "character", allowNULL = FALSE),
error = function(err) err
)
expect_true(rlang::inherits_all(err, c("eval_id_error", "rlang_error")))
expect_equal(err$value, 1)
expect_equal(err$current_class, "numeric")
# check typical use in function
fun <- function(x, n = NULL) {
testr::check_class(x, "numeric", allowNULL = TRUE, n = n)
TRUE
}
expect_true(fun(1))
expect_true(fun(1, n = 1))
expect_true(fun(NULL))
expect_error(
fun("1"),
class = "fun_x_error",
pattern = "`x` must be numeric, not of class \"character\"\\."
)
expect_error(
fun(1L),
class = "fun_x_error",
pattern = "`x` must be numeric, not of class \"integer\"\\."
)
# Name `class =` explicitly, matching every other expect_error call in this
# file; previously "fun_x_error" was passed positionally and only landed on
# `class` because `pattern` was already supplied by name.
expect_error(
  fun(1, n = 2),
  class = "fun_x_error",
  pattern = "`x` must be numeric\\(2\\), not of class \"numeric\\(1\\)\"\\."
)
expect_error(
  fun(1, n = 0),
  class = "fun_x_error",
  pattern = "`x` must be numeric\\(0\\), not of class \"numeric\\(1\\)\"\\."
)
|
# Plot 4 of the Electric Power Consumption assignment: a 2x2 panel of
# time-series plots for 2007-02-01 and 2007-02-02, saved to plot4.png.
# NOTE(review): the machine-specific setwd() is kept for fidelity but is
# fragile; prefer running from the project directory.
setwd('/home/kushwanth/machinelearning/r_practice/DataScience_Specialization/R programming/assignment1')
# The raw file marks missing observations with "?"; declaring na.strings
# lets the measurement columns parse as numeric directly. (Previously, on R
# versions where strings read as factors, as.numeric() on those columns
# returned level codes rather than the measured values.)
electric_data = read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                           na.strings = "?", stringsAsFactors = FALSE)
# Combine Date and Time into a POSIXlt date-time for the x axes.
electric_data$Date_Time <- strptime(paste(electric_data$Date, electric_data$Time), "%d/%m/%Y %H:%M:%S")
electric_data[c(100, 20000, 50000), c(1, 2, 10)]
electric_data$Date <- as.Date(electric_data$Date, "%d/%m/%Y")
# Keep only the two days of interest.
electric_data <- electric_data[electric_data$Date == as.Date("2007-02-01") | electric_data$Date == as.Date("2007-02-02"), ]
# Ensure plotted columns are numeric (no-ops now that na.strings parsed them
# as numeric; the duplicated Global_active_power conversion was removed).
electric_data$Global_active_power <- as.numeric(electric_data$Global_active_power)
electric_data$Sub_metering_1 <- as.numeric(electric_data$Sub_metering_1)
electric_data$Sub_metering_2 <- as.numeric(electric_data$Sub_metering_2)
electric_data$Sub_metering_3 <- as.numeric(electric_data$Sub_metering_3)
png("./plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))
# Panel 1: global active power over time.
plot(electric_data$Date_Time, electric_data$Global_active_power, type = 'l', xlab = '', ylab = 'Global Active Power', main = '')
# Panel 2: voltage over time.
plot(electric_data$Date_Time, electric_data$Voltage, type = 'l', ylab = 'Voltage', xlab = 'datetime', main = '')
# Panel 3: the three sub-metering series (black/red/blue) with a legend.
plot(electric_data$Date_Time, electric_data$Sub_metering_1, type = 'l', xlab = '', ylab = 'Energy sub metering', main = '')
lines(electric_data$Date_Time, electric_data$Sub_metering_2, type = 'l', col = 'red')
lines(electric_data$Date_Time, electric_data$Sub_metering_3, type = 'l', col = 'blue')
legend('topright', legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'),
       cex = .75, lty = c(1, 1, 1), col = c(1, 2, 4), adj = c(0, 0.00001, 0.00002))
# Panel 4: global reactive power over time.
plot(electric_data$Date_Time, electric_data$Global_reactive_power, type = 'l', ylab = 'Global_reactive_power', xlab = 'datetime', main = '')
dev.off()
|
/plot4.R
|
no_license
|
tkngoutham/ExData_Plotting1
|
R
| false
| false
| 1,999
|
r
|
# Plot 4 of the Electric Power Consumption assignment: a 2x2 panel of
# time-series plots for 2007-02-01 and 2007-02-02, saved to plot4.png.
# NOTE(review): the machine-specific setwd() is kept for fidelity but is
# fragile; prefer running from the project directory.
setwd('/home/kushwanth/machinelearning/r_practice/DataScience_Specialization/R programming/assignment1')
# The raw file marks missing observations with "?"; declaring na.strings
# lets the measurement columns parse as numeric directly. (Previously, on R
# versions where strings read as factors, as.numeric() on those columns
# returned level codes rather than the measured values.)
electric_data = read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                           na.strings = "?", stringsAsFactors = FALSE)
# Combine Date and Time into a POSIXlt date-time for the x axes.
electric_data$Date_Time <- strptime(paste(electric_data$Date, electric_data$Time), "%d/%m/%Y %H:%M:%S")
electric_data[c(100, 20000, 50000), c(1, 2, 10)]
electric_data$Date <- as.Date(electric_data$Date, "%d/%m/%Y")
# Keep only the two days of interest.
electric_data <- electric_data[electric_data$Date == as.Date("2007-02-01") | electric_data$Date == as.Date("2007-02-02"), ]
# Ensure plotted columns are numeric (no-ops now that na.strings parsed them
# as numeric; the duplicated Global_active_power conversion was removed).
electric_data$Global_active_power <- as.numeric(electric_data$Global_active_power)
electric_data$Sub_metering_1 <- as.numeric(electric_data$Sub_metering_1)
electric_data$Sub_metering_2 <- as.numeric(electric_data$Sub_metering_2)
electric_data$Sub_metering_3 <- as.numeric(electric_data$Sub_metering_3)
png("./plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))
# Panel 1: global active power over time.
plot(electric_data$Date_Time, electric_data$Global_active_power, type = 'l', xlab = '', ylab = 'Global Active Power', main = '')
# Panel 2: voltage over time.
plot(electric_data$Date_Time, electric_data$Voltage, type = 'l', ylab = 'Voltage', xlab = 'datetime', main = '')
# Panel 3: the three sub-metering series (black/red/blue) with a legend.
plot(electric_data$Date_Time, electric_data$Sub_metering_1, type = 'l', xlab = '', ylab = 'Energy sub metering', main = '')
lines(electric_data$Date_Time, electric_data$Sub_metering_2, type = 'l', col = 'red')
lines(electric_data$Date_Time, electric_data$Sub_metering_3, type = 'l', col = 'blue')
legend('topright', legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'),
       cex = .75, lty = c(1, 1, 1), col = c(1, 2, 4), adj = c(0, 0.00001, 0.00002))
# Panel 4: global reactive power over time.
plot(electric_data$Date_Time, electric_data$Global_reactive_power, type = 'l', ylab = 'Global_reactive_power', xlab = 'datetime', main = '')
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cnd-restarts.R
\name{rst_abort}
\alias{rst_abort}
\title{Jump to the abort restart}
\usage{
rst_abort()
}
\description{
The abort restart is the only restart that is established at top
level. It is used by R as a top-level target, most notably when an
error is issued (see \code{\link[=abort]{abort()}}) that no handler is able
to deal with (see \code{\link[=with_handlers]{with_handlers()}}).
}
\examples{
# The `abort` restart is a bit special in that it is always
# registered in a R session. You will always find it on the restart
# stack because it is established at top level:
rst_list()
# You can use the `abort` restart to jump to top level without
# signalling an error:
\dontrun{
fn <- function() {
cat("aborting...\\n")
rst_abort()
cat("This is never called\\n")
}
{
fn()
cat("This is never called\\n")
}
}
# The `abort` restart is the target that R uses to jump to top
# level when critical errors are signalled:
\dontrun{
{
abort("error")
cat("This is never called\\n")
}
}
# If another `abort` restart is specified, errors are signalled as
# usual but then control flow resumes from the new restart:
\dontrun{
out <- NULL
{
out <- with_restarts(abort("error"), abort = function() "restart!")
cat("This is called\\n")
}
cat("`out` has now become:", out, "\\n")
}
}
\seealso{
\code{\link[=rst_jump]{rst_jump()}}, \code{\link[=abort]{abort()}}
}
|
/man/rst_abort.Rd
|
no_license
|
yutannihilation/rlang
|
R
| false
| true
| 1,461
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cnd-restarts.R
\name{rst_abort}
\alias{rst_abort}
\title{Jump to the abort restart}
\usage{
rst_abort()
}
\description{
The abort restart is the only restart that is established at top
level. It is used by R as a top-level target, most notably when an
error is issued (see \code{\link[=abort]{abort()}}) that no handler is able
to deal with (see \code{\link[=with_handlers]{with_handlers()}}).
}
\examples{
# The `abort` restart is a bit special in that it is always
# registered in a R session. You will always find it on the restart
# stack because it is established at top level:
rst_list()
# You can use the `abort` restart to jump to top level without
# signalling an error:
\dontrun{
fn <- function() {
cat("aborting...\\n")
rst_abort()
cat("This is never called\\n")
}
{
fn()
cat("This is never called\\n")
}
}
# The `abort` restart is the target that R uses to jump to top
# level when critical errors are signalled:
\dontrun{
{
abort("error")
cat("This is never called\\n")
}
}
# If another `abort` restart is specified, errors are signalled as
# usual but then control flow resumes from the new restart:
\dontrun{
out <- NULL
{
out <- with_restarts(abort("error"), abort = function() "restart!")
cat("This is called\\n")
}
cat("`out` has now become:", out, "\\n")
}
}
\seealso{
\code{\link[=rst_jump]{rst_jump()}}, \code{\link[=abort]{abort()}}
}
|
# Metropolis sampling of the posterior mean of a normal model (Kubo, ch. 8).
# Draw a single Uniform(0, 1) variate; the argument `x` is unused and only
# allows the function to be called with or without an argument.
random <- function(x) {
n <- runif(1, 0.0, 1.0)
return (n[1])
}
# Simulated data: true mean drawn from N(5, 2), then 10 observations with
# known sd = 2 around it.
n <- rnorm(1, mean=5.0, sd=2.0)
data <- rnorm(10, mean=n[1], sd=2.0)
cat("data D, taken from Normal distribution (mean= ", n[1], ", sd=2.0)\n\n")
data
summary(data)
cat("\n")
# log p(D|m)
likelihood <- function(m) {
return (sum(log(dnorm(data, mean=m, sd=2.0))))
}
# log p(m) — a N(5, 1) prior on the mean
probP <- function(m) {
return (log(dnorm(m, mean=5.0, sd=1.0)))
}
# log p(D|m)p(m) — unnormalised log posterior
# NOTE(review): `eval` (and `mean` below) shadow base R functions; harmless
# in this script but easy to trip over.
eval <- function(m) {
return (likelihood(m) + probP(m))
}
mean <- 10.0 * random()  # initial state, uniform on [0, 10)
prevEval <- eval(mean)
res <- 1:10999  # preallocated chain; the first 999 entries become burn-in
i <- 1
while (i < 11000) {
# symmetric proposal: Uniform(mean - 0.5, mean + 0.5)
newMean <- random() + mean - 0.5
newEval <- eval(newMean)
r <- newEval - prevEval  # log acceptance ratio
# Accept when the posterior improves, or with probability exp(r) otherwise.
# NOTE(review): `i` only advances on acceptance, so rejected steps do not
# repeat the current state in `res`; a textbook Metropolis chain records
# the unchanged state on rejection too — confirm this is intended.
if (prevEval < newEval || log(random()) < r) {
prevEval = newEval
mean = newMean
res[i] <- newMean
i <- i + 1
}
}
# p(m|D)
# drop the first 999 draws as burn-in and summarise the posterior sample
sampling <- tail(res, n=10000)
summary(sampling)
hist(sampling, breaks=seq(-0.5, 9.5, 0.1))
|
/DataScience/KuboTakuya/chapter-08/metro.R
|
permissive
|
YuichiroSato/Blog
|
R
| false
| false
| 912
|
r
|
random <- function(x) {
n <- runif(1, 0.0, 1.0)
return (n[1])
}
n <- rnorm(1, mean=5.0, sd=2.0)
data <- rnorm(10, mean=n[1], sd=2.0)
cat("data D, taken from Normal distribution (mean= ", n[1], ", sd=2.0)\n\n")
data
summary(data)
cat("\n")
# log p(D|m)
likelihood <- function(m) {
return (sum(log(dnorm(data, mean=m, sd=2.0))))
}
# log p(m)
probP <- function(m) {
return (log(dnorm(m, mean=5.0, sd=1.0)))
}
# log p(D|m)p(m)
eval <- function(m) {
return (likelihood(m) + probP(m))
}
mean <- 10.0 * random()
prevEval <- eval(mean)
res <- 1:10999
i <- 1
while (i < 11000) {
newMean <- random() + mean - 0.5
newEval <- eval(newMean)
r <- newEval - prevEval
if (prevEval < newEval || log(random()) < r) {
prevEval = newEval
mean = newMean
res[i] <- newMean
i <- i + 1
}
}
# p(m|D)
sampling <- tail(res, n=10000)
summary(sampling)
hist(sampling, breaks=seq(-0.5, 9.5, 0.1))
|
# Plot expression of ANXA1 in our RNA-seq and proteomics data
# (figures 5J and 5K).
setwd("~/git/spinal-cord-injury-elife-2018")
options(stringsAsFactors = F)
library(tidyverse)
library(magrittr)
library(org.Hs.eg.db)
source("R/theme.R")
# read RNA-seq: expression matrix (genes x samples) transposed and bound to
# the sample metadata so samples are rows.
rnaseq = read.delim("data/expression/rnaseq/sleuth/sleuth-norm-filt.txt")
targets = read.delim("data/expression/rnaseq/targets.txt")
rnaseq = as.data.frame(cbind(targets, t(rnaseq)))
# plot annexin A1; reference the Ensembl ID via `anxa1` so the gene can be
# changed in one place (previously the ID was also hard-coded in aes()).
anxa1 = "ENSG00000135046"
rnaseq$label %<>% Hmisc::capitalize() %>%
  factor(levels = c('Sham', 'Moderate', 'Severe'))
rnaseq$title = "ANXA1"
p = ggplot(rnaseq, aes(x = label, y = .data[[anxa1]], color = label,
                       fill = label)) +
  facet_grid(~ title) +
  geom_boxplot(alpha = 0.4, outlier.size = 0, outlier.shape = NA) +
  geom_jitter(height = 0, width = 0.2, size = 0.4) +
  scale_x_discrete("Injury") + # , labels = c("Sham", "Mild", "Severe")) +
  scale_y_continuous("TPM") +
  # guide = "none" replaces the deprecated logical form `guide = F`
  scale_color_manual(values = ryb8[c(1, 3, 8)], guide = "none") +
  scale_fill_manual(values = ryb8[c(1, 3, 8)], guide = "none") +
  sci_theme +
  theme(axis.line = element_line(color = 'grey50'),
        axis.text.x = element_text(angle = 45, hjust = 1),
        axis.title.x = element_blank(),
        strip.background = element_rect(fill = 'grey40', color = 'grey40'),
        strip.text = element_text(color = 'white', size = 8, face = 'italic'))
p
ggsave("figures/figure-5J.pdf", p, width = 4, height = 4.5,
       units = 'cm', useDingbats = F)
# read proteomics, quantile-normalise, and bind to sample metadata.
prot = read.delim("data/proteomics/proteomics-processed.txt") %>%
  column_to_rownames('Ortholog')
names = list(rownames(prot), colnames(prot))
prot %<>% as.matrix() %>%
  preprocessCore::normalize.quantiles()
dimnames(prot) = names
targets = read.delim("data/proteomics/targets.txt")
prot = as.data.frame(cbind(t(prot), targets))
# plot annexin A1 in the proteomics data
prot$title = "ANXA1"
prot$label %<>% Hmisc::capitalize() %>% factor(
  levels = c('Sham', 'Moderate', 'Severe'))
# Render axis labels like 1e+05 as plotmath "1 %*% 10^5".
scientific_10 <- function(x) {
  parse(text = gsub("\\+", "", gsub(
    "e", " %*% 10^", scales::scientific_format()(x))))
}
p = ggplot(prot, aes(x = label, y = .data[[anxa1]], color = label,
                     fill = label)) +
  facet_grid(~ title) +
  geom_boxplot(alpha = 0.4, outlier.size = 0, outlier.shape = NA) +
  geom_jitter(height = 0, width = 0.2, size = 0.4) +
  scale_x_discrete("Injury") +
  # `labels =` spelled out; the original `label =` only worked via partial
  # argument matching.
  scale_y_continuous("Abundance", labels = scientific_10) +
  scale_color_manual(values = ryb8[c(1, 3, 8)], guide = "none") +
  scale_fill_manual(values = ryb8[c(1, 3, 8)], guide = "none") +
  sci_theme +
  theme(axis.line = element_line(color = 'grey50'),
        axis.text.x = element_text(angle = 45, hjust = 1),
        axis.title.x = element_blank(),
        strip.background = element_rect(fill = 'grey40', color = 'grey40'),
        strip.text = element_text(color = 'white', size = 8))
p
ggsave("figures/figure-5K.pdf", p, width = 4.3, height = 4.5,
       units = 'cm', useDingbats = F)
|
/R/figures/plot-annexin.R
|
permissive
|
skinnider/spinal-cord-injury-elife-2018
|
R
| false
| false
| 2,990
|
r
|
# Plot expression of ANXA1 in our RNA-seq and proteomics data.
setwd("~/git/spinal-cord-injury-elife-2018")
options(stringsAsFactors = F)
library(tidyverse)
library(magrittr)
library(org.Hs.eg.db)
source("R/theme.R")
# read RNA-seq
rnaseq = read.delim("data/expression/rnaseq/sleuth/sleuth-norm-filt.txt")
targets = read.delim("data/expression/rnaseq/targets.txt")
rnaseq = as.data.frame(cbind(targets, t(rnaseq)))
# plot annexin A1
anxa1 = "ENSG00000135046"
rnaseq$label %<>% Hmisc::capitalize() %>%
factor(levels = c('Sham', 'Moderate', 'Severe'))
rnaseq$title = "ANXA1"
p = ggplot(rnaseq, aes(x = label, y = ENSG00000135046, color = label,
fill = label)) +
facet_grid(~ title) +
geom_boxplot(alpha = 0.4, outlier.size = 0, outlier.shape = NA) +
geom_jitter(height = 0, width = 0.2, size = 0.4) +
scale_x_discrete("Injury") + # , labels = c("Sham", "Mild", "Severe")) +
scale_y_continuous("TPM") +
scale_color_manual(values = ryb8[c(1, 3, 8)], guide = F) +
scale_fill_manual(values = ryb8[c(1, 3, 8)], guide = F) +
sci_theme +
theme(axis.line = element_line(color = 'grey50'),
axis.text.x = element_text(angle = 45, hjust = 1),
axis.title.x = element_blank(),
strip.background = element_rect(fill = 'grey40', color = 'grey40'),
strip.text = element_text(color = 'white', size = 8, face = 'italic'))
p
ggsave("figures/figure-5J.pdf", p, width = 4, height = 4.5,
units = 'cm', useDingbats = F)
# read proteomics
prot = read.delim("data/proteomics/proteomics-processed.txt") %>%
column_to_rownames('Ortholog')
names = list(rownames(prot), colnames(prot))
prot %<>% as.matrix() %>%
preprocessCore::normalize.quantiles()
dimnames(prot) = names
targets = read.delim("data/proteomics/targets.txt")
prot = as.data.frame(cbind(t(prot), targets))
# plot annexin A1
anxa1 = "ENSG00000135046"
prot$title = "ANXA1"
prot$label %<>% Hmisc::capitalize() %>% factor(
levels = c('Sham', 'Moderate', 'Severe'))
scientific_10 <- function(x) {
parse(text = gsub("\\+", "", gsub(
"e", " %*% 10^", scales::scientific_format()(x))))
}
p = ggplot(prot, aes(x = label, y = ENSG00000135046, color = label,
fill = label)) +
facet_grid(~ title) +
geom_boxplot(alpha = 0.4, outlier.size = 0, outlier.shape = NA) +
geom_jitter(height = 0, width = 0.2, size = 0.4) +
scale_x_discrete("Injury") +
scale_y_continuous("Abundance", label = scientific_10) +
scale_color_manual(values = ryb8[c(1, 3, 8)], guide = F) +
scale_fill_manual(values = ryb8[c(1, 3, 8)], guide = F) +
sci_theme +
theme(axis.line = element_line(color = 'grey50'),
axis.text.x = element_text(angle = 45, hjust = 1),
axis.title.x = element_blank(),
strip.background = element_rect(fill = 'grey40', color = 'grey40'),
strip.text = element_text(color = 'white', size = 8))
p
ggsave("figures/figure-5K.pdf", p, width = 4.3, height = 4.5,
units = 'cm', useDingbats = F)
|
#----Matrix----
# Let's construct two 5x2 matrix with a sequence of number from 1 to 10,
# one with byrow = TRUE and one with byrow = FALSE to see the difference.
#Construct a matrix with 5 rows that contain the numbers 1 up to 10 and byrow=TRUE
matrix_a <-matrix(1:10, byrow = TRUE, nrow = 5)
matrix_a
# Print dimension of the matrix with dim()
dim(matrix_a)
# Construct a matrix with 5 rows that contain the numbers 1 up to 10 and byrow=FALSE
matrix_b <-matrix(1:10, byrow = FALSE, nrow = 5)
matrix_b
# Print dimension of the matrix with dim()
dim(matrix_b)
# Construct a matrix with 5 rows that contain the numbers 1 up to 10 with byrow not mentioned.
matrix_c <-matrix(1:10, nrow = 5)
matrix_c
# Hence, by default, byrow is set to FALSE in a matrix if not mentioned explicitly.
# Matrix object Properties
mx= matrix(1:24,nrow=6)
class(mx)
dim(mx)
# Add a Column to a Matrix with the cbind()
# concatenate c(1:5) to the matrix_a
matrix_a1 <- cbind(matrix_a, c(1:5))
# Check the dimension
dim(matrix_a1)
matrix_a1
# Add a row to a Matrix with the rbind()
# Append to the matrix
matrix_a2 <- rbind(matrix_a, c(1:2))
# Check the dimension
dim(matrix_a2)
matrix_a2
# --------------------------------------------------
# BUG FIX: the line above was a bare run of dashes. R parses that as a chain
# of unary minus operators applied to the NEXT expression, silently changing
# what the script evaluates; it is now a comment separator.
#Slice a Matrix:
#We can select one or many elements from a matrix by using the square brackets [ ].
#This is where slicing comes into the picture.
#Example:
#Below cmd selects the element at the first row and second column of matrix a2.
matrix_a2[1,2]
#Below cmd results in a matrix with data on the rows 1, 2, 3 and columns 1, 2.
matrix_a2 [1:3,1:2]
#Below cmd selects all elements of the first column.
matrix_a2[,1]
#Below cmd selects all elements of the first row.
matrix_a2[1,]
#Matrix-----
(m1 = matrix(1:12, nrow=4))
#Add names of cols and rows in matrix
(colnames(m1) = paste('C',1:3, sep=''))
(rownames(m1) = paste('R',1:4, sep=''))
m1
# Conversion: Vector to Matrix
(m3 = 1:24)
dim(m3)= c(6,4)
m3
#access elements
(m2 = matrix(1:12, ncol=3, byrow=T))
m2
m2[1,] #first row
m2[c(1,3,4),] #1st,3rd,4th row
m2[,1] #first col
m2[,2:3] # 2nd to 3rd coln
m2[c(1,2),c(2,3)]
m2[,]
m2[-2,] # exclude 2nd row
m2[1:8] # matrix is like vector
m2[m2 > 5]
# modify matrix elements
m2[2,2]
m2[2,2] = 10
m2
m2[m2> 10] = 99
m2
#row and col wise summary
m1
colSums(m1);
rowSums(m1)
colMeans(m1);
rowMeans(m1)
# transpose
t(m1)
m1
|
/Matrix.R
|
no_license
|
divyaimale29/R_Programming
|
R
| false
| false
| 2,371
|
r
|
#----Matrix----
# Let's construct two 5x2 matrix with a sequence of number from 1 to 10,
# one with byrow = TRUE and one with byrow = FALSE to see the difference.
#Construct a matrix with 5 rows that contain the numbers 1 up to 10 and byrow=TRUE
matrix_a <-matrix(1:10, byrow = TRUE, nrow = 5)
matrix_a
# Print dimension of the matrix with dim()
dim(matrix_a)
# Construct a matrix with 5 rows that contain the numbers 1 up to 10 and byrow=FALSE
matrix_b <-matrix(1:10, byrow = FALSE, nrow = 5)
matrix_b
# Print dimension of the matrix with dim()
dim(matrix_b)
# Construct a matrix with 5 rows that contain the numbers 1 up to 10 with byrow not mentioned.
matrix_c <-matrix(1:10, nrow = 5)
matrix_c
# Hence, by default, byrow is set to FALSE in a matrix if not mentioned explicitly.
# Matrix object Properties
mx= matrix(1:24,nrow=6)
class(mx)
dim(mx)
# Add a Column to a Matrix with the cbind()
# concatenate c(1:5) to the matrix_a
matrix_a1 <- cbind(matrix_a, c(1:5))
# Check the dimension
dim(matrix_a1)
matrix_a1
# Add a row to a Matrix with the rbind()
# Append to the matrix
matrix_a2 <- rbind(matrix_a, c(1:2))
# Check the dimension
dim(matrix_a2)
matrix_a2
# --------------------------------------------------
# BUG FIX: the line above was a bare run of dashes. R parses that as a chain
# of unary minus operators applied to the NEXT expression, silently changing
# what the script evaluates; it is now a comment separator.
#Slice a Matrix:
#We can select one or many elements from a matrix by using the square brackets [ ].
#This is where slicing comes into the picture.
#Example:
#Below cmd selects the element at the first row and second column of matrix a2.
matrix_a2[1,2]
#Below cmd results in a matrix with data on the rows 1, 2, 3 and columns 1, 2.
matrix_a2 [1:3,1:2]
#Below cmd selects all elements of the first column.
matrix_a2[,1]
#Below cmd selects all elements of the first row.
matrix_a2[1,]
#Matrix-----
(m1 = matrix(1:12, nrow=4))
#Add names of cols and rows in matrix
(colnames(m1) = paste('C',1:3, sep=''))
(rownames(m1) = paste('R',1:4, sep=''))
m1
# Conversion: Vector to Matrix
(m3 = 1:24)
dim(m3)= c(6,4)
m3
#access elements
(m2 = matrix(1:12, ncol=3, byrow=T))
m2
m2[1,] #first row
m2[c(1,3,4),] #1st,3rd,4th row
m2[,1] #first col
m2[,2:3] # 2nd to 3rd coln
m2[c(1,2),c(2,3)]
m2[,]
m2[-2,] # exclude 2nd row
m2[1:8] # matrix is like vector
m2[m2 > 5]
# modify matrix elements
m2[2,2]
m2[2,2] = 10
m2
m2[m2> 10] = 99
m2
#row and col wise summary
m1
colSums(m1);
rowSums(m1)
colMeans(m1);
rowMeans(m1)
# transpose
t(m1)
m1
|
# Build tidy versions of the UCI HAR ("Samsung wearables") dataset.
# Reads the raw train/test files from "UCI HAR Dataset/" under the working
# directory, merges them (train rows first throughout), keeps only the
# mean/std features, attaches subject ids and descriptive activity labels,
# then writes two files: the merged data ("tidydata.txt") and per
# subject-by-activity feature averages ("tidydataaverages.txt").
# Returns nothing useful; called for its file side effects.
run_analysis <- function() {
  # Load and merge the raw measurement files (train first, then test).
  datatrain <- read.table("UCI HAR Dataset/train/X_train.txt", stringsAsFactors = FALSE)
  datatest <- read.table("UCI HAR Dataset/test/X_test.txt", stringsAsFactors = FALSE)
  dataset <- rbind(datatrain, datatest)
  # Feature names; strip punctuation so they make clean column names.
  names <- read.table("UCI HAR Dataset/features.txt", stringsAsFactors = FALSE)
  names$V2 <- gsub("\\(", "", names$V2)
  names$V2 <- gsub("\\)", "", names$V2)
  names$V2 <- gsub(",", "", names$V2)
  names$V2 <- gsub(" ", "", names$V2)
  names$V2 <- gsub("-", "", names$V2)
  colnames(dataset) <- names$V2
  # Keep only columns whose name contains "mean" or "std".
  datasetmean <- dataset[grep("mean", names$V2)]
  datasetstd <- dataset[grep("std", names$V2)]
  datasettotal <- cbind(datasetmean, datasetstd)
  # Subject ids, bound train-first to match the measurement rbind above.
  testsub <- read.table("UCI HAR Dataset/test/subject_test.txt")
  trainsub <- read.table("UCI HAR Dataset/train/subject_train.txt")
  subject <- rbind(trainsub, testsub)[[1]]
  # Activity numbers, mapped to their descriptive labels.
  trainact <- read.table("UCI HAR Dataset/train/y_train.txt", stringsAsFactors = FALSE)
  testact <- read.table("UCI HAR Dataset/test/y_test.txt", stringsAsFactors = FALSE)
  actlab <- read.table("UCI HAR Dataset/activity_labels.txt")
  acttotal <- rbind(trainact, testact)
  activity <- sapply(seq_len(dim(acttotal)[1]), function(x) actlab$V2[as.numeric(acttotal$V1[x])])
  datasettotalact <- cbind(subject, activity, datasettotal)
  print(datasettotalact)
  # Average every kept feature per (subject, activity) pair; 30 subjects.
  finaldataset <- NULL
  for (i in 1:30) {
    for (j in actlab[[2]]) {
      # BUG FIX: the original seeded the first row with c(1, j, ...), hard-coding
      # subject 1; that was only correct because i happens to start at 1.
      row <- c(i, j, colMeans(datasettotalact[datasettotalact$subject == i &
                                                datasettotalact$activity == j, 3:81]))
      if (is.null(finaldataset)) finaldataset <- row
      else finaldataset <- rbind(finaldataset, row)
    }
  }
  colnames(finaldataset) <- colnames(datasettotalact)
  finaldataset <- as.data.frame(finaldataset, row.names = 1:180)
  # Write both tidy outputs via sink().
  sink("tidydata.txt")
  print(datasettotalact)
  sink()
  sink("tidydataaverages.txt")
  print(finaldataset)
  sink()
}
|
/run_analysis.R
|
no_license
|
N17051983/gettingandcleaningdata
|
R
| false
| false
| 2,043
|
r
|
# Build tidy versions of the UCI HAR ("Samsung wearables") dataset.
# Reads the raw train/test files from "UCI HAR Dataset/" under the working
# directory, merges them (train rows first throughout), keeps only the
# mean/std features, attaches subject ids and descriptive activity labels,
# then writes two files: the merged data ("tidydata.txt") and per
# subject-by-activity feature averages ("tidydataaverages.txt").
# Returns nothing useful; called for its file side effects.
run_analysis <- function() {
  # Load and merge the raw measurement files (train first, then test).
  datatrain <- read.table("UCI HAR Dataset/train/X_train.txt", stringsAsFactors = FALSE)
  datatest <- read.table("UCI HAR Dataset/test/X_test.txt", stringsAsFactors = FALSE)
  dataset <- rbind(datatrain, datatest)
  # Feature names; strip punctuation so they make clean column names.
  names <- read.table("UCI HAR Dataset/features.txt", stringsAsFactors = FALSE)
  names$V2 <- gsub("\\(", "", names$V2)
  names$V2 <- gsub("\\)", "", names$V2)
  names$V2 <- gsub(",", "", names$V2)
  names$V2 <- gsub(" ", "", names$V2)
  names$V2 <- gsub("-", "", names$V2)
  colnames(dataset) <- names$V2
  # Keep only columns whose name contains "mean" or "std".
  datasetmean <- dataset[grep("mean", names$V2)]
  datasetstd <- dataset[grep("std", names$V2)]
  datasettotal <- cbind(datasetmean, datasetstd)
  # Subject ids, bound train-first to match the measurement rbind above.
  testsub <- read.table("UCI HAR Dataset/test/subject_test.txt")
  trainsub <- read.table("UCI HAR Dataset/train/subject_train.txt")
  subject <- rbind(trainsub, testsub)[[1]]
  # Activity numbers, mapped to their descriptive labels.
  trainact <- read.table("UCI HAR Dataset/train/y_train.txt", stringsAsFactors = FALSE)
  testact <- read.table("UCI HAR Dataset/test/y_test.txt", stringsAsFactors = FALSE)
  actlab <- read.table("UCI HAR Dataset/activity_labels.txt")
  acttotal <- rbind(trainact, testact)
  activity <- sapply(seq_len(dim(acttotal)[1]), function(x) actlab$V2[as.numeric(acttotal$V1[x])])
  datasettotalact <- cbind(subject, activity, datasettotal)
  print(datasettotalact)
  # Average every kept feature per (subject, activity) pair; 30 subjects.
  finaldataset <- NULL
  for (i in 1:30) {
    for (j in actlab[[2]]) {
      # BUG FIX: the original seeded the first row with c(1, j, ...), hard-coding
      # subject 1; that was only correct because i happens to start at 1.
      row <- c(i, j, colMeans(datasettotalact[datasettotalact$subject == i &
                                                datasettotalact$activity == j, 3:81]))
      if (is.null(finaldataset)) finaldataset <- row
      else finaldataset <- rbind(finaldataset, row)
    }
  }
  colnames(finaldataset) <- colnames(datasettotalact)
  finaldataset <- as.data.frame(finaldataset, row.names = 1:180)
  # Write both tidy outputs via sink().
  sink("tidydata.txt")
  print(datasettotalact)
  sink()
  sink("tidydataaverages.txt")
  print(finaldataset)
  sink()
}
|
# Count amino-acid k-mers per protein for every Prodigal-predicted proteome
# (.faa) in a folder, and write per-genome sparse count matrices plus the
# FindMyFriends gene ordering, for downstream Python scripts.
library("ape")
library("kmer")
library("Matrix")
library("FindMyFriends")
# Format input and output
input_folder = "/Users/matthewthompson/Documents/UAMS_SURF/K-mer_testing/FAA_files/phylotypeA/"
folder_name = "FAA_files/phylotypeA/"
output_folder = "/Users/matthewthompson/Documents/UAMS_SURF/K-mer_testing/CSV_files/phylotypeA/"
# Set k-mer length
kmer_length = 3
kmer_string = paste(toString(kmer_length), "mer", sep = '')
# Read in .faa files after running Prodigal on genomic FASTA (.fna) files
# NOTE(review): setwd() changes global state for the whole session.
setwd(input_folder)
genome_files <- list.files(getwd(), full.names=TRUE, pattern='*.faa')
# Output ordering of genes from FindMyFriends to combine with canopyClustering.py output
pan <- pangenome(genome_files[1:length(genome_files)], translated=TRUE, geneLocation='prodigal', lowMem=FALSE)
# Gene ids are the part of each sequence name before the first "#".
grouping_list <- c()
for(x in pan@sequences@ranges@NAMES)
{
grouping_list <- c(grouping_list, strsplit(x, "#")[[1]][1])
}
write.csv(grouping_list, file = paste(output_folder,"find_my_friends_gene_ordering_list.csv", sep=''))
# One pass per proteome: count k-mers, sparsify, write Matrix Market output.
for (file in genome_files)
{
proteins <- read.FASTA(file, type = "AA")
print(paste('Genome ',file,' proteins are loaded'))
# Count k-mers in each protein sequence
kmerCounts <- kcount(proteins, k = kmer_length, compress = FALSE)
print(paste('Genome ',file, ' kmerCounts is finished'))
# Convert to a sparse storage format to reduce file size when storing
sparseKmerCounts <- Matrix(kmerCounts, sparse = TRUE)
# Format output file name
# NOTE(review): strsplit() patterns are regexes by default, so the "." in
# ".faa" matches any character; works here but fixed=TRUE would be safer.
file_name <- strsplit(file, ".faa")
file_name <- strsplit(file_name[[1]][1], folder_name)
file_name <- paste(output_folder, file_name[[1]][2], sep = '')
out_path <- paste(file_name,paste('_', kmer_string, sep = ''),sep = '')
out_path <- paste(out_path, '_count_matrix_full_alphabet.mtx',sep = '')
protein_out_path <- paste(strsplit(file_name, paste(kmer_string, "_count", sep = ''))[[1]][1], "_protein_list.csv", sep = '')
setwd(output_folder)
# Output to be used in kmerSelector.py
# Write matrix out in matrix market format
writeMM(sparseKmerCounts, file=out_path)
write.csv(rownames(kmerCounts), file = protein_out_path)
write.csv(colnames(kmerCounts), file = paste(kmer_string, "_list.csv", sep = ''))
print(paste('Genome ', file, ' matrix is output'))
print(paste('Genome ',file,' is complete', sep = ''))
}
|
/kmerCounter.R
|
no_license
|
mdttrump97/Kmer_pangenomes
|
R
| false
| false
| 2,323
|
r
|
# Count amino-acid k-mers per protein for every Prodigal-predicted proteome
# (.faa) in a folder, and write per-genome sparse count matrices plus the
# FindMyFriends gene ordering, for downstream Python scripts.
library("ape")
library("kmer")
library("Matrix")
library("FindMyFriends")
# Format input and output
input_folder = "/Users/matthewthompson/Documents/UAMS_SURF/K-mer_testing/FAA_files/phylotypeA/"
folder_name = "FAA_files/phylotypeA/"
output_folder = "/Users/matthewthompson/Documents/UAMS_SURF/K-mer_testing/CSV_files/phylotypeA/"
# Set k-mer length
kmer_length = 3
kmer_string = paste(toString(kmer_length), "mer", sep = '')
# Read in .faa files after running Prodigal on genomic FASTA (.fna) files
# NOTE(review): setwd() changes global state for the whole session.
setwd(input_folder)
genome_files <- list.files(getwd(), full.names=TRUE, pattern='*.faa')
# Output ordering of genes from FindMyFriends to combine with canopyClustering.py output
pan <- pangenome(genome_files[1:length(genome_files)], translated=TRUE, geneLocation='prodigal', lowMem=FALSE)
# Gene ids are the part of each sequence name before the first "#".
grouping_list <- c()
for(x in pan@sequences@ranges@NAMES)
{
grouping_list <- c(grouping_list, strsplit(x, "#")[[1]][1])
}
write.csv(grouping_list, file = paste(output_folder,"find_my_friends_gene_ordering_list.csv", sep=''))
# One pass per proteome: count k-mers, sparsify, write Matrix Market output.
for (file in genome_files)
{
proteins <- read.FASTA(file, type = "AA")
print(paste('Genome ',file,' proteins are loaded'))
# Count k-mers in each protein sequence
kmerCounts <- kcount(proteins, k = kmer_length, compress = FALSE)
print(paste('Genome ',file, ' kmerCounts is finished'))
# Convert to a sparse storage format to reduce file size when storing
sparseKmerCounts <- Matrix(kmerCounts, sparse = TRUE)
# Format output file name
# NOTE(review): strsplit() patterns are regexes by default, so the "." in
# ".faa" matches any character; works here but fixed=TRUE would be safer.
file_name <- strsplit(file, ".faa")
file_name <- strsplit(file_name[[1]][1], folder_name)
file_name <- paste(output_folder, file_name[[1]][2], sep = '')
out_path <- paste(file_name,paste('_', kmer_string, sep = ''),sep = '')
out_path <- paste(out_path, '_count_matrix_full_alphabet.mtx',sep = '')
protein_out_path <- paste(strsplit(file_name, paste(kmer_string, "_count", sep = ''))[[1]][1], "_protein_list.csv", sep = '')
setwd(output_folder)
# Output to be used in kmerSelector.py
# Write matrix out in matrix market format
writeMM(sparseKmerCounts, file=out_path)
write.csv(rownames(kmerCounts), file = protein_out_path)
write.csv(colnames(kmerCounts), file = paste(kmer_string, "_list.csv", sep = ''))
print(paste('Genome ', file, ' matrix is output'))
print(paste('Genome ',file,' is complete', sep = ''))
}
|
# makeCacheMatrix
# Creates a special "matrix" object that can cache its inverse.
# The object does not calculate the inverse, just saves it inside.
# Saves the matrix to variable x and its inverse to variable s in scope.
# Returned object (actually it's a list) contains methods:
# set: sets matrix and resets cached inverse
# get: returns matrix
# setSolve: saves solve value
# getSolve: returns cached inverse valuePut comments here that give an overall description of what your
## functions do
makeCacheMatrix <- function(x = matrix()) {
  # Closure-held cache for the inverse; NULL means "not computed yet".
  cached_inverse <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setSolve <- function(solve) cached_inverse <<- solve
  getSolve <- function() cached_inverse
  # The "object": a list of accessor closures sharing this environment.
  list(set = set, get = get, setSolve = setSolve, getSolve = getSolve)
}
#Function to get the inversed matrix from a special object created by makeCacheMatrix.
# Takes the object of that type as an argument 'x', checks if the inverse value is already
# cached, and if it is returns the cached value; if not, this function calculates the
# inverse for the matrix saved in the 'x', saves it into 'x' cache using method 'setSolve'
# and returns the result.
cacheSolve <- function(x, ...) {
  # Return the cached inverse if one has already been computed.
  cached <- x$getSolve()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: invert the stored matrix, remember it, return it.
  inv <- solve(x$get(), ...)
  x$setSolve(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
meghnasharma1410/ProgrammingAssignment2
|
R
| false
| false
| 1,377
|
r
|
# makeCacheMatrix
# Creates a special "matrix" object that can cache its inverse.
# The object does not calculate the inverse, just saves it inside.
# Saves the matrix to variable x and its inverse to variable s in scope.
# Returned object (actually it's a list) contains methods:
# set: sets matrix and resets cached inverse
# get: returns matrix
# setSolve: saves solve value
# getSolve: returns cached inverse valuePut comments here that give an overall description of what your
## functions do
makeCacheMatrix <- function(x = matrix()) {
  # Closure-held cache for the inverse; NULL means "not computed yet".
  cached_inverse <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setSolve <- function(solve) cached_inverse <<- solve
  getSolve <- function() cached_inverse
  # The "object": a list of accessor closures sharing this environment.
  list(set = set, get = get, setSolve = setSolve, getSolve = getSolve)
}
#Function to get the inversed matrix from a special object created by makeCacheMatrix.
# Takes the object of that type as an argument 'x', checks if the inverse value is already
# cached, and if it is returns the cached value; if not, this function calculates the
# inverse for the matrix saved in the 'x', saves it into 'x' cache using method 'setSolve'
# and returns the result.
cacheSolve <- function(x, ...) {
  # Return the cached inverse if one has already been computed.
  cached <- x$getSolve()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: invert the stored matrix, remember it, return it.
  inv <- solve(x$get(), ...)
  x$setSolve(inv)
  inv
}
|
# Predict values for `dataset` with a pre-trained SVM regression model.
# The fitted model is loaded from "envMais.Rdata", which is assumed to live
# in the working directory and to define `svmfit` — TODO confirm.
regresseur_mais <- function(dataset) {
# Load the saved environment (brings `svmfit` into scope).
load("envMais.Rdata")
# NOTE(review): library() inside a function attaches packages as a session-wide
# side effect; presumably needed so the predict() method for svmfit is found.
library(kernlab)
library(MASS)
library(e1071)
predictions <- predict(svmfit, newdata = dataset)
return(predictions)
}
|
/TD9/sy19_tp7/scripts/regresseur_mais.R
|
no_license
|
sidiatig/SY19
|
R
| false
| false
| 227
|
r
|
# Predict values for `dataset` with a pre-trained SVM regression model.
# The fitted model is loaded from "envMais.Rdata", which is assumed to live
# in the working directory and to define `svmfit` — TODO confirm.
regresseur_mais <- function(dataset) {
# Load the saved environment (brings `svmfit` into scope).
load("envMais.Rdata")
# NOTE(review): library() inside a function attaches packages as a session-wide
# side effect; presumably needed so the predict() method for svmfit is found.
library(kernlab)
library(MASS)
library(e1071)
predictions <- predict(svmfit, newdata = dataset)
return(predictions)
}
|
#'Make an interactive bar plot with error bar
#'
#'@param data A data.frame
#'@param mapping Set of aesthetic mappings created by aes or aes_.
#'@param interactive A logical value. If TRUE, an interactive plot will be returned
#'@param digits An integer indicating the number of decimal places
#'@param mode if 2, two-sided error bar will be displayed, if 1 one-sided errorbar will be displayed
#'@param errorbar which value is displayed with errorbar :"se" or "sd"
#'@param use.label Logical. Whether or not use column label in case of labelled data
#'@param use.labels Logical. Whether or not use value labels in case of labelled data
#'@importFrom ggiraph geom_bar_interactive
#'@export
#'@return An interactive catepillar plot
#'@examples
#'require(ggplot2)
#'require(ggiraph)
#'ggErrorBar(mpg,aes(x=drv,y=cty))
#'ggErrorBar(mpg,aes(x=drv,y=hwy,color=cyl),mode=1,interactive=TRUE,errorbar="sd")
ggErrorBar=function(data,mapping,interactive=FALSE,digits=1,mode=2,errorbar="se",
use.label=TRUE,use.labels=TRUE){
# data=mpg;mapping=aes(x=drv,y=cty);interactive=FALSE;digits=1;mode=2;errorbar="se"
# use.label=TRUE;use.labels=TRUE
df<-data
# Extract the column names mapped to y and x from the aes() mapping.
yvar=getMapping(mapping,"y")
xvar=getMapping(mapping,"x")
# A numeric x axis is coerced to a factor so bars are categorical.
if(is.numeric(data[[xvar]])) data[[xvar]]=factor(data[[xvar]])
groupvar<-NULL
# Any aesthetic other than x/y (e.g. colour or fill) acts as the grouping variable.
(groupname=setdiff(names(mapping),c("x","y")))
length(groupname)
if(length(groupname)>0){
groupvar=getMapping(mapping,groupname)
}
name=names(mapping)
xlabels<-ylabels<-filllabels<-colourlabels<-xlab<-ylab<-colourlab<-filllab<-NULL
# For each mapped aesthetic, capture the column's "label" attribute and its
# value labels (labelled-data support). Results land in variables named
# "<aes>lab" / "<aes>labels" via assign(), matching the NULLs initialised above.
for(i in 1:length(name)){
(varname=paste0(name[i],"var"))
labname=paste0(name[i],"lab")
labelsname=paste0(name[i],"labels")
assign(varname,getMapping(mapping,name[i]))
# NOTE(review): eval(parse()) on a constructed "data$<col>" string;
# data[[colname]] would be the safer equivalent.
x=eval(parse(text=paste0("data$",eval(parse(text=varname)))))
assign(labname,attr(x,"label"))
assign(labelsname,get_labels(x))
}
A=yvar
(B<-groupvar)
(C=xvar)
# Summarise y (mean/sd/se via summarySE) by x — and by group if present —
# and build an HTML tooltip string for each bar.
if(is.null(B)){
dat=summarySE(df,A,C)
dat$tooltip=""
dat$label=paste0(C,"=",dat[[C]],"<br>mean:",round(dat[[A]],digits),
"<br>se:",round(dat$se,digits),"<br>sd:",round(dat$sd,digits))
} else {
dat=summarySE(df,A,c(B,C))
dat[[B]]=factor(dat[[B]])
dat$tooltip=dat[[B]]
dat$label=paste0(B,"=",dat[[B]],"<br>",C,"=",dat[[C]],"<br>mean:",round(dat[[A]],digits),
"<br>se:",round(dat$se,digits),"<br>sd:",round(dat$sd,digits))
}
# Unique id per bar so ggiraph can track hover/selection state.
dat$id=as.character(1:nrow(dat))
dat
# Without a group, bars are filled by x itself and the legend is suppressed.
if(is.null(B)) {
p<-ggplot(dat,aes_string(x=xvar,fill=xvar,y=yvar,tooltip="label",data_id="id"))+guides(fill=FALSE)
} else {
p<-ggplot(dat,aes_string(x=xvar,fill=groupvar,y=yvar,tooltip="label",data_id="id"))
}
# Per the roxygen @param mode: 2 = two-sided error bars (bars drawn first,
# error bars on top); otherwise bars are drawn after, hiding the lower half.
if(mode==2) p<-p+geom_bar_interactive(position="dodge",stat="identity")
# Error bar bounds are mean +/- the chosen column ("se" or "sd"); the column
# name is spliced into the aes() call via a constructed string + eval(parse()).
p<-p+eval(parse(text=paste0("geom_errorbar(aes(ymin=",A,"-",errorbar,",ymax=",
A,"+",errorbar,"),position=position_dodge(0.9),width=0.2)")))
if(mode!=2) p<-p+geom_bar_interactive(position="dodge",stat="identity")
p
# Apply value labels captured earlier (labelled-data support).
if(use.labels) {
if(!is.null(xlabels)) p<-p+scale_x_discrete(labels=xlabels)
if(!is.null(ylabels)) p<-p+scale_y_continuous(breaks=1:length(ylabels),labels=ylabels)
if(!is.null(filllabels)) p=p+scale_fill_discrete(labels=filllabels)
if(!is.null(colourlabels)) p=p+scale_color_discrete(labels=colourlabels)
#p+scale_color_continuous(labels=colourlabels)
}
# Apply axis/legend titles taken from the columns' "label" attributes.
if(use.label){
if(!is.null(xlab)) p<-p+labs(x=xlab)
if(!is.null(ylab)) p<-p+labs(y=ylab)
if(!is.null(colourlab)) p<-p+labs(colour=colourlab)
if(!is.null(filllab)) p<-p+labs(fill=filllab)
}
p
# if(interactive) p<-ggiraph(code=print(p),zoom_max = 10)
# Wrap in a ggiraph widget with hover/tooltip/zoom options when interactive.
if(interactive){
tooltip_css <- "background-color:white;font-style:italic;padding:10px;border-radius:10px 20px 10px 20px;"
hover_css="r:4px;cursor:pointer;stroke-width:6px;"
p<-girafe(ggobj=p)
p<-girafe_options(p,
opts_hover(css=hover_css),
opts_tooltip(css=tooltip_css,opacity=.75),
opts_zoom(min=1,max=10))
}
p
}
|
/R/ggErrorBar.R
|
no_license
|
cardiomoon/ggiraphExtra
|
R
| false
| false
| 4,392
|
r
|
#'Make an interactive bar plot with error bar
#'
#'@param data A data.frame
#'@param mapping Set of aesthetic mappings created by aes or aes_.
#'@param interactive A logical value. If TRUE, an interactive plot will be returned
#'@param digits An integer indicating the number of decimal places
#'@param mode if 2, two-sided error bar will be displayed, if 1 one-sided errorbar will be displayed
#'@param errorbar which value is displayed with errorbar :"se" or "sd"
#'@param use.label Logical. Whether or not use column label in case of labelled data
#'@param use.labels Logical. Whether or not use value labels in case of labelled data
#'@importFrom ggiraph geom_bar_interactive
#'@export
#'@return An interactive catepillar plot
#'@examples
#'require(ggplot2)
#'require(ggiraph)
#'ggErrorBar(mpg,aes(x=drv,y=cty))
#'ggErrorBar(mpg,aes(x=drv,y=hwy,color=cyl),mode=1,interactive=TRUE,errorbar="sd")
ggErrorBar=function(data,mapping,interactive=FALSE,digits=1,mode=2,errorbar="se",
use.label=TRUE,use.labels=TRUE){
# data=mpg;mapping=aes(x=drv,y=cty);interactive=FALSE;digits=1;mode=2;errorbar="se"
# use.label=TRUE;use.labels=TRUE
df<-data
# Extract the column names mapped to y and x from the aes() mapping.
yvar=getMapping(mapping,"y")
xvar=getMapping(mapping,"x")
# A numeric x axis is coerced to a factor so bars are categorical.
if(is.numeric(data[[xvar]])) data[[xvar]]=factor(data[[xvar]])
groupvar<-NULL
# Any aesthetic other than x/y (e.g. colour or fill) acts as the grouping variable.
(groupname=setdiff(names(mapping),c("x","y")))
length(groupname)
if(length(groupname)>0){
groupvar=getMapping(mapping,groupname)
}
name=names(mapping)
xlabels<-ylabels<-filllabels<-colourlabels<-xlab<-ylab<-colourlab<-filllab<-NULL
# For each mapped aesthetic, capture the column's "label" attribute and its
# value labels (labelled-data support). Results land in variables named
# "<aes>lab" / "<aes>labels" via assign(), matching the NULLs initialised above.
for(i in 1:length(name)){
(varname=paste0(name[i],"var"))
labname=paste0(name[i],"lab")
labelsname=paste0(name[i],"labels")
assign(varname,getMapping(mapping,name[i]))
# NOTE(review): eval(parse()) on a constructed "data$<col>" string;
# data[[colname]] would be the safer equivalent.
x=eval(parse(text=paste0("data$",eval(parse(text=varname)))))
assign(labname,attr(x,"label"))
assign(labelsname,get_labels(x))
}
A=yvar
(B<-groupvar)
(C=xvar)
# Summarise y (mean/sd/se via summarySE) by x — and by group if present —
# and build an HTML tooltip string for each bar.
if(is.null(B)){
dat=summarySE(df,A,C)
dat$tooltip=""
dat$label=paste0(C,"=",dat[[C]],"<br>mean:",round(dat[[A]],digits),
"<br>se:",round(dat$se,digits),"<br>sd:",round(dat$sd,digits))
} else {
dat=summarySE(df,A,c(B,C))
dat[[B]]=factor(dat[[B]])
dat$tooltip=dat[[B]]
dat$label=paste0(B,"=",dat[[B]],"<br>",C,"=",dat[[C]],"<br>mean:",round(dat[[A]],digits),
"<br>se:",round(dat$se,digits),"<br>sd:",round(dat$sd,digits))
}
# Unique id per bar so ggiraph can track hover/selection state.
dat$id=as.character(1:nrow(dat))
dat
# Without a group, bars are filled by x itself and the legend is suppressed.
if(is.null(B)) {
p<-ggplot(dat,aes_string(x=xvar,fill=xvar,y=yvar,tooltip="label",data_id="id"))+guides(fill=FALSE)
} else {
p<-ggplot(dat,aes_string(x=xvar,fill=groupvar,y=yvar,tooltip="label",data_id="id"))
}
# Per the roxygen @param mode: 2 = two-sided error bars (bars drawn first,
# error bars on top); otherwise bars are drawn after, hiding the lower half.
if(mode==2) p<-p+geom_bar_interactive(position="dodge",stat="identity")
# Error bar bounds are mean +/- the chosen column ("se" or "sd"); the column
# name is spliced into the aes() call via a constructed string + eval(parse()).
p<-p+eval(parse(text=paste0("geom_errorbar(aes(ymin=",A,"-",errorbar,",ymax=",
A,"+",errorbar,"),position=position_dodge(0.9),width=0.2)")))
if(mode!=2) p<-p+geom_bar_interactive(position="dodge",stat="identity")
p
# Apply value labels captured earlier (labelled-data support).
if(use.labels) {
if(!is.null(xlabels)) p<-p+scale_x_discrete(labels=xlabels)
if(!is.null(ylabels)) p<-p+scale_y_continuous(breaks=1:length(ylabels),labels=ylabels)
if(!is.null(filllabels)) p=p+scale_fill_discrete(labels=filllabels)
if(!is.null(colourlabels)) p=p+scale_color_discrete(labels=colourlabels)
#p+scale_color_continuous(labels=colourlabels)
}
# Apply axis/legend titles taken from the columns' "label" attributes.
if(use.label){
if(!is.null(xlab)) p<-p+labs(x=xlab)
if(!is.null(ylab)) p<-p+labs(y=ylab)
if(!is.null(colourlab)) p<-p+labs(colour=colourlab)
if(!is.null(filllab)) p<-p+labs(fill=filllab)
}
p
# if(interactive) p<-ggiraph(code=print(p),zoom_max = 10)
# Wrap in a ggiraph widget with hover/tooltip/zoom options when interactive.
if(interactive){
tooltip_css <- "background-color:white;font-style:italic;padding:10px;border-radius:10px 20px 10px 20px;"
hover_css="r:4px;cursor:pointer;stroke-width:6px;"
p<-girafe(ggobj=p)
p<-girafe_options(p,
opts_hover(css=hover_css),
opts_tooltip(css=tooltip_css,opacity=.75),
opts_zoom(min=1,max=10))
}
p
}
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
# DEAD-CODE FIX: the original defined makeCacheMatrix and cacheSolve twice,
# first as empty stubs that were immediately shadowed by the real versions.
# The stubs are removed; the surviving definitions are unchanged in behavior.
#
# makeCacheMatrix: wraps a matrix in a closure-based object that can memoise
# its inverse. Returns a list with set/get (the matrix) and
# setinverse/getinverse (the cached inverse; NULL until computed).
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any cached inverse.
    x <<- y
    i <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) i <<- inverse
  getinverse <- function() i
  list(get = get,
       set = set,
       setinverse = setinverse,
       getinverse = getinverse)
}
# cacheSolve: return the inverse of the matrix held in `x` (an object made by
# makeCacheMatrix), computing it with solve(...) on the first call and serving
# the cached copy afterwards.
cacheSolve <- function(x, ...) {
  i <- x$getinverse()
  if (!is.null(i)) {
    message("getting cached data")
    return(i)
  }
  data <- x$get()
  i <- solve(data, ...)
  x$setinverse(i)
  i
}
|
/cachematrix.R
|
no_license
|
dcarmody421/ProgrammingAssignment2
|
R
| false
| false
| 975
|
r
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
# DEAD-CODE FIX: the original defined makeCacheMatrix and cacheSolve twice,
# first as empty stubs that were immediately shadowed by the real versions.
# The stubs are removed; the surviving definitions are unchanged in behavior.
#
# makeCacheMatrix: wraps a matrix in a closure-based object that can memoise
# its inverse. Returns a list with set/get (the matrix) and
# setinverse/getinverse (the cached inverse; NULL until computed).
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any cached inverse.
    x <<- y
    i <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) i <<- inverse
  getinverse <- function() i
  list(get = get,
       set = set,
       setinverse = setinverse,
       getinverse = getinverse)
}
# cacheSolve: return the inverse of the matrix held in `x` (an object made by
# makeCacheMatrix), computing it with solve(...) on the first call and serving
# the cached copy afterwards.
cacheSolve <- function(x, ...) {
  i <- x$getinverse()
  if (!is.null(i)) {
    message("getting cached data")
    return(i)
  }
  data <- x$get()
  i <- solve(data, ...)
  x$setinverse(i)
  i
}
|
#' Median value of a field among points within polygons
#'
#' Calculates the \strong{median} value of a field for a set of
#' \code{\link{data-Point}}'s within a set of \code{\link{data-Polygon}}'s
#'
#' @export
#' @template math
#' @template lint
#' @family aggregations
#' @return A FeatureCollection of \code{\link{data-Polygon}} features with
#' properties listed as \code{out_field}
#' @examples \dontrun{
#' poly <- lawn_data$polygons_average
#' pt <- lawn_data$points_average
#' lawn_median(polygons=poly, points=pt, in_field='population')
#' }
lawn_median <- function(polygons, points, in_field, out_field = "median", lint = FALSE) {
  # Optionally lint both geometry inputs before doing any work.
  lawnlint(list(polygons, points), lint)
  # Convert each input, then delegate to the shared aggregation helper.
  polys <- convert(polygons)
  pts <- convert(points)
  calc_math("median", polys, pts, in_field, out_field)
}
|
/R/median.R
|
permissive
|
jbousquin/lawn
|
R
| false
| false
| 764
|
r
|
#' Median value of a field among points within polygons
#'
#' Calculates the \strong{median} value of a field for a set of
#' \code{\link{data-Point}}'s within a set of \code{\link{data-Polygon}}'s
#'
#' @export
#' @template math
#' @template lint
#' @family aggregations
#' @return A FeatureCollection of \code{\link{data-Polygon}} features with
#' properties listed as \code{out_field}
#' @examples \dontrun{
#' poly <- lawn_data$polygons_average
#' pt <- lawn_data$points_average
#' lawn_median(polygons=poly, points=pt, in_field='population')
#' }
lawn_median <- function(polygons, points, in_field, out_field = "median", lint = FALSE) {
  # Optionally lint both geometry inputs before doing any work.
  lawnlint(list(polygons, points), lint)
  # Convert each input, then delegate to the shared aggregation helper.
  polys <- convert(polygons)
  pts <- convert(points)
  calc_math("median", polys, pts, in_field, out_field)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tree_properties.R
\name{DescendantEdges}
\alias{DescendantEdges}
\alias{AllDescendantEdges}
\title{Descendant Edges}
\usage{
DescendantEdges(edge, parent, child, nEdge = length(parent))
AllDescendantEdges(parent, child, nEdge = length(parent))
}
\arguments{
\item{edge}{number of the edge whose child edges are required}
\item{parent}{the first column of the edge matrix of a tree of class
\code{\link{phylo}}, i.e. \code{tree$edge[, 1]}}
\item{child}{the second column of the edge matrix of a tree of class
\code{\link{phylo}}, i.e. \code{tree$edge[, 2]}}
\item{nEdge}{number of edges (calculated from length(parent) if not supplied)}
}
\value{
\code{DescendantEdges} returns a logical vector stating whether each edge in turn is a descendant of the specified edge
(or the edge itself)
\code{AllDescendantEdges} returns a matrix of class logical, with row N specifying whether each edge is a descendant of edge N
(or the edge itself)
}
\description{
Quickly identifies edges that are 'descended' from a particular edge in a tree
}
\section{Functions}{
\itemize{
\item \code{AllDescendantEdges}: Quickly identifies edges that are 'descended' from each edge in a tree
}}
\seealso{
Other tree navigation: \code{\link{AllAncestors}},
\code{\link{AncestorEdge}}, \code{\link{EdgeAncestry}},
\code{\link{EdgeDistances}}, \code{\link{MRCA}},
\code{\link{NonDuplicateRoot}}
}
\concept{tree navigation}
|
/man/DescendantEdges.Rd
|
no_license
|
nanoquanta/TreeTools
|
R
| false
| true
| 1,679
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tree_properties.R
\name{DescendantEdges}
\alias{DescendantEdges}
\alias{AllDescendantEdges}
\title{Descendant Edges}
\usage{
DescendantEdges(edge, parent, child, nEdge = length(parent))
AllDescendantEdges(parent, child, nEdge = length(parent))
}
\arguments{
\item{edge}{number of the edge whose child edges are required}
\item{parent}{the first column of the edge matrix of a tree of class
\code{\link{phylo}}, i.e. \code{tree$edge[, 1]}}
\item{child}{the second column of the edge matrix of a tree of class
\code{\link{phylo}}, i.e. \code{tree$edge[, 2]}}
\item{nEdge}{number of edges (calculated from length(parent) if not supplied)}
}
\value{
\code{DescendantEdges} returns a logical vector stating whether each edge in turn is a descendant of the specified edge
(or the edge itself)
\code{AllDescendantEdges} returns a matrix of class logical, with row N specifying whether each edge is a descendant of edge N
(or the edge itself)
}
\description{
Quickly identifies edges that are 'descended' from a particular edge in a tree
}
\section{Functions}{
\itemize{
\item \code{AllDescendantEdges}: Quickly identifies edges that are 'descended' from each edge in a tree
}}
\seealso{
Other tree navigation: \code{\link{AllAncestors}},
\code{\link{AncestorEdge}}, \code{\link{EdgeAncestry}},
\code{\link{EdgeDistances}}, \code{\link{MRCA}},
\code{\link{NonDuplicateRoot}}
}
\concept{tree navigation}
|
library(tidyverse)
library(neonUtilities)
library(data.table)
library(phenocamapi)
library(lubridate)
library(jpeg)
library(phenocamr)
library(XML)
library(RCurl)
library(rlist)
sites <- c("HARV", "OSBS", "CPER")
### flux data ###
## from tutorial https://www.neonscience.org/eddy-data-intro
zipsByProduct(dpID="DP4.00200.001", package="basic",
site=sites,
startdate="2018-06", enddate="2018-07",
savepath="neonsummit/data",
check.size=F)
flux_dpid <- "DP4.00200.001"
flux <- stackEddy(filepath=paste0(getwd(), "/neonsummit/data/filesToStack00200"),
level="dp04")
### in situ phenology ###
phe_dpid <- 'DP1.10055.001'
zipsByProduct(dpID='DP1.10055.001', package ="basic",
site=sites,
savepath="neonsummit/data",
check.size = F)
stackByTable(phe_dpid, filepath=paste0(getwd(), "neonsummit/data/filesToStack10055"),
savepath = paste0(getwd(), "/filesToStack10055"), folder=T)
### phenocam data ###
ls("package:phenocamr")
#get list of sites
theurl <- getURL("https://phenocam.sr.unh.edu/webcam/network/table/",.opts = list(ssl.verifypeer = FALSE) )
cameraList <- readHTMLTable(theurl, which = 1, stringsAsFactors=FALSE)
neonCamera <- filter(cameraList, grepl('NEON', Camera))
neonCamera <- neonCamera[substr(neonCamera$Camera, 10, 13)%in%sites,]
phenos <- get_phenos()
landWater <- 'DP1.20002'
understory <- 'DP1.00042'
canopy <- 'DP1.00033'
neonCamera$dp <- ifelse(grepl('DP1.00033', neonCamera$Camera), "canopy",
ifelse(grepl('DP1.00042', neonCamera$Camera), "understory",
ifelse(grepl('DP1.20002', neonCamera$Camera), "landWater", NA)))
listUpper <- unique(neonCamera$Camera[grepl('DP1.00033', neonCamera$Camera)])
getwd()
list.files(getwd())
rois <- get_rois()
cam_df <- data.frame()
# Download and assemble 1-day time series for all understory ("_UN_")
# phenocams at the target sites into a single data frame (cam_df).
understory_cams <- neonCamera$Camera[neonCamera$dp == "understory"]
for (i in seq_along(understory_cams)) {
  temp_cam <- understory_cams[i]
  download_phenocam(temp_cam, frequency = 1)
  # BUGFIX: the original call had unbalanced parentheses and a truncated
  # filename ('paste(temp_cam, "_UN_")'), which is a syntax error.
  # TODO(review): confirm the ROI/frequency suffix of the downloaded
  # understory CSV; the pattern below is inferred from the canopy example
  # "NEON.D01.HARV.DP1.00033_DB_1000_1day.csv".
  temp_df <- read_phenocam(file.path(tempdir(),
                                     paste0(temp_cam, "_UN_1000_1day.csv")))
  cam_df <- rbind(cam_df, temp_df)
  # NOTE(review): removed a stray per-iteration read of the HARV canopy file
  # that sat in the broken region; the same read is performed after the loop.
  rm(temp_cam, temp_df)
}
# download_phenocam(site = "NEON.D01.HARV.DP1.00033",
# frequency = 1)
list.files(tempdir())
df <- read_phenocam(file.path(tempdir(),"NEON.D01.HARV.DP1.00033_DB_1000_1day.csv"))
|
/data/pullData.R
|
no_license
|
katharynduffy/NEONSummitPhenology
|
R
| false
| false
| 2,539
|
r
|
library(tidyverse)
library(neonUtilities)
library(data.table)
library(phenocamapi)
library(lubridate)
library(jpeg)
library(phenocamr)
library(XML)
library(RCurl)
library(rlist)
sites <- c("HARV", "OSBS", "CPER")
### flux data ###
## from tutorial https://www.neonscience.org/eddy-data-intro
zipsByProduct(dpID="DP4.00200.001", package="basic",
site=sites,
startdate="2018-06", enddate="2018-07",
savepath="neonsummit/data",
check.size=F)
flux_dpid <- "DP4.00200.001"
flux <- stackEddy(filepath=paste0(getwd(), "/neonsummit/data/filesToStack00200"),
level="dp04")
### in situ phenology ###
phe_dpid <- 'DP1.10055.001'
zipsByProduct(dpID='DP1.10055.001', package ="basic",
site=sites,
savepath="neonsummit/data",
check.size = F)
stackByTable(phe_dpid, filepath=paste0(getwd(), "neonsummit/data/filesToStack10055"),
savepath = paste0(getwd(), "/filesToStack10055"), folder=T)
### phenocam data ###
ls("package:phenocamr")
#get list of sites
theurl <- getURL("https://phenocam.sr.unh.edu/webcam/network/table/",.opts = list(ssl.verifypeer = FALSE) )
cameraList <- readHTMLTable(theurl, which = 1, stringsAsFactors=FALSE)
neonCamera <- filter(cameraList, grepl('NEON', Camera))
neonCamera <- neonCamera[substr(neonCamera$Camera, 10, 13)%in%sites,]
phenos <- get_phenos()
landWater <- 'DP1.20002'
understory <- 'DP1.00042'
canopy <- 'DP1.00033'
neonCamera$dp <- ifelse(grepl('DP1.00033', neonCamera$Camera), "canopy",
ifelse(grepl('DP1.00042', neonCamera$Camera), "understory",
ifelse(grepl('DP1.20002', neonCamera$Camera), "landWater", NA)))
listUpper <- unique(neonCamera$Camera[grepl('DP1.00033', neonCamera$Camera)])
getwd()
list.files(getwd())
rois <- get_rois()
cam_df <- data.frame()
# Download and assemble 1-day time series for all understory ("_UN_")
# phenocams at the target sites into a single data frame (cam_df).
understory_cams <- neonCamera$Camera[neonCamera$dp == "understory"]
for (i in seq_along(understory_cams)) {
  temp_cam <- understory_cams[i]
  download_phenocam(temp_cam, frequency = 1)
  # BUGFIX: the original call had unbalanced parentheses and a truncated
  # filename ('paste(temp_cam, "_UN_")'), which is a syntax error.
  # TODO(review): confirm the ROI/frequency suffix of the downloaded
  # understory CSV; the pattern below is inferred from the canopy example
  # "NEON.D01.HARV.DP1.00033_DB_1000_1day.csv".
  temp_df <- read_phenocam(file.path(tempdir(),
                                     paste0(temp_cam, "_UN_1000_1day.csv")))
  cam_df <- rbind(cam_df, temp_df)
  # NOTE(review): removed a stray per-iteration read of the HARV canopy file
  # that sat in the broken region; the same read is performed after the loop.
  rm(temp_cam, temp_df)
}
# download_phenocam(site = "NEON.D01.HARV.DP1.00033",
# frequency = 1)
list.files(tempdir())
df <- read_phenocam(file.path(tempdir(),"NEON.D01.HARV.DP1.00033_DB_1000_1day.csv"))
|
#' @export
#'
#' @title F.est.efficiency
#'
#' @description Estimate trap efficiency for every sample period, per trap.
#'
#' @param release.df A data frame produced by \code{F.get.release.data}.
#'   Contains information on releases and recaptures. This data frame has one
#'   line per release trial per trap, with trap identified via variable
#'   \code{TrapPositionID}.
#'
#' @param batchDate A POSIX-formatted vector of dates.
#'
#' @param df.spline The default degrees of freedom to use in the estimation of
#'   splines. Default is 4 (1 internal knot).
#'
#' @param plot A logical indicating if efficiencies are to be plotted over time,
#'   per trap.
#'
#' @param plot.file The name to which a graph of efficiency is to be output, if
#'   \code{plot=TRUE}.
#'
#' @return A data frame containing fishing intervals and associated capture
#'   efficiency, along with variable \code{gam.estimated}. Variable
#'   \code{gam.estimated} is \code{"Yes"} if efficiency for that interval was
#'   estimated by the GAM model (\code{method=3}), rather than being empirical
#'   (\code{method=1}).
#'
#' @details Generally, fish released as part of an efficiency trial arrive in
#'   traps over the course of several days. \code{F.est.efficiency} calculates
#'   the mean recapture time of all re-captured fish. When a release trial
#'   resulted in no recaptures, the mean recapture time is half way between the
#'   first and last visit of the trial (i.e., after release).
#'
#'   Function \code{F.assign.batch.date} assigns mean recapture time, which is
#'   measured to the nearest minute, to a \code{batchDate}. Batch date is a
#'   simple calendar date.
#'
#'   Fishing instances during which traps utilized half-cones are recorded in
#'   variable \code{HalfCone}. During these instances, the number of captured
#'   fish, variable \code{Recaps}, is multiplied by the value of
#'   \code{halfConeMulti}. The value of \code{halfConeMulti} is set in
#'   \code{GlobalVars} and defaults to 2. The expansion by \code{halfConeMulti}
#'   happens on the raw catch, and not the mean recapture. In this way, the
#'   number recorded in variable \code{Recaps} may not be twice the number
#'   recorded in variable \code{oldRecaps}.
#'
#'   Note that the run season sample period is a vector of length 2 of dates,
#'   housing the beginning and ending of the run. These are stored as an
#'   attribute of the \code{release.df} data frame.
#'
#' @seealso \code{F.get.release.data}, \code{F.assign.batch.date}
#'
#' @author WEST Inc.
#'
#' @examples
#' \dontrun{
#' # ---- Estimate the efficiency.
#' theEff <- F.est.efficiency(release.df,batchDate,df.spline=4,plot=TRUE,plot.file=NA)
#' }
F.est.efficiency <- function( release.df, batchDate, df.spline=4, plot=TRUE, plot.file=NA ){
# Development values, kept for interactive debugging:
# release.df <- release.df
# batchDate <- bd
# df.spline <- 4
# plot <- TRUE
# plot.file <- file.root
# Time zone is expected to have been placed in the global environment by
# GlobalVars (see roxygen details above).
time.zone <- get("time.zone", envir=.GlobalEnv)
# ---- Fix up the data frame.
rel.df <- release.df[,c("releaseID","ReleaseDate","nReleased","HrsToFirstVisitAfter",
"HrsToLastVisitAfter","trapPositionID","meanRecapTime","Recaps",'beg.date','end.date',
"allMoonMins","meanMoonProp", # added this line for enhanced models for collapsing on batchDate.
"allNightMins","meanNightProp", # added this line for enhanced models for collapsing on batchDate.
"allfl","meanForkLength", # added this line for enhanced models for collapsing on batchDate.
"thisIsFake")] # added this line for enhanced models for when we have no legit eff trials.
rel.df$batchDate <- rep(NA, nrow(rel.df))
names(rel.df)[ names(rel.df) == "meanRecapTime" ] <- "EndTime"
# ---- The meanRecapTime is NA if they did not catch anything from a release.
# ---- This is different from the check on line 36, where there was no catch
# ---- over ALL releases. In this NA case, replace any NA meanEndTimes with
# ---- releaseTime plus mean of HrsToFirstVisitAfter and HrsToLastVisitAfter.
# ---- This will assign a batch date.
ind <- is.na( rel.df$EndTime )
# NOTE(review): if ReleaseDate is POSIXct, adding a raw numeric adds SECONDS,
# not hours -- confirm that HrsTo*VisitAfter are already expressed in the
# units this arithmetic expects, or that ReleaseDate is not POSIXct here.
rel.df$EndTime[ind] <- rel.df$ReleaseDate[ind] + (rel.df$HrsToFirstVisitAfter[ind] + rel.df$HrsToLastVisitAfter[ind]) / 2
# ---- Assign batch date to efficiency trials based on meanEndTime (really weighted meanVisitTime).
rel.df <- F.assign.batch.date( rel.df )
rel.df$batchDate.str <- format(rel.df$batchDate,"%Y-%m-%d")
# ---- Sum by batch dates. This combines release and catches over trials that occured close in time. For enhanced
# ---- efficiency models, need to collapse prop of moon and night and forklength as well over batchDate.
ind <- list( TrapPositionID=rel.df$trapPositionID,batchDate=rel.df$batchDate.str )
nReleased <- tapply( rel.df$nReleased,ind, sum )
nCaught <- tapply( rel.df$Recaps,ind, sum )
# thisIsFake is collapsed with max(): a (trap, date) cell is flagged fake if
# any contributing trial was fake.
thisIsFake <- tapply( rel.df$thisIsFake,ind, max)
#lapply(split(truc, truc$x), function(z) weighted.mean(z$y, z$w))
# NOTE(review): the sapply(split(...)) results are later cbind-ed next to the
# tapply results -- this assumes split() interaction ordering matches the
# expand.grid over tapply dimnames; confirm cells line up for all inputs.
bdMeanNightProp <- sapply( split(rel.df, ind) ,function(z) weighted.mean(z$meanNightProp,z$allNightMins) )
bdMeanMoonProp <- sapply( split(rel.df, ind) ,function(z) weighted.mean(z$meanMoonProp,z$allMoonMins) )
bdMeanForkLength <- sapply( split(rel.df, ind) ,function(z) weighted.mean(z$meanForkLength,z$allfl) )
eff.est <- cbind( expand.grid( TrapPositionID=dimnames(nReleased)[[1]],batchDate=dimnames(nReleased)[[2]]),
nReleased=c(nReleased),nCaught=c(nCaught),bdMeanNightProp=c(bdMeanNightProp),
bdMeanMoonProp=c(bdMeanMoonProp),bdMeanForkLength=c(bdMeanForkLength),thisIsFake=c(thisIsFake) )
eff.est$batchDate <- as.character(eff.est$batchDate)
# ================== done with data manipulations ===========================
# ---- Compute efficiency.
eff.est$nReleased[ eff.est$nReleased <= 0] <- NA # don't think this can happen, but just in case.
eff.est$efficiency <- (eff.est$nCaught)/(eff.est$nReleased) # eff$efficiency not used in computation, but is plotted.
eff.est <- eff.est[ !is.na(eff.est$efficiency), ]
# ---- Figure out which days have efficiency data.
bd <- expand.grid(TrapPositionID=sort(unique(eff.est$TrapPositionID)),batchDate=format(batchDate,"%Y-%m-%d"),stringsAsFactors=F)
# all.y=T keeps every (trap, batchDate) combination; days without an
# efficiency trial come through with NA efficiency and are imputed below.
eff <- merge( eff.est, bd, by=c("TrapPositionID","batchDate"),all.y=T)
eff$batchDate <- as.POSIXct( eff$batchDate, format="%Y-%m-%d",tz=time.zone )
# ---- Assign attributes for plotting.
ind <- !duplicated(release.df$TrapPosition)
attr(eff,"subsites") <- data.frame(subSiteName=as.character(release.df$TrapPosition[ind]),subSiteID=release.df$trapPositionID[ind],stringsAsFactors=F)
attr(eff, "site.name") <- release.df$siteName[1]
attr(eff, "min.date") <- attr(release.df,"min.date")
attr(eff, "max.date") <- attr(release.df,"max.date")
attr(eff, "enhmodel") <- attr(release.df,"enhmodel")
attr(eff, "site") <- release.df$siteID[1]
attr(eff, "catch.subsites") <- attr(release.df,"catch.subsites")
# ---- If there are missing days, impute them.
missing.days <- is.na(eff$efficiency)
if( any(missing.days) ){
eff.and.fits <- suppressWarnings(F.efficiency.model( eff, plot=plot, max.df.spline=df.spline, plot.file=plot.file ))
} else {
# Nothing to impute: return observed efficiencies with empty model slots.
eff.and.fits <- list(eff=eff, fits=NULL, X=NULL, obs.data=eff.est)
attr(eff.and.fits, "out.fn.list") <- NULL
}
eff.and.fits
}
|
/R/est_efficiency.r
|
no_license
|
tmcd82070/CAMP_RST
|
R
| false
| false
| 7,516
|
r
|
#' @export
#'
#' @title F.est.efficiency
#'
#' @description Estimate trap efficiency for every sample period, per trap.
#'
#' @param release.df A data frame produced by \code{F.get.release.data}.
#' Contains information on releases and recaptures. This data frame has one
#' line per release trial per trap, with trap identified via variable
#' \code{TrapPositionID}.
#'
#' @param batchDate A POSIX-formatted vector of dates.
#'
#' @param df.spline The default degrees of freedom to use in the estimation of
#' splines. Default is 4 (1 internal knot).
#'
#' @param plot A logical indicating if efficiencies are to be plotted over time,
#' per trap.
#'
#' @param plot.file The name to which a graph of efficiency is to be output, if
#' \code{plot=TRUE}.
#'
#' @return A data frame containing fishing intervals and associated capture
#' efficiency, along with variable \code{gam.estimated}. Variable
#' \code{gam.estimated} is \code{"Yes"} if efficiency for that interval was
#' estimated by the GAM model (\code{method=3}), rather than being empirical
#' (\code{method=1}).
#'
#' @details Generally, fish released as part of an efficiency trial arrive in
#' traps over the course of several days. \code{F.est.efficiency} calculates
#' the mean recapture time of all re-captured fish. When a release trial
#' resulted in no recaptures, the mean recapture time is half way between the
#' first and last visit of the trial (i.e., after release).
#'
#' Function \code{F.assign.batch.date} assigns mean recapture time, which is
#' mesured to the nearest minute, to a \code{batchDate}. Batch date a simple
#' calendar date.
#'
#' Fishing instances during which traps utilized half-cones are recorded in
#' variable \code{HalfCone}. During these instances, the number of captured
#' fish, variable \code{Recaps}, is multiplied by the value of
#' \code{halfConeMulti}. The value of \code{halfConeMulti} is set in
#' \code{GlobalVars} and defaults to 2. The expansion by \code{halfConeMulti}
#' happens on the raw catch, and not the mean recapture. In this way, the
#' number recorded in variable \code{Recaps} may not be twice the number
#' recorded in variable \code{oldRecaps}.
#'
#' Note that the run season sample period is a vector of length 2 of dates,
#' housing the beginning and ending of the run. These are stored as an
#' attribute of the \code{release.df} data frame.
#'
#' @seealso \code{F.get.release.data}, \code{F.assign.batch.date}
#'
#' @author WEST Inc.
#'
#' @examples
#' \dontrun{
#' # ---- Estimate the efficiency.
#' theEff <- F.est.efficiency(release.df,batchDate,df.spline=4,plot=TRUE,plots.file=NA)
#' }
F.est.efficiency <- function( release.df, batchDate, df.spline=4, plot=TRUE, plot.file=NA ){
# release.df <- release.df
# batchDate <- bd
# df.spline <- 4
# plot <- TRUE
# plot.file <- file.root
time.zone <- get("time.zone", envir=.GlobalEnv)
# ---- Fix up the data frame.
rel.df <- release.df[,c("releaseID","ReleaseDate","nReleased","HrsToFirstVisitAfter",
"HrsToLastVisitAfter","trapPositionID","meanRecapTime","Recaps",'beg.date','end.date',
"allMoonMins","meanMoonProp", # added this line for enhanced models for collapsing on batchDate.
"allNightMins","meanNightProp", # added this line for enhanced models for collapsing on batchDate.
"allfl","meanForkLength", # added this line for enhanced models for collapsing on batchDate.
"thisIsFake")] # added this line for enhanced models for when we have no legit eff trials.
rel.df$batchDate <- rep(NA, nrow(rel.df))
names(rel.df)[ names(rel.df) == "meanRecapTime" ] <- "EndTime"
# ---- The meanRecapTime is NA if they did not catch anything from a release.
# ---- This is different from the check on line 36, where there was no catch
# ---- over ALL releases. In this NA case, replace any NA meanEndTimes with
# ---- releaseTime plus mean of HrsToFirstVisitAfter and HrsToLastVisitAfter.
# ---- This will assign a batch date.
ind <- is.na( rel.df$EndTime )
rel.df$EndTime[ind] <- rel.df$ReleaseDate[ind] + (rel.df$HrsToFirstVisitAfter[ind] + rel.df$HrsToLastVisitAfter[ind]) / 2
# ---- Assign batch date to efficiency trials based on meanEndTime (really weighted meanVisitTime).
rel.df <- F.assign.batch.date( rel.df )
rel.df$batchDate.str <- format(rel.df$batchDate,"%Y-%m-%d")
# ---- Sum by batch dates. This combines release and catches over trials that occured close in time. For enhanced
# ---- efficiency models, need to collapse prop of moon and night and forklength as well over batchDate.
ind <- list( TrapPositionID=rel.df$trapPositionID,batchDate=rel.df$batchDate.str )
nReleased <- tapply( rel.df$nReleased,ind, sum )
nCaught <- tapply( rel.df$Recaps,ind, sum )
thisIsFake <- tapply( rel.df$thisIsFake,ind, max)
#lapply(split(truc, truc$x), function(z) weighted.mean(z$y, z$w))
bdMeanNightProp <- sapply( split(rel.df, ind) ,function(z) weighted.mean(z$meanNightProp,z$allNightMins) )
bdMeanMoonProp <- sapply( split(rel.df, ind) ,function(z) weighted.mean(z$meanMoonProp,z$allMoonMins) )
bdMeanForkLength <- sapply( split(rel.df, ind) ,function(z) weighted.mean(z$meanForkLength,z$allfl) )
eff.est <- cbind( expand.grid( TrapPositionID=dimnames(nReleased)[[1]],batchDate=dimnames(nReleased)[[2]]),
nReleased=c(nReleased),nCaught=c(nCaught),bdMeanNightProp=c(bdMeanNightProp),
bdMeanMoonProp=c(bdMeanMoonProp),bdMeanForkLength=c(bdMeanForkLength),thisIsFake=c(thisIsFake) )
eff.est$batchDate <- as.character(eff.est$batchDate)
# ================== done with data manipulations ===========================
# ---- Compute efficiency.
eff.est$nReleased[ eff.est$nReleased <= 0] <- NA # don't think this can happen, but just in case.
eff.est$efficiency <- (eff.est$nCaught)/(eff.est$nReleased) # eff$efficiency not used in computation, but is plotted.
eff.est <- eff.est[ !is.na(eff.est$efficiency), ]
# ---- Figure out which days have efficiency data.
bd <- expand.grid(TrapPositionID=sort(unique(eff.est$TrapPositionID)),batchDate=format(batchDate,"%Y-%m-%d"),stringsAsFactors=F)
eff <- merge( eff.est, bd, by=c("TrapPositionID","batchDate"),all.y=T)
eff$batchDate <- as.POSIXct( eff$batchDate, format="%Y-%m-%d",tz=time.zone )
# ---- Assign attributes for plotting.
ind <- !duplicated(release.df$TrapPosition)
attr(eff,"subsites") <- data.frame(subSiteName=as.character(release.df$TrapPosition[ind]),subSiteID=release.df$trapPositionID[ind],stringsAsFactors=F)
attr(eff, "site.name") <- release.df$siteName[1]
attr(eff, "min.date") <- attr(release.df,"min.date")
attr(eff, "max.date") <- attr(release.df,"max.date")
attr(eff, "enhmodel") <- attr(release.df,"enhmodel")
attr(eff, "site") <- release.df$siteID[1]
attr(eff, "catch.subsites") <- attr(release.df,"catch.subsites")
# ---- If there are missing days, impute them.
missing.days <- is.na(eff$efficiency)
if( any(missing.days) ){
eff.and.fits <- suppressWarnings(F.efficiency.model( eff, plot=plot, max.df.spline=df.spline, plot.file=plot.file ))
} else {
eff.and.fits <- list(eff=eff, fits=NULL, X=NULL, obs.data=eff.est)
attr(eff.and.fits, "out.fn.list") <- NULL
}
eff.and.fits
}
|
# test_outerlabels.R
# Time-stamp: <23 Apr 2019 14:49:43 c:/x/rpack/corrgram/tests/testthat/test_outerlabels.R>
# Visual smoke tests for corrgram()'s outer.labels argument: most calls here
# only check that plotting runs without error; only the final test_that()
# block asserts behavior (rejection of wrong-length label vectors).
require(corrgram)
# short syntax for outer labels (TRUE requests default labels on that side)
corrgram(state.x77, outer.labels=list(bottom=TRUE, right=TRUE))
# use default labels in outer margin, with rotation on the right side
corrgram(state.x77, outer.labels=list(bottom=TRUE, right=list(srt=25)))
labs=c("Population", "Income", "Illiteracy", "Life Exp", "Murder", "HS Grad", "Frost", "Area")
# outer.labels not given
corrgram(state.x77)
# outer labels, one side at a time
corrgram(state.x77, outer.labels=list(bottom=list(labels=labs)))
corrgram(state.x77, outer.labels=list(left=list(labels=labs)))
corrgram(state.x77, outer.labels=list(top=list(labels=labs)))
corrgram(state.x77, outer.labels=list(right=list(labels=labs)))
# outer labels with no diagonal labels
corrgram(state.x77, text.panel=NULL,
outer.labels=list(bottom=list(labels=labs)))
# outer.labels, all 4 sides at once
corrgram(state.x77,
outer.labels=list(bottom=list(labels=labs),
left=list(labels=labs),
top=list(labels=labs),
right=list(labels=labs)))
# outer.labels, all 4 sides at once, re-ordered (order=TRUE reorders variables)
corrgram(state.x77, order=TRUE,
outer.labels=list(bottom=list(labels=labs),
left=list(labels=labs),
top=list(labels=labs),
right=list(labels=labs)))
# outer labels, srt (rotation), adj (justification)
corrgram(state.x77,
outer.labels=list(bottom=list(labels=labs,srt=60, adj=c(adj=1,.5)),
left=list(labels=labs,srt=30, adj=c(1,1)),
top=list(labels=labs,srt=90, adj=c(0,0)),
right=list(labels=labs,srt=0, adj=c(0,0))))
# outer labels, cex (label size)
corrgram(state.x77, outer.labels=list(bottom=list(labels=labs,cex=0.5)))
corrgram(state.x77, outer.labels=list(left=list(labels=labs,cex=1)))
corrgram(state.x77, outer.labels=list(top=list(labels=labs,cex=1.5)))
corrgram(state.x77, outer.labels=list(right=list(labels=labs,cex=2)))
# outer labels with cex/srt, larger outer margins, plus outer-margin side
# titles added afterwards via mtext()
corrgram(state.x77, oma=c(7, 7, 2, 2), main="state.x77",
outer.labels=list(bottom=list(labels=labs,cex=1.5,srt=60),
left=list(labels=labs,cex=1.5,srt=30)))
mtext("Bottom", side=1, cex=2, line = -1.5, outer=TRUE, xpd=NA)
mtext("Left", side=2, cex=2, line = -1.5, outer=TRUE, xpd=NA)
# Error cases: label vectors whose length does not match the number of
# variables must be rejected on every side.
test_that("outer labels are wrong length", {
expect_error(corrgram(state.x77, outer.labels=list(bottom=list(labels=labs[-1]))))
expect_error(corrgram(state.x77, outer.labels=list(left=list(labels=labs[-1]))))
expect_error(corrgram(state.x77, outer.labels=list(top=list(labels=labs[-1]))))
expect_error(corrgram(state.x77, outer.labels=list(right=list(labels=labs[-1]))))
})
|
/tests/testthat/test_outerlabels.R
|
no_license
|
Moly-malibu/corrgram
|
R
| false
| false
| 2,848
|
r
|
# test_outerlabels.R
# Time-stamp: <23 Apr 2019 14:49:43 c:/x/rpack/corrgram/tests/testthat/test_outerlabels.R>
require(corrgram)
# short syntax for outer labels
corrgram(state.x77, outer.labels=list(bottom=TRUE, right=TRUE))
# use default labels in outer margin
corrgram(state.x77, outer.labels=list(bottom=TRUE, right=list(srt=25)))
labs=c("Population", "Income", "Illiteracy", "Life Exp", "Murder", "HS Grad", "Frost", "Area")
# outer.labels not given
corrgram(state.x77)
# outer labels, one side at a time
corrgram(state.x77, outer.labels=list(bottom=list(labels=labs)))
corrgram(state.x77, outer.labels=list(left=list(labels=labs)))
corrgram(state.x77, outer.labels=list(top=list(labels=labs)))
corrgram(state.x77, outer.labels=list(right=list(labels=labs)))
# outer labels with no diagonal labels
corrgram(state.x77, text.panel=NULL,
outer.labels=list(bottom=list(labels=labs)))
# outer.labels, all 4 sides at once
corrgram(state.x77,
outer.labels=list(bottom=list(labels=labs),
left=list(labels=labs),
top=list(labels=labs),
right=list(labels=labs)))
# outer.labels, all 4 sides at once, re-ordered
corrgram(state.x77, order=TRUE,
outer.labels=list(bottom=list(labels=labs),
left=list(labels=labs),
top=list(labels=labs),
right=list(labels=labs)))
# outer labels, srt, adj
corrgram(state.x77,
outer.labels=list(bottom=list(labels=labs,srt=60, adj=c(adj=1,.5)),
left=list(labels=labs,srt=30, adj=c(1,1)),
top=list(labels=labs,srt=90, adj=c(0,0)),
right=list(labels=labs,srt=0, adj=c(0,0))))
# outer labels, cex
corrgram(state.x77, outer.labels=list(bottom=list(labels=labs,cex=0.5)))
corrgram(state.x77, outer.labels=list(left=list(labels=labs,cex=1)))
corrgram(state.x77, outer.labels=list(top=list(labels=labs,cex=1.5)))
corrgram(state.x77, outer.labels=list(right=list(labels=labs,cex=2)))
# outer labels, all options, larger margins, xlab, ylab
corrgram(state.x77, oma=c(7, 7, 2, 2), main="state.x77",
outer.labels=list(bottom=list(labels=labs,cex=1.5,srt=60),
left=list(labels=labs,cex=1.5,srt=30)))
mtext("Bottom", side=1, cex=2, line = -1.5, outer=TRUE, xpd=NA)
mtext("Left", side=2, cex=2, line = -1.5, outer=TRUE, xpd=NA)
test_that("outer labels are wrong length", {
expect_error(corrgram(state.x77, outer.labels=list(bottom=list(labels=labs[-1]))))
expect_error(corrgram(state.x77, outer.labels=list(left=list(labels=labs[-1]))))
expect_error(corrgram(state.x77, outer.labels=list(top=list(labels=labs[-1]))))
expect_error(corrgram(state.x77, outer.labels=list(right=list(labels=labs[-1]))))
})
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{addDataset}
\alias{addDataset}
\title{Add a new dataset}
\usage{
addDataset(api, data)
}
\arguments{
\item{api}{a \code{\link{mangalapi}} object}
\item{data}{the dataset in list format}
}
\description{
Post a new dataset to the database
}
\details{
Requires authentication
}
|
/man/addDataset.Rd
|
no_license
|
mangal-interactions/rmangal-v1
|
R
| false
| false
| 336
|
rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{addDataset}
\alias{addDataset}
\title{Add a new dataset}
\usage{
addDataset(api, data)
}
\arguments{
\item{api}{a \code{\link{mangalapi}} object}
\item{data}{the dataset in list format}
}
\description{
Post a new dataset to the database
}
\details{
Requires authentication
}
|
# Libraries ----
# NOTE(review): stopifnot(require(...)) aborts with an uninformative
# "not all arguments are TRUE" error when a package is missing; library()
# would give a clearer message. Kept as-is to preserve behaviour.
stopifnot(
require(optparse),
require(reshape2),
require(gplots),
require(openxlsx),
require(xtable),
require(tenxutils),
require(gsfisher)
)
# Options ----
# Command-line interface for aggregating per-cluster geneset
# over-representation results into a spreadsheet, summary plots and latex
# snippets.
option_list <- list(
make_option(c("--genesetdir"), default="none",
help="directory containing the genesets to aggregate"),
make_option(c("--nclusters"), type="integer", default=0,
help="the number of the clusters being analysed"),
make_option(c("--firstcluster"), type="integer", default=0,
help="clusters might not be zero based..."),
make_option(c("--pvaluethreshold"),type="double",default=0.05,
help="p value threshold for filtering sets"),
make_option(c("--padjustmethod"), default="BH",
help="The given method is passed to p.adjust"),
make_option(c("--useadjusted"), default=TRUE,
help="should adjusted p-values be used for the summary heatmap"),
make_option(c("--showcommon"), default=TRUE,
help=paste("Should genesets significantly enriched in all clusters",
"be shown in the summary heatmap")),
make_option(c("--mingenes"), type="integer", default=2,
help="min no. genes in foreground set"),
make_option(c("--maxgenes"), type="integer", default=500,
help="the maximum number of genes allowed per geneset"),
make_option(c("--minoddsratio"), type="double", default=1.5,
help="The minimum odds ratio."),
make_option(c("--gmt_names"), default="none",
help="comma separated list of names for the gmt files"),
make_option(c("--show_detailed"), default="none",
help=paste("comma separated list of names for which to make individual",
"per-sample/cluster plots")),
make_option(c("--clustertype"),default="cluster",
help="will be used e.g. in plot labels"),
make_option(c("--project"), default="SeuratAnalysis",
help="project name"),
make_option(c("--prefix"), default="genesets",
help="expected prefix for source files"),
make_option(c("--plotdirvar"), default="clusterGenesetsDir",
help="latex var containing name of the directory with the plots"),
make_option(c("--outprefix"), default="none",
help="prefix for outfiles")
)
opt <- parse_args(OptionParser(option_list=option_list))
cat("Running with options:\n")
print(opt)
## aggregate geneset types by worksheet
## all clusters in each worksheet
# "none" is the sentinel for "option not supplied".
if(opt$gmt_names != "none")
{
gmt_names <- strsplit(opt$gmt_names,",")[[1]]
} else {
gmt_names <- c()
}
if(opt$show_detailed != "none")
{
show_detailed <- strsplit(opt$show_detailed,",")[[1]]
} else {
show_detailed <- c()
}
## TODO: Detect automatically
# Geneset types to aggregate: the standard annotations plus any user-named
# GMT collections.
genesets = c("GO.BP","GO.MF","GO.CC",
"KEGG",
gmt_names)
## set up workbook.
wb <- createWorkbook()
# Per-cluster latex summary tables, accumulated inside the main loop.
ltabs <- list()
hmaps <- list()
# Latex snippets for the figures, accumulated inside the main loop.
tex <- c()
## Main loop: for each geneset type, collect the per-cluster test results,
## write a worksheet, build the summary dotplot and the latex snippets.
for (geneset in genesets)
{
    ## Accumulator for this geneset type's per-cluster result tables.
    ## (The original re-used the name "genesets" here, clobbering the vector
    ## being iterated over; renamed for clarity. The loop itself was safe
    ## because R evaluates the loop sequence once.)
    geneset_results <- NULL

    message(paste("Processing:", geneset, "annotations."))

    begin <- TRUE

    ## Clusters may be zero- or one-based (see --firstcluster).
    if (opt$firstcluster == 0)
    {
        first <- 0
        last <- opt$nclusters - 1
    } else {
        first <- opt$firstcluster
        last <- opt$nclusters
    }

    ## Build a single table containing the results of the geneset tests for
    ## all clusters.
    for (cluster in first:last)
    {
        message(paste("Working on cluster: ", cluster))
        fn <- paste0(opt$genesetdir, "/", opt$prefix, ".", cluster, ".", geneset, ".txt.gz")
        if (file.exists(fn))
        {
            temp <- read.table(gzfile(fn), sep="\t", header=TRUE, as.is=TRUE, quote="")
            if (nrow(temp) > 0)
            {
                temp$cluster <- cluster
            } else {
                message(paste0("zero rows for cluster: ", cluster))
            }
            if (begin)
            {
                geneset_results <- temp
                begin <- FALSE
            } else {
                geneset_results <- rbind(geneset_results, temp)
            }
        } else {
            message(paste("Skipping ", fn, "(file not found)", sep="\t"))
        }
    }

    make_plot <- FALSE

    ## BUGFIX: initialise here, unconditionally. The original only set
    ## per_sample_tex inside the if(make_plot) branch, so the tex assembly at
    ## the bottom of this loop referenced an undefined variable on the first
    ## iteration without a plot (an error), and a stale value from a previous
    ## geneset type on later ones.
    per_sample_tex <- c()

    if (!is.null(geneset_results))
    {
        ## Filter out genesets we do not wish to consider.
        filtered_genesets <- filterGenesets(geneset_results,
                                            min_foreground_genes = opt$mingenes,
                                            max_genes_geneset = opt$maxgenes,
                                            min_odds_ratio = opt$minoddsratio,
                                            padjust_method = opt$padjustmethod,
                                            use_adjusted_pvalues = opt$useadjusted,
                                            pvalue_threshold = opt$pvaluethreshold)
        results_table <- filtered_genesets
    } else {
        results_table <- NULL
    }

    if (!is.null(results_table) && nrow(results_table) > 0)
    {
        ## Number of clusters in which each geneset passed the filters.
        id_tab <- table(results_table$geneset_id)
        results_table$n_clust_sig <- id_tab[results_table$geneset_id]
        results_table$n_clust_sig[is.na(results_table$n_clust_sig)] <- 0

        ## Sort by p value within cluster.
        results_table <- results_table[order(results_table$cluster, results_table$p.val), ]

        ## Tidy up the frame: key columns first.
        firstcols <- c("cluster", "geneset_id", "description",
                       "p.adj", "p.val",
                       "odds.ratio",
                       "n_clust_sig", "n_fg", "n_bg")
        firstcols <- firstcols[firstcols %in% colnames(results_table)]
        othercols <- colnames(results_table)[!colnames(results_table) %in% firstcols]
        results_table <- results_table[, c(firstcols, othercols)]

        ## Round numeric columns to a sensible precision for the spreadsheet:
        ## 3 significant figures below 1000, whole numbers above.
        numeric_cols <- colnames(results_table)[sapply(results_table, is.numeric)]
        for (numeric_col in numeric_cols)
        {
            xx <- results_table[[numeric_col]]
            nas <- is.na(xx)
            if (any(abs(xx) == Inf))
            {
                ints <- FALSE
            } else {
                ## NOTE(review): all() returns NA if xx contains NA (and no
                ## deviation from integer) -- confirm upstream columns are
                ## NA-free or guard with na.rm if this ever errors.
                ints <- all((xx - round(xx)) == 0)
            }
            xx[xx < 1000 & !nas & !ints] <- signif(xx[xx < 1000 & !nas & !ints], digits=3)
            xx[xx >= 1000 & !nas] <- round(xx[xx >= 1000 & !nas], digits=0)
            xx[ints] <- as.integer(xx[ints])
            results_table[[numeric_col]] <- xx
        }

        ## Add the results to the workbook.
        addWorksheet(wb, geneset)
        setColWidths(wb, geneset, cols=1:ncol(results_table), widths=10)
        hs <- createStyle(textDecoration = "BOLD")
        writeData(wb, geneset, results_table, withFilter = TRUE, headerStyle=hs)

        ## Collect the top (at most 5) genesets per cluster for the latex
        ## summary table.
        for (clust in unique(as.character(results_table$cluster)))
        {
            temp <- results_table[results_table$cluster == clust, ]
            nrows <- nrow(temp)
            if (nrows == 0) { next }
            temp <- temp[1:min(nrows, 5), ]
            if (!"description" %in% colnames(temp))
            {
                temp$description <- temp$geneset_id
            }
            ## Trim long descriptions.
            maxl <- 45
            temp$description <- formatDescriptions(temp$description,
                                                   c("REACTOME_", "BIOCARTA_"),
                                                   maxl)
            temp_names <- colnames(temp)
            temp$type <- geneset
            temp <- temp[, c("type", temp_names)]
            temp <- temp[, c("type", "description", "p.val", "p.adj",
                             "n_fg", "odds.ratio", "n_clust_sig")]
            colnames(temp) <- c("type", "description", "p.val", "p.adj",
                                "n_fg", "odds.ratio", "n.clust")
            if (clust %in% names(ltabs))
            {
                ltabs[[clust]] <- rbind(ltabs[[clust]], temp)
            } else {
                ltabs[[clust]] <- temp
            }
        }

        if (nrow(results_table) > 0) { make_plot <- TRUE }
    }

    plotfn <- paste(opt$outprefix, geneset, sep=".")

    if (make_plot)
    {
        xx <- filtered_genesets
        if (!opt$showcommon)
        {
            ## Drop genesets that are significant in every cluster.
            tmp <- table(xx$geneset_id)
            xx <- xx[!xx$geneset_id %in% names(tmp)[tmp == opt$nclusters], ]
        }
        xx$score <- -log10(xx$p.adj) * log2(xx$odds.ratio)
        genesets_to_show <- getSampleGenesets(xx,
                                              sort_by = "score",
                                              max_rows = 50)

        ## Add back adjusted p values to the unfiltered table.
        ## NOTE(review): assumes filterGenesets() preserves the rownames of
        ## its input -- confirm.
        geneset_results$p.adj <- 1
        geneset_results[rownames(filtered_genesets), "p.adj"] <- filtered_genesets$p.adj

        message("making sample enrichment dotplot with n=", nrow(geneset_results), " genesets")
        gp <- sampleEnrichmentDotplot(geneset_results,
                                      selected_genesets = genesets_to_show,
                                      selection_col = "geneset_id",
                                      sample_levels = c(first:last),
                                      min_dot_size = 1, max_dot_size = 6,
                                      maxl = 45,
                                      pvalue_threshold = opt$pvaluethreshold,
                                      title=geneset)
        print(plotfn)
        save_ggplots(plotfn,
                     gp,
                     width=8,
                     height=8)
        message("saved sample enrichement dotplot")

        if (geneset %in% show_detailed)
        {
            ## Make the per-cluster "circle" plots.
            for (cluster in unique(xx$cluster))
            {
                tmp <- xx[xx$cluster == cluster, ]
                tmp <- tmp[rev(order(tmp$score)), ]
                max_n_cat <- 150
                if (nrow(tmp) > max_n_cat) { tmp <- tmp[1:max_n_cat, ] }
                if ("description" %in% colnames(tmp))
                {
                    desc_col <- "description"
                } else {
                    desc_col <- "geneset_id"
                }
                gp <- visualiseClusteredGenesets(tmp,
                                                 highlight=genesets_to_show[genesets_to_show %in% tmp$geneset_id],
                                                 desc_col=desc_col)
                detailed_plotfn <- paste(opt$outprefix,
                                         geneset, "circle_plot", cluster, sep=".")
                save_ggplots(detailed_plotfn,
                             gp,
                             width=10, height=10)
                caption <- paste("Cluster", cluster, geneset,
                                 "genesets clustered by similarity between over-represented genes.", sep=" ")
                per_sample_tex <- c(per_sample_tex,
                                    getFigureTex(basename(detailed_plotfn),
                                                 caption,
                                                 plot_dir_var=opt$plotdirvar))
            }
        }
    } else {
        ## Draw an empty plot with an explanatory message instead.
        pngfn <- paste(plotfn, "png", sep=".")
        png(pngfn, width=8, height=8, units="in", res=100)
        plot.new()
        text(0.5, 0.5, paste0("no significant genesets for:\n", geneset))
        dev.off()
    }

    caption <- paste("Heatmap of the top", geneset, "genesets", sep=" ")
    tex <- c(tex, getSubsectionTex(geneset))
    tex <- c(tex, getFigureTex(basename(plotfn), caption,
                               plot_dir_var=opt$plotdirvar))
    tex <- c(tex, "\n",
             per_sample_tex, "\n")
}
## Write the latex figure snippets and the results spreadsheet.
fig_file <- paste(opt$outprefix, "figure.tex", sep=".")
writeTex(fig_file, tex)

saveWorkbook(wb,
             file=paste(opt$outprefix, "xlsx", sep="."),
             overwrite=TRUE)

## Concatenate the per-cluster summary tables, recording the cumulative row
## index at the end of each cluster's section for \hline placement.
## BUGFIX: initialise "out" locally rather than probing with exists("out"),
## which could pick up an unrelated object of the same name from the
## enclosing workspace and silently mis-detect the "no results" case.
out <- NULL
hlines <- c()
r <- 0
for (cluster in names(ltabs))
{
    temp <- ltabs[[cluster]]
    temp_names <- colnames(temp)
    temp$cluster <- cluster
    temp <- temp[, c("cluster", temp_names)]
    if (is.null(out))
    {
        out <- temp
    } else {
        out <- rbind(out, temp)
    }
    r <- r + nrow(temp)
    hlines <- c(hlines, r)
}

ltab_file <- paste(opt$outprefix, "table.tex", sep=".")

if (is.null(out))
{
    ## No significant genesets anywhere: emit a one-cell placeholder table.
    out <- data.frame(x=c("no significantly enriched genesets found"))
} else {
    out <- sprintfResults(out)
}

xtab <- xtable(out, caption="The top (lowest p-value) genesets found (uniquely) in each cluster")

print(xtab,
      include.rownames=FALSE,
      hline.after=hlines,
      file=ltab_file,
      tabular.environment="longtable",
      size="\\fontsize{6pt}{9pt}\\selectfont")
|
/R/summariseGenesets.R
|
permissive
|
MatthieuRouland/tenx
|
R
| false
| false
| 13,009
|
r
|
# Libraries ----
stopifnot(
require(optparse),
require(reshape2),
require(gplots),
require(openxlsx),
require(xtable),
require(tenxutils),
require(gsfisher)
)
# Options ----
option_list <- list(
make_option(c("--genesetdir"), default="none",
help="directory containing the genesets to aggregate"),
make_option(c("--nclusters"), type="integer", default=0,
help="the number of the clusters being analysed"),
make_option(c("--firstcluster"), type="integer", default=0,
help="clusters might not be zero based..."),
make_option(c("--pvaluethreshold"),type="double",default=0.05,
help="p value threshold for filtering sets"),
make_option(c("--padjustmethod"), default="BH",
help="The given method is passed to p.adjust"),
make_option(c("--useadjusted"), default=TRUE,
help="should adjusted p-values be used for the summary heatmap"),
make_option(c("--showcommon"), default=TRUE,
help=paste("Should genesets significantly enriched in all clusters",
"be shown in the summary heatmap")),
make_option(c("--mingenes"), type="integer", default=2,
help="min no. genes in foreground set"),
make_option(c("--maxgenes"), type="integer", default=500,
help="the maximum number of genes allowed per geneset"),
make_option(c("--minoddsratio"), type="double", default=1.5,
help="The minimum odds ratio."),
make_option(c("--gmt_names"), default="none",
help="comma separated list of names for the gmt files"),
make_option(c("--show_detailed"), default="none",
help=paste("comma separated list of names for which to make individual",
"per-sample/cluster plots")),
make_option(c("--clustertype"),default="cluster",
help="will be used e.g. in plot labels"),
make_option(c("--project"), default="SeuratAnalysis",
help="project name"),
make_option(c("--prefix"), default="genesets",
help="expected prefix for source files"),
make_option(c("--plotdirvar"), default="clusterGenesetsDir",
help="latex var containing name of the directory with the plots"),
make_option(c("--outprefix"), default="none",
help="prefix for outfiles")
)
opt <- parse_args(OptionParser(option_list=option_list))
cat("Running with options:\n")
print(opt)
## aggregate geneset types by worksheet
## all clusters in each worksheet
if(opt$gmt_names != "none")
{
gmt_names <- strsplit(opt$gmt_names,",")[[1]]
} else {
gmt_names <- c()
}
if(opt$show_detailed != "none")
{
show_detailed <- strsplit(opt$show_detailed,",")[[1]]
} else {
show_detailed <- c()
}
## TODO: Detect automatically
genesets = c("GO.BP","GO.MF","GO.CC",
"KEGG",
gmt_names)
## set up workbook.
wb <- createWorkbook()
ltabs <- list()
hmaps <- list()
tex <- c()
## Main loop over annotation types: for each geneset collection, gather the
## per-cluster enrichment results, write them to the workbook, accumulate
## latex summary tables (ltabs) and emit a summary dotplot (plus optional
## per-cluster detail plots).
for(geneset in genesets)
{
    ## NOTE(review): this reuses the name of the vector being iterated. R
    ## loops over a copy, so iteration is unaffected, but from here on
    ## `genesets` means "combined results table for the current annotation
    ## type" -- confusing shadowing worth renaming.
    genesets <- NULL
    message(paste("Processing:", geneset,"annotations."))
    begin=T
    ## Determine the inclusive range of cluster ids to read.
    if(opt$firstcluster==0)
    {
        first <- 0
        last <- opt$nclusters - 1
    }
    else {
        first <- opt$firstcluster
        last <- opt$nclusters}
    ## build a single table containing the results of the geneset tests for
    ## all clusters
    for(cluster in first:last)
    {
        message(paste("Working on cluster: ", cluster))
        fn = paste0(opt$genesetdir,"/",opt$prefix,".",cluster,".",geneset,".txt.gz")
        if(file.exists(fn))
        {
            temp = read.table(gzfile(fn),sep="\t",header=T,as.is=T,quote="")
            if(nrow(temp)>0)
            {
                temp$cluster <- cluster
            }
            else {
                message(paste0("zero rows for cluster: ",cluster))
            }
            ## First file seen initialises the table; later ones are appended.
            if(begin==T)
            {
                genesets <- temp
                begin <- F
            }
            else {
                genesets <- rbind(genesets,temp)
            }
        } else {
            message(paste("Skipping ",fn,"(file not found)",sep="\t"))
        }
    }
    make_plot = FALSE
    if(!is.null(genesets))
    {
        ## Filter out genesets we do not wish to consider
        filtered_genesets <- filterGenesets(genesets,
                                            min_foreground_genes = opt$mingenes,
                                            max_genes_geneset = opt$maxgenes,
                                            min_odds_ratio = opt$minoddsratio,
                                            padjust_method=opt$padjustmethod,
                                            use_adjusted_pvalues=opt$useadjusted,
                                            pvalue_threshold=opt$pvaluethreshold)
        results_table <- filtered_genesets
    } else { results_table <- NULL }
    if(!is.null(results_table) && nrow(results_table) > 0)
    {
        ## Count in how many clusters each geneset id passed the filter.
        id_tab <- table(results_table$geneset_id)
        results_table$n_clust_sig <- id_tab[results_table$geneset_id]
        results_table$n_clust_sig[is.na(results_table$n_clust_sig)] <- 0
        ## Sort by p value
        results_table <- results_table[order(results_table$cluster,results_table$p.val),]
        ## Tidy up the frame
        ## Put the most informative columns first (only those present).
        firstcols <- c("cluster","geneset_id","description",
                       "p.adj","p.val",
                       "odds.ratio",
                       "n_clust_sig","n_fg","n_bg")
        firstcols <- firstcols[firstcols %in% colnames(results_table)]
        othercols <- colnames(results_table)[!colnames(results_table) %in% firstcols]
        results_table <- results_table[,c(firstcols,othercols)]
        ## Round numeric columns for display: 3 significant figures below
        ## 1000, whole numbers above; integer-valued columns stay integer.
        numeric_cols <- colnames(results_table)[sapply(results_table, is.numeric)]
        for(numeric_col in numeric_cols)
        {
            ## set to 3 sf
            xx <- results_table[[numeric_col]]
            nas <- is.na(xx)
            if(any(abs(xx)==Inf))
            {
                ints <- FALSE
            } else {
                ints <- all((xx - round(xx)) == 0)
            }
            xx[xx<1000 & !nas & !ints] <- signif(xx[xx<1000 & !nas & !ints],digits=3)
            xx[xx>=1000 & !nas] <- round(xx[xx>=1000 & !nas],digits=0)
            xx[ints] <- as.integer(xx[ints])
            results_table[[numeric_col]] <- xx
        }
        ## Add the results to the worksheet
        addWorksheet(wb,geneset)
        setColWidths(wb,geneset,cols=1:ncol(results_table),widths=10)
        hs <- createStyle(textDecoration = "BOLD")
        writeData(wb, geneset, results_table, withFilter = T, headerStyle=hs)
        ## prepare for writing out a latex summary table (unique pathways)
        ## and geneset heatmaps (can be shared)
        for(clust in unique(as.character(results_table$cluster)))
        {
            temp <- results_table[results_table$cluster==clust,]
            nrows <- nrow(temp)
            if(nrows==0) { next }
            ## Keep only the top 5 (lowest p value) rows for this cluster.
            temp <- temp[1:min(nrows,5),]
            if(!"description" %in% colnames(temp))
            {
                temp$description <-temp$geneset_id
            }
            ## trim long descriptions
            maxl <- 45
            temp$description <- formatDescriptions(temp$description,
                                                   c("REACTOME_", "BIOCARTA_"),
                                                   maxl)
            temp_names <- colnames(temp)
            temp$type <- geneset
            temp <- temp[,c("type",temp_names)]
            temp <- temp[,c("type","description","p.val","p.adj",
                            "n_fg","odds.ratio","n_clust_sig")]
            colnames(temp) <- c("type","description","p.val","p.adj",
                                "n_fg","odds.ratio","n.clust")
            ## Append to (or start) this cluster's latex summary table.
            if(clust %in% names(ltabs))
            {
                ltabs[[clust]] <- rbind(ltabs[[clust]],temp)
            }
            else {
                ltabs[[clust]] <- temp
            }
        }
        if(nrow(results_table) > 0) { make_plot <- TRUE }
    }
    plotfn <- paste(opt$outprefix, geneset, sep=".")
    if(make_plot)
    {
        xx <- filtered_genesets
        ## Optionally hide genesets significant in every cluster ("common").
        if(!opt$showcommon)
        {
            tmp <- table(xx$geneset_id)
            xx <- xx[!xx$geneset_id %in% names(tmp)[tmp==opt$nclusters],]
        }
        ## Ranking score combines significance and effect size.
        xx$score <- -log10(xx$p.adj) * log2(xx$odds.ratio)
        genesets_to_show <- getSampleGenesets(xx,
                                              sort_by = "score",
                                              max_rows = 50)
        # add back adjusted p values
        ## Rows absent from the filtered set get p.adj = 1 (not significant).
        genesets$p.adj <- 1
        genesets[rownames(filtered_genesets),"p.adj"] <- filtered_genesets$p.adj
        message("making sample enrichment dotplot with n=",nrow(genesets)," genesets")
        gp <- sampleEnrichmentDotplot(genesets,
                                      selected_genesets = genesets_to_show,
                                      selection_col = "geneset_id",
                                      sample_levels =c(first:last),
                                      min_dot_size =1, max_dot_size = 6,
                                      maxl = 45,
                                      pvalue_threshold = opt$pvaluethreshold,
                                      title=geneset)
        print(plotfn)
        save_ggplots(plotfn,
                     gp,
                     width=8,
                     height=8)
        message("saved sample enrichement dotplot")
        per_sample_tex = c()
        ## Per-cluster "circle" plots only for annotation types requested via
        ## --show_detailed.
        if(geneset %in% show_detailed)
        {
            ## make the per sample plots
            for(cluster in unique(xx$cluster))
            {
                tmp <- xx[xx$cluster==cluster,]
                tmp <- tmp[rev(order(tmp$score)),]
                ## Cap the number of categories per plot for readability.
                max_n_cat = 150
                if(nrow(tmp)> max_n_cat) { tmp <- tmp[1:max_n_cat,] }
                if("description" %in% colnames(tmp))
                {
                    desc_col <- "description"
                } else { desc_col <- "geneset_id" }
                gp <- visualiseClusteredGenesets(tmp,
                                                 highlight=genesets_to_show[genesets_to_show %in% tmp$geneset_id],
                                                 desc_col=desc_col)
                detailed_plotfn <- paste(opt$outprefix,
                                         geneset, "circle_plot", cluster, sep=".")
                save_ggplots(detailed_plotfn,
                             gp,
                             width=10, height=10)
                caption <- paste("Cluster", cluster, geneset,
                                 "genesets clustered by similarity between over-represented genes.", sep=" ")
                per_sample_tex <- c(per_sample_tex,
                                    getFigureTex(basename(detailed_plotfn),
                                                 caption,
                                                 plot_dir_var=opt$plotdirvar))
            }
        }
    } else {
        # draw an empty plot with an error message
        pngfn <- paste(plotfn, "png", sep=".")
        png(pngfn,width=8,height=8,units="in",res=100)
        plot.new()
        text(0.5,0.5,paste0("no significant genesets for:\n",geneset))
        dev.off()
    }
    caption <- paste("Heatmap of the top", geneset, "genesets", sep=" ")
    tex <- c(tex,getSubsectionTex(geneset))
    tex <- c(tex,getFigureTex(basename(plotfn), caption,
                              plot_dir_var=opt$plotdirvar))
    ## NOTE(review): when make_plot is FALSE, per_sample_tex is never assigned
    ## in this iteration -- the first such pass would error with "object not
    ## found" and later passes would silently reuse the previous iteration's
    ## value. Consider initialising per_sample_tex <- c() at the top of the
    ## loop. TODO confirm.
    tex <- c(tex, "\n",
             per_sample_tex, "\n")
}
## Write out the accumulated latex figure includes.
fig_file <- paste(opt$outprefix,"figure.tex", sep=".")
writeTex(fig_file,tex)
## Save the workbook (one worksheet per annotation type with results).
saveWorkbook(wb,
             file=paste(opt$outprefix, "xlsx", sep="."),
             overwrite=T)
## Stack the per-cluster summary tables into one frame, recording the
## cumulative row count after each cluster so horizontal lines can separate
## clusters in the final latex table.
begin=T
hlines <- c()
for(cluster in names(ltabs))
{
    temp <- ltabs[[cluster]]
    temp_names <- colnames(temp)
    temp$cluster <- cluster
    temp <- temp[,c("cluster",temp_names)]
    if(begin==T)
    {
        out <- temp
        r <- nrow(temp)
        hlines <- r
        begin <- F
    }
    else {
        out <- rbind(out, temp)
        r <- r + nrow(temp)
        hlines <- c(hlines,r)
    }
}
ltab_file <- paste(opt$outprefix,"table.tex", sep=".")
## If no cluster produced any significant genesets, emit a one-cell
## placeholder table instead; otherwise apply final number formatting.
if(!exists("out"))
{
    out <- data.frame(x=c("no significantly enriched genesets found"))
} else {
    out <- sprintfResults(out)
}
xtab <- xtable(out, caption="The top (lowest p-value) genesets found (uniquely) in each cluster")
print(xtab,
      include.rownames=F,
      hline.after=hlines,
      file=ltab_file,
      tabular.environment="longtable",
      size="\\fontsize{6pt}{9pt}\\selectfont")
|
## Fit a Poisson regression with Stan to Texas school out-of-school-suspension
## counts, using only the uncensored observations (ACTIONS > 0), then show
## trace plots of the regression coefficients.
library(readr)
library(rstan)

## NOTE(review): hard-coded absolute path ties the script to one machine --
## consider a relative path or here::here() for portability.
setwd("C:\\Users\\Zicheng Cai\\Dropbox\\Courses\\18SP\\SDS383D\\Section3\\MATLAB\\3.9")
tea_discipline_oss <- read_csv("tea_discipline_oss.csv")
#View(tea_discipline_oss)

## Keep only rows with positive action counts (drops censored records --
## assumes non-positive ACTIONS encodes censoring; TODO confirm).
uncensored_data <- subset(tea_discipline_oss, ACTIONS > 0)

## Recode gender to a 0/1 integer: FEMALE -> 0, MALE -> 1. Any other value
## stays a non-numeric string and becomes NA via as.integer() (with warning).
gender <- uncensored_data$SEXX
gender[gender == "FEMALE"] <- "0"
gender[gender == "MALE"] <- "1"
gender <- as.integer(gender)

## Assemble the covariates, response and intercept column, then convert to
## the named list format rstan expects, adding the sample size N.
tea <- data.frame(grade = uncensored_data$GRADE,
                  se_attend = uncensored_data$SE_ATTEND,
                  gender = gender,
                  y = uncensored_data$ACTIONS)
tea$intercept <- 1
tea <- as.list(tea)
tea$N <- nrow(uncensored_data)

## Read the Stan model source and sample: 3 chains of 3000 iterations each
## (1000 warmup), thinned by 10.
fileName <- "poisson_1.stan"
stan_code <- readChar(fileName, file.info(fileName)$size)
resStan <- stan(model_code = stan_code, data = tea,
                chains = 3, iter = 3000, warmup = 1000, thin = 10)

traceplot(resStan, pars = c("beta"), inc_warmup = FALSE) #set inc_warmup = TRUE to see burn in
|
/Section3/MATLAB/3.9/exercise39_1.R
|
no_license
|
caizicheng/SDS383D
|
R
| false
| false
| 827
|
r
|
## Poisson regression via Stan on Texas school discipline data, restricted
## to uncensored counts (ACTIONS > 0).
library(readr)
library(rstan)
## NOTE(review): hard-coded absolute working directory -- not portable.
setwd("C:\\Users\\Zicheng Cai\\Dropbox\\Courses\\18SP\\SDS383D\\Section3\\MATLAB\\3.9")
tea_discipline_oss <- read_csv("tea_discipline_oss.csv")
#View(tea_discipline_oss)
## Keep only rows with positive action counts (drops censored records).
uncensored_data = subset(tea_discipline_oss,ACTIONS>0)
## Recode gender to 0/1 (FEMALE = 0, MALE = 1); any other value becomes NA
## when the remaining non-numeric strings pass through as.integer().
gender = uncensored_data$SEXX
gender[gender == 'FEMALE'] = 0
gender[gender == 'MALE'] = 1
gender = as.integer(gender)
## Assemble covariates + intercept, then convert to the list format rstan
## expects, adding the sample size N.
tea <-data.frame(grade=uncensored_data$GRADE,se_attend=uncensored_data$SE_ATTEND,gender=gender,y=uncensored_data$ACTIONS)
tea$intercept =1
tea<-as.list(tea)
tea$N<-nrow(uncensored_data)
## Read the Stan model source and sample (3 chains, 3000 iters, 1000 warmup,
## thin 10).
fileName <- "poisson_1.stan"
stan_code <- readChar(fileName, file.info(fileName)$size)
resStan<-stan(model_code=stan_code,data=tea,chains=3,iter=3000,warmup=1000,thin=10)
traceplot(resStan, pars = c("beta"), inc_warmup = FALSE) #set inc_warmup = TRUE to see burn in
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{us_change}
\alias{us_change}
\title{Percentage changes in economic variables in the USA.}
\format{Time series of class `tsibble`}
\source{
Federal Reserve Bank of St Louis.
}
\description{
\code{us_change} is a quarterly `tsibble` containing percentage changes in
quarterly personal consumption expenditure, personal disposable income,
production, savings and the unemployment rate for the US, 1970 to 2016.
Original $ values were in chained 2012 US dollars.
}
\examples{
us_change
}
\keyword{datasets}
|
/man/us_change.Rd
|
no_license
|
nisargvp/fpp3-package
|
R
| false
| true
| 611
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{us_change}
\alias{us_change}
\title{Percentage changes in economic variables in the USA.}
\format{Time series of class `tsibble`}
\source{
Federal Reserve Bank of St Louis.
}
\description{
\code{us_change} is a quarterly `tsibble` containing percentage changes in
quarterly personal consumption expenditure, personal disposable income,
production, savings and the unemployment rate for the US, 1970 to 2016.
Original $ values were in chained 2012 US dollars.
}
\examples{
us_change
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rDNA.R
\name{dna_scale1dbin}
\alias{dna_scale1dbin}
\title{One-dimensional binary scaling from a DNA connection}
\usage{
dna_scale1dbin(connection, variable1 = "organization",
variable2 = "concept", qualifier = "agreement", threshold = NULL,
theta_constraints = NULL, mcmc_iterations = 20000,
mcmc_burnin = 1000, mcmc_thin = 10, mcmc_normalize = FALSE,
theta_start = NA, alpha_start = NA, beta_start = NA,
theta_prior_mean = 0, theta_prior_variance = 1,
alpha_beta_prior_mean = 0, alpha_beta_prior_variance = 0.25,
store_variables = "both", drop_constant_concepts = FALSE,
drop_min_actors = 1, drop_min_concepts = 2, verbose = TRUE,
seed = 12345, ...)
}
\arguments{
\item{connection}{A \code{dna_connection} object created by the
\link{dna_connection} function.}
\item{variable1}{The first variable for the scaling construction (see
\link{dna_network}). Defaults to \code{"organization"}.}
\item{variable2}{The second variable for the scaling construction (see
\link{dna_network}). Defaults to \code{"concept"}.}
\item{qualifier}{The qualifier variable for the scaling construction (see
\link{dna_network}). Defaults to \code{"agreement"}.}
\item{threshold}{Numeric value that specifies when a mixed position can be
considered as agreement or disagreement. If e.g. one actor has 60 percent
of agreeing and 40 percent of disagreeing statements towards a concept, a
\code{threshold} of 0.51 will recode the actor position on this concept as
  "agreement". The same also applies to disagreeing statements. If one
actor has 60 percent of disagreeing and 40 percent of agreeing statements,
a \code{threshold} of 0.51 will recode the actor position on this concept
as "disagreement". All values in between the \code{threshold} (e.g., 55
percent agreement and 45 percent of disagreement and a threshold of 0.6)
  will be recoded as \code{NA}. If it is set to \code{NULL}, all "mixed"
positions of actors will be recoded as \code{NA}. Must be strictly
positive.}
\item{theta_constraints}{A list specifying the constraints on the actor
parameter. Three forms of constraints are possible:
\code{actorname = value}, which will constrain an actor to be equal to the
specified value (e.g. \code{0}), \code{actorname = "+"}, which will
constrain the actor to be positively scaled and \code{actorname = "-"},
which will constrain the actor to be negatively scaled (see example).}
\item{mcmc_iterations}{The number of iterations for the sampler.}
\item{mcmc_burnin}{The number of burn-in iterations for the sampler.}
\item{mcmc_thin}{The thinning interval for the sampler. Iterations must be
divisible by the thinning interval.}
\item{mcmc_normalize}{Logical. Should the MCMC output be normalized? If
\code{TRUE}, samples are normalized to a mean of \code{0} and a standard
deviation of \code{1}.}
\item{theta_start}{The \code{starting values} for the actor parameters. Can
either be a scalar or a column vector with as many elements as the number
of actors included in the scaling. If set to the default \code{NA},
\code{starting values} will be set according to an eigenvalue-eigenvector
decomposition of the actor agreement score.}
\item{alpha_start}{The \code{starting values} for the concept difficulty
parameters. Can either be a scalar or a column vector with as many
elements as the number of actors included in the scaling. If set to the
default \code{NA}, \code{starting values} will be set according to a
series of probit regressions that condition the starting values of the
difficulty parameters.}
\item{beta_start}{The \code{starting values} for the concept discrimination
parameters. Can either be a scalar or a column vector with as many
elements as the number of actors included in the scaling. If set to the
default \code{NA}, \code{starting values} will be set according to a
series of probit regressions that condition the \code{starting values} of
the discrimination parameters.}
\item{theta_prior_mean}{A scalar value specifying the prior mean of the
actor parameters.}
\item{theta_prior_variance}{A scalar value specifying the prior inverse
variances of the actor parameters.}
\item{alpha_beta_prior_mean}{Mean of the difficulty and discrimination
parameters. Can either be a scalar or a 2-vector. If a scalar, both means
will be set according to the specified value.}
\item{alpha_beta_prior_variance}{Inverse variance of the difficulty and
discrimination parameters. Can either be a scalar or a 2-vector. If a
scalar, both means will be set according to the specified value.}
\item{store_variables}{A character vector indicating which variables should
be stored from the scaling. Can either take the value of the character
vector indicated in \code{variable1} or \code{variable2} or \code{"both"}
to store both variables. Note that saving both variables can impact the
speed of the scaling. Defaults to \code{"both"}.}
\item{drop_constant_concepts}{Logical. Should concepts that have no
variation be deleted before the scaling? Defaults to \code{FALSE}.}
\item{drop_min_actors}{A numeric value specifying the minimum number of
concepts actors should have mentioned to be included in the scaling.
Defaults to \code{1}.}
\item{drop_min_concepts}{A numeric value specifying the minimum number a
concept should have been jointly mentioned by actors. Defaults to \code{2}.}
\item{verbose}{A boolean or numeric value indicating whether the iterations
of the scaling should be printed to the R console. If set to a numeric
value, every \code{verboseth} iteration will be printed. If set to
\code{TRUE}, \code{verbose} will print the total of iterations and burn-in
divided by \code{100}.}
\item{seed}{The random seed for the scaling.}
\item{...}{Additional arguments passed to \link{dna_network}. Actors can
e.g. be removed with the \code{excludeValues} arguments. The scaling can
also be applied to a specific time slice by using \code{start.date} and
\code{stop.date}.}
}
\description{
Scale ideological positions of two variables (e.g., organizations and
concepts) from a DNA connection by using Markov Chain Monte Carlo for binary
one-dimensional Item Response Theory. This is one of the four scaling
functions. For one-dimensional ordinal scaling, see \link{dna_scale1dord},
for two-dimensional binary scaling, see \link{dna_scale2dbin} and for
two-dimensional ordinal scaling \link{dna_scale2dord}.
}
\details{
This function is a convenience wrapper for the \link[MCMCpack]{MCMCirt1d}
function. Using Markov Chain Monte Carlo (MCMC), \code{dna_scale1dbin}
generates a sample from the posterior distribution using standard Gibbs
sampling. For the model form and further help for the scaling arguments, see
\link[MCMCpack]{MCMCirt1d}.
As in a two-mode network in \link{dna_network}, two variables have to be
provided for the scaling. The first variable corresponds to the rows of a
two-mode network and usually entails actors (e.g., \code{"organizations"}),
while the second variable is equal to the columns of a two-mode network,
typically expressed by \code{"concepts"}. The \code{dna_scale} functions
use \code{"actors"} and \code{"concepts"} as synonyms for \code{variable1}
and \code{variable2}. However, the scaling is not restricted to
\code{"actors"} and \code{"concepts"} but depends on what you provide in
\code{variable1} or \code{variable2}.
For a binary qualifier, \code{dna_scale1dbin} internally uses the
\code{combine} qualifier aggregation and then recodes the values into
\code{0} for disagreement, \code{1} for agreement and \code{NA} for mixed
positions and non-mentions of concepts. Integer qualifiers are also recoded
into \code{0} and \code{1} by rescaling the qualifier values between
\code{0} and \code{1}. You can further relax the recoding of \code{NA} values by setting a
\code{threshold} which lets you decide at which percentage of agreement and
disagreement an actor position on a concept can be considered as
agreement/disagreement or mixed position.
The argument \code{drop_min_actors} excludes actors with only a limited
number of concepts used. Limited participation of actors in a debate can
impact the scaling of the ideal points, as actors with only few mentions of
concepts convey limited information on their ideological position. The same
can also be done for concepts with the argument \code{drop_min_concepts}.
Concepts that have been rarely mentioned do not strongly discriminate the
ideological positions of actors and can, therefore, impact the accuracy of
the scaling. Reducing the number of actors of concepts to be scaled hence
improves the precision of the ideological positions for both variables and
the scaling itself. Another possibility to reduce the number of concepts is
to use \code{drop_constant_concepts}, which will reduce concepts not having
any variation in the agreement/disagreement structure of actors. This means
that all concepts will be dropped which have only agreeing or disagreeing
statements.
As \code{dna_scale1dbin} implements a Bayesian Item Response Theory
approach, \code{priors} and \code{starting values} can be set on the actor
and concept parameters. Changing the default \code{prior} values can often
help you to achieve better results. Constraints on the actor parameters can
also be specified to help identifying the model and to indicate in which
direction ideological positions of actors and concepts run. The returned
MCMC output can also be post-processed by normalizing the samples for each
iteration with \code{mcmc_normalize}. Normalization can be a sufficient
way of identifying one-dimensional ideal point models.
To plot the resulting ideal points of actors and concepts, you can use the
\link{dna_plotScale} function. To assess if the returned MCMC chain has
converged to its stationary distribution, please use
\link{dna_convergenceScale}. The evaluation of convergence is essential to
report conclusions based on accurate parameter estimates. Achieving chain
convergence often requires setting the iterations of the MCMC chain to
several million.
}
\examples{
\dontrun{
dna_init()
conn <- dna_connection(dna_sample())
dna_scale <- dna_scale1dbin(
conn,
variable1 = "organization",
variable2 = "concept",
qualifier = "agreement",
threshold = 0.51,
theta_constraints = list(
`National Petrochemical & Refiners Association` = "+",
`Alliance to Save Energy` = "-"),
mcmc_iterations = 20000,
mcmc_burnin = 2000,
mcmc_thin = 10,
mcmc_normalize = TRUE,
theta_prior_mean = 0,
theta_prior_variance = 1,
alpha_beta_prior_mean = 0,
alpha_beta_prior_variance = 0.25,
store_variables = "both",
drop_constant_concepts = FALSE,
drop_min_actors = 1,
verbose = TRUE,
seed = 12345
)
}
}
\author{
Tim Henrichsen, Johannes B. Gruber
}
|
/rDNA/man/dna_scale1dbin.Rd
|
no_license
|
marcmelliger/dna
|
R
| false
| true
| 10,784
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rDNA.R
\name{dna_scale1dbin}
\alias{dna_scale1dbin}
\title{One-dimensional binary scaling from a DNA connection}
\usage{
dna_scale1dbin(connection, variable1 = "organization",
variable2 = "concept", qualifier = "agreement", threshold = NULL,
theta_constraints = NULL, mcmc_iterations = 20000,
mcmc_burnin = 1000, mcmc_thin = 10, mcmc_normalize = FALSE,
theta_start = NA, alpha_start = NA, beta_start = NA,
theta_prior_mean = 0, theta_prior_variance = 1,
alpha_beta_prior_mean = 0, alpha_beta_prior_variance = 0.25,
store_variables = "both", drop_constant_concepts = FALSE,
drop_min_actors = 1, drop_min_concepts = 2, verbose = TRUE,
seed = 12345, ...)
}
\arguments{
\item{connection}{A \code{dna_connection} object created by the
\link{dna_connection} function.}
\item{variable1}{The first variable for the scaling construction (see
\link{dna_network}). Defaults to \code{"organization"}.}
\item{variable2}{The second variable for the scaling construction (see
\link{dna_network}). Defaults to \code{"concept"}.}
\item{qualifier}{The qualifier variable for the scaling construction (see
\link{dna_network}). Defaults to \code{"agreement"}.}
\item{threshold}{Numeric value that specifies when a mixed position can be
considered as agreement or disagreement. If e.g. one actor has 60 percent
of agreeing and 40 percent of disagreeing statements towards a concept, a
\code{threshold} of 0.51 will recode the actor position on this concept as
  "agreement". The same also applies to disagreeing statements. If one
actor has 60 percent of disagreeing and 40 percent of agreeing statements,
a \code{threshold} of 0.51 will recode the actor position on this concept
as "disagreement". All values in between the \code{threshold} (e.g., 55
percent agreement and 45 percent of disagreement and a threshold of 0.6)
  will be recoded as \code{NA}. If it is set to \code{NULL}, all "mixed"
positions of actors will be recoded as \code{NA}. Must be strictly
positive.}
\item{theta_constraints}{A list specifying the constraints on the actor
parameter. Three forms of constraints are possible:
\code{actorname = value}, which will constrain an actor to be equal to the
specified value (e.g. \code{0}), \code{actorname = "+"}, which will
constrain the actor to be positively scaled and \code{actorname = "-"},
which will constrain the actor to be negatively scaled (see example).}
\item{mcmc_iterations}{The number of iterations for the sampler.}
\item{mcmc_burnin}{The number of burn-in iterations for the sampler.}
\item{mcmc_thin}{The thinning interval for the sampler. Iterations must be
divisible by the thinning interval.}
\item{mcmc_normalize}{Logical. Should the MCMC output be normalized? If
\code{TRUE}, samples are normalized to a mean of \code{0} and a standard
deviation of \code{1}.}
\item{theta_start}{The \code{starting values} for the actor parameters. Can
either be a scalar or a column vector with as many elements as the number
of actors included in the scaling. If set to the default \code{NA},
\code{starting values} will be set according to an eigenvalue-eigenvector
decomposition of the actor agreement score.}
\item{alpha_start}{The \code{starting values} for the concept difficulty
parameters. Can either be a scalar or a column vector with as many
elements as the number of actors included in the scaling. If set to the
default \code{NA}, \code{starting values} will be set according to a
series of probit regressions that condition the starting values of the
difficulty parameters.}
\item{beta_start}{The \code{starting values} for the concept discrimination
parameters. Can either be a scalar or a column vector with as many
elements as the number of actors included in the scaling. If set to the
default \code{NA}, \code{starting values} will be set according to a
series of probit regressions that condition the \code{starting values} of
the discrimination parameters.}
\item{theta_prior_mean}{A scalar value specifying the prior mean of the
actor parameters.}
\item{theta_prior_variance}{A scalar value specifying the prior inverse
variances of the actor parameters.}
\item{alpha_beta_prior_mean}{Mean of the difficulty and discrimination
parameters. Can either be a scalar or a 2-vector. If a scalar, both means
will be set according to the specified value.}
\item{alpha_beta_prior_variance}{Inverse variance of the difficulty and
discrimination parameters. Can either be a scalar or a 2-vector. If a
scalar, both means will be set according to the specified value.}
\item{store_variables}{A character vector indicating which variables should
be stored from the scaling. Can either take the value of the character
vector indicated in \code{variable1} or \code{variable2} or \code{"both"}
to store both variables. Note that saving both variables can impact the
speed of the scaling. Defaults to \code{"both"}.}
\item{drop_constant_concepts}{Logical. Should concepts that have no
variation be deleted before the scaling? Defaults to \code{FALSE}.}
\item{drop_min_actors}{A numeric value specifying the minimum number of
concepts actors should have mentioned to be included in the scaling.
Defaults to \code{1}.}
\item{drop_min_concepts}{A numeric value specifying the minimum number a
concept should have been jointly mentioned by actors. Defaults to \code{2}.}
\item{verbose}{A boolean or numeric value indicating whether the iterations
of the scaling should be printed to the R console. If set to a numeric
value, every \code{verboseth} iteration will be printed. If set to
\code{TRUE}, \code{verbose} will print the total of iterations and burn-in
divided by \code{100}.}
\item{seed}{The random seed for the scaling.}
\item{...}{Additional arguments passed to \link{dna_network}. Actors can
e.g. be removed with the \code{excludeValues} arguments. The scaling can
also be applied to a specific time slice by using \code{start.date} and
\code{stop.date}.}
}
\description{
Scale ideological positions of two variables (e.g., organizations and
concepts) from a DNA connection by using Markov Chain Monte Carlo for binary
one-dimensional Item Response Theory. This is one of the four scaling
functions. For one-dimensional ordinal scaling, see \link{dna_scale1dord},
for two-dimensional binary scaling, see \link{dna_scale2dbin} and for
two-dimensional ordinal scaling \link{dna_scale2dord}.
}
\details{
This function is a convenience wrapper for the \link[MCMCpack]{MCMCirt1d}
function. Using Markov Chain Monte Carlo (MCMC), \code{dna_scale1dbin}
generates a sample from the posterior distribution using standard Gibbs
sampling. For the model form and further help for the scaling arguments, see
\link[MCMCpack]{MCMCirt1d}.
As in a two-mode network in \link{dna_network}, two variables have to be
provided for the scaling. The first variable corresponds to the rows of a
two-mode network and usually entails actors (e.g., \code{"organizations"}),
while the second variable is equal to the columns of a two-mode network,
typically expressed by \code{"concepts"}. The \code{dna_scale} functions
use \code{"actors"} and \code{"concepts"} as synonyms for \code{variable1}
and \code{variable2}. However, the scaling is not restricted to
\code{"actors"} and \code{"concepts"} but depends on what you provide in
\code{variable1} or \code{variable2}.
For a binary qualifier, \code{dna_scale1dbin} internally uses the
\code{combine} qualifier aggregation and then recodes the values into
\code{0} for disagreement, \code{1} for agreement and \code{NA} for mixed
positions and non-mentions of concepts. Integer qualifiers are also recoded
into \code{0} and \code{1} by rescaling the qualifier values between
\code{0} and \code{1}. You can further relax the recoding of \code{NA} values by setting a
\code{threshold} which lets you decide at which percentage of agreement and
disagreement an actor position on a concept can be considered as
agreement/disagreement or mixed position.
The argument \code{drop_min_actors} excludes actors with only a limited
number of concepts used. Limited participation of actors in a debate can
impact the scaling of the ideal points, as actors with only few mentions of
concepts convey limited information on their ideological position. The same
can also be done for concepts with the argument \code{drop_min_concepts}.
Concepts that have been rarely mentioned do not strongly discriminate the
ideological positions of actors and can, therefore, impact the accuracy of
the scaling. Reducing the number of actors of concepts to be scaled hence
improves the precision of the ideological positions for both variables and
the scaling itself. Another possibility to reduce the number of concepts is
to use \code{drop_constant_concepts}, which will reduce concepts not having
any variation in the agreement/disagreement structure of actors. This means
that all concepts will be dropped which have only agreeing or disagreeing
statements.
As \code{dna_scale1dbin} implements a Bayesian Item Response Theory
approach, \code{priors} and \code{starting values} can be set on the actor
and concept parameters. Changing the default \code{prior} values can often
help you to achieve better results. Constraints on the actor parameters can
also be specified to help identifying the model and to indicate in which
direction ideological positions of actors and concepts run. The returned
MCMC output can also be post-processed by normalizing the samples for each
iteration with \code{mcmc_normalize}. Normalization can be a sufficient
way of identifying one-dimensional ideal point models.
To plot the resulting ideal points of actors and concepts, you can use the
\link{dna_plotScale} function. To assess if the returned MCMC chain has
converged to its stationary distribution, please use
\link{dna_convergenceScale}. The evaluation of convergence is essential to
report conclusions based on accurate parameter estimates. Achieving chain
convergence often requires setting the iterations of the MCMC chain to
several million.
}
\examples{
\dontrun{
dna_init()
conn <- dna_connection(dna_sample())
dna_scale <- dna_scale1dbin(
conn,
variable1 = "organization",
variable2 = "concept",
qualifier = "agreement",
threshold = 0.51,
theta_constraints = list(
`National Petrochemical & Refiners Association` = "+",
`Alliance to Save Energy` = "-"),
mcmc_iterations = 20000,
mcmc_burnin = 2000,
mcmc_thin = 10,
mcmc_normalize = TRUE,
theta_prior_mean = 0,
theta_prior_variance = 1,
alpha_beta_prior_mean = 0,
alpha_beta_prior_variance = 0.25,
store_variables = "both",
drop_constant_concepts = FALSE,
drop_min_actors = 1,
verbose = TRUE,
seed = 12345
)
}
}
\author{
Tim Henrichsen, Johannes B. Gruber
}
|
#' waltplot
#'
#' A function to grid all the waltplots.
#'
#' @param vars A vector of column names to be plotted.
#' @param smoothed A parameter of whether to use histogram (0) or violinplots (1) in the waltplot grid.
#' @param data The data frame.
#' @export
#'
#' waltplot
#'
#' A function to grid all the waltplots.
#'
#' @param vars A vector of column names to be plotted.
#' @param smoothed A vector parallel to \code{vars}: use a histogram (0) or a
#'   violin plot (1) for the corresponding panel.
#' @param data The data frame.
#' @return A cowplot grid of the individual panels, labelled A, B, C, ...
#' @export
#'
waltplot <- function(vars, smoothed, data) {
  # Preallocate the panel list instead of growing NULL in the loop.
  p <- vector("list", length(vars))
  # seq_along() is safe for empty input (1:length(vars) would yield c(1, 0)).
  for (i in seq_along(vars)) {
    if (smoothed[i] == 0) {
      p[[i]] <- suppressWarnings(walt.histogram(data, vars[i]))
    } else {
      p[[i]] <- walt.violinplot(data, vars[i])
    }
  }
  # Arrange the panels in a roughly square grid.
  n_cols <- round(sqrt(length(vars)), digits = 0)
  suppressWarnings(cowplot::plot_grid(
    plotlist = p,
    labels = LETTERS[seq_along(vars)],
    ncol = n_cols,
    nrow = ceiling(length(vars) / n_cols)
  ))
}
|
/R/waltplot.R
|
no_license
|
cognopod/walter
|
R
| false
| false
| 801
|
r
|
#' waltplot
#'
#' A function to grid all the waltplots.
#'
#' @param vars A vector of column names to be plotted.
#' @param smoothed A parameter of whether to use histogram (0) or violinplots (1) in the waltplot grid.
#' @param data The data frame.
#' @export
#'
#' waltplot
#'
#' Arrange one waltplot per requested column into a single grid.
#'
#' @param vars A vector of column names to be plotted.
#' @param smoothed A parameter of whether to use histogram (0) or violinplots (1) in the waltplot grid.
#' @param data The data frame.
#' @export
#'
waltplot <- function (vars, smoothed, data) {
  # Build one panel per column: histogram when smoothed == 0, violin otherwise.
  panels <- lapply(1:length(vars), function(idx) {
    if (smoothed[idx] == 0) {
      suppressWarnings(walt.histogram(data, vars[idx]))
    } else {
      walt.violinplot(data, vars[idx])
    }
  })
  n_panels <- length(vars)
  # Roughly square layout: columns = rounded square root of the panel count.
  grid_cols <- round(sqrt(n_panels), digits = 0)
  grid_rows <- ceiling(n_panels / round(sqrt(n_panels)))
  suppressWarnings(cowplot::plot_grid(
    plotlist = panels,
    labels = c(LETTERS[1:n_panels]),
    ncol = grid_cols,
    nrow = grid_rows
  ))
}
|
/Modélisation arima direct.R
|
no_license
|
isma-yod/Les-Cours-du-master-Statistiques-et-Econom-trie
|
R
| false
| false
| 9,369
|
r
| ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitMPWR.R
\name{fitMPWR}
\alias{fitMPWR}
\title{fitMPWR implements an optimized dynamic programming algorithm to fit a
MPWR model.}
\usage{
fitMPWR(X, Y, K, p = 3)
}
\arguments{
\item{X}{Numeric vector of length \emph{m} representing the covariates/inputs
\eqn{x_{1},\dots,x_{m}}.}
\item{Y}{Matrix of size \eqn{(m, d)} representing a \eqn{d} dimension
function of \code{X} observed at points \eqn{1,\dots,m}. \code{Y} is the observed
response/output.}
\item{K}{The number of regimes/segments (PWR components).}
\item{p}{Optional. The order of the polynomial regression. By default, \code{p} is
set at 3.}
}
\value{
fitMPWR returns an object of class \link{ModelMPWR}.
}
\description{
fitMPWR is used to fit a Multivariate Piecewise Regression (MPWR) model
by maximum-likelihood via an optimized dynamic programming algorithm. The
estimation performed by the dynamic programming algorithm provides an optimal
segmentation of the time series.
}
\details{
fitMPWR function implements an optimized dynamic programming
algorithm of the MPWR model. This function starts with the calculation of
the "cost matrix" then it estimates the transition points given \code{K} the
number of regimes thanks to the method \code{computeDynamicProgram} (method of
the class \link{ParamMPWR}).
}
\examples{
data(toydataset)
x <- toydataset$x
Y <- as.matrix(toydataset[,c("y1", "y2", "y3")])
mpwr <- fitMPWR(X = x, Y = Y, K = 5, p = 1)
mpwr$summary()
mpwr$plot()
}
\seealso{
\link{ModelMPWR}, \link{ParamMPWR}, \link{StatMPWR}
}
|
/man/fitMPWR.Rd
|
no_license
|
fchamroukhi/MPWR_r
|
R
| false
| true
| 1,591
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitMPWR.R
\name{fitMPWR}
\alias{fitMPWR}
\title{fitMPWR implements an optimized dynamic programming algorithm to fit a
MPWR model.}
\usage{
fitMPWR(X, Y, K, p = 3)
}
\arguments{
\item{X}{Numeric vector of length \emph{m} representing the covariates/inputs
\eqn{x_{1},\dots,x_{m}}.}
\item{Y}{Matrix of size \eqn{(m, d)} representing a \eqn{d} dimension
function of \code{X} observed at points \eqn{1,\dots,m}. \code{Y} is the observed
response/output.}
\item{K}{The number of regimes/segments (PWR components).}
\item{p}{Optional. The order of the polynomial regression. By default, \code{p} is
set at 3.}
}
\value{
fitMPWR returns an object of class \link{ModelMPWR}.
}
\description{
fitMPWR is used to fit a Multivariate Piecewise Regression (MPWR) model
by maximum-likelihood via an optimized dynamic programming algorithm. The
estimation performed by the dynamic programming algorithm provides an optimal
segmentation of the time series.
}
\details{
fitMPWR function implements an optimized dynamic programming
algorithm of the MPWR model. This function starts with the calculation of
the "cost matrix" then it estimates the transition points given \code{K} the
number of regimes thanks to the method \code{computeDynamicProgram} (method of
the class \link{ParamMPWR}).
}
\examples{
data(toydataset)
x <- toydataset$x
Y <- as.matrix(toydataset[,c("y1", "y2", "y3")])
mpwr <- fitMPWR(X = x, Y = Y, K = 5, p = 1)
mpwr$summary()
mpwr$plot()
}
\seealso{
\link{ModelMPWR}, \link{ParamMPWR}, \link{StatMPWR}
}
|
context("class level lsm_c_ed metric")

# Compute the class-level edge density once; reused by the column tests below.
landscapemetrics_class_landscape_value <- lsm_c_ed(landscape)

test_that("lsm_c_ed is typestable", {
    # expect_is() is deprecated in testthat 3e; expect_s3_class() is the
    # supported replacement and performs the same inherits() check.
    expect_s3_class(lsm_c_ed(landscape), "tbl_df")
    expect_s3_class(lsm_c_ed(landscape_stack), "tbl_df")
    expect_s3_class(lsm_c_ed(landscape_brick), "tbl_df")
    expect_s3_class(lsm_c_ed(landscape_list), "tbl_df")
})

test_that("lsm_c_ed returns the desired number of columns", {
    expect_equal(ncol(landscapemetrics_class_landscape_value), 6)
})

test_that("lsm_c_ed returns in every column the correct type", {
    expect_type(landscapemetrics_class_landscape_value$layer, "integer")
    expect_type(landscapemetrics_class_landscape_value$level, "character")
    expect_type(landscapemetrics_class_landscape_value$class, "integer")
    expect_type(landscapemetrics_class_landscape_value$id, "integer")
    expect_type(landscapemetrics_class_landscape_value$metric, "character")
    expect_type(landscapemetrics_class_landscape_value$value, "double")
})
|
/tests/testthat/test-lsm-c-ed.R
|
no_license
|
cran/landscapemetrics
|
R
| false
| false
| 1,006
|
r
|
context("class level lsm_c_ed metric")

# One shared result object so the shape/type tests below do not recompute.
class_ed_result <- lsm_c_ed(landscape)

test_that("lsm_c_ed is typestable", {
    # Every supported input flavour must yield a tibble.
    expect_is(lsm_c_ed(landscape), "tbl_df")
    expect_is(lsm_c_ed(landscape_stack), "tbl_df")
    expect_is(lsm_c_ed(landscape_brick), "tbl_df")
    expect_is(lsm_c_ed(landscape_list), "tbl_df")
})

test_that("lsm_c_ed returns the desired number of columns", {
    expect_equal(ncol(class_ed_result), 6)
})

test_that("lsm_c_ed returns in every column the correct type", {
    expect_type(class_ed_result$layer, "integer")
    expect_type(class_ed_result$level, "character")
    expect_type(class_ed_result$class, "integer")
    expect_type(class_ed_result$id, "integer")
    expect_type(class_ed_result$metric, "character")
    expect_type(class_ed_result$value, "double")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{expandLimits}
\alias{expandLimits}
\title{Expand the limits of a numeric range}
\usage{
expandLimits(x, factor = 0.1)
}
\arguments{
\item{x}{A numeric vector.}
\item{factor}{The factor to expand the limits with.}
}
\value{
A list with the expanded upper and lower limits
}
\description{
Expands the lower and upper limits of \code{x} by the given \code{factor}
(default 0.1) and returns them as a list.
}
|
/man/expandLimits.Rd
|
no_license
|
beatnaut/remaputils
|
R
| false
| true
| 382
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{expandLimits}
\alias{expandLimits}
\title{Expand the limits of a numeric range}
\usage{
expandLimits(x, factor = 0.1)
}
\arguments{
\item{x}{A numeric vector.}
\item{factor}{The factor to expand the limits with.}
}
\value{
A list with the expanded upper and lower limits
}
\description{
Expands the lower and upper limits of \code{x} by the given \code{factor}
(default 0.1) and returns them as a list.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dfareporting_functions.R
\name{subaccounts.insert}
\alias{subaccounts.insert}
\title{Inserts a new subaccount.}
\usage{
subaccounts.insert(Subaccount, profileId)
}
\arguments{
\item{Subaccount}{The \link{Subaccount} object to pass to this method}
\item{profileId}{User profile ID associated with this request}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/dfatrafficking
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/dfatrafficking"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/doubleclick-advertisers/reporting/}{Google Documentation}
Other Subaccount functions: \code{\link{Subaccount}},
\code{\link{subaccounts.patch}},
\code{\link{subaccounts.update}}
}
|
/googledfareportingv25beta1.auto/man/subaccounts.insert.Rd
|
permissive
|
Phippsy/autoGoogleAPI
|
R
| false
| true
| 1,046
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dfareporting_functions.R
\name{subaccounts.insert}
\alias{subaccounts.insert}
\title{Inserts a new subaccount.}
\usage{
subaccounts.insert(Subaccount, profileId)
}
\arguments{
\item{Subaccount}{The \link{Subaccount} object to pass to this method}
\item{profileId}{User profile ID associated with this request}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/dfatrafficking
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/dfatrafficking"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/doubleclick-advertisers/reporting/}{Google Documentation}
Other Subaccount functions: \code{\link{Subaccount}},
\code{\link{subaccounts.patch}},
\code{\link{subaccounts.update}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/boost_tree_spark.R
\name{details_boost_tree_spark}
\alias{details_boost_tree_spark}
\title{Boosted trees via Spark}
\description{
\code{\link[sparklyr:ml_gradient_boosted_trees]{sparklyr::ml_gradient_boosted_trees()}} creates a series of decision trees
forming an ensemble. Each tree depends on the results of previous trees.
All trees in the ensemble are combined to produce a final prediction.
}
\details{
For this engine, there are multiple modes: classification and
regression. However, multiclass classification is not supported yet.
\subsection{Tuning Parameters}{
This model has 7 tuning parameters:
\itemize{
\item \code{tree_depth}: Tree Depth (type: integer, default: 5L)
\item \code{trees}: # Trees (type: integer, default: 20L)
\item \code{learn_rate}: Learning Rate (type: double, default: 0.1)
\item \code{mtry}: # Randomly Selected Predictors (type: integer, default: see
below)
\item \code{min_n}: Minimal Node Size (type: integer, default: 1L)
\item \code{loss_reduction}: Minimum Loss Reduction (type: double, default: 0.0)
\item \code{sample_size}: # Observations Sampled (type: integer, default: 1.0)
}
The \code{mtry} parameter is related to the number of predictors. The default
depends on the model mode. For classification, the square root of the
number of predictors is used and for regression, one third of the
predictors are sampled.
}
\subsection{Translation from parsnip to the original package (regression)}{
\if{html}{\out{<div class="sourceCode r">}}\preformatted{boost_tree(
mtry = integer(), trees = integer(), min_n = integer(), tree_depth = integer(),
learn_rate = numeric(), loss_reduction = numeric(), sample_size = numeric()
) \%>\%
set_engine("spark") \%>\%
set_mode("regression") \%>\%
translate()
}\if{html}{\out{</div>}}
\if{html}{\out{<div class="sourceCode">}}\preformatted{## Boosted Tree Model Specification (regression)
##
## Main Arguments:
## mtry = integer()
## trees = integer()
## min_n = integer()
## tree_depth = integer()
## learn_rate = numeric()
## loss_reduction = numeric()
## sample_size = numeric()
##
## Computational engine: spark
##
## Model fit template:
## sparklyr::ml_gradient_boosted_trees(x = missing_arg(), formula = missing_arg(),
## type = "regression", feature_subset_strategy = integer(),
## max_iter = integer(), min_instances_per_node = min_rows(integer(0),
## x), max_depth = integer(), step_size = numeric(), min_info_gain = numeric(),
## subsampling_rate = numeric(), seed = sample.int(10^5, 1))
}\if{html}{\out{</div>}}
}
\subsection{Translation from parsnip to the original package (classification)}{
\if{html}{\out{<div class="sourceCode r">}}\preformatted{boost_tree(
mtry = integer(), trees = integer(), min_n = integer(), tree_depth = integer(),
learn_rate = numeric(), loss_reduction = numeric(), sample_size = numeric()
) \%>\%
set_engine("spark") \%>\%
set_mode("classification") \%>\%
translate()
}\if{html}{\out{</div>}}
\if{html}{\out{<div class="sourceCode">}}\preformatted{## Boosted Tree Model Specification (classification)
##
## Main Arguments:
## mtry = integer()
## trees = integer()
## min_n = integer()
## tree_depth = integer()
## learn_rate = numeric()
## loss_reduction = numeric()
## sample_size = numeric()
##
## Computational engine: spark
##
## Model fit template:
## sparklyr::ml_gradient_boosted_trees(x = missing_arg(), formula = missing_arg(),
## type = "classification", feature_subset_strategy = integer(),
## max_iter = integer(), min_instances_per_node = min_rows(integer(0),
## x), max_depth = integer(), step_size = numeric(), min_info_gain = numeric(),
## subsampling_rate = numeric(), seed = sample.int(10^5, 1))
}\if{html}{\out{</div>}}
}
\subsection{Preprocessing requirements}{
This engine does not require any special encoding of the predictors.
Categorical predictors can be partitioned into groups of factor levels
(e.g. \verb{\{a, c\}} vs \verb{\{b, d\}}) when splitting at a node. Dummy variables
are not required for this model.
}
\subsection{Case weights}{
This model can utilize case weights during model fitting. To use them,
see the documentation in \link{case_weights} and the examples
on \code{tidymodels.org}.
The \code{fit()} and \code{fit_xy()} arguments have arguments called
\code{case_weights} that expect vectors of case weights.
Note that, for spark engines, the \code{case_weight} argument value should be
a character string to specify the column with the numeric case weights.
}
\subsection{Other details}{
For models created using the \code{"spark"} engine, there are several things
to consider.
\itemize{
\item Only the formula interface via \code{fit()} is available; using
\code{fit_xy()} will generate an error.
\item The predictions will always be in a Spark table format. The names will
be the same as documented but without the dots.
\item There is no equivalent to factor columns in Spark tables so class
predictions are returned as character columns.
\item To retain the model object for a new R session (via \code{save()}), the
\code{model$fit} element of the parsnip object should be serialized via
\code{ml_save(object$fit)} and separately saved to disk. In a new session,
the object can be reloaded and reattached to the parsnip object.
}
}
\subsection{References}{
\itemize{
\item Luraschi, J, K Kuo, and E Ruiz. 2019. \emph{Mastering Spark with R}.
O’Reilly Media
\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
\keyword{internal}
|
/man/details_boost_tree_spark.Rd
|
permissive
|
tidymodels/parsnip
|
R
| false
| true
| 5,641
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/boost_tree_spark.R
\name{details_boost_tree_spark}
\alias{details_boost_tree_spark}
\title{Boosted trees via Spark}
\description{
\code{\link[sparklyr:ml_gradient_boosted_trees]{sparklyr::ml_gradient_boosted_trees()}} creates a series of decision trees
forming an ensemble. Each tree depends on the results of previous trees.
All trees in the ensemble are combined to produce a final prediction.
}
\details{
For this engine, there are multiple modes: classification and
regression. However, multiclass classification is not supported yet.
\subsection{Tuning Parameters}{
This model has 7 tuning parameters:
\itemize{
\item \code{tree_depth}: Tree Depth (type: integer, default: 5L)
\item \code{trees}: # Trees (type: integer, default: 20L)
\item \code{learn_rate}: Learning Rate (type: double, default: 0.1)
\item \code{mtry}: # Randomly Selected Predictors (type: integer, default: see
below)
\item \code{min_n}: Minimal Node Size (type: integer, default: 1L)
\item \code{loss_reduction}: Minimum Loss Reduction (type: double, default: 0.0)
\item \code{sample_size}: # Observations Sampled (type: integer, default: 1.0)
}
The \code{mtry} parameter is related to the number of predictors. The default
depends on the model mode. For classification, the square root of the
number of predictors is used and for regression, one third of the
predictors are sampled.
}
\subsection{Translation from parsnip to the original package (regression)}{
\if{html}{\out{<div class="sourceCode r">}}\preformatted{boost_tree(
mtry = integer(), trees = integer(), min_n = integer(), tree_depth = integer(),
learn_rate = numeric(), loss_reduction = numeric(), sample_size = numeric()
) \%>\%
set_engine("spark") \%>\%
set_mode("regression") \%>\%
translate()
}\if{html}{\out{</div>}}
\if{html}{\out{<div class="sourceCode">}}\preformatted{## Boosted Tree Model Specification (regression)
##
## Main Arguments:
## mtry = integer()
## trees = integer()
## min_n = integer()
## tree_depth = integer()
## learn_rate = numeric()
## loss_reduction = numeric()
## sample_size = numeric()
##
## Computational engine: spark
##
## Model fit template:
## sparklyr::ml_gradient_boosted_trees(x = missing_arg(), formula = missing_arg(),
## type = "regression", feature_subset_strategy = integer(),
## max_iter = integer(), min_instances_per_node = min_rows(integer(0),
## x), max_depth = integer(), step_size = numeric(), min_info_gain = numeric(),
## subsampling_rate = numeric(), seed = sample.int(10^5, 1))
}\if{html}{\out{</div>}}
}
\subsection{Translation from parsnip to the original package (classification)}{
\if{html}{\out{<div class="sourceCode r">}}\preformatted{boost_tree(
mtry = integer(), trees = integer(), min_n = integer(), tree_depth = integer(),
learn_rate = numeric(), loss_reduction = numeric(), sample_size = numeric()
) \%>\%
set_engine("spark") \%>\%
set_mode("classification") \%>\%
translate()
}\if{html}{\out{</div>}}
\if{html}{\out{<div class="sourceCode">}}\preformatted{## Boosted Tree Model Specification (classification)
##
## Main Arguments:
## mtry = integer()
## trees = integer()
## min_n = integer()
## tree_depth = integer()
## learn_rate = numeric()
## loss_reduction = numeric()
## sample_size = numeric()
##
## Computational engine: spark
##
## Model fit template:
## sparklyr::ml_gradient_boosted_trees(x = missing_arg(), formula = missing_arg(),
## type = "classification", feature_subset_strategy = integer(),
## max_iter = integer(), min_instances_per_node = min_rows(integer(0),
## x), max_depth = integer(), step_size = numeric(), min_info_gain = numeric(),
## subsampling_rate = numeric(), seed = sample.int(10^5, 1))
}\if{html}{\out{</div>}}
}
\subsection{Preprocessing requirements}{
This engine does not require any special encoding of the predictors.
Categorical predictors can be partitioned into groups of factor levels
(e.g. \verb{\{a, c\}} vs \verb{\{b, d\}}) when splitting at a node. Dummy variables
are not required for this model.
}
\subsection{Case weights}{
This model can utilize case weights during model fitting. To use them,
see the documentation in \link{case_weights} and the examples
on \code{tidymodels.org}.
The \code{fit()} and \code{fit_xy()} arguments have arguments called
\code{case_weights} that expect vectors of case weights.
Note that, for spark engines, the \code{case_weight} argument value should be
a character string to specify the column with the numeric case weights.
}
\subsection{Other details}{
For models created using the \code{"spark"} engine, there are several things
to consider.
\itemize{
\item Only the formula interface via \code{fit()} is available; using
\code{fit_xy()} will generate an error.
\item The predictions will always be in a Spark table format. The names will
be the same as documented but without the dots.
\item There is no equivalent to factor columns in Spark tables so class
predictions are returned as character columns.
\item To retain the model object for a new R session (via \code{save()}), the
\code{model$fit} element of the parsnip object should be serialized via
\code{ml_save(object$fit)} and separately saved to disk. In a new session,
the object can be reloaded and reattached to the parsnip object.
}
}
\subsection{References}{
\itemize{
\item Luraschi, J, K Kuo, and E Ruiz. 2019. \emph{Mastering Spark with R}.
O’Reilly Media
\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
\keyword{internal}
|
\name{print.glmnet}
\alias{print.glmnet}
\title{print a glmnet object}
\description{
Print a summary of the glmnet path at each step along the path.
}
\usage{
\method{print}{glmnet}(x, digits = max(3, getOption("digits") - 3), ...)
}
\arguments{
\item{x}{fitted glmnet object}
\item{digits}{significant digits in printout}
\item{\dots}{additional print arguments}
}
\details{
The call that produced the object \code{x} is printed, followed by a three-column
matrix
with columns \code{Df}, \code{\%dev} and \code{Lambda}. The \code{Df}
column is the number of nonzero coefficients (Df is a reasonable
name only for lasso fits). \code{\%dev} is the percent deviance
explained (relative to the null deviance).
}
\value{
The matrix above is silently returned}
\references{Friedman, J., Hastie, T. and Tibshirani, R. (2008)
\emph{Regularization Paths for Generalized Linear Models via Coordinate
Descent}}
\author{Jerome Friedman, Trevor Hastie and Rob Tibshirani\cr
Maintainer: Trevor Hastie <hastie@stanford.edu>}
\seealso{\code{glmnet}, \code{predict} and \code{coef} methods.}
\examples{
x=matrix(rnorm(100*20),100,20)
y=rnorm(100)
fit1=glmnet(x,y)
print(fit1)
}
\keyword{models}
\keyword{regression}
|
/Programming_Projects/R Projects/glmnet/man/print.glmnet.Rd
|
no_license
|
pmnyc/Data_Engineering_Collections
|
R
| false
| false
| 1,235
|
rd
|
\name{print.glmnet}
\alias{print.glmnet}
\title{print a glmnet object}
\description{
Print a summary of the glmnet path at each step along the path.
}
\usage{
\method{print}{glmnet}(x, digits = max(3, getOption("digits") - 3), ...)
}
\arguments{
\item{x}{fitted glmnet object}
\item{digits}{significant digits in printout}
\item{\dots}{additional print arguments}
}
\details{
The call that produced the object \code{x} is printed, followed by a three-column
matrix
with columns \code{Df}, \code{\%dev} and \code{Lambda}. The \code{Df}
column is the number of nonzero coefficients (Df is a reasonable
name only for lasso fits). \code{\%dev} is the percent deviance
explained (relative to the null deviance).
}
\value{
The matrix above is silently returned}
\references{Friedman, J., Hastie, T. and Tibshirani, R. (2008)
\emph{Regularization Paths for Generalized Linear Models via Coordinate
Descent}}
\author{Jerome Friedman, Trevor Hastie and Rob Tibshirani\cr
Maintainer: Trevor Hastie <hastie@stanford.edu>}
\seealso{\code{glmnet}, \code{predict} and \code{coef} methods.}
\examples{
x=matrix(rnorm(100*20),100,20)
y=rnorm(100)
fit1=glmnet(x,y)
print(fit1)
}
\keyword{models}
\keyword{regression}
|
# C Green 29 August 2021
# background_variables.R
# R script for results in Appendix 10: Analysis of possibly confounding variables in the main ESD study
#######################################################################################################
#
# Investigate effects of known background variables on all quiz 1 scores (including outlier)
# Most background variables are stored in the presurvey data, and quiz 1 scores are in quiz1 dataframe
# Analysis therefore requires merging two dataframes, presurvey and quiz1, by ParticipantID
# Background variables:
# -----------------------
# 1 Gender
# 2 Age Categories
# 3 Educational score
# 4 Prior sustainability knowledge
# 5 Prior ST/SD knowledge
# 6 Occupational/educational relevance
# 7 Engagement
# 8 Delay
# -----------------------
# Use xlsx package to import Excel
library(xlsx)
# presurvey dataframe contains presurvey data for all groups
presurvey <- read.xlsx("data/scores_tidy.xlsx", sheetIndex=1)
# Rename the group ids (from 0, 1, 2, and 3), and order them as factors
# NOTE(review): the nested ifelse maps any Group value other than 0, 1 or 2 to
# "ST+Sim" — assumes Group only ever takes values 0-3; confirm against the data.
presurvey$Group <- ifelse(presurvey$Group==0, "Control", ifelse(presurvey$Group==1, "ST", ifelse(presurvey$Group==2, "Sim","ST+Sim")))
presurvey$Group <- factor(presurvey$Group, levels = c("Control", "ST", "Sim", "ST+Sim"))
# Colour palettes for graphs
library(RColorBrewer)
############
# 1 GENDER #
############
# Create a frequency table of group and gender
gender_breakdown <- table(presurvey$Gender)
gender_breakdown
# Result:
# Female Male
# 62 44
# Pie chart with percentages
pie(gender_breakdown,
main="Gender Breakdown: All Participants",
col=c("darkmagenta", "cornflowerblue"),
labels=paste(names(gender_breakdown),"\n", gender_breakdown, " (", round(100*gender_breakdown/sum(gender_breakdown), digits = 1), "%)", sep=""))
gender_by_group <- table(presurvey$Gender, presurvey$Group)
gender_by_group
# Result
# Control ST Sim ST+Sim
# Female 18 14 14 16
# Male 10 12 10 12
# Note that the legend had to be moved - increase the y axis max value with ylim
# NOTE(review): beside=T uses the reassignable shorthand for TRUE; prefer
# spelling out TRUE (left unchanged here).
barplot(gender_by_group,
beside=T,
main="Gender by Group",
legend=TRUE,
ylab="Number of participants",
ylim = c(0,20),
col=c("darkmagenta", "cornflowerblue"),
names.arg= c("Control", "ST", "Sim", "ST+Sim"))
# Gender and QuizScore: Is there a relationship?
# NOTE(review): quiz1 is not created in this script — presumably loaded by an
# earlier script in the analysis pipeline; verify it is in the workspace.
presurvey_gender <- data.frame(presurvey$ParticipantID, presurvey$Gender, presurvey$Group)
names(presurvey_gender) <- c('ParticipantID', 'Gender', 'Group')
quiz1_scores_by_participant <- data.frame(quiz1$ParticipantID, quiz1$QuizScore)
names(quiz1_scores_by_participant) <- c('ParticipantID', 'QuizScore')
# Merge with quiz 1 results
quiz1_results_and_gender <- merge(presurvey_gender, quiz1_scores_by_participant)
# Side-by-side boxplots for gender
boxplot(QuizScore ~ Gender,
data = quiz1_results_and_gender,
main="Quiz 1 Scores by Gender",
ylab = "Score (%)",
col = c("aquamarine3", "bisque2"))
# Chi-squared test on quiz1 data: are gender and group independent?
# install.packages("gmodels")
library(gmodels)
# Results for 106 participants:
CrossTable(quiz1_results_and_gender$Group,
quiz1_results_and_gender$Gender,
digits=1,
expected=TRUE,
prop.r=TRUE,
prop.c=TRUE,
prop.t=FALSE,
prop.chisq=TRUE,
sresid=FALSE,
format=c("SPSS"),
dnn = c("Group", "Gender"))
# Result: the p = 0.887335 means we cannot reject the null hypothesis that the variables are independent
# Get a table of means by Group and Gender
gender_group_means <- with(quiz1_results_and_gender, tapply(QuizScore, list(Group, Gender), mean))
# Result:
# Female Male
# Control 67.61111 76.50000
# Sim 74.78571 83.40000
# ST 73.42857 76.91667
# ST+Sim 71.37500 75.00000
barplot(gender_group_means, beside=TRUE, ylab="Quiz 1 Score (%)",
main="Quiz 1 scores by Gender and Group",
legend.text=c("Control", "ST", "Sim", "ST+Sim"),
args.legend = list(x = "top", ncol = 2),
ylim = c(0,90),
col = brewer.pal(4, "Set3"))
#########
# 2 AGE #
#########
# Age Group categories used:
# Age Group Integer
# 18-25 1
# 26-35 2
# 36-45 3
# 46-55 4
# 56-65 5
# Over 65 6
# Age breakdown - all participants
age_breakdown <- table(presurvey$Age)
age_breakdown
# Result
# 18-25 26-35 36-45 46-55 56-65 Over 65
# 7 15 18 24 19 23
barplot(age_breakdown,
main = "Age Breakdown: All Participants",
xlab = "Age in years",
ylab = "No of participants",
ylim = c(0,25),
col = brewer.pal(nrow(age_breakdown), "Set3"))
# Basic statistics
# First, calculate mean and median age for all participants
# Since we're dealing with categorical age groups, a new column is needed first
# Add a numeric age for each category - this will give a value 1 for 18-25, 2 for 26-35 etc.
# (relies on factor() sorting the labels alphabetically, which happens to match age order here)
presurvey$AgeNum <- as.numeric(factor(presurvey$Age))
mean(presurvey$AgeNum) # Result 3.962264 - taking midpoint of range that means age about 50
sort(table(presurvey$AgeNum)) # Mode = last entry of the ascending sort: 4, ie age 46-55
median(presurvey$AgeNum) # Result 4, ie age 46-55
# Age breakdown by group
age_by_group <- table(presurvey$Age, presurvey$Group)
age_by_group
# Result
# Control ST Sim ST+Sim
# 18-25 1 2 2 2
# 26-35 4 3 5 3
# 36-45 4 5 4 5
# 46-55 7 6 7 4
# 56-65 9 4 4 2
# Over 65 3 6 2 12
# Get Group data
presurvey_group0 <- presurvey[presurvey$Group == "Control",]
presurvey_group1 <- presurvey[presurvey$Group == "ST",]
presurvey_group2 <- presurvey[presurvey$Group == "Sim",]
presurvey_group3 <- presurvey[presurvey$Group == "ST+Sim",]
median(presurvey$AgeNum) # 4
median(presurvey_group0$AgeNum) # 4
median(presurvey_group1$AgeNum) # 4
median(presurvey_group2$AgeNum) # 4
median(presurvey_group3$AgeNum) # 4.5
# To work out the mode, use a sorted table of frequencies
sort(table(presurvey$AgeNum)) # 4
sort(table(presurvey_group0$AgeNum)) # 5
sort(table(presurvey_group1$AgeNum)) # 4 and 6
sort(table(presurvey_group2$AgeNum)) # 4
sort(table(presurvey_group3$AgeNum)) # 6
# Boxplot age category for all, and by group
boxplot(presurvey$AgeNum,
presurvey_group0$AgeNum,
presurvey_group1$AgeNum,
presurvey_group2$AgeNum,
presurvey_group3$AgeNum,
main="Age Category by Group",
ylab="Age Category",
col= c("aquamarine3", "azure3", "bisque2", "bisque2", "bisque2"),
names = c("All", "Control", "ST", "Sim", "ST+Sim"))
# Is there a relationship between age and score?
presurvey_age <- data.frame(presurvey$ParticipantID, presurvey$Age)
names(presurvey_age) <- c('ParticipantID', 'Age')
# NOTE(review): quiz1 comes from an earlier script; verify it is in the workspace.
quiz1_scores_by_participant <- data.frame(quiz1$ParticipantID, quiz1$QuizScore)
names(quiz1_scores_by_participant) <- c('ParticipantID', 'QuizScore')
# Merge pre-survey age with scores from quiz1
quiz1_results_and_age <- merge(presurvey_age, quiz1_scores_by_participant)
# Get the quiz 1 scores per age group
quiz1_score_18_25 <- quiz1_results_and_age[quiz1_results_and_age$Age == "18-25",]
quiz1_score_26_35 <- quiz1_results_and_age[quiz1_results_and_age$Age == "26-35",]
quiz1_score_36_45 <- quiz1_results_and_age[quiz1_results_and_age$Age == "36-45",]
quiz1_score_46_55 <- quiz1_results_and_age[quiz1_results_and_age$Age == "46-55",]
quiz1_score_56_65 <- quiz1_results_and_age[quiz1_results_and_age$Age == "56-65",]
quiz1_score_over_65 <- quiz1_results_and_age[quiz1_results_and_age$Age == "Over 65",]
# Side-by-side boxplots for all age groups
boxplot(quiz1_results_and_age$QuizScore,
quiz1_score_18_25$QuizScore,
quiz1_score_26_35$QuizScore,
quiz1_score_36_45$QuizScore,
quiz1_score_46_55$QuizScore,
quiz1_score_56_65$QuizScore,
quiz1_score_over_65$QuizScore,
main="Quiz 1 Scores by Age Group",
ylab = "Score (%)",
names = c("All", "18-25", "26-35", "36-45", "46-55", "56-65", "Over 65"),
col = c("aquamarine3", "bisque2", "bisque2", "bisque2", "bisque2", "bisque2", "bisque2"))
# Is age category a confounding variable in the relationship between group and score?
presurvey_group_age <- data.frame(presurvey$ParticipantID, presurvey$Group, presurvey$Age)
names(presurvey_group_age) <- c('ParticipantID', 'Group','Age')
# Merge for quiz 1 results
quiz1_results_and_group_and_age <- merge(presurvey_group_age, quiz1_scores_by_participant)
# Remove ParticipantID column, not needed for aggregating results
quiz1_results_and_group_and_age$ParticipantID <- NULL
# Get a frequency table with age and group
group_by_age <- table(quiz1_results_and_group_and_age$Group, quiz1_results_and_group_and_age$Age)
group_by_age
# Result
# 18-25 26-35 36-45 46-55 56-65 Over 65
# Control 1 4 4 7 9 3
# ST 2 3 5 6 4 6
# Sim 2 5 4 7 4 2
# ST+Sim 2 3 5 4 2 12
# Get a table of means by Group and Age Group
with(quiz1_results_and_group_and_age, tapply(QuizScore, list(Group, Age), mean))
# Result
# 18-25 26-35 36-45 46-55 56-65 Over 65
# Control 78.0 75.25000 74.50 73.00000 66.11111 66.33333
# ST 73.0 75.00000 76.40 74.66667 68.75000 79.16667
# Sim 87.5 84.80000 70.75 72.28571 80.75000 85.00000
# ST+Sim 66.0 75.33333 84.60 61.50000 66.00000 73.58333
# Repeat but reduce the age categories, there are too few observations to stratify according to 6 categories
quiz1_results_and_group_and_age_adjusted <- quiz1_results_and_group_and_age
# Collapse the six age bands into three: 18-35, 36-55, Over 56.
# NOTE(review): the final ifelse branch maps any unmatched Age to '' — assumes
# Age only takes the six known labels; confirm there are no other values.
quiz1_results_and_group_and_age_adjusted$Age_adjusted <-
ifelse(quiz1_results_and_group_and_age_adjusted$Age=='Over 65', 'Over 56',
ifelse(quiz1_results_and_group_and_age_adjusted$Age=='56-65', 'Over 56',
ifelse(quiz1_results_and_group_and_age_adjusted$Age=='46-55', '36-55',
ifelse(quiz1_results_and_group_and_age_adjusted$Age=='36-45', '36-55',
ifelse(quiz1_results_and_group_and_age_adjusted$Age=='26-35', '18-35',
ifelse(quiz1_results_and_group_and_age_adjusted$Age=='18-25', '18-35', ''))))))
# Get a frequency table with adjusted age and group
group_by_age_adjusted <- table(quiz1_results_and_group_and_age_adjusted$Group, quiz1_results_and_group_and_age_adjusted$Age_adjusted)
group_by_age_adjusted
# Result
# 18-35 36-55 Over 56
# Control 5 11 12
# ST 5 11 10
# Sim 7 11 6
# ST+Sim 5 9 14
# Get a table of means by Group and adjusted Age Group
with(quiz1_results_and_group_and_age_adjusted, tapply(QuizScore, list(Group, Age_adjusted), mean))
# Result
# 18-35 36-55 Over 56
# Control 75.80000 73.54545 66.16667
# ST 74.20000 75.45455 75.00000
# Sim 85.57143 71.72727 82.16667
# ST+Sim 71.60000 74.33333 72.50000
#######################
# 3 EDUCATIONAL SCORE #
#######################
# Educational Attainment Scores:
# 1 Leaving Certificate
# 2 Degree or equivalent
# 3 Higher Diploma or Masters Degree
# 4 PhD
# First, calculate mean and median ed score for all participants (categorical data so it's approximate)
mean(presurvey$EdScore) # 2.603774
sort(table(presurvey$EdScore)) # mode is 3
median(presurvey$EdScore) # 3
edscore_all <- table(presurvey$EdScore)
edscore_all
# Result:
# 1 2 3 4
# 9 39 43 15
barplot(edscore_all,
main = "Educational Attainment: All Participants",
names = c("Leaving\nCert", "Degree", "Masters", "PhD"),
ylab = "No of participants",
col = brewer.pal(nrow(edscore_all), "Set2"))
# Educational attainment and quizscore: Is there a relationship?
presurvey_ed <- data.frame(presurvey$ParticipantID, presurvey$Group, presurvey$EdScore)
names(presurvey_ed) <- c('ParticipantID', 'Group', 'EdScore')
# Merge for each set of quiz results
# NOTE(review): quiz1_scores_by_participant is built in the gender/age sections
# above — this section depends on running them first.
quiz1_results_and_ed <- merge(presurvey_ed, quiz1_scores_by_participant)
# Get a table of means by Ed
ed_means <- with(quiz1_results_and_ed, tapply(QuizScore, EdScore, mean))
ed_means
# Result:
# 1 2 3 4
# 71.77778 70.76923 75.86047 79.20000
# Get quiz 1 scores per educational attainment level
quiz1_score_ed1 <- quiz1_results_and_ed[quiz1_results_and_ed$EdScore == 1,]
quiz1_score_ed2 <- quiz1_results_and_ed[quiz1_results_and_ed$EdScore == 2,]
quiz1_score_ed3 <- quiz1_results_and_ed[quiz1_results_and_ed$EdScore == 3,]
quiz1_score_ed4 <- quiz1_results_and_ed[quiz1_results_and_ed$EdScore == 4,]
# Side-by-side boxplots for all attainment levels
boxplot(quiz1_results_and_ed$QuizScore,
quiz1_score_ed1$QuizScore,
quiz1_score_ed2$QuizScore,
quiz1_score_ed3$QuizScore,
quiz1_score_ed4$QuizScore,
main="Quiz 1 Scores by Educational Attainment",
ylab = "Score (%)",
names = c("All", "Leaving Cert", "Degree", "Masters", "PhD"),
col = c("aquamarine3", "bisque2", "bisque2", "bisque2", "bisque2"))
# Create a frequency table of Group and EdScore
edscore_by_group <- table(presurvey$EdScore, presurvey$Group)
# I used the below for transposing the table, easier for my written report:
edscore_by_group_flipped <- table(presurvey$Group, presurvey$EdScore)
edscore_by_group_flipped
# Result:
# 1 2 3 4
# Control 1 10 12 5
# ST 3 11 11 1
# Sim 3 7 11 3
# ST+Sim 2 11 9 6
# Use Barplot with bars beside option
barplot(edscore_by_group,
beside=T,
main="Educational Attainment by Group",
legend=TRUE,
legend.text=c("Leaving Cert", "Degree", "Masters", "PhD"),
ylab="No of participants",
xlab="Group",
ylim = c(0,15),
names.arg= c("Control", "ST", "Sim", "ST+Sim"),
col = brewer.pal(nrow(edscore_by_group), "Set2"))
# Repeat but reduce the ed levels, there are too few observations to stratify
quiz1_results_and_ed_adjusted <- quiz1_results_and_ed
quiz1_results_and_ed_adjusted$Ed_adjusted <-
ifelse(quiz1_results_and_ed_adjusted$EdScore== 1 | quiz1_results_and_ed_adjusted$EdScore== 2, "1-2",
ifelse(quiz1_results_and_ed_adjusted$EdScore== 3 | quiz1_results_and_ed_adjusted$EdScore== 4, "3-4", ''))
reduce_edscore_by_group <- table(quiz1_results_and_ed_adjusted$Group, quiz1_results_and_ed_adjusted$Ed_adjusted)
reduce_edscore_by_group
# Result:
# 1-2 3-4
# Control 11 17
# ST 14 12
# Sim 10 14
# ST+Sim 13 15
# Chi-squared test on quiz1 data: are educational score and group independent?
library(gmodels)
CrossTable(quiz1_results_and_ed_adjusted$Group,
quiz1_results_and_ed_adjusted$Ed_adjusted,
digits=1,
expected=TRUE,
prop.r=TRUE,
prop.c=TRUE,
prop.t=FALSE,
prop.chisq=TRUE,
sresid=FALSE,
format=c("SPSS"),
dnn = c("Group", "Ed Score"))
# Result: the p = 0.7250024 means we cannot reject the null hypothesis that the variables are independent
####################################
# 4 PRIOR SUSTAINABILITY KNOWLEDGE #
####################################
# Prior Sustainability Scores:
# 0 None at all
# 1 A little
# 2 A moderate amount
# 3 A lot
# First, calculate mean and median for all participants (categorical data so it's approximate)
mean(presurvey$PriorSustKnowledgeAdjusted) # 1.198113
sort(table(presurvey$PriorSustKnowledgeAdjusted)) # mode is 0
median(presurvey$PriorSustKnowledgeAdjusted) # 1
sus_score_all <- table(presurvey$PriorSustKnowledgeAdjusted)
sus_score_all
# Result:
# 0 1 2 3
# 38 28 21 19
barplot(sus_score_all,
        main = "Prior Sustainability Knowledge: All Participants",
        names = c("None at all", "A little", "A moderate amount", "A lot"),
        ylab = "No of participants",
        col = brewer.pal(nrow(sus_score_all), "Set2"))
# Prior sustainability knowledge by group
# Create a frequency table
sus_score_by_group <- table(presurvey$PriorSustKnowledgeAdjusted, presurvey$Group)
sus_score_by_group_flipped <- table(presurvey$Group, presurvey$PriorSustKnowledgeAdjusted)
sus_score_by_group_flipped
# Result
#           0  1  2  3
#  Control 10  8  5  5
#  ST       6  4  6 10
#  Sim      8  9  5  2
#  ST+Sim  14  7  5  2
# Barplot sustainability knowledge by group
barplot(sus_score_by_group,
        beside=T,
        main="Prior Sustainability Knowledge by Group",
        legend=TRUE,
        legend.text=c("None at all", "A little", "A moderate amount", "A lot"),
        ylab="No of participants",
        args.legend = list(x = "top"),
        names.arg= c("Control", "ST", "Sim", "ST+Sim"),
        col = brewer.pal(nrow(sus_score_all), "Set2"))
# Boxplot sustainability knowledge for all, and by group
# (presurvey_group0..presurvey_group3 are the per-group subsets of presurvey
# created in the Age section earlier in this script)
boxplot(presurvey$PriorSustKnowledgeAdjusted,
        presurvey_group0$PriorSustKnowledgeAdjusted,
        presurvey_group1$PriorSustKnowledgeAdjusted,
        presurvey_group2$PriorSustKnowledgeAdjusted,
        presurvey_group3$PriorSustKnowledgeAdjusted,
        main="Prior Sustainability Knowledge Scores by Group",
        ylab="Sustainability Knowledge Score",
        col= c("aquamarine3", "azure3", "bisque2", "bisque2", "bisque2"),
        names = c("All", "Control", "ST", "Sim", "ST+Sim"))
# Prior sustainability knowledge and quizscore: is there a relationship?
# as.factor() so the scores subset cleanly by the "0".."3" labels below
presurvey_prior_sust_know <- data.frame(presurvey$ParticipantID, as.factor(presurvey$PriorSustKnowledgeAdjusted))
names(presurvey_prior_sust_know) <- c('ParticipantID', 'PriorSustKnowledgeAdjusted')
# Merge
quiz1_results_and_prior_sust_know <- merge(presurvey_prior_sust_know, quiz1_scores_by_participant)
# Get the quiz 1 scores per prior sust knowledge category
quiz1_score_0 <- quiz1_results_and_prior_sust_know[quiz1_results_and_prior_sust_know$PriorSustKnowledgeAdjusted == "0",]
quiz1_score_1 <- quiz1_results_and_prior_sust_know[quiz1_results_and_prior_sust_know$PriorSustKnowledgeAdjusted == "1",]
quiz1_score_2 <- quiz1_results_and_prior_sust_know[quiz1_results_and_prior_sust_know$PriorSustKnowledgeAdjusted == "2",]
quiz1_score_3 <- quiz1_results_and_prior_sust_know[quiz1_results_and_prior_sust_know$PriorSustKnowledgeAdjusted == "3",]
# Side-by-side boxplots
boxplot(quiz1_results_and_prior_sust_know$QuizScore,
        quiz1_score_0$QuizScore,
        quiz1_score_1$QuizScore,
        quiz1_score_2$QuizScore,
        quiz1_score_3$QuizScore,
        main="Quiz 1 Scores by Prior Sustainability Knowledge",
        ylab = "Score (%)",
        names = c("All", "None at all", "A little", "Moderate", "A lot"),
        xlab = "Prior Sustainability Knowledge",
        col = c("aquamarine3", "bisque2", "bisque2", "bisque2", "bisque2"))
# Is prior sustainability experience a confounding variable in the relationship between group and score?
presurvey_group_sus_knowledge <- data.frame(presurvey$ParticipantID, presurvey$Group, presurvey$PriorSustKnowledgeAdjusted)
names(presurvey_group_sus_knowledge) <- c('ParticipantID', 'Group','PriorSustKnowledgeAdjusted')
# Merge for quiz 1 results
quiz1_results_and_group_and_sus_knowledge <- merge(presurvey_group_sus_knowledge, quiz1_scores_by_participant)
# Remove ParticipantID column, not needed for aggregating results
quiz1_results_and_group_and_sus_knowledge$ParticipantID <- NULL
# Get a table of means by Group and Prior Sus Knowledge
with(quiz1_results_and_group_and_sus_knowledge, tapply(QuizScore, list(Group, PriorSustKnowledgeAdjusted), mean))
# Result
#                0        1        2    3
#  Control 70.10000 69.25000 73.20000 72.2
#  ST      67.33333 73.00000 78.83333 78.2
#  Sim     80.62500 77.22222 73.20000 87.5
#  ST+Sim  75.21429 67.71429 69.40000 84.0
# Try chi squared test to test independence of group and prior sustainability knowledge:
# library(gmodels)
CrossTable(quiz1_results_and_group_and_sus_knowledge$Group,
           quiz1_results_and_group_and_sus_knowledge$PriorSustKnowledgeAdjusted,
           digits=1,
           expected=TRUE,
           prop.r=TRUE,
           prop.c=TRUE,
           prop.t=FALSE,
           prop.chisq=TRUE,
           sresid=FALSE,
           format=c("SPSS"),
           dnn = c("Group", "Prior Sus Knowledge"))
# Not enough observations in some of the cells, so not valid for Chi-squared test
# Repeat but reduce the levels (collapse to two bands: 0-1 low, 2-3 high)
quiz1_results_and_sus_adjusted <- quiz1_results_and_group_and_sus_knowledge
quiz1_results_and_sus_adjusted$sus_adjusted <-
  ifelse(quiz1_results_and_sus_adjusted$PriorSustKnowledgeAdjusted== 0 |
           quiz1_results_and_sus_adjusted$PriorSustKnowledgeAdjusted== 1, "0-1",
         ifelse(quiz1_results_and_sus_adjusted$PriorSustKnowledgeAdjusted== 2 |
                  quiz1_results_and_sus_adjusted$PriorSustKnowledgeAdjusted== 3, "2-3", ''))
CrossTable(quiz1_results_and_sus_adjusted$Group,
           quiz1_results_and_sus_adjusted$sus_adjusted,
           digits=1,
           expected=TRUE,
           prop.r=TRUE,
           prop.c=TRUE,
           prop.t=FALSE,
           prop.chisq=TRUE,
           sresid=FALSE,
           format=c("SPSS"),
           dnn = c("Group", "Prior Sus Knowledge"))
# Result: the p = 0.02927516 is significant, so we REJECT the null hypothesis that the variables are independent
###########################
# 5 Prior ST/SD knowledge #
###########################
# Prior STSD Scores:
# 0 None at all
# 1 A little
# 2 A moderate amount
# 3 A lot
# First, calculate mean and median for all participants (categorical data so it's approximate)
mean(presurvey$PriorSTSDKnowledge) # 0.2264151
sort(table(presurvey$PriorSTSDKnowledge)) # Mode 0
median(presurvey$PriorSTSDKnowledge) # Result 0
sdst_score_all <- table(presurvey$PriorSTSDKnowledge)
sdst_score_all
# Result:
# 0 1 2 3
# 92 7 4 3
barplot(sdst_score_all,
        main = "Prior Systems Thinking / System Dynamics Knowledge: All Participants",
        names = c("None at all", "A little", "A moderate amount", "A lot"),
        ylab = "No of participants",
        # Fix: the tallest bar is 92 (see frequency table above) but the
        # previous ylim of c(0, 90) clipped the axis below the bar top.
        ylim = c(0, 100),
        col = brewer.pal(nrow(sdst_score_all), "Set2"))
# Prior Systems Thinking / System Dynamics Knowledge by group
# Create a frequency table of Group and Prior ST/SD Knowledge
stsd_score_by_group <- table(presurvey$PriorSTSDKnowledge, presurvey$Group)
stsd_score_by_group_flipped <- table(presurvey$Group, presurvey$PriorSTSDKnowledge)
stsd_score_by_group_flipped
# Result
#           0  1  2  3
#  Control 21  4  2  1
#  ST      25  1  0  0
#  Sim     21  2  0  1
#  ST+Sim  25  0  2  1
barplot(stsd_score_by_group,
        beside=T,
        main="Prior Systems Thinking / System Dynamics Knowledge by Group",
        legend=TRUE,
        legend.text=c("None", "A little", "A moderate amount", "A lot"),
        args.legend = list(x = "top", ncol = 2),
        ylim = c(0,30),
        ylab="No of participants",
        names.arg= c("Control", "ST", "Sim", "ST+Sim"),
        col = brewer.pal(nrow(sdst_score_all), "Set2"))
# Prior ST knowledge and quiz score: Is there a relationship?
# as.factor() so the scores subset cleanly by the "0".."3" labels below
presurvey_prior_st_know <- data.frame(presurvey$ParticipantID, as.factor(presurvey$PriorSTSDKnowledge))
names(presurvey_prior_st_know) <- c('ParticipantID', 'PriorSTSDKnowledge')
# Merge for quiz 1 results
quiz1_results_and_prior_st_know <- merge(presurvey_prior_st_know, quiz1_scores_by_participant)
# Get the quiz 1 scores per prior ST knowledge category
quiz1_st_score_0 <- quiz1_results_and_prior_st_know[quiz1_results_and_prior_st_know$PriorSTSDKnowledge == "0",]
quiz1_st_score_1 <- quiz1_results_and_prior_st_know[quiz1_results_and_prior_st_know$PriorSTSDKnowledge == "1",]
quiz1_st_score_2 <- quiz1_results_and_prior_st_know[quiz1_results_and_prior_st_know$PriorSTSDKnowledge == "2",]
quiz1_st_score_3 <- quiz1_results_and_prior_st_know[quiz1_results_and_prior_st_know$PriorSTSDKnowledge == "3",]
# Side-by-side boxplots
boxplot(quiz1_results_and_prior_st_know$QuizScore,
        quiz1_st_score_0$QuizScore,
        quiz1_st_score_1$QuizScore,
        quiz1_st_score_2$QuizScore,
        quiz1_st_score_3$QuizScore,
        main="Quiz 1 Scores by Prior Systems Thinking Knowledge",
        ylab = "Score (%)",
        names = c("All", "None at all", "A little", "Moderate", "A lot"),
        # Fix: this plot shows prior ST/SD knowledge, not sustainability
        # knowledge - the old x-axis label was copied from section 4.
        xlab = "Prior Systems Thinking / System Dynamics Knowledge",
        col = c("aquamarine3", "bisque2", "bisque2", "bisque2", "bisque2"))
##############################################
# 6 Occupational/educational relevance score #
##############################################
# Prior OccOrStudyRelevanceScore Scores:
# 0 Not at all relevant
# 1 A little relevant
# 2 Moderately relevant
# 3 Quite relevant
# 4 Highly relevant
# First, calculate mean and median relevance score for all participants (categorical data so it's approximate)
mean(presurvey$OccOrStudyRelevanceScore) # 1.301887
sort(table(presurvey$OccOrStudyRelevanceScore)) # Mode is 0
median(presurvey$OccOrStudyRelevanceScore) # Result 1
occ_score_all <- table(presurvey$OccOrStudyRelevanceScore)
occ_score_all
# Result:
# 0 1 2 3 4
# 38 30 10 24 4
barplot(occ_score_all,
        main = "Occupational or Educational Relevance: All Participants",
        names = c("Not at all", "A little", "Moderately", "Quite", "Highly"),
        ylab = "No of participants",
        xlab = "How Relevant",
        col = brewer.pal(nrow(occ_score_all), "Set2"))
# Create a frequency table of Group and EdOccRelevance
ed_occ_rel_score_by_group <- table(presurvey$OccOrStudyRelevanceScore, presurvey$Group)
# Flip the matrix for reporting purposes
ed_occ_rel_score_by_group_flipped <- table(presurvey$Group, presurvey$OccOrStudyRelevanceScore)
ed_occ_rel_score_by_group_flipped
barplot(ed_occ_rel_score_by_group,
        beside=T,
        main="Occupational or Educational Relevance by Group",
        legend=TRUE,
        legend.text=c("Not at all", "A little", "Moderately", "Quite", "Highly"),
        ylab="No of participants",
        # Fix: the x axis of this grouped barplot is the experimental group
        # (see names.arg below); relevance is shown in the legend. The old
        # label "How Relevant" was copied from the all-participants plot.
        xlab = "Group",
        ylim = c(0,13),
        names.arg= c("Control", "ST", "Sim", "ST+Sim"),
        col = brewer.pal(nrow(occ_score_all), "Set2"))
# Occupational or educational relevance and quiz score: is there a relationship?
# as.factor() so the scores subset cleanly by the "0".."4" labels below
presurvey_prior_occ <- data.frame(presurvey$ParticipantID, as.factor(presurvey$OccOrStudyRelevanceScore))
names(presurvey_prior_occ) <- c('ParticipantID', 'OccOrStudyRelevanceScore')
# Merge for quiz 1 results
quiz1_results_and_prior_occ <- merge(presurvey_prior_occ, quiz1_scores_by_participant)
# Get the quiz 1 scores per relevance category
quiz1_occ_score_0 <- quiz1_results_and_prior_occ[quiz1_results_and_prior_occ$OccOrStudyRelevanceScore == "0",]
quiz1_occ_score_1 <- quiz1_results_and_prior_occ[quiz1_results_and_prior_occ$OccOrStudyRelevanceScore == "1",]
quiz1_occ_score_2 <- quiz1_results_and_prior_occ[quiz1_results_and_prior_occ$OccOrStudyRelevanceScore == "2",]
quiz1_occ_score_3 <- quiz1_results_and_prior_occ[quiz1_results_and_prior_occ$OccOrStudyRelevanceScore == "3",]
quiz1_occ_score_4 <- quiz1_results_and_prior_occ[quiz1_results_and_prior_occ$OccOrStudyRelevanceScore == "4",]
# Side-by-side boxplots
boxplot(quiz1_results_and_prior_occ$QuizScore,
        quiz1_occ_score_0$QuizScore,
        quiz1_occ_score_1$QuizScore,
        quiz1_occ_score_2$QuizScore,
        quiz1_occ_score_3$QuizScore,
        quiz1_occ_score_4$QuizScore,
        main="Quiz 1 Scores by Occupational or Educational Relevance",
        ylab = "Score (%)",
        names = c("All", "Not at all", "A little", "Moderately", "Quite", "Highly"),
        xlab = "Whether Occupation or Education Relevant",
        col = c("aquamarine3", "bisque2", "bisque2", "bisque2", "bisque2", "bisque2"))
################
# 7 ENGAGEMENT #
################
# Engagement with the quiz 1 materials, coded as:
#   0 = Unacceptable, 1 = Minimal, 2 = Good
# Zero engagers were removed from the dataset of 106 before this analysis:
# they did not properly engage with the introduction, ST and/or Sim sections,
# so their data cannot speak to the impact of ST and/or Sim on learning
# outcomes.
# Pull the quiz 1 engagement column for every participant.
engagement_quiz1_all <- quiz1[["Engagement"]]
# Frequency of each engagement level across all participants.
quiz1_engagement_table <- table(engagement_quiz1_all)
quiz1_engagement_table
# Result
# 1 2
# 6 100
# Frequency of each engagement level within each experimental group.
quiz1_engagement_by_group <- table(quiz1[["Engagement"]], quiz1[["Group"]])
quiz1_engagement_by_group
# Result
#    Control ST Sim ST+Sim
#  1       2  2   2      0
#  2      26 24  22     28
############
# 8 DELAYS #
############
# Note: These delay scores are for quiz 1.
# Delay scores:
# 0 No significant delay
# 1 Significant delay
# Delays for ALL participants for Quiz 1
delays_quiz1_all <- quiz1$Delay
# Crosstab: Delay frequencies
quiz1_delays_table <- table(delays_quiz1_all)
quiz1_delays_table
# Result
# 0 1
# 89 17
# Is number of delays a confounding variable in the relationship between group and score?
# Extract quiz1 entries where there was a delay
delay_quiz1 <- quiz1[quiz1$Delay>0,]
# Create a frequency table, no of delays by group
quiz1_delays <- table(delay_quiz1$Delay, delay_quiz1$Group)
quiz1_delays
# Result (Delay=1)
#    Control ST Sim ST+Sim
#  1       0  4   2     11
# Delays by group barplot
barplot(quiz1_delays,
        main = "Delays for Quiz 1 by Group",
        names = c("Control", "ST", "Sim", "ST+Sim"),
        ylab = "No of participants",
        col = brewer.pal(ncol(quiz1_delays), "Set2"))
# Get a table of means by Group and Delays
quiz1_scores_delays <- data.frame(quiz1$Group, quiz1$Delay, quiz1$QuizScore)
names(quiz1_scores_delays) <- c('Group', 'Delay', 'QuizScore')
with(quiz1_scores_delays, tapply(QuizScore, list(Group, Delay), mean))
# Results:
#                0    1
#  Control 70.78571   NA
#  ST      75.09091 79.5
#  Sim     79.45455 66.5
#  ST+Sim  72.88235 73.0
# (Control is NA in the Delay=1 column because no Control participant had a
# delay - see the frequency table above)
|
/R Scripts/background_variables.R
|
permissive
|
CJCGreen/data_analysis_ESD_study
|
R
| false
| false
| 30,832
|
r
|
# C Green 29 August 2021
# background_variables.R
# R script for results in Appendix 10: Analysis of possibly confounding variables in the main ESD study
#######################################################################################################
#
# Investigate effects of known background variables on all quiz 1 scores (including outlier)
# Most background variables are stored in the presurvey data, and quiz 1 scores are in quiz1 dataframe
# Analysis therefore requires merging two dataframes, presurvey and quiz1, by ParticipantID
# Background variables:
# -----------------------
# 1 Gender
# 2 Age Categories
# 3 Educational score
# 4 Prior sustainability knowledge
# 5 Prior ST/SD knowledge
# 6 Occupational/educational relevance
# 7 Engagement
# 8 Delay
# -----------------------
# Use xlsx package to import Excel
library(xlsx)
# presurvey dataframe contains presurvey data for all groups
presurvey <- read.xlsx("data/scores_tidy.xlsx", sheetIndex=1)
# Recode the numeric group ids (0-3) directly to a factor with the desired
# level order. This replaces a nested ifelse() chain whose final else branch
# silently labelled ANY unexpected group code as "ST+Sim"; with
# factor(levels, labels) a bad code now surfaces as NA instead.
presurvey$Group <- factor(presurvey$Group,
                          levels = c(0, 1, 2, 3),
                          labels = c("Control", "ST", "Sim", "ST+Sim"))
# Colour palettes for graphs
library(RColorBrewer)
############
# 1 GENDER #
############
# Create a frequency table of gender across all participants
gender_breakdown <- table(presurvey$Gender)
gender_breakdown
# Result:
# Female Male
# 62 44
# Pie chart with percentages
pie(gender_breakdown,
    main="Gender Breakdown: All Participants",
    col=c("darkmagenta", "cornflowerblue"),
    labels=paste(names(gender_breakdown),"\n", gender_breakdown, " (", round(100*gender_breakdown/sum(gender_breakdown), digits = 1), "%)", sep=""))
gender_by_group <- table(presurvey$Gender, presurvey$Group)
gender_by_group
# Result
#          Control ST Sim ST+Sim
#  Female       18 14  14     16
#  Male         10 12  10     12
# Note that the legend had to be moved - increase the y axis max value with ylim
barplot(gender_by_group,
        beside=T,
        main="Gender by Group",
        legend=TRUE,
        ylab="Number of participants",
        ylim = c(0,20),
        col=c("darkmagenta", "cornflowerblue"),
        names.arg= c("Control", "ST", "Sim", "ST+Sim"))
# Gender and QuizScore: Is there a relationship?
presurvey_gender <- data.frame(presurvey$ParticipantID, presurvey$Gender, presurvey$Group)
names(presurvey_gender) <- c('ParticipantID', 'Gender', 'Group')
# quiz1_scores_by_participant is reused by all later sections when joining
# background variables to quiz 1 scores
quiz1_scores_by_participant <- data.frame(quiz1$ParticipantID, quiz1$QuizScore)
names(quiz1_scores_by_participant) <- c('ParticipantID', 'QuizScore')
# Merge with quiz 1 results (joins on the shared ParticipantID column)
quiz1_results_and_gender <- merge(presurvey_gender, quiz1_scores_by_participant)
# Side-by-side boxplots for gender
boxplot(QuizScore ~ Gender,
        data = quiz1_results_and_gender,
        main="Quiz 1 Scores by Gender",
        ylab = "Score (%)",
        col = c("aquamarine3", "bisque2"))
# Chi-squared test on quiz1 data: are gender and group independent?
# install.packages("gmodels")
library(gmodels)
# Results for 106 participants:
CrossTable(quiz1_results_and_gender$Group,
           quiz1_results_and_gender$Gender,
           digits=1,
           expected=TRUE,
           prop.r=TRUE,
           prop.c=TRUE,
           prop.t=FALSE,
           prop.chisq=TRUE,
           sresid=FALSE,
           format=c("SPSS"),
           dnn = c("Group", "Gender"))
# Result: the p = 0.887335 means we cannot reject the null hypothesis that the variables are independent
# Get a table of means by Group and Gender
gender_group_means <- with(quiz1_results_and_gender, tapply(QuizScore, list(Group, Gender), mean))
# Result:
#            Female     Male
#  Control 67.61111 76.50000
#  Sim     74.78571 83.40000
#  ST      73.42857 76.91667
#  ST+Sim  71.37500 75.00000
# NOTE(review): Group was releveled above to Control, ST, Sim, ST+Sim, so
# tapply() should emit rows in that order, yet the recorded result lists Sim
# before ST. Re-run and confirm that the legend.text order in the barplot
# below matches the actual row order of gender_group_means.
barplot(gender_group_means, beside=TRUE, ylab="Quiz 1 Score (%)",
        main="Quiz 1 scores by Gender and Group",
        legend.text=c("Control", "ST", "Sim", "ST+Sim"),
        args.legend = list(x = "top", ncol = 2),
        ylim = c(0,90),
        col = brewer.pal(4, "Set3"))
#########
# 2 AGE #
#########
# Age Group categories used:
# Age Group Integer
# 18-25 1
# 26-35 2
# 36-45 3
# 46-55 4
# 56-65 5
# Over 65 6
# Age breakdown - all participants
age_breakdown <- table(presurvey$Age)
age_breakdown
# Result
# 18-25 26-35 36-45 46-55 56-65 Over 65
# 7 15 18 24 19 23
barplot(age_breakdown,
        main = "Age Breakdown: All Participants",
        xlab = "Age in years",
        ylab = "No of participants",
        ylim = c(0,25),
        col = brewer.pal(nrow(age_breakdown), "Set3"))
# Basic statistics
# First, calculate mean and median age for all participants
# Since we're dealing with categorical age groups, a new column is needed first
# Add a numeric age for each category - this will give a value 1 for 18-25, 2 for 26-35 etc.
# NOTE(review): as.numeric(factor(...)) numbers the bands in the factor's
# default (alphabetical) level order, which happens to coincide with
# chronological order for these particular labels - confirm if the band
# labels ever change.
presurvey$AgeNum <- as.numeric(factor(presurvey$Age))
mean(presurvey$AgeNum) # Result 3.962264 - taking midpoint of range that means age about 50
sort(table(presurvey$AgeNum)) # Result 4, ie age 46-55
median(presurvey$AgeNum) # Result 4, ie age 46-55
# Age breakdown by group
age_by_group <- table(presurvey$Age, presurvey$Group)
age_by_group
# Result
#           Control ST Sim ST+Sim
#  18-25          1  2   2      2
#  26-35          4  3   5      3
#  36-45          4  5   4      5
#  46-55          7  6   7      4
#  56-65          9  4   4      2
#  Over 65        3  6   2     12
# Get Group data (per-group subsets, also reused by later sections)
presurvey_group0 <- presurvey[presurvey$Group == "Control",]
presurvey_group1 <- presurvey[presurvey$Group == "ST",]
presurvey_group2 <- presurvey[presurvey$Group == "Sim",]
presurvey_group3 <- presurvey[presurvey$Group == "ST+Sim",]
median(presurvey$AgeNum) # 4
median(presurvey_group0$AgeNum) # 4
median(presurvey_group1$AgeNum) # 4
median(presurvey_group2$AgeNum) # 4
median(presurvey_group3$AgeNum) # 4.5
# To work out the mode, use a sorted table of frequencies (mode = last entry)
sort(table(presurvey$AgeNum)) # 4
sort(table(presurvey_group0$AgeNum)) # 5
sort(table(presurvey_group1$AgeNum)) # 4 and 6
sort(table(presurvey_group2$AgeNum)) # 4
sort(table(presurvey_group3$AgeNum)) # 6
# Boxplot age category for all, and by group
boxplot(presurvey$AgeNum,
        presurvey_group0$AgeNum,
        presurvey_group1$AgeNum,
        presurvey_group2$AgeNum,
        presurvey_group3$AgeNum,
        main="Age Category by Group",
        ylab="Age Category",
        col= c("aquamarine3", "azure3", "bisque2", "bisque2", "bisque2"),
        names = c("All", "Control", "ST", "Sim", "ST+Sim"))
# Is there a relationship between age and score?
presurvey_age <- data.frame(presurvey$ParticipantID, presurvey$Age)
names(presurvey_age) <- c('ParticipantID', 'Age')
quiz1_scores_by_participant <- data.frame(quiz1$ParticipantID, quiz1$QuizScore)
names(quiz1_scores_by_participant) <- c('ParticipantID', 'QuizScore')
# Merge pre-survey age with scores from quiz1
quiz1_results_and_age <- merge(presurvey_age, quiz1_scores_by_participant)
# Get the quiz 1 scores per age group
quiz1_score_18_25 <- quiz1_results_and_age[quiz1_results_and_age$Age == "18-25",]
quiz1_score_26_35 <- quiz1_results_and_age[quiz1_results_and_age$Age == "26-35",]
quiz1_score_36_45 <- quiz1_results_and_age[quiz1_results_and_age$Age == "36-45",]
quiz1_score_46_55 <- quiz1_results_and_age[quiz1_results_and_age$Age == "46-55",]
quiz1_score_56_65 <- quiz1_results_and_age[quiz1_results_and_age$Age == "56-65",]
quiz1_score_over_65 <- quiz1_results_and_age[quiz1_results_and_age$Age == "Over 65",]
# Side-by-side boxplots for all age groups
boxplot(quiz1_results_and_age$QuizScore,
        quiz1_score_18_25$QuizScore,
        quiz1_score_26_35$QuizScore,
        quiz1_score_36_45$QuizScore,
        quiz1_score_46_55$QuizScore,
        quiz1_score_56_65$QuizScore,
        quiz1_score_over_65$QuizScore,
        main="Quiz 1 Scores by Age Group",
        ylab = "Score (%)",
        names = c("All", "18-25", "26-35", "36-45", "46-55", "56-65", "Over 65"),
        col = c("aquamarine3", "bisque2", "bisque2", "bisque2", "bisque2", "bisque2", "bisque2"))
# Is age category a confounding variable in the relationship between group and score?
presurvey_group_age <- data.frame(presurvey$ParticipantID, presurvey$Group, presurvey$Age)
names(presurvey_group_age) <- c('ParticipantID', 'Group','Age')
# Merge for quiz 1 results
quiz1_results_and_group_and_age <- merge(presurvey_group_age, quiz1_scores_by_participant)
# Remove ParticipantID column, not needed for aggregating results
quiz1_results_and_group_and_age$ParticipantID <- NULL
# Get a frequency table with age and group
group_by_age <- table(quiz1_results_and_group_and_age$Group, quiz1_results_and_group_and_age$Age)
group_by_age
# Result
#          18-25 26-35 36-45 46-55 56-65 Over 65
#  Control     1     4     4     7     9       3
#  ST          2     3     5     6     4       6
#  Sim         2     5     4     7     4       2
#  ST+Sim      2     3     5     4     2      12
# Get a table of means by Group and Age Group
with(quiz1_results_and_group_and_age, tapply(QuizScore, list(Group, Age), mean))
# Result
#          18-25    26-35 36-45    46-55    56-65  Over 65
#  Control  78.0 75.25000 74.50 73.00000 66.11111 66.33333
#  ST       73.0 75.00000 76.40 74.66667 68.75000 79.16667
#  Sim      87.5 84.80000 70.75 72.28571 80.75000 85.00000
#  ST+Sim   66.0 75.33333 84.60 61.50000 66.00000 73.58333
# Repeat but reduce the age categories, there are too few observations to stratify according to 6 categories
quiz1_results_and_group_and_age_adjusted <- quiz1_results_and_group_and_age
# Collapse the six age bands into three via a named lookup vector.
# This replaces a six-deep nested ifelse() chain: the whole band mapping is
# declared in one place, which is easier to audit and to extend.
age_band_map <- c("18-25"   = "18-35",
                  "26-35"   = "18-35",
                  "36-45"   = "36-55",
                  "46-55"   = "36-55",
                  "56-65"   = "Over 56",
                  "Over 65" = "Over 56")
age_adjusted <- unname(age_band_map[as.character(quiz1_results_and_group_and_age_adjusted$Age)])
# Match the original ifelse() chain exactly: an unrecognised (non-missing)
# age maps to '' while a missing Age stays NA.
age_adjusted[is.na(age_adjusted) & !is.na(quiz1_results_and_group_and_age_adjusted$Age)] <- ''
quiz1_results_and_group_and_age_adjusted$Age_adjusted <- age_adjusted
# Get a frequency table with adjusted age and group
group_by_age_adjusted <- table(quiz1_results_and_group_and_age_adjusted$Group, quiz1_results_and_group_and_age_adjusted$Age_adjusted)
group_by_age_adjusted
# Result
#          18-35 36-55 Over 56
# Control      5    11      12
# ST           5    11      10
# Sim          7    11       6
# ST+Sim       5     9      14
# Get a table of means by Group and adjusted Age Group
with(quiz1_results_and_group_and_age_adjusted, tapply(QuizScore, list(Group, Age_adjusted), mean))
# Result
#            18-35    36-55  Over 56
# Control 75.80000 73.54545 66.16667
# ST      74.20000 75.45455 75.00000
# Sim     85.57143 71.72727 82.16667
# ST+Sim  71.60000 74.33333 72.50000
#######################
# 3 EDUCATIONAL SCORE #
#######################
# Educational Attainment Scores:
# 1 Leaving Certificate
# 2 Degree or equivalent
# 3 Higher Diploma or Masters Degree
# 4 PhD
# First, calculate mean and median ed score for all participants (categorical data so it's approximate)
mean(presurvey$EdScore) # 2.603774
sort(table(presurvey$EdScore)) # mode is 3 (last entry of the sorted frequency table)
median(presurvey$EdScore) # 3
edscore_all <- table(presurvey$EdScore)
edscore_all
# Result:
# 1 2 3 4
# 9 39 43 15
barplot(edscore_all,
        main = "Educational Attainment: All Participants",
        names = c("Leaving\nCert", "Degree", "Masters", "PhD"),
        ylab = "No of participants",
        col = brewer.pal(nrow(edscore_all), "Set2"))
# Educational attainment and quizscore: Is there a relationship?
# Build a small ID/group/ed-score frame, then join it to the quiz 1 scores.
presurvey_ed <- data.frame(presurvey$ParticipantID, presurvey$Group, presurvey$EdScore)
names(presurvey_ed) <- c('ParticipantID', 'Group', 'EdScore')
# Merge for each set of quiz results
# (merge() joins on the shared ParticipantID column)
quiz1_results_and_ed <- merge(presurvey_ed, quiz1_scores_by_participant)
# Get a table of means by Ed
ed_means <- with(quiz1_results_and_ed, tapply(QuizScore, EdScore, mean))
ed_means
# Result:
# 1 2 3 4
# 71.77778 70.76923 75.86047 79.20000
# Get quiz 1 scores per educational attainment level
quiz1_score_ed1 <- quiz1_results_and_ed[quiz1_results_and_ed$EdScore == 1,]
quiz1_score_ed2 <- quiz1_results_and_ed[quiz1_results_and_ed$EdScore == 2,]
quiz1_score_ed3 <- quiz1_results_and_ed[quiz1_results_and_ed$EdScore == 3,]
quiz1_score_ed4 <- quiz1_results_and_ed[quiz1_results_and_ed$EdScore == 4,]
# Side-by-side boxplots for all educational attainment levels
boxplot(quiz1_results_and_ed$QuizScore,
        quiz1_score_ed1$QuizScore,
        quiz1_score_ed2$QuizScore,
        quiz1_score_ed3$QuizScore,
        quiz1_score_ed4$QuizScore,
        main="Quiz 1 Scores by Educational Attainment",
        ylab = "Score (%)",
        names = c("All", "Leaving Cert", "Degree", "Masters", "PhD"),
        col = c("aquamarine3", "bisque2", "bisque2", "bisque2", "bisque2"))
# Create a frequency table of Group and EdScore
edscore_by_group <- table(presurvey$EdScore, presurvey$Group)
# I used the below for transposing the table, easier for my written report:
edscore_by_group_flipped <- table(presurvey$Group, presurvey$EdScore)
edscore_by_group_flipped
# Result:
#           1  2  3  4
#  Control  1 10 12  5
#  ST       3 11 11  1
#  Sim      3  7 11  3
#  ST+Sim   2 11  9  6
# Use Barplot with bars beside option
barplot(edscore_by_group,
        beside=T,
        main="Educational Attainment by Group",
        legend=TRUE,
        legend.text=c("Leaving Cert", "Degree", "Masters", "PhD"),
        ylab="No of participants",
        xlab="Group",
        ylim = c(0,15),
        names.arg= c("Control", "ST", "Sim", "ST+Sim"),
        col = brewer.pal(nrow(edscore_by_group), "Set2"))
# Repeat but reduce the ed levels, there are too few observations to stratify
# (collapse to two bands: 1-2 = up to degree, 3-4 = postgraduate)
quiz1_results_and_ed_adjusted <- quiz1_results_and_ed
quiz1_results_and_ed_adjusted$Ed_adjusted <-
  ifelse(quiz1_results_and_ed_adjusted$EdScore== 1 | quiz1_results_and_ed_adjusted$EdScore== 2, "1-2",
         ifelse(quiz1_results_and_ed_adjusted$EdScore== 3 | quiz1_results_and_ed_adjusted$EdScore== 4, "3-4", ''))
reduce_edscore_by_group <- table(quiz1_results_and_ed_adjusted$Group, quiz1_results_and_ed_adjusted$Ed_adjusted)
reduce_edscore_by_group
# Result:
#          1-2 3-4
#  Control  11  17
#  ST       14  12
#  Sim      10  14
#  ST+Sim   13  15
# Chi-squared test on quiz1 data: are educational score and group independent?
library(gmodels)
CrossTable(quiz1_results_and_ed_adjusted$Group,
           quiz1_results_and_ed_adjusted$Ed_adjusted,
           digits=1,
           expected=TRUE,
           prop.r=TRUE,
           prop.c=TRUE,
           prop.t=FALSE,
           prop.chisq=TRUE,
           sresid=FALSE,
           format=c("SPSS"),
           dnn = c("Group", "Ed Score"))
# Result: the p = 0.7250024 means we cannot reject the null hypothesis that the variables are independent
####################################
# 4 PRIOR SUSTAINABILITY KNOWLEDGE #
####################################
# Prior Sustainability Scores:
# 0 None at all
# 1 A little
# 2 A moderate amount
# 3 A lot
# First, calculate mean and median for all participants (categorical data so it's approximate)
mean(presurvey$PriorSustKnowledgeAdjusted) # 1.198113
sort(table(presurvey$PriorSustKnowledgeAdjusted)) # mode is 0
median(presurvey$PriorSustKnowledgeAdjusted) # 1
sus_score_all <- table(presurvey$PriorSustKnowledgeAdjusted)
sus_score_all
# Result:
# 0 1 2 3
# 38 28 21 19
barplot(sus_score_all,
main = "Prior Sustainability Knowledge: All Participants",
names = c("None at all", "A little", "A moderate amount", "A lot"),
ylab = "No of participants",
col = brewer.pal(nrow(sus_score_all), "Set2"))
# Prior sustainability knowledge by group
# Frequency table (rows = score, cols = group) for plotting, plus a
# flipped copy (rows = group) for reporting
sus_score_by_group <- table(presurvey$PriorSustKnowledgeAdjusted, presurvey$Group)
sus_score_by_group_flipped <- table(presurvey$Group, presurvey$PriorSustKnowledgeAdjusted)
sus_score_by_group_flipped
# Result
#           0  1  2  3
# Control  10  8  5  5
# ST        6  4  6 10
# Sim       8  9  5  2
# ST+Sim   14  7  5  2
# Barplot sustainability knowledge by group.
# Supplying legend.text alone enables the legend: a bare `legend=TRUE` is not
# a barplot() argument (it fell through `...` to the low-level plot calls,
# producing '"legend" is not a graphical parameter' warnings), so it is dropped.
barplot(sus_score_by_group,
        beside=TRUE,
        main="Prior Sustainability Knowledge by Group",
        legend.text=c("None at all", "A little", "A moderate amount", "A lot"),
        ylab="No of participants",
        args.legend = list(x = "top"),
        names.arg= c("Control", "ST", "Sim", "ST+Sim"),
        col = brewer.pal(nrow(sus_score_all), "Set2"))
# Boxplot sustainability knowledge for all participants, and by group
boxplot(presurvey$PriorSustKnowledgeAdjusted,
        presurvey_group0$PriorSustKnowledgeAdjusted,
        presurvey_group1$PriorSustKnowledgeAdjusted,
        presurvey_group2$PriorSustKnowledgeAdjusted,
        presurvey_group3$PriorSustKnowledgeAdjusted,
        main="Prior Sustainability Knowledge Scores by Group",
        ylab="Sustainability Knowledge Score",
        col= c("aquamarine3", "azure3", "bisque2", "bisque2", "bisque2"),
        names = c("All", "Control", "ST", "Sim", "ST+Sim"))
# Prior sustainability knowledge and quizscore: is there a relationship?
# Keep ParticipantID plus the knowledge score (as a factor) for merging
presurvey_prior_sust_know <- data.frame(presurvey$ParticipantID, as.factor(presurvey$PriorSustKnowledgeAdjusted))
names(presurvey_prior_sust_know) <- c('ParticipantID', 'PriorSustKnowledgeAdjusted')
# Merge with quiz 1 scores on the shared ParticipantID column
quiz1_results_and_prior_sust_know <- merge(presurvey_prior_sust_know, quiz1_scores_by_participant)
# Get the quiz 1 scores per prior sust knowledge category
quiz1_score_0 <- quiz1_results_and_prior_sust_know[quiz1_results_and_prior_sust_know$PriorSustKnowledgeAdjusted == "0",]
quiz1_score_1 <- quiz1_results_and_prior_sust_know[quiz1_results_and_prior_sust_know$PriorSustKnowledgeAdjusted == "1",]
quiz1_score_2 <- quiz1_results_and_prior_sust_know[quiz1_results_and_prior_sust_know$PriorSustKnowledgeAdjusted == "2",]
quiz1_score_3 <- quiz1_results_and_prior_sust_know[quiz1_results_and_prior_sust_know$PriorSustKnowledgeAdjusted == "3",]
# Side-by-side boxplots: overall distribution first, then one box per level
boxplot(quiz1_results_and_prior_sust_know$QuizScore,
        quiz1_score_0$QuizScore,
        quiz1_score_1$QuizScore,
        quiz1_score_2$QuizScore,
        quiz1_score_3$QuizScore,
        main="Quiz 1 Scores by Prior Sustainability Knowledge",
        ylab = "Score (%)",
        names = c("All", "None at all", "A little", "Moderate", "A lot"),
        xlab = "Prior Sustainability Knowledge",
        col = c("aquamarine3", "bisque2", "bisque2", "bisque2", "bisque2"))
# Is prior sustainability experience a confounding variable in the relationship between group and score?
presurvey_group_sus_knowledge <- data.frame(presurvey$ParticipantID, presurvey$Group, presurvey$PriorSustKnowledgeAdjusted)
names(presurvey_group_sus_knowledge) <- c('ParticipantID', 'Group','PriorSustKnowledgeAdjusted')
# Merge for quiz 1 results
quiz1_results_and_group_and_sus_knowledge <- merge(presurvey_group_sus_knowledge, quiz1_scores_by_participant)
# Remove ParticipantID column, not needed for aggregating results
quiz1_results_and_group_and_sus_knowledge$ParticipantID <- NULL
# Table of mean quiz scores for each Group x Prior Sus Knowledge cell
with(quiz1_results_and_group_and_sus_knowledge, tapply(QuizScore, list(Group, PriorSustKnowledgeAdjusted), mean))
# Result
#                0        1        2    3
# Control 70.10000 69.25000 73.20000 72.2
# ST      67.33333 73.00000 78.83333 78.2
# Sim     80.62500 77.22222 73.20000 87.5
# ST+Sim  75.21429 67.71429 69.40000 84.0
# Try chi squared test to test independence of group and prior sustainability knowledge:
# library(gmodels)
CrossTable(quiz1_results_and_group_and_sus_knowledge$Group,
           quiz1_results_and_group_and_sus_knowledge$PriorSustKnowledgeAdjusted,
           digits=1,
           expected=TRUE,
           prop.r=TRUE,
           prop.c=TRUE,
           prop.t=FALSE,
           prop.chisq=TRUE,
           sresid=FALSE,
           format=c("SPSS"),
           dnn = c("Group", "Prior Sus Knowledge"))
# Not enough observations in some of the cells, so not valid for Chi-squared test
# Repeat but reduce the levels: collapse 0/1 into "0-1" and 2/3 into "2-3"
# so that expected cell counts are large enough for the test
quiz1_results_and_sus_adjusted <- quiz1_results_and_group_and_sus_knowledge
quiz1_results_and_sus_adjusted$sus_adjusted <-
  ifelse(quiz1_results_and_sus_adjusted$PriorSustKnowledgeAdjusted== 0 |
           quiz1_results_and_sus_adjusted$PriorSustKnowledgeAdjusted== 1, "0-1",
         ifelse(quiz1_results_and_sus_adjusted$PriorSustKnowledgeAdjusted== 2 |
                  quiz1_results_and_sus_adjusted$PriorSustKnowledgeAdjusted== 3, "2-3", ''))
CrossTable(quiz1_results_and_sus_adjusted$Group,
           quiz1_results_and_sus_adjusted$sus_adjusted,
           digits=1,
           expected=TRUE,
           prop.r=TRUE,
           prop.c=TRUE,
           prop.t=FALSE,
           prop.chisq=TRUE,
           sresid=FALSE,
           format=c("SPSS"),
           dnn = c("Group", "Prior Sus Knowledge"))
# Result: the p = 0.02927516 is significant, so we REJECT the null hypothesis that the variables are independent
###########################
# 5 Prior ST/SD knowledge #
###########################
# Prior STSD Scores (ordinal Likert-type responses):
# 0 None at all
# 1 A little
# 2 A moderate amount
# 3 A lot
# First, calculate mean and median for all participants (categorical data so it's approximate)
mean(presurvey$PriorSTSDKnowledge) # 0.2264151
sort(table(presurvey$PriorSTSDKnowledge)) # Mode 0
median(presurvey$PriorSTSDKnowledge) # Result 0
sdst_score_all <- table(presurvey$PriorSTSDKnowledge)
sdst_score_all
# Result:
#  0  1  2  3
# 92  7  4  3
barplot(sdst_score_all,
        main = "Prior Systems Thinking / System Dynamics Knowledge: All Participants",
        names = c("None at all", "A little", "A moderate amount", "A lot"),
        ylab = "No of participants",
        ylim = c(0, 90),
        col = brewer.pal(nrow(sdst_score_all), "Set2"))
# Prior Systems Thinking / System Dynamics Knowledge by group
# Create a frequency table of Group and Prior ST/SD Knowledge
stsd_score_by_group <- table(presurvey$PriorSTSDKnowledge, presurvey$Group)
stsd_score_by_group_flipped <- table(presurvey$Group, presurvey$PriorSTSDKnowledge)
stsd_score_by_group_flipped
# Result
#          0  1  2  3
# Control 21  4  2  1
# ST      25  1  0  0
# Sim     21  2  0  1
# ST+Sim  25  0  2  1
# legend.text alone enables the legend: a bare `legend=TRUE` is not a
# barplot() argument (it leaked through `...` with a warning), so it is dropped.
barplot(stsd_score_by_group,
        beside=TRUE,
        main="Prior Systems Thinking / System Dynamics Knowledge by Group",
        legend.text=c("None", "A little", "A moderate amount", "A lot"),
        args.legend = list(x = "top", ncol = 2),
        ylim = c(0,30),
        ylab="No of participants",
        names.arg= c("Control", "ST", "Sim", "ST+Sim"),
        col = brewer.pal(nrow(sdst_score_all), "Set2"))
# Prior ST knowledge and quiz score: Is there a relationship?
# Keep ParticipantID plus the ST/SD knowledge score (as a factor) for merging
presurvey_prior_st_know <- data.frame(presurvey$ParticipantID, as.factor(presurvey$PriorSTSDKnowledge))
names(presurvey_prior_st_know) <- c('ParticipantID', 'PriorSTSDKnowledge')
# Merge for quiz 1 results
quiz1_results_and_prior_st_know <- merge(presurvey_prior_st_know, quiz1_scores_by_participant)
# Get the quiz 1 scores per prior ST knowledge category
quiz1_st_score_0 <- quiz1_results_and_prior_st_know[quiz1_results_and_prior_st_know$PriorSTSDKnowledge == "0",]
quiz1_st_score_1 <- quiz1_results_and_prior_st_know[quiz1_results_and_prior_st_know$PriorSTSDKnowledge == "1",]
quiz1_st_score_2 <- quiz1_results_and_prior_st_know[quiz1_results_and_prior_st_know$PriorSTSDKnowledge == "2",]
quiz1_st_score_3 <- quiz1_results_and_prior_st_know[quiz1_results_and_prior_st_know$PriorSTSDKnowledge == "3",]
# Side-by-side boxplots: overall distribution first, then one box per level
boxplot(quiz1_results_and_prior_st_know$QuizScore,
        quiz1_st_score_0$QuizScore,
        quiz1_st_score_1$QuizScore,
        quiz1_st_score_2$QuizScore,
        quiz1_st_score_3$QuizScore,
        main="Quiz 1 Scores by Prior Systems Thinking Knowledge",
        ylab = "Score (%)",
        names = c("All", "None at all", "A little", "Moderate", "A lot"),
        # Fix: the axis previously said "Prior Sustainability Knowledge",
        # a copy-paste left-over from the sustainability section above
        xlab = "Prior Systems Thinking Knowledge",
        col = c("aquamarine3", "bisque2", "bisque2", "bisque2", "bisque2"))
##############################################
# 6 Occupational/educational relevance score #
##############################################
# Prior OccOrStudyRelevanceScore Scores (ordinal Likert-type responses):
# 0 Not at all relevant
# 1 A little relevant
# 2 Moderately relevant
# 3 Quite relevant
# 4 Highly relevant
# First, calculate mean and median ed score for all participants (categorical data so it's approximate)
mean(presurvey$OccOrStudyRelevanceScore) # 1.301887
sort(table(presurvey$OccOrStudyRelevanceScore)) # Mode is 0
median(presurvey$OccOrStudyRelevanceScore) # Result 1
occ_score_all <- table(presurvey$OccOrStudyRelevanceScore)
occ_score_all
# Result:
#  0  1  2  3  4
# 38 30 10 24  4
barplot(occ_score_all,
        main = "Occupational or Educational Relevance: All Participants",
        names = c("Not at all", "A little", "Moderately", "Quite", "Highly"),
        ylab = "No of participants",
        xlab = "How Relevant",
        col = brewer.pal(nrow(occ_score_all), "Set2"))
# Create a frequency table of Group and EdOccRelevance
ed_occ_rel_score_by_group <- table(presurvey$OccOrStudyRelevanceScore, presurvey$Group)
# Flip the matrix for reporting purposes
ed_occ_rel_score_by_group_flipped <- table(presurvey$Group, presurvey$OccOrStudyRelevanceScore)
ed_occ_rel_score_by_group_flipped
# legend.text alone enables the legend: a bare `legend=TRUE` is not a
# barplot() argument (it leaked through `...` with a warning), so it is dropped.
barplot(ed_occ_rel_score_by_group,
        beside=TRUE,
        main="Occupational or Educational Relevance by Group",
        legend.text=c("Not at all", "A little", "Moderately", "Quite", "Highly"),
        ylab="No of participants",
        xlab = "How Relevant",
        ylim = c(0,13),
        names.arg= c("Control", "ST", "Sim", "ST+Sim"),
        col = brewer.pal(nrow(occ_score_all), "Set2"))
# Occupational or educational relevance and quiz score: is there a relationship?
# Keep ParticipantID plus the relevance score (as a factor) for merging
presurvey_prior_occ <- data.frame(presurvey$ParticipantID, as.factor(presurvey$OccOrStudyRelevanceScore))
names(presurvey_prior_occ) <- c('ParticipantID', 'OccOrStudyRelevanceScore')
# Merge for quiz 1 results
quiz1_results_and_prior_occ <- merge(presurvey_prior_occ, quiz1_scores_by_participant)
# Get the quiz 1 scores per relevance category
quiz1_occ_score_0 <- quiz1_results_and_prior_occ[quiz1_results_and_prior_occ$OccOrStudyRelevanceScore == "0",]
quiz1_occ_score_1 <- quiz1_results_and_prior_occ[quiz1_results_and_prior_occ$OccOrStudyRelevanceScore == "1",]
quiz1_occ_score_2 <- quiz1_results_and_prior_occ[quiz1_results_and_prior_occ$OccOrStudyRelevanceScore == "2",]
quiz1_occ_score_3 <- quiz1_results_and_prior_occ[quiz1_results_and_prior_occ$OccOrStudyRelevanceScore == "3",]
quiz1_occ_score_4 <- quiz1_results_and_prior_occ[quiz1_results_and_prior_occ$OccOrStudyRelevanceScore == "4",]
# Side-by-side boxplots: overall distribution first, then one box per level
boxplot(quiz1_results_and_prior_occ$QuizScore,
        quiz1_occ_score_0$QuizScore,
        quiz1_occ_score_1$QuizScore,
        quiz1_occ_score_2$QuizScore,
        quiz1_occ_score_3$QuizScore,
        quiz1_occ_score_4$QuizScore,
        main="Quiz 1 Scores by Occupational or Educational Relevance",
        ylab = "Score (%)",
        names = c("All", "Not at all", "A little", "Moderately", "Quite", "Highly"),
        xlab = "Whether Occupation or Education Relevant",
        col = c("aquamarine3", "bisque2", "bisque2", "bisque2", "bisque2", "bisque2"))
################
# 7 ENGAGEMENT #
################
# Note: These engagement scores are for quiz 1. Zero engagers for quiz 1 were already removed from
# the dataset of 106, because they did not properly engage with the introduction, ST and/or Sim sections,
# making the data unsuitable for assessing the impact of ST and/or Sim on learning outcomes.
# Engagement scores:
# 0 Unacceptable
# 1 Minimal
# 2 Good
# Engagement for ALL participants for Quiz 1
engagement_quiz1_all <- quiz1$Engagement
# Crosstab: Engagement frequencies (score 0 absent: zero engagers removed above)
quiz1_engagement_table <- table(engagement_quiz1_all)
quiz1_engagement_table
# Result
#   1   2
#   6 100
# Crosstab: Engagement level by group
quiz1_engagement_by_group <- table(quiz1$Engagement, quiz1$Group)
quiz1_engagement_by_group
# Result
#   Control ST Sim ST+Sim
# 1       2  2   2      0
# 2      26 24  22     28
############
# 8 DELAYS #
############
# Note: These delay scores are for quiz 1.
# Delay scores:
# 0 No significant delay
# 1 Significant delay
# Delays for ALL participants for Quiz 1
delays_quiz1_all <- quiz1$Delay
# Crosstab: Delay frequencies
quiz1_delays_table <- table(delays_quiz1_all)
quiz1_delays_table
# Result
#  0  1
# 89 17
# Is number of delays a confounding variable in the relationship between group and score?
# Extract quiz1 entries where there was a delay
delay_quiz1 <- quiz1[quiz1$Delay>0,]
# Create a frequency table, no of delays by group
quiz1_delays <- table(delay_quiz1$Delay, delay_quiz1$Group)
quiz1_delays
# Result (Delay=1)
#   Control ST Sim ST+Sim
# 1       0  4   2     11
# Delays by group barplot (one Set2 colour per group column)
barplot(quiz1_delays,
        main = "Delays for Quiz 1 by Group",
        names = c("Control", "ST", "Sim", "ST+Sim"),
        ylab = "No of participants",
        col = brewer.pal(ncol(quiz1_delays), "Set2"))
# Table of mean quiz scores by Group and Delay status
quiz1_scores_delays <- data.frame(quiz1$Group, quiz1$Delay, quiz1$QuizScore)
names(quiz1_scores_delays) <- c('Group', 'Delay', 'QuizScore')
with(quiz1_scores_delays, tapply(QuizScore, list(Group, Delay), mean))
# Results (NA for Control: no Control participant experienced a delay):
#                0    1
# Control 70.78571   NA
# ST      75.09091 79.5
# Sim     79.45455 66.5
# ST+Sim  72.88235 73.0
|
library(blavaan)
### Name: blavCompare
### Title: Bayesian model comparisons.
### Aliases: blavCompare BF
### ** Examples
## Not run:
##D hsm1 <- ' visual =~ x1 + x2 + x3 + x4
##D textual =~ x4 + x5 + x6
##D speed =~ x7 + x8 + x9 '
##D
##D fit1 <- bcfa(hsm1, data=HolzingerSwineford1939)
##D
##D hsm2 <- ' visual =~ x1 + x2 + x3
##D textual =~ x4 + x5 + x6 + x7
##D speed =~ x7 + x8 + x9 '
##D
##D fit2 <- bcfa(hsm2, data=HolzingerSwineford1939)
##D
##D blavCompare(fit1, fit2)
## End(Not run)
|
/data/genthat_extracted_code/blavaan/examples/blavCompare.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 552
|
r
|
library(blavaan)
### Name: blavCompare
### Title: Bayesian model comparisons.
### Aliases: blavCompare BF
### ** Examples
## Not run:
##D hsm1 <- ' visual =~ x1 + x2 + x3 + x4
##D textual =~ x4 + x5 + x6
##D speed =~ x7 + x8 + x9 '
##D
##D fit1 <- bcfa(hsm1, data=HolzingerSwineford1939)
##D
##D hsm2 <- ' visual =~ x1 + x2 + x3
##D textual =~ x4 + x5 + x6 + x7
##D speed =~ x7 + x8 + x9 '
##D
##D fit2 <- bcfa(hsm2, data=HolzingerSwineford1939)
##D
##D blavCompare(fit1, fit2)
## End(Not run)
|
#' @title Create a text2vec lexicon
#'
#' @description Tokenise a character vector and build a pruned vocabulary,
#' a vectorizer and, optionally, a document-term matrix using \pkg{text2vec}.
#'
#' @param x A character vector of documents.
#' @param doc_prop_max Maximum proportion of documents a term may appear in.
#' @param word_min Minimum total term count for a word to be retained.
#' @param word_max Maximum total term count for a word to be retained.
#' @param out_dtm If \code{TRUE}, return a document-term matrix instead of the
#'   tokenizer/vectorizer/vocabulary list.
#' @param ... Further arguments passed to \code{text2vec::prune_vocabulary}.
#'
#' @return When \code{out_dtm = TRUE}, a document-term matrix; otherwise a list
#'   with elements \code{tkn} (itoken iterator), \code{vec} (vectorizer) and
#'   \code{vocab} (pruned vocabulary).
#' @examples
#' \dontrun{create_lexicon(c("a b c", "b c d"), word_min = 1)}
create_lexicon <- function(x, doc_prop_max = 1, word_min = 5, word_max = Inf, out_dtm = FALSE, ...) {
  # Fail fast on non-character input
  if(!is.character(x)) {
    stop("x is not a character vector")
  }
  .it <- text2vec::itoken(x, progressbar = FALSE)
  .vocab <- text2vec::create_vocabulary(.it)
  # Drop overly common and overly rare terms
  .vocab <- text2vec::prune_vocabulary(.vocab, doc_proportion_max = doc_prop_max,
                                       term_count_min = word_min,
                                       term_count_max = word_max, ...)
  .vec <- text2vec::vocab_vectorizer(.vocab)
  if(out_dtm) {
    return(text2vec::create_dtm(.it, .vec))
  }
  # Implicit return of the lexicon components
  list(tkn = .it, vec = .vec, vocab = .vocab)
}
#' @title Train GloVe word embeddings
#'
#' @param text A character vector of documents.
#' @param skip_win Skip-gram window size used for the term co-occurrence matrix.
#' @param co_occur_max \code{x_max} cap on co-occurrence counts in the GloVe loss.
#' @param word_vec_len Dimensionality of the word vectors.
#' @param ... Passed on to \code{create_lexicon} (vocabulary pruning options).
#'
#' @return A data.frame of word vectors, one row per vocabulary term; the main
#'   and context components are summed.
#' @export
#'
#' @examples
#' \dontrun{create_gloVe_embeddings(docs, word_vec_len = 25L)}
create_gloVe_embeddings <- function(text, skip_win = 5L, co_occur_max = 100L, word_vec_len = 50L, ...){
  .x <- create_lexicon(text, ...)
  # Term co-occurrence matrix from the tokenizer + vectorizer pair
  .dtm <- text2vec::create_tcm(.x[[1]], .x[[2]], skip_win)
  # NOTE(review): word_vectors_size/vocabulary match older text2vec releases;
  # newer versions use `rank` and drop `vocabulary` -- confirm installed version
  .gloVe <- text2vec::GlobalVectors$new(word_vectors_size = word_vec_len, vocabulary = .x[[3]], x_max = co_occur_max)
  .wd_vec <- text2vec::fit_transform(x = .dtm, model = .gloVe, n_iter = 10L, convergence_tol = 0.01)
  .wd_vec_context <- .gloVe$components
  # Combine main and context vectors by summation
  .word_vectors <- .wd_vec + t(.wd_vec_context)
  as.data.frame(.word_vectors)
}
#' @title Aggregate word embeddings into document vectors
#'
#' @param text Required. A character vector of documents.
#' @param word_vec Required. A pre-trained matrix or data.frame of word embeddings.
#' @param has_words_as_row_names Optional. Logical. Are words row names in \code{word_vec}?
#' @param mean_vec Optional. Logical. Use the average of word embeddings?
#' @param wts Optional. Word weights: a single value recycled to every word, or
#'   one value per valid word across all documents. All-0/1 weights act as a
#'   keep/drop mask; other values scale the embedding rows.
#' @param FUN Optional. Function for transforming word embedding data to document embedding.
#' If blank, will use mean.
#'
#' @return A matrix with one row per document, or (for some weighting paths)
#'   a list of per-document embedding data.frames.
#' @export
#'
#' @examples
#' \dontrun{create_doc_vectors(docs, glove_df)}
create_doc_vectors <-
  function(text,
           word_vec,
           has_words_as_row_names = TRUE,
           mean_vec = TRUE,
           wts = NULL,
           FUN = NULL) {
    # Collapse a list of per-document word-embedding tables using optional
    # row weights and an optional per-column summary function.
    .wordEmbed_helper <- function(.res, wts, FUN) {
      if (is.null(wts) && is.null(FUN)) {
        return(.res)
      }
      .n <- sapply(.res, nrow)
      # Embedding dimensionality, hoisted up front. BUG FIX: previously .f was
      # only assigned inside the weighting branch, so calling with FUN but no
      # wts raised "object '.f' not found" at the matrix() call below.
      .f <- if (length(.res) > 0) ncol(.res[[1]]) else 0L
      if (!is.null(wts)) {
        if (length(wts) == 1)
          wts <- rep(wts, sum(.n))
        if (length(wts) != sum(.n)) {
          warning(
            "weights argument must be equal to number of valid words in text returning object for inspection"
          )
          return(.res)
        } else {
          # Map each stacked row back to its source document
          .indx <- rep(seq_along(.n), .n)
          .res <- do.call(rbind, .res)
          .wvals <- unique(wts)
          if (all(.wvals %in% c(1, 0))) {
            # Binary weights act as a row filter
            wts <- as.logical(wts)
            .res <- split(as.data.frame(.res[wts, ]), .indx[wts])
          } else {
            # Continuous weights scale each embedding row
            .wres <- sweep(x = .res,
                           MARGIN = 1,
                           STATS = wts,
                           `*`)
            .res <- split(as.data.frame(.wres), .indx)
          }
        }
      }
      if (!is.null(FUN)) {
        # Apply FUN column-wise within each document, then reshape to a
        # documents x dimensions matrix
        .res <- sapply(unlist(.res, recursive = FALSE), function(x) {
          x <- FUN(x)
        })
        .res <- matrix(.res, ncol = .f, byrow = TRUE)
      }
      return(.res)
    }
    # Normalise word_vec to a data.frame with the words in column 1
    if (!inherits(word_vec, "data.frame")) {
      word_vec <- as.data.frame(word_vec)
    }
    if (has_words_as_row_names || !is.character(word_vec[, 1])) {
      word_vec <- data.frame(row.names(word_vec), word_vec)
    }
    if (!all(sapply(word_vec[, -1], is.numeric))) {
      word_vec[, -1] <- sapply(word_vec[, -1], as.numeric)
    }
    if (!mean_vec || !is.null(wts)) {
      # Need per-word embeddings so they can be weighted/transformed here
      .res <- softmaxreg::wordEmbed(text, word_vec, meanVec = FALSE)
      if (is.null(FUN)) {
        .res <- .wordEmbed_helper(.res, wts, FUN = mean)
      } else {
        .res <- .wordEmbed_helper(.res, wts, FUN)
      }
    } else {
      # Fast path: let wordEmbed average word vectors internally
      .res <- softmaxreg::wordEmbed(text, word_vec, meanVec = mean_vec)
    }
    return(.res)
  }
#' Run t-SNE on an embedding matrix and optionally plot it.
#'
#' @param mat A numeric matrix (or object coercible to one), one row per item.
#' @param group A factor (character is coerced) used to colour the points.
#' @param labs Optional text labels drawn above each point when plotting.
#' @param plot If \code{TRUE}, draw a scatterplot; otherwise return the Rtsne result.
#' @param ... Further arguments passed to \code{Rtsne::Rtsne}.
#'
#' @return The \code{Rtsne} result when \code{plot = FALSE}; otherwise no value.
plot_rstne <- function(mat, group, labs = NULL, plot = FALSE, ...) {
  if(is.character(group)) {
    group <- as.factor(group)
  }
  # One viridis colour per group level
  .pal <- viridisLite::viridis(n = length(levels(group)))
  # FALSE spelled out (F is reassignable and discouraged)
  tsne_out <- Rtsne::Rtsne(as.matrix(mat), check_duplicates = FALSE, ...)# Run TSNE
  if(plot) {
    plot(tsne_out$Y, col = .pal[group], xlab = "word2vecx", ylab = "word2vecy", main = "t-SNE of Personality Items",
         asp = 1)
    if(!is.null(labs)) {
      text(tsne_out$Y, pos = 3, labels = labs, cex = .75)# Plot the result
    }
  } else {
    return(tsne_out)
  }
}
|
/R/machine_learning_functions.R
|
no_license
|
Shea-Fyffe/PsychStudent
|
R
| false
| false
| 4,551
|
r
|
#' @title Create a text2vec lexicon
#'
#' @description Tokenise a character vector and build a pruned vocabulary,
#' a vectorizer and, optionally, a document-term matrix using \pkg{text2vec}.
#'
#' @param x A character vector of documents.
#' @param doc_prop_max Maximum proportion of documents a term may appear in.
#' @param word_min Minimum total term count for a word to be retained.
#' @param word_max Maximum total term count for a word to be retained.
#' @param out_dtm If \code{TRUE}, return a document-term matrix instead of the
#'   tokenizer/vectorizer/vocabulary list.
#' @param ... Further arguments passed to \code{text2vec::prune_vocabulary}.
#'
#' @return When \code{out_dtm = TRUE}, a document-term matrix; otherwise a list
#'   with elements \code{tkn} (itoken iterator), \code{vec} (vectorizer) and
#'   \code{vocab} (pruned vocabulary).
#' @examples
#' \dontrun{create_lexicon(c("a b c", "b c d"), word_min = 1)}
create_lexicon <- function(x, doc_prop_max = 1, word_min = 5, word_max = Inf, out_dtm = FALSE, ...) {
  # Fail fast on non-character input
  if(!is.character(x)) {
    stop("x is not a character vector")
  }
  .it <- text2vec::itoken(x, progressbar = FALSE)
  .vocab <- text2vec::create_vocabulary(.it)
  # Drop overly common and overly rare terms
  .vocab <- text2vec::prune_vocabulary(.vocab, doc_proportion_max = doc_prop_max,
                                       term_count_min = word_min,
                                       term_count_max = word_max, ...)
  .vec <- text2vec::vocab_vectorizer(.vocab)
  if(out_dtm) {
    return(text2vec::create_dtm(.it, .vec))
  }
  # Implicit return of the lexicon components
  list(tkn = .it, vec = .vec, vocab = .vocab)
}
#' @title Train GloVe word embeddings
#'
#' @param text A character vector of documents.
#' @param skip_win Skip-gram window size used for the term co-occurrence matrix.
#' @param co_occur_max \code{x_max} cap on co-occurrence counts in the GloVe loss.
#' @param word_vec_len Dimensionality of the word vectors.
#' @param ... Passed on to \code{create_lexicon} (vocabulary pruning options).
#'
#' @return A data.frame of word vectors, one row per vocabulary term; the main
#'   and context components are summed.
#' @export
#'
#' @examples
#' \dontrun{create_gloVe_embeddings(docs, word_vec_len = 25L)}
create_gloVe_embeddings <- function(text, skip_win = 5L, co_occur_max = 100L, word_vec_len = 50L, ...){
  .x <- create_lexicon(text, ...)
  # Term co-occurrence matrix from the tokenizer + vectorizer pair
  .dtm <- text2vec::create_tcm(.x[[1]], .x[[2]], skip_win)
  # NOTE(review): word_vectors_size/vocabulary match older text2vec releases;
  # newer versions use `rank` and drop `vocabulary` -- confirm installed version
  .gloVe <- text2vec::GlobalVectors$new(word_vectors_size = word_vec_len, vocabulary = .x[[3]], x_max = co_occur_max)
  .wd_vec <- text2vec::fit_transform(x = .dtm, model = .gloVe, n_iter = 10L, convergence_tol = 0.01)
  .wd_vec_context <- .gloVe$components
  # Combine main and context vectors by summation
  .word_vectors <- .wd_vec + t(.wd_vec_context)
  as.data.frame(.word_vectors)
}
#' @title Aggregate word embeddings into document vectors
#'
#' @param text Required. A character vector of documents.
#' @param word_vec Required. A pre-trained matrix or data.frame of word embeddings.
#' @param has_words_as_row_names Optional. Logical. Are words row names in \code{word_vec}?
#' @param mean_vec Optional. Logical. Use the average of word embeddings?
#' @param wts Optional. Word weights: a single value recycled to every word, or
#'   one value per valid word across all documents. All-0/1 weights act as a
#'   keep/drop mask; other values scale the embedding rows.
#' @param FUN Optional. Function for transforming word embedding data to document embedding.
#' If blank, will use mean.
#'
#' @return A matrix with one row per document, or (for some weighting paths)
#'   a list of per-document embedding data.frames.
#' @export
#'
#' @examples
#' \dontrun{create_doc_vectors(docs, glove_df)}
create_doc_vectors <-
  function(text,
           word_vec,
           has_words_as_row_names = TRUE,
           mean_vec = TRUE,
           wts = NULL,
           FUN = NULL) {
    # Collapse a list of per-document word-embedding tables using optional
    # row weights and an optional per-column summary function.
    .wordEmbed_helper <- function(.res, wts, FUN) {
      if (is.null(wts) && is.null(FUN)) {
        return(.res)
      }
      .n <- sapply(.res, nrow)
      # Embedding dimensionality, hoisted up front. BUG FIX: previously .f was
      # only assigned inside the weighting branch, so calling with FUN but no
      # wts raised "object '.f' not found" at the matrix() call below.
      .f <- if (length(.res) > 0) ncol(.res[[1]]) else 0L
      if (!is.null(wts)) {
        if (length(wts) == 1)
          wts <- rep(wts, sum(.n))
        if (length(wts) != sum(.n)) {
          warning(
            "weights argument must be equal to number of valid words in text returning object for inspection"
          )
          return(.res)
        } else {
          # Map each stacked row back to its source document
          .indx <- rep(seq_along(.n), .n)
          .res <- do.call(rbind, .res)
          .wvals <- unique(wts)
          if (all(.wvals %in% c(1, 0))) {
            # Binary weights act as a row filter
            wts <- as.logical(wts)
            .res <- split(as.data.frame(.res[wts, ]), .indx[wts])
          } else {
            # Continuous weights scale each embedding row
            .wres <- sweep(x = .res,
                           MARGIN = 1,
                           STATS = wts,
                           `*`)
            .res <- split(as.data.frame(.wres), .indx)
          }
        }
      }
      if (!is.null(FUN)) {
        # Apply FUN column-wise within each document, then reshape to a
        # documents x dimensions matrix
        .res <- sapply(unlist(.res, recursive = FALSE), function(x) {
          x <- FUN(x)
        })
        .res <- matrix(.res, ncol = .f, byrow = TRUE)
      }
      return(.res)
    }
    # Normalise word_vec to a data.frame with the words in column 1
    if (!inherits(word_vec, "data.frame")) {
      word_vec <- as.data.frame(word_vec)
    }
    if (has_words_as_row_names || !is.character(word_vec[, 1])) {
      word_vec <- data.frame(row.names(word_vec), word_vec)
    }
    if (!all(sapply(word_vec[, -1], is.numeric))) {
      word_vec[, -1] <- sapply(word_vec[, -1], as.numeric)
    }
    if (!mean_vec || !is.null(wts)) {
      # Need per-word embeddings so they can be weighted/transformed here
      .res <- softmaxreg::wordEmbed(text, word_vec, meanVec = FALSE)
      if (is.null(FUN)) {
        .res <- .wordEmbed_helper(.res, wts, FUN = mean)
      } else {
        .res <- .wordEmbed_helper(.res, wts, FUN)
      }
    } else {
      # Fast path: let wordEmbed average word vectors internally
      .res <- softmaxreg::wordEmbed(text, word_vec, meanVec = mean_vec)
    }
    return(.res)
  }
#' Run t-SNE on an embedding matrix and optionally plot it.
#'
#' @param mat A numeric matrix (or object coercible to one), one row per item.
#' @param group A factor (character is coerced) used to colour the points.
#' @param labs Optional text labels drawn above each point when plotting.
#' @param plot If \code{TRUE}, draw a scatterplot; otherwise return the Rtsne result.
#' @param ... Further arguments passed to \code{Rtsne::Rtsne}.
#'
#' @return The \code{Rtsne} result when \code{plot = FALSE}; otherwise no value.
plot_rstne <- function(mat, group, labs = NULL, plot = FALSE, ...) {
  if(is.character(group)) {
    group <- as.factor(group)
  }
  # One viridis colour per group level
  .pal <- viridisLite::viridis(n = length(levels(group)))
  # FALSE spelled out (F is reassignable and discouraged)
  tsne_out <- Rtsne::Rtsne(as.matrix(mat), check_duplicates = FALSE, ...)# Run TSNE
  if(plot) {
    plot(tsne_out$Y, col = .pal[group], xlab = "word2vecx", ylab = "word2vecy", main = "t-SNE of Personality Items",
         asp = 1)
    if(!is.null(labs)) {
      text(tsne_out$Y, pos = 3, labels = labs, cex = .75)# Plot the result
    }
  } else {
    return(tsne_out)
  }
}
|
# Produced by the EasyShu team; see the EasyShu WeChat public account for more content
# For corrections or further study, contact WeChat: EasyCharts
library(ggplot2)
library(RColorBrewer)
# Four colours drawn from the Set2 brewer palette (indices 1, 2, 4, 5)
color_theme<-brewer.pal(7,"Set2")[c(1,2,4,5)]
mydata<-read.csv("Boxplot_Data.csv",stringsAsFactors=FALSE)
# Variant 1: muted custom fill colours, with jittered raw data points overlaid
ggplot(mydata, aes(Class, Value))+
  geom_boxplot(aes(fill = Class),size=0.25) +
  geom_jitter(width=0.2,size=1.)+
  #geom_dotplot(aes(fill = Class),binaxis = "y", stackdir = "center",dotsize = 0.4)+
  scale_fill_manual(values=c("#4F81BD","#C0504D","#9BBB59","#8064A2"))+
  theme_bw()+
  theme(legend.position="none")
# Variant 2: the same plot with fully saturated primary colours, for comparison
ggplot(mydata, aes(Class, Value))+
  geom_boxplot(aes(fill = Class),size=0.25) +
  geom_jitter(width=0.2,size=1.)+
  #geom_dotplot(aes(fill = Class),binaxis = "y", stackdir = "center",dotsize = 0.4)+
  scale_fill_manual(values=c("#FF0000","#0000FF","#00FFFF","#FF00FF"))+
  theme_bw()+
  theme(legend.position="none")
# Variant 3: default ggplot fills; outlier markers suppressed because the
# jitter layer already draws every observation
ggplot(mydata, aes(Class, Value))+
  geom_boxplot(aes(fill = Class),size=0.25,outlier.color=NA) +
  geom_jitter(width=0.2,size=1.)+
  theme_bw()+
  theme(legend.position="none")
|
/第1章 R语言编程与绘图基础/图1-7-8 不同颜色主题的图表效果.R
|
no_license
|
Easy-Shu/Beautiful-Visualization-with-R
|
R
| false
| false
| 1,120
|
r
|
# Produced by the EasyShu team; see the EasyShu WeChat public account for more content
# For corrections or further study, contact WeChat: EasyCharts
library(ggplot2)
library(RColorBrewer)
# Four colours drawn from the Set2 brewer palette (indices 1, 2, 4, 5)
color_theme<-brewer.pal(7,"Set2")[c(1,2,4,5)]
mydata<-read.csv("Boxplot_Data.csv",stringsAsFactors=FALSE)
# Variant 1: muted custom fill colours, with jittered raw data points overlaid
ggplot(mydata, aes(Class, Value))+
  geom_boxplot(aes(fill = Class),size=0.25) +
  geom_jitter(width=0.2,size=1.)+
  #geom_dotplot(aes(fill = Class),binaxis = "y", stackdir = "center",dotsize = 0.4)+
  scale_fill_manual(values=c("#4F81BD","#C0504D","#9BBB59","#8064A2"))+
  theme_bw()+
  theme(legend.position="none")
# Variant 2: the same plot with fully saturated primary colours, for comparison
ggplot(mydata, aes(Class, Value))+
  geom_boxplot(aes(fill = Class),size=0.25) +
  geom_jitter(width=0.2,size=1.)+
  #geom_dotplot(aes(fill = Class),binaxis = "y", stackdir = "center",dotsize = 0.4)+
  scale_fill_manual(values=c("#FF0000","#0000FF","#00FFFF","#FF00FF"))+
  theme_bw()+
  theme(legend.position="none")
# Variant 3: default ggplot fills; outlier markers suppressed because the
# jitter layer already draws every observation
ggplot(mydata, aes(Class, Value))+
  geom_boxplot(aes(fill = Class),size=0.25,outlier.color=NA) +
  geom_jitter(width=0.2,size=1.)+
  theme_bw()+
  theme(legend.position="none")
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/plot.clumps.r
\name{plot.clumps}
\alias{plot.clumps}
\title{Plot marker clumps on Manhattan plot.}
\usage{
plot.clumps(gwas.result, clumps, chr, region, clambda = F)
}
\arguments{
\item{gwas.result}{an object of the \code{\link[GenABEL]{gwaa.data-class}},}
\item{clumps}{a result of running the \code{\link[cgmisc]{clump.markers}} function,}
\item{chr}{chromosome to display,}
\item{region}{a vector of start and stop coordinates of a region to display,}
\item{clambda}{a logical indicating whether corrected Pc1df p-values are
to be used.}
}
\description{
Plot clumps resulting from running the \code{\link[cgmisc]{clump.markers}} function.
}
\examples{
\dontrun{plot.clumps(data, myclumps, 1, c(14172, 19239))}
}
\author{
Marcin Kierczak <\email{Marcin.Kierczak@imbim.uu.se}>
}
\seealso{
\code{\link[cgmisc]{clump.markers}}
}
\keyword{clumping}
\keyword{clumps}
\keyword{plot}
|
/man/plot.clumps.Rd
|
no_license
|
cgmisc-team/cgmisc
|
R
| false
| false
| 970
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/plot.clumps.r
\name{plot.clumps}
\alias{plot.clumps}
\title{Plot marker clumps on Manhattan plot.}
\usage{
plot.clumps(gwas.result, clumps, chr, region, clambda = F)
}
\arguments{
\item{gwas.result}{an object of the \code{\link[GenABEL]{gwaa.data-class}},}
\item{clumps}{a result of running the \code{\link[cgmisc]{clump.markers}} function,}
\item{chr}{chromosome to display,}
\item{region}{a vector of start and stop coordinates of a region to display,}
\item{clambda}{a logical indicating whether corrected Pc1df p-values are
to be used.}
}
\description{
Plot clumps resulting from running the \code{\link[cgmisc]{clump.markers}} function.
}
\examples{
\dontrun{plot.clumps(data, myclumps, 1, c(14172, 19239))}
}
\author{
Marcin Kierczak <\email{Marcin.Kierczak@imbim.uu.se}>
}
\seealso{
\code{\link[cgmisc]{clump.markers}}
}
\keyword{clumping}
\keyword{clumps}
\keyword{plot}
|
library(shiny)
# UI for a marathon-time estimator: the user supplies their fastest time
# over a shorter distance; the server renders the prediction in "result".
shinyUI(fluidPage(
  titlePanel("Estimated Marathon Time"),
  # Sidebar with sliders for the user's best time and its distance
  sidebarLayout(
    sidebarPanel(
      h1("select your fastest time"),
      # Minutes taken for the fastest run (0-150, default 30)
      sliderInput("slider_mins", "Mins:", 0,150,30),
      # Distance of that run in kilometres (0-21, default 5)
      sliderInput("distance", "KM:", 0,21,5)
    ),
    mainPanel(
      h3("Expected Marathon Time in minutes is:"),
      # Filled by output$result on the server side
      textOutput("result")
    )
  )
))
|
/ui.R
|
no_license
|
bmillaard/Shiny-app-and-repr-pitch
|
R
| false
| false
| 423
|
r
|
library(shiny)
# UI for a marathon-time estimator: the user supplies their fastest time
# over a shorter distance; the server renders the prediction in "result".
shinyUI(fluidPage(
  titlePanel("Estimated Marathon Time"),
  # Sidebar with sliders for the user's best time and its distance
  sidebarLayout(
    sidebarPanel(
      h1("select your fastest time"),
      # Minutes taken for the fastest run (0-150, default 30)
      sliderInput("slider_mins", "Mins:", 0,150,30),
      # Distance of that run in kilometres (0-21, default 5)
      sliderInput("distance", "KM:", 0,21,5)
    ),
    mainPanel(
      h3("Expected Marathon Time in minutes is:"),
      # Filled by output$result on the server side
      textOutput("result")
    )
  )
))
|
# Exploratory Data Analysis Course Project 1: Plot 1
# Read the data.
# read.R is expected to define the `data` data frame used below.
source("read.R")
# Create the PNG file as required (480x480 per the assignment spec).
png(filename="plot1.png", width = 480, height = 480)
# Draw a histogram with red bars and given labels.
with(data, hist(Global_active_power, col="red",
                main="Global Active Power",
                xlab="Global Active Power (kilowatts)"))
# Close the device so the file is flushed to disk
dev.off()
|
/plot1.R
|
no_license
|
thfetcke/ExData_Plotting1
|
R
| false
| false
| 388
|
r
|
# Exploratory Data Analysis Course Project 1: Plot 1
# Read the data.
# read.R is expected to define the `data` data frame used below.
source("read.R")
# Create the PNG file as required (480x480 per the assignment spec).
png(filename="plot1.png", width = 480, height = 480)
# Draw a histogram with red bars and given labels.
with(data, hist(Global_active_power, col="red",
                main="Global Active Power",
                xlab="Global Active Power (kilowatts)"))
# Close the device so the file is flushed to disk
dev.off()
|
#' Run the OMICFS feature-selection routine (implemented in MATLAB).
#'
#' Writes the training subset, labels and algorithm parameters to .dat files,
#' invokes the MATLAB driver script, and reads back the indices of the
#' selected features from the CSV it produces.
#'
#' @param dt data matrix / data.frame, rows = samples, columns = features.
#' @param lb class labels; coerced to numeric.
#' @param index row indices defining the training subset.
#' @param fs list of OMICFS parameters with elements K (number of features
#'   to select) and E (expanded feature count) -- semantics defined by the
#'   MATLAB side; confirm against MatlabCall.m.
#' @return vector of selected feature indices read from OMICFS_RF.csv.
CallOMICFS <- function(dt,lb,index,fs){
  # file.path() keeps paths portable; R accepts "/" separators on Windows too
  # (previously a Windows-only "..\\FS\\OMICFS\\" literal was hard-coded)
  dirpath <- file.path("..", "FS", "OMICFS")
  dt_train <- dt[index,]
  lb_train <- data.frame(lb = as.numeric(lb[index]))
  # OMICFS Parameter
  psfeanum <- fs$K
  expfeanum <- fs$E
  parameter <- data.frame(psfeanum,expfeanum)
  # Prepare the data for OMICFS in matlab
  write.dat(dt_train , file.path(dirpath, "dt_train.dat"))
  write.dat(lb_train , file.path(dirpath, "lb_train.dat"))
  write.dat(parameter , file.path(dirpath, "parameter.dat"))
  # Call OMICFS from matlab; wait = TRUE blocks until the script has written its output
  run_matlab_script(file.path(dirpath, "MatlabCall.m"),
                    verbose = TRUE, desktop = FALSE, splash = FALSE,
                    display = FALSE, wait = TRUE, single_thread = FALSE)
  # Read the result; read.table defaults to header = FALSE, matching the raw CSV
  K_index <- unlist(read.table(file.path(dirpath, "OMICFS_RF.csv"), sep = ","))
  return(K_index)
}
|
/Evaluate/CallOMICFS.R
|
permissive
|
ZongN/FeatureSelection
|
R
| false
| false
| 861
|
r
|
#' Run the OMICFS feature-selection routine (implemented in MATLAB).
#'
#' Writes the training subset, labels and algorithm parameters to .dat files,
#' invokes the MATLAB driver script, and reads back the indices of the
#' selected features from the CSV it produces.
#'
#' @param dt data matrix / data.frame, rows = samples, columns = features.
#' @param lb class labels; coerced to numeric.
#' @param index row indices defining the training subset.
#' @param fs list of OMICFS parameters with elements K (number of features
#'   to select) and E (expanded feature count) -- semantics defined by the
#'   MATLAB side; confirm against MatlabCall.m.
#' @return vector of selected feature indices read from OMICFS_RF.csv.
CallOMICFS <- function(dt,lb,index,fs){
  # file.path() keeps paths portable; R accepts "/" separators on Windows too
  # (previously a Windows-only "..\\FS\\OMICFS\\" literal was hard-coded)
  dirpath <- file.path("..", "FS", "OMICFS")
  dt_train <- dt[index,]
  lb_train <- data.frame(lb = as.numeric(lb[index]))
  # OMICFS Parameter
  psfeanum <- fs$K
  expfeanum <- fs$E
  parameter <- data.frame(psfeanum,expfeanum)
  # Prepare the data for OMICFS in matlab
  write.dat(dt_train , file.path(dirpath, "dt_train.dat"))
  write.dat(lb_train , file.path(dirpath, "lb_train.dat"))
  write.dat(parameter , file.path(dirpath, "parameter.dat"))
  # Call OMICFS from matlab; wait = TRUE blocks until the script has written its output
  run_matlab_script(file.path(dirpath, "MatlabCall.m"),
                    verbose = TRUE, desktop = FALSE, splash = FALSE,
                    display = FALSE, wait = TRUE, single_thread = FALSE)
  # Read the result; read.table defaults to header = FALSE, matching the raw CSV
  K_index <- unlist(read.table(file.path(dirpath, "OMICFS_RF.csv"), sep = ","))
  return(K_index)
}
|
#' @include addClusterCols.R
#' @include spatialPlot.R
#' @import shiny
#'
NULL
#' RunShinySpaniel
#'
#' A function to visualise Spatial Transcriptomics. It requires a prepocessed
#' Seurat Object or a SingleCellExperiment object as well as a rasterised image
#' saved as an .rds object. There are 4 plots available in the app showing:
#' a) the number of genes detected per spot,
#' b) the number of reads detected per spot,
#' c) clustering results,
#' d) the gene expression of a selected gene."
#' To view the clustering results the columns of the meta.data or colData
#' containing clustering results must be prefixed with cluster_ . This can be
#' done by using the markClusterCol() function included in Spaniel.
#'
#' @return Runs a Shiny App
#'
#' @examples
#' ## mark the columns of metadata/colData that contain clustering
#' ## information see ?markClusterCol for more details#'
#' sObj <- readRDS(file.path(system.file(package = "Spaniel"),
#' "extdata/SeuratData.rds"))
#' sObj <- markClusterCol(sObj, "res")
#'
#' ### parse background image
#' imgFile <- file.path(system.file(package = "Spaniel"),
#' "HE_Rep1_resized.jpg")
#' img <- parseImage(imgFile)
#'
#' ## run shinySpaniel (upload data.rds and image.rds in the shiny app)
#' ## Not Run:
#' # runShinySpaniel()
#' @export
runShinySpaniel <-function(){
    ## Launch the Spaniel Shiny app for browsing spatial transcriptomics
    ## data. The user uploads two .rds files (a preprocessed data object and
    ## a rasterised background image) and chooses one of four plot types,
    ## which are rendered by spanielPlot() on the "View Plots" tab.
    ## Raise shiny's upload limit to 100 MB: the serialised data and image
    ## objects are typically larger than the default limit.
    options(shiny.maxRequestSize=100*1024^2)
    ui <- pageWithSidebar(
        # App title ----
        headerPanel("Spatial Transcriptomics"),
        # Sidebar panel for inputs ----
        sidebarPanel(
        # Input: Select a file ----
        fileInput("dataFile", "Upload Data File",
                multiple = FALSE,
                accept = c(".rds")),
        # Title: Upload image file ----
        fileInput("imageFile", "Upload Image File",
                multiple = FALSE,
                accept = c(".rds")),
        # Extra options for cluster or gene plots
        uiOutput("plotType"),
        # Input: for type of plot ----
        uiOutput("moreControls"),
        p(
            #"Side End"
        )
    ),
    # Main panel for displaying outputs ----
    mainPanel(
        #plotOutput("plotPressed"),
        tabsetPanel(id = "inTabset",
                    type = "tabs",
                    tabPanel("Getting started",
                            value = "panel1",
                            h3("Plotting Spatial Data"),
                            p("1. Upload the data.rds file and image.rds
                                files. It can take a couple of minutes for
                                the data to upload"),
                            p("2. Select the type of plot you want to
                            look at. There are 4 plots available
                            showing:
                                a) the number of genes detected per spot,
                                b) the number of reads detected per spot,
                                c) clustering results,
                                d) the gene expression of a selected
                            gene."),
                            p("3. For the cluster plot you must
                            also select the cluster resolution you
                            wish to plot
                                (generally a lower resolution equates to
                            fewer clusters."),
                            p("4. For the gene plot you must select a gene
                            from the drop downlist. There is a bit of a
                            delay whilst the gene list is loading.
                            You can jump to the gene in list by typing
                            the first few letters of the gene
                            of interest."),
                            p("5. Click 'Plot' button in the side bar ")
                            ),
                    tabPanel(title = "View Plots",
                            value = "panel2",
                            plotOutput("plotPressed"))
        )
    )
)
############ Server #################################
# Define server ----
server <- function(input, output, session) {
    output$summary <- renderPrint({
        "1. Upload the data.rds file and image.rds files.
        It can take a couple of minutes for the data to upload"
    })
    ### S4 object
    ## Lazily deserialise the uploaded data object (Seurat or
    ## SingleCellExperiment, per the roxygen docs above); req() blocks
    ## downstream reactives until a file has been uploaded.
    Object <- reactive({
        req(input$dataFile)
        readRDS(input$dataFile$datapath)
    })
    ### Image object
    ## The background image, pre-rasterised and saved as .rds.
    imageObj <- reactive({
        req(input$imageFile)
        readRDS(input$imageFile$datapath)
    })
    ## Choose plot type
    ## if image and seurat objects uploaded
    ## The radio buttons only appear once both files are present.
    output$plotType <- renderUI({
        req(input$dataFile)
        req(input$imageFile)
        radioButtons("Type_Of_Plot", "Type of plot:",
                    c("Gene Number Per Spot Plot" = "NoGenes",
                    "Counts Per Spot Plot" = "CountsPerSpot",
                    "Cluster Plot" = "Cluster",
                    "Gene Plot" = "Gene"))
    })
    #### Cluster list object TO ADD!!
    ## Metadata columns prefixed "cluster_" (set via markClusterCol()).
    clusterList <- reactive({
        req(Object())
        metadata = getMetadata(Object())
        colnames(metadata)[grep("cluster_", colnames(metadata))]
    })
    ### Extra options for Gene or Cluster plots
    ## Cluster and Gene plots need an extra selector; the other two plot
    ## types only get the "Plot" action button.
    output$moreControls <- renderUI({
        if (req(input$Type_Of_Plot) == "Cluster") {
            list(selectInput("noClusters", "Select clustering resolution:",
                            clusterList()),
                actionButton("doPlot", "Plot")
            )
        }
        else if (req(input$Type_Of_Plot) == "Gene") {
            s = Object()
            geneList = rownames(s)
            list(selectInput("gene", "Select gene to plot:",
                            geneList),
                actionButton("doPlot", "Plot")
            )
        }
        else {
            actionButton("doPlot", "Plot")
        }
    })
    output$plotPressed = renderPlot({
        ## seurat object
        ## Only re-render when the "Plot" button has been pressed.
        req(input$doPlot)
        s = Object()
        ##create coordinates df
        # coordinates = s@meta.data[, c("x", "y")]
        # coordinates$spot = rownames(coordinates)
        metadata = getMetadata(s)
        ## NOTE(review): 'coordinates' is computed but not passed to
        ## spanielPlot() below -- possibly dead code; confirm.
        coordinates = getCoordinates(metadata)
        ## image grob
        g = imageObj()
        ## plot type
        pType = input$Type_Of_Plot
        ## set features (NULL for all plots except Gene)
        f = NULL
        if (input$Type_Of_Plot == "Gene"){
            f = input$gene
        }
        ## set clusters (NULL for all plots except Cluster)
        cl = NULL
        if (input$Type_Of_Plot == "Cluster"){
            cl = input$noClusters
        }
        ### create plot
        spanielPlot(object = s,
                grob = g,
                plotType = pType,
                gene = f,
                clusterRes = cl,
                customTitle = NULL,
                scaleData = TRUE)
    },
    height = 800, width = 800
    )
    ## Jump to the plots tab whenever the user presses "Plot".
    observeEvent(input$doPlot, {
        updateTabsetPanel(session, "inTabset",
                        selected = "panel2"
        )
    })
}
shinyApp(ui, server)
}
|
/R/shinySpaniel.R
|
permissive
|
stephenwilliams22/Spaniel
|
R
| false
| false
| 8,304
|
r
|
#' @include addClusterCols.R
#' @include spatialPlot.R
#' @import shiny
#'
NULL
#' RunShinySpaniel
#'
#' A function to visualise Spatial Transcriptomics. It requires a preprocessed
#' Seurat Object or a SingleCellExperiment object as well as a rasterised image
#' saved as an .rds object. There are 4 plots available in the app showing:
#' a) the number of genes detected per spot,
#' b) the number of reads detected per spot,
#' c) clustering results,
#' d) the gene expression of a selected gene."
#' To view the clustering results the columns of the meta.data or colData
#' containing clustering results must be prefixed with cluster_ . This can be
#' done by using the markClusterCol() function included in Spaniel.
#'
#' @return Runs a Shiny App
#'
#' @examples
#' ## mark the columns of metadata/colData that contain clustering
#' ## information see ?markClusterCol for more details
#' sObj <- readRDS(file.path(system.file(package = "Spaniel"),
#' "extdata/SeuratData.rds"))
#' sObj <- markClusterCol(sObj, "res")
#'
#' ### parse background image
#' imgFile <- file.path(system.file(package = "Spaniel"),
#' "HE_Rep1_resized.jpg")
#' img <- parseImage(imgFile)
#'
#' ## run shinySpaniel (upload data.rds and image.rds in the shiny app)
#' ## Not Run:
#' # runShinySpaniel()
#' @export
runShinySpaniel <-function(){
    ## Launch the Spaniel Shiny app for browsing spatial transcriptomics
    ## data. The user uploads two .rds files (a preprocessed data object and
    ## a rasterised background image) and chooses one of four plot types,
    ## which are rendered by spanielPlot() on the "View Plots" tab.
    ## Raise shiny's upload limit to 100 MB: the serialised data and image
    ## objects are typically larger than the default limit.
    options(shiny.maxRequestSize=100*1024^2)
    ui <- pageWithSidebar(
        # App title ----
        headerPanel("Spatial Transcriptomics"),
        # Sidebar panel for inputs ----
        sidebarPanel(
        # Input: Select a file ----
        fileInput("dataFile", "Upload Data File",
                multiple = FALSE,
                accept = c(".rds")),
        # Title: Upload image file ----
        fileInput("imageFile", "Upload Image File",
                multiple = FALSE,
                accept = c(".rds")),
        # Extra options for cluster or gene plots
        uiOutput("plotType"),
        # Input: for type of plot ----
        uiOutput("moreControls"),
        p(
            #"Side End"
        )
    ),
    # Main panel for displaying outputs ----
    mainPanel(
        #plotOutput("plotPressed"),
        tabsetPanel(id = "inTabset",
                    type = "tabs",
                    tabPanel("Getting started",
                            value = "panel1",
                            h3("Plotting Spatial Data"),
                            p("1. Upload the data.rds file and image.rds
                                files. It can take a couple of minutes for
                                the data to upload"),
                            p("2. Select the type of plot you want to
                            look at. There are 4 plots available
                            showing:
                                a) the number of genes detected per spot,
                                b) the number of reads detected per spot,
                                c) clustering results,
                                d) the gene expression of a selected
                            gene."),
                            p("3. For the cluster plot you must
                            also select the cluster resolution you
                            wish to plot
                                (generally a lower resolution equates to
                            fewer clusters."),
                            p("4. For the gene plot you must select a gene
                            from the drop downlist. There is a bit of a
                            delay whilst the gene list is loading.
                            You can jump to the gene in list by typing
                            the first few letters of the gene
                            of interest."),
                            p("5. Click 'Plot' button in the side bar ")
                            ),
                    tabPanel(title = "View Plots",
                            value = "panel2",
                            plotOutput("plotPressed"))
        )
    )
)
############ Server #################################
# Define server ----
server <- function(input, output, session) {
    output$summary <- renderPrint({
        "1. Upload the data.rds file and image.rds files.
        It can take a couple of minutes for the data to upload"
    })
    ### S4 object
    ## Lazily deserialise the uploaded data object (Seurat or
    ## SingleCellExperiment, per the roxygen docs above); req() blocks
    ## downstream reactives until a file has been uploaded.
    Object <- reactive({
        req(input$dataFile)
        readRDS(input$dataFile$datapath)
    })
    ### Image object
    ## The background image, pre-rasterised and saved as .rds.
    imageObj <- reactive({
        req(input$imageFile)
        readRDS(input$imageFile$datapath)
    })
    ## Choose plot type
    ## if image and seurat objects uploaded
    ## The radio buttons only appear once both files are present.
    output$plotType <- renderUI({
        req(input$dataFile)
        req(input$imageFile)
        radioButtons("Type_Of_Plot", "Type of plot:",
                    c("Gene Number Per Spot Plot" = "NoGenes",
                    "Counts Per Spot Plot" = "CountsPerSpot",
                    "Cluster Plot" = "Cluster",
                    "Gene Plot" = "Gene"))
    })
    #### Cluster list object TO ADD!!
    ## Metadata columns prefixed "cluster_" (set via markClusterCol()).
    clusterList <- reactive({
        req(Object())
        metadata = getMetadata(Object())
        colnames(metadata)[grep("cluster_", colnames(metadata))]
    })
    ### Extra options for Gene or Cluster plots
    ## Cluster and Gene plots need an extra selector; the other two plot
    ## types only get the "Plot" action button.
    output$moreControls <- renderUI({
        if (req(input$Type_Of_Plot) == "Cluster") {
            list(selectInput("noClusters", "Select clustering resolution:",
                            clusterList()),
                actionButton("doPlot", "Plot")
            )
        }
        else if (req(input$Type_Of_Plot) == "Gene") {
            s = Object()
            geneList = rownames(s)
            list(selectInput("gene", "Select gene to plot:",
                            geneList),
                actionButton("doPlot", "Plot")
            )
        }
        else {
            actionButton("doPlot", "Plot")
        }
    })
    output$plotPressed = renderPlot({
        ## seurat object
        ## Only re-render when the "Plot" button has been pressed.
        req(input$doPlot)
        s = Object()
        ##create coordinates df
        # coordinates = s@meta.data[, c("x", "y")]
        # coordinates$spot = rownames(coordinates)
        metadata = getMetadata(s)
        ## NOTE(review): 'coordinates' is computed but not passed to
        ## spanielPlot() below -- possibly dead code; confirm.
        coordinates = getCoordinates(metadata)
        ## image grob
        g = imageObj()
        ## plot type
        pType = input$Type_Of_Plot
        ## set features (NULL for all plots except Gene)
        f = NULL
        if (input$Type_Of_Plot == "Gene"){
            f = input$gene
        }
        ## set clusters (NULL for all plots except Cluster)
        cl = NULL
        if (input$Type_Of_Plot == "Cluster"){
            cl = input$noClusters
        }
        ### create plot
        spanielPlot(object = s,
                grob = g,
                plotType = pType,
                gene = f,
                clusterRes = cl,
                customTitle = NULL,
                scaleData = TRUE)
    },
    height = 800, width = 800
    )
    ## Jump to the plots tab whenever the user presses "Plot".
    observeEvent(input$doPlot, {
        updateTabsetPanel(session, "inTabset",
                        selected = "panel2"
        )
    })
}
shinyApp(ui, server)
}
|
library(pwr)
# 3(c)
# One-sample proportion power analysis. With 'n' omitted, pwr.p.test()
# solves for the required sample size (per the pwr documentation):
# effect size h = 0.03, alpha = 0.001, power = 0.8, two-sided test.
pwr.p.test(h=0.03, sig.level=0.001, power=0.8, alternative='two.sided')
# 3(e)
# Same analysis with a smaller effect (h = 0.01) and power = 0.5.
pwr.p.test(h=0.01, sig.level=0.001, power=0.5, alternative='two.sided')
|
/ABtest_pwr.R
|
no_license
|
Sarah-Zhang/Business_Strategy_Projects
|
R
| false
| false
| 171
|
r
|
library(pwr)
# 3(c)
# One-sample proportion power analysis. With 'n' omitted, pwr.p.test()
# solves for the required sample size (per the pwr documentation):
# effect size h = 0.03, alpha = 0.001, power = 0.8, two-sided test.
pwr.p.test(h=0.03, sig.level=0.001, power=0.8, alternative='two.sided')
# 3(e)
# Same analysis with a smaller effect (h = 0.01) and power = 0.5.
pwr.p.test(h=0.01, sig.level=0.001, power=0.5, alternative='two.sided')
|
# tcpa data processing
# Expands antibody-level TCPA measurements (file names suggest TCGA-BRCA
# level-3 data) to one row per mapped gene symbol, using the
# antibody -> gene mapping table, then writes the result to CSV.
# NOTE(review): all three paths are hard-coded to the author's home
# directory; parameterise before reuse.
tcpa_info_file <- '~/Documents/workspace/phospho_network/processed_data/tcpa/tcpa_mapping.csv'
tcpa_data_file <- '~/scratch/TCPA_2016-03-22/TCGA-BRCA-L3-S35.csv'
tcpa_info_outfile <- '~/Documents/workspace/phospho_network/processed_data/tcpa/tcpa_data_processed.csv'
# Expand the antibody-level TCPA matrix to one row per (antibody, gene) pair.
#
# tcpa_data: character matrix (the transposed raw CSV); row 1 holds the
#   sample/column names, rows 1:2 are header rows, column 1 the antibody name.
# tcpa_info: mapping data frame whose first column is 'antibody'; its
#   'gene_symbol' column may list several genes separated by ", ".
# Returns a matrix (of mode list) with the tcpa_info columns followed by the
# per-sample values; multi-gene antibodies contribute one row per gene.
#
# NOTE(review): the column-name vector only matches the column count when
# ncol(tcpa_info) == 3 -- confirm against the mapping file.
#
# Fixes relative to the previous version: removed the unused 'antibody'
# local; seq_len() instead of 1:nrow() (safe when no antibody matches);
# drop = FALSE so a single matching antibody still subsets to a matrix;
# TRUE instead of T.
data_parser <- function(tcpa_data, tcpa_info){
  # Drop the two header rows; column 1 of the remainder is the antibody name.
  tcpa_data2 <- tcpa_data[-(1:2), , drop = FALSE]
  colnames(tcpa_data2) <- c('antibody', tcpa_data[1, -1])
  # Keep only antibodies present in the mapping table.
  tcpa_data3 <- tcpa_data2[tcpa_data2[, 1] %in% tcpa_info[, 'antibody'], ,
                           drop = FALSE]
  return_matrix <- matrix(0, nrow = 0, ncol = 2 + ncol(tcpa_data3))
  colnames(return_matrix) <- c(colnames(tcpa_info), colnames(tcpa_data3)[-1])
  for(i in seq_len(nrow(tcpa_data3))){
    data_info <- tcpa_info[tcpa_info[, 1] == tcpa_data3[i, 1], ]
    # A multi-gene antibody is encoded as a ", "-separated gene_symbol.
    genes <- strsplit(data_info$gene_symbol, split = ', ')[[1]]
    if(length(genes) == 1){
      # Single gene: append the mapping row plus the sample values as-is.
      return_matrix <- rbind(return_matrix, c(data_info, tcpa_data3[i, -1]))
    } else if(length(genes) > 1){
      # Replicate the mapping row once per gene and substitute each
      # individual gene symbol into column 2.
      data_info2 <- matrix(rep(data_info, length(genes)),
                           nrow = length(genes), byrow = TRUE)
      data_info2[, 2] <- genes
      values <- matrix(rep(tcpa_data3[i, -1], length(genes)),
                       nrow = length(genes), byrow = TRUE)
      return_matrix <- rbind(return_matrix, cbind(data_info2, values))
    }
  }
  return(return_matrix)
}
# Manual fixes for irregular gene_symbol entries in the processed table:
# expand the "PIK3R1/2" slash shorthand, then strip a trailing semicolon
# and a single leading space.
data_correction <- function(tcpa_data_processed){
  gs <- tcpa_data_processed[, 'gene_symbol']
  # Expand the slash shorthand used for the PIK3R1/PIK3R2 antibody.
  gs[gs == 'PIK3R1/2'] <- 'PIK3R1;PIK3R2'
  # Trailing ';' first, then a leading space (order matches the original).
  gs <- gsub('^ ', '', gsub(';$', '', gs))
  tcpa_data_processed[, 'gene_symbol'] <- gs
  return(tcpa_data_processed)
}
#main body
# Read the antibody -> gene mapping table (as.is = T keeps strings as
# character; note T/F should ideally be spelled TRUE/FALSE).
tcpa_info <- read.csv(tcpa_info_file,as.is = T)
# Read the raw data without a header and transpose it so row 1 holds the
# column names and column 1 the antibody names, as data_parser() expects.
tcpa_data_raw <- t(read.csv(tcpa_data_file,header = F, as.is = T))
# Expand antibody-level rows to one row per mapped gene ...
tcpa_data_processed <- data_parser(tcpa_data_raw,tcpa_info)
# ... then tidy up irregular gene symbols before writing out.
tcpa_data_corrected <- data_correction(tcpa_data_processed)
write.csv(tcpa_data_corrected,tcpa_info_outfile,row.names = F)
|
/src/scratch/TCPA_analysis/preprocessing/tcpa_processing.R
|
no_license
|
chrischen1/phospho_network
|
R
| false
| false
| 1,979
|
r
|
# tcpa data processing
# Expands antibody-level TCPA measurements (file names suggest TCGA-BRCA
# level-3 data) to one row per mapped gene symbol, using the
# antibody -> gene mapping table, then writes the result to CSV.
# NOTE(review): all three paths are hard-coded to the author's home
# directory; parameterise before reuse.
tcpa_info_file <- '~/Documents/workspace/phospho_network/processed_data/tcpa/tcpa_mapping.csv'
tcpa_data_file <- '~/scratch/TCPA_2016-03-22/TCGA-BRCA-L3-S35.csv'
tcpa_info_outfile <- '~/Documents/workspace/phospho_network/processed_data/tcpa/tcpa_data_processed.csv'
# Expand the antibody-level TCPA matrix to one row per (antibody, gene) pair.
# tcpa_data: character matrix; row 1 holds sample/column names, rows 1:2 are
#   header rows, column 1 the antibody name. tcpa_info: mapping data frame
#   whose first column is 'antibody'; 'gene_symbol' may list several genes
#   separated by ", ".
# NOTE(review): the column-name vector only matches the column count when
# ncol(tcpa_info) == 3 -- confirm against the mapping file.
data_parser <- function(tcpa_data,tcpa_info){
  # Drop the two header rows; column 1 of the remainder is the antibody.
  tcpa_data2 <- tcpa_data[-(1:2),]
  # NOTE(review): 'antibody' is assigned but never used.
  antibody <- tcpa_data[-(1:2),1]
  colnames(tcpa_data2) <- c('antibody',tcpa_data[1,-1])
  # Keep only antibodies present in the mapping table.
  # NOTE(review): a single match drops to a vector (missing drop = FALSE),
  # which would break nrow() below.
  tcpa_data3 <- tcpa_data2[tcpa_data2[,1] %in% tcpa_info[,'antibody'],]
  return_matrix <- matrix(0,nrow = 0,ncol = 2+ncol(tcpa_data3))
  colnames(return_matrix) <- c(colnames(tcpa_info),colnames(tcpa_data3)[-1])
  # NOTE(review): 1:nrow() misbehaves when no antibody matched; seq_len()
  # would be safer.
  for(i in 1:nrow(tcpa_data3)){
    data_info <- tcpa_info[tcpa_info[,1]==tcpa_data3[i,1],]
    # A multi-gene antibody is encoded as a ", "-separated gene_symbol.
    genes <- strsplit(data_info$gene_symbol,split = ', ')[[1]]
    if(length(genes) == 1){
      # Single gene: append the mapping row plus the sample values as-is.
      return_matrix <- rbind(return_matrix,c(data_info,tcpa_data3[i,-1]))
    }else if(length(genes) > 1){
      # Replicate the mapping row once per gene and substitute each
      # individual gene symbol into column 2.
      data_info2 <- matrix(rep(data_info,length(genes)),nrow = length(genes),byrow = T)
      data_info2[,2] <- genes
      return_matrix <- rbind(return_matrix,cbind(data_info2,matrix(rep(tcpa_data3[i,-1],length(genes)),nrow = length(genes),byrow = T)))
    }
  }
  return(return_matrix)
}
# Manual correction of irregular gene_symbol entries:
# expand the "PIK3R1/2" slash shorthand, then strip a trailing semicolon
# and a single leading space.
data_correction <- function(tcpa_data_processed){
  tcpa_data_processed[tcpa_data_processed[,'gene_symbol'] == 'PIK3R1/2','gene_symbol'] <- 'PIK3R1;PIK3R2'
  tcpa_data_processed[,'gene_symbol'] <- gsub(';$','',tcpa_data_processed[,'gene_symbol'])
  tcpa_data_processed[,'gene_symbol'] <- gsub('^ ','',tcpa_data_processed[,'gene_symbol'])
  return(tcpa_data_processed)
}
#main body
# Read the antibody -> gene mapping table (as.is = T keeps strings as
# character; note T/F should ideally be spelled TRUE/FALSE).
tcpa_info <- read.csv(tcpa_info_file,as.is = T)
# Read the raw data without a header and transpose it so row 1 holds the
# column names and column 1 the antibody names, as data_parser() expects.
tcpa_data_raw <- t(read.csv(tcpa_data_file,header = F, as.is = T))
# Expand antibody-level rows to one row per mapped gene ...
tcpa_data_processed <- data_parser(tcpa_data_raw,tcpa_info)
# ... then tidy up irregular gene symbols before writing out.
tcpa_data_corrected <- data_correction(tcpa_data_processed)
write.csv(tcpa_data_corrected,tcpa_info_outfile,row.names = F)
|
#' Access files in the current app
#'
#' @param ... Character vector specifying directory and or file to
#' point to inside the current package.
#'
#' @noRd
app_sys <- function(...) {
  # Delegate to system.file(), anchored at this package's install directory.
  # system.file() returns "" when the file (or the package) is not found.
  pkg_path <- system.file(..., package = "contratoscovid")
  pkg_path
}
#' Read App Config
#'
#' @param value Value to retrieve from the config file.
#' @param config R_CONFIG_ACTIVE value.
#' @param use_parent Logical, scan the parent directory for config file.
#'
#' @importFrom config get
#'
#' @noRd
get_golem_config <- function(
  value,
  config = Sys.getenv("R_CONFIG_ACTIVE", "default"),
  use_parent = TRUE
){
  # Location of the bundled config file; adjust the app_sys() call if the
  # config file is moved.
  config_file <- app_sys("golem-config.yml")
  # Delegate to config::get(), which resolves 'value' for the active
  # R_CONFIG_ACTIVE profile.
  config::get(
    value = value,
    config = config,
    file = config_file,
    use_parent = use_parent
  )
}
|
/Golem/contratoscovid/R/app_config.R
|
permissive
|
manosaladata/contrataciones-estado-emergencia
|
R
| false
| false
| 788
|
r
|
#' Access files in the current app
#'
#' @param ... Character vector specifying directory and or file to
#' point to inside the current package.
#'
#' @noRd
app_sys <- function(...){
  # Wrapper around system.file() pinned to this package; returns "" when
  # the requested file (or the package itself) cannot be found.
  system.file(..., package = "contratoscovid")
}
#' Read App Config
#'
#' @param value Value to retrieve from the config file.
#' @param config R_CONFIG_ACTIVE value.
#' @param use_parent Logical, scan the parent directory for config file.
#'
#' @importFrom config get
#'
#' @noRd
get_golem_config <- function(
  value,
  config = Sys.getenv("R_CONFIG_ACTIVE", "default"),
  use_parent = TRUE
){
  # Delegates to config::get(), which resolves 'value' for the active
  # R_CONFIG_ACTIVE profile from the bundled golem-config.yml.
  config::get(
    value = value,
    config = config,
    # Modify this if your config file is somewhere else:
    file = app_sys("golem-config.yml"),
    use_parent = use_parent
  )
}
|
## TODO: chunk size for evaluate = FALSE
## Parallelised version of MuMIn::dredge(): generates all subsets of the
## fixed terms in 'global.model' (optionally crossed with 'varying'
## argument variants), evaluates each candidate model -- distributing the
## fits over a 'parallel' cluster when one is supplied -- and returns a
## "model.selection" data frame ranked by the criterion in 'rank'.
## When evaluate = FALSE, only the list of model calls is returned.
`pdredge` <-
function(global.model, cluster = NA,
	beta = c("none", "sd", "partial.sd"),
	evaluate = TRUE,
	rank = "AICc", fixed = NULL, m.lim = NULL, m.min, m.max, subset,
	trace = FALSE, varying, extra, ct.args = NULL, check = FALSE, ...) {
	#FIXME: m.max cannot be 0 - e.g. for intercept only model
	trace <- min(as.integer(trace), 2L)
	strbeta <- betaMode <- NULL
	eval(.expr_beta_arg)
###PAR
	qlen <- 25L
	# Imports: clusterCall, clusterApply
	## Set up either cluster-based or sequential evaluation of queued calls.
	doParallel <- evaluate && inherits(cluster, "cluster")
	if(doParallel) {
		.parallelPkgCheck() # XXX: workaround to avoid importing from 'parallel'
		clusterCall <- get("clusterCall")
		clusterApply <- get("clusterApply")
		clusterCall(cluster, "require", .packageName, character.only = TRUE)
		.getRow <- function(X) clusterApply(cluster, X, fun = ".pdredge_process_model")
	} else {
		.getRow <- function(X) lapply(X, pdredge_process_model, envir = props)
		clusterCall <- function(...) NULL
		message("Not using cluster.")
	}
###PAR
	## Recover the global model's call so reduced-model calls can be built
	## from it by substituting term subsets.
	gmEnv <- parent.frame()
	gmNobs <- nobs(global.model)
	gmCall <- get_call(global.model)
	if (is.null(gmCall)) {
		gmCall <- substitute(global.model)
		if(!is.call(gmCall)) {
			stop("need a 'global.model' with a call component. Consider using ",
				 if(inherits(global.model, c("gamm", "gamm4")))
				 "'uGamm'" else "'updateable'")
		}
		#"For objects without a 'call' component the call to the fitting function \n",
		#" must be used directly as an argument to 'dredge'.")
		# NB: this is unlikely to happen
		if(!is.function(eval.parent(gmCall[[1L]])))
			cry(, "could not find function '%s'", asChar(gmCall[[1L]]))
	} else {
		# if 'update' method does not expand dots, we have a problem with
		# expressions like ..1, ..2 in the call. So try to replace them with
		# respective arguments in the original call
		isDotted <- grep("^\\.\\.", sapply(as.list(gmCall), asChar))
		if(length(isDotted) != 0L) {
			if(is.name(substitute(global.model))) {
				cry(, "call stored in 'global.model' contains dotted names and cannot be updated. \n	Consider using 'updateable' on the modelling function")
			} else gmCall[isDotted] <-
				substitute(global.model)[names(gmCall[isDotted])]
		}
		# object from 'run.mark.model' has $call of 'make.mark.model' - fixing
		# it here:
		if(inherits(global.model, "mark") && gmCall[[1L]] == "make.mark.model") {
			gmCall <- call("run.mark.model", model = gmCall, invisible = TRUE)
		}
	}
	lik <- .getLik(global.model)
	logLik <- lik$logLik
	# *** Rank ***
	## Resolve the ranking function (e.g. AICc/QIC) and validate its
	## extra arguments.
	rank.custom <- !missing(rank)
	if(!rank.custom && lik$name == "qLik") {
		rank <- "QIC"
		cry(, "using 'QIC' instead of 'AICc'", warn = TRUE)
	}
	rankArgs <- list(...)
	if(any(badargs <- names(rankArgs) == "marg.ex")) {
		cry(, "argument \"marg.ex\" is defunct and has been ignored",
			warn = TRUE)
		rankArgs <- rankArgs[!badargs]
	}
	if(any(names(rankArgs) == "na.action"))
		cry("RTFM", "argument \"na.action\" is inappropriate here",
			warn = FALSE)
	IC <- .getRank(rank, rankArgs)
	if(any(badargs <- is.na(match(names(rankArgs),
		c(names(formals(get("rank", environment(IC))))[-1L], names(formals()))))))
			cry("RTFM", ngettext(sum(badargs),
				"argument %s is not a name of formal argument of %s",
				"arguments %s are not names of formal arguments of %s"),
					 prettyEnumStr(names(rankArgs[badargs])), "'pdredge' or 'rank'",
					 warn = TRUE)
	ICName <- as.character(attr(IC, "call")[[1L]])
	if(length(tryCatch(IC(global.model), error = function(e) {
		stop(simpleError(conditionMessage(e), subst(attr(IC, "call"),
			x = as.name("global.model"))))
		})) != 1L) {
		cry(, "result of '%s' is not of length 1", asChar(attr(IC, "call")))
	}
	allTerms <- allTerms0 <- getAllTerms(global.model, intercept = TRUE,
		data = eval(gmCall$data, envir = gmEnv))
	# Intercept(s)
	interceptLabel <- attr(allTerms, "interceptLabel")
	if(is.null(interceptLabel)) interceptLabel <- "(Intercept)"
	nIntercepts <- sum(attr(allTerms, "intercept"))
###PAR
	# parallel: check whether the models would be identical:
	if(doParallel && check) testUpdatedObj(cluster, global.model, gmCall, level = check)
###PAR
	# Check for na.omit
	if(!(gmNaAction <- .checkNaAction(cl = gmCall, what = "'global.model'")))
		cry(, attr(gmNaAction, "message"))
	if(names(gmCall)[2L] == "") gmCall <-
		match.call(gmCall, definition = eval.parent(gmCall[[1L]]),
			expand.dots = TRUE)
	gmCoefNames <- names(coeffs(global.model))
	if(any(dup <- duplicated(gmCoefNames)))
		cry(, "model cannot have duplicated coefficient names: ",
			 prettyEnumStr(gmCoefNames[dup]))
	gmCoefNames <- fixCoefNames(gmCoefNames)
	nVars <- length(allTerms)
	if(isTRUE(rankArgs$REML) || (isTRUE(.isREMLFit(global.model)) && is.null(rankArgs$REML)))
		cry(, "comparing models fitted by REML", warn = TRUE)
	if ((betaMode != 0L) && is.null(tryCatch(std.coef(global.model, betaMode == 2L),
		error = return_null, warning = return_null))) {
			cry(, "do not know how to standardize coefficients of '%s', argument 'beta' ignored",
				class(global.model)[1L], warn = TRUE)
		betaMode <- 0L
		strbeta <- "none"
	}
	## Bounds on the number of terms per candidate model.
	if(nomlim <- is.null(m.lim)) m.lim <- c(0, NA)
	## XXX: backward compatibility:
	if(!missing(m.max) || !missing(m.min)) {
		warning("arguments 'm.min' and 'm.max' are deprecated, use 'm.lim' instead")
		if(!nomlim) stop("cannot use both 'm.lim' and 'm.min' or 'm.max'")
		if(!missing(m.min)) m.lim[1L] <- m.min[1L]
		if(!missing(m.max)) m.lim[2L] <- m.max[1L]
	}
	if(!is.numeric(m.lim) || length(m.lim) != 2L || any(m.lim < 0, na.rm = TRUE))
		stop("invalid 'm.lim' value")
	m.lim[2L] <- if (!is.finite(m.lim[2L])) (nVars - nIntercepts) else
		min(nVars - nIntercepts, m.lim[2L])
	if (!is.finite(m.lim[1L])) m.lim[1L] <- 0
	m.min <- m.lim[1L]
	m.max <- m.lim[2L]
	# fixed variables:
	## Terms in 'fixed' appear in every candidate model.
	if (!is.null(fixed)) {
		if (inherits(fixed, "formula")) {
			if (fixed[[1L]] != "~" || length(fixed) != 2L)
				cry(, "'fixed' should be a one-sided formula", warn = TRUE)
			fixed <- as.vector(getAllTerms(fixed))
		} else if (identical(fixed, TRUE)) {
			fixed <- as.vector(allTerms[!(allTerms %in% interceptLabel)])
		} else if (!is.character(fixed)) {
			cry(, paste("'fixed' should be either a character vector with",
						  " names of variables or a one-sided formula"))
		}
		if (!all(i <- (fixed %in% allTerms))) {
			cry(, "some terms in 'fixed' do not exist in 'global.model': %s",
				prettyEnumStr(fixed[!i]), warn = TRUE)
			fixed <- fixed[i]
		}
	}
	deps <- attr(allTerms0, "deps")
	fixed <- union(fixed, rownames(deps)[rowSums(deps, na.rm = TRUE) == ncol(deps)])
	fixed <- c(fixed, allTerms[allTerms %in% interceptLabel])
	nFixed <- length(fixed)
	if(nFixed != 0L) message(sprintf(ngettext(nFixed, "Fixed term is %s", "Fixed terms are %s"),
		prettyEnumStr(fixed)))
	termsOrder <- order(allTerms %in% fixed)
	allTerms <- allTerms[termsOrder]
	di <- match(allTerms, rownames(deps))
	deps <- deps[di, di]
	gmFormulaEnv <- environment(as.formula(formula(global.model), env = gmEnv))
	# TODO: gmEnv <- gmFormulaEnv ???
	### BEGIN Manage 'varying'
	## @param:  varying
	## @value:  varying, varyingNames, variants, nVariants, nVarying
	if(!missing(varying) && !is.null(varying)) {
		nVarying <- length(varying)
		varyingNames <- names(varying)
		fvarying <- unlist(varying, recursive = FALSE, use.names = FALSE)
		vlen <- vapply(varying, length, 1L)
		nVariants <- prod(vlen)
		variants <- as.matrix(expand.grid(split(seq_len(sum(vlen)),
			rep(seq_len(nVarying), vlen))))
		variantsFlat <- unlist(lapply(varying, .makeListNames),
			recursive = FALSE, use.names = FALSE)
	} else {
		variants <- varyingNames <- NULL
		nVariants <- 1L
		nVarying <- 0L
	}
	## END: varying
	## BEGIN Manage 'extra'
	## @param: extra, global.model, gmFormulaEnv,
	## @value: extra, nextra, extraNames, nullfit_
	if(!missing(extra) && length(extra) != 0L) {
		# a cumbersome way of evaluating a non-exported function in a parent frame:
		extra <- eval(as.call(list(call("get", ".get.extras",
			envir = call("asNamespace", .packageName), inherits = FALSE),
			substitute(extra), r2nullfit = TRUE)), parent.frame())
		#extra <- eval(call(".get.extras", substitute(extra), r2nullfit = TRUE), parent.frame())
		if(any(c("adjR^2", "R^2") %in% names(extra))) {
			nullfit_ <- null.fit(global.model, evaluate = TRUE, envir = gmFormulaEnv)
		}
		applyExtras <- function(x) unlist(lapply(extra, function(f) f(x)))
		extraResult <- applyExtras(global.model)
		if(!is.numeric(extraResult))
			cry(, "function in 'extra' returned non-numeric result")
		nextra <- length(extraResult)
		extraNames <- names(extraResult)
	} else {
		nextra <- 0L
		extraNames <- character(0L)
	}
	## END: manage 'extra'
	nov <- as.integer(nVars - nFixed)
	ncomb <- (2L ^ nov) * nVariants
	if(nov > 31L) cry(, "number of predictors [%d] exceeds allowed maximum of 31", nov)
	#if(nov > 10L) warning(gettextf("%d predictors will generate up to %.0f combinations", nov, ncomb))
	nmax <- ncomb * nVariants
	rvChunk <- 25L
	if(evaluate) {
		rvNcol <- nVars + nVarying + 3L + nextra
		rval <- matrix(NA_real_, ncol = rvNcol, nrow = rvChunk)
		coefTables <- vector(rvChunk, mode = "list")
	}
	## BEGIN: Manage 'subset'
	## @param: hasSubset, subset, allTerms, [interceptLabel],
	## @value: hasSubset, subset
	if(missing(subset))  {
		hasSubset <- 1L
	} else {
		if(!tryCatch(is.language(subset) || is.matrix(subset), error = function(e) FALSE))
			subset <- substitute(subset)
		if(is.matrix(subset)) {
			dn <- dimnames(subset)
			#at <- allTerms[!(allTerms %in% interceptLabel)]
			n <- length(allTerms)
			if(is.null(dn) || any(sapply(dn, is.null))) {
				di <- dim(subset)
				if(any(di != n)) stop("unnamed 'subset' matrix does not have both dimensions",
					" equal to number of terms in 'global.model': %d", n)
				dimnames(subset) <- list(allTerms, allTerms)
			} else {
				if(!all(unique(unlist(dn)) %in% allTerms))
					warning("at least some dimnames of 'subset' matrix do not ",
							"match term names in 'global.model'")
				subset0 <- subset
				subset <- matrix(subset[
					match(allTerms, rownames(subset)),
					match(allTerms, colnames(subset))],
					dimnames = list(allTerms, allTerms),
					nrow = n, ncol = n)
				nas <- is.na(subset)
				lotri <- lower.tri(subset)
				i <- lotri & nas & !t(nas)
				subset[i] <- t(subset)[i]
				subset[!lotri] <- NA
			}
			if(any(!is.na(subset[!lower.tri(subset)]))) {
				warning("non-missing values exist outside the lower triangle of 'subset'")
				subset[!lower.tri(subset)] <- NA
			}
			mode(subset) <- "logical"
			hasSubset <- 2L # subset as matrix
		} else {
			if(inherits(subset, "formula")) {
				if (subset[[1L]] != "~" || length(subset) != 2L)
					stop("'subset' formula should be one-sided")
				subset <- subset[[2L]]
			}
			subset <- as.expression(subset)
			ssValidNames <- c("comb", "*nvar*")
			tmpTerms <- terms(reformulate(allTerms0[!(allTerms0 %in% interceptLabel)]))
			gloFactorTable <- t(attr(tmpTerms, "factors") != 0)
			offsetNames <- sapply(attr(tmpTerms, "variables")[attr(tmpTerms, "offset") + 1L], asChar)
			if(length(offsetNames) != 0L) {
				gloFactorTable <- rbind(gloFactorTable,
					matrix(FALSE, ncol = ncol(gloFactorTable), nrow = length(offsetNames),
						   dimnames = list(offsetNames, NULL)))
				for(i in offsetNames) gloFactorTable[offsetNames, offsetNames] <- TRUE
				#Note `diag<-` does not work for x[1x1] matrix:
				# diag(gloFactorTable[offsetNames, offsetNames, drop = FALSE]) <- TRUE
			}
			DebugPrint(gloFactorTable)
			# fix interaction names in rownames:
			rownames(gloFactorTable) <- allTerms0[!(allTerms0 %in% interceptLabel)]
			subsetExpr <- subset[[1L]]
			subsetExpr <- exprapply0(subsetExpr, ".", .sub_dot, gloFactorTable,
				allTerms, as.name("comb"))
			subsetExpr <- exprapply0(subsetExpr, c("{", "Term"), .sub_Term)
			tmp <- updateDeps(subsetExpr, deps)
			subsetExpr <- tmp$expr
			deps <- tmp$deps
			subsetExpr <- exprapply0(subsetExpr, "dc", .sub_args_as_vars)
			subsetExpr <- .subst4Vec(subsetExpr, allTerms, "comb")
			if(nVarying) {
				ssValidNames <- c("cVar", "comb", "*nvar*")
				subsetExpr <- exprapply0(subsetExpr, "V", .sub_V,
										 as.name("cVar"), varyingNames)
				if(!all(all.vars(subsetExpr) %in% ssValidNames))
					subsetExpr <- .subst4Vec(subsetExpr, varyingNames,
						"cVar", fun = "[[")
			}
			ssVars <- all.vars(subsetExpr)
			okVars <- ssVars %in% ssValidNames
			if(!all(okVars)) stop("unrecognized names in 'subset' expression: ",
				 prettyEnumStr(ssVars[!okVars]))
			ssEnv <- new.env(parent = parent.frame())
			ssFunc <- setdiff(all.vars(subsetExpr, functions = TRUE), ssVars)
			if("dc" %in% ssFunc) assign("dc", .subset_dc, ssEnv)
			hasSubset <- if(any(ssVars == "cVar")) 4L else # subset as expression
				3L # subset as expression using 'varying' variables
		}
	} # END: manage 'subset'
	comb.sfx <- rep(TRUE, nFixed)
	comb.seq <- if(nov != 0L) seq_len(nov) else 0L
	k <- 0L
	extraResult1 <- integer(0L)
	calls <- vector(mode = "list", length = rvChunk)
	ord <- integer(rvChunk)
	argsOptions <- list(
		response = attr(allTerms0, "response"),
		intercept = nIntercepts,
		interceptLabel = interceptLabel,
		random = attr(allTerms0, "random"),
		gmCall = gmCall,
		gmEnv = gmEnv,
		allTerms = allTerms0,
		gmCoefNames = gmCoefNames,
		gmDataHead = if(!is.null(gmCall$data)) {
			if(eval(call("is.data.frame", gmCall$data), gmEnv))
				eval(call("head", gmCall$data, 1L), gmEnv) else gmCall$data
			} else NULL,
		gmFormulaEnv = gmFormulaEnv
		)
	# BEGIN parallel
	## 'props' holds everything pdredge_process_model() needs; it is
	## exported to the cluster nodes' global environments.
	qi <- 0L
	queued <- vector(qlen, mode = "list")
	props <- list(
		gmEnv = gmEnv,
		IC = IC,
		# beta = beta,
		# allTerms = allTerms,
		nextra = nextra,
		matchCoefCall = as.call(c(list(
			as.name("matchCoef"), as.name("fit1"),
			all.terms = allTerms, beta = betaMode,
			allCoef = TRUE), ct.args))
		# matchCoefCall = as.call(c(alist(matchCoef, fit1, all.terms = Z$allTerms,
		#	beta = Z$beta, allCoef = TRUE), ct.args))
		)
	if(nextra) {
		props$applyExtras <- applyExtras
		props$extraResultNames <- names(extraResult)
	}
	props <- as.environment(props)
	if(doParallel) {
		clusterVExport(cluster, pdredge_props = props,
			.pdredge_process_model = pdredge_process_model
			)
		clusterCall(cluster, eval, call("options", options("na.action")), env = 0L)
	}
	# END parallel
	retColIdx <- if(nVarying) -nVars - seq_len(nVarying) else TRUE
	if(trace > 1L) {
		progressBar <- if(.Platform$GUI == "Rgui") {
			utils::winProgressBar(max = ncomb, title = "'dredge' in progress")
		} else utils::txtProgressBar(max = ncomb, style = 3)
		setProgressBar <- switch(class(progressBar),
			txtProgressBar = utils::setTxtProgressBar,
			winProgressBar = utils::setWinProgressBar,
			function(...) {})
		on.exit(close(progressBar))
	}
	warningList <- list()
	## Main loop: iterate over all term-combination x variant indices.
	## Valid model calls are queued and evaluated in batches of up to
	## 'qlen' (on the cluster when available).
	iComb <- -1L
	while((iComb <- iComb + 1L) < ncomb) {
		varComb <- iComb %% nVariants
		jComb <- (iComb - varComb) / nVariants
		#print(c(iComb, jComb, ncomb, varComb + 1L))
		if(varComb == 0L) {
			isok <- TRUE
			comb <- c(as.logical(intToBits(jComb)[comb.seq]), comb.sfx)
			nvar <- sum(comb) - nIntercepts
			# !!! POSITIVE condition for 'pdredge', NEGATIVE for 'dredge':
			if((nvar >= m.min && nvar <= m.max) &&
				formula_margin_check(comb, deps) &&
				switch(hasSubset,
					# 1 - no subset, 2 - matrix, 3 - expression
					TRUE, # 1
					all(subset[comb, comb], na.rm = TRUE), # 2
					evalExprInEnv(subsetExpr, env = ssEnv, enclos = parent.frame(),
						comb = comb, `*nvar*` = nvar), # 3
					TRUE
					)
				) {
				newArgs <- makeArgs(global.model, allTerms[comb], argsOptions) #comb
				formulaList <- if(is.null(attr(newArgs, "formulaList"))) newArgs else
					attr(newArgs, "formulaList")
				if(!is.null(attr(newArgs, "problems"))) {
					print.warnings(structure(vector(mode = "list",
						length = length(attr(newArgs, "problems"))),
							names = attr(newArgs, "problems")))
				} # end if <problems>
				cl <- gmCall
				cl[names(newArgs)] <- newArgs
			} else isok <- FALSE # end if <subset, m.max >= nvar >= m.min>
		} # end if(jComb != prevJComb)
		if(isok) {
			## --- Variants ---------------------------
			clVariant <- cl
			isok2 <- TRUE
			if(nVarying) {
				cvi <- variants[varComb + 1L, ]
				isok2 <- (hasSubset != 4L) || evalExprInEnv(subsetExpr, env = ssEnv,
						enclos = parent.frame(), comb = comb, `*nvar*` = nvar,
						cVar = variantsFlat[cvi])
				clVariant[varyingNames] <- fvarying[cvi]
			}
			if(isok2) {
				if(evaluate) {
					if(trace == 1L) {
						cat(iComb, ": "); print(clVariant)
						utils::flush.console()
					} else if(trace == 2L) {
						setProgressBar(progressBar, value = iComb,
							title = sprintf("pdredge: %d of %.0f subsets", k, (k / iComb) * ncomb))
					}
					qi <- qi + 1L
					queued[[(qi)]] <- list(call = clVariant, id = iComb)
				} else { # if !evaluate
					k <- k + 1L # all OK, add model to table
					rvlen <- length(ord)
					if(k > rvlen) {
						nadd <- min(rvChunk, nmax - rvlen)
						#message(sprintf("extending result from %d to %d", rvlen, rvlen + nadd))
						addi <- seq.int(rvlen + 1L, length.out = nadd)
						calls[addi] <- vector("list", nadd)
						ord[addi] <- integer(nadd)
					}
					calls[[k]] <- clVariant
					ord[k] <- iComb
				}
			}
		} # if isok
		#if(evaluate && qi && (qi + nvariants > qlen || iComb == ncomb)) {
		## Flush the queue: evaluate pending calls, fold successful rows
		## into 'rval' and collect warnings/errors per model.
		if(evaluate && qi && (qi > qlen || (iComb + 1L) == ncomb)) {
			qseq <- seq_len(qi)
			qresult <- .getRow(queued[qseq])
			utils::flush.console()
			if(any(vapply(qresult, is.null, TRUE)))
				stop("some results returned from cluster node(s) are NULL. \n",
					 "This should not happen and indicates problems with ",
					 "the cluster node", domain = "R-MuMIn")
			haveProblems <- logical(qi)
			nadd <- sum(sapply(qresult, function(x) inherits(x$value, "condition")
				+ length(x$warnings)))
			wi <- length(warningList)
			if(nadd) warningList <- c(warningList, vector(nadd, mode = "list"))
			# DEBUG: print(sprintf("Added %d warnings, now is %d", nadd, length(warningList)))
			for (i in qseq)
				for(cond in c(qresult[[i]]$warnings,
					if(inherits(qresult[[i]]$value, "condition"))
						list(qresult[[i]]$value))) {
					wi <- wi + 1L
					warningList[[wi]] <- if(is.null(conditionCall(cond)))
						queued[[i]]$call else conditionCall(cond)
					if(inherits(cond, "error")) {
						haveProblems[i] <- TRUE
						msgsfx <- "(model %d skipped)"
					} else
						msgsfx <- "(in model %d)"
					names(warningList)[wi] <- paste(conditionMessage(cond),
						gettextf(msgsfx, queued[[i]]$id))
					attr(warningList[[wi]], "id") <- queued[[i]]$id
				}
			withoutProblems <- which(!haveProblems)
			qrows <- lapply(qresult[withoutProblems], "[[", "value")
			qresultLen <- length(qrows)
			rvlen <- nrow(rval)
			if(retNeedsExtending <- k + qresultLen > rvlen) {
				nadd <- min(max(rvChunk, qresultLen), nmax - rvlen)
				rval <- rbind(rval, matrix(NA_real_, ncol = rvNcol, nrow = nadd),
							  deparse.level = 0L)
				addi <- seq.int(rvlen + 1L, length.out = nadd)
				coefTables[addi] <- vector("list", nadd)
				calls[addi] <- vector("list", nadd)
				ord[addi] <- integer(nadd)
			}
			qseqOK <- seq_len(qresultLen)
			for(m in qseqOK) rval[k + m, retColIdx] <- qrows[[m]]
			ord[k + qseqOK] <- vapply(queued[withoutProblems], "[[", 1L, "id")
			calls[k + qseqOK] <- lapply(queued[withoutProblems], "[[", "call")
			coefTables[k + qseqOK] <- lapply(qresult[withoutProblems], "[[", "coefTable")
			k <- k + qresultLen
			qi <- 0L
		}
	} ### for (iComb ...)
	if(k == 0L) {
		if(length(warningList)) print.warnings(warningList)
		stop("the result is empty")
	}
	names(calls) <- ord
	if(!evaluate) return(calls[seq_len(k)])
	if(k < nrow(rval)) {
		i <- seq_len(k)
		rval <- rval[i, , drop = FALSE]
		ord <- ord[i]
		calls <- calls[i]
		coefTables <- coefTables[i]
	}
	if(nVarying) {
		varlev <- ord %% nVariants
		varlev[varlev == 0L] <- nVariants
		rval[, nVars + seq_len(nVarying)] <- variants[varlev, ]
	}
	## Assemble and order the "model.selection" table: term columns,
	## varying/extra columns, df, log-likelihood, IC, delta and weights.
	rval <- as.data.frame(rval)
	row.names(rval) <- ord
	# Convert columns with presence/absence of terms to factors
	tfac <- which(!(allTerms %in% gmCoefNames))
	rval[tfac] <- lapply(rval[tfac], factor, levels = NaN, labels = "+")
	i <- seq_along(allTerms)
	v <- order(termsOrder)
	rval[, i] <- rval[, v]
	allTerms <- allTerms[v]
	colnames(rval) <- c(allTerms, varyingNames, extraNames, "df", lik$name, ICName)
	if(nVarying) {
		variant.names <- vapply(variantsFlat, asChar, "", width.cutoff = 20L)
		vnum <- split(seq_len(sum(vlen)), rep(seq_len(nVarying), vlen))
		names(vnum) <- varyingNames
		for (i in varyingNames) rval[, i] <-
			factor(rval[, i], levels = vnum[[i]], labels = variant.names[vnum[[i]]])
	}
	rval <- rval[o <- order(rval[, ICName], decreasing = FALSE), ]
	coefTables <- coefTables[o]
	rval$delta <- rval[, ICName] - min(rval[, ICName])
	rval$weight <- exp(-rval$delta / 2) / sum(exp(-rval$delta / 2))
	mode(rval$df) <- "integer"
	rval <-
	structure(rval,
		model.calls = calls[o],
		global = global.model,
		global.call = gmCall,
		terms = structure(allTerms, interceptLabel = interceptLabel),
		rank = IC,
		beta = strbeta,
		call = match.call(expand.dots = TRUE),
		coefTables = coefTables,
		nobs = gmNobs,
		vCols = varyingNames, ## XXX: remove
		column.types = {
			colTypes <- c(terms = length(allTerms), varying = length(varyingNames),
			extra = length(extraNames), df = 1L, loglik = 1L, ic = 1L, delta = 1L,
			weight = 1L)
			column.types <- rep(1L:length(colTypes), colTypes)
			names(column.types) <- colnames(rval)
			lv <- 1L:length(colTypes)
			factor(column.types, levels = lv, labels = names(colTypes)[lv])
		},
		class = c("model.selection", "data.frame")
	)
	if(length(warningList)) {
		class(warningList) <- c("warnings", "list")
		attr(rval, "warnings") <- warningList
	}
	if (!is.null(attr(allTerms0, "random.terms")))
		attr(rval, "random.terms") <- attr(allTerms0, "random.terms")
	if(doParallel) clusterCall(cluster, "rm",
		list = c(".pdredge_process_model", "pdredge_props"), envir = .GlobalEnv)
	return(rval)
} ######
# Worker-side evaluator for a single candidate model, called by 'pdredge'
# either locally or on a cluster node (where it is exported as
# ".pdredge_process_model" together with the shared state "pdredge_props").
#
# Arguments:
#   modv  - list(call = clVariant, id = modelId): the model call to fit and
#           its integer id within the all-subsets enumeration.
#   envir - environment with state prepared by 'pdredge': gmEnv (where the
#           call is evaluated), IC (rank function), nextra, matchCoefCall,
#           and, when 'extra' statistics were requested, applyExtras and
#           extraResultNames.
#
# Returns: on fitting failure, the 'tryCatchWE' result holding the condition;
# otherwise a list with: value (matched coefficients + extras + df + logLik +
# IC), nobs, coefTable and any collected warnings.
`pdredge_process_model` <- function(modv, envir = get("pdredge_props", .GlobalEnv)) {
### modv == list(call = clVariant, id = modelId)
# Fit the model in the data environment captured by 'pdredge', collecting
# warnings without aborting; an error is returned as a condition value.
result <- tryCatchWE(eval(modv$call, get("gmEnv", envir)))
if (inherits(result$value, "condition")) return(result)
# NOTE: the name 'fit1' is significant -- the call in 'matchCoefCall'
# (evaluated below in this frame) refers to it by symbol.
fit1 <- result$value
if(get("nextra", envir) != 0L) {
# Compute user-supplied 'extra' statistics; pad with NA so the result
# row always has the full expected length.
extraResult1 <- get("applyExtras", envir)(fit1)
nextra <- get("nextra", envir)
if(length(extraResult1) < nextra) {
tmp <- rep(NA_real_, nextra)
tmp[match(names(extraResult1), get("extraResultNames", envir))] <-
extraResult1
extraResult1 <- tmp
}
} else extraResult1 <- NULL
ll <- .getLik(fit1)$logLik(fit1)
#mcoef <- matchCoef(fit1, all.terms = get("allTerms", envir),
# beta = get("beta", envir), allCoef = TRUE)
mcoef <- eval(get("matchCoefCall", envir))
list(value = c(mcoef, extraResult1, df = attr(ll, "df"), ll = ll,
ic = get("IC", envir)(fit1)),
nobs = nobs(fit1),
coefTable = attr(mcoef, "coefTable"),
warnings = result$warnings)
}
# Consistency check for a 'pdredge' result: re-run the same model selection
# serially via 'dredge' and verify both tables are identical. Returns 'dd'
# invisibly unchanged on success, stops otherwise.
.test_pdredge <- function(dd) {
  sel_call <- attr(dd, "call")
  # Strip the pdredge-only arguments, then redirect the call to 'dredge':
  sel_call$cluster <- NULL
  sel_call$check <- NULL
  sel_call[[1L]] <- as.name("dredge")
  serial_result <- eval(sel_call)
  if (!identical(c(dd), c(serial_result))) {
    stop("Whoops...")
  }
  dd
}
|
/MuMIn/R/pdredge.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 24,040
|
r
|
## TODO: chunk size for evaluate = FALSE
# Parallelized version of 'dredge': generates the model-selection table for
# all subsets of terms in 'global.model', optionally evaluating the candidate
# models on a 'parallel' cluster ('cluster' argument). Models are queued in
# chunks of 'qlen' and dispatched to the worker function
# 'pdredge_process_model'; results are assembled into a data frame of class
# c("model.selection", "data.frame") ordered by the information criterion
# given in 'rank', with 'delta' and Akaike 'weight' columns appended.
# With evaluate = FALSE only the list of model calls is returned.
`pdredge` <-
function(global.model, cluster = NA,
beta = c("none", "sd", "partial.sd"),
evaluate = TRUE,
rank = "AICc", fixed = NULL, m.lim = NULL, m.min, m.max, subset,
trace = FALSE, varying, extra, ct.args = NULL, check = FALSE, ...) {
#FIXME: m.max cannot be 0 - e.g. for intercept only model
trace <- min(as.integer(trace), 2L)
strbeta <- betaMode <- NULL
eval(.expr_beta_arg)
###PAR
# Queue length: models are sent to workers in batches of this size.
qlen <- 25L
# Imports: clusterCall, clusterApply
doParallel <- evaluate && inherits(cluster, "cluster")
if(doParallel) {
.parallelPkgCheck() # XXX: workaround to avoid importing from 'parallel'
clusterCall <- get("clusterCall")
clusterApply <- get("clusterApply")
clusterCall(cluster, "require", .packageName, character.only = TRUE)
.getRow <- function(X) clusterApply(cluster, X, fun = ".pdredge_process_model")
} else {
# Serial fallback: evaluate the queued calls locally.
.getRow <- function(X) lapply(X, pdredge_process_model, envir = props)
clusterCall <- function(...) NULL
message("Not using cluster.")
}
###PAR
gmEnv <- parent.frame()
gmNobs <- nobs(global.model)
gmCall <- get_call(global.model)
if (is.null(gmCall)) {
gmCall <- substitute(global.model)
if(!is.call(gmCall)) {
stop("need a 'global.model' with a call component. Consider using ",
if(inherits(global.model, c("gamm", "gamm4")))
"'uGamm'" else "'updateable'")
}
#"For objects without a 'call' component the call to the fitting function \n",
#" must be used directly as an argument to 'dredge'.")
# NB: this is unlikely to happen
if(!is.function(eval.parent(gmCall[[1L]])))
cry(, "could not find function '%s'", asChar(gmCall[[1L]]))
} else {
# if 'update' method does not expand dots, we have a problem with
# expressions like ..1, ..2 in the call. So try to replace them with
# respective arguments in the original call
isDotted <- grep("^\\.\\.", sapply(as.list(gmCall), asChar))
if(length(isDotted) != 0L) {
if(is.name(substitute(global.model))) {
cry(, "call stored in 'global.model' contains dotted names and cannot be updated. \n Consider using 'updateable' on the modelling function")
} else gmCall[isDotted] <-
substitute(global.model)[names(gmCall[isDotted])]
}
# object from 'run.mark.model' has $call of 'make.mark.model' - fixing
# it here:
if(inherits(global.model, "mark") && gmCall[[1L]] == "make.mark.model") {
gmCall <- call("run.mark.model", model = gmCall, invisible = TRUE)
}
}
lik <- .getLik(global.model)
logLik <- lik$logLik
# *** Rank ***
# Resolve the information criterion used for ordering the result table.
rank.custom <- !missing(rank)
if(!rank.custom && lik$name == "qLik") {
rank <- "QIC"
cry(, "using 'QIC' instead of 'AICc'", warn = TRUE)
}
rankArgs <- list(...)
if(any(badargs <- names(rankArgs) == "marg.ex")) {
cry(, "argument \"marg.ex\" is defunct and has been ignored",
warn = TRUE)
rankArgs <- rankArgs[!badargs]
}
if(any(names(rankArgs) == "na.action"))
cry("RTFM", "argument \"na.action\" is inappropriate here",
warn = FALSE)
IC <- .getRank(rank, rankArgs)
if(any(badargs <- is.na(match(names(rankArgs),
c(names(formals(get("rank", environment(IC))))[-1L], names(formals()))))))
cry("RTFM", ngettext(sum(badargs),
"argument %s is not a name of formal argument of %s",
"arguments %s are not names of formal arguments of %s"),
prettyEnumStr(names(rankArgs[badargs])), "'pdredge' or 'rank'",
warn = TRUE)
ICName <- as.character(attr(IC, "call")[[1L]])
# Sanity check: the rank function must yield a single value for the
# global model before any submodels are attempted.
if(length(tryCatch(IC(global.model), error = function(e) {
stop(simpleError(conditionMessage(e), subst(attr(IC, "call"),
x = as.name("global.model"))))
})) != 1L) {
cry(, "result of '%s' is not of length 1", asChar(attr(IC, "call")))
}
allTerms <- allTerms0 <- getAllTerms(global.model, intercept = TRUE,
data = eval(gmCall$data, envir = gmEnv))
# Intercept(s)
interceptLabel <- attr(allTerms, "interceptLabel")
if(is.null(interceptLabel)) interceptLabel <- "(Intercept)"
nIntercepts <- sum(attr(allTerms, "intercept"))
###PAR
# parallel: check whether the models would be identical:
if(doParallel && check) testUpdatedObj(cluster, global.model, gmCall, level = check)
###PAR
# Check for na.omit
if(!(gmNaAction <- .checkNaAction(cl = gmCall, what = "'global.model'")))
cry(, attr(gmNaAction, "message"))
if(names(gmCall)[2L] == "") gmCall <-
match.call(gmCall, definition = eval.parent(gmCall[[1L]]),
expand.dots = TRUE)
gmCoefNames <- names(coeffs(global.model))
if(any(dup <- duplicated(gmCoefNames)))
cry(, "model cannot have duplicated coefficient names: ",
prettyEnumStr(gmCoefNames[dup]))
gmCoefNames <- fixCoefNames(gmCoefNames)
nVars <- length(allTerms)
if(isTRUE(rankArgs$REML) || (isTRUE(.isREMLFit(global.model)) && is.null(rankArgs$REML)))
cry(, "comparing models fitted by REML", warn = TRUE)
if ((betaMode != 0L) && is.null(tryCatch(std.coef(global.model, betaMode == 2L),
error = return_null, warning = return_null))) {
cry(, "do not know how to standardize coefficients of '%s', argument 'beta' ignored",
class(global.model)[1L], warn = TRUE)
betaMode <- 0L
strbeta <- "none"
}
# Model-size limits: normalize deprecated m.min/m.max into m.lim.
if(nomlim <- is.null(m.lim)) m.lim <- c(0, NA)
## XXX: backward compatibility:
if(!missing(m.max) || !missing(m.min)) {
warning("arguments 'm.min' and 'm.max' are deprecated, use 'm.lim' instead")
if(!nomlim) stop("cannot use both 'm.lim' and 'm.min' or 'm.max'")
if(!missing(m.min)) m.lim[1L] <- m.min[1L]
if(!missing(m.max)) m.lim[2L] <- m.max[1L]
}
if(!is.numeric(m.lim) || length(m.lim) != 2L || any(m.lim < 0, na.rm = TRUE))
stop("invalid 'm.lim' value")
m.lim[2L] <- if (!is.finite(m.lim[2L])) (nVars - nIntercepts) else
min(nVars - nIntercepts, m.lim[2L])
if (!is.finite(m.lim[1L])) m.lim[1L] <- 0
m.min <- m.lim[1L]
m.max <- m.lim[2L]
# fixed variables:
if (!is.null(fixed)) {
if (inherits(fixed, "formula")) {
if (fixed[[1L]] != "~" || length(fixed) != 2L)
cry(, "'fixed' should be a one-sided formula", warn = TRUE)
fixed <- as.vector(getAllTerms(fixed))
} else if (identical(fixed, TRUE)) {
fixed <- as.vector(allTerms[!(allTerms %in% interceptLabel)])
} else if (!is.character(fixed)) {
cry(, paste("'fixed' should be either a character vector with",
" names of variables or a one-sided formula"))
}
if (!all(i <- (fixed %in% allTerms))) {
cry(, "some terms in 'fixed' do not exist in 'global.model': %s",
prettyEnumStr(fixed[!i]), warn = TRUE)
fixed <- fixed[i]
}
}
# Terms required by marginality constraints and intercepts are always kept.
deps <- attr(allTerms0, "deps")
fixed <- union(fixed, rownames(deps)[rowSums(deps, na.rm = TRUE) == ncol(deps)])
fixed <- c(fixed, allTerms[allTerms %in% interceptLabel])
nFixed <- length(fixed)
if(nFixed != 0L) message(sprintf(ngettext(nFixed, "Fixed term is %s", "Fixed terms are %s"),
prettyEnumStr(fixed)))
# Move fixed terms to the end so the bitmask enumeration covers only the
# optional terms; 'termsOrder' is inverted again when assembling the table.
termsOrder <- order(allTerms %in% fixed)
allTerms <- allTerms[termsOrder]
di <- match(allTerms, rownames(deps))
deps <- deps[di, di]
gmFormulaEnv <- environment(as.formula(formula(global.model), env = gmEnv))
# TODO: gmEnv <- gmFormulaEnv ???
### BEGIN Manage 'varying'
## @param: varying
## @value: varying, varyingNames, variants, nVariants, nVarying
if(!missing(varying) && !is.null(varying)) {
nVarying <- length(varying)
varyingNames <- names(varying)
fvarying <- unlist(varying, recursive = FALSE, use.names = FALSE)
vlen <- vapply(varying, length, 1L)
nVariants <- prod(vlen)
variants <- as.matrix(expand.grid(split(seq_len(sum(vlen)),
rep(seq_len(nVarying), vlen))))
variantsFlat <- unlist(lapply(varying, .makeListNames),
recursive = FALSE, use.names = FALSE)
} else {
variants <- varyingNames <- NULL
nVariants <- 1L
nVarying <- 0L
}
## END: varying
## BEGIN Manage 'extra'
## @param: extra, global.model, gmFormulaEnv,
## @value: extra, nextra, extraNames, nullfit_
if(!missing(extra) && length(extra) != 0L) {
# a cumbersome way of evaluating a non-exported function in a parent frame:
extra <- eval(as.call(list(call("get", ".get.extras",
envir = call("asNamespace", .packageName), inherits = FALSE),
substitute(extra), r2nullfit = TRUE)), parent.frame())
#extra <- eval(call(".get.extras", substitute(extra), r2nullfit = TRUE), parent.frame())
if(any(c("adjR^2", "R^2") %in% names(extra))) {
nullfit_ <- null.fit(global.model, evaluate = TRUE, envir = gmFormulaEnv)
}
applyExtras <- function(x) unlist(lapply(extra, function(f) f(x)))
extraResult <- applyExtras(global.model)
if(!is.numeric(extraResult))
cry(, "function in 'extra' returned non-numeric result")
nextra <- length(extraResult)
extraNames <- names(extraResult)
} else {
nextra <- 0L
extraNames <- character(0L)
}
## END: manage 'extra'
# Total number of candidate models: 2^(number of free terms) x variants.
nov <- as.integer(nVars - nFixed)
ncomb <- (2L ^ nov) * nVariants
if(nov > 31L) cry(, "number of predictors [%d] exceeds allowed maximum of 31", nov)
#if(nov > 10L) warning(gettextf("%d predictors will generate up to %.0f combinations", nov, ncomb))
nmax <- ncomb * nVariants
# The result matrix grows in chunks of this many rows.
rvChunk <- 25L
if(evaluate) {
rvNcol <- nVars + nVarying + 3L + nextra
rval <- matrix(NA_real_, ncol = rvNcol, nrow = rvChunk)
coefTables <- vector(rvChunk, mode = "list")
}
## BEGIN: Manage 'subset'
## @param: hasSubset, subset, allTerms, [interceptLabel],
## @value: hasSubset, subset
# hasSubset codes: 1 = none, 2 = logical matrix, 3 = expression,
# 4 = expression that also references 'varying' variables (cVar).
if(missing(subset)) {
hasSubset <- 1L
} else {
if(!tryCatch(is.language(subset) || is.matrix(subset), error = function(e) FALSE))
subset <- substitute(subset)
if(is.matrix(subset)) {
dn <- dimnames(subset)
#at <- allTerms[!(allTerms %in% interceptLabel)]
n <- length(allTerms)
if(is.null(dn) || any(sapply(dn, is.null))) {
di <- dim(subset)
if(any(di != n)) stop("unnamed 'subset' matrix does not have both dimensions",
" equal to number of terms in 'global.model': %d", n)
dimnames(subset) <- list(allTerms, allTerms)
} else {
if(!all(unique(unlist(dn)) %in% allTerms))
warning("at least some dimnames of 'subset' matrix do not ",
"match term names in 'global.model'")
subset0 <- subset
subset <- matrix(subset[
match(allTerms, rownames(subset)),
match(allTerms, colnames(subset))],
dimnames = list(allTerms, allTerms),
nrow = n, ncol = n)
nas <- is.na(subset)
lotri <- lower.tri(subset)
i <- lotri & nas & !t(nas)
subset[i] <- t(subset)[i]
subset[!lotri] <- NA
}
if(any(!is.na(subset[!lower.tri(subset)]))) {
warning("non-missing values exist outside the lower triangle of 'subset'")
subset[!lower.tri(subset)] <- NA
}
mode(subset) <- "logical"
hasSubset <- 2L # subset as matrix
} else {
if(inherits(subset, "formula")) {
if (subset[[1L]] != "~" || length(subset) != 2L)
stop("'subset' formula should be one-sided")
subset <- subset[[2L]]
}
subset <- as.expression(subset)
ssValidNames <- c("comb", "*nvar*")
tmpTerms <- terms(reformulate(allTerms0[!(allTerms0 %in% interceptLabel)]))
gloFactorTable <- t(attr(tmpTerms, "factors") != 0)
offsetNames <- sapply(attr(tmpTerms, "variables")[attr(tmpTerms, "offset") + 1L], asChar)
if(length(offsetNames) != 0L) {
gloFactorTable <- rbind(gloFactorTable,
matrix(FALSE, ncol = ncol(gloFactorTable), nrow = length(offsetNames),
dimnames = list(offsetNames, NULL)))
for(i in offsetNames) gloFactorTable[offsetNames, offsetNames] <- TRUE
#Note `diag<-` does not work for x[1x1] matrix:
# diag(gloFactorTable[offsetNames, offsetNames, drop = FALSE]) <- TRUE
}
DebugPrint(gloFactorTable)
# fix interaction names in rownames:
rownames(gloFactorTable) <- allTerms0[!(allTerms0 %in% interceptLabel)]
# Rewrite the user's subset expression into one operating on the
# logical term-inclusion vector 'comb' (and 'cVar' for variants).
subsetExpr <- subset[[1L]]
subsetExpr <- exprapply0(subsetExpr, ".", .sub_dot, gloFactorTable,
allTerms, as.name("comb"))
subsetExpr <- exprapply0(subsetExpr, c("{", "Term"), .sub_Term)
tmp <- updateDeps(subsetExpr, deps)
subsetExpr <- tmp$expr
deps <- tmp$deps
subsetExpr <- exprapply0(subsetExpr, "dc", .sub_args_as_vars)
subsetExpr <- .subst4Vec(subsetExpr, allTerms, "comb")
if(nVarying) {
ssValidNames <- c("cVar", "comb", "*nvar*")
subsetExpr <- exprapply0(subsetExpr, "V", .sub_V,
as.name("cVar"), varyingNames)
if(!all(all.vars(subsetExpr) %in% ssValidNames))
subsetExpr <- .subst4Vec(subsetExpr, varyingNames,
"cVar", fun = "[[")
}
ssVars <- all.vars(subsetExpr)
okVars <- ssVars %in% ssValidNames
if(!all(okVars)) stop("unrecognized names in 'subset' expression: ",
prettyEnumStr(ssVars[!okVars]))
ssEnv <- new.env(parent = parent.frame())
ssFunc <- setdiff(all.vars(subsetExpr, functions = TRUE), ssVars)
if("dc" %in% ssFunc) assign("dc", .subset_dc, ssEnv)
hasSubset <- if(any(ssVars == "cVar")) 4L else # subset as expression
3L # subset as expression using 'varying' variables
}
} # END: manage 'subset'
# Enumeration state: 'comb' is built from the bits of the combination
# index; fixed terms are appended as an always-TRUE suffix.
comb.sfx <- rep(TRUE, nFixed)
comb.seq <- if(nov != 0L) seq_len(nov) else 0L
k <- 0L
extraResult1 <- integer(0L)
calls <- vector(mode = "list", length = rvChunk)
ord <- integer(rvChunk)
argsOptions <- list(
response = attr(allTerms0, "response"),
intercept = nIntercepts,
interceptLabel = interceptLabel,
random = attr(allTerms0, "random"),
gmCall = gmCall,
gmEnv = gmEnv,
allTerms = allTerms0,
gmCoefNames = gmCoefNames,
gmDataHead = if(!is.null(gmCall$data)) {
if(eval(call("is.data.frame", gmCall$data), gmEnv))
eval(call("head", gmCall$data, 1L), gmEnv) else gmCall$data
} else NULL,
gmFormulaEnv = gmFormulaEnv
)
# BEGIN parallel
# 'props' is the shared state handed to 'pdredge_process_model' (exported
# to cluster nodes as "pdredge_props" when running in parallel).
qi <- 0L
queued <- vector(qlen, mode = "list")
props <- list(
gmEnv = gmEnv,
IC = IC,
# beta = beta,
# allTerms = allTerms,
nextra = nextra,
matchCoefCall = as.call(c(list(
as.name("matchCoef"), as.name("fit1"),
all.terms = allTerms, beta = betaMode,
allCoef = TRUE), ct.args))
# matchCoefCall = as.call(c(alist(matchCoef, fit1, all.terms = Z$allTerms,
# beta = Z$beta, allCoef = TRUE), ct.args))
)
if(nextra) {
props$applyExtras <- applyExtras
props$extraResultNames <- names(extraResult)
}
props <- as.environment(props)
if(doParallel) {
clusterVExport(cluster, pdredge_props = props,
.pdredge_process_model = pdredge_process_model
)
clusterCall(cluster, eval, call("options", options("na.action")), env = 0L)
}
# END parallel
retColIdx <- if(nVarying) -nVars - seq_len(nVarying) else TRUE
if(trace > 1L) {
progressBar <- if(.Platform$GUI == "Rgui") {
utils::winProgressBar(max = ncomb, title = "'dredge' in progress")
} else utils::txtProgressBar(max = ncomb, style = 3)
setProgressBar <- switch(class(progressBar),
txtProgressBar = utils::setTxtProgressBar,
winProgressBar = utils::setWinProgressBar,
function(...) {})
on.exit(close(progressBar))
}
warningList <- list()
# Main enumeration loop: iComb runs over all combination x variant indices;
# eligible model calls are queued and flushed to workers in batches.
iComb <- -1L
while((iComb <- iComb + 1L) < ncomb) {
varComb <- iComb %% nVariants
jComb <- (iComb - varComb) / nVariants
#print(c(iComb, jComb, ncomb, varComb + 1L))
if(varComb == 0L) {
isok <- TRUE
comb <- c(as.logical(intToBits(jComb)[comb.seq]), comb.sfx)
nvar <- sum(comb) - nIntercepts
# !!! POSITIVE condition for 'pdredge', NEGATIVE for 'dredge':
if((nvar >= m.min && nvar <= m.max) &&
formula_margin_check(comb, deps) &&
switch(hasSubset,
# 1 - no subset, 2 - matrix, 3 - expression
TRUE, # 1
all(subset[comb, comb], na.rm = TRUE), # 2
evalExprInEnv(subsetExpr, env = ssEnv, enclos = parent.frame(),
comb = comb, `*nvar*` = nvar), # 3
TRUE
)
) {
newArgs <- makeArgs(global.model, allTerms[comb], argsOptions) #comb
formulaList <- if(is.null(attr(newArgs, "formulaList"))) newArgs else
attr(newArgs, "formulaList")
if(!is.null(attr(newArgs, "problems"))) {
print.warnings(structure(vector(mode = "list",
length = length(attr(newArgs, "problems"))),
names = attr(newArgs, "problems")))
} # end if <problems>
cl <- gmCall
cl[names(newArgs)] <- newArgs
} else isok <- FALSE # end if <subset, m.max >= nvar >= m.min>
} # end if(jComb != prevJComb)
if(isok) {
## --- Variants ---------------------------
clVariant <- cl
isok2 <- TRUE
if(nVarying) {
cvi <- variants[varComb + 1L, ]
isok2 <- (hasSubset != 4L) || evalExprInEnv(subsetExpr, env = ssEnv,
enclos = parent.frame(), comb = comb, `*nvar*` = nvar,
cVar = variantsFlat[cvi])
clVariant[varyingNames] <- fvarying[cvi]
}
if(isok2) {
if(evaluate) {
if(trace == 1L) {
cat(iComb, ": "); print(clVariant)
utils::flush.console()
} else if(trace == 2L) {
setProgressBar(progressBar, value = iComb,
title = sprintf("pdredge: %d of %.0f subsets", k, (k / iComb) * ncomb))
}
qi <- qi + 1L
queued[[(qi)]] <- list(call = clVariant, id = iComb)
} else { # if !evaluate
k <- k + 1L # all OK, add model to table
rvlen <- length(ord)
if(k > rvlen) {
nadd <- min(rvChunk, nmax - rvlen)
#message(sprintf("extending result from %d to %d", rvlen, rvlen + nadd))
addi <- seq.int(rvlen + 1L, length.out = nadd)
calls[addi] <- vector("list", nadd)
ord[addi] <- integer(nadd)
}
calls[[k]] <- clVariant
ord[k] <- iComb
}
}
} # if isok
# Flush the queue: once full (or on the final iteration) dispatch the
# queued calls to workers and merge their rows into the result matrix,
# collecting warnings and skipping models that errored.
#if(evaluate && qi && (qi + nvariants > qlen || iComb == ncomb)) {
if(evaluate && qi && (qi > qlen || (iComb + 1L) == ncomb)) {
qseq <- seq_len(qi)
qresult <- .getRow(queued[qseq])
utils::flush.console()
if(any(vapply(qresult, is.null, TRUE)))
stop("some results returned from cluster node(s) are NULL. \n",
"This should not happen and indicates problems with ",
"the cluster node", domain = "R-MuMIn")
haveProblems <- logical(qi)
nadd <- sum(sapply(qresult, function(x) inherits(x$value, "condition")
+ length(x$warnings)))
wi <- length(warningList)
if(nadd) warningList <- c(warningList, vector(nadd, mode = "list"))
# DEBUG: print(sprintf("Added %d warnings, now is %d", nadd, length(warningList)))
for (i in qseq)
for(cond in c(qresult[[i]]$warnings,
if(inherits(qresult[[i]]$value, "condition"))
list(qresult[[i]]$value))) {
wi <- wi + 1L
warningList[[wi]] <- if(is.null(conditionCall(cond)))
queued[[i]]$call else conditionCall(cond)
if(inherits(cond, "error")) {
haveProblems[i] <- TRUE
msgsfx <- "(model %d skipped)"
} else
msgsfx <- "(in model %d)"
names(warningList)[wi] <- paste(conditionMessage(cond),
gettextf(msgsfx, queued[[i]]$id))
attr(warningList[[wi]], "id") <- queued[[i]]$id
}
withoutProblems <- which(!haveProblems)
qrows <- lapply(qresult[withoutProblems], "[[", "value")
qresultLen <- length(qrows)
rvlen <- nrow(rval)
if(retNeedsExtending <- k + qresultLen > rvlen) {
nadd <- min(max(rvChunk, qresultLen), nmax - rvlen)
rval <- rbind(rval, matrix(NA_real_, ncol = rvNcol, nrow = nadd),
deparse.level = 0L)
addi <- seq.int(rvlen + 1L, length.out = nadd)
coefTables[addi] <- vector("list", nadd)
calls[addi] <- vector("list", nadd)
ord[addi] <- integer(nadd)
}
qseqOK <- seq_len(qresultLen)
for(m in qseqOK) rval[k + m, retColIdx] <- qrows[[m]]
ord[k + qseqOK] <- vapply(queued[withoutProblems], "[[", 1L, "id")
calls[k + qseqOK] <- lapply(queued[withoutProblems], "[[", "call")
coefTables[k + qseqOK] <- lapply(qresult[withoutProblems], "[[", "coefTable")
k <- k + qresultLen
qi <- 0L
}
} ### for (iComb ...)
if(k == 0L) {
if(length(warningList)) print.warnings(warningList)
stop("the result is empty")
}
names(calls) <- ord
if(!evaluate) return(calls[seq_len(k)])
# Trim preallocated storage down to the k models actually evaluated.
if(k < nrow(rval)) {
i <- seq_len(k)
rval <- rval[i, , drop = FALSE]
ord <- ord[i]
calls <- calls[i]
coefTables <- coefTables[i]
}
if(nVarying) {
varlev <- ord %% nVariants
varlev[varlev == 0L] <- nVariants
rval[, nVars + seq_len(nVarying)] <- variants[varlev, ]
}
# Assemble the "model.selection" data frame: restore the original term
# order, label columns, sort by IC and add delta / Akaike weights.
rval <- as.data.frame(rval)
row.names(rval) <- ord
# Convert columns with presence/absence of terms to factors
tfac <- which(!(allTerms %in% gmCoefNames))
rval[tfac] <- lapply(rval[tfac], factor, levels = NaN, labels = "+")
i <- seq_along(allTerms)
v <- order(termsOrder)
rval[, i] <- rval[, v]
allTerms <- allTerms[v]
colnames(rval) <- c(allTerms, varyingNames, extraNames, "df", lik$name, ICName)
if(nVarying) {
variant.names <- vapply(variantsFlat, asChar, "", width.cutoff = 20L)
vnum <- split(seq_len(sum(vlen)), rep(seq_len(nVarying), vlen))
names(vnum) <- varyingNames
for (i in varyingNames) rval[, i] <-
factor(rval[, i], levels = vnum[[i]], labels = variant.names[vnum[[i]]])
}
rval <- rval[o <- order(rval[, ICName], decreasing = FALSE), ]
coefTables <- coefTables[o]
rval$delta <- rval[, ICName] - min(rval[, ICName])
rval$weight <- exp(-rval$delta / 2) / sum(exp(-rval$delta / 2))
mode(rval$df) <- "integer"
rval <-
structure(rval,
model.calls = calls[o],
global = global.model,
global.call = gmCall,
terms = structure(allTerms, interceptLabel = interceptLabel),
rank = IC,
beta = strbeta,
call = match.call(expand.dots = TRUE),
coefTables = coefTables,
nobs = gmNobs,
vCols = varyingNames, ## XXX: remove
column.types = {
colTypes <- c(terms = length(allTerms), varying = length(varyingNames),
extra = length(extraNames), df = 1L, loglik = 1L, ic = 1L, delta = 1L,
weight = 1L)
column.types <- rep(1L:length(colTypes), colTypes)
names(column.types) <- colnames(rval)
lv <- 1L:length(colTypes)
factor(column.types, levels = lv, labels = names(colTypes)[lv])
},
class = c("model.selection", "data.frame")
)
if(length(warningList)) {
class(warningList) <- c("warnings", "list")
attr(rval, "warnings") <- warningList
}
if (!is.null(attr(allTerms0, "random.terms")))
attr(rval, "random.terms") <- attr(allTerms0, "random.terms")
# Clean up the objects exported to the cluster nodes.
if(doParallel) clusterCall(cluster, "rm",
list = c(".pdredge_process_model", "pdredge_props"), envir = .GlobalEnv)
return(rval)
} ######
# Worker-side evaluator for a single candidate model, called by 'pdredge'
# either locally or on a cluster node (where it is exported as
# ".pdredge_process_model" together with the shared state "pdredge_props").
#
# Arguments:
#   modv  - list(call = clVariant, id = modelId): the model call to fit and
#           its integer id within the all-subsets enumeration.
#   envir - environment with state prepared by 'pdredge': gmEnv (where the
#           call is evaluated), IC (rank function), nextra, matchCoefCall,
#           and, when 'extra' statistics were requested, applyExtras and
#           extraResultNames.
#
# Returns: on fitting failure, the 'tryCatchWE' result holding the condition;
# otherwise a list with: value (matched coefficients + extras + df + logLik +
# IC), nobs, coefTable and any collected warnings.
`pdredge_process_model` <- function(modv, envir = get("pdredge_props", .GlobalEnv)) {
### modv == list(call = clVariant, id = modelId)
# Fit the model in the data environment captured by 'pdredge', collecting
# warnings without aborting; an error is returned as a condition value.
result <- tryCatchWE(eval(modv$call, get("gmEnv", envir)))
if (inherits(result$value, "condition")) return(result)
# NOTE: the name 'fit1' is significant -- the call in 'matchCoefCall'
# (evaluated below in this frame) refers to it by symbol.
fit1 <- result$value
if(get("nextra", envir) != 0L) {
# Compute user-supplied 'extra' statistics; pad with NA so the result
# row always has the full expected length.
extraResult1 <- get("applyExtras", envir)(fit1)
nextra <- get("nextra", envir)
if(length(extraResult1) < nextra) {
tmp <- rep(NA_real_, nextra)
tmp[match(names(extraResult1), get("extraResultNames", envir))] <-
extraResult1
extraResult1 <- tmp
}
} else extraResult1 <- NULL
ll <- .getLik(fit1)$logLik(fit1)
#mcoef <- matchCoef(fit1, all.terms = get("allTerms", envir),
# beta = get("beta", envir), allCoef = TRUE)
mcoef <- eval(get("matchCoefCall", envir))
list(value = c(mcoef, extraResult1, df = attr(ll, "df"), ll = ll,
ic = get("IC", envir)(fit1)),
nobs = nobs(fit1),
coefTable = attr(mcoef, "coefTable"),
warnings = result$warnings)
}
# Consistency check for a 'pdredge' result: re-run the same model selection
# serially via 'dredge' and verify both tables are identical. Returns 'dd'
# invisibly unchanged on success, stops otherwise.
.test_pdredge <- function(dd) {
  sel_call <- attr(dd, "call")
  # Strip the pdredge-only arguments, then redirect the call to 'dredge':
  sel_call$cluster <- NULL
  sel_call$check <- NULL
  sel_call[[1L]] <- as.name("dredge")
  serial_result <- eval(sel_call)
  if (!identical(c(dd), c(serial_result))) {
    stop("Whoops...")
  }
  dd
}
|
\name{.JavaArrayConstructor}
\name{.JavaGetArrayElement}
\name{.JavaSetArrayElement}
\name{.JavaArrayLength}
\alias{.JavaArrayConstructor}
\alias{.JavaGetArrayElement}
\alias{.JavaSetArrayElement}
\alias{.JavaArrayLength}
\title{Create and access elements of Java arrays from R.}
\description{
These functions allow one to create multi-dimensional
Java arrays via R commands using the \code{\link{.Java}}
function. The get and set accessors work element-wise
and not in the vector fashion common in R and S.
One must create and initialize the Java virtual machine
before calling any of these functions. See
\code{\link{.JavaInit}}.
}
\usage{
.JavaArrayConstructor(klass, ..., dim=length(list(...)), .name=NULL, .convert=F)
.JavaGetArrayElement(jobj,..., .name=NULL, .convert=T)
.JavaSetArrayElement(jobj, value, ...)
.JavaArrayLength(jobj)
}
\arguments{
\item{klass}{Typically a string (character vector of length 1)
identifying the name of the class of the element type in the array to be created.
This can also be a foreign reference to a Java class object
obtained via an earlier call to \code{\link{.Java}}
}
\item{\dots}{In the \code{.JavaArrayConstructor}, these
are currently ignored. They are intended to be initializing values
that are used to populate the top-level values of the new array.
That is, they are used to set
    \code{arr[0], arr[1], arr[2], \dots}
  }
 \item{dim}{When creating an array in \code{.JavaArrayConstructor},
these specify both the number of dimensions and the length of each dimension
in the array to be created.
}
\item{jobj}{This is the reference to the Java array returned
from an earlier call to \code{.JavaArrayConstructor}
or the return value from a call to \code{\link{.Java}}.}
 \item{value}{In \code{.JavaSetArrayElement}, the value to assign to the
   specified element of the array.}
 }
\details{
This uses the \code{\link{.Java}}
to call methods in the Omegahat Evaluator
which process the array request.
}
\value{
\code{.JavaArrayConstructor} returns a reference
 to the newly created Java array object.
\code{.JavaArrayLength} returns a single integer
giving the length of the top-level dimension of the array.
\code{.JavaGetArrayElement} returns the value of
the specified element of the given array, converted
to an R object as usual. Thus it may be a Java reference.
\code{.JavaSetArrayElement} returns \code{NULL}.
}
\references{\url{http://www.javasoft.com}, \url{http://www.omegahat.org}}
\author{Duncan Temple Lang, John Chambers}
\seealso{
\code{\link{.Java}}
}
\examples{
a <- .JavaArrayConstructor("String", dim=3)
.JavaArrayLength(a)
.JavaSetArrayElement(a, "First", 1)
.JavaSetArrayElement(a, "Second", 2)
.JavaSetArrayElement(a, "Third", 3)
.JavaGetArrayElement(a, 2)
}
\keyword{Java}
\keyword{programming}
\keyword{interface}
|
/man/Array.Rd
|
no_license
|
cran/Java
|
R
| false
| false
| 2,733
|
rd
|
\name{.JavaArrayConstructor}
\name{.JavaGetArrayElement}
\name{.JavaSetArrayElement}
\name{.JavaArrayLength}
\alias{.JavaArrayConstructor}
\alias{.JavaGetArrayElement}
\alias{.JavaSetArrayElement}
\alias{.JavaArrayLength}
\title{Create and access elements of Java arrays from R.}
\description{
These functions allow one to create multi-dimensional
Java arrays via R commands using the \code{\link{.Java}}
function. The get and set accessors work element-wise
and not in the vector fashion common in R and S.
One must create and initialize the Java virtual machine
before calling any of these functions. See
\code{\link{.JavaInit}}.
}
\usage{
.JavaArrayConstructor(klass, ..., dim=length(list(...)), .name=NULL, .convert=F)
.JavaGetArrayElement(jobj,..., .name=NULL, .convert=T)
.JavaSetArrayElement(jobj, value, ...)
.JavaArrayLength(jobj)
}
\arguments{
\item{klass}{Typically a string (character vector of length 1)
identifying the name of the class of the element type in the array to be created.
This can also be a foreign reference to a Java class object
obtained via an earlier call to \code{\link{.Java}}
}
\item{\dots}{In the \code{.JavaArrayConstructor}, these
are currently ignored. They are intended to be initializing values
that are used to populate the top-level values of the new array.
That is, they are used to set
    \code{arr[0], arr[1], arr[2], \dots}
  }
 \item{dim}{When creating an array in \code{.JavaArrayConstructor},
these specify both the number of dimensions and the length of each dimension
in the array to be created.
}
\item{jobj}{This is the reference to the Java array returned
from an earlier call to \code{.JavaArrayConstructor}
or the return value from a call to \code{\link{.Java}}.}
\item{value}{In \code{.JavaSetArrayElement}, the value to assign to the
specified element of the array.}
}
\details{
This uses the \code{\link{.Java}}
to call methods in the Omegahat Evaluator
which process the array request.
}
\value{
\code{.JavaArrayConstructor} returns a reference
to the newly create Java array object.
\code{.JavaArrayLength} returns a single integer
giving the length of the top-level dimension of the array.
\code{.JavaGetArrayElement} returns the value of
the specified element of the given array, converted
to an R object as usual. Thus it may be a Java reference.
\code{.JavaSetArrayElement} returns \code{NULL}.
}
\references{\url{http://www.javasoft.com}, \url{http://www.omegahat.org}}
\author{Duncan Temple Lang, John Chambers}
\seealso{
\code{\link{.Java}}
}
\examples{
a <- .JavaArrayConstructor("String", dim=3)
.JavaArrayLength(a)
.JavaSetArrayElement(a, "First", 1)
.JavaSetArrayElement(a, "Second", 2)
.JavaSetArrayElement(a, "Third", 3)
.JavaGetArrayElement(a, 2)
}
\keyword{Java}
\keyword{programming}
\keyword{interface}
|
\name{ulog}
\alias{ulog}
\alias{ulog.init}
\title{
System logging functions
}
\description{
\code{ulog} sends output to a system log or ulog daemon.
\code{ulog.init} defines where all logging will be directed to.
}
\usage{
ulog(...)
ulog.init(path = NULL, application = NULL)
}
\arguments{
\item{path}{string, path to the unix socket of the logging daemon or
specification of the form either "udp://host[:port]" or
"tcp://host[:port]" for a remote connection. If \code{NULL} is
passed the path setting is not changed.
}
\item{application}{string, name of the application that will be
reported to the system or \code{NULL} to not change that setting.}
\item{...}{any content to send to the log service - it is used as
\code{paste(..., sep="", collapse="\n")}}
}
\details{
\code{ulog} provides a way to perform logging without cluttering the
console or stdout/stderr. It also allows multi-process and parallel
logging as each log message is transmitted independently and
en-bloc. It also allows multi-user logging with access control.
Although any syslog daemon can be used, a minimalistic implementation
of the daemon is included in the sources in \code{src/ulogd}.
Note that all logging is silent and will not fail even if the
receiving side doesn't exist. This allows unconditional use of
\code{ulog()}.
This package has been forked from Rserve which has used ulog
internally.
}
\value{
\code{ulog} returns the logged string invisibly
\code{ulog.init} returns the current logging path, thus
\code{ulog.init()} can be used to query the current setting without
changing anything.
}
%\references{
%}
\author{
Simon Urbanek
}
%\seealso{
%}
\examples{
ulog.init("/var/run/syslogd", "R")
ulog("a message from R")
}
\keyword{manip}
|
/man/ulog.Rd
|
no_license
|
s-u/ulog
|
R
| false
| false
| 1,782
|
rd
|
\name{ulog}
\alias{ulog}
\alias{ulog.init}
\title{
System logging functions
}
\description{
\code{ulog} sends output to a system log or ulog daemon.
\code{ulog.init} defines where all logging will be directed to.
}
\usage{
ulog(...)
ulog.init(path = NULL, application = NULL)
}
\arguments{
\item{path}{string, path to the unix socket of the logging daemon or
specification of the form either "udp://host[:port]" or
"tcp://host[:port]" for a remote connection. If \code{NULL} is
passed the path setting is not changed.
}
\item{application}{string, name of the application that will be
reported to the system or \code{NULL} to not change that setting.}
\item{...}{any content to send to the log service - it is used as
\code{paste(..., sep="", collapse="\n")}}
}
\details{
\code{ulog} provides a way to perform logging without cluttering the
console or stdout/stderr. It also allows multi-process and parallel
logging as each log message is transmitted independently and
en-bloc. It also allows multi-user logging with access control.
Although any syslog daemon can be used, a minimalistic implementation
of the daemon is included in the sources in \code{src/ulogd}.
Note that all logging is silent and will not fail even if the
receiving side doesn't exist. This allows unconditional use of
\code{ulog()}.
This package has been forked from Rserve which has used ulog
internally.
}
\value{
\code{ulog} returns the logged string invisibly
\code{ulog.init} returns the current logging path, thus
\code{ulog.init()} can be used to query the current setting without
changing anything.
}
%\references{
%}
\author{
Simon Urbanek
}
%\seealso{
%}
\examples{
ulog.init("/var/run/syslogd", "R")
ulog("a message from R")
}
\keyword{manip}
|
# Building a prod-ready, robust Shiny application (golem dev workflow).
# Each step is optional.

# 2. All along your project

## 2.1 Add modules
golem::add_module(name = "country_select")
golem::add_module(name = "country_flag")
golem::add_module(name = "country_map")
golem::add_module(name = "wb_indicator_text")
golem::add_module(name = "wb_indicator_table")
golem::add_module(name = "ffd_indicator_table")
golem::add_module(name = "ffd_indicator_series")
golem::add_module(name = "ffd_country_series")
golem::add_module(name = "ffd_product_series")
golem::add_module(name = "country_name")
golem::add_module(name = "test")

## 2.2 Add dependencies -- call usethis::use_package() each time the app
## needs a new package.
usethis::use_package("ggplot2")
usethis::use_package("dplyr")
usethis::use_package("tibble")
usethis::use_package("rlang")
usethis::use_package("echarts4r")
usethis::use_package("shinyMobile")

## 2.3 Add tests
usethis::use_test("app")

## 2.4 Add a browser button
golem::browser_button()

## 2.5 Add external files
golem::add_js_file("script")
golem::add_js_handler("handlers")
golem::add_css_file("custom")

# 3. Documentation

## 3.1 Vignette
usethis::use_vignette("iapdashboard")
devtools::build_vignettes()

## 3.2 Code coverage -- you'll need GitHub for this.
usethis::use_github()
usethis::use_travis()
usethis::use_appveyor()

# You're now set! Go to dev/03_deploy.R:
rstudioapi::navigateToFile("dev/03_deploy.R")
|
/dev/02_dev.R
|
permissive
|
lee269/iapdashboard
|
R
| false
| false
| 1,670
|
r
|
# Building a prod-ready, robust Shiny application (golem dev workflow).
# Each step is optional.

# 2. All along your project

## 2.1 Add modules
golem::add_module(name = "country_select")
golem::add_module(name = "country_flag")
golem::add_module(name = "country_map")
golem::add_module(name = "wb_indicator_text")
golem::add_module(name = "wb_indicator_table")
golem::add_module(name = "ffd_indicator_table")
golem::add_module(name = "ffd_indicator_series")
golem::add_module(name = "ffd_country_series")
golem::add_module(name = "ffd_product_series")
golem::add_module(name = "country_name")
golem::add_module(name = "test")

## 2.2 Add dependencies -- call usethis::use_package() each time the app
## needs a new package.
usethis::use_package("ggplot2")
usethis::use_package("dplyr")
usethis::use_package("tibble")
usethis::use_package("rlang")
usethis::use_package("echarts4r")
usethis::use_package("shinyMobile")

## 2.3 Add tests
usethis::use_test("app")

## 2.4 Add a browser button
golem::browser_button()

## 2.5 Add external files
golem::add_js_file("script")
golem::add_js_handler("handlers")
golem::add_css_file("custom")

# 3. Documentation

## 3.1 Vignette
usethis::use_vignette("iapdashboard")
devtools::build_vignettes()

## 3.2 Code coverage -- you'll need GitHub for this.
usethis::use_github()
usethis::use_travis()
usethis::use_appveyor()

# You're now set! Go to dev/03_deploy.R:
rstudioapi::navigateToFile("dev/03_deploy.R")
|
# Fit a 10-fold cross-validated elastic net (alpha = 0.15, gaussian family)
# on the oesophagus training set and append the fitted path summary to a
# results file.
library(glmnet)

# Column 1 is the response; columns 4+ are predictors (columns 2-3 are
# presumably identifiers/metadata -- TODO confirm against the CSV layout).
# `header = TRUE` spelled out: the original `head = T` relied on partial
# argument matching and the reassignable `T` shortcut.
mydata <- read.table("../../../../TrainingSet/FullSet/Classifier/oesophagus.csv",
                     header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

# Fixed seed so the CV fold assignment is reproducible.
set.seed(123)
# Renamed from `glm`, which masked stats::glm.
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.15,
                    family = "gaussian", standardize = FALSE)

# Append the regularization path summary to the log file, then restore
# normal console output.
sink("./oesophagus_032.txt", append = TRUE)
print(cv_fit$glmnet.fit)
sink()
|
/Model/EN/Classifier/oesophagus/oesophagus_032.R
|
no_license
|
esbgkannan/QSMART
|
R
| false
| false
| 358
|
r
|
# Fit a 10-fold cross-validated elastic net (alpha = 0.15, gaussian family)
# on the oesophagus training set and append the fitted path summary to a
# results file.
library(glmnet)

# Column 1 is the response; columns 4+ are predictors (columns 2-3 are
# presumably identifiers/metadata -- TODO confirm against the CSV layout).
# `header = TRUE` spelled out: the original `head = T` relied on partial
# argument matching and the reassignable `T` shortcut.
mydata <- read.table("../../../../TrainingSet/FullSet/Classifier/oesophagus.csv",
                     header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

# Fixed seed so the CV fold assignment is reproducible.
set.seed(123)
# Renamed from `glm`, which masked stats::glm.
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.15,
                    family = "gaussian", standardize = FALSE)

# Append the regularization path summary to the log file, then restore
# normal console output.
sink("./oesophagus_032.txt", append = TRUE)
print(cv_fit$glmnet.fit)
sink()
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 27646
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 27646
c
c Input Parameter (command line, file):
c input filename QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#128.A#48.c#.w#9.s#13.asp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 9391
c no.of clauses 27646
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 27646
c
c QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#128.A#48.c#.w#9.s#13.asp.qdimacs 9391 27646 E1 [] 0 128 9263 27646 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#128.A#48.c#.w#9.s#13.asp/ctrl.e#1.a#3.E#128.A#48.c#.w#9.s#13.asp.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 732
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 27646
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 27646
c
c Input Parameter (command line, file):
c input filename QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#128.A#48.c#.w#9.s#13.asp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 9391
c no.of clauses 27646
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 27646
c
c QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#128.A#48.c#.w#9.s#13.asp.qdimacs 9391 27646 E1 [] 0 128 9263 27646 NONE
|
# Plot 4: 2x2 panel of household power-consumption time series for
# 2007-02-01 / 2007-02-02, saved to plot4.png.

# Read in the data; '?' marks missing values.
data <- read.csv("./household_power_consumption.txt", header = TRUE, sep = ";",
                 na.strings = "?", nrows = 2075259, check.names = FALSE,
                 stringsAsFactors = FALSE, comment.char = "", quote = '\"')
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")

# Keep only the two days of interest.
data <- subset(data, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))

# Combine date and time into a single POSIXct timestamp for the x axis.
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)

# Create plot 4 (the original comment said "Plot 3"): four panels.
par(mfrow = c(2, 2), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
plot(data$Global_active_power ~ data$Datetime, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
plot(data$Voltage ~ data$Datetime, type = "l",
     ylab = "Voltage (volt)", xlab = "")
# Sub-metering panel: y label fixed -- the original repeated
# "Global Active Power (kilowatts)", a copy-paste error.
plot(data$Sub_metering_1 ~ data$Datetime, type = "l",
     ylab = "Energy sub metering", xlab = "")
lines(data$Sub_metering_2 ~ data$Datetime, col = "Red")
lines(data$Sub_metering_3 ~ data$Datetime, col = "Blue")
legend("topright", col = c("black", "red", "blue"), lty = 1, lwd = 2,
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# "Rective" typo fixed in the axis label.
plot(data$Global_reactive_power ~ data$Datetime, type = "l",
     ylab = "Global Reactive Power (kilowatts)", xlab = "")

# Save the on-screen device to a 480x480 PNG.
dev.copy(png, file = "plot4.png", height = 480, width = 480)
dev.off()
|
/Plot 4.R
|
no_license
|
agusdon/R-Exploratory-Data-Analysis
|
R
| false
| false
| 1,230
|
r
|
# Plot 4: 2x2 panel of household power-consumption time series for
# 2007-02-01 / 2007-02-02, saved to plot4.png.

# Read in the data; '?' marks missing values.
data <- read.csv("./household_power_consumption.txt", header = TRUE, sep = ";",
                 na.strings = "?", nrows = 2075259, check.names = FALSE,
                 stringsAsFactors = FALSE, comment.char = "", quote = '\"')
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")

# Keep only the two days of interest.
data <- subset(data, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))

# Combine date and time into a single POSIXct timestamp for the x axis.
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)

# Create plot 4 (the original comment said "Plot 3"): four panels.
par(mfrow = c(2, 2), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
plot(data$Global_active_power ~ data$Datetime, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
plot(data$Voltage ~ data$Datetime, type = "l",
     ylab = "Voltage (volt)", xlab = "")
# Sub-metering panel: y label fixed -- the original repeated
# "Global Active Power (kilowatts)", a copy-paste error.
plot(data$Sub_metering_1 ~ data$Datetime, type = "l",
     ylab = "Energy sub metering", xlab = "")
lines(data$Sub_metering_2 ~ data$Datetime, col = "Red")
lines(data$Sub_metering_3 ~ data$Datetime, col = "Blue")
legend("topright", col = c("black", "red", "blue"), lty = 1, lwd = 2,
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# "Rective" typo fixed in the axis label.
plot(data$Global_reactive_power ~ data$Datetime, type = "l",
     ylab = "Global Reactive Power (kilowatts)", xlab = "")

# Save the on-screen device to a 480x480 PNG.
dev.copy(png, file = "plot4.png", height = 480, width = 480)
dev.off()
|
# this file calculates teams' elo ratings after each game played in a season.
# initial elo is set to 1505, the long-run league average per 538 (the code below uses init_elo <- 1505)
# the formula for elo is:
# R[i+1] = R[i] + K * (S[a] - E[a])
# where
# E[a] = 1 / (1 + 10^((elo[b] - elo[a]) / 400))
# and
# K = 20 * (MoV[a] + 3)^0.8 / (7.5 + 0.006 * elo_difference[a])
# where R[i] = previous elo, S[a] = 1 if team a wins or 0 if it loses, and E[a] is the expected outcome for team a.
# master var: starting elo:
init_elo <- 1505
# function to calculating elo
# Compute the (signed) Elo update for the home team after one game.
#
# Implements the 538-style update: K * (S - E), where E is the home
# team's expected score (logistic in the rating gap, after a +100
# home-court bonus), S is the actual score (1 win / 0.5 tie / 0 loss),
# and K folds in a margin-of-victory multiplier that shrinks updates
# for blowouts by already-stronger teams.
#
# Args:
#   elo_a: home team's rating before the game (home-court +100 added here).
#   elo_b: visiting team's rating before the game.
#   pts_a: home team's points.
#   pts_b: visitor's points.
#
# Returns: a single numeric; callers add it to the home team's rating
# and subtract it from the visitor's (zero-sum).
calculate_elo <- function(elo_a, elo_b, pts_a, pts_b) {
  # Home-court advantage bonus.
  elo_a <- elo_a + 100
  # Winner/loser ratings feed the margin-of-victory multiplier.
  # BUG FIX: the original left elo_w/elo_l unassigned when
  # pts_a == pts_b, crashing with "object 'elo_w' not found".
  # Ties cannot happen in the NBA, but guard anyway by treating the
  # higher-rated side as the "winner" so the multiplier stays defined.
  if (pts_a > pts_b) {
    elo_w <- elo_a
    elo_l <- elo_b
  } else if (pts_a < pts_b) {
    elo_w <- elo_b
    elo_l <- elo_a
  } else {
    elo_w <- max(elo_a, elo_b)
    elo_l <- min(elo_a, elo_b)
  }
  mov_mult <- ((abs(pts_a - pts_b) + 3)^0.8) / (7.5 + 0.006 * (elo_w - elo_l))
  K <- 20 * mov_mult
  # Expected home score: logistic curve in the rating difference.
  E <- 1 / (10^((elo_b - elo_a) / 400) + 1)
  # Actual score S: 1 for a home win, 0.5 for a tie, 0 for a loss.
  S <- if (pts_a > pts_b) 1 else if (pts_a < pts_b) 0 else 0.5
  K * (S - E)
}
# Worked example: strong home team (1618) beats a 1500 visitor 94-90.
calculate_elo(elo_a = 1618,elo_b = 1500,pts_a = 94,pts_b = 90)
# elo for current season, set at 1505 initially ------------------------------
# compute elo for a year's worth of games
year <- 2019
# Schedule CSV is expected to have home/visitor team name, points and a
# date_game column -- TODO confirm against the data/schedules files.
season <- read.csv(sprintf("data/schedules/NBA-%s_schedule.csv",year),stringsAsFactors = F)
# Keep only games already played, in chronological order (order matters:
# each game's update feeds the next one's ratings).
season <- season %>%
arrange(ymd(date_game)) %>%
filter(!is.na(home_pts))
# Running ratings table: every team starts the season at init_elo (1505).
elo_df <- data.frame(team = unique(season$home_team_name),
elo = init_elo,
date = 'pre',
stringsAsFactors = F)
# update after each game
elo_games_played <-
lapply(1:nrow(season),
function(game_idx){
game <- season[game_idx,]
# current ratings for both sides, looked up in the running table
elo_a = elo_df[elo_df$team == game$home_team_name,]$elo
elo_b = elo_df[elo_df$team == game$visitor_team_name,]$elo
# update
elo_update <- calculate_elo(elo_a = elo_a,
elo_b = elo_b,
pts_a = game$home_pts,
pts_b = game$visitor_pts)
# Elo is zero-sum: the home side gains exactly what the visitor loses.
elo_a_new <- elo_a + elo_update
elo_b_new <- elo_b - elo_update
# update the running table; `<<-` mutates elo_df in the enclosing
# scope so later iterations see the new ratings (order-dependent)
elo_df[elo_df$team == game$home_team_name,]$elo <<- elo_a_new
elo_df[elo_df$team == game$visitor_team_name,]$elo <<- elo_b_new
# return a df for graphing
elo_df.a <- data.frame(team = game$home_team_name,
elo = elo_a_new,
date = game$date_game,
stringsAsFactors = F)
elo_df.b <- data.frame(team = game$visitor_team_name,
elo = elo_b_new,
date = game$date_game,
stringsAsFactors = F)
return(rbind(elo_df.a,elo_df.b))
}
) %>% do.call('rbind',.)
# Latest rating per team (first row after sorting newest-first), best first.
top_elo <- elo_games_played %>%
arrange(desc(ymd(date)),desc(elo)) %>%
group_by(team) %>%
summarise(elo = first(elo),date=first(date)) %>%
as.data.frame() %>%
arrange(desc(elo))
top_elo
# Order factor levels by final rating so facets render best-first.
elo_games_played$team = factor(elo_games_played$team,top_elo$team)
ggplot(elo_games_played,# %>% filter(team %in% top_elo$team),
aes(x=ymd(date),y=elo,col=team)) +
geom_step() +
#geom_label_repel(data = top_elo,aes(x=ymd(date),y=elo,col=team,label=team),alpha=0.9) +
theme_minimal() +
theme(legend.position = 'none') +
facet_wrap(~team)
# compute historical elo --------------------------------------------------
# start in 1950 with everyone set at 1505, loop through every game, at the end of each season carrying over elo equal to (final elo * 0.75) + (1505*0.25), per 538
# Derive the season range from schedule filenames (chars 5-8 of "NBA-YYYY...").
years <- substr(dir("data/schedules/"),5,8) %>% as.numeric()
# NOTE(review): the line above is immediately overwritten -- the hard-coded
# range below is what actually runs.
years <- 1950:2019
# One entry per season: list(per-game elo history, end-of-season carryover).
elo_overtime <- vector('list',length(years))
print("####################################")
print("CALCULATE HISTORICAL ELO:")
print("####################################")
for (year_idx in 1:length(years)){
year <- years[[year_idx]]
print(sprintf("Getting elo for %s",year))
# get schedule
season <- read.csv(sprintf("data/schedules/NBA-%s_schedule.csv",year),stringsAsFactors = F)
season <- season %>%
arrange(ymd(date_game)) %>%
filter(!is.na(home_pts))
# initialize elo at 1505 (init_elo) for the first season, else carry over
# last season's regressed ratings
if(year == min(years)){
elo_df <- data.frame(team = unique(season$home_team_name),
elo = init_elo,
date = 'pre',
stringsAsFactors = F)
} else {
# if new team, append at 1505, else take the carryover
elo_df <- data.frame(team = unique(season$home_team_name),
elo = init_elo,
date = 'pre',
stringsAsFactors = F)
elo_df[elo_df$team %in% elo_carryover$team,]$elo <- elo_carryover[
match(elo_df[elo_df$team %in% elo_carryover$team,]$team,
elo_carryover$team),]$elo
}
# update after each game
elo_games_played <-
lapply(1:nrow(season),
function(game_idx){
game <- season[game_idx,]
# current ratings for both sides, looked up in the running table
elo_a = elo_df[elo_df$team == game$home_team_name,]$elo
elo_b = elo_df[elo_df$team == game$visitor_team_name,]$elo
# update
elo_update <- calculate_elo(elo_a = elo_a,
elo_b = elo_b,
pts_a = game$home_pts,
pts_b = game$visitor_pts)
elo_a_new <- elo_a + elo_update
elo_b_new <- elo_b - elo_update
# update the elo (`<<-` mutates elo_df in the loop's scope so the
# next game sees the new ratings)
elo_df[elo_df$team == game$home_team_name,]$elo <<- elo_a_new
elo_df[elo_df$team == game$visitor_team_name,]$elo <<- elo_b_new
# return a df for graphing
elo_df.a <- data.frame(team = game$home_team_name,
elo = elo_a_new,
date = game$date_game,
stringsAsFactors = F)
elo_df.b <- data.frame(team = game$visitor_team_name,
elo = elo_b_new,
date = game$date_game,
stringsAsFactors = F)
return(rbind(elo_df.a,elo_df.b))
}
) %>% do.call('rbind',.)
# end-of-season carryover: regress each rating 25% toward 1505 (per 538)
elo_carryover <- elo_df %>%
mutate(elo = (elo*0.75) + (1505*0.25),
date = 'post')
# return the final elo, and every game played
elo_overtime[[year_idx]] <- list(elo_games_played,elo_carryover)
}
|
/scripts/calculate_team_elo.R
|
no_license
|
elliottmorris/rNBA
|
R
| false
| false
| 7,108
|
r
|
# this file calculates teams' elo ratings after each game played in a season.
# initial elo is set to 1505, the long-run league average per 538 (the code below uses init_elo <- 1505)
# the formula for elo is:
# R[i+1] = R[i] + K * (S[a] - E[a])
# where
# E[a] = 1 / (1 + 10^((elo[b] - elo[a]) / 400))
# and
# K = 20 * (MoV[a] + 3)^0.8 / (7.5 + 0.006 * elo_difference[a])
# where R[i] = previous elo, S[a] = 1 if team a wins or 0 if it loses, and E[a] is the expected outcome for team a.
# master var: starting elo:
init_elo <- 1505
# function to calculating elo
# Compute the (signed) Elo update for the home team after one game.
#
# Implements the 538-style update: K * (S - E), where E is the home
# team's expected score (logistic in the rating gap, after a +100
# home-court bonus), S is the actual score (1 win / 0.5 tie / 0 loss),
# and K folds in a margin-of-victory multiplier that shrinks updates
# for blowouts by already-stronger teams.
#
# Args:
#   elo_a: home team's rating before the game (home-court +100 added here).
#   elo_b: visiting team's rating before the game.
#   pts_a: home team's points.
#   pts_b: visitor's points.
#
# Returns: a single numeric; callers add it to the home team's rating
# and subtract it from the visitor's (zero-sum).
calculate_elo <- function(elo_a, elo_b, pts_a, pts_b) {
  # Home-court advantage bonus.
  elo_a <- elo_a + 100
  # Winner/loser ratings feed the margin-of-victory multiplier.
  # BUG FIX: the original left elo_w/elo_l unassigned when
  # pts_a == pts_b, crashing with "object 'elo_w' not found".
  # Ties cannot happen in the NBA, but guard anyway by treating the
  # higher-rated side as the "winner" so the multiplier stays defined.
  if (pts_a > pts_b) {
    elo_w <- elo_a
    elo_l <- elo_b
  } else if (pts_a < pts_b) {
    elo_w <- elo_b
    elo_l <- elo_a
  } else {
    elo_w <- max(elo_a, elo_b)
    elo_l <- min(elo_a, elo_b)
  }
  mov_mult <- ((abs(pts_a - pts_b) + 3)^0.8) / (7.5 + 0.006 * (elo_w - elo_l))
  K <- 20 * mov_mult
  # Expected home score: logistic curve in the rating difference.
  E <- 1 / (10^((elo_b - elo_a) / 400) + 1)
  # Actual score S: 1 for a home win, 0.5 for a tie, 0 for a loss.
  S <- if (pts_a > pts_b) 1 else if (pts_a < pts_b) 0 else 0.5
  K * (S - E)
}
calculate_elo(elo_a = 1618,elo_b = 1500,pts_a = 94,pts_b = 90)
# elo for current season, set at 1505 initially ------------------------------
# compute elo for a year's worth of games
year <- 2019
season <- read.csv(sprintf("data/schedules/NBA-%s_schedule.csv",year),stringsAsFactors = F)
season <- season %>%
arrange(ymd(date_game)) %>%
filter(!is.na(home_pts))
elo_df <- data.frame(team = unique(season$home_team_name),
elo = init_elo,
date = 'pre',
stringsAsFactors = F)
# update after each game
elo_games_played <-
lapply(1:nrow(season),
function(game_idx){
game <- season[game_idx,]
# elo, either from lag or set to 1300
elo_a = elo_df[elo_df$team == game$home_team_name,]$elo
elo_b = elo_df[elo_df$team == game$visitor_team_name,]$elo
# update
elo_update <- calculate_elo(elo_a = elo_a,
elo_b = elo_b,
pts_a = game$home_pts,
pts_b = game$visitor_pts)
elo_a_new <- elo_a + elo_update
elo_b_new <- elo_b - elo_update
# update the elo
elo_df[elo_df$team == game$home_team_name,]$elo <<- elo_a_new
elo_df[elo_df$team == game$visitor_team_name,]$elo <<- elo_b_new
# return a df for graphing
elo_df.a <- data.frame(team = game$home_team_name,
elo = elo_a_new,
date = game$date_game,
stringsAsFactors = F)
elo_df.b <- data.frame(team = game$visitor_team_name,
elo = elo_b_new,
date = game$date_game,
stringsAsFactors = F)
return(rbind(elo_df.a,elo_df.b))
}
) %>% do.call('rbind',.)
top_elo <- elo_games_played %>%
arrange(desc(ymd(date)),desc(elo)) %>%
group_by(team) %>%
summarise(elo = first(elo),date=first(date)) %>%
as.data.frame() %>%
arrange(desc(elo))
top_elo
elo_games_played$team = factor(elo_games_played$team,top_elo$team)
ggplot(elo_games_played,# %>% filter(team %in% top_elo$team),
aes(x=ymd(date),y=elo,col=team)) +
geom_step() +
#geom_label_repel(data = top_elo,aes(x=ymd(date),y=elo,col=team,label=team),alpha=0.9) +
theme_minimal() +
theme(legend.position = 'none') +
facet_wrap(~team)
# compute historical elo --------------------------------------------------
# start in 1950 with everyone set at 1505, loop through every game, at the end of each season carrying over elo equal to (final elo * 0.75) + (1505*0.25), per 538
years <- substr(dir("data/schedules/"),5,8) %>% as.numeric()
years <- 1950:2019
elo_overtime <- vector('list',length(years))
print("####################################")
print("CALCULATE HISTORICAL ELO:")
print("####################################")
for (year_idx in 1:length(years)){
year <- years[[year_idx]]
print(sprintf("Getting elo for %s",year))
# get schedule
season <- read.csv(sprintf("data/schedules/NBA-%s_schedule.csv",year),stringsAsFactors = F)
season <- season %>%
arrange(ymd(date_game)) %>%
filter(!is.na(home_pts))
# initialize elo at 1505 for 1950, else take recent season's elo * 1950
if(year == min(years)){
elo_df <- data.frame(team = unique(season$home_team_name),
elo = init_elo,
date = 'pre',
stringsAsFactors = F)
} else {
# if new team, append at 1505, else take the carryover
elo_df <- data.frame(team = unique(season$home_team_name),
elo = init_elo,
date = 'pre',
stringsAsFactors = F)
elo_df[elo_df$team %in% elo_carryover$team,]$elo <- elo_carryover[
match(elo_df[elo_df$team %in% elo_carryover$team,]$team,
elo_carryover$team),]$elo
}
# update after each game
elo_games_played <-
lapply(1:nrow(season),
function(game_idx){
game <- season[game_idx,]
# elo, either from lag or set to 1300
elo_a = elo_df[elo_df$team == game$home_team_name,]$elo
elo_b = elo_df[elo_df$team == game$visitor_team_name,]$elo
# update
elo_update <- calculate_elo(elo_a = elo_a,
elo_b = elo_b,
pts_a = game$home_pts,
pts_b = game$visitor_pts)
elo_a_new <- elo_a + elo_update
elo_b_new <- elo_b - elo_update
# update the elo
elo_df[elo_df$team == game$home_team_name,]$elo <<- elo_a_new
elo_df[elo_df$team == game$visitor_team_name,]$elo <<- elo_b_new
# return a df for graphing
elo_df.a <- data.frame(team = game$home_team_name,
elo = elo_a_new,
date = game$date_game,
stringsAsFactors = F)
elo_df.b <- data.frame(team = game$visitor_team_name,
elo = elo_b_new,
date = game$date_game,
stringsAsFactors = F)
return(rbind(elo_df.a,elo_df.b))
}
) %>% do.call('rbind',.)
# get final elo for this season
elo_carryover <- elo_df %>%
mutate(elo = (elo*0.75) + (1505*0.25),
date = 'post')
# return the final elo, and every game played
elo_overtime[[year_idx]] <- list(elo_games_played,elo_carryover)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/evaluation.R
\name{pit}
\alias{pit}
\alias{pit.idr}
\alias{pit.data.frame}
\title{Probability integral transform (PIT)}
\usage{
pit(predictions, y, randomize = TRUE, seed = NULL)
\method{pit}{idr}(predictions, y, randomize = TRUE, seed = NULL)
\method{pit}{data.frame}(predictions, y, randomize = TRUE, seed = NULL)
}
\arguments{
\item{predictions}{either an object of class \code{idr} (output of
\code{\link{predict.idrfit}}), or a \code{data.frame} of numeric variables. In
the latter case, the PIT is computed using the empirical distribution of
the variables in \code{predictions}.}
\item{y}{a numeric vector of observations of the same length as the number of
predictions.}
\item{randomize}{PIT values should be randomized at discontinuity points of the
predictive CDF (e.g. at zero for precipitation forecasts). Set \code{
randomize = TRUE} to randomize.}
\item{seed}{argument to \code{set.seed} for random number generation (if
\code{randomize} is \code{TRUE}).}
}
\value{
Vector of PIT values.
}
\description{
Computes the probability integral transform (PIT) of IDR or raw
forecasts.
}
\examples{
data("rain")
require("graphics")
## Postprocess HRES forecast using data of 4 years
X <- rain[1:(4 * 365), "HRES", drop = FALSE]
y <- rain[1:(4 * 365), "obs"]
fit <- idr(y = y, X = X)
## Assess calibration of the postprocessed HRES forecast using data of next 4
## years and compare to calibration of the raw ensemble
data <- rain[(4 * 365 + 1):(8 * 365), "HRES", drop = FALSE]
obs <- rain[(4 * 365 + 1):(8 * 365), "obs"]
predictions <- predict(fit, data = data)
idrPit <- pit(predictions, obs, seed = 123)
rawData <- rain[(4 * 365 + 1):(8 * 365), c("HRES", "CTR", paste0("P", 1:50))]
rawPit <- pit(rawData, obs, seed = 123)
par(mfrow = c(1, 2))
hist(idrPit, xlab = "Probability Integral Transform",
ylab = "Density", freq = FALSE, main = "Postprocessed HRES")
hist(rawPit, xlab = "Probability Integral Transform",
ylab = "Density", freq = FALSE, main = "Raw ensemble")
}
\references{
Gneiting, T., Balabdaoui, F. and Raftery, A. E. (2007), 'Probabilistic
forecasts, calibration and sharpness', Journal of the Royal Statistical
Society: Series B (Statistical Methodology) 69(2), 243-268.
}
\seealso{
\code{\link{predict.idrfit}}
}
|
/man/pit.Rd
|
no_license
|
evwalz/isodistrreg
|
R
| false
| true
| 2,332
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/evaluation.R
\name{pit}
\alias{pit}
\alias{pit.idr}
\alias{pit.data.frame}
\title{Probability integral transform (PIT)}
\usage{
pit(predictions, y, randomize = TRUE, seed = NULL)
\method{pit}{idr}(predictions, y, randomize = TRUE, seed = NULL)
\method{pit}{data.frame}(predictions, y, randomize = TRUE, seed = NULL)
}
\arguments{
\item{predictions}{either an object of class \code{idr} (output of
\code{\link{predict.idrfit}}), or a \code{data.frame} of numeric variables. In
the latter case, the PIT is computed using the empirical distribution of
the variables in \code{predictions}.}
\item{y}{a numeric vector of observations of the same length as the number of
predictions.}
\item{randomize}{PIT values should be randomized at discontinuity points of the
predictive CDF (e.g. at zero for precipitation forecasts). Set \code{
randomize = TRUE} to randomize.}
\item{seed}{argument to \code{set.seed} for random number generation (if
\code{randomize} is \code{TRUE}).}
}
\value{
Vector of PIT values.
}
\description{
Computes the probability integral transform (PIT) of IDR or raw
forecasts.
}
\examples{
data("rain")
require("graphics")
## Postprocess HRES forecast using data of 4 years
X <- rain[1:(4 * 365), "HRES", drop = FALSE]
y <- rain[1:(4 * 365), "obs"]
fit <- idr(y = y, X = X)
## Assess calibration of the postprocessed HRES forecast using data of next 4
## years and compare to calibration of the raw ensemble
data <- rain[(4 * 365 + 1):(8 * 365), "HRES", drop = FALSE]
obs <- rain[(4 * 365 + 1):(8 * 365), "obs"]
predictions <- predict(fit, data = data)
idrPit <- pit(predictions, obs, seed = 123)
rawData <- rain[(4 * 365 + 1):(8 * 365), c("HRES", "CTR", paste0("P", 1:50))]
rawPit <- pit(rawData, obs, seed = 123)
par(mfrow = c(1, 2))
hist(idrPit, xlab = "Probability Integral Transform",
ylab = "Density", freq = FALSE, main = "Postprocessed HRES")
hist(rawPit, xlab = "Probability Integral Transform",
ylab = "Density", freq = FALSE, main = "Raw ensemble")
}
\references{
Gneiting, T., Balabdaoui, F. and Raftery, A. E. (2007), 'Probabilistic
forecasts, calibration and sharpness', Journal of the Royal Statistical
Society: Series B (Statistical Methodology) 69(2), 243-268.
}
\seealso{
\code{\link{predict.idrfit}}
}
|
######################################################
##### -- climate-vs-habitat-change-california -- #####
######################################################
##################### FUNCTIONS ######################
##### -- multispeciesPP_edit() -- #####
##### Modified multispeciesPP function: function was edited to return fit object, which includes estimates of residual deviance
##### R code from multispeciesPP by Will Fithian (https://github.com/wfithian/multispeciesPP/blob/master/R/multispeciesPP.R).
##### For more information, see
##### Fithian et al. (2014) Bias correction in species distribution models: pooling survey and collection data for multiple species. Methods in Ecology and Evolution
## Fit a joint point-process SDM to presence-absence (PA), presence-only (PO)
## and background (BG) data for multiple species, following Fithian et al.
## (2014). Edited from multispeciesPP() only to also return the internal
## `fit` object (which carries the residual deviance) in the result list.
##
## Key arguments (unchanged from the original):
##   sdm.formula, bias.formula - RHS formulas for the species occurrence
##     model and the PO sampling-bias model; an intercept is forced into the
##     sdm formula and removed from the bias formula below.
##   PA, PO, BG - presence/absence data frame, list of PO data frames (one
##     per species), and background-sample data frame.
##   quadrat.size, region.size - area scalars entering the Poisson offsets.
##   penalty.l2.sdm / penalty.l2.bias / penalty.l2.intercept - ridge
##     penalties on sdm slopes, bias slopes and intercepts respectively.
## Returns a list of class "multispeciesPP" holding standardized and
## unstandardized coefficients, standard errors, fitted values on the PA and
## BG points, and the raw `fit` object.
multispeciesPP_edit <-
  function (sdm.formula, bias.formula, PA, PO, BG, species = names(PO),
    species.PA = species, species.PO = species, quadrat.size = 1,
    region.size = 1, start = NULL, inverse.hessian = FALSE, penalty.l2.sdm = 0.1,
    penalty.l2.bias = 0.1, penalty.l2.intercept = 1e-04, weights = rep(1,
        n.species * nrow(x)), control = list())
{
    control <- do.call("glm.control", control)
    ## Modelled species set is the union of the PA and PO species lists
    species <- union(species.PO, species.PA)
    ## Force an intercept into the sdm formula; drop it from the bias formula
    sdm.formula <- update(sdm.formula, ~. + 1)
    bias.formula <- update(bias.formula, ~. - 1)
    sdm.mf <- model.frame(sdm.formula, data = BG)
    bias.mf <- model.frame(bias.formula, data = BG)
    ## Center and scale the BG design matrices; the same transform is later
    ## applied to the PA and PO design matrices via sdm.standardize() /
    ## bias.standardize() so all data share one standardization.
    sdm.BG.model.matrix <- model.matrix(terms(sdm.mf), BG)
    sdm.means <- c(0, apply(sdm.BG.model.matrix[, -1, drop = FALSE],
        2, mean))
    sdm.BG.model.matrix <- sweep(sdm.BG.model.matrix, 2, sdm.means,
        "-")
    sdm.sds <- c(1, apply(sdm.BG.model.matrix[, -1, drop = FALSE],
        2, sd))
    sdm.BG.model.matrix <- sweep(sdm.BG.model.matrix, 2, sdm.sds,
        "/")
    sdm.standardize <- function(mat) sweep(sweep(mat, 2, sdm.means,
        "-"), 2, sdm.sds, "/")
    bias.BG.model.matrix <- model.matrix(terms(bias.mf), BG)
    bias.means <- apply(bias.BG.model.matrix, 2, mean)
    bias.BG.model.matrix <- sweep(bias.BG.model.matrix, 2, bias.means,
        "-")
    bias.sds <- apply(bias.BG.model.matrix, 2, sd)
    bias.BG.model.matrix <- sweep(bias.BG.model.matrix, 2, bias.sds,
        "/")
    bias.standardize <- function(mat) sweep(sweep(mat, 2, bias.means,
        "-"), 2, bias.sds, "/")
    ## Keep only rows present in both design matrices (complete cases)
    BG.good.rows <- intersect(rownames(sdm.BG.model.matrix),
        rownames(bias.BG.model.matrix))
    sdm.PA.model.matrix <- sdm.standardize(model.matrix(terms(sdm.mf),
        PA))
    PA.good.rows <- rownames(sdm.PA.model.matrix)
    if (!is.null(species.PO)) {
        ## Per-species standardized design matrices for the PO records
        sdm.PO.model.matrices <- lapply(as.list(species.PO),
            function(sp) sdm.standardize(model.matrix(terms(sdm.mf),
                PO[[sp]])))
        names(sdm.PO.model.matrices) <- species.PO
        bias.PO.model.matrices <- lapply(as.list(species.PO),
            function(sp) bias.standardize(model.matrix(terms(bias.mf),
                PO[[sp]])))
        names(bias.PO.model.matrices) <- species.PO
        PO.good.rows <- lapply(as.list(species.PO), function(sp) intersect(rownames(sdm.PO.model.matrices[[sp]]),
            rownames(bias.PO.model.matrices[[sp]])))
        names(PO.good.rows) <- species.PO
    }
    n.species <- length(species)
    p.sdm <- ncol(sdm.BG.model.matrix) - 1
    p.bias <- ncol(bias.BG.model.matrix)
    ## Sufficient statistics of the PO likelihood: column sums of the PO
    ## design matrices (per species for sdm terms, pooled for bias terms)
    sdm.margins.ab <- matrix(0, n.species, p.sdm + 1, dimnames = list(species,
        colnames(sdm.BG.model.matrix)))
    sdm.margins.gamma <- matrix(0, n.species, 1, dimnames = list(species,
        "isPO"))
    bias.margins <- matrix(0, 1, p.bias, dimnames = list(NULL,
        colnames(bias.BG.model.matrix)))
    for (sp in species.PO) {
        k <- match(sp, species)
        sdm.margins.ab[k, ] <- colSums(sdm.PO.model.matrices[[sp]][PO.good.rows[[sp]],
            , drop = FALSE])
        sdm.margins.gamma[k, ] <- length(PO.good.rows[[sp]])
        bias.margins <- bias.margins + colSums(bias.PO.model.matrices[[sp]][PO.good.rows[[sp]],
            , drop = FALSE])
    }
    ## Pack/unpack the flat coefficient vector into named components:
    ## alpha (intercepts), beta (sdm slopes), gamma (PO intercepts),
    ## delta (bias slopes)
    abcd.from.all.coef <- function(all.coef) {
        sdm.coef <- matrix(all.coef[1:(n.species * (p.sdm + 2))],
            p.sdm + 2, n.species)
        alpha <- sdm.coef[1, ]
        beta <- t(sdm.coef[2:(p.sdm + 1), , drop = FALSE])
        gamma <- sdm.coef[p.sdm + 2, ]
        delta <- all.coef[-(1:(n.species * (p.sdm + 2)))]
        names(alpha) <- names(gamma) <- species
        colnames(beta) <- colnames(sdm.margins.ab)[-1]
        rownames(beta) <- species
        names(delta) <- colnames(bias.BG.model.matrix)
        return(list(alpha = alpha, beta = beta, gamma = gamma,
            delta = delta))
    }
    all.coef.from.abcd <- function(alpha, beta, gamma, delta) {
        c(rbind(alpha, beta, gamma), delta)
    }
    n.PA <- length(PA.good.rows)
    n.BG <- length(BG.good.rows)
    subsamp.PA.offset <- 0
    subsamp.BG.offset <- 0
    n.sites <- n.BG + n.PA
    ## Stack the design: PO margin rows, one zero row, PA rows, BG rows,
    ## then ridge-penalty rows (sqrt of penalties on the diagonal)
    x <- cbind(rbind(sdm.margins.ab, 0, sdm.PA.model.matrix[PA.good.rows,
        , drop = FALSE], sdm.BG.model.matrix[BG.good.rows, ,
        drop = FALSE]), c(sdm.margins.gamma, rep(0:1, c(1 + n.PA,
        n.BG))))
    x <- rbind(x, diag(sqrt(c(penalty.l2.intercept, rep(penalty.l2.sdm,
        p.sdm), penalty.l2.intercept))), matrix(0, p.bias, p.sdm +
        2))
    z <- rbind(matrix(0, n.species, p.bias), bias.margins, matrix(0,
        n.PA, p.bias), bias.BG.model.matrix[BG.good.rows, , drop = FALSE],
        matrix(0, p.sdm + 2, p.bias), sqrt(penalty.l2.bias/n.species) *
            diag(p.bias))
    ## Build the stacked response and offsets species-by-species; NA marks
    ## rows that do not contribute to that species' likelihood
    y <- rep(0, nrow(x) * n.species)
    offset <- rep(0, nrow(x) * n.species)
    for (k in 1:n.species) {
        yk <- rep(0, nrow(x))
        yk[1:n.species] <- 1 * (1:n.species == k)
        yk[1 + n.species] <- 1 * (1 == k)
        if (species[k] %in% species.PA) {
            yk[1 + n.species + (1:n.PA)] <- PA[PA.good.rows,
                species[k]]
        }
        else {
            yk[1 + n.species + (1:n.PA)] <- NA
        }
        if (species[k] %in% species.PO) {
            yk[1 + n.species + n.PA + (1:n.BG)] <- 0
        }
        else {
            yk[1 + n.species + n.PA + (1:n.BG)] <- NA
        }
        yk[1 + n.species + n.sites + (1:(p.sdm + 2 + p.bias))] <- 0
        y[(k - 1) * nrow(x) + 1:nrow(x)] <- yk
        offk <- rep(0, nrow(x))
        ## Area offsets: quadrat size for PA points, region size scaled by
        ## the number of background points for BG points
        offk[1 + n.species + (1:n.PA)] <- log(quadrat.size)
        offk[1 + n.species + n.PA + (1:n.BG)] <- log(region.size) -
            log(n.BG)
        offset[(k - 1) * nrow(x) + 1:nrow(x)] <- offk
    }
    ## Index sets of the PA and BG rows inside the stacked vectors
    which.PA <- (2 + n.species):(1 + n.species + n.PA) + rep((0:(n.species -
        1)) * nrow(x), each = n.PA)
    which.BG <- (2 + n.species + n.PA):(1 + n.species + n.PA +
        n.BG) + rep((0:(n.species - 1)) * nrow(x), each = n.BG)
    if (is.null(start)) {
        ## Crude starting values: log prevalence for alpha, log PO counts
        ## (relative to alpha and region size) for gamma
        start.alpha <- start.gamma <- rep(0, n.species)
        for (k in 1:n.species) {
            if ((species[k] %in% species.PA) && sum(!is.na(PA[PA.good.rows,
                species[k]]) > 0))
                start.alpha[k] <- log((1 + sum(PA[PA.good.rows,
                  species[k]], na.rm = TRUE))/n.PA/quadrat.size)
            if (species[k] %in% species.PO)
                start.gamma[k] <- log1p(sdm.margins.gamma[k,
                  ]) - start.alpha[k] - log(region.size)
        }
        start <- all.coef.from.abcd(start.alpha, matrix(0, p.sdm,
            n.species), start.gamma, rep(0, p.bias))
    }
    ## Fit the stacked model. block.glm.fit() and linear() are defined
    ## elsewhere in the multispeciesPP package -- NOTE(review): confirm they
    ## are available in the calling environment. Row families: linear for
    ## margin rows, cloglog-binomial for PA, Poisson for BG, Gaussian for
    ## the penalty rows.
    fit <- block.glm.fit(x, z, y, weights = weights, start = start,
        offset = offset, families = list(linear(), binomial(link = "cloglog"),
            poisson(), gaussian()), row.families = rep(rep(1:4,
            c(1 + n.species, n.PA, n.BG, p.sdm + p.bias + 2)),
            n.species), control = control)
    all.coef <- fit$coefficients
    eta <- fit$linear.predictors
    mu <- fit$fitted.values
    ## Name the flat coefficient vector "<species>:<term>" / "isPO:<term>"
    names(all.coef)[1:(n.species * (p.sdm + 2))] <- paste(rep(species,
        each = p.sdm + 2), c(colnames(sdm.BG.model.matrix)[1:(p.sdm +
        1)], "isPO"), sep = ":")
    names(all.coef)[-(1:(n.species * (p.sdm + 2)))] <- paste("isPO:",
        colnames(bias.BG.model.matrix), sep = "")
    std.errs <- fit$fit$std.errs
    names(std.errs) <- names(all.coef)
    species.coef <- matrix(all.coef[1:(n.species * (p.sdm + 2))],
        p.sdm + 2, n.species, dimnames = list(c(colnames(sdm.margins.ab),
            "isPO"), species))
    bias.coef <- all.coef[-(1:(n.species * (p.sdm + 2)))]
    names(bias.coef) <- colnames(bias.BG.model.matrix)
    ## Fitted values on the PA and BG points (linear predictor and response
    ## scales); BG fits are rescaled back to per-unit-area intensities
    fit.PA <- linear.fit.PA <- matrix(NA, nrow(PA), length(species),
        dimnames = list(dimnames(PA)[[1]], species))
    linear.fit.PA[PA.good.rows, ] <- eta[which.PA]
    fit.PA[PA.good.rows, ] <- mu[which.PA]
    fit.BG <- linear.fit.BG <- bias.fit.BG <- linear.bias.fit.BG <- matrix(NA,
        nrow(BG), length(species), dimnames = list(dimnames(BG)[[1]],
            species))
    linear.fit.BG[BG.good.rows, ] <- matrix(eta[which.BG], ncol = n.species) +
        log(n.BG) - log(region.size)
    fit.BG[BG.good.rows, ] <- matrix(mu[which.BG], ncol = n.species) *
        n.BG/region.size
    linear.bias.fit.BG[BG.good.rows, ] <- c(bias.BG.model.matrix[BG.good.rows,
        , drop = FALSE] %*% bias.coef)
    bias.fit.BG[BG.good.rows, ] <- exp(linear.bias.fit.BG[BG.good.rows,
        ])
    ## Sanity checks: the penalized score equations for gamma and the bias
    ## coefficients should be (approximately) satisfied at convergence
    fitted.sdm.margins.gamma <- colSums(fit.BG[BG.good.rows,
        , drop = FALSE]) * region.size/n.BG
    fitted.bias.margins <- colSums(t(fit.BG[BG.good.rows, species.PO,
        drop = FALSE]) %*% bias.BG.model.matrix[BG.good.rows,
        , drop = FALSE] * region.size/n.BG)
    score.check.gamma <- fitted.sdm.margins.gamma - sdm.margins.gamma +
        penalty.l2.intercept * species.coef[p.sdm + 2, ]
    score.check.gamma <- score.check.gamma[species %in% species.PO]
    score.check.bias <- fitted.bias.margins - bias.margins +
        penalty.l2.bias * bias.coef
    if (length(score.check.gamma) > 0)
        stopifnot(mean((score.check.gamma/fit$deviance)^2) <
            control$epsilon)
    stopifnot(mean((score.check.bias/fit$deviance)^2) < control$epsilon)
    ## Undo the standardization so coefficients refer to the original
    ## covariate scales; intercepts absorb the centering adjustments
    sd.normalizer <- c(rep(c(sdm.sds, 1), n.species), bias.sds)
    unstandardized.coef <- all.coef/sd.normalizer
    gamma.adjust <- sum(unstandardized.coef[-(1:(n.species *
        (p.sdm + 2)))] * bias.means)
    for (k in 1:n.species) {
        jk <- (p.sdm + 2) * (k - 1) + 1:(p.sdm + 1)
        coef.block <- unstandardized.coef[jk]
        unstandardized.coef[jk[1]] <- coef.block[1] - sum(coef.block[-1] *
            sdm.means[-1])
        unstandardized.coef[jk[1] + p.sdm + 1] <- unstandardized.coef[jk[1] +
            p.sdm + 1] - gamma.adjust
    }
    unstandardized.species.coef <- matrix(unstandardized.coef[1:(n.species *
        (p.sdm + 2))], p.sdm + 2, n.species, dimnames = list(c(colnames(sdm.margins.ab),
        "isPO"), species))
    unstandardized.bias.coef <- unstandardized.coef[-(1:(n.species *
        (p.sdm + 2)))]
    names(unstandardized.bias.coef) <- colnames(bias.BG.model.matrix)
    ## Assemble the result; `fit` is the addition made by this edited version
    tr <- list(sdm.formula = sdm.formula, bias.formula = bias.formula, fit = fit,
        normalized.species.coef = species.coef, normalized.bias.coef = bias.coef,
        normalized.all.coef = all.coef, normalized.std.errs = std.errs,
        all.coef = unstandardized.coef, std.errs = std.errs/sd.normalizer,
        species.coef = unstandardized.species.coef, bias.coef = unstandardized.bias.coef,
        linear.fit.PA = linear.fit.PA, fit.PA = fit.PA, linear.bias.fit.BG = linear.bias.fit.BG,
        bias.fit.BG = bias.fit.BG, linear.fit.BG = linear.fit.BG,
        fit.BG = fit.BG)
    class(tr) <- c("multispeciesPP", "list")
    tr
}
##### -- multispeciesPP_wrapper() -- #####
##### Wrapper around function multispeciesPP() from library(multispeciesPP) to facilitate running of models with different types of information
##### R code from multispeciesPP by Will Fithian (https://github.com/wfithian/multispeciesPP/blob/master/R/multispeciesPP.R).
##### For more information, see
##### Fithian et al. (2014) Bias correction in species distribution models: pooling survey and collection data for multiple species. Methods in Ecology and Evolution
## Convenience wrapper around multispeciesPP_edit(): selects the requested
## species and predictor set, standardizes all covariates, builds the model
## formulas, fits the model, and saves the result under
## output/multispeciesPP/models/mPP_<out_name>.rds.
multispeciesPP_wrapper <- function(pa_data = NULL,
                                   po_data = NULL,
                                   bg = NULL,
                                   species_names = NULL,
                                   climate_predictors = paste("bio", c(1, 6, 12), sep = ""),
                                   habitat_associations = NULL,
                                   group = c("bird", "mamm", "odon"), ## Taxonomic group to model (birds/mammals/odonates)
                                   predictor_set = c("climate", "habitat", "full"), ## Use only climate, habitat, or both (full) as model predictors
                                   out_name = "out",
                                   ...){
  ## Resolve arguments to one of their allowed values
  group <- match.arg(group)
  predictor_set <- match.arg(predictor_set)
  ## Make sure the output directories exist
  dir.create(paste0(getwd(), "/output/multispeciesPP"), showWarnings = FALSE)
  dir.create(paste0(getwd(), "/output/multispeciesPP/models"), showWarnings = FALSE)
  ## Predictor name vectors
  climate_pred <- climate_predictors
  habitat_pred <- habitat_associations
  ## Covariates used to model presence-only sampling bias
  bias_pred <- c("ruggedness", "dist_from_urban", "dist_from_stream", "dist_from_survey")
  ## Number of background points is used as the region size of the process
  study_area <- nrow(bg)
  ## Keep only the model covariates plus the group-specific survey-distance
  ## column, which is then renamed to the generic "dist_from_survey"
  bg <- bg[c(intersect(names(bg), c(climate_pred, habitat_pred, bias_pred)), paste0("dist_from_survey_", group))]
  names(bg)[grep("survey", names(bg))] <- "dist_from_survey"
  ## Restrict the PA table and PO list to the requested species
  if (!is.null(pa_data)) pa_data <- pa_data[c(species_names, climate_pred, habitat_pred)]
  if (!is.null(po_data)) po_data <- po_data[species_names]
  ## Standardize covariates (column-wise z-scores)
  if (!is.null(pa_data)) pa_data[, c(climate_pred, habitat_pred)] <- data.frame(apply(pa_data[, c(climate_pred, habitat_pred)], 2, scale))
  if (!is.null(po_data)) po_data <- lapply(po_data, function(tbl) data.frame(apply(tbl[c(climate_pred, habitat_pred, bias_pred)], 2, scale)))
  bg[, c(climate_pred, habitat_pred, bias_pred)] <- data.frame(apply(bg[c(climate_pred, habitat_pred, bias_pred)], 2, scale))
  ## Assemble the model formulas from the predictor names
  climate_pred <- paste(climate_pred, collapse = " + ")
  habitat_pred <- paste(habitat_pred, collapse = " + ")
  bias_pred <- paste(bias_pred, collapse = " + ")
  ## Formula for the sampling-bias part of the model
  bias_formula <- as.formula(paste0("~ ", bias_pred))
  ## Formula for the SDM part, depending on the chosen predictor set
  sdm_formula <- switch(predictor_set,
                        full = as.formula(paste0("~ ", climate_pred, " + ", habitat_pred)),
                        climate = as.formula(paste0("~ ", climate_pred)),
                        habitat = as.formula(paste0("~ ", habitat_pred)))
  ## Fit the joint PA/PO point-process model
  mPP <- multispeciesPP_edit(
    sdm.formula = sdm_formula,
    bias.formula = bias_formula,
    PA = pa_data,
    PO = po_data,
    BG = bg,
    region.size = study_area,
    ...
  )
  ## Persist the fitted model to disk (function is called for this side effect)
  saveRDS(mPP, file = paste0("output/multispeciesPP/models/mPP_", out_name, ".rds"))
}
##### -- multispeciesPP_output() -- #####
##### Extract useful output from saved multispeciesPP models
## Load every saved multispeciesPP model from `mPP_directory` and return a
## named list (one element per model file) of:
##   summary  - data frame of standardized coefficients with Wald se/z/p,
##              plus species, variable, and source-model columns
##   deviance - residual deviance of the fit
multispeciesPP_output <- function(mPP_directory = "output/multispeciesPP/models/"){
  model_files <- list.files(mPP_directory)
  results <- lapply(model_files, function(model_file){
    model <- readRDS(paste0(mPP_directory, model_file))
    ## Standardized coefficients and their standard errors
    est <- model$normalized.all.coef
    std_err <- model$normalized.std.errs
    z_score <- est/std_err
    ## Two-sided Wald p-values from the normal approximation
    coef_table <- data.frame(est, std_err, z_score, 2 * pnorm(-abs(z_score)))
    colnames(coef_table) <- c("estimate", "se", "z", "p")
    ## Coefficient names follow the "<species>:<variable>" convention
    name_parts <- strsplit(row.names(coef_table), ':')
    coef_table$species <- factor(unlist(lapply(name_parts, function(y) y[1])))
    coef_table$variable <- unlist(lapply(name_parts, function(y) y[2]))
    coef_table$model <- model_file
    list(summary = coef_table,
         deviance = model$fit$deviance
    )
  }
  )
  ## Name each element by the model file name without its extension
  names(results) <- sub("\\..*", "", model_files)
  return(results)
}
###############
#### roc() ####
###############
"roc" <-
function (obsdat, preddat)
{
# code adapted from Ferrier, Pearce and Watson's code, by J.Elith
#
# see:
# Hanley, J.A. & McNeil, B.J. (1982) The meaning and use of the area
# under a Receiver Operating Characteristic (ROC) curve.
# Radiology, 143, 29-36
#
# Pearce, J. & Ferrier, S. (2000) Evaluating the predictive performance
# of habitat models developed using logistic regression.
# Ecological Modelling, 133, 225-245.
# this is the non-parametric calculation for area under the ROC curve,
# using the fact that a MannWhitney U statistic is closely related to
# the area
#
if (length(obsdat) != length(preddat))
stop("obs and preds must be equal lengths")
n.x <- length(obsdat[obsdat == 0])
n.y <- length(obsdat[obsdat == 1])
xy <- c(preddat[obsdat == 0], preddat[obsdat == 1])
rnk <- rank(xy)
wilc <- ((n.x * n.y) + ((n.x * (n.x + 1))/2) - sum(rnk[1:n.x]))/(n.x *
n.y)
return(round(wilc, 4))
}
#########################
#### calc_deviance() ####
#########################
"calc_deviance" <-
function(obs.values, fitted.values, weights = rep(1,length(obs.values)), family="binomial", calc.mean = TRUE)
{
# j. leathwick/j. elith
#
# version 2.1 - 5th Sept 2005
#
# function to calculate deviance given two vectors of raw and fitted values
# requires a family argument which is set to binomial by default
#
#
if (length(obs.values) != length(fitted.values))
stop("observations and predictions must be of equal length")
y_i <- obs.values
u_i <- fitted.values
if (family == "binomial" | family == "bernoulli") {
deviance.contribs <- (y_i * log(u_i)) + ((1-y_i) * log(1 - u_i))
deviance <- -2 * sum(deviance.contribs * weights)
}
if (family == "poisson" | family == "Poisson") {
deviance.contribs <- ifelse(y_i == 0, 0, (y_i * log(y_i/u_i))) - (y_i - u_i)
deviance <- 2 * sum(deviance.contribs * weights)
}
if (family == "laplace") {
deviance <- sum(abs(y_i - u_i))
}
if (family == "gaussian") {
deviance <- sum((y_i - u_i) * (y_i - u_i))
}
if (calc.mean) deviance <- deviance/length(obs.values)
return(deviance)
}
#######################
##### eval_pred() #####
#######################
## Score predictions against observations for one or more species.
## For each name in `species_names`, the matching columns of `obs_table`
## and `pred_table` are located by grep and compared with AUC (roc()),
## mean binomial deviance (calc_deviance()) and Pearson correlation.
## Returns a data frame with one row per species and columns
## species/auc/cor/dev. (The single loop covers both the one-species and
## multi-species cases handled by separate branches in the original.)
eval_pred <- function(obs_table = NA, pred_table = NA, species_names = NA){
  scores <- data.frame(species = species_names, auc = NA, cor = NA, dev = NA)
  for (sp in seq_len(nrow(scores))){
    ## Columns are matched by (partial) name, as in the original
    observed <- obs_table[, grep(species_names[sp], names(obs_table))]
    predicted <- pred_table[, grep(species_names[sp], names(pred_table))]
    scores$auc[sp] <- roc(observed, predicted)
    scores$dev[sp] <- calc_deviance(observed, predicted)
    scores$cor[sp] <- cor(observed, predicted, use = "complete.obs", method = "pearson")
  }
  return(scores)
}
## Generate presence probabilities from every saved multispeciesPP model and
## score them against the presence-absence data of both time periods.
## NOTE(review): relies on objects from the calling environment -- t1_bg/t2_bg
## background tables and t1_pa_bird/t2_pa_bird/t1_pa_mamm/t2_pa_mamm PA
## tables -- confirm these are loaded before calling. Also assumes
## predict.multispeciesPP() (from the multispeciesPP package) is available.
## Writes the evaluation list to output/multispeciesPP/mPP_eval_output.rds
## and returns it.
multispeciesPP_predictions <- function(mPP_directory = "output/multispeciesPP/models/"){
  # Create directory to save model predictions
  dir.create(paste(getwd(), "/output/multispeciesPP/predictions", sep = ""), showWarnings = FALSE)
  mPP_list <- list.files(mPP_directory)
  mPP_eval_output <- vector('list', length(mPP_list))
  for(i in seq(along = mPP_eval_output)){
    mPP <- readRDS(paste(mPP_directory, mPP_list[i], sep = ""))
    ## Pick the PA tables matching the model's taxonomic group, inferred
    ## from the saved file name; if neither "bird" nor "mamm" matches,
    ## t1_pa/t2_pa stay unset and the predictions below will error
    if (grepl("bird", mPP_list[i])){
      t1_pa <- t1_pa_bird
      t2_pa <- t2_pa_bird
    }
    if (grepl("mamm", mPP_list[i])){
      t1_pa <- t1_pa_mamm
      t2_pa <- t2_pa_mamm
    }
    ## 1 - exp(-exp(eta)) maps predicted log-intensities to presence
    ## probabilities (inverse complementary log-log link).
    ## The *_bg predictions are currently unused because the saveRDS calls
    ## below are commented out.
    # t1_bg predictions
    predictions_t1_bg <- data.frame(t1_bg[c('longitude', 'latitude')], (1 - exp(-exp(predict.multispeciesPP(mPP, newdata = t1_bg)))))
    # t2_bg predictions
    predictions_t2_bg <- data.frame(t2_bg[c('longitude', 'latitude')], (1 - exp(-exp(predict.multispeciesPP(mPP, newdata = t2_bg)))))
    # t1_pa predictions
    predictions_t1_pa <- data.frame(t1_pa[c('longitude', 'latitude')], (1 - exp(-exp(predict.multispeciesPP(mPP, newdata = t1_pa)))))
    # t2_pa predictions
    predictions_t2_pa <- data.frame(t2_pa[c('longitude', 'latitude')], (1 - exp(-exp(predict.multispeciesPP(mPP, newdata = t2_pa)))))
    # save predictions
    #saveRDS(predictions_t1_bg, paste("output/multispeciesPP/", strsplit(mPP_list[i], "\\.")[[1]][1], '_bg.rds', sep = ''))
    #saveRDS(predictions_t1_bg, paste('output/multispeciesPP/predictions/', strsplit(mPP_list[i], "\\.")[[1]][1], '_bg.rds', sep = ''))
    #saveRDS(predictions_t2_bg, paste('output/multispeciesPP/predictions/', strsplit(mPP_list[i], "\\.")[[1]][1], '_bg.rds', sep = ''))
    #saveRDS(predictions_t1_pa, paste('output/multispeciesPP/predictions/', strsplit(mPP_list[i], "\\.")[[1]][1], '_pa.rds', sep = ''))
    #saveRDS(predictions_t2_pa, paste('output/multispeciesPP/predictions/', strsplit(mPP_list[i], "\\.")[[1]][1], '_pa.rds', sep = ''))
    #saveRDS(predictions_change_bg, paste('output/multispeciesPP/predictions/', strsplit(mPP_list[i], "\\.")[[1]][1], '_predictions_change_bg.rds', sep = ''))
    ## Evaluate predictions against the PA data of each period
    eval_t1_pa <- eval_pred(obs_table = t1_pa, pred_table = predictions_t1_pa, species_names = colnames(mPP$normalized.species.coef))
    eval_t2_pa <- eval_pred(obs_table = t2_pa, pred_table = predictions_t2_pa, species_names = colnames(mPP$normalized.species.coef))
    mPP_eval_output[[i]] <- list(eval_t1_pa = eval_t1_pa, eval_t2_pa = eval_t2_pa)
    ## Names are (re-)assigned on every iteration; harmless but redundant
    names(mPP_eval_output) <- mPP_list
    rm(mPP)
  }
  saveRDS(mPP_eval_output, 'output/multispeciesPP/mPP_eval_output.rds')
  return(mPP_eval_output)
}
##### -- multispeciesPP_coef_plot() -- #####
#### Function to plot standardized model coefficients from the various different models of a given species
## Plot standardized SDM coefficients (with +/- 2 SE error bars) for one
## species across all of its fitted models: single- vs multi-species,
## historic (t1) vs modern (t2), and climate/habitat/full predictor sets.
## `mPP_out` is the list returned by multispeciesPP_output().
## NOTE(review): the relevel/relabel step below assumes exactly the 12
## model combinations are present; with fewer models the hard-coded level
## labels would be misassigned -- confirm before reuse.
multispeciesPP_coef_plot <- function(species_name, group = c("bird", "mamm"), mPP_out){
  group <- match.arg(group)
  ## Single-species models for this species; drop intercept/bias terms
  species_models <- species_coefs <- mPP_out[grep(species_name, names(mPP_out))]
  for (i in seq(along = species_models)){
    species_coefs[[i]] <- data.frame(species_models[[i]][[1]], model = names(species_models)[i])
    species_coefs[[i]] <- subset(species_coefs[[i]], !(species_coefs[[i]]$variable %in% c("(Intercept)", "isPO", "ruggedness", "dist_from_urban", "dist_from_stream", "dist_from_survey")))
  }
  ## Multi-species models for the same group; keep only this species' rows
  multi_models <- multi_coefs <- mPP_out[grep(paste(group, "multispecies", sep = "_"), names(mPP_out))]
  for (i in seq(along = multi_models)){
    multi_coefs[[i]] <- data.frame(multi_models[[i]][[1]], model = names(multi_models)[i])
    multi_coefs[[i]] <- subset(multi_coefs[[i]], multi_coefs[[i]]$species == species_name & !(multi_coefs[[i]]$variable %in% c("(Intercept)", "isPO", "ruggedness", "dist_from_urban", "dist_from_stream", "dist_from_survey")))
  }
  species_coefs <- do.call("rbind", c(species_coefs, multi_coefs))
  species_coefs$model <- as.factor(species_coefs$model)
  ## Reorder model levels into a fixed layout: t1 then t2; within each,
  ## single-species then multi-species; within each, climate/habitat/full
  species_coefs$model <- factor(species_coefs$model,
                                levels = levels(species_coefs$model)[c(
                                  which(!grepl("multispecies", levels(species_coefs$model)) & grepl("t1", levels(species_coefs$model)) & grepl("climate", levels(species_coefs$model))),
                                  which(!grepl("multispecies", levels(species_coefs$model)) & grepl("t1", levels(species_coefs$model)) & grepl("habitat", levels(species_coefs$model))),
                                  which(!grepl("multispecies", levels(species_coefs$model)) & grepl("t1", levels(species_coefs$model)) & grepl("full", levels(species_coefs$model))),
                                  which(grepl("multispecies", levels(species_coefs$model)) & grepl("t1", levels(species_coefs$model)) & grepl("climate", levels(species_coefs$model))),
                                  which(grepl("multispecies", levels(species_coefs$model)) & grepl("t1", levels(species_coefs$model)) & grepl("habitat", levels(species_coefs$model))),
                                  which(grepl("multispecies", levels(species_coefs$model)) & grepl("t1", levels(species_coefs$model)) & grepl("full", levels(species_coefs$model))),
                                  which(!grepl("multispecies", levels(species_coefs$model)) & grepl("t2", levels(species_coefs$model)) & grepl("climate", levels(species_coefs$model))),
                                  which(!grepl("multispecies", levels(species_coefs$model)) & grepl("t2", levels(species_coefs$model)) & grepl("habitat", levels(species_coefs$model))),
                                  which(!grepl("multispecies", levels(species_coefs$model)) & grepl("t2", levels(species_coefs$model)) & grepl("full", levels(species_coefs$model))),
                                  which(grepl("multispecies", levels(species_coefs$model)) & grepl("t2", levels(species_coefs$model)) & grepl("climate", levels(species_coefs$model))),
                                  which(grepl("multispecies", levels(species_coefs$model)) & grepl("t2", levels(species_coefs$model)) & grepl("habitat", levels(species_coefs$model))),
                                  which(grepl("multispecies", levels(species_coefs$model)) & grepl("t2", levels(species_coefs$model)) & grepl("full", levels(species_coefs$model)))
                                )])
  ## Human-readable labels, in the same fixed 12-slot order as above
  levels(species_coefs$model) <- c("Historic climate-only single-species model", "Historic habitat-only single-species model",
                                   "Historic full single-species model", "Historic climate-only multi-species model",
                                   "Historic habitat-only multi-species model", "Historic full multi-species model",
                                   "Modern climate-only single-species model", "Modern habitat-only single-species model",
                                   "Modern full single-species model", "Modern climate-only multi-species model",
                                   "Modern habitat-only multi-species model", "Modern full multi-species model")
  ## Order predictor facets by mean absolute effect size (largest first)
  species_coefs$variable <- as.factor(as.character(species_coefs$variable))
  species_coefs$variable <- factor(species_coefs$variable, levels = names(sort(tapply(abs(species_coefs$estimate), species_coefs$variable, mean), decreasing = TRUE)))
  species_coefs <- species_coefs[order(species_coefs$variable, species_coefs$model), ]
  ## Approximate 95% Wald intervals: estimate +/- 2 standard errors
  species_coefs$higher <- species_coefs$estimate + (2 * species_coefs$se)
  species_coefs$lower <- species_coefs$estimate - (2 * species_coefs$se)
  ## Invisible bars (alpha = 0) are kept only to establish dodge positions
  coef_plot <- ggplot(species_coefs, aes(x = model, y = estimate)) +
    geom_bar(aes(fill = model), position = position_dodge(width=0.3), stat="identity", alpha=0) +
    geom_point(aes(color = model), position = position_dodge(width = .8), size = 3) +
    geom_hline(aes(yintercept = 0), linetype = 2) +
    geom_errorbar(aes(ymax = higher, ymin = lower, color = model), position = position_dodge(width = .8), size = 1, width = 0.6) +
    facet_wrap(~ variable) +
    theme_bw() +
    ylab("Standardized regression coefficient") +
    theme(panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          axis.title.x=element_blank(),
          axis.ticks.x=element_blank(),
          axis.text.x=element_blank(),
          axis.text=element_text(size=14),
          #strip.text.x = element_blank(),
          axis.title.y = element_text(size = 14),
          #legend.position="none",
          plot.margin=unit(c(0,1,1,1), "cm")
    )
  return(coef_plot)
}
##### -- multispeciesPP_dev_plot() -- #####
#### Function to produce a barplot of model deviance for each species and across all species
## Barplot of residual (unexplained) deviance for each model of one taxon,
## faceted by time period and filled by predictor set. `mPP_out` is the list
## returned by multispeciesPP_output(); model names are assumed to follow
## the convention mPP_<group>_<genus>_<species|multispecies>_<time>_<set>
## -- NOTE(review): parsing below depends on that underscore layout.
multispeciesPP_dev_plot <- function(mPP_out, taxon_name = NA){
  ## One row per model: name plus its deviance (second list element)
  deviance_df <- data.frame(model = names(mPP_out), deviance = unlist(lapply(mPP_out, function(x) x[[2]])))
  ## Parse design labels out of the underscore-separated model name:
  ## last token = predictor set, second-to-last = time period
  deviance_df$predictor_set <- unlist(lapply(strsplit(as.character(deviance_df$model), "_"), function(x) x[length(x)]))
  deviance_df$time_period <- unlist(lapply(strsplit(as.character(deviance_df$model), "_"), function(x) x[length(x) - 1]))
  deviance_df$time_period <- as.factor(deviance_df$time_period)
  deviance_df$group <- unlist(lapply(strsplit(as.character(deviance_df$model), "_"), function(x) x[2]))
  ## Species = tokens 3-4 ("genus_species"); multispecies models are
  ## relabelled "<group>_multispecies" so they can be selected by taxon_name
  deviance_df$species <- unlist(lapply(strsplit(as.character(deviance_df$model), "_"), function(x) paste(x[c(3, 4)], collapse = "_")))
  deviance_df$species[grepl("multispecies", deviance_df$species)] <- paste(deviance_df$group[grepl("multispecies", deviance_df$species)], "multispecies", sep = "_")
  deviance_df <- subset(deviance_df, species == taxon_name)
  ## Collapse the model label by dropping the time/predictor-set suffix
  deviance_df$model <- unlist(lapply(strsplit(as.character(deviance_df$model), "_"), function(x) paste(x[-c(length(x)-1, length(x))], collapse = "_")))
  deviance_df$model <- as.factor(deviance_df$model)
  ggplot(deviance_df, aes(x = model, y = deviance)) +
    geom_bar(aes(fill = predictor_set), position = position_dodge(width=1), stat="identity") +
    facet_wrap(~ time_period) +
    theme_bw() +
    ylab("Unexplained deviance") +
    theme(panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          axis.title.x=element_blank(),
          axis.ticks.x=element_blank(),
          axis.text.x=element_blank(),
          axis.text=element_text(size=14),
          #strip.text.x = element_blank(),
          axis.title.y = element_text(size = 14),
          #legend.position="none",
          plot.margin=unit(c(0,1,1,1), "cm")
    )
}
##### -- multispeciesPP_eval_plot() -- #####
#### Function to produce a barplot of predictive performance (meawsured using auc and cor) for each species and across all species
## Barplot of cross-period predictive performance (AUC or Pearson
## correlation) for one taxon, faceted by time period and model type.
## `mPP_eval_output` is the list returned by multispeciesPP_predictions():
## one element per saved model, each holding eval_t1_pa and eval_t2_pa.
## `measure` selects which evaluation statistic to plot.
multispeciesPP_eval_plot <- function(mPP_eval_output, taxon_name = NA, measure = c("auc", "cor")){
  measure <- match.arg(measure)
  ## For each model keep the *out-of-period* evaluation table: models fitted
  ## on t1 data are scored on the t2 PA data (element [[2]]) and vice versa.
  ## BUG FIX: the original tested names(mPP_eval_output)[1] inside the loop,
  ## so every model was classified by the FIRST model's time period; use [i].
  for (i in seq(along = mPP_eval_output)){
    if (grepl("t1", names(mPP_eval_output)[i])){
      mPP_eval_output[[i]] <- mPP_eval_output[[i]][[2]]
    } else mPP_eval_output[[i]] <- mPP_eval_output[[i]][[1]]
    mPP_eval_output[[i]]$model <- strsplit(names(mPP_eval_output)[i], "\\.")[[1]][1]
  }
  eval_df <- do.call("rbind", mPP_eval_output)
  ## Parse design labels from the underscore-separated model-name convention
  ## mPP_<group>_<genus>_<species|multispecies>_<time>_<predictor set>
  eval_df$predictor_set <- unlist(lapply(strsplit(as.character(eval_df$model), "_"), function(x) x[length(x)]))
  eval_df$time_period <- unlist(lapply(strsplit(as.character(eval_df$model), "_"), function(x) x[length(x) - 1]))
  eval_df$time_period <- as.factor(eval_df$time_period)
  eval_df$group <- unlist(lapply(strsplit(as.character(eval_df$model), "_"), function(x) x[2]))
  eval_df$type <- "single species"
  eval_df$type[grepl("multispecies", eval_df$model)] <- "multispecies"
  eval_df$type <- as.factor(eval_df$type)
  eval_df <- subset(eval_df, species == taxon_name)
  ## Collapse the model label by dropping the time/predictor-set suffix
  eval_df$model <- unlist(lapply(strsplit(as.character(eval_df$model), "_"), function(x) paste(x[-c(length(x)-1, length(x))], collapse = "_")))
  eval_df$model <- as.factor(eval_df$model)
  ggplot(eval_df, aes_string(x = "predictor_set", y = measure)) +
    geom_bar(aes(fill = predictor_set), position = position_dodge(width=1), stat="identity") +
    facet_wrap(~ time_period + type) +
    ylim(c(0, 1)) +
    theme_bw() +
    ## BUG FIX: y-axis label previously read "Unexplained deviance", a
    ## copy-paste from multispeciesPP_dev_plot; label the plotted measure.
    ylab(ifelse(measure == "auc", "AUC", "Pearson correlation")) +
    theme(panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          axis.title.x=element_blank(),
          axis.ticks.x=element_blank(),
          axis.text.x=element_blank(),
          axis.text=element_text(size=14),
          #strip.text.x = element_blank(),
          axis.title.y = element_text(size = 14),
          #legend.position="none",
          plot.margin=unit(c(0,1,1,1), "cm")
    )
}
|
/src/climate-vs-habitat-change-california-functions.R
|
no_license
|
giorap/climate-vs-habitat-change-california
|
R
| false
| false
| 41,916
|
r
|
######################################################
##### -- climate-vs-habitat-change-california -- #####
######################################################
##################### FUNCTIONS ######################
##### -- multispeciesPP_edit() -- #####
##### Modified multispeciesPP function: function was edited to return fit object, which includes estimates of residual deviance
##### R code from multispeciesPP by Will Fithian (https://github.com/wfithian/multispeciesPP/blob/master/R/multispeciesPP.R).
##### For more information, see
##### Fithian et al. (2014) Bias correction in species distribution models: pooling survey and collection data for multiple species. Methods in Ecology and Evolution
#' Fit a joint presence-absence / presence-only point-process SDM.
#'
#' Edited copy of multispeciesPP::multispeciesPP() whose only intended change
#' (per the header above) is that the internal `fit` object from
#' block.glm.fit() is returned in the output list, exposing residual deviance.
#'
#' @param sdm.formula RHS formula for the species' intensity (environment).
#' @param bias.formula RHS formula for the PO sampling-bias covariates.
#' @param PA data.frame of presence-absence survey data (one column/species).
#' @param PO named list of presence-only data.frames, one per species.
#' @param BG background data.frame representing the study region.
#' @param species,species.PA,species.PO species names modeled overall / with
#'   PA data / with PO data; defaults tie them to names(PO).
#' @param quadrat.size,region.size area of a PA quadrat and of the region.
#' @param start optional starting coefficient vector.
#' @param penalty.l2.sdm,penalty.l2.bias,penalty.l2.intercept L2 ridge
#'   penalties on SDM slopes, bias slopes, and intercept-like terms.
#' @param weights observation weights (default all 1).
#' @param control list passed to glm.control().
#' @return list of class "multispeciesPP" with standardized/unstandardized
#'   coefficients, standard errors, fitted values, and the raw `fit` object.
#' NOTE(review): relies on block.glm.fit() and linear(), which are defined
#' elsewhere in the multispeciesPP package, not in this file.
multispeciesPP_edit <-
  function (sdm.formula, bias.formula, PA, PO, BG, species = names(PO),
    species.PA = species, species.PO = species, quadrat.size = 1,
    region.size = 1, start = NULL, inverse.hessian = FALSE, penalty.l2.sdm = 0.1,
    penalty.l2.bias = 0.1, penalty.l2.intercept = 1e-04, weights = rep(1,
        n.species * nrow(x)), control = list())
{
    control <- do.call("glm.control", control)
    # Modeled species = union of those with PO data and those with PA data.
    species <- union(species.PO, species.PA)
    # Force an intercept into the SDM formula and remove it from the bias one.
    sdm.formula <- update(sdm.formula, ~. + 1)
    bias.formula <- update(bias.formula, ~. - 1)
    sdm.mf <- model.frame(sdm.formula, data = BG)
    bias.mf <- model.frame(bias.formula, data = BG)
    # Build the SDM design matrix on the background, then center/scale every
    # non-intercept column; sdm.standardize() applies the same transform to
    # other data sets so all design matrices share one standardization.
    sdm.BG.model.matrix <- model.matrix(terms(sdm.mf), BG)
    sdm.means <- c(0, apply(sdm.BG.model.matrix[, -1, drop = FALSE],
        2, mean))
    sdm.BG.model.matrix <- sweep(sdm.BG.model.matrix, 2, sdm.means,
        "-")
    sdm.sds <- c(1, apply(sdm.BG.model.matrix[, -1, drop = FALSE],
        2, sd))
    sdm.BG.model.matrix <- sweep(sdm.BG.model.matrix, 2, sdm.sds,
        "/")
    sdm.standardize <- function(mat) sweep(sweep(mat, 2, sdm.means,
        "-"), 2, sdm.sds, "/")
    # Same centering/scaling for the bias covariates (no intercept column).
    bias.BG.model.matrix <- model.matrix(terms(bias.mf), BG)
    bias.means <- apply(bias.BG.model.matrix, 2, mean)
    bias.BG.model.matrix <- sweep(bias.BG.model.matrix, 2, bias.means,
        "-")
    bias.sds <- apply(bias.BG.model.matrix, 2, sd)
    bias.BG.model.matrix <- sweep(bias.BG.model.matrix, 2, bias.sds,
        "/")
    bias.standardize <- function(mat) sweep(sweep(mat, 2, bias.means,
        "-"), 2, bias.sds, "/")
    # Rows usable in both design matrices (model.matrix drops NA rows).
    BG.good.rows <- intersect(rownames(sdm.BG.model.matrix),
        rownames(bias.BG.model.matrix))
    sdm.PA.model.matrix <- sdm.standardize(model.matrix(terms(sdm.mf),
        PA))
    PA.good.rows <- rownames(sdm.PA.model.matrix)
    # Per-species PO design matrices (SDM + bias), restricted to rows present
    # in both.
    if (!is.null(species.PO)) {
        sdm.PO.model.matrices <- lapply(as.list(species.PO),
            function(sp) sdm.standardize(model.matrix(terms(sdm.mf),
                PO[[sp]])))
        names(sdm.PO.model.matrices) <- species.PO
        bias.PO.model.matrices <- lapply(as.list(species.PO),
            function(sp) bias.standardize(model.matrix(terms(bias.mf),
                PO[[sp]])))
        names(bias.PO.model.matrices) <- species.PO
        PO.good.rows <- lapply(as.list(species.PO), function(sp) intersect(rownames(sdm.PO.model.matrices[[sp]]),
            rownames(bias.PO.model.matrices[[sp]])))
        names(PO.good.rows) <- species.PO
    }
    # Problem dimensions: number of species, SDM slopes, bias slopes.
    n.species <- length(species)
    p.sdm <- ncol(sdm.BG.model.matrix) - 1
    p.bias <- ncol(bias.BG.model.matrix)
    # Sufficient statistics ("margins") of the PO data: column sums of the
    # design matrices over each species' PO points, plus the PO point counts.
    sdm.margins.ab <- matrix(0, n.species, p.sdm + 1, dimnames = list(species,
        colnames(sdm.BG.model.matrix)))
    sdm.margins.gamma <- matrix(0, n.species, 1, dimnames = list(species,
        "isPO"))
    bias.margins <- matrix(0, 1, p.bias, dimnames = list(NULL,
        colnames(bias.BG.model.matrix)))
    for (sp in species.PO) {
        k <- match(sp, species)
        sdm.margins.ab[k, ] <- colSums(sdm.PO.model.matrices[[sp]][PO.good.rows[[sp]],
            , drop = FALSE])
        sdm.margins.gamma[k, ] <- length(PO.good.rows[[sp]])
        bias.margins <- bias.margins + colSums(bias.PO.model.matrices[[sp]][PO.good.rows[[sp]],
            , drop = FALSE])
    }
    # Helpers to pack/unpack the flat coefficient vector into the model's
    # named pieces: alpha (intercepts), beta (SDM slopes), gamma (isPO
    # species offsets), delta (bias slopes).
    abcd.from.all.coef <- function(all.coef) {
        sdm.coef <- matrix(all.coef[1:(n.species * (p.sdm + 2))],
            p.sdm + 2, n.species)
        alpha <- sdm.coef[1, ]
        beta <- t(sdm.coef[2:(p.sdm + 1), , drop = FALSE])
        gamma <- sdm.coef[p.sdm + 2, ]
        delta <- all.coef[-(1:(n.species * (p.sdm + 2)))]
        names(alpha) <- names(gamma) <- species
        colnames(beta) <- colnames(sdm.margins.ab)[-1]
        rownames(beta) <- species
        names(delta) <- colnames(bias.BG.model.matrix)
        return(list(alpha = alpha, beta = beta, gamma = gamma,
            delta = delta))
    }
    all.coef.from.abcd <- function(alpha, beta, gamma, delta) {
        c(rbind(alpha, beta, gamma), delta)
    }
    n.PA <- length(PA.good.rows)
    n.BG <- length(BG.good.rows)
    subsamp.PA.offset <- 0
    subsamp.BG.offset <- 0
    n.sites <- n.BG + n.PA
    # Stacked SDM design matrix: PO margin rows, one bias-margin row (all 0
    # here), PA rows, BG rows, then sqrt-penalty rows implementing the L2
    # ridge as pseudo-observations. The final column is the isPO indicator.
    x <- cbind(rbind(sdm.margins.ab, 0, sdm.PA.model.matrix[PA.good.rows,
        , drop = FALSE], sdm.BG.model.matrix[BG.good.rows, ,
        drop = FALSE]), c(sdm.margins.gamma, rep(0:1, c(1 + n.PA,
        n.BG))))
    x <- rbind(x, diag(sqrt(c(penalty.l2.intercept, rep(penalty.l2.sdm,
        p.sdm), penalty.l2.intercept))), matrix(0, p.sdm +
        2))
    # Matching stacked bias design matrix (shared across species).
    z <- rbind(matrix(0, n.species, p.bias), bias.margins, matrix(0,
        n.PA, p.bias), bias.BG.model.matrix[BG.good.rows, , drop = FALSE],
        matrix(0, p.sdm + 2, p.bias), sqrt(penalty.l2.bias/n.species) *
            diag(p.bias))
    # Response and offset vectors, one full copy of the stacked rows per
    # species; NA marks row blocks a species does not contribute to.
    y <- rep(0, nrow(x) * n.species)
    offset <- rep(0, nrow(x) * n.species)
    for (k in 1:n.species) {
        yk <- rep(0, nrow(x))
        yk[1:n.species] <- 1 * (1:n.species == k)
        yk[1 + n.species] <- 1 * (1 == k)
        if (species[k] %in% species.PA) {
            yk[1 + n.species + (1:n.PA)] <- PA[PA.good.rows,
                species[k]]
        }
        else {
            yk[1 + n.species + (1:n.PA)] <- NA
        }
        if (species[k] %in% species.PO) {
            yk[1 + n.species + n.PA + (1:n.BG)] <- 0
        }
        else {
            yk[1 + n.species + n.PA + (1:n.BG)] <- NA
        }
        yk[1 + n.species + n.sites + (1:(p.sdm + 2 + p.bias))] <- 0
        y[(k - 1) * nrow(x) + 1:nrow(x)] <- yk
        offk <- rep(0, nrow(x))
        # Offsets: PA rows get log quadrat area; BG rows get log of the area
        # represented by each background point (region.size / n.BG).
        offk[1 + n.species + (1:n.PA)] <- log(quadrat.size)
        offk[1 + n.species + n.PA + (1:n.BG)] <- log(region.size) -
            log(n.BG)
        offset[(k - 1) * nrow(x) + 1:nrow(x)] <- offk
    }
    # Flat indices of the PA and BG rows within the stacked per-species y.
    which.PA <- (2 + n.species):(1 + n.species + n.PA) + rep((0:(n.species -
        1)) * nrow(x), each = n.PA)
    which.BG <- (2 + n.species + n.PA):(1 + n.species + n.PA +
        n.BG) + rep((0:(n.species - 1)) * nrow(x), each = n.BG)
    # Default starting values: log PA prevalence for alpha, log PO count
    # (adjusted) for gamma, zeros elsewhere.
    if (is.null(start)) {
        start.alpha <- start.gamma <- rep(0, n.species)
        for (k in 1:n.species) {
            if ((species[k] %in% species.PA) && sum(!is.na(PA[PA.good.rows,
                species[k]]) > 0))
                start.alpha[k] <- log((1 + sum(PA[PA.good.rows,
                  species[k]], na.rm = TRUE))/n.PA/quadrat.size)
            if (species[k] %in% species.PO)
                start.gamma[k] <- log1p(sdm.margins.gamma[k,
                  ]) - start.alpha[k] - log(region.size)
        }
        start <- all.coef.from.abcd(start.alpha, matrix(0, p.sdm,
            n.species), start.gamma, rep(0, p.bias))
    }
    # Joint fit: linear rows (margins), cloglog-binomial rows (PA), Poisson
    # rows (BG), Gaussian rows (penalty pseudo-observations).
    fit <- block.glm.fit(x, z, y, weights = weights, start = start,
        offset = offset, families = list(linear(), binomial(link = "cloglog"),
            poisson(), gaussian()), row.families = rep(rep(1:4,
            c(1 + n.species, n.PA, n.BG, p.sdm + p.bias + 2)),
            n.species), control = control)
    all.coef <- fit$coefficients
    eta <- fit$linear.predictors
    mu <- fit$fitted.values
    # Attach human-readable "species:variable" names to coefficients/SEs.
    names(all.coef)[1:(n.species * (p.sdm + 2))] <- paste(rep(species,
        each = p.sdm + 2), c(colnames(sdm.BG.model.matrix)[1:(p.sdm +
        1)], "isPO"), sep = ":")
    names(all.coef)[-(1:(n.species * (p.sdm + 2)))] <- paste("isPO:",
        colnames(bias.BG.model.matrix), sep = "")
    std.errs <- fit$fit$std.errs
    names(std.errs) <- names(all.coef)
    species.coef <- matrix(all.coef[1:(n.species * (p.sdm + 2))],
        p.sdm + 2, n.species, dimnames = list(c(colnames(sdm.margins.ab),
            "isPO"), species))
    bias.coef <- all.coef[-(1:(n.species * (p.sdm + 2)))]
    names(bias.coef) <- colnames(bias.BG.model.matrix)
    # Fitted values on the PA sites and the background, on the linear and
    # response scales; BG fits rescaled back to per-unit-area intensity.
    fit.PA <- linear.fit.PA <- matrix(NA, nrow(PA), length(species),
        dimnames = list(dimnames(PA)[[1]], species))
    linear.fit.PA[PA.good.rows, ] <- eta[which.PA]
    fit.PA[PA.good.rows, ] <- mu[which.PA]
    fit.BG <- linear.fit.BG <- bias.fit.BG <- linear.bias.fit.BG <- matrix(NA,
        nrow(BG), length(species), dimnames = list(dimnames(BG)[[1]],
            species))
    linear.fit.BG[BG.good.rows, ] <- matrix(eta[which.BG], ncol = n.species) +
        log(n.BG) - log(region.size)
    fit.BG[BG.good.rows, ] <- matrix(mu[which.BG], ncol = n.species) *
        n.BG/region.size
    linear.bias.fit.BG[BG.good.rows, ] <- c(bias.BG.model.matrix[BG.good.rows,
        , drop = FALSE] %*% bias.coef)
    bias.fit.BG[BG.good.rows, ] <- exp(linear.bias.fit.BG[BG.good.rows,
        ])
    # Sanity checks: the penalized score equations for gamma and the bias
    # coefficients should be (numerically) zero at the optimum.
    fitted.sdm.margins.gamma <- colSums(fit.BG[BG.good.rows,
        , drop = FALSE]) * region.size/n.BG
    fitted.bias.margins <- colSums(t(fit.BG[BG.good.rows, species.PO,
        drop = FALSE]) %*% bias.BG.model.matrix[BG.good.rows,
        , drop = FALSE] * region.size/n.BG)
    score.check.gamma <- fitted.sdm.margins.gamma - sdm.margins.gamma +
        penalty.l2.intercept * species.coef[p.sdm + 2, ]
    score.check.gamma <- score.check.gamma[species %in% species.PO]
    score.check.bias <- fitted.bias.margins - bias.margins +
        penalty.l2.bias * bias.coef
    if (length(score.check.gamma) > 0)
        stopifnot(mean((score.check.gamma/fit$deviance)^2) <
            control$epsilon)
    stopifnot(mean((score.check.bias/fit$deviance)^2) < control$epsilon)
    # Undo the covariate standardization so coefficients are reported on the
    # original covariate scale (slopes rescaled, intercepts re-centered).
    sd.normalizer <- c(rep(c(sdm.sds, 1), n.species), bias.sds)
    unstandardized.coef <- all.coef/sd.normalizer
    gamma.adjust <- sum(unstandardized.coef[-(1:(n.species *
        (p.sdm + 2)))] * bias.means)
    for (k in 1:n.species) {
        jk <- (p.sdm + 2) * (k - 1) + 1:(p.sdm + 1)
        coef.block <- unstandardized.coef[jk]
        unstandardized.coef[jk[1]] <- coef.block[1] - sum(coef.block[-1] *
            sdm.means[-1])
        unstandardized.coef[jk[1] + p.sdm + 1] <- unstandardized.coef[jk[1] +
            p.sdm + 1] - gamma.adjust
    }
    unstandardized.species.coef <- matrix(unstandardized.coef[1:(n.species *
        (p.sdm + 2))], p.sdm + 2, n.species, dimnames = list(c(colnames(sdm.margins.ab),
        "isPO"), species))
    unstandardized.bias.coef <- unstandardized.coef[-(1:(n.species *
        (p.sdm + 2)))]
    names(unstandardized.bias.coef) <- colnames(bias.BG.model.matrix)
    # Assemble the return value; `fit = fit` is the local edit that exposes
    # the raw block.glm.fit object (including deviance) to callers.
    tr <- list(sdm.formula = sdm.formula, bias.formula = bias.formula, fit = fit,
        normalized.species.coef = species.coef, normalized.bias.coef = bias.coef,
        normalized.all.coef = all.coef, normalized.std.errs = std.errs,
        all.coef = unstandardized.coef, std.errs = std.errs/sd.normalizer,
        species.coef = unstandardized.species.coef, bias.coef = unstandardized.bias.coef,
        linear.fit.PA = linear.fit.PA, fit.PA = fit.PA, linear.bias.fit.BG = linear.bias.fit.BG,
        bias.fit.BG = bias.fit.BG, linear.fit.BG = linear.fit.BG,
        fit.BG = fit.BG)
    class(tr) <- c("multispeciesPP", "list")
    tr
}
##### -- multispeciesPP_wrapper() -- #####
##### Wrapper around function multispeciesPP() from library(multispeciesPP) to facilitate running of models with different types of information
##### R code from multispeciesPP by Will Fithian (https://github.com/wfithian/multispeciesPP/blob/master/R/multispeciesPP.R).
##### For more information, see
##### Fithian et al. (2014) Bias correction in species distribution models: pooling survey and collection data for multiple species. Methods in Ecology and Evolution
#' Convenience wrapper around multispeciesPP_edit().
#'
#' Selects and standardizes the requested predictors, builds the SDM and
#' bias formulas for the chosen predictor set, fits the joint PA/PO model,
#' and saves the fitted object under output/multispeciesPP/models/.
#'
#' @param pa_data presence-absence data.frame (species columns + predictors).
#' @param po_data named list of presence-only data.frames, one per species.
#' @param bg background data.frame for the study region.
#' @param species_names character vector of species to model.
#' @param climate_predictors,habitat_associations predictor column names.
#' @param group taxonomic group; picks the group-specific survey-effort layer.
#' @param predictor_set which predictors enter the SDM formula.
#' @param out_name suffix for the saved .rds file.
#' @param ... forwarded to multispeciesPP_edit().
multispeciesPP_wrapper <- function(pa_data = NULL,
                                   po_data = NULL,
                                   bg = NULL,
                                   species_names = NULL,
                                   climate_predictors = paste("bio", c(1, 6, 12), sep = ""),
                                   habitat_associations = NULL,
                                   group = c("bird", "mamm", "odon"), ## Taxonomic group to model (birds/mammals/odonates)
                                   predictor_set = c("climate", "habitat", "full"), ## Use only climate, habitat, or both (full) as model predictors
                                   out_name = "out",
                                   ...){
  ## Resolve the enumerated arguments to a single choice each.
  group <- match.arg(group)
  predictor_set <- match.arg(predictor_set)
  ## Make sure the output directory tree exists (no-op when already there).
  dir.create(file.path(getwd(), "output/multispeciesPP"), showWarnings = FALSE)
  dir.create(file.path(getwd(), "output/multispeciesPP/models"), showWarnings = FALSE)
  ## Predictor name vectors.
  clim_vars <- climate_predictors
  hab_vars <- habitat_associations
  bias_vars <- c("ruggedness", "dist_from_urban", "dist_from_stream", "dist_from_survey")
  ## Study-area size = number of background cells.
  study_area <- nrow(bg)
  ## Keep only the needed background columns, adding the group-specific
  ## survey-distance layer, then rename any survey column to the generic name.
  bg <- bg[c(intersect(names(bg), c(clim_vars, hab_vars, bias_vars)),
             paste("dist_from_survey", group, sep = "_"))]
  names(bg)[grep("survey", names(bg))] <- "dist_from_survey"
  ## Restrict inputs to the requested species (PA keeps its predictors too).
  if (!is.null(pa_data)) {
    pa_data <- pa_data[c(species_names, clim_vars, hab_vars)]
  }
  if (!is.null(po_data)) {
    po_data <- po_data[species_names]
  }
  ## Center and scale all covariates.
  if (!is.null(pa_data)) {
    pa_data[, c(clim_vars, hab_vars)] <-
      apply(pa_data[, c(clim_vars, hab_vars)], 2, scale) %>% data.frame()
  }
  if (!is.null(po_data)) {
    po_data <- lapply(po_data, function(x) {
      apply(x[c(clim_vars, hab_vars, bias_vars)], 2, scale) %>% data.frame()
    })
  }
  bg[, c(clim_vars, hab_vars, bias_vars)] <-
    apply(bg[c(clim_vars, hab_vars, bias_vars)], 2, scale) %>% data.frame()
  ## Build the model formulas from the predictor names.
  clim_term <- paste(clim_vars, collapse = " + ")
  hab_term <- paste(hab_vars, collapse = " + ")
  bias_formula <- as.formula(paste("~ ", paste(bias_vars, collapse = " + "), sep = ""))
  sdm_formula <- switch(predictor_set,
    full    = as.formula(paste("~ ", clim_term, " + ", hab_term, sep = "")),
    climate = as.formula(paste("~ ", clim_term, sep = "")),
    habitat = as.formula(paste("~ ", hab_term, sep = ""))
  )
  ## Fit the joint PA/PO point-process model.
  mPP <- multispeciesPP_edit(
    sdm.formula = sdm_formula,
    bias.formula = bias_formula,
    PA = pa_data,
    PO = po_data,
    BG = bg,
    region.size = study_area,
    ...
  )
  ## Persist the fitted model.
  saveRDS(mPP, file = paste0("output/multispeciesPP/models/mPP_", out_name, ".rds"))
}
##### -- multispeciesPP_output() -- #####
##### Extract useful output from saved multispeciesPP models
#' Summarize saved multispeciesPP model fits.
#'
#' Reads every .rds model file in `mPP_directory` and, for each, builds a
#' coefficient table (estimate, SE, z, two-sided p) on the standardized scale
#' plus the fit's residual deviance.
#'
#' @param mPP_directory directory containing mPP_*.rds files (trailing slash).
#' @return named list (one element per model file), each a list with
#'   `summary` (data.frame) and `deviance` (numeric).
multispeciesPP_output <- function(mPP_directory = "output/multispeciesPP/models/"){
  model_files <- list.files(mPP_directory)
  results <- lapply(model_files, function(fname) {
    fit <- readRDS(paste0(mPP_directory, fname))
    # Standardized coefficients and Wald statistics.
    est <- fit$normalized.all.coef
    std_err <- fit$normalized.std.errs
    z_val <- est / std_err
    coef_table <- data.frame(est, std_err, z_val, 2 * pnorm(-abs(z_val)))
    colnames(coef_table) <- c("estimate", "se", "z", "p")
    # Row names have the form "species:variable"; split them into columns.
    name_parts <- strsplit(row.names(coef_table), ":")
    coef_table$species <- factor(vapply(name_parts, function(p) p[1], character(1)))
    coef_table$variable <- vapply(name_parts, function(p) p[2], character(1))
    coef_table$model <- fname
    list(summary = coef_table,
         deviance = fit$fit$deviance)
  })
  # Name list entries by file name without the extension.
  names(results) <- vapply(strsplit(model_files, "\\."), function(p) p[[1]], character(1))
  return(results)
}
###############
#### roc() ####
###############
"roc" <-
function (obsdat, preddat)
{
  # Area under the ROC curve via the Mann-Whitney U statistic.
  #
  # obsdat: binary observations (0 = absence, 1 = presence)
  # preddat: predicted values, same length as obsdat
  # returns: AUC rounded to 4 decimals, or NA if only one class is present.
  #
  # code adapted from Ferrier, Pearce and Watson's code, by J.Elith
  # see:
  # Hanley, J.A. & McNeil, B.J. (1982) The meaning and use of the area
  # under a Receiver Operating Characteristic (ROC) curve.
  # Radiology, 143, 29-36
  # Pearce, J. & Ferrier, S. (2000) Evaluating the predictive performance
  # of habitat models developed using logistic regression.
  # Ecological Modelling, 133, 225-245.
  if (length(obsdat) != length(preddat))
    stop("obs and preds must be equal lengths")
  n.x <- length(obsdat[obsdat == 0])
  n.y <- length(obsdat[obsdat == 1])
  # FIX: with no absences or no presences the original divided by zero (NaN)
  # and mis-indexed rnk[1:0]; AUC is undefined here, so return NA explicitly.
  if (n.x == 0 || n.y == 0) {
    warning("roc() requires both presences and absences; returning NA")
    return(NA_real_)
  }
  # Rank all predictions together; the rank sum of the absences gives the
  # Mann-Whitney U, which rescales to the AUC.
  xy <- c(preddat[obsdat == 0], preddat[obsdat == 1])
  rnk <- rank(xy)
  wilc <- ((n.x * n.y) + ((n.x * (n.x + 1))/2) - sum(rnk[seq_len(n.x)]))/(n.x *
    n.y)
  return(round(wilc, 4))
}
#########################
#### calc_deviance() ####
#########################
"calc_deviance" <-
function(obs.values, fitted.values, weights = rep(1,length(obs.values)), family="binomial", calc.mean = TRUE)
{
  # Deviance between observed and fitted values for a given family.
  #
  # j. leathwick/j. elith -- version 2.1 - 5th Sept 2005
  #
  # obs.values: vector of raw observations
  # fitted.values: vector of fitted values, same length
  # weights: per-observation weights (default 1)
  # family: "binomial"/"bernoulli", "poisson"/"Poisson", "laplace", "gaussian"
  # calc.mean: if TRUE, return the mean (per-observation) deviance
  if (length(obs.values) != length(fitted.values))
    stop("observations and predictions must be of equal length")
  # FIX: an unrecognized family previously fell through to an opaque
  # "object 'deviance' not found" error; validate it up front instead.
  known.families <- c("binomial", "bernoulli", "poisson", "Poisson",
                      "laplace", "gaussian")
  if (!family %in% known.families)
    stop("unknown family: ", family,
         " (expected one of: ", paste(known.families, collapse = ", "), ")")
  y_i <- obs.values
  u_i <- fitted.values
  if (family == "binomial" | family == "bernoulli") {
    # NOTE: fitted values of exactly 0 or 1 yield -Inf/NaN contributions.
    deviance.contribs <- (y_i * log(u_i)) + ((1-y_i) * log(1 - u_i))
    deviance <- -2 * sum(deviance.contribs * weights)
  }
  if (family == "poisson" | family == "Poisson") {
    deviance.contribs <- ifelse(y_i == 0, 0, (y_i * log(y_i/u_i))) - (y_i - u_i)
    deviance <- 2 * sum(deviance.contribs * weights)
  }
  if (family == "laplace") {
    deviance <- sum(abs(y_i - u_i))
  }
  if (family == "gaussian") {
    deviance <- sum((y_i - u_i) * (y_i - u_i))
  }
  if (calc.mean) deviance <- deviance/length(obs.values)
  return(deviance)
}
#######################
##### eval_pred() #####
#######################
#' Evaluate predictions against observations, per species.
#'
#' For each species, matches the observation and prediction columns by name
#' (via grep on the species name) and computes AUC (roc), mean binomial
#' deviance (calc_deviance), and Pearson correlation.
#'
#' @param obs_table data.frame with one observation column per species.
#' @param pred_table data.frame with one prediction column per species.
#' @param species_names character vector of species to evaluate.
#' @return data.frame with columns species, auc, cor, dev (one row/species).
eval_pred <- function(obs_table = NA, pred_table = NA, species_names = NA){
  eval_table <- data.frame(species = species_names, auc = NA, cor = NA, dev = NA)
  # A single loop covers both the one-species and many-species cases.
  for (i in seq_along(species_names)) {
    obs <- obs_table[, grep(species_names[i], names(obs_table))]
    pred <- pred_table[, grep(species_names[i], names(pred_table))]
    eval_table$auc[i] <- roc(obs, pred)
    eval_table$dev[i] <- calc_deviance(obs, pred)
    eval_table$cor[i] <- cor(obs, pred, use = "complete.obs", method = "pearson")
  }
  return(eval_table)
}
# Generate and evaluate predictions for every saved multispeciesPP model.
#
# For each model file, predicts occurrence probability on the historic (t1)
# and modern (t2) background and presence-absence data, then scores the PA
# predictions with eval_pred().
#
# mPP_directory: directory holding mPP_*.rds model files (trailing slash).
# Returns (and saves) a named list with eval_t1_pa / eval_t2_pa per model.
#
# NOTE(review): depends on globals t1_bg, t2_bg, t1_pa_bird, t2_pa_bird,
# t1_pa_mamm, t2_pa_mamm and on predict.multispeciesPP() — none defined here.
# NOTE(review): only "bird" and "mamm" file names set t1_pa/t2_pa; an "odon"
# model would reuse the previous iteration's PA data (or error on the first
# iteration) — confirm odonate models never reach this function.
multispeciesPP_predictions <- function(mPP_directory = "output/multispeciesPP/models/"){
  # Create directory to save model predictions
  dir.create(paste(getwd(), "/output/multispeciesPP/predictions", sep = ""), showWarnings = FALSE)
  mPP_list <- list.files(mPP_directory)
  mPP_eval_output <- vector('list', length(mPP_list))
  for(i in seq(along = mPP_eval_output)){
    mPP <- readRDS(paste(mPP_directory, mPP_list[i], sep = ""))
    # Pick the taxon-specific presence-absence tables from the file name.
    if (grepl("bird", mPP_list[i])){
      t1_pa <- t1_pa_bird
      t2_pa <- t2_pa_bird
    }
    if (grepl("mamm", mPP_list[i])){
      t1_pa <- t1_pa_mamm
      t2_pa <- t2_pa_mamm
    }
    # 1 - exp(-exp(eta)) is the inverse complementary log-log link: it maps
    # the model's linear predictor to an occurrence probability.
    # t1_bg predictions
    predictions_t1_bg <- data.frame(t1_bg[c('longitude', 'latitude')], (1 - exp(-exp(predict.multispeciesPP(mPP, newdata = t1_bg)))))
    # t2_bg predictions
    predictions_t2_bg <- data.frame(t2_bg[c('longitude', 'latitude')], (1 - exp(-exp(predict.multispeciesPP(mPP, newdata = t2_bg)))))
    # t1_pa predictions
    predictions_t1_pa <- data.frame(t1_pa[c('longitude', 'latitude')], (1 - exp(-exp(predict.multispeciesPP(mPP, newdata = t1_pa)))))
    # t2_pa predictions
    predictions_t2_pa <- data.frame(t2_pa[c('longitude', 'latitude')], (1 - exp(-exp(predict.multispeciesPP(mPP, newdata = t2_pa)))))
    # save predictions (currently disabled)
    #saveRDS(predictions_t1_bg, paste("output/multispeciesPP/", strsplit(mPP_list[i], "\\.")[[1]][1], '_bg.rds', sep = ''))
    #saveRDS(predictions_t1_bg, paste('output/multispeciesPP/predictions/', strsplit(mPP_list[i], "\\.")[[1]][1], '_bg.rds', sep = ''))
    #saveRDS(predictions_t2_bg, paste('output/multispeciesPP/predictions/', strsplit(mPP_list[i], "\\.")[[1]][1], '_bg.rds', sep = ''))
    #saveRDS(predictions_t1_pa, paste('output/multispeciesPP/predictions/', strsplit(mPP_list[i], "\\.")[[1]][1], '_pa.rds', sep = ''))
    #saveRDS(predictions_t2_pa, paste('output/multispeciesPP/predictions/', strsplit(mPP_list[i], "\\.")[[1]][1], '_pa.rds', sep = ''))
    #saveRDS(predictions_change_bg, paste('output/multispeciesPP/predictions/', strsplit(mPP_list[i], "\\.")[[1]][1], '_predictions_change_bg.rds', sep = ''))
    # Score the PA predictions for every species in the fitted model.
    eval_t1_pa <- eval_pred(obs_table = t1_pa, pred_table = predictions_t1_pa, species_names = colnames(mPP$normalized.species.coef))
    eval_t2_pa <- eval_pred(obs_table = t2_pa, pred_table = predictions_t2_pa, species_names = colnames(mPP$normalized.species.coef))
    mPP_eval_output[[i]] <- list(eval_t1_pa = eval_t1_pa, eval_t2_pa = eval_t2_pa)
    # Re-assigned every iteration; harmless, though it could live after the loop.
    names(mPP_eval_output) <- mPP_list
    rm(mPP)
  }
  saveRDS(mPP_eval_output, 'output/multispeciesPP/mPP_eval_output.rds')
  return(mPP_eval_output)
}
##### -- multispeciesPP_coef_plot() -- #####
#### Function to plot standardized model coefficients from the various different models of a given species
# Plot standardized regression coefficients (with ~95% error bars) for one
# species across all its single-species and multi-species models.
#
# species_name: species whose coefficients to plot (matched by grep).
# group: taxonomic group, used to find the multispecies models.
# mPP_out: output of multispeciesPP_output().
# Returns a ggplot object faceted by predictor variable.
multispeciesPP_coef_plot <- function(species_name, group = c("bird", "mamm"), mPP_out){
  group <- match.arg(group)
  # Single-species models for this species; drop intercept/bias coefficients.
  species_models <- species_coefs <- mPP_out[grep(species_name, names(mPP_out))]
  for (i in seq(along = species_models)){
    species_coefs[[i]] <- data.frame(species_models[[i]][[1]], model = names(species_models)[i])
    species_coefs[[i]] <- subset(species_coefs[[i]], !(species_coefs[[i]]$variable %in% c("(Intercept)", "isPO", "ruggedness", "dist_from_urban", "dist_from_stream", "dist_from_survey")))
  }
  # Multispecies models for the group, keeping only this species' rows.
  multi_models <- multi_coefs <- mPP_out[grep(paste(group, "multispecies", sep = "_"), names(mPP_out))]
  for (i in seq(along = multi_models)){
    multi_coefs[[i]] <- data.frame(multi_models[[i]][[1]], model = names(multi_models)[i])
    multi_coefs[[i]] <- subset(multi_coefs[[i]], multi_coefs[[i]]$species == species_name & !(multi_coefs[[i]]$variable %in% c("(Intercept)", "isPO", "ruggedness", "dist_from_urban", "dist_from_stream", "dist_from_survey")))
  }
  species_coefs <- do.call("rbind", c(species_coefs, multi_coefs))
  # Reorder model levels: single/multi x t1/t2 x climate/habitat/full.
  # NOTE(review): this positional reordering (and the level renaming below)
  # assumes all 12 model combinations are present exactly once — confirm
  # before calling with a partial model set.
  species_coefs$model <- as.factor(species_coefs$model)
  species_coefs$model <- factor(species_coefs$model,
                                levels = levels(species_coefs$model)[c(
                                  which(!grepl("multispecies", levels(species_coefs$model)) & grepl("t1", levels(species_coefs$model)) & grepl("climate", levels(species_coefs$model))),
                                  which(!grepl("multispecies", levels(species_coefs$model)) & grepl("t1", levels(species_coefs$model)) & grepl("habitat", levels(species_coefs$model))),
                                  which(!grepl("multispecies", levels(species_coefs$model)) & grepl("t1", levels(species_coefs$model)) & grepl("full", levels(species_coefs$model))),
                                  which(grepl("multispecies", levels(species_coefs$model)) & grepl("t1", levels(species_coefs$model)) & grepl("climate", levels(species_coefs$model))),
                                  which(grepl("multispecies", levels(species_coefs$model)) & grepl("t1", levels(species_coefs$model)) & grepl("habitat", levels(species_coefs$model))),
                                  which(grepl("multispecies", levels(species_coefs$model)) & grepl("t1", levels(species_coefs$model)) & grepl("full", levels(species_coefs$model))),
                                  which(!grepl("multispecies", levels(species_coefs$model)) & grepl("t2", levels(species_coefs$model)) & grepl("climate", levels(species_coefs$model))),
                                  which(!grepl("multispecies", levels(species_coefs$model)) & grepl("t2", levels(species_coefs$model)) & grepl("habitat", levels(species_coefs$model))),
                                  which(!grepl("multispecies", levels(species_coefs$model)) & grepl("t2", levels(species_coefs$model)) & grepl("full", levels(species_coefs$model))),
                                  which(grepl("multispecies", levels(species_coefs$model)) & grepl("t2", levels(species_coefs$model)) & grepl("climate", levels(species_coefs$model))),
                                  which(grepl("multispecies", levels(species_coefs$model)) & grepl("t2", levels(species_coefs$model)) & grepl("habitat", levels(species_coefs$model))),
                                  which(grepl("multispecies", levels(species_coefs$model)) & grepl("t2", levels(species_coefs$model)) & grepl("full", levels(species_coefs$model)))
                                )])
  # Human-readable labels, assigned by position to the 12 reordered levels.
  levels(species_coefs$model) <- c("Historic climate-only single-species model", "Historic habitat-only single-species model",
                                   "Historic full single-species model", "Historic climate-only multi-species model",
                                   "Historic habitat-only multi-species model", "Historic full multi-species model",
                                   "Modern climate-only single-species model", "Modern habitat-only single-species model",
                                   "Modern full single-species model", "Modern climate-only multi-species model",
                                   "Modern habitat-only multi-species model", "Modern full multi-species model")
  # Order facets by mean absolute coefficient size (largest effect first).
  species_coefs$variable <- as.factor(as.character(species_coefs$variable))
  species_coefs$variable <- factor(species_coefs$variable, levels = names(sort(tapply(abs(species_coefs$estimate), species_coefs$variable, mean), decreasing = TRUE)))
  species_coefs <- species_coefs[order(species_coefs$variable, species_coefs$model), ]
  # +/- 2 SE error bars (approximate 95% interval).
  species_coefs$higher <- species_coefs$estimate + (2 * species_coefs$se)
  species_coefs$lower <- species_coefs$estimate - (2 * species_coefs$se)
  coef_plot <- ggplot(species_coefs, aes(x = model, y = estimate)) +
    # Invisible bars keep the dodge spacing consistent across facets.
    geom_bar(aes(fill = model), position = position_dodge(width=0.3), stat="identity", alpha=0) +
    geom_point(aes(color = model), position = position_dodge(width = .8), size = 3) +
    geom_hline(aes(yintercept = 0), linetype = 2) +
    geom_errorbar(aes(ymax = higher, ymin = lower, color = model), position = position_dodge(width = .8), size = 1, width = 0.6) +
    facet_wrap(~ variable) +
    theme_bw() +
    ylab("Standardized regression coefficient") +
    theme(panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          axis.title.x=element_blank(),
          axis.ticks.x=element_blank(),
          axis.text.x=element_blank(),
          axis.text=element_text(size=14),
          #strip.text.x = element_blank(),
          axis.title.y = element_text(size = 14),
          #legend.position="none",
          plot.margin=unit(c(0,1,1,1), "cm")
    )
  return(coef_plot)
}
##### -- multispeciesPP_dev_plot() -- #####
#### Function to produce a barplot of model deviance for each species and across all species
#' Barplot of residual deviance per model for one taxon.
#'
#' Parses model metadata (predictor set, time period, group, species) out of
#' the underscore-separated model names in `mPP_out`, keeps only the rows for
#' `taxon_name`, and plots deviance faceted by time period.
#'
#' @param mPP_out output of multispeciesPP_output().
#' @param taxon_name species (or "<group>_multispecies") to plot.
#' @return a ggplot object.
multispeciesPP_dev_plot <- function(mPP_out, taxon_name = NA){
  # Local helper: split each model name on "_" and apply f to the pieces.
  name_part <- function(labels, f) {
    vapply(strsplit(as.character(labels), "_"), f, character(1))
  }
  dev_df <- data.frame(model = names(mPP_out),
                       deviance = unlist(lapply(mPP_out, function(x) x[[2]])))
  # Model names end in "..._<time_period>_<predictor_set>".
  dev_df$predictor_set <- name_part(dev_df$model, function(x) x[length(x)])
  dev_df$time_period <- as.factor(name_part(dev_df$model, function(x) x[length(x) - 1]))
  dev_df$group <- name_part(dev_df$model, function(x) x[2])
  dev_df$species <- name_part(dev_df$model, function(x) paste(x[c(3, 4)], collapse = "_"))
  # Multispecies fits are labeled "<group>_multispecies" instead of a species.
  is_multi <- grepl("multispecies", dev_df$species)
  dev_df$species[is_multi] <- paste(dev_df$group[is_multi], "multispecies", sep = "_")
  dev_df <- subset(dev_df, species == taxon_name)
  # Collapse the model label to everything before the final two tokens.
  dev_df$model <- as.factor(name_part(dev_df$model,
                                      function(x) paste(x[-c(length(x) - 1, length(x))], collapse = "_")))
  ggplot(dev_df, aes(x = model, y = deviance)) +
    geom_bar(aes(fill = predictor_set), position = position_dodge(width=1), stat="identity") +
    facet_wrap(~ time_period) +
    theme_bw() +
    ylab("Unexplained deviance") +
    theme(panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          axis.title.x=element_blank(),
          axis.ticks.x=element_blank(),
          axis.text.x=element_blank(),
          axis.text=element_text(size=14),
          #strip.text.x = element_blank(),
          axis.title.y = element_text(size = 14),
          #legend.position="none",
          plot.margin=unit(c(0,1,1,1), "cm")
    )
}
##### -- multispeciesPP_eval_plot() -- #####
#### Function to produce a barplot of predictive performance (meawsured using auc and cor) for each species and across all species
#' Barplot of predictive performance (AUC or correlation) for one taxon.
#'
#' Models fitted on one time period are scored on the other: t1 models use
#' the t2 evaluation table and vice versa. Results are faceted by time
#' period and by single- vs multi-species model type.
#'
#' @param mPP_eval_output output of multispeciesPP_predictions(): a named
#'   list with eval_t1_pa / eval_t2_pa per model.
#' @param taxon_name species to plot.
#' @param measure which evaluation statistic to plot ("auc" or "cor").
#' @return a ggplot object.
multispeciesPP_eval_plot <- function(mPP_eval_output, taxon_name = NA, measure = c("auc", "cor")){
  measure <- match.arg(measure)
  for (i in seq(along = mPP_eval_output)){
    # Cross-time evaluation: a model fitted on t1 is scored on t2 data
    # ([[2]] = eval_t2_pa) and vice versa.
    # BUG FIX: the original tested names(mPP_eval_output)[1], so every model
    # was classified by the FIRST model's name; index the current model [i].
    if (grepl("t1", names(mPP_eval_output)[i])){
      mPP_eval_output[[i]] <- mPP_eval_output[[i]][[2]]
    } else mPP_eval_output[[i]] <- mPP_eval_output[[i]][[1]]
    mPP_eval_output[[i]]$model <- strsplit(names(mPP_eval_output)[i], "\\.")[[1]][1]
  }
  eval_df <- do.call("rbind", mPP_eval_output)
  # Model names end in "..._<time_period>_<predictor_set>".
  eval_df$predictor_set <- unlist(lapply(strsplit(as.character(eval_df$model), "_"), function(x) x[length(x)]))
  eval_df$time_period <- unlist(lapply(strsplit(as.character(eval_df$model), "_"), function(x) x[length(x) - 1]))
  eval_df$time_period <- as.factor(eval_df$time_period)
  eval_df$group <- unlist(lapply(strsplit(as.character(eval_df$model), "_"), function(x) x[2]))
  eval_df$type <- "single species"
  eval_df$type[grepl("multispecies", eval_df$model)] <- "multispecies"
  eval_df$type <- as.factor(eval_df$type)
  eval_df <- subset(eval_df, species == taxon_name)
  eval_df$model <- unlist(lapply(strsplit(as.character(eval_df$model), "_"), function(x) paste(x[-c(length(x)-1, length(x))], collapse = "_")))
  eval_df$model <- as.factor(eval_df$model)
  ggplot(eval_df, aes_string(x = "predictor_set", y = measure)) +
    geom_bar(aes(fill = predictor_set), position = position_dodge(width=1), stat="identity") +
    facet_wrap(~ time_period + type) +
    ylim(c(0, 1)) +
    theme_bw() +
    # BUG FIX: label the plotted measure; "Unexplained deviance" was a
    # copy-paste leftover from multispeciesPP_dev_plot().
    ylab(ifelse(measure == "auc", "AUC", "Pearson correlation")) +
    theme(panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          axis.title.x=element_blank(),
          axis.ticks.x=element_blank(),
          axis.text.x=element_blank(),
          axis.text=element_text(size=14),
          #strip.text.x = element_blank(),
          axis.title.y = element_text(size = 14),
          #legend.position="none",
          plot.margin=unit(c(0,1,1,1), "cm")
    )
}
|
## Chum salmon: relative size vs injury, binned raw data.
## Requires `final.fish` (master catch table) in the workspace; writes
## binned injury proportions with 95% CIs to chum_rs_raw.csv.
chum <- final.fish[ which(final.fish$Species=='Chum'), ]
myvars <- c("ID_code", "Length_TSFT_mm", "Chum_total", "total", "Injury","Sex", "Ocean_age", "Set_time")
chum <- chum[myvars]
# make sure data makes sense: keep only sets that actually caught chum
chum <- chum[ which(chum$Chum_total > 0), ]
# create unique group id per ID_code.
# FIX: derive the id from the sorted ID_code instead of looping over a
# hard-coded row count (14993), which silently breaks when the data change.
chum <- chum[order(chum$ID_code), ]
id <- cumsum(!duplicated(chum$ID_code))
chum <- data.frame(chum,id)
#RELATIVE SIZE
# add mean size and relative size of group to each data entry.
# FIX: replace the hard-coded 930-group loop with tapply over the actual ids,
# and compute relative size vectorized instead of a 14993-iteration loop.
group.means <- tapply(chum$Length_TSFT_mm, chum$id, mean)
temp <- data.frame(id = as.numeric(names(group.means)),
                   mean.size = as.vector(group.means))
chum <- merge(chum, temp, by="id")
chum$relative.size <- chum$Length_TSFT_mm / chum$mean.size
# plot raw data for fish relative size
# normal bins: injury proportion, mean relative size, and CI half-width
bin.y <- vector(mode="numeric", length=4)
error <- vector(mode="numeric", length=4)
bin.x <- vector(mode="numeric", length=4)
# remove outliers (inspect first, then trim large relative sizes)
plot(chum$relative.size, chum$Injury)
no.injury <- chum$relative.size[ which(chum$Injury=="0")]
boxplot.stats(no.injury)
hist(no.injury)
# FIX: compare against numeric 1.7, not the string "1.7" — a numeric-vs-
# character comparison coerces to character and compares lexicographically
# (e.g. 10 < "1.7" is TRUE), which is not the intended threshold.
chum <- chum[ which(chum$relative.size < 1.7),]
# bins: split the relative-size range into 4 equal-width bins.
# NOTE(review): the strict upper bound excludes the single maximum value
# from the last bin — confirm this is intended.
bin <- (max(chum$relative.size) - min(chum$relative.size))/4
for (i in 1:4){
  data <- chum[ which(chum$relative.size >= min(chum$relative.size)+((i-1)*bin) & chum$relative.size < min(chum$relative.size)+(i*bin)), ]
  # proportion injured, bin center, and t-based 95% CI half-width
  bin.y[i] <- sum(data$Injury)/length(data$Injury)
  bin.x[i] <- mean(data$relative.size)
  n <- length(data$Injury)
  error[i] <- qt(0.975,df=n-1)*sd(data$Injury)/sqrt(n)
}
lower <- bin.y-error
upper <- bin.y+error
raw <- cbind(bin.x, bin.y, bin.y-error, bin.y+error)
colnames(raw) <- c("x", "y", "ymin", "ymax")
raw <- as.data.frame(raw)
write.csv(raw, "chum_rs_raw.csv")
|
/predator/relative size/chum/chum_rs_raw.R
|
permissive
|
annepolyakov/PacificSalmonProject
|
R
| false
| false
| 1,950
|
r
|
## Chum salmon: relative size vs injury, binned raw data.
## Requires `final.fish` (master catch table) in the workspace; writes
## binned injury proportions with 95% CIs to chum_rs_raw.csv.
chum <- final.fish[ which(final.fish$Species=='Chum'), ]
myvars <- c("ID_code", "Length_TSFT_mm", "Chum_total", "total", "Injury","Sex", "Ocean_age", "Set_time")
chum <- chum[myvars]
# make sure data makes sense: keep only sets that actually caught chum
chum <- chum[ which(chum$Chum_total > 0), ]
# create unique group id per ID_code.
# FIX: derive the id from the sorted ID_code instead of looping over a
# hard-coded row count (14993), which silently breaks when the data change.
chum <- chum[order(chum$ID_code), ]
id <- cumsum(!duplicated(chum$ID_code))
chum <- data.frame(chum,id)
#RELATIVE SIZE
# add mean size and relative size of group to each data entry.
# FIX: replace the hard-coded 930-group loop with tapply over the actual ids,
# and compute relative size vectorized instead of a 14993-iteration loop.
group.means <- tapply(chum$Length_TSFT_mm, chum$id, mean)
temp <- data.frame(id = as.numeric(names(group.means)),
                   mean.size = as.vector(group.means))
chum <- merge(chum, temp, by="id")
chum$relative.size <- chum$Length_TSFT_mm / chum$mean.size
# plot raw data for fish relative size
# normal bins: injury proportion, mean relative size, and CI half-width
bin.y <- vector(mode="numeric", length=4)
error <- vector(mode="numeric", length=4)
bin.x <- vector(mode="numeric", length=4)
# remove outliers (inspect first, then trim large relative sizes)
plot(chum$relative.size, chum$Injury)
no.injury <- chum$relative.size[ which(chum$Injury=="0")]
boxplot.stats(no.injury)
hist(no.injury)
# FIX: compare against numeric 1.7, not the string "1.7" — a numeric-vs-
# character comparison coerces to character and compares lexicographically
# (e.g. 10 < "1.7" is TRUE), which is not the intended threshold.
chum <- chum[ which(chum$relative.size < 1.7),]
# bins: split the relative-size range into 4 equal-width bins.
# NOTE(review): the strict upper bound excludes the single maximum value
# from the last bin — confirm this is intended.
bin <- (max(chum$relative.size) - min(chum$relative.size))/4
for (i in 1:4){
  data <- chum[ which(chum$relative.size >= min(chum$relative.size)+((i-1)*bin) & chum$relative.size < min(chum$relative.size)+(i*bin)), ]
  # proportion injured, bin center, and t-based 95% CI half-width
  bin.y[i] <- sum(data$Injury)/length(data$Injury)
  bin.x[i] <- mean(data$relative.size)
  n <- length(data$Injury)
  error[i] <- qt(0.975,df=n-1)*sd(data$Injury)/sqrt(n)
}
lower <- bin.y-error
upper <- bin.y+error
raw <- cbind(bin.x, bin.y, bin.y-error, bin.y+error)
colnames(raw) <- c("x", "y", "ymin", "ymax")
raw <- as.data.frame(raw)
write.csv(raw, "chum_rs_raw.csv")
|
#####################################################################
##########       packages, data import, settings           ##########
#####################################################################
options(repr.plot.width = 7.5, repr.plot.height = 7.5)
library(tidyverse)
# load ML helper functions
source("https://raw.githubusercontent.com/HenrikEckermann/in_use/master/ml_helper.R")
# helper function for renaming vars specific to this project
source(here::here("R/helper.R"))
load(here::here("rdata/data.Rds"))
#####################################################################
##########                Random Forrest                   ##########
#####################################################################
# first test code on single imputed dataset
# create time point specific data sets in wide format:
# each dN keeps one cortisol time point as the outcome ("cortisol") and drops
# the columns belonging to the other time points.
#
# NOTE(review): in patterns like matches("postnatalweek_6|12") the
# alternation binds loosely — the regex is "(postnatalweek_6)|(12)", so it
# also drops ANY column whose name contains "12". Confirm this is intended;
# "postnatalweek_(6|12)" would target only the week columns. The "[212]" /
# "[26]" character classes used below behave differently (they match a
# single character), so the four blocks are not symmetric.
# y = week 2
d0 <- select(
  dw_imp,
  -cortisol_12,
  -cortisol_6,
  -matches("postnatalweek_6|12"),
  -matches("collectiontime_6|12"),
  -matches("interval_awake_6|12"),
  -matches("sharedactivities_caregiving_6|12"),
  -matches("cortisol_diff\\d+"),
  -matches("season_w6|12"),
  -matches("depression_w6|12")
) %>%
  rename(cortisol = cortisol_2)
# y = week 6 (week-2 cortisol kept as predictor "cortisol_pre")
d1 <- select(
  dw_imp,
  -cortisol_12,
  -matches("postnatalweek_[212]"),
  -matches("collectiontime_[212]"),
  -matches("interval_awake_[212]"),
  -matches("sharedactivities_caregiving_[212]"),
  -matches("cortisol_diff\\d+"),
  -matches("season_w[212]"),
  -matches("depression_w[212]")
) %>%
  rename(cortisol_pre = cortisol_2, cortisol = cortisol_6)
#x = week 6, y = week 12
d2 <- select(
  dw_imp,
  -cortisol_2,
  -matches("postnatalweek_[26]"),
  -matches("collectiontime_[26]"),
  -matches("interval_awake_[26]"),
  -matches("sharedactivities_caregiving_[26]"),
  -matches("cortisol_diff\\d+"),
  -matches("season_w[26]"),
  -matches("depression_w[26]")
) %>%
  rename(cortisol_pre = cortisol_6, cortisol = cortisol_12)
# x = week 2, y = week 12
d3 <- select(
  dw_imp,
  -cortisol_6,
  -matches("postnatalweek_[26]"),
  -matches("collectiontime_[26]"),
  -matches("interval_awake_[26]"),
  -matches("sharedactivities_caregiving_[26]"),
  -matches("cortisol_diff\\d+"),
  -matches("season_w[26]"),
  -matches("depression_w[26]")
) %>%
  rename(cortisol_pre = cortisol_2, cortisol = cortisol_12)
# RF pipeline
if (!file.exists(here::here("rdata/rf2_test.Rds"))) {
# fit models per timepoints and per imputation method
result <- map(list(d0, d1, d2, d3), function(d) {
y <- "cortisol"
# first base model with only the known predictors
X_null <- select(
d,
contains("cortisol_pre"),
contains("postnatal"),
contains("collection"),
contains("interval_awake")) %>%
colnames()
# tune RF hyperparameters
pars_null <- tune_rf(
d,
X_null,
y,
regression = TRUE,
iters = 70,
iters.warmup = 30,
ntree = 5000,
parameters = list(
replace = FALSE,
respect.unordered.factors = "order"
),
tune.parameters = c(
"mtry",
"min.node.size",
"sample.fraction"
),
show.info = getOption("mlrMBO.show.info", TRUE)
)
# fit model using above hyperparameters
model_null <- ranger(
x = select(d, all_of(X_null)),
y = d[[y]],
importance = "permutation",
num.tree = 5000,
mtry = pars_null$recommended.pars$mtry,
min.node.size = pars_null$recommended.pars$min.node.size,
sample.fraction = ifelse(
pars_null$recommended.pars$sample.fraction < 0.25,
0.25, pars_null$recommended.pars$sample.fraction)
)
# now the full models
X <- select(d, -id, -cortisol) %>%
colnames()
# tune RF hyperparameters
pars <- tune_rf(
d,
X,
y,
regression = TRUE,
iters = 70,
iters.warmup = 30,
ntree = 5000,
parameters = list(
replace = FALSE,
respect.unordered.factors = "order"
),
tune.parameters = c(
"mtry",
"min.node.size",
"sample.fraction"
),
show.info = getOption("mlrMBO.show.info", TRUE)
)
# fit model using above hyperparameters
model <- ranger(
x = select(d, all_of(X)),
y = d[[y]],
importance = "permutation",
num.tree = 5000,
mtry = pars$recommended.pars$mtry,
min.node.size = pars$recommended.pars$min.node.size,
sample.fraction = ifelse(
pars$recommended.pars$sample.fraction < 0.25,
0.25, pars$recommended.pars$sample.fraction)
)
list(
model_null = model_null,
pars_null = pars_null$recommended.pars,
plot_null = plot_importance(model_null),
model = model,
pars = pars$recommended.pars,
plot = plot_importance(model)
)
})
save(result, file = here::here("rdata/rf2_test.Rds"))
} else {
load(file = here::here("rdata/rf2_test.Rds"))
}
result
# the results indicate overfitting at least because of some of our candidate
# predictors. The base models predict better except for y = week 2.
# Nevertheless, we can compute variable importances to evaluate which
# predictors help and which carry no signal in them. But first lets see how it
# varies between imputations.
# Repeat the null/full pipeline on each of the 50 imputed data sets to see
# how the fit varies between imputations; each imputation's result is
# cached in its own file.
# FIX (as above): grouped regex alternations instead of mis-parenthesised
# "x_6|12" / "[212]" patterns, and ranger's `num.trees` spelled out.
impvariation <- map2(1:50, dw_imp_all, function(m, d_imp) {
  if (!file.exists(here::here(glue::glue("rdata/rf2_impvar{m}_out.Rds")))) {
    # create time point specific data sets in wide format
    # predicting at week 2
    d0 <- select(
      d_imp,
      -cortisol_12,
      -cortisol_6,
      -matches("postnatalweek_(6|12)"),
      -matches("collectiontime_(6|12)"),
      -matches("interval_awake_(6|12)"),
      -matches("sharedactivities_caregiving_(6|12)"),
      -matches("cortisol_diff\\d+"),
      -matches("season_w(6|12)"),
      -matches("depression_w(6|12)")
    ) %>%
      rename(cortisol = cortisol_2)
    # change 2 -> 6
    d1 <- select(
      d_imp,
      -cortisol_12,
      -matches("postnatalweek_(2|12)"),
      -matches("collectiontime_(2|12)"),
      -matches("interval_awake_(2|12)"),
      -matches("sharedactivities_caregiving_(2|12)"),
      -matches("cortisol_diff\\d+"),
      -matches("season_w(2|12)"),
      -matches("depression_w(2|12)")
    ) %>%
      rename(cortisol_pre = cortisol_2, cortisol = cortisol_6)
    # change 6 -> 12
    d2 <- select(
      d_imp,
      -cortisol_2,
      -matches("postnatalweek_(2|6)"),
      -matches("collectiontime_(2|6)"),
      -matches("interval_awake_(2|6)"),
      -matches("sharedactivities_caregiving_(2|6)"),
      -matches("cortisol_diff\\d+"),
      -matches("season_w(2|6)"),
      -matches("depression_w(2|6)")
    ) %>%
      rename(cortisol_pre = cortisol_6, cortisol = cortisol_12)
    # change 2 -> 12
    d3 <- select(
      d_imp,
      -cortisol_6,
      -matches("postnatalweek_(2|6)"),
      -matches("collectiontime_(2|6)"),
      -matches("interval_awake_(2|6)"),
      -matches("sharedactivities_caregiving_(2|6)"),
      -matches("cortisol_diff\\d+"),
      -matches("season_w(2|6)"),
      -matches("depression_w(2|6)")
    ) %>%
      rename(cortisol_pre = cortisol_2, cortisol = cortisol_12)
    # fit models per timepoints
    result <- map(list(d0, d1, d2, d3), function(d) {
      y <- "cortisol"
      # first model with only the known predictors
      X_null <- select(
        d,
        contains("cortisol_pre"),
        contains("postnatal"),
        contains("collection"),
        contains("interval_awake")) %>%
        colnames()
      # tune RF hyperparameters
      pars_null <- tune_rf(
        d,
        X_null,
        y,
        regression = TRUE,
        iters = 70,
        iters.warmup = 30,
        ntree = 5000,
        parameters = list(
          replace = FALSE,
          respect.unordered.factors = "order"
        ),
        tune.parameters = c(
          "mtry",
          "min.node.size",
          "sample.fraction"
        ),
        show.info = getOption("mlrMBO.show.info", TRUE)
      )
      # fit model using above hyperparameters
      model_null <- ranger(
        x = select(d, all_of(X_null)),
        y = d[[y]],
        importance = "permutation",
        num.trees = 5000,
        mtry = pars_null$recommended.pars$mtry,
        min.node.size = pars_null$recommended.pars$min.node.size,
        sample.fraction = ifelse(
          pars_null$recommended.pars$sample.fraction < 0.25,
          0.25, pars_null$recommended.pars$sample.fraction)
      )
      # now the full models
      X <- select(d, -id, -cortisol) %>%
        colnames()
      # tune RF hyperparameters
      pars <- tune_rf(
        d,
        X,
        y,
        regression = TRUE,
        iters = 70,
        iters.warmup = 30,
        ntree = 5000,
        parameters = list(
          replace = FALSE,
          respect.unordered.factors = "order"
        ),
        tune.parameters = c(
          "mtry",
          "min.node.size",
          "sample.fraction"
        ),
        show.info = getOption("mlrMBO.show.info", TRUE)
      )
      # fit model using above hyperparameters
      model <- ranger(
        x = select(d, all_of(X)),
        y = d[[y]],
        importance = "permutation",
        num.trees = 5000,
        mtry = pars$recommended.pars$mtry,
        min.node.size = pars$recommended.pars$min.node.size,
        sample.fraction = ifelse(
          pars$recommended.pars$sample.fraction < 0.25,
          0.25, pars$recommended.pars$sample.fraction)
      )
      list(
        model_null = model_null,
        pars_null = pars_null$recommended.pars,
        plot_null = plot_importance(model_null),
        model = model,
        pars = pars$recommended.pars,
        plot = plot_importance(model)
      )
    })
    save(result, file = here::here(glue::glue("rdata/rf2_impvar{m}_out.Rds")))
  } else {
    load(file = here::here(glue::glue("rdata/rf2_impvar{m}_out.Rds")))
  }
  result
})
# Summarise the out-of-bag R^2 of the null and full models per time point
# (num = 1..4), taking median and spread across the 50 imputations.
rsqmedians <- impvariation %>%
  map_dfr(function(fits) {
    imap_dfr(fits, function(models, idx) {
      tibble(
        num = idx,
        null = models$model_null$r.squared,
        full = models$model$r.squared
      )
    })
  }) %>%
  group_by(num) %>%
  summarise(
    mnull = median(null),
    sdnull = sd(null),
    mfull = median(full),
    sdfull = sd(full)
  )
rsqmedians
# Per time point, how much out-of-bag R^2 the full model gains (or loses)
# over the base model, summarised across imputations.
rsqdiff <- map_dfr(impvariation, function(fits) {
  imap_dfr(fits, function(models, idx) {
    tibble(
      num = idx,
      rsqdiff = models$model$r.squared - models$model_null$r.squared
    )
  })
})
rsqdiff %>%
  group_by(num) %>%
  summarise(rsq = median(rsqdiff), sd = sd(rsqdiff))
# across all the imputed datasets, the rsq difference indicates that the model
# that only has the known covariates fits the data slightly better.
# I think that this is because there are too many noisy variables together
# with a low sample size --> overfitting. Some of the variables might be
# predictive. Therefore, I stick with calculating pvalues of the models after
# which I will calculate variable importances.
#####################################################################
##########     calculate p values for RF models            ##########
#####################################################################
# Permutation null distribution of the OOB R^2: the outcome is shuffled,
# the whole pipeline (tuning + fitting) re-run and the R^2 recorded, for
# each permutation x imputation combination.
# FIX: results were saved as "nulldist2_{nperm}.Rds" but checked for and
# loaded as "nulldist2_{nperm}_{m}.Rds" — the cache was never hit and all
# 50 imputations overwrote the same file. The save now uses the same name
# that is checked/loaded. Regexes and `num.trees` fixed as elsewhere.
nperms <- 100
nulldist <- map_dfr(1:nperms, function(nperm) {
  map(1:50, function(m) {
    if (!file.exists(here::here(glue::glue("rdata/nulldist2_{nperm}_{m}.Rds")))) {
      d <- dw_imp_all[[m]]
      # create time point specific data sets in wide format
      # predicting at week 2
      d0 <- select(
        d,
        -cortisol_12,
        -cortisol_6,
        -matches("postnatalweek_(6|12)"),
        -matches("collectiontime_(6|12)"),
        -matches("interval_awake_(6|12)"),
        -matches("sharedactivities_caregiving_(6|12)"),
        -matches("cortisol_diff\\d+"),
        -matches("season_w(6|12)"),
        -matches("depression_w(6|12)")
      ) %>%
        rename(cortisol = cortisol_2)
      # change 2 -> 6
      d1 <- select(
        d,
        -cortisol_12,
        -matches("postnatalweek_(2|12)"),
        -matches("collectiontime_(2|12)"),
        -matches("interval_awake_(2|12)"),
        -matches("sharedactivities_caregiving_(2|12)"),
        -matches("cortisol_diff\\d+"),
        -matches("season_w(2|12)"),
        -matches("depression_w(2|12)")
      ) %>%
        rename(cortisol_pre = cortisol_2, cortisol = cortisol_6)
      # change 6 -> 12
      d2 <- select(
        d,
        -cortisol_2,
        -matches("postnatalweek_(2|6)"),
        -matches("collectiontime_(2|6)"),
        -matches("interval_awake_(2|6)"),
        -matches("sharedactivities_caregiving_(2|6)"),
        -matches("cortisol_diff\\d+"),
        -matches("season_w(2|6)"),
        -matches("depression_w(2|6)")
      ) %>%
        rename(cortisol_pre = cortisol_6, cortisol = cortisol_12)
      # change 2 -> 12
      d3 <- select(
        d,
        -cortisol_6,
        -matches("postnatalweek_(2|6)"),
        -matches("collectiontime_(2|6)"),
        -matches("interval_awake_(2|6)"),
        -matches("sharedactivities_caregiving_(2|6)"),
        -matches("cortisol_diff\\d+"),
        -matches("season_w(2|6)"),
        -matches("depression_w(2|6)")
      ) %>%
        rename(cortisol_pre = cortisol_2, cortisol = cortisol_12)
      # fit models per timepoints
      result <- map2_dfr(list(d0, d1, d2, d3), 1:4, function(df, num) {
        d <- df
        y <- "cortisol"
        X <- select(d, -id, -cortisol) %>%
          colnames()
        # break the X-y association by permuting the outcome
        d[[y]] <- sample(df[[y]], replace = FALSE)
        # tune RF hyperparameters
        pars <- tune_rf(
          d,
          X,
          y,
          regression = TRUE,
          iters = 70,
          iters.warmup = 30,
          ntree = 5000,
          parameters = list(
            replace = FALSE,
            respect.unordered.factors = "order"
          ),
          tune.parameters = c(
            "mtry",
            "min.node.size",
            "sample.fraction"
          ),
          show.info = getOption("mlrMBO.show.info", TRUE)
        )
        # fit model using above hyperparameters
        model <- ranger(
          x = select(d, all_of(X)),
          y = d[[y]],
          importance = "permutation",
          num.trees = 5000,
          mtry = pars$recommended.pars$mtry,
          min.node.size = pars$recommended.pars$min.node.size,
          sample.fraction = ifelse(
            pars$recommended.pars$sample.fraction < 0.25,
            0.25, pars$recommended.pars$sample.fraction)
        )
        rsq <- model$r.squared
        list(
          num = num,
          rsq = rsq
        )
      })
      save(result, file = here::here(glue::glue("rdata/nulldist2_{nperm}_{m}.Rds")))
    } else {
      load(file = here::here(glue::glue("rdata/nulldist2_{nperm}_{m}.Rds")))
    }
    result
  })
})
# Nest the permutation R^2 values per time point, then compute permutation
# p-values: the proportion of null R^2 values that reach or exceed the
# observed median R^2.
nulldist_nested <- nulldist %>% group_by(num) %>% nest()
# base models
map2(rsqmedians$mnull, nulldist_nested$data, function(rsq, dist) {
  mean(rsq <= dist$rsq)
})
# full models
map2(rsqmedians$mfull, nulldist_nested$data, function(rsq, dist) {
  mean(rsq <= dist$rsq)
})
#####################################################################
########## Caculate pvalues for features ##########
#####################################################################
# Altmann permutation p-values for the variable importances of the full
# models, per imputation (m) and time point (num); cached per imputation.
# FIX: dropped the unused `pars` local and the trailing empty argument in
# the importance_pvalues() call; regexes grouped as elsewhere.
featpm <- map2(1:50, dw_imp_all, function(m, d_imp) {
  if (!file.exists(here::here(glue::glue("rdata/rf2_altmann_m{m}_out.Rds")))) {
    # create time point specific data sets in wide format
    # predicting at week 2
    d0 <- select(
      d_imp,
      -cortisol_12,
      -cortisol_6,
      -matches("postnatalweek_(6|12)"),
      -matches("collectiontime_(6|12)"),
      -matches("interval_awake_(6|12)"),
      -matches("sharedactivities_caregiving_(6|12)"),
      -matches("cortisol_diff\\d+"),
      -matches("season_w(6|12)"),
      -matches("depression_w(6|12)")
    ) %>%
      rename(cortisol = cortisol_2)
    # change 2 -> 6
    d1 <- select(
      d_imp,
      -cortisol_12,
      -matches("postnatalweek_(2|12)"),
      -matches("collectiontime_(2|12)"),
      -matches("interval_awake_(2|12)"),
      -matches("sharedactivities_caregiving_(2|12)"),
      -matches("cortisol_diff\\d+"),
      -matches("season_w(2|12)"),
      -matches("depression_w(2|12)")
    ) %>%
      rename(cortisol_pre = cortisol_2, cortisol = cortisol_6)
    # change 6 -> 12
    d2 <- select(
      d_imp,
      -cortisol_2,
      -matches("postnatalweek_(2|6)"),
      -matches("collectiontime_(2|6)"),
      -matches("interval_awake_(2|6)"),
      -matches("sharedactivities_caregiving_(2|6)"),
      -matches("cortisol_diff\\d+"),
      -matches("season_w(2|6)"),
      -matches("depression_w(2|6)")
    ) %>%
      rename(cortisol_pre = cortisol_6, cortisol = cortisol_12)
    # change 2 -> 12
    d3 <- select(
      d_imp,
      -cortisol_6,
      -matches("postnatalweek_(2|6)"),
      -matches("collectiontime_(2|6)"),
      -matches("interval_awake_(2|6)"),
      -matches("sharedactivities_caregiving_(2|6)"),
      -matches("cortisol_diff\\d+"),
      -matches("season_w(2|6)"),
      -matches("depression_w(2|6)")
    ) %>%
      rename(cortisol_pre = cortisol_2, cortisol = cortisol_12)
    # fit models per timepoints and per imputation
    featp <- map2(list(d0, d1, d2, d3), 1:4, function(d, num) {
      # reuse the already-fitted full model for this imputation/time point
      model <- impvariation[[m]][[num]]$model
      pimp <- importance_pvalues(
        model,
        method = "altmann",
        num.permutations = 1000,
        data = select(d, -id),
        formula = cortisol ~ .
      )
      pimp
    })
    save(featp, file = here::here(glue::glue("rdata/rf2_altmann_m{m}_out.Rds")))
  } else {
    load(file = here::here(glue::glue("rdata/rf2_altmann_m{m}_out.Rds")))
  }
  featp
})
# find median importance value and corresponding p value;
# create table for paper
featp_average <- map2_dfr(1:50, featpm, function(m, x) {
  map2_dfr(1:4, x, function(num, d) {
    as.data.frame(d) %>%
      rownames_to_column("feature") %>%
      mutate(num = num, m = m)
  })
})
nums <- 1:3
# rename the variables and build one table per time point
# FIX: `runif(1, 50, n = 1)` relied on confusing named/positional argument
# mixing (it is runif(n = 1, min = 1, max = 50)); spelled out explicitly.
# The deprecated `across(..., round, 3)` extra-argument form was replaced
# by a lambda.
table_renamed <- map(nums, function(timepoint) {
  tbl <- featp_average %>% group_by(feature, num) %>%
    # since median of 50 value will be averaged I find the value closest to that
    # average value to get the correct pvalue, if there are several, i just pick
    # randomly
    mutate(
      median = median(importance),
      mediandist = abs(median - importance),
      # random tie-breaker: distance of m to one uniform draw on [1, 50]
      mdist = abs(m - runif(1, min = 1, max = 50))
    ) %>%
    filter(mediandist == min(mediandist)) %>%
    filter(mdist == min(mdist)) %>% # pick one out of duplicates randomly
    ungroup() %>%
    mutate(across(where(is.numeric), ~ round(.x, 3))) %>%
    arrange(num, pvalue, desc(importance)) %>%
    select(feature, importance, pvalue, num) %>%
    filter(num == timepoint) %>%
    rename_vars()
  colnames(tbl) <- str_to_title(colnames(tbl))
  tbl
})
save(table_renamed, file = here::here("rdata/tables.Rds"))
|
/R/random_forest.R
|
no_license
|
HenrikEckermann/pred_bmc2022
|
R
| false
| false
| 20,060
|
r
|
#####################################################################
########## packages, data import, settings ##########
#####################################################################
options(repr.plot.width = 7.5, repr.plot.height = 7.5)
library(tidyverse)
# load ML helper functions
source("https://raw.githubusercontent.com/HenrikEckermann/in_use/master/ml_helper.R")
# helper function for renaming vars specific to this project
source(here::here("R/helper.R"))
load(here::here("rdata/data.Rds"))
#####################################################################
########## Random Forrest ##########
#####################################################################
# first test code on single imputed dataset
# create time point specific data sets in wide format
# FIX: patterns like "postnatalweek_6|12" parse as "postnatalweek_6" OR a
# bare "12", so ANY column whose name contains "12" matched; "[212]" is the
# character class {1, 2}, not the weeks 2/12. Grouped alternations below
# match only the intended week suffixes.
# y = week 2
d0 <- select(
  dw_imp,
  -cortisol_12,
  -cortisol_6,
  -matches("postnatalweek_(6|12)"),
  -matches("collectiontime_(6|12)"),
  -matches("interval_awake_(6|12)"),
  -matches("sharedactivities_caregiving_(6|12)"),
  -matches("cortisol_diff\\d+"),
  -matches("season_w(6|12)"),
  -matches("depression_w(6|12)")
) %>%
  rename(cortisol = cortisol_2)
# y = week 6
d1 <- select(
  dw_imp,
  -cortisol_12,
  -matches("postnatalweek_(2|12)"),
  -matches("collectiontime_(2|12)"),
  -matches("interval_awake_(2|12)"),
  -matches("sharedactivities_caregiving_(2|12)"),
  -matches("cortisol_diff\\d+"),
  -matches("season_w(2|12)"),
  -matches("depression_w(2|12)")
) %>%
  rename(cortisol_pre = cortisol_2, cortisol = cortisol_6)
#x = week 6, y = week 12
d2 <- select(
  dw_imp,
  -cortisol_2,
  -matches("postnatalweek_(2|6)"),
  -matches("collectiontime_(2|6)"),
  -matches("interval_awake_(2|6)"),
  -matches("sharedactivities_caregiving_(2|6)"),
  -matches("cortisol_diff\\d+"),
  -matches("season_w(2|6)"),
  -matches("depression_w(2|6)")
) %>%
  rename(cortisol_pre = cortisol_6, cortisol = cortisol_12)
# x = week 2, y = week 12
d3 <- select(
  dw_imp,
  -cortisol_6,
  -matches("postnatalweek_(2|6)"),
  -matches("collectiontime_(2|6)"),
  -matches("interval_awake_(2|6)"),
  -matches("sharedactivities_caregiving_(2|6)"),
  -matches("cortisol_diff\\d+"),
  -matches("season_w(2|6)"),
  -matches("depression_w(2|6)")
) %>%
  rename(cortisol_pre = cortisol_2, cortisol = cortisol_12)
# RF pipeline
if (!file.exists(here::here("rdata/rf2_test.Rds"))) {
# fit models per timepoints and per imputation method
result <- map(list(d0, d1, d2, d3), function(d) {
y <- "cortisol"
# first base model with only the known predictors
X_null <- select(
d,
contains("cortisol_pre"),
contains("postnatal"),
contains("collection"),
contains("interval_awake")) %>%
colnames()
# tune RF hyperparameters
pars_null <- tune_rf(
d,
X_null,
y,
regression = TRUE,
iters = 70,
iters.warmup = 30,
ntree = 5000,
parameters = list(
replace = FALSE,
respect.unordered.factors = "order"
),
tune.parameters = c(
"mtry",
"min.node.size",
"sample.fraction"
),
show.info = getOption("mlrMBO.show.info", TRUE)
)
# fit model using above hyperparameters
model_null <- ranger(
x = select(d, all_of(X_null)),
y = d[[y]],
importance = "permutation",
num.tree = 5000,
mtry = pars_null$recommended.pars$mtry,
min.node.size = pars_null$recommended.pars$min.node.size,
sample.fraction = ifelse(
pars_null$recommended.pars$sample.fraction < 0.25,
0.25, pars_null$recommended.pars$sample.fraction)
)
# now the full models
X <- select(d, -id, -cortisol) %>%
colnames()
# tune RF hyperparameters
pars <- tune_rf(
d,
X,
y,
regression = TRUE,
iters = 70,
iters.warmup = 30,
ntree = 5000,
parameters = list(
replace = FALSE,
respect.unordered.factors = "order"
),
tune.parameters = c(
"mtry",
"min.node.size",
"sample.fraction"
),
show.info = getOption("mlrMBO.show.info", TRUE)
)
# fit model using above hyperparameters
model <- ranger(
x = select(d, all_of(X)),
y = d[[y]],
importance = "permutation",
num.tree = 5000,
mtry = pars$recommended.pars$mtry,
min.node.size = pars$recommended.pars$min.node.size,
sample.fraction = ifelse(
pars$recommended.pars$sample.fraction < 0.25,
0.25, pars$recommended.pars$sample.fraction)
)
list(
model_null = model_null,
pars_null = pars_null$recommended.pars,
plot_null = plot_importance(model_null),
model = model,
pars = pars$recommended.pars,
plot = plot_importance(model)
)
})
save(result, file = here::here("rdata/rf2_test.Rds"))
} else {
load(file = here::here("rdata/rf2_test.Rds"))
}
result
# the results indicate overfitting at least because of some of our candidate
# predictors. The base models predict better except for y = week 2.
# Nevertheless, we can compute variable importances to evaluate which
# predictors help and which carry no signal in them. But first lets see how it
# varies between imputations.
impvariation <- map2(1:50, dw_imp_all, function(m, d_imp) {
if (!file.exists(here::here(glue::glue("rdata/rf2_impvar{m}_out.Rds")))) {
# create time point specific data sets in wide format
# predicting at week 2
d0 <- select(
d_imp,
-cortisol_12,
-cortisol_6,
-matches("postnatalweek_6|12"),
-matches("collectiontime_6|12"),
-matches("interval_awake_6|12"),
-matches("sharedactivities_caregiving_6|12"),
-matches("cortisol_diff\\d+"),
-matches("season_w6|12"),
-matches("depression_w6|12")
) %>%
rename(cortisol = cortisol_2)
# change 2 -> 6
d1 <- select(
d_imp,
-cortisol_12,
-matches("postnatalweek_[212]"),
-matches("collectiontime_[212]"),
-matches("interval_awake_[212]"),
-matches("sharedactivities_caregiving_[212]"),
-matches("cortisol_diff\\d+"),
-matches("season_w[212]"),
-matches("depression_w[212]")
) %>%
rename(cortisol_pre = cortisol_2, cortisol = cortisol_6)
# change 6 -> 12
d2 <- select(
d_imp,
-cortisol_2,
-matches("postnatalweek_[26]"),
-matches("collectiontime_[26]"),
-matches("interval_awake_[26]"),
-matches("sharedactivities_caregiving_[26]"),
-matches("cortisol_diff\\d+"),
-matches("season_w[26]"),
-matches("depression_w[26]")
) %>%
rename(cortisol_pre = cortisol_6, cortisol = cortisol_12)
# change 2 -> 12
d3 <- select(
d_imp,
-cortisol_6,
-matches("postnatalweek_[26]"),
-matches("collectiontime_[26]"),
-matches("interval_awake_[26]"),
-matches("sharedactivities_caregiving_[26]"),
-matches("cortisol_diff\\d+"),
-matches("season_w[26]"),
-matches("depression_w[26]")
) %>%
rename(cortisol_pre = cortisol_2, cortisol = cortisol_12)
# fit models per timepoints
result <- map(list(d0, d1, d2, d3), function(d) {
y <- "cortisol"
# first model with only the known predictors
X_null <- select(
d,
contains("cortisol_pre"),
contains("postnatal"),
contains("collection"),
contains("interval_awake")) %>%
colnames()
# tune RF hyperparameters
pars_null <- tune_rf(
d,
X_null,
y,
regression = TRUE,
iters = 70,
iters.warmup = 30,
ntree = 5000,
parameters = list(
replace = FALSE,
respect.unordered.factors = "order"
),
tune.parameters = c(
"mtry",
"min.node.size",
"sample.fraction"
),
show.info = getOption("mlrMBO.show.info", TRUE)
)
# fit model using above hyperparameters
model_null <- ranger(
x = select(d, all_of(X_null)),
y = d[[y]],
importance = "permutation",
num.tree = 5000,
mtry = pars_null$recommended.pars$mtry,
min.node.size = pars_null$recommended.pars$min.node.size,
sample.fraction = ifelse(
pars_null$recommended.pars$sample.fraction < 0.25,
0.25, pars_null$recommended.pars$sample.fraction)
)
# now the full models
X <- select(d, -id, -cortisol) %>%
colnames()
# tune RF hyperparameters
pars <- tune_rf(
d,
X,
y,
regression = TRUE,
iters = 70,
iters.warmup = 30,
ntree = 5000,
parameters = list(
replace = FALSE,
respect.unordered.factors = "order"
),
tune.parameters = c(
"mtry",
"min.node.size",
"sample.fraction"
),
show.info = getOption("mlrMBO.show.info", TRUE)
)
# fit model using above hyperparameters
model <- ranger(
x = select(d, all_of(X)),
y = d[[y]],
importance = "permutation",
num.tree = 5000,
mtry = pars$recommended.pars$mtry,
min.node.size = pars$recommended.pars$min.node.size,
sample.fraction = ifelse(
pars$recommended.pars$sample.fraction < 0.25,
0.25, pars$recommended.pars$sample.fraction)
)
list(
model_null = model_null,
pars_null = pars_null$recommended.pars,
plot_null = plot_importance(model_null),
model = model,
pars = pars$recommended.pars,
plot = plot_importance(model)
)
})
save(result, file = here::here(glue::glue("rdata/rf2_impvar{m}_out.Rds")))
} else {
load(file = here::here(glue::glue("rdata/rf2_impvar{m}_out.Rds")))
}
result
})
# summarise accuracy scores
rsqmedians <- map_dfr(impvariation, function(listobj) {
map2_dfr(1:4, listobj, function(num, models) {
rsqmediannull <- models$model_null$r.squared
rsqmedianfull <- models$model$r.squared
tibble(num = num, null = rsqmediannull, full = rsqmedianfull)
})}) %>%
group_by(num) %>%
summarise(
mnull = median(null),
sdnull = sd(null),
mfull = median(full),
sdfull = sd(full)
)
rsqmedians
# calculate difference between base and full models
rsqdiff <- map_dfr(impvariation, function(listobj) {
map2_dfr(1:4, listobj, function(num, models) {
rsqdiff <- models$model$r.squared - models$model_null$r.squared
tibble(num = num, rsqdiff = rsqdiff)
})
})
rsqdiff %>% group_by(num) %>%
summarise(rsq = median(rsqdiff), sd = sd(rsqdiff))
# across all the imputed datasets, the rsq difference indicates that the model
# that only has the known covariates fits the data slightly better.
# I think that this is because there are too many noisy variables together
# with a low sample size --> overfitting. Some of the variables might be
# predictive. Therefore, I stick with calculating pvalues of the models after
# which I will calculate variable importances.
#####################################################################
##########     calculate p values for RF models            ##########
#####################################################################
# Permutation null distribution of the OOB R^2: the outcome is shuffled,
# the whole pipeline (tuning + fitting) re-run and the R^2 recorded, for
# each permutation x imputation combination.
# FIX: results were saved as "nulldist2_{nperm}.Rds" but checked for and
# loaded as "nulldist2_{nperm}_{m}.Rds" — the cache was never hit and all
# 50 imputations overwrote the same file. The save now uses the same name
# that is checked/loaded. Regexes and `num.trees` fixed as elsewhere.
nperms <- 100
nulldist <- map_dfr(1:nperms, function(nperm) {
  map(1:50, function(m) {
    if (!file.exists(here::here(glue::glue("rdata/nulldist2_{nperm}_{m}.Rds")))) {
      d <- dw_imp_all[[m]]
      # create time point specific data sets in wide format
      # predicting at week 2
      d0 <- select(
        d,
        -cortisol_12,
        -cortisol_6,
        -matches("postnatalweek_(6|12)"),
        -matches("collectiontime_(6|12)"),
        -matches("interval_awake_(6|12)"),
        -matches("sharedactivities_caregiving_(6|12)"),
        -matches("cortisol_diff\\d+"),
        -matches("season_w(6|12)"),
        -matches("depression_w(6|12)")
      ) %>%
        rename(cortisol = cortisol_2)
      # change 2 -> 6
      d1 <- select(
        d,
        -cortisol_12,
        -matches("postnatalweek_(2|12)"),
        -matches("collectiontime_(2|12)"),
        -matches("interval_awake_(2|12)"),
        -matches("sharedactivities_caregiving_(2|12)"),
        -matches("cortisol_diff\\d+"),
        -matches("season_w(2|12)"),
        -matches("depression_w(2|12)")
      ) %>%
        rename(cortisol_pre = cortisol_2, cortisol = cortisol_6)
      # change 6 -> 12
      d2 <- select(
        d,
        -cortisol_2,
        -matches("postnatalweek_(2|6)"),
        -matches("collectiontime_(2|6)"),
        -matches("interval_awake_(2|6)"),
        -matches("sharedactivities_caregiving_(2|6)"),
        -matches("cortisol_diff\\d+"),
        -matches("season_w(2|6)"),
        -matches("depression_w(2|6)")
      ) %>%
        rename(cortisol_pre = cortisol_6, cortisol = cortisol_12)
      # change 2 -> 12
      d3 <- select(
        d,
        -cortisol_6,
        -matches("postnatalweek_(2|6)"),
        -matches("collectiontime_(2|6)"),
        -matches("interval_awake_(2|6)"),
        -matches("sharedactivities_caregiving_(2|6)"),
        -matches("cortisol_diff\\d+"),
        -matches("season_w(2|6)"),
        -matches("depression_w(2|6)")
      ) %>%
        rename(cortisol_pre = cortisol_2, cortisol = cortisol_12)
      # fit models per timepoints
      result <- map2_dfr(list(d0, d1, d2, d3), 1:4, function(df, num) {
        d <- df
        y <- "cortisol"
        X <- select(d, -id, -cortisol) %>%
          colnames()
        # break the X-y association by permuting the outcome
        d[[y]] <- sample(df[[y]], replace = FALSE)
        # tune RF hyperparameters
        pars <- tune_rf(
          d,
          X,
          y,
          regression = TRUE,
          iters = 70,
          iters.warmup = 30,
          ntree = 5000,
          parameters = list(
            replace = FALSE,
            respect.unordered.factors = "order"
          ),
          tune.parameters = c(
            "mtry",
            "min.node.size",
            "sample.fraction"
          ),
          show.info = getOption("mlrMBO.show.info", TRUE)
        )
        # fit model using above hyperparameters
        model <- ranger(
          x = select(d, all_of(X)),
          y = d[[y]],
          importance = "permutation",
          num.trees = 5000,
          mtry = pars$recommended.pars$mtry,
          min.node.size = pars$recommended.pars$min.node.size,
          sample.fraction = ifelse(
            pars$recommended.pars$sample.fraction < 0.25,
            0.25, pars$recommended.pars$sample.fraction)
        )
        rsq <- model$r.squared
        list(
          num = num,
          rsq = rsq
        )
      })
      save(result, file = here::here(glue::glue("rdata/nulldist2_{nperm}_{m}.Rds")))
    } else {
      load(file = here::here(glue::glue("rdata/nulldist2_{nperm}_{m}.Rds")))
    }
    result
  })
})
nulldist_nested <- nulldist %>% group_by(num) %>% nest()
# base models
map2(rsqmedians$mnull, nulldist_nested$data, function(rsq, dist) {
mean(rsq <= dist$rsq)
})
map2(rsqmedians$mfull, nulldist_nested$data, function(rsq, dist) {
mean(rsq <= dist$rsq)
})
#####################################################################
########## Caculate pvalues for features ##########
#####################################################################
featpm <- map2(1:50, dw_imp_all, function(m, d_imp) {
if (!file.exists(here::here(glue::glue("rdata/rf2_altmann_m{m}_out.Rds")))) {
# create time point specific data sets in wide format
# predicting at week 2
d0 <- select(
d_imp,
-cortisol_12,
-cortisol_6,
-matches("postnatalweek_6|12"),
-matches("collectiontime_6|12"),
-matches("interval_awake_6|12"),
-matches("sharedactivities_caregiving_6|12"),
-matches("cortisol_diff\\d+"),
-matches("season_w6|12"),
-matches("depression_w6|12")
) %>%
rename(cortisol = cortisol_2)
# change 2 -> 6
d1 <- select(
d_imp,
-cortisol_12,
-matches("postnatalweek_[212]"),
-matches("collectiontime_[212]"),
-matches("interval_awake_[212]"),
-matches("sharedactivities_caregiving_[212]"),
-matches("cortisol_diff\\d+"),
-matches("season_w[212]"),
-matches("depression_w[212]")
) %>%
rename(cortisol_pre = cortisol_2, cortisol = cortisol_6)
# change 6 -> 12
d2 <- select(
d_imp,
-cortisol_2,
-matches("postnatalweek_[26]"),
-matches("collectiontime_[26]"),
-matches("interval_awake_[26]"),
-matches("sharedactivities_caregiving_[26]"),
-matches("cortisol_diff\\d+"),
-matches("season_w[26]"),
-matches("depression_w[26]")
) %>%
rename(cortisol_pre = cortisol_6, cortisol = cortisol_12)
# change 2 -> 12
d3 <- select(
d_imp,
-cortisol_6,
-matches("postnatalweek_[26]"),
-matches("collectiontime_[26]"),
-matches("interval_awake_[26]"),
-matches("sharedactivities_caregiving_[26]"),
-matches("cortisol_diff\\d+"),
-matches("season_w[26]"),
-matches("depression_w[26]")
) %>%
rename(cortisol_pre = cortisol_2, cortisol = cortisol_12)
# fit models per timepoints and per imputation
featp <- map2(list(d0, d1, d2, d3), 1:4, function(d, num) {
model <- impvariation[[m]][[num]]$model
pars <- impvariation[[m]][[num]]$pars
pimp <- importance_pvalues(
model,
method = "altmann",
num.permutations = 1000,
data = select(d, -id),
formula = cortisol ~ .,
)
pimp
})
save(featp, file = here::here(glue::glue("rdata/rf2_altmann_m{m}_out.Rds")))
} else {
load(file = here::here(glue::glue("rdata/rf2_altmann_m{m}_out.Rds")))
}
featp
})
# find median importance value and corresponding p value;
# create table for paper
featp_average <- map2_dfr(1:50, featpm, function(m, x) {
map2_dfr(1:4, x, function(num, d) {
as.data.frame(d) %>%
rownames_to_column("feature") %>%
mutate(num = num, m = m)
})
})
nums <- 1:3
# rename the variables
table_renamed <- map(nums, function(timepoint) {
tbl <- featp_average %>% group_by(feature, num) %>%
# since median of 50 value will be averaged I find the value closest to that
# average value to get the correct pvalue, if there are several, i just pick
# randomly
mutate(
median = median(importance),
mediandist = abs(median - importance),
mdist = abs(m - runif(1, 50, n = 1))
) %>%
filter(mediandist == min(mediandist)) %>%
filter(mdist == min(mdist)) %>% # pick one out of duplicates randomly
ungroup() %>%
mutate(across(where(is.numeric), round, 3)) %>%
arrange(num, pvalue, desc(importance)) %>%
select(feature, importance, pvalue, num) %>%
filter(num == timepoint) %>%
rename_vars()
colnames(tbl) <- str_to_title(colnames(tbl))
tbl
})
save(table_renamed, file = here::here("rdata/tables.Rds"))
|
# desaparición forzada
library(tidyverse)
library(lubridate)
# RNPED (registry of missing/disappeared persons): read the common- and
# federal-jurisdiction tables, parse dates, strip the jurisdiction prefix
# from the column names and cache both objects.
desfor_comun <- read.csv("data/secretariado_rnped/rnped_comun.csv") %>%
  as_tibble() %>%
  mutate(
    fuerocomun_desapfecha_fmt = dmy(fuerocomun_desapfecha),
    date = ymd(date)
  )
names(desfor_comun) <- str_replace(names(desfor_comun), "fuerocomun_", "")
head(data.frame(desfor_comun))
desfor_federal <- read.csv("data/secretariado_rnped/rnped_federal.csv") %>%
  as_tibble() %>%
  mutate(
    fuerofederal_ultimafecha_fmt = dmy(fuerofederal_ultimafecha)
  )
names(desfor_federal) <- str_replace(names(desfor_federal), "fuerofederal_", "")
head(data.frame(desfor_federal))
cache("desfor_comun")
cache("desfor_federal")
|
/munge/01_rnped.R
|
no_license
|
CADSalud/imunic
|
R
| false
| false
| 707
|
r
|
# desaparición forzada
library(tidyverse)
library(lubridate)
# RNPED common-jurisdiction registry: parse disappearance and report dates
desfor_comun <- read.csv("data/secretariado_rnped/rnped_comun.csv") %>%
  as_tibble() %>%
  mutate(fuerocomun_desapfecha_fmt = dmy(fuerocomun_desapfecha),
         date = ymd(date))
# drop the jurisdiction prefix so both tables share bare column names
names(desfor_comun) <- str_replace(names(desfor_comun), "fuerocomun_", "")
desfor_comun %>% data.frame() %>% head()
# RNPED federal-jurisdiction registry: parse the last-seen date
desfor_federal <- read.csv("data/secretariado_rnped/rnped_federal.csv") %>%
  as_tibble() %>%
  mutate(fuerofederal_ultimafecha_fmt = dmy(fuerofederal_ultimafecha))
names(desfor_federal) <- str_replace(names(desfor_federal), "fuerofederal_", "")
desfor_federal %>% data.frame() %>% head()
# cache() is not defined here — presumably ProjectTemplate's cache();
# confirm against the project's setup
cache("desfor_comun")
cache("desfor_federal")
|
## Why not use assert_that() here? It's possibly a bit slow:
## microbenchmark(assert_that(is.numeric(1)), assert_numeric(1))
## Lazy evaluation saves us most of the time, but most of the time in
## assert_that is spent on carefully evaluating things. I'm open to
## moving to it.
## Raise an error (without a call in the message) unless `x` inherits from
## at least one of the classes listed in `what`.
assert_inherits <- function(x, what, name=deparse(substitute(x))) {
  ok <- inherits(x, what)
  if (ok) {
    return(invisible(NULL))
  }
  allowed <- paste(what, collapse=" / ")
  stop(sprintf("%s must be a %s", name, allowed), call.=FALSE)
}
## Error unless `x` is a function.
assert_function <- function(x, name=deparse(substitute(x))) {
  if (!is.function(x)) {
    stop(sprintf("%s must be a function", name), call.=FALSE)
  }
}
## Error unless `x` is NULL.
assert_null <- function(x, name=deparse(substitute(x))) {
  if (!is.null(x)) {
    stop(sprintf("%s must be NULL", name), call.=FALSE)
  }
}
## Error unless `x` is a list.
assert_list <- function(x, name=deparse(substitute(x))) {
  if (!is.list(x)) {
    stop(sprintf("%s must be a list", name), call.=FALSE)
  }
}
## Error if `x` is negative.  NOTE(review): `if (x < 0)` itself fails on NA
## or zero-length input; callers such as assert_size validate the type
## first -- confirm all other callers do too.
assert_nonnegative <- function(x, name=deparse(substitute(x))) {
  if (x < 0) {
    stop(sprintf("%s must be nonnegative", name), call.=FALSE)
  }
}
## Error unless `x` is numeric (integer or double).
assert_numeric <- function(x, name=deparse(substitute(x))) {
  if (!is.numeric(x)) {
    stop(sprintf("%s must be numeric", name), call.=FALSE)
  }
}
## Error unless `x` is a character vector.
assert_character <- function(x, name=deparse(substitute(x))) {
  if (!is.character(x)) {
    stop(sprintf("%s must be character", name), call.=FALSE)
  }
}
## Error unless `x` contains exactly `n` elements.
assert_length <- function(x, n, name=deparse(substitute(x))) {
  has_n <- length(x) == n
  if (!has_n) {
    msg <- sprintf("%s must have %d elements", name, n)
    stop(msg, call.=FALSE)
  }
}
## Error unless `x` is an integer vector, or (when `strict` is FALSE) a
## numeric vector whose values are all exactly representable as integers
## (within 1e-8).
##
## Fixed: previously a vector containing NA, NaN, or values outside the
## representable integer range made `max(...)` return NA, so `if (NA)`
## crashed with "missing value where TRUE/FALSE needed" instead of raising
## the intended assertion error.
assert_integer <- function(x, strict=FALSE, name=deparse(substitute(x))) {
  if (!(is.integer(x))) {
    usable_as_integer <- FALSE
    if (!strict && is.numeric(x)) {
      if (length(x) == 0) {
        ## An empty numeric vector carries no fractional values; accept it
        ## (the previous code accepted it too, albeit with a warning).
        usable_as_integer <- TRUE
      } else {
        ## suppressWarnings: as.integer() warns (and yields NA) for values
        ## outside integer range; that case now fails cleanly below.
        xi <- suppressWarnings(as.integer(x))
        usable_as_integer <- !anyNA(x) && !anyNA(xi) &&
          max(abs(xi - x)) < 1e-8
      }
    }
    if (!usable_as_integer) {
      stop(sprintf("%s must be integer", name), call.=FALSE)
    }
  }
}
## Useful for things handled with size_t, though these are passed
## through a function that will also warn. This function is preferred
## though as it generates more useful error messages -- the compiled
## one prevents crashes!
## Error unless `x` is a nonnegative integer (or, when `strict` is FALSE,
## a numeric holding an integer value).
assert_size <- function(x, strict=FALSE, name=deparse(substitute(x))) {
  assert_integer(x, strict, name)
  assert_nonnegative(x, name)
}
## Error unless `x` is a logical vector.
assert_logical <- function(x, name=deparse(substitute(x))) {
  if (!is.logical(x)) {
    stop(sprintf("%s must be logical", name), call.=FALSE)
  }
}
## Error unless `x` has length exactly 1 (any type).
assert_scalar <- function(x, name=deparse(substitute(x))) {
  if (length(x) != 1) {
    stop(sprintf("%s must be a scalar", name), call.=FALSE)
  }
}
## Error if `x` has length 0.
assert_nonempty <- function(x, name=deparse(substitute(x))) {
  if (length(x) == 0) {
    stop(sprintf("%s must not be empty", name), call.=FALSE)
  }
}
## Combined checks: each asserts length-1 first, then the type.  `name` is
## forwarded positionally so error messages report the caller's expression.
assert_scalar_list <- function(x, name=deparse(substitute(x))) {
  assert_scalar(x, name)
  assert_list(x, name)
}
assert_scalar_numeric <- function(x, name=deparse(substitute(x))) {
  assert_scalar(x, name)
  assert_numeric(x, name)
}
assert_scalar_integer <- function(x, strict=FALSE,
                                  name=deparse(substitute(x))) {
  assert_scalar(x, name)
  assert_integer(x, strict, name)
}
assert_scalar_logical <- function(x, name=deparse(substitute(x))) {
  assert_scalar(x, name)
  assert_logical(x, name)
}
assert_scalar_character <- function(x, name=deparse(substitute(x))) {
  assert_scalar(x, name)
  assert_character(x, name)
}
assert_scalar_size <- function(x, strict=FALSE,
                               name=deparse(substitute(x))) {
  assert_scalar(x, name)
  assert_size(x, strict, name)
}
## Error unless every element of `x` is named (no NULL names attribute and
## no empty-string names).
##
## Args:
##   empty_can_be_unnamed: when TRUE (default), a zero-length `x` may lack
##     names entirely.
##   unique_names: when TRUE (default), duplicated names are an error.
##     Fixed: this argument was previously accepted but ignored, so
##     duplicates always failed even with unique_names=FALSE.
##
## Also adds call.=FALSE to match every other assertion in this file.
assert_named <- function(x,
                         empty_can_be_unnamed=TRUE,
                         unique_names=TRUE,
                         name=deparse(substitute(x))) {
  nx <- names(x)
  if (is.null(nx) || any(nx == "")) {
    if (length(x) > 0 || !empty_can_be_unnamed) {
      stop(sprintf("%s must be named", name), call.=FALSE)
    }
  } else if (unique_names && any(duplicated(nx))) {
    stop(sprintf("%s must have unique names", name), call.=FALSE)
  }
}
|
/R/utils_assert.R
|
no_license
|
karthik/maker
|
R
| false
| false
| 3,944
|
r
|
## Why not use assert_that() here? It's possibly a bit slow:
## microbenchmark(assert_that(is.numeric(1)), assert_numeric(1))
## Lazy evaluation saves us most of the time, but most of the time in
## assert_that is spent on carefully evaluating things. I'm open to
## moving to it.
## Raise an error (without a call in the message) unless `x` inherits from
## at least one of the classes listed in `what`.
assert_inherits <- function(x, what, name=deparse(substitute(x))) {
  ok <- inherits(x, what)
  if (ok) {
    return(invisible(NULL))
  }
  allowed <- paste(what, collapse=" / ")
  stop(sprintf("%s must be a %s", name, allowed), call.=FALSE)
}
## Error unless `x` is a function.
assert_function <- function(x, name=deparse(substitute(x))) {
  if (!is.function(x)) {
    stop(sprintf("%s must be a function", name), call.=FALSE)
  }
}
## Error unless `x` is NULL.
assert_null <- function(x, name=deparse(substitute(x))) {
  if (!is.null(x)) {
    stop(sprintf("%s must be NULL", name), call.=FALSE)
  }
}
## Error unless `x` is a list.
assert_list <- function(x, name=deparse(substitute(x))) {
  if (!is.list(x)) {
    stop(sprintf("%s must be a list", name), call.=FALSE)
  }
}
## Error if `x` is negative.  NOTE(review): `if (x < 0)` itself fails on NA
## or zero-length input; callers such as assert_size validate the type
## first -- confirm all other callers do too.
assert_nonnegative <- function(x, name=deparse(substitute(x))) {
  if (x < 0) {
    stop(sprintf("%s must be nonnegative", name), call.=FALSE)
  }
}
## Error unless `x` is numeric (integer or double).
assert_numeric <- function(x, name=deparse(substitute(x))) {
  if (!is.numeric(x)) {
    stop(sprintf("%s must be numeric", name), call.=FALSE)
  }
}
## Error unless `x` is a character vector.
assert_character <- function(x, name=deparse(substitute(x))) {
  if (!is.character(x)) {
    stop(sprintf("%s must be character", name), call.=FALSE)
  }
}
## Error unless `x` contains exactly `n` elements.
assert_length <- function(x, n, name=deparse(substitute(x))) {
  has_n <- length(x) == n
  if (!has_n) {
    msg <- sprintf("%s must have %d elements", name, n)
    stop(msg, call.=FALSE)
  }
}
## Error unless `x` is an integer vector, or (when `strict` is FALSE) a
## numeric vector whose values are all exactly representable as integers
## (within 1e-8).
##
## Fixed: previously a vector containing NA, NaN, or values outside the
## representable integer range made `max(...)` return NA, so `if (NA)`
## crashed with "missing value where TRUE/FALSE needed" instead of raising
## the intended assertion error.
assert_integer <- function(x, strict=FALSE, name=deparse(substitute(x))) {
  if (!(is.integer(x))) {
    usable_as_integer <- FALSE
    if (!strict && is.numeric(x)) {
      if (length(x) == 0) {
        ## An empty numeric vector carries no fractional values; accept it
        ## (the previous code accepted it too, albeit with a warning).
        usable_as_integer <- TRUE
      } else {
        ## suppressWarnings: as.integer() warns (and yields NA) for values
        ## outside integer range; that case now fails cleanly below.
        xi <- suppressWarnings(as.integer(x))
        usable_as_integer <- !anyNA(x) && !anyNA(xi) &&
          max(abs(xi - x)) < 1e-8
      }
    }
    if (!usable_as_integer) {
      stop(sprintf("%s must be integer", name), call.=FALSE)
    }
  }
}
## Useful for things handled with size_t, though these are passed
## through a function that will also warn. This function is preferred
## though as it generates more useful error messages -- the compiled
## one prevents crashes!
## Error unless `x` is a nonnegative integer (or, when `strict` is FALSE,
## a numeric holding an integer value).
assert_size <- function(x, strict=FALSE, name=deparse(substitute(x))) {
  assert_integer(x, strict, name)
  assert_nonnegative(x, name)
}
## Error unless `x` is a logical vector.
assert_logical <- function(x, name=deparse(substitute(x))) {
  if (!is.logical(x)) {
    stop(sprintf("%s must be logical", name), call.=FALSE)
  }
}
## Error unless `x` has length exactly 1 (any type).
assert_scalar <- function(x, name=deparse(substitute(x))) {
  if (length(x) != 1) {
    stop(sprintf("%s must be a scalar", name), call.=FALSE)
  }
}
## Error if `x` has length 0.
assert_nonempty <- function(x, name=deparse(substitute(x))) {
  if (length(x) == 0) {
    stop(sprintf("%s must not be empty", name), call.=FALSE)
  }
}
## Combined checks: each asserts length-1 first, then the type.  `name` is
## forwarded positionally so error messages report the caller's expression.
assert_scalar_list <- function(x, name=deparse(substitute(x))) {
  assert_scalar(x, name)
  assert_list(x, name)
}
assert_scalar_numeric <- function(x, name=deparse(substitute(x))) {
  assert_scalar(x, name)
  assert_numeric(x, name)
}
assert_scalar_integer <- function(x, strict=FALSE,
                                  name=deparse(substitute(x))) {
  assert_scalar(x, name)
  assert_integer(x, strict, name)
}
assert_scalar_logical <- function(x, name=deparse(substitute(x))) {
  assert_scalar(x, name)
  assert_logical(x, name)
}
assert_scalar_character <- function(x, name=deparse(substitute(x))) {
  assert_scalar(x, name)
  assert_character(x, name)
}
assert_scalar_size <- function(x, strict=FALSE,
                               name=deparse(substitute(x))) {
  assert_scalar(x, name)
  assert_size(x, strict, name)
}
## Error unless every element of `x` is named (no NULL names attribute and
## no empty-string names).
##
## Args:
##   empty_can_be_unnamed: when TRUE (default), a zero-length `x` may lack
##     names entirely.
##   unique_names: when TRUE (default), duplicated names are an error.
##     Fixed: this argument was previously accepted but ignored, so
##     duplicates always failed even with unique_names=FALSE.
##
## Also adds call.=FALSE to match every other assertion in this file.
assert_named <- function(x,
                         empty_can_be_unnamed=TRUE,
                         unique_names=TRUE,
                         name=deparse(substitute(x))) {
  nx <- names(x)
  if (is.null(nx) || any(nx == "")) {
    if (length(x) > 0 || !empty_can_be_unnamed) {
      stop(sprintf("%s must be named", name), call.=FALSE)
    }
  } else if (unique_names && any(duplicated(nx))) {
    stop(sprintf("%s must have unique names", name), call.=FALSE)
  }
}
|
## Merge two RNA-Seq batches -- the old 330-sample batch (loaded from .RData)
## and the new 203-sample batch (read from CSV) -- aligning genes by row
## name, then save the combined 533-sample count and FPKM matrices.
### old 330 samples
dataPathRNASeq_old <- '/export/home/pfkuan/WTCproject/Epigenetics/Data/RNASeq/ProcessedData_RemovedDuplicates/'
load(file=paste(dataPathRNASeq_old,'clinical_rnaseq_16Aug2017.RData',sep=''))
load(paste(dataPathRNASeq_old,'GeneCounts.RData',sep='')) ### load genecounts
load(paste(dataPathRNASeq_old,'FPKM.RData',sep='')) ## load fpkm
### new 203 samples
## First CSV column holds the gene identifiers (compared against
## rownames(genecounts) below); drop it from the data matrices.
dat1 <- read.csv('/export/home/pfkuan/WTCproject/Epigenetics/Data/RNASeq/RQ-009443_Quantitation_data_hg19/Counts.csv',header=T)
dat2 <- dat1[,-1]
fpkm2 <- read.csv('/export/home/pfkuan/WTCproject/Epigenetics/Data/RNASeq/RQ-009443_Quantitation_data_hg19/RPKMedgeR.csv',header=T)
fpkm3 <- fpkm2[,-1]
#clinical1 <- cbind(c(1:533),data.frame(c(as.character(clinical.rnaseq$w_mrn),colnames(dat2))),rep(c(1,2),c(dim(clinical.rnaseq)[1],dim(dat2)[2])))
#colnames(clinical1) <- c('No','w_mrn','batch')
#write.csv(clinical1,'/export/home/pfkuan/WTCproject/Epigenetics/Data/RNASeq/Merged533Samples/clinicalVar533Samples.csv',row.names=F)
### checking if the rows of 330 samples matched with rows of 203 samples
# tmp <- cbind(rownames(genecounts),as.character(dat1[,1]))
# tmp1 <- cbind(rownames(fpkm),as.character(fpkm2[,1]))
sum(rownames(genecounts) %in% as.character(dat1[,1]))
sum(rownames(genecounts)!=as.character(dat1[,1])) ## this value is not 0, which means the two gene lists are not in the same order
sum(rownames(fpkm) %in% as.character(fpkm2[,1]))
sum(rownames(fpkm)!=as.character(fpkm2[,1])) ## this value is also not 0
### reorder the genes and combine the samples
## match() gives, for each old-batch gene, its row index in the new batch,
## so the new-batch columns are appended in the old batch's gene order.
match_counts = match(rownames(genecounts), as.character(dat1[,1]))
genecounts = cbind(genecounts, dat2[match_counts, ])
# dim(genecounts)
match_fpkm = match(rownames(fpkm), as.character(fpkm2[,1]))
fpkm = cbind(fpkm, fpkm3[match_fpkm, ])
# dim(fpkm)
## Align the FPKM matrix's gene order with the counts matrix (check = 0).
match_2 = match(rownames(genecounts), rownames(fpkm))
fpkm <- fpkm[match_2,]
sum(rownames(fpkm)!=rownames(genecounts))
dataPathRNASeq <- '/export/home/xurren/WTCProject/Data/'
save(genecounts,file=paste(dataPathRNASeq,'GeneCounts533.RData',sep=''))
save(fpkm,file=paste(dataPathRNASeq,'FPKM533.RData',sep=''))
#> dim(fpkm)
#[1] 25830 533
#> genecounts <- cbind(genecounts,dat2)
#> dim(genecounts)
#[1] 25830 533
### check if these samples are outliers: W15659, W29979, W26904
#id <- which(rownames(genecounts)=='FKBP5')
#genecounts[id,]
### note, use xcell to estimate the cell types on FPKM, store the estimated cell type matrix into dataPathRNASeq <- '/export/home/pfkuan/WTCproject/Epigenetics/Data/RNASeq/ProcessedData_533Samples/
|
/Renxu/ProcessRNASeqData_CombineBatch_08.25.2017.R
|
no_license
|
chang-che/Work
|
R
| false
| false
| 2,555
|
r
|
## Merge two RNA-Seq batches -- the old 330-sample batch (loaded from .RData)
## and the new 203-sample batch (read from CSV) -- aligning genes by row
## name, then save the combined 533-sample count and FPKM matrices.
### old 330 samples
dataPathRNASeq_old <- '/export/home/pfkuan/WTCproject/Epigenetics/Data/RNASeq/ProcessedData_RemovedDuplicates/'
load(file=paste(dataPathRNASeq_old,'clinical_rnaseq_16Aug2017.RData',sep=''))
load(paste(dataPathRNASeq_old,'GeneCounts.RData',sep='')) ### load genecounts
load(paste(dataPathRNASeq_old,'FPKM.RData',sep='')) ## load fpkm
### new 203 samples
## First CSV column holds the gene identifiers (compared against
## rownames(genecounts) below); drop it from the data matrices.
dat1 <- read.csv('/export/home/pfkuan/WTCproject/Epigenetics/Data/RNASeq/RQ-009443_Quantitation_data_hg19/Counts.csv',header=T)
dat2 <- dat1[,-1]
fpkm2 <- read.csv('/export/home/pfkuan/WTCproject/Epigenetics/Data/RNASeq/RQ-009443_Quantitation_data_hg19/RPKMedgeR.csv',header=T)
fpkm3 <- fpkm2[,-1]
#clinical1 <- cbind(c(1:533),data.frame(c(as.character(clinical.rnaseq$w_mrn),colnames(dat2))),rep(c(1,2),c(dim(clinical.rnaseq)[1],dim(dat2)[2])))
#colnames(clinical1) <- c('No','w_mrn','batch')
#write.csv(clinical1,'/export/home/pfkuan/WTCproject/Epigenetics/Data/RNASeq/Merged533Samples/clinicalVar533Samples.csv',row.names=F)
### checking if the rows of 330 samples matched with rows of 203 samples
# tmp <- cbind(rownames(genecounts),as.character(dat1[,1]))
# tmp1 <- cbind(rownames(fpkm),as.character(fpkm2[,1]))
sum(rownames(genecounts) %in% as.character(dat1[,1]))
sum(rownames(genecounts)!=as.character(dat1[,1])) ## this value is not 0, which means the two gene lists are not in the same order
sum(rownames(fpkm) %in% as.character(fpkm2[,1]))
sum(rownames(fpkm)!=as.character(fpkm2[,1])) ## this value is also not 0
### reorder the genes and combine the samples
## match() gives, for each old-batch gene, its row index in the new batch,
## so the new-batch columns are appended in the old batch's gene order.
match_counts = match(rownames(genecounts), as.character(dat1[,1]))
genecounts = cbind(genecounts, dat2[match_counts, ])
# dim(genecounts)
match_fpkm = match(rownames(fpkm), as.character(fpkm2[,1]))
fpkm = cbind(fpkm, fpkm3[match_fpkm, ])
# dim(fpkm)
## Align the FPKM matrix's gene order with the counts matrix (check = 0).
match_2 = match(rownames(genecounts), rownames(fpkm))
fpkm <- fpkm[match_2,]
sum(rownames(fpkm)!=rownames(genecounts))
dataPathRNASeq <- '/export/home/xurren/WTCProject/Data/'
save(genecounts,file=paste(dataPathRNASeq,'GeneCounts533.RData',sep=''))
save(fpkm,file=paste(dataPathRNASeq,'FPKM533.RData',sep=''))
#> dim(fpkm)
#[1] 25830 533
#> genecounts <- cbind(genecounts,dat2)
#> dim(genecounts)
#[1] 25830 533
### check if these samples are outliers: W15659, W29979, W26904
#id <- which(rownames(genecounts)=='FKBP5')
#genecounts[id,]
### note, use xcell to estimate the cell types on FPKM, store the estimated cell type matrix into dataPathRNASeq <- '/export/home/pfkuan/WTCproject/Epigenetics/Data/RNASeq/ProcessedData_533Samples/
|
## Per-sample read counting against an exon BED file, run separately for the
## haplotype-1 and haplotype-2 BAM files of one TCGA sample.  `i` selects
## the sample (array-job style; this instance processes index 475).
## NOTE(review): file names contain "asCounts_hetSNP", suggesting
## allele-specific counts -- confirm with isoform::countReads documentation.
i = 475
library(isoform, lib.loc="/nas02/home/w/e/weisun/R/Rlibs/")
bedFile = "/nas02/home/w/e/weisun/research/data/human/Homo_sapiens.GRCh37.66.nonoverlap.exon.bed"
setwd("/lustre/scr/w/e/weisun/TCGA/bam/")
## List all haplotype-1 BAM files; sample names are derived by stripping the
## common suffix from each file name.
cmd = "ls *_asCounts_hetSNP_EA_hap1.bam"
ffs = system(cmd, intern=TRUE)
length(ffs)
head(ffs)
sams = gsub("_asCounts_hetSNP_EA_hap1.bam", "", ffs)
sam1 = sams[i]
cat(i, sam1, date(), "\n")
## Haplotype 1.
bamFile = ffs[i]
outFile = sprintf("%s_asCounts_hap1.txt", sam1)
countReads(bamFile, bedFile, outFile)
## Haplotype 2: same file name with "_hap1" swapped for "_hap2".
bamFile = gsub("_hap1", "_hap2", ffs[i], fixed=TRUE)
outFile = sprintf("%s_asCounts_hap2.txt", sam1)
countReads(bamFile, bedFile, outFile)
|
/data_preparation/R_batch3/_step3/step3_countReads_EA.474.R
|
no_license
|
jasa-acs/Mapping-Tumor-Specific-Expression-QTLs-in-Impure-Tumor-Samples
|
R
| false
| false
| 651
|
r
|
## Per-sample read counting against an exon BED file, run separately for the
## haplotype-1 and haplotype-2 BAM files of one TCGA sample.  `i` selects
## the sample (array-job style; this instance processes index 475).
## NOTE(review): file names contain "asCounts_hetSNP", suggesting
## allele-specific counts -- confirm with isoform::countReads documentation.
i = 475
library(isoform, lib.loc="/nas02/home/w/e/weisun/R/Rlibs/")
bedFile = "/nas02/home/w/e/weisun/research/data/human/Homo_sapiens.GRCh37.66.nonoverlap.exon.bed"
setwd("/lustre/scr/w/e/weisun/TCGA/bam/")
## List all haplotype-1 BAM files; sample names are derived by stripping the
## common suffix from each file name.
cmd = "ls *_asCounts_hetSNP_EA_hap1.bam"
ffs = system(cmd, intern=TRUE)
length(ffs)
head(ffs)
sams = gsub("_asCounts_hetSNP_EA_hap1.bam", "", ffs)
sam1 = sams[i]
cat(i, sam1, date(), "\n")
## Haplotype 1.
bamFile = ffs[i]
outFile = sprintf("%s_asCounts_hap1.txt", sam1)
countReads(bamFile, bedFile, outFile)
## Haplotype 2: same file name with "_hap1" swapped for "_hap2".
bamFile = gsub("_hap1", "_hap2", ffs[i], fixed=TRUE)
outFile = sprintf("%s_asCounts_hap2.txt", sam1)
countReads(bamFile, bedFile, outFile)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_max_mode_share_scenarios.R
\name{create_max_mode_share_scenarios}
\alias{create_max_mode_share_scenarios}
\title{Create scenarios defined by maximum mode share}
\usage{
create_max_mode_share_scenarios(trip_set)
}
\arguments{
\item{trip_set}{data frame, baseline scenario}
}
\value{
list of baseline scenario and five mode scenarios
}
\description{
Creates five scenarios where, in each one, the mode share is elevated to the maximum observed across the cities.
The scenario-modes are walking, cycling, car, motorcycle and bus
}
|
/man/create_max_mode_share_scenarios.Rd
|
no_license
|
CHUANKOUCONG/ITHIM-R
|
R
| false
| true
| 613
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_max_mode_share_scenarios.R
\name{create_max_mode_share_scenarios}
\alias{create_max_mode_share_scenarios}
\title{Create scenarios defined by maximum mode share}
\usage{
create_max_mode_share_scenarios(trip_set)
}
\arguments{
\item{trip_set}{data frame, baseline scenario}
}
\value{
list of baseline scenario and five mode scenarios
}
\description{
Creates five scenarios where, in each one, the mode share is elevated to the maximum observed across the cities.
The scenario-modes are walking, cycling, car, motorcycle and bus
}
|
#copied from Dprangle's gnk implementation
## Validate and normalise the parameters of the g-and-k distribution.
##
## Parameters can be supplied individually (A, B, g, k, optionally c) or
## packed into `theta`: a vector of length 4 (A, B, g, k) or 5
## (A, B, c, g, k), or a matrix with 4 or 5 columns laid out the same way.
##
## Fixed: `theta` now defaults to NULL.  Previously it had no default, so
## calling the function with individual parameters only failed with an
## "argument theta is missing" error even though the NULL case was handled.
## Scalar `&`/`|` in conditions also tightened to `&&`/`||`.
##
## Returns a data.frame with columns A, B, c, g, k of equal length, after
## checking B > 0 and k > -0.5.
check.params <-
function(A,B,g,k,c=0.8,theta=NULL){
  if (!is.null(theta)) {
    if (!is.matrix(theta)) {
      if (length(theta) == 4) {
        A <- theta[1]
        B <- theta[2]
        g <- theta[3]
        k <- theta[4]
      } else if (length(theta) == 5) {
        A <- theta[1]
        B <- theta[2]
        c <- theta[3]
        g <- theta[4]
        k <- theta[5]
      } else {
        stop("gk function called with wrong number of parameters")
      }
    } else {
      if (ncol(theta) == 4) {
        A <- theta[,1]
        B <- theta[,2]
        c <- rep(0.8, nrow(theta))
        g <- theta[,3]
        k <- theta[,4]
      } else if (ncol(theta) == 5) {
        A <- theta[,1]
        B <- theta[,2]
        c <- theta[,3]
        g <- theta[,4]
        k <- theta[,5]
      } else {
        stop("gk function called with wrong number of parameters")
      }
    }
  } else {
    ## Recycle a scalar c to match vectorised A/B/g/k input.
    if (length(c) == 1 && length(A) > 1) {
      c <- rep(c, length(A))
    }
  }
  if (length(B) != length(A) || length(c) != length(A) || length(g) != length(A) || length(k) != length(A)) stop("gk function called with parameters vectors of different lengths")
  if (any(B <= 0)) stop("gk functions require B>0")
  if (any(k <= -0.5)) stop("gk functions require k>-0.5")
  return(data.frame(A=A,B=B,c=c,g=g,k=k))
}
## Transform standard-normal draws `z` into g-and-k distributed values via
## the quantile function A + B*(1 + c*T)*(1 + z^2)^k * z, where
## T = (1 - exp(-g*z)) / (1 + exp(-g*z)) (algebraically tanh(g*z/2)).
## Parameters may be given individually or packed in `theta`; they must be
## length 1 or match length(z).  Infinite intermediate/inputs are special-
## cased to avoid NaN results.
z2gk <- function(z, A, B, g, k, c=0.8, theta=NULL){
  params <- check.params(A,B,g,k,c,theta)
  if (length(z) != length(params$A) & length(params$A) > 1) stop("Number of parameters supplied does not equal 1 or number of z values")
  temp <- exp(-params$g*z)
  infcases <- is.infinite(temp)
  temp[!infcases] <- (1-temp[!infcases])/(1+temp[!infcases])
  temp[infcases] <- -1 ##Otherwise we get NaNs
  temp <- params$A + params$B * (1+params$c*temp) * (1+z^2)^params$k * z
  temp <- ifelse(params$k<0 & is.infinite(z), z, temp) ##Otherwise get NaNs
  return(temp)
}
## Draw `n` samples from the g-and-k distribution by transforming N(0,1)
## draws through the quantile function z2gk().
rgk <-function(n, A, B, g, k, c=0.8, theta=NULL){
  ##nb No need to check parameters here, done in z2gk
  z <- rnorm(n)
  z2gk(z, A, B, g, k, c, theta)
}
|
/gk.R
|
no_license
|
nayyarv/ABCThesis
|
R
| false
| false
| 2,132
|
r
|
#copied from Dprangle's gnk implementation
## Validate and normalise the parameters of the g-and-k distribution.
##
## Parameters can be supplied individually (A, B, g, k, optionally c) or
## packed into `theta`: a vector of length 4 (A, B, g, k) or 5
## (A, B, c, g, k), or a matrix with 4 or 5 columns laid out the same way.
##
## Fixed: `theta` now defaults to NULL.  Previously it had no default, so
## calling the function with individual parameters only failed with an
## "argument theta is missing" error even though the NULL case was handled.
## Scalar `&`/`|` in conditions also tightened to `&&`/`||`.
##
## Returns a data.frame with columns A, B, c, g, k of equal length, after
## checking B > 0 and k > -0.5.
check.params <-
function(A,B,g,k,c=0.8,theta=NULL){
  if (!is.null(theta)) {
    if (!is.matrix(theta)) {
      if (length(theta) == 4) {
        A <- theta[1]
        B <- theta[2]
        g <- theta[3]
        k <- theta[4]
      } else if (length(theta) == 5) {
        A <- theta[1]
        B <- theta[2]
        c <- theta[3]
        g <- theta[4]
        k <- theta[5]
      } else {
        stop("gk function called with wrong number of parameters")
      }
    } else {
      if (ncol(theta) == 4) {
        A <- theta[,1]
        B <- theta[,2]
        c <- rep(0.8, nrow(theta))
        g <- theta[,3]
        k <- theta[,4]
      } else if (ncol(theta) == 5) {
        A <- theta[,1]
        B <- theta[,2]
        c <- theta[,3]
        g <- theta[,4]
        k <- theta[,5]
      } else {
        stop("gk function called with wrong number of parameters")
      }
    }
  } else {
    ## Recycle a scalar c to match vectorised A/B/g/k input.
    if (length(c) == 1 && length(A) > 1) {
      c <- rep(c, length(A))
    }
  }
  if (length(B) != length(A) || length(c) != length(A) || length(g) != length(A) || length(k) != length(A)) stop("gk function called with parameters vectors of different lengths")
  if (any(B <= 0)) stop("gk functions require B>0")
  if (any(k <= -0.5)) stop("gk functions require k>-0.5")
  return(data.frame(A=A,B=B,c=c,g=g,k=k))
}
## Transform standard-normal draws `z` into g-and-k distributed values via
## the quantile function A + B*(1 + c*T)*(1 + z^2)^k * z, where
## T = (1 - exp(-g*z)) / (1 + exp(-g*z)) (algebraically tanh(g*z/2)).
## Parameters may be given individually or packed in `theta`; they must be
## length 1 or match length(z).  Infinite intermediate/inputs are special-
## cased to avoid NaN results.
z2gk <- function(z, A, B, g, k, c=0.8, theta=NULL){
  params <- check.params(A,B,g,k,c,theta)
  if (length(z) != length(params$A) & length(params$A) > 1) stop("Number of parameters supplied does not equal 1 or number of z values")
  temp <- exp(-params$g*z)
  infcases <- is.infinite(temp)
  temp[!infcases] <- (1-temp[!infcases])/(1+temp[!infcases])
  temp[infcases] <- -1 ##Otherwise we get NaNs
  temp <- params$A + params$B * (1+params$c*temp) * (1+z^2)^params$k * z
  temp <- ifelse(params$k<0 & is.infinite(z), z, temp) ##Otherwise get NaNs
  return(temp)
}
## Draw `n` samples from the g-and-k distribution by transforming N(0,1)
## draws through the quantile function z2gk().
rgk <-function(n, A, B, g, k, c=0.8, theta=NULL){
  ##nb No need to check parameters here, done in z2gk
  z <- rnorm(n)
  z2gk(z, A, B, g, k, c, theta)
}
|
# Swaggy Jenkins
#
# Jenkins API clients generated from Swagger / Open API specification
#
# OpenAPI spec version: 1.1.1
# Contact: blah@cliffano.com
# Generated by: https://openapi-generator.tech
#' GithubScmlinks Class
#'
#' @field self
#' @field _class
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
## R6 model for the "links" object of a GitHub SCM API response.
## Fields:
##   `self`   - a Link object (serialised via its own toJSON()/fromJSON()).
##   `_class` - Jenkins class-identifier string.
GithubScmlinks <- R6::R6Class(
  'GithubScmlinks',
  public = list(
    `self` = NULL,
    `_class` = NULL,
    ## Constructor; both fields are optional.
    ## NOTE(review): the argument named `self` shadows R6's implicit `self`
    ## binding inside this function, so `self$`self` <- `self`` assigns a
    ## field on the *argument* object, not on this instance.  This matches
    ## the upstream generator's output; renaming the argument would break
    ## callers using `new(self = ...)` -- confirm against the generator.
    initialize = function(`self`, `_class`){
      if (!missing(`self`)) {
        stopifnot(R6::is.R6(`self`))
        self$`self` <- `self`
      }
      if (!missing(`_class`)) {
        stopifnot(is.character(`_class`), length(`_class`) == 1)
        self$`_class` <- `_class`
      }
    },
    ## Convert to a plain R list (not a JSON string).
    toJSON = function() {
      GithubScmlinksObject <- list()
      if (!is.null(self$`self`)) {
        GithubScmlinksObject[['self']] <- self$`self`$toJSON()
      }
      if (!is.null(self$`_class`)) {
        GithubScmlinksObject[['_class']] <- self$`_class`
      }
      GithubScmlinksObject
    },
    ## Populate this instance from a JSON string (fields are optional).
    fromJSON = function(GithubScmlinksJson) {
      GithubScmlinksObject <- jsonlite::fromJSON(GithubScmlinksJson)
      if (!is.null(GithubScmlinksObject$`self`)) {
        selfObject <- Link$new()
        selfObject$fromJSON(jsonlite::toJSON(GithubScmlinksObject$self, auto_unbox = TRUE))
        self$`self` <- selfObject
      }
      if (!is.null(GithubScmlinksObject$`_class`)) {
        self$`_class` <- GithubScmlinksObject$`_class`
      }
    },
    ## Serialise to a JSON string.
    ## Fixed: the "_class" value is now quoted (the previous format string
    ## interpolated a bare word, producing invalid JSON), and the nested
    ## Link list is serialised with jsonlite rather than via %s on a list.
    toJSONString = function() {
      sprintf(
        '{
           "self": %s,
           "_class": "%s"
        }',
        jsonlite::toJSON(self$`self`$toJSON(), auto_unbox = TRUE),
        self$`_class`
      )
    },
    ## Populate this instance from a JSON string (assumes both fields
    ## are present).
    fromJSONString = function(GithubScmlinksJson) {
      GithubScmlinksObject <- jsonlite::fromJSON(GithubScmlinksJson)
      LinkObject <- Link$new()
      self$`self` <- LinkObject$fromJSON(jsonlite::toJSON(GithubScmlinksObject$self, auto_unbox = TRUE))
      self$`_class` <- GithubScmlinksObject$`_class`
    }
  )
)
|
/clients/r/generated/R/GithubScmlinks.r
|
permissive
|
miao1007/swaggy-jenkins
|
R
| false
| false
| 2,014
|
r
|
# Swaggy Jenkins
#
# Jenkins API clients generated from Swagger / Open API specification
#
# OpenAPI spec version: 1.1.1
# Contact: blah@cliffano.com
# Generated by: https://openapi-generator.tech
#' GithubScmlinks Class
#'
#' @field self
#' @field _class
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
## R6 model for the "links" object of a GitHub SCM API response.
## Fields:
##   `self`   - a Link object (serialised via its own toJSON()/fromJSON()).
##   `_class` - Jenkins class-identifier string.
GithubScmlinks <- R6::R6Class(
  'GithubScmlinks',
  public = list(
    `self` = NULL,
    `_class` = NULL,
    ## Constructor; both fields are optional.
    ## NOTE(review): the argument named `self` shadows R6's implicit `self`
    ## binding inside this function, so `self$`self` <- `self`` assigns a
    ## field on the *argument* object, not on this instance.  This matches
    ## the upstream generator's output; renaming the argument would break
    ## callers using `new(self = ...)` -- confirm against the generator.
    initialize = function(`self`, `_class`){
      if (!missing(`self`)) {
        stopifnot(R6::is.R6(`self`))
        self$`self` <- `self`
      }
      if (!missing(`_class`)) {
        stopifnot(is.character(`_class`), length(`_class`) == 1)
        self$`_class` <- `_class`
      }
    },
    ## Convert to a plain R list (not a JSON string).
    toJSON = function() {
      GithubScmlinksObject <- list()
      if (!is.null(self$`self`)) {
        GithubScmlinksObject[['self']] <- self$`self`$toJSON()
      }
      if (!is.null(self$`_class`)) {
        GithubScmlinksObject[['_class']] <- self$`_class`
      }
      GithubScmlinksObject
    },
    ## Populate this instance from a JSON string (fields are optional).
    fromJSON = function(GithubScmlinksJson) {
      GithubScmlinksObject <- jsonlite::fromJSON(GithubScmlinksJson)
      if (!is.null(GithubScmlinksObject$`self`)) {
        selfObject <- Link$new()
        selfObject$fromJSON(jsonlite::toJSON(GithubScmlinksObject$self, auto_unbox = TRUE))
        self$`self` <- selfObject
      }
      if (!is.null(GithubScmlinksObject$`_class`)) {
        self$`_class` <- GithubScmlinksObject$`_class`
      }
    },
    ## Serialise to a JSON string.
    ## Fixed: the "_class" value is now quoted (the previous format string
    ## interpolated a bare word, producing invalid JSON), and the nested
    ## Link list is serialised with jsonlite rather than via %s on a list.
    toJSONString = function() {
      sprintf(
        '{
           "self": %s,
           "_class": "%s"
        }',
        jsonlite::toJSON(self$`self`$toJSON(), auto_unbox = TRUE),
        self$`_class`
      )
    },
    ## Populate this instance from a JSON string (assumes both fields
    ## are present).
    fromJSONString = function(GithubScmlinksJson) {
      GithubScmlinksObject <- jsonlite::fromJSON(GithubScmlinksJson)
      LinkObject <- Link$new()
      self$`self` <- LinkObject$fromJSON(jsonlite::toJSON(GithubScmlinksObject$self, auto_unbox = TRUE))
      self$`_class` <- GithubScmlinksObject$`_class`
    }
  )
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/InternalSimple_functions.R
\name{transpar}
\alias{transpar}
\title{Internal function: Transparent named colour}
\usage{
transpar(Colour, alpha = 100)
}
\arguments{
\item{Colour}{A colour name from colours() function which is desired in transparent form.}
\item{alpha}{The level of transparency from 1 (completely transparent) to 100 (completely opaque) that the returned colour should be.}
}
\value{
The transparent equivalent of a named colour
}
\description{
This function takes a named colour and returns the transparent equivalent
}
\author{
Ardern Hulme-Beaman
}
\keyword{colour}
\keyword{internal}
\keyword{transparency}
|
/man/transpar.Rd
|
no_license
|
ArdernHB/KnnDist
|
R
| false
| true
| 706
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/InternalSimple_functions.R
\name{transpar}
\alias{transpar}
\title{Internal function: Transparent named colour}
\usage{
transpar(Colour, alpha = 100)
}
\arguments{
\item{Colour}{A colour name from colours() function which is desired in transparent form.}
\item{alpha}{The level of transparency from 1 (completely transparent) to 100 (completely opaque) that the returned colour should be.}
}
\value{
The transparent equivalent of a named colour
}
\description{
This function takes a named colour and returns the transparent equivalent
}
\author{
Ardern Hulme-Beaman
}
\keyword{colour}
\keyword{internal}
\keyword{transparency}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/event-loop.R
\name{event_loop}
\alias{event_loop}
\title{Event loop}
\description{
Event loop
}
\section{Usage}{
\preformatted{el <- event_loop$new()
el$run_http(handle, callback)
el$run_delay(delay, callback)
}
}
\section{Arguments}{
\describe{
\item{handle}{A \code{curl} handle to use for the \code{HTTP} operation.}
\item{callback}{Callback function to call when the asynchronous
operation is done. See details below.}
\item{delay}{Number of seconds to delay the execution of the callback.}
}
}
\section{Details}{
\code{$run_http()} starts an asynchronous HTTP request, with the specified
\code{curl} handle. Once the request is done, and the response is available
(or an error happens), the callback is called with two arguments, the
error object or message (if any) and the \code{curl} response object.
\code{$run_delay()} starts a task with the specified delay.
}
\section{The default event loop}{
The \code{async} package creates a default event loop when it is loaded.
All asynchronous constructs use this event loop by default.
}
\keyword{internal}
|
/man/event_loop.Rd
|
permissive
|
strategist922/async-2
|
R
| false
| true
| 1,146
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/event-loop.R
\name{event_loop}
\alias{event_loop}
\title{Event loop}
\description{
Event loop
}
\section{Usage}{
\preformatted{el <- event_loop$new()
el$run_http(handle, callback)
el$run_delay(delay, callback)
}
}
\section{Arguments}{
\describe{
\item{handle}{A \code{curl} handle to use for the \code{HTTP} operation.}
\item{callback}{Callback function to call when the asynchronous
operation is done. See details below.}
\item{delay}{Number of seconds to delay the execution of the callback.}
}
}
\section{Details}{
\code{$run_http()} starts an asynchronous HTTP request, with the specified
\code{curl} handle. Once the request is done, and the response is available
(or an error happens), the callback is called with two arguments, the
error object or message (if any) and the \code{curl} response object.
\code{$run_delay()} starts a task with the specified delay.
}
\section{The default event loop}{
The \code{async} package creates a default event loop when it is loaded.
All asynchronous constructs use this event loop by default.
}
\keyword{internal}
|
library(testthat)  # library() errors loudly if testthat is absent; require() would not
context("Conversion from Solar Dates to Lunar Dates")
## NOTE(review): modernised from the deprecated expect_that(..., equals(...))
## spelling to current testthat expectations; the assertions themselves are
## unchanged (one literally duplicated expectation was dropped).
test_that("correct conversion of Solar Dates to Lunar Dates", {
  expect_equal(lunarCal(as.Date("1981-07-21")), c(Year = 1981, Month = 6, Day = 20, Leap = 0))
  expect_equal(lunarCal(as.Date("1987-06-26")), c(Year = 1987, Month = 6, Day = 1, Leap = 0))
  expect_equal(lunarCal(as.Date("1950-06-26")), c(Year = 1950, Month = 5, Day = 12, Leap = 0))
  expect_equal(lunarCal(as.Date("2099-12-31")), c(Year = 2099, Month = 11, Day = 20, Leap = 0))
})
test_that("correct conversion of Leap month", {
  expect_equal(lunarCal(as.Date("2012-06-04")), c(Year = 2012, Month = 4, Day = 15, Leap = 1))
  expect_equal(lunarCal(as.Date("1903-07-01")), c(Year = 1903, Month = 5, Day = 7, Leap = 1))
  expect_equal(lunarCal(as.Date("1922-06-30")), c(Year = 1922, Month = 5, Day = 6, Leap = 1))
  # 1995 had a leap 8th lunar month
  expect_equal(lunarCal(as.Date("1995-09-25")), c(Year = 1995, Month = 8, Day = 1, Leap = 1))
})
test_that("correct conversion of Year with Leap Month", {
  expect_equal(lunarCal(as.Date("2012-08-20")), c(Year = 2012, Month = 7, Day = 4, Leap = 0))
  expect_equal(lunarCal(as.Date("2011-01-05")), c(Year = 2010, Month = 12, Day = 2, Leap = 0))
})
test_that("Throw error when Solar Date is not in the supported range, or solarDate is not a POSIX", {
  expect_error(lunarCal(as.Date("1892-01-05")))
  expect_error(lunarCal(as.Date("2200-01-05")))
  expect_error(lunarCal(x = "2000-01-05"))  # not a date, but a string!
  expect_error(lunarCal(x = 123))
  expect_error(lunarCal(123))
})
test_that("Formatting of lunar date", {
  expect_match(lunarCal(x = as.Date("1981-07-21"), toString = TRUE), "辛酉年六月廿日")
  expect_match(lunarCal(x = as.Date("1981-07-21"), toString = TRUE, withZodiac = TRUE), "辛酉年六月廿日肖鷄")
  expect_match(lunarCal(x = as.Date("1995-09-25"), toString = TRUE), "乙亥年閏八月初一日")
  expect_match(lunarCal(x = as.Date("2011-01-05"), toString = TRUE), "庚寅年十二月初二日")
  expect_match(lunarCal(x = as.Date("1950-06-26"), toString = TRUE), "庚寅年五月十二日")
  expect_match(lunarCal(x = as.Date("2099-12-31"), toString = TRUE, withZodiac = TRUE), "己未年十一月廿日肖羊")
})
|
/tests/test_lunarCalSet1.R
|
no_license
|
chainsawriot/hongkong
|
R
| false
| false
| 2,460
|
r
|
library(testthat)  # library() errors loudly if testthat is absent; require() would not
context("Conversion from Solar Dates to Lunar Dates")
## NOTE(review): modernised from the deprecated expect_that(..., equals(...))
## spelling to current testthat expectations; the assertions themselves are
## unchanged (one literally duplicated expectation was dropped).
test_that("correct conversion of Solar Dates to Lunar Dates", {
  expect_equal(lunarCal(as.Date("1981-07-21")), c(Year = 1981, Month = 6, Day = 20, Leap = 0))
  expect_equal(lunarCal(as.Date("1987-06-26")), c(Year = 1987, Month = 6, Day = 1, Leap = 0))
  expect_equal(lunarCal(as.Date("1950-06-26")), c(Year = 1950, Month = 5, Day = 12, Leap = 0))
  expect_equal(lunarCal(as.Date("2099-12-31")), c(Year = 2099, Month = 11, Day = 20, Leap = 0))
})
test_that("correct conversion of Leap month", {
  expect_equal(lunarCal(as.Date("2012-06-04")), c(Year = 2012, Month = 4, Day = 15, Leap = 1))
  expect_equal(lunarCal(as.Date("1903-07-01")), c(Year = 1903, Month = 5, Day = 7, Leap = 1))
  expect_equal(lunarCal(as.Date("1922-06-30")), c(Year = 1922, Month = 5, Day = 6, Leap = 1))
  # 1995 had a leap 8th lunar month
  expect_equal(lunarCal(as.Date("1995-09-25")), c(Year = 1995, Month = 8, Day = 1, Leap = 1))
})
test_that("correct conversion of Year with Leap Month", {
  expect_equal(lunarCal(as.Date("2012-08-20")), c(Year = 2012, Month = 7, Day = 4, Leap = 0))
  expect_equal(lunarCal(as.Date("2011-01-05")), c(Year = 2010, Month = 12, Day = 2, Leap = 0))
})
test_that("Throw error when Solar Date is not in the supported range, or solarDate is not a POSIX", {
  expect_error(lunarCal(as.Date("1892-01-05")))
  expect_error(lunarCal(as.Date("2200-01-05")))
  expect_error(lunarCal(x = "2000-01-05"))  # not a date, but a string!
  expect_error(lunarCal(x = 123))
  expect_error(lunarCal(123))
})
test_that("Formatting of lunar date", {
  expect_match(lunarCal(x = as.Date("1981-07-21"), toString = TRUE), "辛酉年六月廿日")
  expect_match(lunarCal(x = as.Date("1981-07-21"), toString = TRUE, withZodiac = TRUE), "辛酉年六月廿日肖鷄")
  expect_match(lunarCal(x = as.Date("1995-09-25"), toString = TRUE), "乙亥年閏八月初一日")
  expect_match(lunarCal(x = as.Date("2011-01-05"), toString = TRUE), "庚寅年十二月初二日")
  expect_match(lunarCal(x = as.Date("1950-06-26"), toString = TRUE), "庚寅年五月十二日")
  expect_match(lunarCal(x = as.Date("2099-12-31"), toString = TRUE, withZodiac = TRUE), "己未年十一月廿日肖羊")
})
|
library(spdep)
### Name: spautolm
### Title: Spatial conditional and simultaneous autoregression model
### estimation
### Aliases: spautolm residuals.spautolm deviance.spautolm coef.spautolm
### fitted.spautolm print.spautolm summary.spautolm LR1.spautolm
### logLik.spautolm print.summary.spautolm
### Keywords: spatial
### ** Examples
## Not run:
##D if (require(foreign, quietly=TRUE)) {
##D example(NY_data, package="spData")
##D lm0 <- lm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME, data=nydata)
##D summary(lm0)
##D lm0w <- lm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME, data=nydata, weights=POP8)
##D summary(lm0w)
##D esar0 <- errorsarlm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME, data=nydata,
##D listw=listw_NY)
##D summary(esar0)
##D system.time(esar1f <- spautolm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME,
##D data=nydata, listw=listw_NY, family="SAR", method="eigen", verbose=TRUE))
##D res <- summary(esar1f)
##D print(res)
##D sqrt(diag(res$resvar))
##D sqrt(diag(esar1f$fit$imat)*esar1f$fit$s2)
##D sqrt(diag(esar1f$fdHess))
##D system.time(esar1M <- spautolm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME,
##D data=nydata, listw=listw_NY, family="SAR", method="Matrix", verbose=TRUE))
##D summary(esar1M)
##D system.time(esar1M <- spautolm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME,
##D data=nydata, listw=listw_NY, family="SAR", method="Matrix", verbose=TRUE,
##D control=list(super=TRUE)))
##D summary(esar1M)
##D esar1wf <- spautolm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME, data=nydata,
##D listw=listw_NY, weights=POP8, family="SAR", method="eigen")
##D summary(esar1wf)
##D system.time(esar1wM <- spautolm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME,
##D data=nydata, listw=listw_NY, weights=POP8, family="SAR", method="Matrix"))
##D summary(esar1wM)
##D esar1wlu <- spautolm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME, data=nydata,
##D listw=listw_NY, weights=POP8, family="SAR", method="LU")
##D summary(esar1wlu)
##D esar1wch <- spautolm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME, data=nydata,
##D listw=listw_NY, weights=POP8, family="SAR", method="Chebyshev")
##D summary(esar1wch)
##D ecar1f <- spautolm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME, data=nydata,
##D listw=listw_NY, family="CAR", method="eigen")
##D summary(ecar1f)
##D system.time(ecar1M <- spautolm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME,
##D data=nydata, listw=listw_NY, family="CAR", method="Matrix"))
##D summary(ecar1M)
##D ecar1wf <- spautolm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME, data=nydata,
##D listw=listw_NY, weights=nydata$POP8, family="CAR", method="eigen")
##D summary(ecar1wf)
##D system.time(ecar1wM <- spautolm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME,
##D data=nydata, listw=listw_NY, weights=POP8, family="CAR", method="Matrix"))
##D summary(ecar1wM)
##D }
## End(Not run)
# Worked example: SAR/CAR spatial error models for the North Carolina SIDS
# data (runs only when rgdal is available; data come from spData).
if (require(rgdal, quietly=TRUE)) {
example(nc.sids, package="spData")
# Freeman-Tukey square-root transform of the 1974 SIDS counts relative to
# births (variance-stabilising transform used throughout this example).
ft.SID74 <- sqrt(1000)*(sqrt(nc.sids$SID74/nc.sids$BIR74) +
sqrt((nc.sids$SID74+1)/nc.sids$BIR74))
# Intercept-only linear model; used below only to locate the largest outlier.
lm_nc <- lm(ft.SID74 ~ 1)
# Neighbours within distance 30 of each county centroid, plus the
# centroid-to-centroid distances for those neighbour pairs.
sids.nhbr30 <- dnearneigh(cbind(nc.sids$east, nc.sids$north), 0, 30, row.names=row.names(nc.sids))
sids.nhbr30.dist <- nbdists(sids.nhbr30, cbind(nc.sids$east, nc.sids$north))
sids.nhbr <- listw2sn(nb2listw(sids.nhbr30, glist=sids.nhbr30.dist, style="B", zero.policy=TRUE))
dij <- sids.nhbr[,3]
n <- nc.sids$BIR74
# Weights combine inverse relative distance (el1) with the square root of
# the ratio of birth counts between neighbouring counties (el2).
el1 <- min(dij)/dij
el2 <- sqrt(n[sids.nhbr$to]/n[sids.nhbr$from])
sids.nhbr$weights <- el1*el2
sids.nhbr.listw <- sn2listw(sids.nhbr)
# Interaction factor of the two region identifiers, used as a fixed effect.
both <- factor(paste(nc.sids$L_id, nc.sids$M_id, sep=":"))
# Same Freeman-Tukey transform applied to non-white births (covariate).
ft.NWBIR74 <- sqrt(1000)*(sqrt(nc.sids$NWBIR74/nc.sids$BIR74) +
sqrt((nc.sids$NWBIR74+1)/nc.sids$BIR74))
mdata <- data.frame(both, ft.NWBIR74, ft.SID74, BIR74=nc.sids$BIR74)
# Drop the single largest standardised residual; the ".4" objects below
# (data and weights matrix) are the outlier-free versions used for CAR fits.
outl <- which.max(rstandard(lm_nc))
as.character(nc.sids$names[outl])
mdata.4 <- mdata[-outl,]
W <- listw2mat(sids.nhbr.listw)
W.4 <- W[-outl, -outl]
sids.nhbr.listw.4 <- mat2listw(W.4)
# SAR error models: errorsarlm() and the equivalent spautolm() call.
esarI <- errorsarlm(ft.SID74 ~ 1, data=mdata, listw=sids.nhbr.listw,
zero.policy=TRUE)
summary(esarI)
esarIa <- spautolm(ft.SID74 ~ 1, data=mdata, listw=sids.nhbr.listw,
family="SAR")
summary(esarIa)
esarIV <- errorsarlm(ft.SID74 ~ ft.NWBIR74, data=mdata, listw=sids.nhbr.listw,
zero.policy=TRUE)
summary(esarIV)
esarIVa <- spautolm(ft.SID74 ~ ft.NWBIR74, data=mdata, listw=sids.nhbr.listw,
family="SAR")
summary(esarIVa)
# Weighted SAR fits (case weights = 1974 birth counts).
esarIaw <- spautolm(ft.SID74 ~ 1, data=mdata, listw=sids.nhbr.listw,
weights=BIR74, family="SAR")
summary(esarIaw)
esarIIaw <- spautolm(ft.SID74 ~ both - 1, data=mdata, listw=sids.nhbr.listw,
weights=BIR74, family="SAR")
summary(esarIIaw)
esarIVaw <- spautolm(ft.SID74 ~ ft.NWBIR74, data=mdata,
listw=sids.nhbr.listw, weights=BIR74, family="SAR")
summary(esarIVaw)
# Weighted CAR fits on the outlier-free data and matching weights matrix.
ecarIaw <- spautolm(ft.SID74 ~ 1, data=mdata.4, listw=sids.nhbr.listw.4,
weights=BIR74, family="CAR")
summary(ecarIaw)
ecarIIaw <- spautolm(ft.SID74 ~ both - 1, data=mdata.4,
listw=sids.nhbr.listw.4, weights=BIR74, family="CAR")
summary(ecarIIaw)
ecarIVaw <- spautolm(ft.SID74 ~ ft.NWBIR74, data=mdata.4,
listw=sids.nhbr.listw.4, weights=BIR74, family="CAR")
summary(ecarIVaw)
# Re-insert an NA at the dropped outlier so fitted values align with nc.sids.
nc.sids$fitIV <- append(fitted.values(ecarIVaw), NA, outl-1)
spplot(nc.sids, c("fitIV"), cuts=12) # Cressie 1993, p. 565
}
## Not run:
##D data(oldcol)
##D COL.errW.eig <- errorsarlm(CRIME ~ INC + HOVAL, data=COL.OLD,
##D nb2listw(COL.nb, style="W"))
##D summary(COL.errW.eig)
##D COL.errW.sar <- spautolm(CRIME ~ INC + HOVAL, data=COL.OLD,
##D nb2listw(COL.nb, style="W"))
##D summary(COL.errW.sar)
##D data(boston, package="spData")
##D gp1 <- spautolm(log(CMEDV) ~ CRIM + ZN + INDUS + CHAS + I(NOX^2)
##D + I(RM^2) + AGE + log(DIS) + log(RAD) + TAX + PTRATIO + B + log(LSTAT),
##D data=boston.c, nb2listw(boston.soi), family="SMA")
##D summary(gp1)
## End(Not run)
|
/data/genthat_extracted_code/spdep/examples/spautolm.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 5,687
|
r
|
library(spdep)
### Name: spautolm
### Title: Spatial conditional and simultaneous autoregression model
### estimation
### Aliases: spautolm residuals.spautolm deviance.spautolm coef.spautolm
### fitted.spautolm print.spautolm summary.spautolm LR1.spautolm
### logLik.spautolm print.summary.spautolm
### Keywords: spatial
### ** Examples
## Not run:
##D if (require(foreign, quietly=TRUE)) {
##D example(NY_data, package="spData")
##D lm0 <- lm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME, data=nydata)
##D summary(lm0)
##D lm0w <- lm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME, data=nydata, weights=POP8)
##D summary(lm0w)
##D esar0 <- errorsarlm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME, data=nydata,
##D listw=listw_NY)
##D summary(esar0)
##D system.time(esar1f <- spautolm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME,
##D data=nydata, listw=listw_NY, family="SAR", method="eigen", verbose=TRUE))
##D res <- summary(esar1f)
##D print(res)
##D sqrt(diag(res$resvar))
##D sqrt(diag(esar1f$fit$imat)*esar1f$fit$s2)
##D sqrt(diag(esar1f$fdHess))
##D system.time(esar1M <- spautolm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME,
##D data=nydata, listw=listw_NY, family="SAR", method="Matrix", verbose=TRUE))
##D summary(esar1M)
##D system.time(esar1M <- spautolm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME,
##D data=nydata, listw=listw_NY, family="SAR", method="Matrix", verbose=TRUE,
##D control=list(super=TRUE)))
##D summary(esar1M)
##D esar1wf <- spautolm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME, data=nydata,
##D listw=listw_NY, weights=POP8, family="SAR", method="eigen")
##D summary(esar1wf)
##D system.time(esar1wM <- spautolm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME,
##D data=nydata, listw=listw_NY, weights=POP8, family="SAR", method="Matrix"))
##D summary(esar1wM)
##D esar1wlu <- spautolm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME, data=nydata,
##D listw=listw_NY, weights=POP8, family="SAR", method="LU")
##D summary(esar1wlu)
##D esar1wch <- spautolm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME, data=nydata,
##D listw=listw_NY, weights=POP8, family="SAR", method="Chebyshev")
##D summary(esar1wch)
##D ecar1f <- spautolm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME, data=nydata,
##D listw=listw_NY, family="CAR", method="eigen")
##D summary(ecar1f)
##D system.time(ecar1M <- spautolm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME,
##D data=nydata, listw=listw_NY, family="CAR", method="Matrix"))
##D summary(ecar1M)
##D ecar1wf <- spautolm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME, data=nydata,
##D listw=listw_NY, weights=nydata$POP8, family="CAR", method="eigen")
##D summary(ecar1wf)
##D system.time(ecar1wM <- spautolm(Z ~ PEXPOSURE + PCTAGE65P + PCTOWNHOME,
##D data=nydata, listw=listw_NY, weights=POP8, family="CAR", method="Matrix"))
##D summary(ecar1wM)
##D }
## End(Not run)
# Worked example: SAR/CAR spatial error models for the North Carolina SIDS
# data (runs only when rgdal is available; data come from spData).
if (require(rgdal, quietly=TRUE)) {
example(nc.sids, package="spData")
# Freeman-Tukey square-root transform of the 1974 SIDS counts relative to
# births (variance-stabilising transform used throughout this example).
ft.SID74 <- sqrt(1000)*(sqrt(nc.sids$SID74/nc.sids$BIR74) +
sqrt((nc.sids$SID74+1)/nc.sids$BIR74))
# Intercept-only linear model; used below only to locate the largest outlier.
lm_nc <- lm(ft.SID74 ~ 1)
# Neighbours within distance 30 of each county centroid, plus the
# centroid-to-centroid distances for those neighbour pairs.
sids.nhbr30 <- dnearneigh(cbind(nc.sids$east, nc.sids$north), 0, 30, row.names=row.names(nc.sids))
sids.nhbr30.dist <- nbdists(sids.nhbr30, cbind(nc.sids$east, nc.sids$north))
sids.nhbr <- listw2sn(nb2listw(sids.nhbr30, glist=sids.nhbr30.dist, style="B", zero.policy=TRUE))
dij <- sids.nhbr[,3]
n <- nc.sids$BIR74
# Weights combine inverse relative distance (el1) with the square root of
# the ratio of birth counts between neighbouring counties (el2).
el1 <- min(dij)/dij
el2 <- sqrt(n[sids.nhbr$to]/n[sids.nhbr$from])
sids.nhbr$weights <- el1*el2
sids.nhbr.listw <- sn2listw(sids.nhbr)
# Interaction factor of the two region identifiers, used as a fixed effect.
both <- factor(paste(nc.sids$L_id, nc.sids$M_id, sep=":"))
# Same Freeman-Tukey transform applied to non-white births (covariate).
ft.NWBIR74 <- sqrt(1000)*(sqrt(nc.sids$NWBIR74/nc.sids$BIR74) +
sqrt((nc.sids$NWBIR74+1)/nc.sids$BIR74))
mdata <- data.frame(both, ft.NWBIR74, ft.SID74, BIR74=nc.sids$BIR74)
# Drop the single largest standardised residual; the ".4" objects below
# (data and weights matrix) are the outlier-free versions used for CAR fits.
outl <- which.max(rstandard(lm_nc))
as.character(nc.sids$names[outl])
mdata.4 <- mdata[-outl,]
W <- listw2mat(sids.nhbr.listw)
W.4 <- W[-outl, -outl]
sids.nhbr.listw.4 <- mat2listw(W.4)
# SAR error models: errorsarlm() and the equivalent spautolm() call.
esarI <- errorsarlm(ft.SID74 ~ 1, data=mdata, listw=sids.nhbr.listw,
zero.policy=TRUE)
summary(esarI)
esarIa <- spautolm(ft.SID74 ~ 1, data=mdata, listw=sids.nhbr.listw,
family="SAR")
summary(esarIa)
esarIV <- errorsarlm(ft.SID74 ~ ft.NWBIR74, data=mdata, listw=sids.nhbr.listw,
zero.policy=TRUE)
summary(esarIV)
esarIVa <- spautolm(ft.SID74 ~ ft.NWBIR74, data=mdata, listw=sids.nhbr.listw,
family="SAR")
summary(esarIVa)
# Weighted SAR fits (case weights = 1974 birth counts).
esarIaw <- spautolm(ft.SID74 ~ 1, data=mdata, listw=sids.nhbr.listw,
weights=BIR74, family="SAR")
summary(esarIaw)
esarIIaw <- spautolm(ft.SID74 ~ both - 1, data=mdata, listw=sids.nhbr.listw,
weights=BIR74, family="SAR")
summary(esarIIaw)
esarIVaw <- spautolm(ft.SID74 ~ ft.NWBIR74, data=mdata,
listw=sids.nhbr.listw, weights=BIR74, family="SAR")
summary(esarIVaw)
# Weighted CAR fits on the outlier-free data and matching weights matrix.
ecarIaw <- spautolm(ft.SID74 ~ 1, data=mdata.4, listw=sids.nhbr.listw.4,
weights=BIR74, family="CAR")
summary(ecarIaw)
ecarIIaw <- spautolm(ft.SID74 ~ both - 1, data=mdata.4,
listw=sids.nhbr.listw.4, weights=BIR74, family="CAR")
summary(ecarIIaw)
ecarIVaw <- spautolm(ft.SID74 ~ ft.NWBIR74, data=mdata.4,
listw=sids.nhbr.listw.4, weights=BIR74, family="CAR")
summary(ecarIVaw)
# Re-insert an NA at the dropped outlier so fitted values align with nc.sids.
nc.sids$fitIV <- append(fitted.values(ecarIVaw), NA, outl-1)
spplot(nc.sids, c("fitIV"), cuts=12) # Cressie 1993, p. 565
}
## Not run:
##D data(oldcol)
##D COL.errW.eig <- errorsarlm(CRIME ~ INC + HOVAL, data=COL.OLD,
##D nb2listw(COL.nb, style="W"))
##D summary(COL.errW.eig)
##D COL.errW.sar <- spautolm(CRIME ~ INC + HOVAL, data=COL.OLD,
##D nb2listw(COL.nb, style="W"))
##D summary(COL.errW.sar)
##D data(boston, package="spData")
##D gp1 <- spautolm(log(CMEDV) ~ CRIM + ZN + INDUS + CHAS + I(NOX^2)
##D + I(RM^2) + AGE + log(DIS) + log(RAD) + TAX + PTRATIO + B + log(LSTAT),
##D data=boston.c, nb2listw(boston.soi), family="SMA")
##D summary(gp1)
## End(Not run)
|
# Right-hand side of the vector-borne transmission ODE system. Called by the
# deSolve solver inside simulate_vectortransmission(); not intended for
# independent use. Returns the derivatives in the order (Sh, Ih, Rh, Sv, Iv).
vectortransmissioneq <- function(t, y, parms)
{
  # Expose state variables (y) and parameters (parms) by name.
  with(as.list(c(y, parms)), {
    # Host compartments
    dSh <- w * Rh - b1 * Sh * Iv   # susceptible hosts: waning immunity in, infection out
    dIh <- b1 * Sh * Iv - g * Ih   # infected hosts: infection in, recovery out
    dRh <- g * Ih - w * Rh         # recovered hosts: recovery in, waning immunity out
    # Vector compartments
    dSv <- m - n * Sv - b2 * Ih * Sv   # susceptible vectors: births minus deaths and infection
    dIv <- b2 * Ih * Sv - n * Iv       # infected vectors: infection in, death out
    list(c(dSh, dIh, dRh, dSv, dIv))
  })
}
#' Simulation of a compartmental infectious disease transmission model illustrating vector-borne transmission
#'
#' @description This model allows for the simulation of a vector-borne infectious disease
#'
#'
#' @param Sh0 initial number of susceptible hosts
#' @param Ih0 initial number of infected hosts
#' @param Sv0 initial number of susceptible vectors
#' @param Iv0 initial number of infected vectors
#' @param tmax maximum simulation time, units of months
#' @param b1 rate of transmission from infected vector to susceptible host
#' @param b2 rate of transmission from infected host to susceptible vector
#' @param m the rate of births of vectors
#' @param n the rate of natural death of vectors
#' @param g the rate at which infected hosts recover/die
#' @param w the rate at which host immunity wanes
#' @return This function returns the simulation result as obtained from a call
#' to the deSolve ode solver.
#' @details A compartmental ID model with several states/compartments
#' is simulated as a set of ordinary differential
#' equations. The compartments are Sh, Ih, Rh, and Sv, Iv.
#' The function returns the output from the odesolver as a matrix,
#' with one column per compartment/variable. The first column is time.
#' @section Warning:
#' This function does not perform any error checking. So if you try to do
#' something nonsensical (e.g. any negative values or fractions > 1),
#' the code will likely abort with an error message.
#' @examples
#' # To run the simulation with default parameters just call the function:
#' result <- simulate_vectortransmission()
#' # To choose parameter values other than the standard one, specify them like such:
#' result <- simulate_vectortransmission(Sh0 = 100, Sv0 = 1e5, tmax = 100)
#' # You should then use the simulation result returned from the function, like this:
#' plot(result$ts[ , "Time"],result$ts[ , "Sh"],xlab='Time',ylab='Number Susceptible',type='l')
#' @seealso The UI of the Shiny app 'VectorTransmission', which is part of this package, contains more details on the model.
#' @author Andreas Handel
#' @references See the information in the corresponding Shiny app for model details.
#' See the documentation for the deSolve package for details on ODE solvers.
#' @export
simulate_vectortransmission <- function(Sh0 = 1e3, Ih0 = 1, Sv0 = 0, Iv0 = 0, tmax = 120, b1 = 0.01, b2 = 0, m = 0, n = 0, g = 1, w = 0)
{
  ############################################################
  # Initial state vector; the recovered-host compartment always starts empty.
  init_state <- c(Sh = Sh0, Ih = Ih0, Rh = 0, Sv = Sv0, Iv = Iv0)
  # Reporting step: 0.1 time units, shrunk for very short runs so the solver
  # still returns about a thousand points (note that the integrator's own
  # internal step is chosen independently of this).
  report_step <- min(0.1, tmax / 1000)
  report_times <- seq(0, tmax, report_step)
  ############################################################
  # Parameters forwarded by name to the ODE right-hand side.
  ode_pars <- c(b1 = b1, b2 = b2, m = m, n = n, g = g, w = w)
  # Integrate the system; tight tolerances keep the trajectories accurate.
  # The output matrix is rebuilt on every call, so previous results are
  # overwritten. First column is time, then one column per compartment.
  sol <- deSolve::lsoda(init_state, report_times, func = vectortransmissioneq,
                        parms = ode_pars, atol = 1e-12, rtol = 1e-12)
  colnames(sol) <- c("Time", "Sh", "Ih", "Rh", "Sv", "Iv")
  # Return the time series as a data frame inside a result list.
  list(ts = as.data.frame(sol))
}
|
/inst/simulatorfunctions/simulate_vectortransmission.R
|
no_license
|
cgolden1993/DSAIDE
|
R
| false
| false
| 4,286
|
r
|
# Right-hand side of the vector-borne transmission ODE system. Called by the
# deSolve solver inside simulate_vectortransmission(); not intended for
# independent use. Returns the derivatives in the order (Sh, Ih, Rh, Sv, Iv).
vectortransmissioneq <- function(t, y, parms)
{
  # Expose state variables (y) and parameters (parms) by name.
  with(as.list(c(y, parms)), {
    # Host compartments
    dSh <- w * Rh - b1 * Sh * Iv   # susceptible hosts: waning immunity in, infection out
    dIh <- b1 * Sh * Iv - g * Ih   # infected hosts: infection in, recovery out
    dRh <- g * Ih - w * Rh         # recovered hosts: recovery in, waning immunity out
    # Vector compartments
    dSv <- m - n * Sv - b2 * Ih * Sv   # susceptible vectors: births minus deaths and infection
    dIv <- b2 * Ih * Sv - n * Iv       # infected vectors: infection in, death out
    list(c(dSh, dIh, dRh, dSv, dIv))
  })
}
#' Simulation of a compartmental infectious disease transmission model illustrating vector-borne transmission
#'
#' @description This model allows for the simulation of a vector-borne infectious disease
#'
#'
#' @param Sh0 initial number of susceptible hosts
#' @param Ih0 initial number of infected hosts
#' @param Sv0 initial number of susceptible vectors
#' @param Iv0 initial number of infected vectors
#' @param tmax maximum simulation time, units of months
#' @param b1 rate of transmission from infected vector to susceptible host
#' @param b2 rate of transmission from infected host to susceptible vector
#' @param m the rate of births of vectors
#' @param n the rate of natural death of vectors
#' @param g the rate at which infected hosts recover/die
#' @param w the rate at which host immunity wanes
#' @return This function returns the simulation result as obtained from a call
#' to the deSolve ode solver.
#' @details A compartmental ID model with several states/compartments
#' is simulated as a set of ordinary differential
#' equations. The compartments are Sh, Ih, Rh, and Sv, Iv.
#' The function returns the output from the odesolver as a matrix,
#' with one column per compartment/variable. The first column is time.
#' @section Warning:
#' This function does not perform any error checking. So if you try to do
#' something nonsensical (e.g. any negative values or fractions > 1),
#' the code will likely abort with an error message.
#' @examples
#' # To run the simulation with default parameters just call the function:
#' result <- simulate_vectortransmission()
#' # To choose parameter values other than the standard one, specify them like such:
#' result <- simulate_vectortransmission(Sh0 = 100, Sv0 = 1e5, tmax = 100)
#' # You should then use the simulation result returned from the function, like this:
#' plot(result$ts[ , "Time"],result$ts[ , "Sh"],xlab='Time',ylab='Number Susceptible',type='l')
#' @seealso The UI of the Shiny app 'VectorTransmission', which is part of this package, contains more details on the model.
#' @author Andreas Handel
#' @references See the information in the corresponding Shiny app for model details.
#' See the documentation for the deSolve package for details on ODE solvers.
#' @export
simulate_vectortransmission <- function(Sh0 = 1e3, Ih0 = 1, Sv0 = 0, Iv0 = 0, tmax = 120, b1 = 0.01, b2 = 0, m = 0, n = 0, g = 1, w = 0)
{
  ############################################################
  # Initial state vector; the recovered-host compartment always starts empty.
  init_state <- c(Sh = Sh0, Ih = Ih0, Rh = 0, Sv = Sv0, Iv = Iv0)
  # Reporting step: 0.1 time units, shrunk for very short runs so the solver
  # still returns about a thousand points (note that the integrator's own
  # internal step is chosen independently of this).
  report_step <- min(0.1, tmax / 1000)
  report_times <- seq(0, tmax, report_step)
  ############################################################
  # Parameters forwarded by name to the ODE right-hand side.
  ode_pars <- c(b1 = b1, b2 = b2, m = m, n = n, g = g, w = w)
  # Integrate the system; tight tolerances keep the trajectories accurate.
  # The output matrix is rebuilt on every call, so previous results are
  # overwritten. First column is time, then one column per compartment.
  sol <- deSolve::lsoda(init_state, report_times, func = vectortransmissioneq,
                        parms = ode_pars, atol = 1e-12, rtol = 1e-12)
  colnames(sol) <- c("Time", "Sh", "Ih", "Rh", "Sv", "Iv")
  # Return the time series as a data frame inside a result list.
  list(ts = as.data.frame(sol))
}
|
#' Write a metabolism modeling configuration file
#'
#' Write a table (tsv) of configuration information for individual metabolism
#' modeling jobs (one row/job per site-strategy combination). This tsv should
#' reflect the full information needed to re-run a set of jobs. The jobs will
#' probably, but not necessarily, be run on a Condor cluster.
#'
#' @section Data Source Format:
#'
#' Every parameter whose definition begins with Data Source should be supplied
#' as a 4-column data.frame with column names c('type','site','src','logic').
#' These specify where to find the data for a given variable. The easiest way
#' to create such a data.frame is usually with
#' \code{\link{choose_data_source}}, though it may also be created manually.
#'
#' The variables requiring Data Source specification, along with their
#' expected units, are defined in the help file for \code{\link{mm_data}}.
#'
#' @param tag character of form "1.0.2" that uniquely identifies this set of
#' modeling runs.
#' @param strategy character, or vector of length sites, describing this set of
#' modeling runs in concise English.
#' @param date POSIXct indicating the date of config construction. It is
#' strongly recommended to use the default.
#' @param model character. the name of the metabolism model to construct
#' @param model_args character, in R language, specifying any arguments to pass
#' to the model function
#' @param site site names
#' @param sitetime Data Source for mean solar time. See Data Source Format
#' below.
#' @param doobs Data Source for dissolved oxygen concentrations. See Data Source
#' Format below.
#' @param dosat Data Source for dissolved oxygen saturation concentrations. See
#' Data Source Format below.
#' @param depth Data Source for mean stream depth. See Data Source Format below.
#' @param wtr Data Source for water temperature. See Data Source Format below.
#' @param par Data Source for light (photosynthetically available radiation,
#' PAR). See Data Source Format below.
#' @param disch Data Source for unit-value stream discharge, for use in
#' identifying daily priors or fixed values for K600. See Data Source Format
#' below.
#' @param veloc Data Source for unit-value flow velocity, for use in identifying
#' daily priors or fixed values for K600. See Data Source Format below.
#' @param sitedate Data Source for the dates of interest. See Data Source Format
#' below.
#' @param doinit Data Source for the first DO observation on each date to model,
#' for use in data simulation. See Data Source Format below.
#' @param gpp Data Source for daily gross primary productivity rates for use in
#' data simulation. See Data Source Format below.
#' @param er Data Source for ecosystem respiration rates for use in data
#' simulation. See Data Source Format below.
#' @param K600 Data Source for reaeration rates for use in data simulation. See
#' Data Source Format below.
#' @param K600lwr Data Source for lower bound on reaeration rates for use in
#' data simulation. See Data Source Format below.
#' @param K600upr Data Source for upper bound on reaeration rates for use in
#' data simulation. See Data Source Format below.
#' @param dischdaily Data Source for daily mean stream discharge, for use in
#' identifying daily priors or fixed values for K600. See Data Source Format
#' below.
#' @param velocdaily Data Source for daily mean flow velocity, for use in
#' identifying daily priors or fixed values for K600. See Data Source Format
#' below.
#' @param start_date NA or datetime, to be coerced with
#' \code{as.POSIXct(start_date, tz="UTC")}, at which to start the data passed
#' to the metab model
#' @param end_date NA or datetime, to be coerced with \code{as.POSIXct(end_date,
#' tz="UTC")}, at which to end the data passed to the metab model
#' @param omit_incomplete logical. If one or more datasets required for the
#' specified config row is unavailable, should that row be omitted?
#' @param filename character or NULL. If NULL, the function returns a
#' data.frame, otherwise it writes that data.frame to the file specified by
#' filename.
#' @return file name of the config file
#' @import streamMetabolizer
#' @import dplyr
#' @importFrom utils write.table
#' @export
#' @examples
#' \dontrun{
#' login_sb()
#' site="nwis_01646000"
#' cfg <- stage_metab_config(tag="0.0.1", strategy="try stage_metab_config",
#' model="metab_mle", site=site, filename=NULL,
#' sitetime=choose_data_source("sitetime", site, logic="manual", src="calcLon", type="ts"),
#' doobs=choose_data_source("doobs", site, logic="unused var"),
#' dosat=choose_data_source("dosat", site, logic="unused var"),
#' depth=choose_data_source("depth", site, logic="unused var"),
#' wtr=choose_data_source("wtr", site, logic="unused var"),
#' par=choose_data_source("par", site, logic="unused var"),
#' K600=choose_data_source("K600", site, logic="nighttime", src="0.0.6", type="pred"),
#' dischdaily=choose_data_source("dischdaily", site, logic="manual", src="calcDMean", type="ts"),
#' velocdaily=choose_data_source("velocdaily", site, logic="manual", src="calcDMean", type="ts"),
#' omit_incomplete=FALSE)
#' stage_metab_config(tag="0.0.1", strategy="try stage_metab_config",
#' site="nwis_01646000", filename=NULL)
#' stage_metab_config(tag="0.0.1", strategy="test write_metab_config",
#' site=list_sites()[24:33], filename=NULL,
#' omit_incomplete=FALSE)
#' stage_metab_config(tag="0.0.1", strategy="test write_metab_config",
#' site=list_sites()[24:33], filename=NULL)
#' styxsites <- c("styx_001001","styx_001002","styx_001003")
#' mc <- stage_metab_config(tag="0.0.1", strategy="test styx config",
#' model="metab_sim", site=styxsites, filename=NULL,
#' doobs=choose_data_source("doobs", styxsites, logic="unused var"), omit_incomplete=FALSE)
#' }
stage_metab_config <- function(
  tag, strategy, date=Sys.time(),
  model="metab_mle", model_args="list()",
  site=list_sites(c("doobs_nwis","disch_nwis","wtr_nwis")),
  sitetime=choose_data_source("sitetime", site),
  doobs=choose_data_source("doobs", site),
  dosat=choose_data_source("dosat", site),
  depth=choose_data_source("depth", site),
  wtr=choose_data_source("wtr", site),
  par=choose_data_source("par", site),
  disch=choose_data_source("disch", site, logic="unused var"),
  veloc=choose_data_source("veloc", site, logic="unused var"),
  sitedate=choose_data_source("sitedate", site, logic="unused var"),
  doinit=choose_data_source("doinit", site, logic="unused var"),
  gpp=choose_data_source("gpp", site, logic="unused var"),
  er=choose_data_source("er", site, logic="unused var"),
  K600=choose_data_source("K600", site, logic="unused var"),
  K600lwr=choose_data_source("K600lwr", site, logic="unused var"),
  K600upr=choose_data_source("K600upr", site, logic="unused var"),
  dischdaily=choose_data_source("dischdaily", site, logic="unused var"),
  velocdaily=choose_data_source("velocdaily", site, logic="unused var"),
  start_date=NA, end_date=NA,
  omit_incomplete=TRUE,
  filename="./config.tsv") {

  # Create the config table: one row per site, with each Data Source
  # data.frame contributing its columns prefixed by the argument name
  # (e.g. doobs.src), which is what the ".src" lookup below relies on.
  config <- data.frame(
    tag=tag, strategy=strategy, date=as.character(date, format="%Y-%m-%d %H:%M:%S %z"),
    model=model, model_args=model_args,
    site=site,
    sitetime=sitetime,
    doobs=doobs, dosat=dosat, depth=depth, wtr=wtr, par=par, disch=disch, veloc=veloc,
    sitedate=sitedate, doinit=doinit, gpp=gpp, er=er, K600=K600, K600lwr=K600lwr, K600upr=K600upr,
    dischdaily=dischdaily, velocdaily=velocdaily,
    start_date=as.POSIXct(start_date, tz="UTC"), end_date=as.POSIXct(end_date, tz="UTC"),
    stringsAsFactors=FALSE)

  # Filter to only those rows that might work: drop any row whose model
  # requires a variable for which no src has been specified.
  if(omit_incomplete) {
    # seq_len() (not 1:nrow) so a zero-row config is handled safely, and
    # vapply() (not sapply) so the result is always a logical vector even
    # when the config is empty.
    incomplete <- vapply(seq_len(nrow(config)), function(row) {
      metab_fun <- config[row, "model"]
      # get a list of vars for which we expect complete info, i.e. the
      # non-optional columns of the model's data and data_daily arguments
      arg_data <- eval(formals(metab_fun)$data)
      arg_data_daily <- eval(formals(metab_fun)$data_daily)
      needs_data <- if(attr(arg_data,'optional')[1]=='all') NULL else colnames(arg_data)[!(colnames(arg_data) %in% attr(arg_data,'optional'))]
      needs_data_daily <- if(attr(arg_data_daily,'optional')[1]=='all') NULL else colnames(arg_data_daily)[!(colnames(arg_data_daily) %in% attr(arg_data_daily,'optional'))]
      data_needs <- c(needs_data, needs_data_daily)
      # translate metab variable names into config variable names
      var_lookup <- unique(get_var_src_codes(out=c("metab_var","var")))
      var_needs <- var_lookup[match(data_needs, var_lookup$metab_var),"var"]
      # determine whether we have a specified src for each required variable
      unmet_needs <- is.na(config[row,paste0(var_needs, ".src")])
      any(unmet_needs)
    }, logical(1))
    config <- config[!incomplete, , drop=FALSE]
  }

  # Add a row index; this could go out of date if the user modifies the config
  # file, but better than relying on fragile rownames
  config$config.row <- seq_len(nrow(config))

  # Write the table to file if requested, returning the filename; otherwise
  # return the config data.frame itself.
  if(!is.null(filename)) {
    write_config(config, filename)
    return(filename)
  } else {
    return(config)
  }
}
|
/R/stage_metab_config.R
|
permissive
|
berdaniera/mda.streams
|
R
| false
| false
| 9,124
|
r
|
#' Write a metabolism modeling configuration file
#'
#' Write a table (tsv) of configuration information for individual metabolism
#' modeling jobs (one row/job per site-strategy combination). This tsv should
#' reflect the full information needed to re-run a set of jobs. The jobs will
#' probably, but not necessarily, be run on a Condor cluster.
#'
#' @section Data Source Format:
#'
#' Every parameter whose definition begins with Data Source should be supplied
#' as a 4-column data.frame with column names c('type','site','src','logic').
#' These specify where to find the data for a given variable. The easiest way
#' to create such a data.frame is usually with
#' \code{\link{choose_data_source}}, though it may also be created manually.
#'
#' The variables requiring Data Source specification, along with their
#' expected units, are defined in the help file for \code{\link{mm_data}}.
#'
#' @param tag character of form "1.0.2" that uniquely identifies this set of
#' modeling runs.
#' @param strategy character, or vector of length sites, describing this set of
#' modeling runs in concise English.
#' @param date POSIXct indicating the date of config construction. It is
#' strongly recommended to use the default.
#' @param model character. the name of the metabolism model to construct
#' @param model_args character, in R language, specifying any arguments to pass
#' to the model function
#' @param site site names
#' @param sitetime Data Source for mean solar time. See Data Source Format
#' below.
#' @param doobs Data Source for dissolved oxygen concentrations. See Data Source
#' Format below.
#' @param dosat Data Source for dissolved oxygen saturation concentrations. See
#' Data Source Format below.
#' @param depth Data Source for mean stream depth. See Data Source Format below.
#' @param wtr Data Source for water temperature. See Data Source Format below.
#' @param par Data Source for light (photosynthetically available radiation,
#' PAR). See Data Source Format below.
#' @param disch Data Source for unit-value stream discharge, for use in
#' identifying daily priors or fixed values for K600. See Data Source Format
#' below.
#' @param veloc Data Source for unit-value flow velocity, for use in identifying
#' daily priors or fixed values for K600. See Data Source Format below.
#' @param sitedate Data Source for the dates of interest. See Data Source Format
#' below.
#' @param doinit Data Source for the first DO observation on each date to model,
#' for use in data simulation. See Data Source Format below.
#' @param gpp Data Source for daily gross primary productivity rates for use in
#' data simulation. See Data Source Format below.
#' @param er Data Source for ecosystem respiration rates for use in data
#' simulation. See Data Source Format below.
#' @param K600 Data Source for reaeration rates for use in data simulation. See
#' Data Source Format below.
#' @param K600lwr Data Source for lower bound on reaeration rates for use in
#' data simulation. See Data Source Format below.
#' @param K600upr Data Source for upper bound on reaeration rates for use in
#' data simulation. See Data Source Format below.
#' @param dischdaily Data Source for daily mean stream discharge, for use in
#' identifying daily priors or fixed values for K600. See Data Source Format
#' below.
#' @param velocdaily Data Source for daily mean flow velocity, for use in
#' identifying daily priors or fixed values for K600. See Data Source Format
#' below.
#' @param start_date NA or datetime, to be coerced with
#' \code{as.POSIXct(start_date, tz="UTC")}, at which to start the data passed
#' to the metab model
#' @param end_date NA or datetime, to be coerced with \code{as.POSIXct(end_date,
#' tz="UTC")}, at which to end the data passed to the metab model
#' @param omit_incomplete logical. If one or more datasets required for the
#' specified config row is unavailable, should that row be omitted?
#' @param filename character or NULL. If NULL, the function returns a
#' data.frame, otherwise it writes that data.frame to the file specified by
#' filename.
#' @return file name of the config file
#' @import streamMetabolizer
#' @import dplyr
#' @importFrom utils write.table
#' @export
#' @examples
#' \dontrun{
#' login_sb()
#' site="nwis_01646000"
#' cfg <- stage_metab_config(tag="0.0.1", strategy="try stage_metab_config",
#' model="metab_mle", site=site, filename=NULL,
#' sitetime=choose_data_source("sitetime", site, logic="manual", src="calcLon", type="ts"),
#' doobs=choose_data_source("doobs", site, logic="unused var"),
#' dosat=choose_data_source("dosat", site, logic="unused var"),
#' depth=choose_data_source("depth", site, logic="unused var"),
#' wtr=choose_data_source("wtr", site, logic="unused var"),
#' par=choose_data_source("par", site, logic="unused var"),
#' K600=choose_data_source("K600", site, logic="nighttime", src="0.0.6", type="pred"),
#' dischdaily=choose_data_source("dischdaily", site, logic="manual", src="calcDMean", type="ts"),
#' velocdaily=choose_data_source("velocdaily", site, logic="manual", src="calcDMean", type="ts"),
#' omit_incomplete=FALSE)
#' stage_metab_config(tag="0.0.1", strategy="try stage_metab_config",
#' site="nwis_01646000", filename=NULL)
#' stage_metab_config(tag="0.0.1", strategy="test write_metab_config",
#' site=list_sites()[24:33], filename=NULL,
#' omit_incomplete=FALSE)
#' stage_metab_config(tag="0.0.1", strategy="test write_metab_config",
#' site=list_sites()[24:33], filename=NULL)
#' styxsites <- c("styx_001001","styx_001002","styx_001003")
#' mc <- stage_metab_config(tag="0.0.1", strategy="test styx config",
#' model="metab_sim", site=styxsites, filename=NULL,
#' doobs=choose_data_source("doobs", styxsites, logic="unused var"), omit_incomplete=FALSE)
#' }
stage_metab_config <- function(
  tag, strategy, date=Sys.time(),
  model="metab_mle", model_args="list()",
  site=list_sites(c("doobs_nwis","disch_nwis","wtr_nwis")),
  sitetime=choose_data_source("sitetime", site),
  doobs=choose_data_source("doobs", site),
  dosat=choose_data_source("dosat", site),
  depth=choose_data_source("depth", site),
  wtr=choose_data_source("wtr", site),
  par=choose_data_source("par", site),
  disch=choose_data_source("disch", site, logic="unused var"),
  veloc=choose_data_source("veloc", site, logic="unused var"),
  sitedate=choose_data_source("sitedate", site, logic="unused var"),
  doinit=choose_data_source("doinit", site, logic="unused var"),
  gpp=choose_data_source("gpp", site, logic="unused var"),
  er=choose_data_source("er", site, logic="unused var"),
  K600=choose_data_source("K600", site, logic="unused var"),
  K600lwr=choose_data_source("K600lwr", site, logic="unused var"),
  K600upr=choose_data_source("K600upr", site, logic="unused var"),
  dischdaily=choose_data_source("dischdaily", site, logic="unused var"),
  velocdaily=choose_data_source("velocdaily", site, logic="unused var"),
  start_date=NA, end_date=NA,
  omit_incomplete=TRUE,
  filename="./config.tsv") {
  # Create the config table
  # NOTE(review): each choose_data_source() result appears to be a data.frame,
  # so its columns land in config with a "<var>." prefix (e.g. K600.src, as
  # referenced in the completeness check below) -- confirm against
  # choose_data_source()'s return value.
  config <- data.frame(
    tag=tag, strategy=strategy, date=as.character(date, format="%Y-%m-%d %H:%M:%S %z"),
    model=model, model_args=model_args,
    site=site,
    sitetime=sitetime,
    doobs=doobs, dosat=dosat, depth=depth, wtr=wtr, par=par, disch=disch, veloc=veloc,
    sitedate=sitedate, doinit=doinit, gpp=gpp, er=er, K600=K600, K600lwr=K600lwr, K600upr=K600upr,
    dischdaily=dischdaily, velocdaily=velocdaily,
    start_date=as.POSIXct(start_date, tz="UTC"), end_date=as.POSIXct(end_date, tz="UTC"),
    stringsAsFactors=FALSE)
  # Filter to only those rows that might work
  if(omit_incomplete) {
    incomplete <- sapply(1:nrow(config), function(row) {
      metab_fun <- config[row, "model"]
      # get a list of vars for which we expect complete info
      # Inspect the metab function's default data/data_daily arguments to learn
      # which columns the model requires. An 'optional' attribute of 'all' is
      # taken to mean nothing is required (assumed streamMetabolizer
      # convention -- TODO confirm).
      arg_data <- eval(formals(metab_fun)$data)
      arg_data_daily <- eval(formals(metab_fun)$data_daily)
      needs_data <- if(attr(arg_data,'optional')[1]=='all') NULL else colnames(arg_data)[!(colnames(arg_data) %in% attr(arg_data,'optional'))]
      needs_data_daily <- if(attr(arg_data_daily,'optional')[1]=='all') NULL else colnames(arg_data_daily)[!(colnames(arg_data_daily) %in% attr(arg_data_daily,'optional'))]
      data_needs <- c(needs_data, needs_data_daily)
      # Translate the model's column names (metab_var) into config var names.
      var_lookup <- unique(get_var_src_codes(out=c("metab_var","var")))
      var_needs <- var_lookup[match(data_needs, var_lookup$metab_var),"var"]
      # determine whether we have a specified src for each
      # (a need is unmet when the corresponding <var>.src column is NA)
      unmet_needs <- is.na(config[row,paste0(var_needs, ".src")])
      any(unmet_needs)
    })
    config <- config[!incomplete,]
  }
  # Add a row index; this could go out of date if the user modifies the config
  # file, but better than relying on fragile rownames
  config$config.row <- seq_len(nrow(config))
  # Write the table to file if requested
  if(!is.null(filename)) {
    write_config(config, filename)
    return(filename)
  } else {
    return(config)
  }
}
|
#----- R for Data Science (Hadley Wickham) -----
# https://r4ds.had.co.nz
# Personal chapter-by-chapter study notes. Many lines below are illustrative
# snippets copied from the book and are NOT meant to run top-to-bottom.
#----- Chapter 5 -----
rm(list = ls())
library(tidyverse)
ggplot(data = diamonds) +
  geom_bar(mapping = aes(x = cut))
count(diamonds, cut)
diamonds %>%
  filter(between(y, 3, 20))
diamonds %>%
  filter(between(y, 0, 3))
# Replace implausible y values with NA instead of dropping the rows.
diamonds2 <- diamonds %>%
  mutate(y = ifelse(y < 3 | y > 20, NA, y))
diamonds2 %>%
  ggplot(aes(x = x, y = y)) +
  geom_point()
install.packages("nycflights13")
library(nycflights13)
# Compare scheduled-departure-time densities of cancelled vs. flown flights.
nycflights13::flights %>%
  mutate(cancelled = is.na(dep_time),
         sched_hour = sched_dep_time %/% 100,
         sched_min = sched_dep_time %% 100,
         sched_dep_time = sched_hour + sched_min / 60) %>%
  ggplot(aes(x = sched_dep_time, y = ..density..)) +
  # NOTE(review): binwidth is inside aes() here, so it is treated as an
  # aesthetic mapping rather than the geom's binwidth parameter -- it likely
  # belongs outside aes().
  geom_freqpoly(aes(color=cancelled,
                    binwidth = 1/4))
ggplot(diamonds) +
  geom_count(aes(x = cut, y = color))
diamonds %>%
  count(cut, color)
diamonds %>%
  count(cut, color) %>%
  ggplot(aes(x = color, y = cut)) +
  geom_tile(aes(fill = n))
# two continuous vars
ggplot(diamonds) +
  geom_point(aes(x = carat, y = price), alpha = .02)
ggplot(diamonds) +
  geom_bin2d(aes(x = carat, y = price))
install.packages("hexbin")
library(hexbin)
ggplot(diamonds) +
  geom_hex(aes(x = carat, y = price))
ggplot(diamonds, aes(x = carat, y = price)) +
  geom_boxplot(aes(group = cut_width(carat, 0.1)))
#----- Chapter 6 Workflow: Projects -----
# Ctrl + Shift F10 to restart R Studio
# Ctrl + Shift S to rerun current script
getwd()
#----- Chapter 10: Tibbles -----
library(tidyverse)
?tribble
tribble(
  ~x, ~y, ~z,
  "Hello", 4, 5.5,
  "Test", 3, 8.8
)
nycflights13::flights %>%
  print(n = 10, width = Inf)
?tibble
df <- tibble(
  x = runif(5),
  y = rnorm(5)
)
df$x
?tibble::enframe
enframe(5:8)
#---- Chapter 11: Data Import -----
read_csv("a,b,c\n1,2,3\n4,5,6") # skip = n, na = "-"
x <- parse_integer(c("123", "345", "abc", "123.45"))
problems(x)
read_csv("a,b,c\n1,2,.", na = ".")
x <- parse_integer(c("123", "345", "abc", "123.45"))
problems(x) # the set of import/parsing problems
?read_csv2
#----- 11.4.2
library(tidyverse)
challenge <- read_csv(readr_example("challenge.csv"))
problems(challenge)
str(challenge)
tail(challenge)
challenge <- read_csv(
  readr_example("challenge.csv"),
  col_types = cols(
    x = col_double(),
    y = col_date()
  )
)
# Sometimes it's easier to diagnose problems if you just read in all the columns as character vectors:
challenge2 <- read_csv(readr_example("challenge.csv"),
                       col_types = cols(.default = col_character())
)
# If you're reading a very large file, you might want to set n_max to a smallish number
#like 10,000 or 100,000. That will accelerate your iterations while you eliminate common problems
challenge2 <- read_csv(readr_example("challenge.csv"),
                       n_max = 100,
                       col_types = cols(.default = col_character())
)
#----- 11.5 Writing to a file
write_csv(challenge, "challenge.csv") # encodes in UTF-8 and dates as ISO 8601 format
# NOTE(review): the next two calls are illustrative only; without arguments
# they would error if executed.
write_tsv() # same as csv, but tab separated
write_excel_csv() # this writes a special character (a "byte order mark") at the start of the file which tells Excel that you're using the UTF-8 encoding.
# RDS is R's custom binary format.
write_rds(challenge, "challenge.rds") # wrapper for saveRDS() and preserves data types
read_rds("challenge.rds")
# The feather package implements a fast binary file format that can be shared across programming languages:
# Feather tends to be faster than RDS and is usable outside of R.
# RDS supports list-columns (which you'll learn about in many models); feather currently does not.
install.packages("feather")
library(feather)
write_feather(challenge, "chalenge.feather")
read_feather("chalenge.feather")
#----- 11.6 other readers
# NOTE(review): the names below are package references for further reading,
# not runnable calls.
haven # SPSS, Stata and SAS files
readxl()
DBI
jsonlite
xml2
# further info: https://cran.r-project.org/doc/manuals/r-release/R-data.html
#-----
#----- 12 Tidy data -----
#-----
library(tidyverse)
pivot_longer() # replacement for gather https://cmdlinetips.com/2019/09/pivot_longer-and-pivot_wider-in-tidyr/
pivot_wider() # replacement for spread
?separate()
unite()
tidyr::table3 %>%
  separate(rate, into = c("cases", "population"), sep = "/", convert = TRUE)
# sep = 2 splits at 2nd character position. Negative values start from end of string
table3 %>%
  separate(year, into = c("century", "year"), sep = 2) %>%
  unite(newcol, century, year, sep = "")
#---
stocks <- tibble(
  year = c(2015, 2015, 2015, 2015, 2016, 2016, 2016),
  qtr = c( 1, 2, 3, 4, 2, 3, 4),
  return = c(1.88, 0.59, 0.35, NA, 0.92, 0.17, 2.66)
)
# both pivot_wider and complete() explicitly show all missing values.
stocks %>%
  pivot_wider(names_from = year, values_from = return)
stocks %>%
  complete(year, qtr)
#----- Excel-like autofill for when NA means it should be ditto from cell above.
treatment <- tribble(
  ~ person, ~ treatment, ~response,
  "Derrick Whitmore", 1, 7,
  NA, 2, 10,
  NA, 3, 9,
  "Katherine Burke", 1, 4
)
treatment %>%
  fill(person)
#----- 12.6 Case Study
# https://www.who.int/tb/country/data/download/en/
View(who)
who1 <- who %>%
  pivot_longer(
    cols = new_sp_m014:newrel_f65,
    names_to = "key",
    values_to = "cases",
    values_drop_na = TRUE
  )
# deprecated gather() equivalent
# NOTE(review): gather()'s argument is `value`, not `val`; this only works via
# partial argument matching.
who %>%
  gather(key = "key", val = "cases", new_sp_m014:newrel_f65, na.rm = TRUE)
who1 %>%
  count(key)
# fix column name inconsistency. "newrel" should be "new_rel"
who2 <- who1 %>%
  mutate(key = stringr::str_replace(key, "newrel", "new_rel"))
# split key into 3 cols
who3 <- who2 %>%
  separate(key, c("new", "type", "sexage"), sep = "_")
# select all cols but iso2, iso3 and -new (all cases are new, so no need to keep)
who4 <- who3 %>%
  select(-iso2, -iso3, -new)
who5 <- who4 %>%
  separate(sexage, c("sex", "age"), sep = 1)
#--- all in one pipe
who %>%
  pivot_longer(
    cols = new_sp_m014:newrel_f65,
    names_to = "key",
    values_to = "cases",
    values_drop_na = TRUE
  ) %>%
  mutate(
    key = stringr::str_replace(key, "newrel", "new_rel")
  ) %>%
  separate(key, c("new", "var", "sexage")) %>%
  select(-iso2, -iso3, -new) %>%
  separate(sexage, c("sex", "age"), sep = 1)
#--- 12.6.1 Exercises
#----- 12.7 Non-tidy data
#-----
#----- 13 Relational data -----
#-----
library(tidyverse)
install.packages("nycflights13")
library(nycflights13)
airlines
airports %>% filter(faa == "BNA")
planes
weather
# check for uniqueness
planes %>%
  count(tailnum) %>%
  filter(n > 1)
weather %>%
  count(year, month, day, hour, origin) %>%
  filter(n > 1)
# Add a row number surrogate key to flights since it has none.
View(flights)
flights2 <- flights %>% mutate(rownum = row_number())
#--- 13.4 Mutating joins
#-- 13.4.6 Exercises
flights %>%
  mutate(delay = arr_time - sched_arr_time) %>%
  group_by(dest) %>%
  mutate(avg_delay = mean(delay, na.rm = TRUE)) %>%
  select(year, month, day, dest, avg_delay) %>%
  View()
# quick map
airports %>%
  semi_join(flights, c("faa" = "dest")) %>%
  ggplot(aes(lon, lat)) +
  borders("state") +
  geom_point() +
  coord_quickmap()
#----- 13.5 Filtering joins
# NOTE(review): x and y below are placeholders from the book, not defined
# objects in this script.
semi_join(x, y) # keeps all x that match y
anti_join(x, y) # drops all x that match y
#----- 13.6 Join problems
# 1. identify PKs in each table
# 2. check that none of the PKs are missing
# 3. check for FK orphans
#----- 13.7 Set operations
# All these operations work with a complete row, comparing the values of every variable.
intersect(x, y) # return only observations in both x and y.
union(x, y) # return unique observations in x and y.
setdiff(x, y) # return observations in x, but not in y.
|
/R for data science book - 2019.R
|
no_license
|
wbdill/r-sandbox01
|
R
| false
| false
| 7,742
|
r
|
#----- R for Data Science (Hadley Wickham) -----
# https://r4ds.had.co.nz
# Personal chapter-by-chapter study notes. Many lines below are illustrative
# snippets copied from the book and are NOT meant to run top-to-bottom.
#----- Chapter 5 -----
rm(list = ls())
library(tidyverse)
ggplot(data = diamonds) +
  geom_bar(mapping = aes(x = cut))
count(diamonds, cut)
diamonds %>%
  filter(between(y, 3, 20))
diamonds %>%
  filter(between(y, 0, 3))
# Replace implausible y values with NA instead of dropping the rows.
diamonds2 <- diamonds %>%
  mutate(y = ifelse(y < 3 | y > 20, NA, y))
diamonds2 %>%
  ggplot(aes(x = x, y = y)) +
  geom_point()
install.packages("nycflights13")
library(nycflights13)
# Compare scheduled-departure-time densities of cancelled vs. flown flights.
nycflights13::flights %>%
  mutate(cancelled = is.na(dep_time),
         sched_hour = sched_dep_time %/% 100,
         sched_min = sched_dep_time %% 100,
         sched_dep_time = sched_hour + sched_min / 60) %>%
  ggplot(aes(x = sched_dep_time, y = ..density..)) +
  # NOTE(review): binwidth is inside aes() here, so it is treated as an
  # aesthetic mapping rather than the geom's binwidth parameter -- it likely
  # belongs outside aes().
  geom_freqpoly(aes(color=cancelled,
                    binwidth = 1/4))
ggplot(diamonds) +
  geom_count(aes(x = cut, y = color))
diamonds %>%
  count(cut, color)
diamonds %>%
  count(cut, color) %>%
  ggplot(aes(x = color, y = cut)) +
  geom_tile(aes(fill = n))
# two continuous vars
ggplot(diamonds) +
  geom_point(aes(x = carat, y = price), alpha = .02)
ggplot(diamonds) +
  geom_bin2d(aes(x = carat, y = price))
install.packages("hexbin")
library(hexbin)
ggplot(diamonds) +
  geom_hex(aes(x = carat, y = price))
ggplot(diamonds, aes(x = carat, y = price)) +
  geom_boxplot(aes(group = cut_width(carat, 0.1)))
#----- Chapter 6 Workflow: Projects -----
# Ctrl + Shift F10 to restart R Studio
# Ctrl + Shift S to rerun current script
getwd()
#----- Chapter 10: Tibbles -----
library(tidyverse)
?tribble
tribble(
  ~x, ~y, ~z,
  "Hello", 4, 5.5,
  "Test", 3, 8.8
)
nycflights13::flights %>%
  print(n = 10, width = Inf)
?tibble
df <- tibble(
  x = runif(5),
  y = rnorm(5)
)
df$x
?tibble::enframe
enframe(5:8)
#---- Chapter 11: Data Import -----
read_csv("a,b,c\n1,2,3\n4,5,6") # skip = n, na = "-"
x <- parse_integer(c("123", "345", "abc", "123.45"))
problems(x)
read_csv("a,b,c\n1,2,.", na = ".")
x <- parse_integer(c("123", "345", "abc", "123.45"))
problems(x) # the set of import/parsing problems
?read_csv2
#----- 11.4.2
library(tidyverse)
challenge <- read_csv(readr_example("challenge.csv"))
problems(challenge)
str(challenge)
tail(challenge)
challenge <- read_csv(
  readr_example("challenge.csv"),
  col_types = cols(
    x = col_double(),
    y = col_date()
  )
)
# Sometimes it's easier to diagnose problems if you just read in all the columns as character vectors:
challenge2 <- read_csv(readr_example("challenge.csv"),
                       col_types = cols(.default = col_character())
)
# If you're reading a very large file, you might want to set n_max to a smallish number
#like 10,000 or 100,000. That will accelerate your iterations while you eliminate common problems
challenge2 <- read_csv(readr_example("challenge.csv"),
                       n_max = 100,
                       col_types = cols(.default = col_character())
)
#----- 11.5 Writing to a file
write_csv(challenge, "challenge.csv") # encodes in UTF-8 and dates as ISO 8601 format
# NOTE(review): the next two calls are illustrative only; without arguments
# they would error if executed.
write_tsv() # same as csv, but tab separated
write_excel_csv() # this writes a special character (a "byte order mark") at the start of the file which tells Excel that you're using the UTF-8 encoding.
# RDS is R's custom binary format.
write_rds(challenge, "challenge.rds") # wrapper for saveRDS() and preserves data types
read_rds("challenge.rds")
# The feather package implements a fast binary file format that can be shared across programming languages:
# Feather tends to be faster than RDS and is usable outside of R.
# RDS supports list-columns (which you'll learn about in many models); feather currently does not.
install.packages("feather")
library(feather)
write_feather(challenge, "chalenge.feather")
read_feather("chalenge.feather")
#----- 11.6 other readers
# NOTE(review): the names below are package references for further reading,
# not runnable calls.
haven # SPSS, Stata and SAS files
readxl()
DBI
jsonlite
xml2
# further info: https://cran.r-project.org/doc/manuals/r-release/R-data.html
#-----
#----- 12 Tidy data -----
#-----
library(tidyverse)
pivot_longer() # replacement for gather https://cmdlinetips.com/2019/09/pivot_longer-and-pivot_wider-in-tidyr/
pivot_wider() # replacement for spread
?separate()
unite()
tidyr::table3 %>%
  separate(rate, into = c("cases", "population"), sep = "/", convert = TRUE)
# sep = 2 splits at 2nd character position. Negative values start from end of string
table3 %>%
  separate(year, into = c("century", "year"), sep = 2) %>%
  unite(newcol, century, year, sep = "")
#---
stocks <- tibble(
  year = c(2015, 2015, 2015, 2015, 2016, 2016, 2016),
  qtr = c( 1, 2, 3, 4, 2, 3, 4),
  return = c(1.88, 0.59, 0.35, NA, 0.92, 0.17, 2.66)
)
# both pivot_wider and complete() explicitly show all missing values.
stocks %>%
  pivot_wider(names_from = year, values_from = return)
stocks %>%
  complete(year, qtr)
#----- Excel-like autofill for when NA means it should be ditto from cell above.
treatment <- tribble(
  ~ person, ~ treatment, ~response,
  "Derrick Whitmore", 1, 7,
  NA, 2, 10,
  NA, 3, 9,
  "Katherine Burke", 1, 4
)
treatment %>%
  fill(person)
#----- 12.6 Case Study
# https://www.who.int/tb/country/data/download/en/
View(who)
who1 <- who %>%
  pivot_longer(
    cols = new_sp_m014:newrel_f65,
    names_to = "key",
    values_to = "cases",
    values_drop_na = TRUE
  )
# deprecated gather() equivalent
# NOTE(review): gather()'s argument is `value`, not `val`; this only works via
# partial argument matching.
who %>%
  gather(key = "key", val = "cases", new_sp_m014:newrel_f65, na.rm = TRUE)
who1 %>%
  count(key)
# fix column name inconsistency. "newrel" should be "new_rel"
who2 <- who1 %>%
  mutate(key = stringr::str_replace(key, "newrel", "new_rel"))
# split key into 3 cols
who3 <- who2 %>%
  separate(key, c("new", "type", "sexage"), sep = "_")
# select all cols but iso2, iso3 and -new (all cases are new, so no need to keep)
who4 <- who3 %>%
  select(-iso2, -iso3, -new)
who5 <- who4 %>%
  separate(sexage, c("sex", "age"), sep = 1)
#--- all in one pipe
who %>%
  pivot_longer(
    cols = new_sp_m014:newrel_f65,
    names_to = "key",
    values_to = "cases",
    values_drop_na = TRUE
  ) %>%
  mutate(
    key = stringr::str_replace(key, "newrel", "new_rel")
  ) %>%
  separate(key, c("new", "var", "sexage")) %>%
  select(-iso2, -iso3, -new) %>%
  separate(sexage, c("sex", "age"), sep = 1)
#--- 12.6.1 Exercises
#----- 12.7 Non-tidy data
#-----
#----- 13 Relational data -----
#-----
library(tidyverse)
install.packages("nycflights13")
library(nycflights13)
airlines
airports %>% filter(faa == "BNA")
planes
weather
# check for uniqueness
planes %>%
  count(tailnum) %>%
  filter(n > 1)
weather %>%
  count(year, month, day, hour, origin) %>%
  filter(n > 1)
# Add a row number surrogate key to flights since it has none.
View(flights)
flights2 <- flights %>% mutate(rownum = row_number())
#--- 13.4 Mutating joins
#-- 13.4.6 Exercises
flights %>%
  mutate(delay = arr_time - sched_arr_time) %>%
  group_by(dest) %>%
  mutate(avg_delay = mean(delay, na.rm = TRUE)) %>%
  select(year, month, day, dest, avg_delay) %>%
  View()
# quick map
airports %>%
  semi_join(flights, c("faa" = "dest")) %>%
  ggplot(aes(lon, lat)) +
  borders("state") +
  geom_point() +
  coord_quickmap()
#----- 13.5 Filtering joins
# NOTE(review): x and y below are placeholders from the book, not defined
# objects in this script.
semi_join(x, y) # keeps all x that match y
anti_join(x, y) # drops all x that match y
#----- 13.6 Join problems
# 1. identify PKs in each table
# 2. check that none of the PKs are missing
# 3. check for FK orphans
#----- 13.7 Set operations
# All these operations work with a complete row, comparing the values of every variable.
intersect(x, y) # return only observations in both x and y.
union(x, y) # return unique observations in x and y.
setdiff(x, y) # return observations in x, but not in y.
|
# Build the yfR hex-sticker logo from long-run S&P 500 adjusted prices.
library(hexSticker)
library(yfR)
# NOTE(review): `%>%` is used without attaching magrittr/dplyr explicitly;
# this relies on yfR (or a dependency) re-exporting the pipe -- confirm.
df_sp500 <- yfR::yf_get('^GSPC', first_date = '1950-01-01') %>%
  dplyr::ungroup() %>%
  dplyr::select(ref_date, price_adjusted)
# Draw the price series as the sticker's sub-plot and write the PNG to disk.
s <- sticker(~plot(df_sp500, cex=.5, cex.axis=.5, mgp=c(0,.3,0),
                 xlab="", ylab="SP500"),
             package="yfR",
             p_size=11,
             s_x=1,
             s_y=.8,
             s_width=1.4,
             s_height=1.2,
             filename="inst/figures/yfr_logo.png")
|
/inst/scripts/S_create_logo.R
|
permissive
|
ropensci/yfR
|
R
| false
| false
| 470
|
r
|
# Build the yfR hex-sticker logo from long-run S&P 500 adjusted prices.
library(hexSticker)
library(yfR)
# NOTE(review): `%>%` is used without attaching magrittr/dplyr explicitly;
# this relies on yfR (or a dependency) re-exporting the pipe -- confirm.
df_sp500 <- yfR::yf_get('^GSPC', first_date = '1950-01-01') %>%
  dplyr::ungroup() %>%
  dplyr::select(ref_date, price_adjusted)
# Draw the price series as the sticker's sub-plot and write the PNG to disk.
s <- sticker(~plot(df_sp500, cex=.5, cex.axis=.5, mgp=c(0,.3,0),
                 xlab="", ylab="SP500"),
             package="yfR",
             p_size=11,
             s_x=1,
             s_y=.8,
             s_width=1.4,
             s_height=1.2,
             filename="inst/figures/yfr_logo.png")
|
library(AHMbook)
### Name: sim.spatialHDS
### Title: Simulates data for a hierarchical spatial distance sampling
###   model
### Aliases: sim.spatialHDS
### ** Examples
# Generate data with the default arguments and look at the structure:
# NOTE(review): sim.spatialHDS() simulates data, so output presumably varies
# per run unless a seed is set first (e.g. set.seed()) -- confirm.
tmp <- sim.spatialHDS()
str(tmp)
|
/data/genthat_extracted_code/AHMbook/examples/sim.spatialHDS.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 279
|
r
|
library(AHMbook)
### Name: sim.spatialHDS
### Title: Simulates data for a hierarchical spatial distance sampling
###   model
### Aliases: sim.spatialHDS
### ** Examples
# Generate data with the default arguments and look at the structure:
# NOTE(review): sim.spatialHDS() simulates data, so output presumably varies
# per run unless a seed is set first (e.g. set.seed()) -- confirm.
tmp <- sim.spatialHDS()
str(tmp)
|
# Validate that callr_function is either NULL or a function.
assert_callr_function <- function(callr_function) {
  if (!is.null(callr_function)) {
    assert_function(
      callr_function,
      "callr_function must be a function or NULL."
    )
  }
}
# Throw a validation error unless x is a character vector.
assert_chr <- function(x, msg = NULL) {
  if (!is.character(x)) {
    throw_validate(msg %||% "x must be a character.")
  }
}
# Character check plus a ban on the | and * delimiter characters.
assert_chr_no_delim <- function(x, msg = NULL) {
  assert_chr(x)
  if (any(grepl("|", x, fixed = TRUE) | grepl("*", x, fixed = TRUE))) {
    throw_validate(msg %||% "x must not contain | or *")
  }
}
# Ensure an object's field names exactly match its constructor's formals.
assert_correct_fields <- function(object, constructor) {
  assert_identical_chr(sort(names(object)), sort(names(formals(constructor))))
}
# Validate that x is an igraph object representing a directed acyclic graph.
assert_dag <- function(x, msg = NULL) {
  if (!inherits(x, "igraph") || !igraph::is_dag(x)) {
    throw_validate(msg %||% "x must be an igraph and directed acyclic graph.")
  }
}
# Throw a validation error unless x is numeric (integer or double).
assert_dbl <- function(x, msg = NULL) {
  if (!is.numeric(x)) {
    throw_validate(msg %||% "x must be numeric.")
  }
}
# Throw a validation error unless x is a data frame.
assert_df <- function(x, msg = NULL) {
  if (!is.data.frame(x)) {
    throw_validate(msg %||% "x must be a data frame.")
  }
}
# Throw a validation error unless x is an environment.
assert_envir <- function(x, msg = NULL) {
  if (!is.environment(x)) {
    throw_validate(msg %||% "x must be an environment")
  }
}
# Throw a validation error unless x is an expression object.
assert_expr <- function(x, msg = NULL) {
  if (!is.expression(x)) {
    throw_validate(msg %||% "x must be an expression.")
  }
}
# Validate a storage format: a scalar character naming a known format setting.
assert_format <- function(format) {
  assert_scalar(format)
  assert_chr(format)
  store_assert_format_setting(as_class(format))
}
# Throw a validation error unless x is a function.
assert_function <- function(x, msg = NULL) {
  if (!is.function(x)) {
    throw_validate(msg %||% "x must be a function.")
  }
}
# Validate that every element of x is >= threshold.
assert_ge <- function(x, threshold, msg = NULL) {
  if (any(x < threshold)) {
    throw_validate(msg %||% paste("x is less than", threshold))
  }
}
# Throw a validation error unless x and y are identical().
assert_identical <- function(x, y, msg = NULL) {
  if (!identical(x, y)) {
    throw_validate(msg %||% "x and y are not identical.")
  }
}
# Like assert_identical() but deparses both values into the error message.
assert_identical_chr <- function(x, y, msg = NULL) {
  if (!identical(x, y)) {
    msg_x <- paste0(deparse(x), collapse = "")
    msg_y <- paste0(deparse(y), collapse = "")
    throw_validate(msg %||% paste(msg_x, "and", msg_y, "not identical."))
  }
}
# Validate that every element of x is among choices.
assert_in <- function(x, choices, msg = NULL) {
  if (!all(x %in% choices)) {
    throw_validate(msg %||% paste(deparse(x), "is not in ", deparse(choices)))
  }
}
# Validate that no element of x is among choices.
assert_not_in <- function(x, choices, msg = NULL) {
  if (any(x %in% choices)) {
    throw_validate(msg %||% paste(deparse(x), "is in", deparse(choices)))
  }
}
# Throw a validation error unless x inherits from the given class.
assert_inherits <- function(x, class, msg = NULL) {
  if (!inherits(x, class)) {
    throw_validate(msg %||% paste("x does not inherit from", class))
  }
}
# Throw a validation error unless x is an integer vector (typeof "integer").
assert_int <- function(x, msg = NULL) {
  if (!is.integer(x)) {
    throw_validate(msg %||% "x must be an integer vector.")
  }
}
# Throw a runtime error when there is no internet connection.
# Fix: the msg parameter was accepted but silently ignored; it is now used as
# the error message when supplied, falling back to the original default.
assert_internet <- function(msg = NULL) {
  assert_package("curl")
  if (!curl::has_internet()) {
    # This line cannot be covered in automated tests
    # because internet is usually on.
    throw_run(msg %||% "no internet") # nocov
  }
}
# Validate that every element of x is <= threshold.
assert_le <- function(x, threshold, msg = NULL) {
  if (any(x > threshold)) {
    throw_validate(msg %||% paste("x is greater than", threshold))
  }
}
# Throw a validation error unless x is a list.
assert_list <- function(x, msg = NULL) {
  if (!is.list(x)) {
    throw_validate(msg %||% "x must be a list.")
  }
}
# Throw a validation error unless x is a logical vector.
assert_lgl <- function(x, msg = NULL) {
  if (!is.logical(x)) {
    throw_validate(msg %||% "x must be logical.")
  }
}
# Validate a target name: a nonempty, syntactically valid symbol that does not
# end in a dot.
# NOTE(review): the multi-argument throw_validate() calls below assume
# throw_validate() concatenates its arguments -- confirm against its
# definition.
assert_name <- function(name) {
  assert_chr(name)
  assert_scalar(name)
  if (!nzchar(name)) {
    throw_validate("name must be a nonempty string.")
  }
  if (!identical(name, make.names(name))) {
    throw_validate(name, " is not a valid symbol name.")
  }
  if (grepl("\\.$", name)) {
    throw_validate(name, " ends with a dot.")
  }
}
# Throw a validation error when x has length zero.
assert_nonempty <- function(x, msg = NULL) {
  if (!length(x)) {
    throw_validate(msg %||% "x must not be empty")
  }
}
# Throw a validation error when x contains any NA values.
assert_nonmissing <- function(x, msg = NULL) {
  if (anyNA(x)) {
    throw_validate(msg %||% "x must have no missing values (NA's)")
  }
}
# Throw a validation error when x contains any empty strings.
assert_nzchar <- function(x, msg = NULL) {
  if (any(!nzchar(x))) {
    throw_validate(msg %||% "x has empty character strings")
  }
}
# Throw a validation error unless the named package is installed.
assert_package <- function(package, msg = NULL) {
  if (!requireNamespace(package, quietly = TRUE)) {
    throw_validate(msg %||% paste("package", package, "not installed"))
  }
}
# Throw a validation error listing any paths that do not exist on disk.
assert_path <- function(path, msg = NULL) {
  missing <- !file.exists(path)
  if (any(missing)) {
    throw_validate(
      msg %||% paste0(
        "missing files: ",
        paste(path[missing], collapse = ", ")
      )
    )
  }
}
# Throw a validation error unless x matches the regular expression pattern.
assert_match <- function(x, pattern, msg = NULL) {
  if (!grepl(pattern = pattern, x = x)) {
    throw_validate(msg %||% paste(x, "does not match pattern", pattern))
  }
}
# Throw a validation error unless every element of x is strictly positive.
assert_positive <- function(x, msg = NULL) {
  if (any(x <= 0)) {
    throw_validate(msg %||% paste("x is not all positive."))
  }
}
# Throw a validation error unless x has exactly one element.
assert_scalar <- function(x, msg = NULL) {
  if (length(x) != 1) {
    throw_validate(msg %||% "x must have length 1.")
  }
}
# Ensure the _targets/ data store exists before reading from it.
assert_store <- function() {
  assert_path(
    path_store(),
    paste(
      "utility functions like tar_read() and tar_progress() require a",
      " _targets/ data store produced by tar_make() or similar."
    )
  )
}
# Throw a validation error unless x is a tar_target() object.
assert_target <- function(x, msg = NULL) {
  msg <- msg %||% paste(
    "Found a non-target object.",
    "_targets.R must end with a list of tar_target() objects (recommended)",
    "or a tar_pipeline() object (deprecated)."
  )
  assert_inherits(x = x, class = "tar_target", msg = msg)
}
# Validate that x is a list whose elements are all target objects.
assert_target_list <- function(x) {
  msg <- paste(
    "_targets.R must end with a list of tar_target() objects (recommended)",
    "or a tar_pipeline() object (deprecated). Each element of the target list",
    "must be a target object or nested list of target objects."
  )
  assert_list(x, msg = msg)
  map(x, assert_target, msg = msg)
}
# Validate the _targets.R script: it must exist, and it must not itself call
# tar_make()-like functions, which would source _targets.R again and recurse
# forever.
# Fix: corrected the user-facing typo "Fucntions" -> "Functions".
assert_script <- function() {
  msg <- paste(
    "main functions like tar_make() require a special _targets.R script",
    "in the current working directory to define the pipeline.",
    "Functions tar_edit() and tar_script() can help."
  )
  assert_path(path_script(), msg)
  # Collect every symbol used in the script, including function names.
  vars <- all.vars(parse(file = path_script()), functions = TRUE)
  # tar_*() functions in these families would re-source _targets.R and must
  # not be called from within the script itself.
  exclude <- c(
    "glimpse",
    "make",
    "manifest",
    "network",
    "outdated",
    "prune",
    "renv",
    "sitrep",
    "validate",
    "visnetwork"
  )
  pattern <- paste(paste0("^tar_", exclude), collapse = "|")
  choices <- grep(pattern, getNamespaceExports("targets"), value = TRUE)
  msg <- paste(
    "_targets.R must not call tar_make() or similar functions",
    "that would source _targets.R again and cause infinite recursion."
  )
  assert_not_in(vars, choices, msg)
}
# Throw a validation error unless the scalar condition is TRUE.
# Fix: the default message was garbled ("does not evaluate not TRUE");
# corrected to "does not evaluate to TRUE".
assert_true <- function(condition, msg = NULL) {
  if (!condition) {
    throw_validate(msg %||% "condition does not evaluate to TRUE")
  }
}
# Throw a validation error listing any duplicated entries in x.
assert_unique <- function(x, msg = NULL) {
  if (anyDuplicated(x)) {
    dups <- paste(unique(x[duplicated(x)]), collapse = ", ")
    throw_validate(paste(msg %||% "duplicated entries:", dups))
  }
}
# Specialization of assert_unique() with a target-name-specific message.
assert_unique_targets <- function(x) {
  assert_unique(x, "duplicated target names:")
}
|
/R/utils_assert.R
|
permissive
|
russHyde/targets
|
R
| false
| false
| 7,113
|
r
|
# Validate that callr_function is either NULL or a function.
assert_callr_function <- function(callr_function) {
  if (!is.null(callr_function)) {
    assert_function(
      callr_function,
      "callr_function must be a function or NULL."
    )
  }
}
# Throw a validation error unless x is a character vector.
assert_chr <- function(x, msg = NULL) {
  if (!is.character(x)) {
    throw_validate(msg %||% "x must be a character.")
  }
}
# Character check plus a ban on the | and * delimiter characters.
assert_chr_no_delim <- function(x, msg = NULL) {
  assert_chr(x)
  if (any(grepl("|", x, fixed = TRUE) | grepl("*", x, fixed = TRUE))) {
    throw_validate(msg %||% "x must not contain | or *")
  }
}
# Ensure an object's field names exactly match its constructor's formals.
assert_correct_fields <- function(object, constructor) {
  assert_identical_chr(sort(names(object)), sort(names(formals(constructor))))
}
# Validate that x is an igraph object representing a directed acyclic graph.
assert_dag <- function(x, msg = NULL) {
  if (!inherits(x, "igraph") || !igraph::is_dag(x)) {
    throw_validate(msg %||% "x must be an igraph and directed acyclic graph.")
  }
}
# Throw a validation error unless x is numeric (integer or double).
assert_dbl <- function(x, msg = NULL) {
  if (!is.numeric(x)) {
    throw_validate(msg %||% "x must be numeric.")
  }
}
# Throw a validation error unless x is a data frame.
assert_df <- function(x, msg = NULL) {
  if (!is.data.frame(x)) {
    throw_validate(msg %||% "x must be a data frame.")
  }
}
# Throw a validation error unless x is an environment.
assert_envir <- function(x, msg = NULL) {
  if (!is.environment(x)) {
    throw_validate(msg %||% "x must be an environment")
  }
}
# Throw a validation error unless x is an expression object.
assert_expr <- function(x, msg = NULL) {
  if (!is.expression(x)) {
    throw_validate(msg %||% "x must be an expression.")
  }
}
# Validate a storage format: a scalar character naming a known format setting.
assert_format <- function(format) {
  assert_scalar(format)
  assert_chr(format)
  store_assert_format_setting(as_class(format))
}
# Throw a validation error unless x is a function.
assert_function <- function(x, msg = NULL) {
  if (!is.function(x)) {
    throw_validate(msg %||% "x must be a function.")
  }
}
# Validate that every element of x is >= threshold.
assert_ge <- function(x, threshold, msg = NULL) {
  if (any(x < threshold)) {
    throw_validate(msg %||% paste("x is less than", threshold))
  }
}
# Throw a validation error unless x and y are identical().
assert_identical <- function(x, y, msg = NULL) {
  if (!identical(x, y)) {
    throw_validate(msg %||% "x and y are not identical.")
  }
}
# Like assert_identical() but deparses both values into the error message.
assert_identical_chr <- function(x, y, msg = NULL) {
  if (!identical(x, y)) {
    msg_x <- paste0(deparse(x), collapse = "")
    msg_y <- paste0(deparse(y), collapse = "")
    throw_validate(msg %||% paste(msg_x, "and", msg_y, "not identical."))
  }
}
# Validate that every element of x is among choices.
assert_in <- function(x, choices, msg = NULL) {
  if (!all(x %in% choices)) {
    throw_validate(msg %||% paste(deparse(x), "is not in ", deparse(choices)))
  }
}
# Validate that no element of x is among choices.
assert_not_in <- function(x, choices, msg = NULL) {
  if (any(x %in% choices)) {
    throw_validate(msg %||% paste(deparse(x), "is in", deparse(choices)))
  }
}
# Throw a validation error unless x inherits from the given class.
assert_inherits <- function(x, class, msg = NULL) {
  if (!inherits(x, class)) {
    throw_validate(msg %||% paste("x does not inherit from", class))
  }
}
# Throw a validation error unless x is an integer vector (typeof "integer").
assert_int <- function(x, msg = NULL) {
  if (!is.integer(x)) {
    throw_validate(msg %||% "x must be an integer vector.")
  }
}
# Throw a runtime error when there is no internet connection.
# Fix: the msg parameter was accepted but silently ignored; it is now used as
# the error message when supplied, falling back to the original default.
assert_internet <- function(msg = NULL) {
  assert_package("curl")
  if (!curl::has_internet()) {
    # This line cannot be covered in automated tests
    # because internet is usually on.
    throw_run(msg %||% "no internet") # nocov
  }
}
# Validate that every element of x is <= threshold.
assert_le <- function(x, threshold, msg = NULL) {
  if (any(x > threshold)) {
    throw_validate(msg %||% paste("x is greater than", threshold))
  }
}
# Throw a validation error unless x is a list.
assert_list <- function(x, msg = NULL) {
  if (!is.list(x)) {
    throw_validate(msg %||% "x must be a list.")
  }
}
# Throw a validation error unless x is a logical vector.
assert_lgl <- function(x, msg = NULL) {
  if (!is.logical(x)) {
    throw_validate(msg %||% "x must be logical.")
  }
}
# Validate a target name: a nonempty, syntactically valid symbol that does not
# end in a dot.
# NOTE(review): the multi-argument throw_validate() calls below assume
# throw_validate() concatenates its arguments -- confirm against its
# definition.
assert_name <- function(name) {
  assert_chr(name)
  assert_scalar(name)
  if (!nzchar(name)) {
    throw_validate("name must be a nonempty string.")
  }
  if (!identical(name, make.names(name))) {
    throw_validate(name, " is not a valid symbol name.")
  }
  if (grepl("\\.$", name)) {
    throw_validate(name, " ends with a dot.")
  }
}
# Throw a validation error when x has length zero.
assert_nonempty <- function(x, msg = NULL) {
  if (!length(x)) {
    throw_validate(msg %||% "x must not be empty")
  }
}
# Throw a validation error when x contains any NA values.
assert_nonmissing <- function(x, msg = NULL) {
  if (anyNA(x)) {
    throw_validate(msg %||% "x must have no missing values (NA's)")
  }
}
# Throw a validation error when x contains any empty strings.
assert_nzchar <- function(x, msg = NULL) {
  if (any(!nzchar(x))) {
    throw_validate(msg %||% "x has empty character strings")
  }
}
# Throw a validation error unless the named package is installed.
assert_package <- function(package, msg = NULL) {
  if (!requireNamespace(package, quietly = TRUE)) {
    throw_validate(msg %||% paste("package", package, "not installed"))
  }
}
# Throw a validation error listing any paths that do not exist on disk.
assert_path <- function(path, msg = NULL) {
  missing <- !file.exists(path)
  if (any(missing)) {
    throw_validate(
      msg %||% paste0(
        "missing files: ",
        paste(path[missing], collapse = ", ")
      )
    )
  }
}
# Throw a validation error unless x matches the regular expression pattern.
assert_match <- function(x, pattern, msg = NULL) {
  if (!grepl(pattern = pattern, x = x)) {
    throw_validate(msg %||% paste(x, "does not match pattern", pattern))
  }
}
# Throw a validation error unless every element of x is strictly positive.
assert_positive <- function(x, msg = NULL) {
  if (any(x <= 0)) {
    throw_validate(msg %||% paste("x is not all positive."))
  }
}
# Throw a validation error unless x has exactly one element.
assert_scalar <- function(x, msg = NULL) {
  if (length(x) != 1) {
    throw_validate(msg %||% "x must have length 1.")
  }
}
# Ensure the _targets/ data store exists before reading from it.
assert_store <- function() {
  assert_path(
    path_store(),
    paste(
      "utility functions like tar_read() and tar_progress() require a",
      " _targets/ data store produced by tar_make() or similar."
    )
  )
}
# Throw a validation error unless x is a tar_target() object.
assert_target <- function(x, msg = NULL) {
  msg <- msg %||% paste(
    "Found a non-target object.",
    "_targets.R must end with a list of tar_target() objects (recommended)",
    "or a tar_pipeline() object (deprecated)."
  )
  assert_inherits(x = x, class = "tar_target", msg = msg)
}
# Validate that x is a list whose elements are all target objects.
assert_target_list <- function(x) {
  msg <- paste(
    "_targets.R must end with a list of tar_target() objects (recommended)",
    "or a tar_pipeline() object (deprecated). Each element of the target list",
    "must be a target object or nested list of target objects."
  )
  assert_list(x, msg = msg)
  map(x, assert_target, msg = msg)
}
# Validate the _targets.R pipeline script:
# 1. it must exist in the current working directory, and
# 2. it must not call tar_make()-like functions, which would source
#    _targets.R again and cause infinite recursion.
# Fix: corrected typo "Fucntions" -> "Functions" in the user-facing message.
assert_script <- function() {
  msg <- paste(
    "main functions like tar_make() require a special _targets.R script",
    "in the current working directory to define the pipeline.",
    "Functions tar_edit() and tar_script() can help."
  )
  assert_path(path_script(), msg)
  # All names referenced anywhere in the script (including function calls).
  vars <- all.vars(parse(file = path_script()), functions = TRUE)
  # Suffixes of exported tar_* functions that are FORBIDDEN inside
  # _targets.R because they would re-source the script.
  exclude <- c(
    "glimpse",
    "make",
    "manifest",
    "network",
    "outdated",
    "prune",
    "renv",
    "sitrep",
    "validate",
    "visnetwork"
  )
  pattern <- paste(paste0("^tar_", exclude), collapse = "|")
  choices <- grep(pattern, getNamespaceExports("targets"), value = TRUE)
  msg <- paste(
    "_targets.R must not call tar_make() or similar functions",
    "that would source _targets.R again and cause infinite recursion."
  )
  assert_not_in(vars, choices, msg)
}
# Validate that a scalar condition is TRUE.
# Fix: default message previously read "condition does not evaluate not TRUE".
assert_true <- function(condition, msg = NULL) {
  if (!condition) {
    throw_validate(msg %||% "condition does not evaluate to TRUE")
  }
}
# Validate that `x` has no duplicated entries; the error lists each
# duplicated value once, comma-separated.
#
# @param x Vector to check.
# @param msg Optional message prefix (defaults to "duplicated entries:").
assert_unique <- function(x, msg = NULL) {
  if (anyDuplicated(x) > 0L) {
    dup_values <- unique(x[duplicated(x)])
    listing <- paste(dup_values, collapse = ", ")
    throw_validate(paste(msg %||% "duplicated entries:", listing))
  }
}
# Convenience wrapper: validate that target names are unique, using a
# target-specific error-message prefix.
assert_unique_targets <- function(x) {
  assert_unique(x, "duplicated target names:")
}
|
# Loading libraries
library("easypackages")
libraries("tidyverse", "tidyquant", "gganimate")
# Reading in data directly from github
climate_spend_raw <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-02-12/climate_spending.csv", col_types = "cin")
# This initial conditioning need not have involved the date manipulation, as the year extracted from a date object is still a double.
# Add a Date column (Jan 1 of each year) and a human-readable spending label
# in billions of dollars (e.g. "$1.1B").
climate_spend_conditioned <- climate_spend_raw %>%
  mutate(year_dt = str_glue("{year}-01-01")) %>%
  mutate(year_dt = as.Date(year_dt)) %>%
  mutate(gcc_spending_txt = scales::dollar(gcc_spending,
                                           scale = 1e-09,
                                           suffix = "B"
                                           )
         )
# Aggregate: total spending per department per year, plus a formatted label
# column used as the y aesthetic in the plotting function below.
climate_spend_dept_y <- climate_spend_conditioned %>%
  group_by(department, year_dt = year(year_dt)) %>%
  summarise(
    tot_spend_dept_y = sum(gcc_spending)) %>%
  mutate(tot_spend_dept_y_txt = tot_spend_dept_y %>%
           scales::dollar(scale = 1e-09,
                          suffix = "B")
         ) %>%
  ungroup()
glimpse(climate_spend_dept_y)
# Plot total climate R&D spending per department, faceted by year.
#
# @param data summarised spending data (one row per department-year)
# @param y_range_low first year (inclusive) to plot
# @param y_range_hi last year (inclusive) to plot
# @param ncol number of facet columns
# @param caption caption text added to the plot
climate_spend_plt_fn <- function(
  data,
  y_range_low = 2000,
  y_range_hi = 2010,
  ncol = 3,
  caption = ""
  )
{
  data %>%
    filter(year_dt >= y_range_low & year_dt <= y_range_hi) %>%
    # NOTE(review): y maps the formatted text column (e.g. "$1.1B") --
    # confirm a character y-axis is intended here.
    ggplot(aes(y = tot_spend_dept_y_txt, x = department, fill = department)) +
    geom_col() +
    # Bug fix: ncol was hard-coded to 3, silently ignoring the `ncol`
    # argument; it now honours the parameter.
    facet_wrap(~ year_dt,
               ncol = ncol,
               scales = "free_y") +
    theme_tq() +
    scale_fill_tq(theme = "dark") +
    theme(
      axis.text.x = element_text(angle = 45,
                                 hjust = 1.2),
      legend.position = "none",
      plot.background = element_rect(fill = "#f7f7f7")
    ) +
    labs(
      title = str_glue("Federal R&D budget towards Climate Change: {y_range_low}-{y_range_hi}"),
      x = "Department",
      y = "Total Budget $ Billion",
      subtitle = "NASA literally dwarfs all the other departments, getting to spend upwards of 1.1 Billion dollars every year since 2000.",
      caption = caption
    )
}
# Render the department spending plot for 2000-2017 with attribution caption.
climate_spend_plt_fn(climate_spend_dept_y,
                     y_range_low = 2000,
                     y_range_hi = 2017,
                     caption = "#TidyTuesday:\nDataset 2019-02-12\nShreyas Ragavan"
                     )
## The remaining code is partially complete and is in place for further exploration planned in the future.
## Code to download all the data.
## fed_rd <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-02-12/fed_r_d_spending.csv")
## energy_spend <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-02-12/energy_spending.csv")
## climate_spend <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-02-12/climate_spending.csv")
## climate_spend_pct_all <- climate_spend_conditioned %>%
## group_by(year_dt = year(year_dt)) %>%
## summarise(
## tot_spend_all_y = sum(gcc_spending)
## ) %>%
## mutate(tot_spend_all_y_txt = tot_spend_all_y %>%
## scales::dollar(scale = 1e-09,
## suffix = "B"
## )
## )%>%
## ungroup() %>%
## mutate(tot_spend_all_lag = lag(tot_spend_all_y, 1)) %>%
## tidyr::fill(tot_spend_all_lag ,.direction = "up") %>%
## mutate(tot_spend_all_pct = (tot_spend_all_y - tot_spend_all_lag)/ tot_spend_all_y,
## tot_spend_all_pct_txt = scales::percent(tot_spend_all_pct, accuracy = 1e-02)
## )
|
/00_scripts/p1_climate_spending.R
|
no_license
|
shrysr/sr-tidytuesday
|
R
| false
| false
| 4,000
|
r
|
# Loading libraries
library("easypackages")
libraries("tidyverse", "tidyquant", "gganimate")
# Reading in data directly from github
climate_spend_raw <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-02-12/climate_spending.csv", col_types = "cin")
# This initial conditioning need not have involved the date manipulation, as the year extracted from a date object is still a double.
# Add a Date column (Jan 1 of each year) and a human-readable spending label
# in billions of dollars (e.g. "$1.1B").
climate_spend_conditioned <- climate_spend_raw %>%
  mutate(year_dt = str_glue("{year}-01-01")) %>%
  mutate(year_dt = as.Date(year_dt)) %>%
  mutate(gcc_spending_txt = scales::dollar(gcc_spending,
                                           scale = 1e-09,
                                           suffix = "B"
                                           )
         )
# Aggregate: total spending per department per year, plus a formatted label
# column used as the y aesthetic in the plotting function below.
climate_spend_dept_y <- climate_spend_conditioned %>%
  group_by(department, year_dt = year(year_dt)) %>%
  summarise(
    tot_spend_dept_y = sum(gcc_spending)) %>%
  mutate(tot_spend_dept_y_txt = tot_spend_dept_y %>%
           scales::dollar(scale = 1e-09,
                          suffix = "B")
         ) %>%
  ungroup()
glimpse(climate_spend_dept_y)
# Plot total climate R&D spending per department, faceted by year.
#
# @param data summarised spending data (one row per department-year)
# @param y_range_low first year (inclusive) to plot
# @param y_range_hi last year (inclusive) to plot
# @param ncol number of facet columns
# @param caption caption text added to the plot
climate_spend_plt_fn <- function(
  data,
  y_range_low = 2000,
  y_range_hi = 2010,
  ncol = 3,
  caption = ""
  )
{
  data %>%
    filter(year_dt >= y_range_low & year_dt <= y_range_hi) %>%
    # NOTE(review): y maps the formatted text column (e.g. "$1.1B") --
    # confirm a character y-axis is intended here.
    ggplot(aes(y = tot_spend_dept_y_txt, x = department, fill = department)) +
    geom_col() +
    # Bug fix: ncol was hard-coded to 3, silently ignoring the `ncol`
    # argument; it now honours the parameter.
    facet_wrap(~ year_dt,
               ncol = ncol,
               scales = "free_y") +
    theme_tq() +
    scale_fill_tq(theme = "dark") +
    theme(
      axis.text.x = element_text(angle = 45,
                                 hjust = 1.2),
      legend.position = "none",
      plot.background = element_rect(fill = "#f7f7f7")
    ) +
    labs(
      title = str_glue("Federal R&D budget towards Climate Change: {y_range_low}-{y_range_hi}"),
      x = "Department",
      y = "Total Budget $ Billion",
      subtitle = "NASA literally dwarfs all the other departments, getting to spend upwards of 1.1 Billion dollars every year since 2000.",
      caption = caption
    )
}
# Render the department spending plot for 2000-2017 with attribution caption.
climate_spend_plt_fn(climate_spend_dept_y,
                     y_range_low = 2000,
                     y_range_hi = 2017,
                     caption = "#TidyTuesday:\nDataset 2019-02-12\nShreyas Ragavan"
                     )
## The remaining code is partially complete and is in place for further exploration planned in the future.
## Code to download all the data.
## fed_rd <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-02-12/fed_r_d_spending.csv")
## energy_spend <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-02-12/energy_spending.csv")
## climate_spend <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-02-12/climate_spending.csv")
## climate_spend_pct_all <- climate_spend_conditioned %>%
## group_by(year_dt = year(year_dt)) %>%
## summarise(
## tot_spend_all_y = sum(gcc_spending)
## ) %>%
## mutate(tot_spend_all_y_txt = tot_spend_all_y %>%
## scales::dollar(scale = 1e-09,
## suffix = "B"
## )
## )%>%
## ungroup() %>%
## mutate(tot_spend_all_lag = lag(tot_spend_all_y, 1)) %>%
## tidyr::fill(tot_spend_all_lag ,.direction = "up") %>%
## mutate(tot_spend_all_pct = (tot_spend_all_y - tot_spend_all_lag)/ tot_spend_all_y,
## tot_spend_all_pct_txt = scales::percent(tot_spend_all_pct, accuracy = 1e-02)
## )
|
# Batch driver: writes one PBS shell script per bird species and submits it
# with qsub, each job running 05.run.pot.mat.r for that species on an HPC
# cluster. Side effects only (file creation, setwd, job submission).
library(SDMTools)
species=list.files('/home/jc148322/Bird_NARP/models_1km/')
sh.dir='/home/jc148322/scripts/NARP_birds/pot_mat/';dir.create(sh.dir) #dir to write sh scripts to
# NOTE(review): iteration starts at species[31] -- presumably the first 30
# species were processed in an earlier run; confirm before reusing.
for (spp in species[31:length(species)]){ cat(spp, '\n')
setwd(sh.dir)
##create the sh file
zz = file(paste('05.',spp,'.pot.mat.sh',sep=''),'w')
cat('#!/bin/bash\n',file=zz)
cat('cd $PBS_O_WORKDIR\n',file=zz)
cat("R CMD BATCH --no-save --no-restore '--args spp=\"",spp,"\" ' ~/scripts/NARP_birds/05.run.pot.mat.r 05.",spp,'.pot.mat.Rout \n',sep='',file=zz) #run the R script in the background
close(zz)
##submit the script
system(paste('qsub -l nodes=1:ppn=2 05.',spp,'.pot.mat.sh',sep=''))
}
|
/summaries and images/05.batch.pot.mat.r
|
no_license
|
jjvanderwal/NCCARF_bird_impacts
|
R
| false
| false
| 690
|
r
|
# Batch driver: writes one PBS shell script per bird species and submits it
# with qsub, each job running 05.run.pot.mat.r for that species on an HPC
# cluster. Side effects only (file creation, setwd, job submission).
library(SDMTools)
species=list.files('/home/jc148322/Bird_NARP/models_1km/')
sh.dir='/home/jc148322/scripts/NARP_birds/pot_mat/';dir.create(sh.dir) #dir to write sh scripts to
# NOTE(review): iteration starts at species[31] -- presumably the first 30
# species were processed in an earlier run; confirm before reusing.
for (spp in species[31:length(species)]){ cat(spp, '\n')
setwd(sh.dir)
##create the sh file
zz = file(paste('05.',spp,'.pot.mat.sh',sep=''),'w')
cat('#!/bin/bash\n',file=zz)
cat('cd $PBS_O_WORKDIR\n',file=zz)
cat("R CMD BATCH --no-save --no-restore '--args spp=\"",spp,"\" ' ~/scripts/NARP_birds/05.run.pot.mat.r 05.",spp,'.pot.mat.Rout \n',sep='',file=zz) #run the R script in the background
close(zz)
##submit the script
system(paste('qsub -l nodes=1:ppn=2 05.',spp,'.pot.mat.sh',sep=''))
}
|
<html>
<head>
<meta name="TextLength" content="SENT_NUM:6, WORD_NUM:102">
</head>
<body bgcolor="white">
<a href="#0" id="0">Ship Sinks, Crew Rescued.</a>
<a href="#1" id="1">He did not give the ship's origin or destination.</a>
<a href="#2" id="2">Earlier reports had said that the ship was a passenger ferry, possibly one of the Dutch ships plying the frequent ferry lanes between the coastal islands and the Dutch mainland.</a>
<a href="#3" id="3">It was not immediately clear what had caused the sinking 29 miles off the Dutch island of Ameland, according to the coast guard spokesman, who was not identified.</a>
<a href="#4" id="4">There were no casualties, he said.</a>
<a href="#5" id="5">Freight shipping in the area is also heavy because of its vicinity to the ports of Delfzijl in the Netherlands and Emden in West Germany.</a>
</body>
</html>
|
/DUC-Dataset/Summary_p100_R/D111.AP880913-0070.html.R
|
no_license
|
Angela7126/SLNSumEval
|
R
| false
| false
| 854
|
r
|
<html>
<head>
<meta name="TextLength" content="SENT_NUM:6, WORD_NUM:102">
</head>
<body bgcolor="white">
<a href="#0" id="0">Ship Sinks, Crew Rescued.</a>
<a href="#1" id="1">He did not give the ship's origin or destination.</a>
<a href="#2" id="2">Earlier reports had said that the ship was a passenger ferry, possibly one of the Dutch ships plying the frequent ferry lanes between the coastal islands and the Dutch mainland.</a>
<a href="#3" id="3">It was not immediately clear what had caused the sinking 29 miles off the Dutch island of Ameland, according to the coast guard spokesman, who was not identified.</a>
<a href="#4" id="4">There were no casualties, he said.</a>
<a href="#5" id="5">Freight shipping in the area is also heavy because of its vicinity to the ports of Delfzijl in the Netherlands and Emden in West Germany.</a>
</body>
</html>
|
# These two functions work together to check if a supplied matrix already has an inverse calculated
# if it does then it is just retrieved from the cache object and if not it is colculated using solve()
# use by running: aMatrixObject <- makeCacheMatrix() to make the special matrix object
# then use this objects set method to put your matrix of interest (c) in like so: aMatrixObject$set(c)
# and can then calc the inverse the first time using cacheSolve(aMatrixObject)
# any other attempts to get the inverse of this metrix will pull it from the cache
## makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
# It has getter and setter methods that are part of the new objects environment and
# these allow other functions (eg cacheSolve) to access variables from this parent environment (x and m in this case)
# Create a special "matrix" object that can cache its inverse.
# Returns a list of accessor functions whose shared enclosing environment
# stores the matrix `x` and the cached inverse `m`.
# Fix: removed the stray debug call `print(x, m)`, which printed the matrix
# on every construction and passed the NULL cache as a positional argument.
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    m <<- NULL  # invalidate the cache when a new matrix is stored
  }
  get <- function() x
  setinverse <- function(inverse) m <<- inverse
  getinverse <- function() m
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
# If the inverse has already been calculated (and the matrix has not changed),
# then the cachesolve should retrieve the inverse from the cache.
# Compute the inverse of the special "matrix" created by makeCacheMatrix.
# If the inverse is already cached (and the matrix unchanged), the cached
# value is returned; otherwise it is computed with solve() and cached.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
|
/cachematrix.R
|
no_license
|
cascadenite/ProgrammingAssignment2
|
R
| false
| false
| 1,692
|
r
|
# These two functions work together to check if a supplied matrix already has an inverse calculated
# if it does then it is just retrieved from the cache object and if not it is colculated using solve()
# use by running: aMatrixObject <- makeCacheMatrix() to make the special matrix object
# then use this objects set method to put your matrix of interest (c) in like so: aMatrixObject$set(c)
# and can then calc the inverse the first time using cacheSolve(aMatrixObject)
# any other attempts to get the inverse of this metrix will pull it from the cache
## makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
# It has getter and setter methods that are part of the new objects environment and
# these allow other functions (eg cacheSolve) to access variables from this parent environment (x and m in this case)
# Create a special "matrix" object that can cache its inverse.
# Returns a list of accessor functions whose shared enclosing environment
# stores the matrix `x` and the cached inverse `m`.
# Fix: removed the stray debug call `print(x, m)`, which printed the matrix
# on every construction and passed the NULL cache as a positional argument.
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    m <<- NULL  # invalidate the cache when a new matrix is stored
  }
  get <- function() x
  setinverse <- function(inverse) m <<- inverse
  getinverse <- function() m
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
# If the inverse has already been calculated (and the matrix has not changed),
# then the cachesolve should retrieve the inverse from the cache.
# Compute the inverse of the special "matrix" created by makeCacheMatrix.
# If the inverse is already cached (and the matrix unchanged), the cached
# value is returned; otherwise it is computed with solve() and cached.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stable_kendall_distribution.R
\name{pkendSym}
\alias{pkendSym}
\title{CDF of symmetrical Kendall stable distribution}
\usage{
pkendSym(m_alpha)
}
\arguments{
\item{m_alpha}{function giving moments of order alpha of step dist.}
}
\value{
A function giving values of the CDF of the Kendall stable distribution
}
\description{
CDF of symmetrical Kendall stable distribution
}
\examples{
pKend <- pkendSym(function(x) 1)
# Step distribution: delta_{1}
pKendall <- pKend(1:10, 0.5)
# Values of CDF for arguments 1:10 and alpha = 0.5
}
|
/man/pkendSym.Rd
|
permissive
|
mstaniak/kendallRandomPackage
|
R
| false
| true
| 609
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stable_kendall_distribution.R
\name{pkendSym}
\alias{pkendSym}
\title{CDF of symmetrical Kendall stable distribution}
\usage{
pkendSym(m_alpha)
}
\arguments{
\item{m_alpha}{function giving moments of order alpha of step dist.}
}
\value{
A function giving values of the CDF of the Kendall stable distribution
}
\description{
CDF of symmetrical Kendall stable distribution
}
\examples{
pKend <- pkendSym(function(x) 1)
# Step distribution: delta_{1}
pKendall <- pKend(1:10, 0.5)
# Values of CDF for arguments 1:10 and alpha = 0.5
}
|
context("indention square brackets")
# Regression test: styling the square-bracket test collection must complete
# without raising any warning.
test_that("square brackets cause indention", {
  # expect_warning(expr, NA) asserts that NO warning is emitted by expr.
  expect_warning(test_collection(
    "indention_square_brackets",
    "square_brackets_line_break",
    transformer = style_text
  ), NA)
})
|
/tests/testthat/test-square_brackets.R
|
permissive
|
lorenzwalthert/styler
|
R
| false
| false
| 227
|
r
|
context("indention square brackets")
# Regression test: styling the square-bracket test collection must complete
# without raising any warning.
test_that("square brackets cause indention", {
  # expect_warning(expr, NA) asserts that NO warning is emitted by expr.
  expect_warning(test_collection(
    "indention_square_brackets",
    "square_brackets_line_break",
    transformer = style_text
  ), NA)
})
|
# +++Goals+++
#Aim of this project is to simulate Protein Mass Spectra from the Protein Sequence. This includes simulation of the isotope pattern, the pattern generated by several charge states which are usually observed. In a later stage the resolution of the mass spectrometer shall be included to have an idea of the influence of the resolution to the observed isotopic pattern.
#Further features might be the simulation of a mass spectrum by UniProt Accession instead of Protein Sequence, which is possible by looking up the sequence from UniProt, and the generation of a comparision plot to a measured spectrum for protein identification and publication purposes.
# +++ToDo+++
#* Add code to calculate chemical formula from protein sequence, allow addition of common modfications
#* Utilize isotope distribution simulation from http://orgmassspec.github.io/
#* Add spectra generation
#* Add resolution simulation for orbitrap mass spectrometers
#* Add spectra import of as a list of mass and intensity
#* Plot head to tails comparision plot with ggplot
# +++Code+++
#source("https://bioconductor.org/biocLite.R")
#biocLite("UniProt.ws")
library(UniProt.ws)
#library("OrgMassSpecR")
library("ggplot2")
library("plyr")
# Load the measured myoglobin spectrum (two columns: m/z and intensity).
myo <- read.csv2("Spectrum_Myoglobin.csv", sep = ",", dec = "." )
colnames(myo) <- c("mz", "intensity")
# Base plot of the measured spectrum; `p` accumulates layers throughout the
# script, so later overlays are all drawn on the same plot object.
p <- ggplot()
p <- p + geom_line(data = myo, aes(mz, intensity))
p + xlim(c(807.5,812))
# Charge states to simulate (35+ down to 10+).
crange <- 35:10
# NOTE(review): fetchProteinSequence, generateChargedDist and fitIntensity
# are not defined in this file -- presumably project helpers that must be
# sourced beforehand; confirm.
protein <- fetchProteinSequence(uniprotSpeciesName = "Equus caballus", proteinAccession = "P68082")
# Unmodified protein: simulate the charged isotope distribution, scale it to
# the measured spectrum, and overlay it in red.
myo_sim <- generateChargedDist(proteinSequence = protein, charge = crange, removeFirstAA = TRUE)
myo_sim <- fitIntensity(measuredSpectrum = myo, simulatedSpectrum = myo_sim)
p <- p + geom_linerange(data = myo_sim, aes(mz, ymin = 0, ymax = intensity, colour = "red"))
p <- p + geom_line(data = myo_sim, aes(mz, intensity, colour = "red"))
p + xlim(c(807.5,812))
# The blocks below repeat the simulation for candidate chemical modifications.
# NOTE(review): each block ADDS layers to the same `p`, so every variant ends
# up overlaid on the final saved plot -- confirm the overlay is intentional.
# +1 oxygen.
myo_sim <- generateChargedDist(proteinSequence = protein, charge = crange, removeFirstAA = TRUE, modification = list( O=1))
myo_sim <- fitIntensity(measuredSpectrum = myo, simulatedSpectrum = myo_sim)
p <- p + geom_linerange(data = myo_sim, aes(mz, ymin = 0, ymax = intensity, colour = "red"))
p <- p + geom_line(data = myo_sim, aes(mz, intensity, colour = "red"))
p + xlim(c(807.5,812))
# +2 oxygens.
myo_sim <- generateChargedDist(proteinSequence = protein, charge = crange, removeFirstAA = TRUE, modification = list( O=2))
myo_sim <- fitIntensity(measuredSpectrum = myo, simulatedSpectrum = myo_sim)
p <- p + geom_linerange(data = myo_sim, aes(mz, ymin = 0, ymax = intensity, colour = "red"))
p <- p + geom_line(data = myo_sim, aes(mz, intensity, colour = "red"))
p + xlim(c(807.5,812))
# +3 oxygens.
myo_sim <- generateChargedDist(proteinSequence = protein, charge = crange, removeFirstAA = TRUE, modification = list( O=3))
myo_sim <- fitIntensity(measuredSpectrum = myo, simulatedSpectrum = myo_sim)
p <- p + geom_linerange(data = myo_sim, aes(mz, ymin = 0, ymax = intensity, colour = "red"))
p <- p + geom_line(data = myo_sim, aes(mz, intensity, colour = "red"))
p + xlim(c(807.5,812))
# +4 oxygens.
myo_sim <- generateChargedDist(proteinSequence = protein, charge = crange, removeFirstAA = TRUE, modification = list( O=4))
myo_sim <- fitIntensity(measuredSpectrum = myo, simulatedSpectrum = myo_sim)
p <- p + geom_linerange(data = myo_sim, aes(mz, ymin = 0, ymax = intensity, colour = "red"))
p <- p + geom_line(data = myo_sim, aes(mz, intensity, colour = "red"))
p + xlim(c(807.5,812))
# +2 oxygens, +1 phosphorus.
myo_sim <- generateChargedDist(proteinSequence = protein, charge = crange, removeFirstAA = TRUE, modification = list( O=2, P=1))
myo_sim <- fitIntensity(measuredSpectrum = myo, simulatedSpectrum = myo_sim)
p <- p + geom_linerange(data = myo_sim, aes(mz, ymin = 0, ymax = intensity, colour = "red"))
p <- p + geom_line(data = myo_sim, aes(mz, intensity, colour = "red"))
p + xlim(c(807.5,812))
# Water loss (-H2O).
myo_sim <- generateChargedDist(proteinSequence = protein, charge = crange, removeFirstAA = TRUE, modification = list( H=-2, O=-1))
myo_sim <- fitIntensity(measuredSpectrum = myo, simulatedSpectrum = myo_sim)
p <- p + geom_linerange(data = myo_sim, aes(mz, ymin = 0, ymax = intensity, colour = "red"))
p <- p + geom_line(data = myo_sim, aes(mz, intensity, colour = "red"))
p + xlim(c(807.5,812))
# Inspect other charge-state windows.
p + xlim(c(677,685))
p + xlim(c(1058,1065))
p + xlim(c(530,535))
# Fix the final window and save the demo figure.
p <- p + xlim(c(807.5,812))
ggsave(filename = "DemoPlot.png", plot = p)
|
/PRoteinMassSpecSim.R
|
no_license
|
AChemist/PRoteinMassSpecSim
|
R
| false
| false
| 4,403
|
r
|
# +++Goals+++
#Aim of this project is to simulate Protein Mass Spectra from the Protein Sequence. This includes simulation of the isotope pattern, the pattern generated by several charge states which are usually observed. In a later stage the resolution of the mass spectrometer shall be included to have an idea of the influence of the resolution to the observed isotopic pattern.
#Further features might be the simulation of a mass spectrum by UniProt Accession instead of Protein Sequence, which is possible by looking up the sequence from UniProt, and the generation of a comparision plot to a measured spectrum for protein identification and publication purposes.
# +++ToDo+++
#* Add code to calculate chemical formula from protein sequence, allow addition of common modfications
#* Utilize isotope distribution simulation from http://orgmassspec.github.io/
#* Add spectra generation
#* Add resolution simulation for orbitrap mass spectrometers
#* Add spectra import of as a list of mass and intensity
#* Plot head to tails comparision plot with ggplot
# +++Code+++
#source("https://bioconductor.org/biocLite.R")
#biocLite("UniProt.ws")
library(UniProt.ws)
#library("OrgMassSpecR")
library("ggplot2")
library("plyr")
# Load the measured myoglobin spectrum (two columns: m/z and intensity).
myo <- read.csv2("Spectrum_Myoglobin.csv", sep = ",", dec = "." )
colnames(myo) <- c("mz", "intensity")
# Base plot of the measured spectrum; `p` accumulates layers throughout the
# script, so later overlays are all drawn on the same plot object.
p <- ggplot()
p <- p + geom_line(data = myo, aes(mz, intensity))
p + xlim(c(807.5,812))
# Charge states to simulate (35+ down to 10+).
crange <- 35:10
# NOTE(review): fetchProteinSequence, generateChargedDist and fitIntensity
# are not defined in this file -- presumably project helpers that must be
# sourced beforehand; confirm.
protein <- fetchProteinSequence(uniprotSpeciesName = "Equus caballus", proteinAccession = "P68082")
# Unmodified protein: simulate the charged isotope distribution, scale it to
# the measured spectrum, and overlay it in red.
myo_sim <- generateChargedDist(proteinSequence = protein, charge = crange, removeFirstAA = TRUE)
myo_sim <- fitIntensity(measuredSpectrum = myo, simulatedSpectrum = myo_sim)
p <- p + geom_linerange(data = myo_sim, aes(mz, ymin = 0, ymax = intensity, colour = "red"))
p <- p + geom_line(data = myo_sim, aes(mz, intensity, colour = "red"))
p + xlim(c(807.5,812))
# The blocks below repeat the simulation for candidate chemical modifications.
# NOTE(review): each block ADDS layers to the same `p`, so every variant ends
# up overlaid on the final saved plot -- confirm the overlay is intentional.
# +1 oxygen.
myo_sim <- generateChargedDist(proteinSequence = protein, charge = crange, removeFirstAA = TRUE, modification = list( O=1))
myo_sim <- fitIntensity(measuredSpectrum = myo, simulatedSpectrum = myo_sim)
p <- p + geom_linerange(data = myo_sim, aes(mz, ymin = 0, ymax = intensity, colour = "red"))
p <- p + geom_line(data = myo_sim, aes(mz, intensity, colour = "red"))
p + xlim(c(807.5,812))
# +2 oxygens.
myo_sim <- generateChargedDist(proteinSequence = protein, charge = crange, removeFirstAA = TRUE, modification = list( O=2))
myo_sim <- fitIntensity(measuredSpectrum = myo, simulatedSpectrum = myo_sim)
p <- p + geom_linerange(data = myo_sim, aes(mz, ymin = 0, ymax = intensity, colour = "red"))
p <- p + geom_line(data = myo_sim, aes(mz, intensity, colour = "red"))
p + xlim(c(807.5,812))
# +3 oxygens.
myo_sim <- generateChargedDist(proteinSequence = protein, charge = crange, removeFirstAA = TRUE, modification = list( O=3))
myo_sim <- fitIntensity(measuredSpectrum = myo, simulatedSpectrum = myo_sim)
p <- p + geom_linerange(data = myo_sim, aes(mz, ymin = 0, ymax = intensity, colour = "red"))
p <- p + geom_line(data = myo_sim, aes(mz, intensity, colour = "red"))
p + xlim(c(807.5,812))
# +4 oxygens.
myo_sim <- generateChargedDist(proteinSequence = protein, charge = crange, removeFirstAA = TRUE, modification = list( O=4))
myo_sim <- fitIntensity(measuredSpectrum = myo, simulatedSpectrum = myo_sim)
p <- p + geom_linerange(data = myo_sim, aes(mz, ymin = 0, ymax = intensity, colour = "red"))
p <- p + geom_line(data = myo_sim, aes(mz, intensity, colour = "red"))
p + xlim(c(807.5,812))
# +2 oxygens, +1 phosphorus.
myo_sim <- generateChargedDist(proteinSequence = protein, charge = crange, removeFirstAA = TRUE, modification = list( O=2, P=1))
myo_sim <- fitIntensity(measuredSpectrum = myo, simulatedSpectrum = myo_sim)
p <- p + geom_linerange(data = myo_sim, aes(mz, ymin = 0, ymax = intensity, colour = "red"))
p <- p + geom_line(data = myo_sim, aes(mz, intensity, colour = "red"))
p + xlim(c(807.5,812))
# Water loss (-H2O).
myo_sim <- generateChargedDist(proteinSequence = protein, charge = crange, removeFirstAA = TRUE, modification = list( H=-2, O=-1))
myo_sim <- fitIntensity(measuredSpectrum = myo, simulatedSpectrum = myo_sim)
p <- p + geom_linerange(data = myo_sim, aes(mz, ymin = 0, ymax = intensity, colour = "red"))
p <- p + geom_line(data = myo_sim, aes(mz, intensity, colour = "red"))
p + xlim(c(807.5,812))
# Inspect other charge-state windows.
p + xlim(c(677,685))
p + xlim(c(1058,1065))
p + xlim(c(530,535))
# Fix the final window and save the demo figure.
p <- p + xlim(c(807.5,812))
ggsave(filename = "DemoPlot.png", plot = p)
|
library(readxl)
library(ggplot2)
library(dplyr)
# 1) Use read_excel to import the "nombres60.xlsx" data set and store the
# resulting tibble in an object named "notas" (grades).
notas <- read_excel("nombres60.xlsx")
# 2) Create a scatter plot with the midterm ("solemne") grades on the
# horizontal axis and the exam grades on the vertical axis.
# Base graphics or ggplot2 may be used.
plot(x = notas$solemne, y = notas$examen)
# 3) Create two boxplots: one for the midterm grades and
# one for the exam grades.
# Base graphics or ggplot2 may be used.
boxplot(x = notas$solemne)
boxplot(x = notas$examen)
# 4) Compute the mean of the midterm grades and
# the standard deviation of the exam grades.
mean(notas$solemne)
sd(notas$examen)
# 5) Compute the 0.25 and 0.75 quantiles of the exam grades.
quantile(notas$examen, c(0.25,0.75))
# 6) Create a new tibble containing only the observations of
# people who scored 4 or more on the exam.
# Passing ("azul") exam grade, option 1.
# NOTE(review): this builds a one-column tibble of logicals (and assigns `df`
# as a side effect); it does not filter rows -- presumably
# `filter(notas, examen >= 4)` was intended; confirm.
azul_examen <- (df <- tibble(notas$examen>=4))
# 7) Compute the proportion of people who passed the exam.
# Passing ("azul") exam grade, option 2.
# NOTE(review): prop.table() over the raw grades is not a passing proportion;
# `mean(notas$examen >= 4)` would compute it -- confirm intent.
df$prob <- prop.table(notas$examen)*100
# 8) Compute the probability that, in a group of 30 people,
# 2 or more share the same birthday.
# 9) Find the smallest number of people for which the probability
# that two or more share the same birthday
# is greater than or equal to 1/3.
|
/2019_2/control3/Sergio Muñoz - Control 3.R
|
no_license
|
ricardomayerb/ico8305
|
R
| false
| false
| 1,525
|
r
|
library(readxl)
library(ggplot2)
library(dplyr)
# 1) Use read_excel to import the "nombres60.xlsx" data set and store the
# resulting tibble in an object named "notas" (grades).
notas <- read_excel("nombres60.xlsx")
# 2) Create a scatter plot with the midterm ("solemne") grades on the
# horizontal axis and the exam grades on the vertical axis.
# Base graphics or ggplot2 may be used.
plot(x = notas$solemne, y = notas$examen)
# 3) Create two boxplots: one for the midterm grades and
# one for the exam grades.
# Base graphics or ggplot2 may be used.
boxplot(x = notas$solemne)
boxplot(x = notas$examen)
# 4) Compute the mean of the midterm grades and
# the standard deviation of the exam grades.
mean(notas$solemne)
sd(notas$examen)
# 5) Compute the 0.25 and 0.75 quantiles of the exam grades.
quantile(notas$examen, c(0.25,0.75))
# 6) Create a new tibble containing only the observations of
# people who scored 4 or more on the exam.
# Passing ("azul") exam grade, option 1.
# NOTE(review): this builds a one-column tibble of logicals (and assigns `df`
# as a side effect); it does not filter rows -- presumably
# `filter(notas, examen >= 4)` was intended; confirm.
azul_examen <- (df <- tibble(notas$examen>=4))
# 7) Compute the proportion of people who passed the exam.
# Passing ("azul") exam grade, option 2.
# NOTE(review): prop.table() over the raw grades is not a passing proportion;
# `mean(notas$examen >= 4)` would compute it -- confirm intent.
df$prob <- prop.table(notas$examen)*100
# 8) Compute the probability that, in a group of 30 people,
# 2 or more share the same birthday.
# 9) Find the smallest number of people for which the probability
# that two or more share the same birthday
# is greater than or equal to 1/3.
|
# Confidence intervals for the variance components of a fitted null mixed
# model.
#
# @param nullMMobj fitted null mixed-model object; must carry varComp,
#   varCompCov, hetResid and zeroFLAG elements.
# @param prop if TRUE, report each component as a proportion of the total
#   variance, with CIs from the delta method; if FALSE, report raw components.
# Prints a data.frame of estimates with 95% bounds (print()'s value is the
# function's return value).
varCompCI <- function(nullMMobj, prop=TRUE){
if(prop){
if(nullMMobj$hetResid){
stop("Estimates of proportional variance are not supported with heterogeneous group residual variances")
}
ci <- matrix(NA, nrow=length(nullMMobj$varComp), ncol=2)
# Point estimates: each component as a share of the total variance.
est <- nullMMobj$varComp/sum(nullMMobj$varComp)
varCompCov <- nullMMobj$varCompCov
varCompCov[is.na(varCompCov)] <- 0
for(i in 1:length(est)){
# deltaH is the gradient of est[i] with respect to the variance
# components (delta method); the i-th entry gains the extra term from
# differentiating the numerator.
deltaH <- rep(-nullMMobj$varComp[i]/(sum(nullMMobj$varComp)^2),length(nullMMobj$varComp))
deltaH[i] <- deltaH[i] + sum(nullMMobj$varComp)/(sum(nullMMobj$varComp)^2)
varH <- crossprod(deltaH, crossprod(varCompCov, deltaH))
# 95% normal-approximation interval around the proportion estimate.
ci[i,] <- est[i] + sqrt(varH)*qnorm(c(0.025,0.975))
}
# Components flagged at zero get no interval (boundary estimates --
# presumably set by the fitting routine; confirm semantics of zeroFLAG).
ci[nullMMobj$zeroFLAG,] <- NA
res <- as.data.frame(cbind(est, ci))
names(res) <- c("Proportion", "Lower 95", "Upper 95")
}else{
# Raw scale: estimate +/- z * SE via outer product with normal quantiles.
ci <- nullMMobj$varComp + sqrt(diag(nullMMobj$varCompCov)) %o% qnorm(c(0.025,0.975))
res <- as.data.frame(cbind(nullMMobj$varComp, ci))
names(res) <- c("Est", "Lower 95", "Upper 95")
}
print(res)
}
|
/R/varCompCI.R
|
no_license
|
hanchenphd/GENESIS
|
R
| false
| false
| 1,191
|
r
|
# Confidence intervals for the variance components of a fitted null mixed
# model.
#
# @param nullMMobj fitted null mixed-model object; must carry varComp,
#   varCompCov, hetResid and zeroFLAG elements.
# @param prop if TRUE, report each component as a proportion of the total
#   variance, with CIs from the delta method; if FALSE, report raw components.
# Prints a data.frame of estimates with 95% bounds (print()'s value is the
# function's return value).
varCompCI <- function(nullMMobj, prop=TRUE){
if(prop){
if(nullMMobj$hetResid){
stop("Estimates of proportional variance are not supported with heterogeneous group residual variances")
}
ci <- matrix(NA, nrow=length(nullMMobj$varComp), ncol=2)
# Point estimates: each component as a share of the total variance.
est <- nullMMobj$varComp/sum(nullMMobj$varComp)
varCompCov <- nullMMobj$varCompCov
varCompCov[is.na(varCompCov)] <- 0
for(i in 1:length(est)){
# deltaH is the gradient of est[i] with respect to the variance
# components (delta method); the i-th entry gains the extra term from
# differentiating the numerator.
deltaH <- rep(-nullMMobj$varComp[i]/(sum(nullMMobj$varComp)^2),length(nullMMobj$varComp))
deltaH[i] <- deltaH[i] + sum(nullMMobj$varComp)/(sum(nullMMobj$varComp)^2)
varH <- crossprod(deltaH, crossprod(varCompCov, deltaH))
# 95% normal-approximation interval around the proportion estimate.
ci[i,] <- est[i] + sqrt(varH)*qnorm(c(0.025,0.975))
}
# Components flagged at zero get no interval (boundary estimates --
# presumably set by the fitting routine; confirm semantics of zeroFLAG).
ci[nullMMobj$zeroFLAG,] <- NA
res <- as.data.frame(cbind(est, ci))
names(res) <- c("Proportion", "Lower 95", "Upper 95")
}else{
# Raw scale: estimate +/- z * SE via outer product with normal quantiles.
ci <- nullMMobj$varComp + sqrt(diag(nullMMobj$varCompCov)) %o% qnorm(c(0.025,0.975))
res <- as.data.frame(cbind(nullMMobj$varComp, ci))
names(res) <- c("Est", "Lower 95", "Upper 95")
}
print(res)
}
|
#' Summarise event log
#'
#' Returns summary metrics of event log
#'
#' @param eventlog event log
#'
#' @return named vector having summary metrics
#'
#' @export
summarise_eventlog <- function(eventlog) {
  # Per-case metrics are computed once, then aggregated into a named vector.
  cases <- summarise_cases(eventlog)
  trace_lengths <- cases[["trace_length"]]
  unique_acts <- cases[["unique_activities"]]
  c(
    "Number of cases" = nrow(cases),
    "Average trace length" = mean(trace_lengths),
    "SD trace length" = sd(trace_lengths),
    "Average unique activities (per trace)" = mean(unique_acts),
    "SD unique activities (per trace)" = sd(unique_acts)
  )
}
|
/tclust/R/summarise_eventlog.R
|
no_license
|
nirmalpatel/trace_clustering
|
R
| false
| false
| 881
|
r
|
#' Summarise event log
#'
#' Returns summary metrics of event log
#'
#' @param eventlog event log
#'
#' @return named vector having summary metrics
#'
#' @export
summarise_eventlog <- function(eventlog) {
  # Per-case metrics are computed once, then aggregated into a named vector.
  cases <- summarise_cases(eventlog)
  trace_lengths <- cases[["trace_length"]]
  unique_acts <- cases[["unique_activities"]]
  c(
    "Number of cases" = nrow(cases),
    "Average trace length" = mean(trace_lengths),
    "SD trace length" = sd(trace_lengths),
    "Average unique activities (per trace)" = mean(unique_acts),
    "SD unique activities (per trace)" = sd(unique_acts)
  )
}
|
#' @name enrich
#' @title Enrich `sf` object with OSM data
#' @description Perform enriched query on OSM and add as new column.
#'
#' @param name the column name of the feature to be added
#' @param dataset target `sf` dataset to enrich with this package
#' @param key target OSM feature key to add, see [osmdata::add_osm_feature()]
#' @param value target value for OSM feature key to add, see
#' [osmdata::add_osm_feature()]
#' @param type `character` the osm feature type or types to consider
#' (e.g., points, polygons), see details
#' @param measure `character` the measure metric used, see details
#' @param kernel `function` the kernel function used, see details
#' @param r The search radius used by the `kernel` function.
#' @param reduce_fun The aggregation function used by the `kernel` function to
#' aggregate the retrieved data objects
#' @param control The list with configuration variables for the OSRM server.
#' It contains `timeout`, defining the number of seconds before the request
#' to OSRM times out, and `memsize`, defining the maximum size of the query to
#' OSRM.
#' @param .verbose `bool` whether to print info during enrichment
#' @param ... Additional parameters to be passed into the OSM query, such as
#' a user-defined kernel.
#'
#' @details `Type` represents the feature type to be considered. Usually this
#' would be points, but polygons and multipolygons are also possible. This
#' argument can also be a vector of multiple types. Non-point types will be
#' converted to points using the `st_centroid` function from the `sf` package
#' (NB this does not necessarily work well for all features!).
#' Available options are:
#' - points
#' - lines
#' - polygons
#' - multilines
#' - multipolygons
#'
#' `Measure` represents the metric used to compute the distances or durations
#' between the rows in the dataset and the OSM features. The following metrics
#' are available in this package, assuming that the OSRM server is setup as
#' suggested in our guide at:
#' https://github.com/sodascience/osmenrich_docker:
#' - spherical
#' - distance_by_foot
#' - duration_by_foot
#' - distance_by_car
#' - duration_by_car
#' - distance_by_bike
#' - duration_by_bike
#'
#' `Kernel` indicates the kernel function from the `osmenrich` package to be
#' used to weight the objects retrieved by their distances (or durations) from
#' the reference objects and then convert these vectors into single numbers.
#' The simplest kernel allows the user to count the number of occurrences
#' of reference objects within a radius `r` and is called `kernel_uniform`.
#'
#' For more details see the introductory vignette of `osmenrich`:
#' \code{vignette("introduction", package = "osmenrich")}
#'
#' @examples
#' \dontrun{
#' # Load libraries
#' library(tidyverse)
#' library(sf)
#'
#' # Create example dataset
#' sf_example <-
#' tribble(
#' ~person, ~lat, ~lon,
#' "Alice", 52.12, 5.09,
#' "Bob", 52.13, 5.08,
#' ) %>%
#' sf::st_as_sf(
#' coords = c("lon", "lat"),
#' crs = 4326
#' )
#'
#' # Enrich data creating new column `waste_baskets`
#' sf_enriched <- sf_example %>%
#' enrich_osm(
#' name = "n_waste_baskets",
#' key = "amenity",
#' value = "waste_basket",
#'     type = "points",
#'     measure = "duration_by_foot",
#'     r = 100,
#'     kernel = "uniform",
#' reduce_fun = sum
#' )
#' }
#'
#' @seealso [enrich_opq()]
#' @note If you want to get a large number of objects make sure to set the
#'   `.timeout` (time before request times out) and `.memsize` (maximum
#' size of the request) arguments for the Overpass server and set
#' the "max-table-size" argument correctly when starting the
#' OSRM server(s).
#' @export
enrich_osm <- function(
  dataset,
  name = NULL,
  key = NULL,
  value = NULL,
  type = "points",
  measure = "spherical",
  r = NULL,
  kernel = "uniform",
  reduce_fun = sum,
  control = list(),
  .verbose = TRUE,
  ...) {
  # Guard clauses: exactly one non-NULL enrichment name per call.
  if (is.null(name)) stop("Enter a query name.")
  if (length(name) > 1) {
    stop("You can enrich one query at the time only.")
  }
  # Resolve OSRM control settings (timeout, memsize) from the user list.
  control <- do.call("control_enrich", control)
  # Build the Overpass query for the requested feature/kernel combination.
  query <- enrich_opq(
    dataset = dataset,
    name = name, key = key, value = value, type = type,
    measure = measure, r = r, kernel = kernel,
    reduce_fun = reduce_fun, control = control, .verbose = .verbose,
    ...
  )
  # Perform the enrichment and return the dataset with the new column.
  data_enrichment(
    ref_data = dataset, query = query, colname = name, .verbose = .verbose
  )
}
#' @rdname enrich
#' @keywords internal
#'
#' Download the requested OSM features, compute the measure matrix between
#' `ref_data` and the features, reduce each matrix row with the query's
#' kernel function, and attach the reduced vector to `ref_data` as a new
#' column named `colname`.
#'
#' @param ref_data `sf` dataset to enrich.
#' @param query enriched Overpass query (see [enrich_opq()]).
#' @param colname name of the column added to `ref_data`.
#' @param .verbose whether to print progress messages.
#' @return `ref_data` with the additional column `colname`.
data_enrichment <- function(ref_data, query, colname, .verbose = TRUE) {
  # Check inputs
  if (!is(ref_data, "sf")) stop("Data should be sf object.")
  check_enriched_opq(query)
  # Only download nodes when just points are requested (smaller download).
  if (length(query[["type"]]) == 1 && query[["type"]] == "points") {
    attr(query, "nodes_only") <- TRUE
  }
  if (.verbose) {
    cli::cli_process_start(
      msg = cli::col_cyan(glue::glue("Downloading data for {colname}...")),
      msg_done = cli::col_green("Downloaded data for {colname}."),
      msg_failed = cli::col_red(glue::glue("Failed to download data for {colname}!"))
    )
  }
  # Retrieve data from OSM server
  ftr_data <- osmdata::osmdata_sf(q = query)
  if (.verbose) {
    cli::cli_process_done()
    cli::cli_alert_info(cli::col_cyan(sprintf(
      "Downloaded %i points, %i lines, %i polygons, %i mlines, %i mpolygons.",
      if (is.null(ftr_data$osm_points)) 0 else
        nrow(ftr_data$osm_points),
      if (is.null(ftr_data$osm_lines)) 0 else
        nrow(ftr_data$osm_lines),
      if (is.null(ftr_data$osm_polygons)) 0 else
        nrow(ftr_data$osm_polygons),
      if (is.null(ftr_data$osm_multilines)) 0 else
        nrow(ftr_data$osm_multilines),
      if (is.null(ftr_data$osm_multipolygons)) 0 else
        nrow(ftr_data$osm_multipolygons)
    )))
  }
  # Collect the feature geometry for every requested type; non-point
  # geometries are collapsed to their centroids.
  first <- TRUE
  for (type in query$type) {
    geometry <- ftr_data[[paste0("osm_", type)]][["geometry"]]
    if (is.null(geometry)) next
    # Whatever the geometry, as long as not points use centroid
    # Here one could divide it depending on the geometry or choice of user
    if (type != "points") {
      geometry <- sf::st_centroid(geometry) # of_largest_polygon = T
    }
    if (first) {
      ftr_geometry <- geometry
      first <- FALSE
    } else {
      ftr_geometry <- c(ftr_geometry, geometry)
    }
  }
  # Fail with a clear message instead of an obscure "object not found"
  # error when the query returned no usable geometry at all.
  if (first) {
    stop("No features were retrieved for the requested type(s).")
  }
  if (.verbose) {
    cli::cli_process_start(
      msg = cli::col_cyan(glue::glue("Computing measure matrix for {colname}...")),
      msg_done = cli::col_green("Computed measure matrix for {colname}."),
      msg_failed = cli::col_red(glue::glue("Failed to compute measure matrix for {colname}!"))
    )
  }
  # Transform both geometries to EPSG:4326. Warnings are silenced locally
  # (CRS assignment warns with some GDAL versions, see
  # https://github.com/r-spatial/sf/issues/1419); the caller's `warn`
  # option is saved and restored instead of being reset to 0.
  old_warn <- options(warn = -1)
  on.exit(options(old_warn), add = TRUE)
  ref_geometry <- sf::st_transform(ref_data, crs = 4326)
  sf::st_crs(ftr_geometry) <- 4326
  options(old_warn)
  # Create matrix ref <-> ftr
  measure_mat <- measure_matrix(
    measure_name = query[["measure"]],
    measure_fun = query[["measurefun"]],
    ref_geometry = ref_geometry,
    ftr_geometry = ftr_geometry
  )
  # Apply the kernel function over the rows of the measure matrix,
  # forwarding any extra kernel parameters stored on the query.
  apply_args <-
    c(
      list(
        X = measure_mat,
        MARGIN = 1,
        FUN = query[["kernelfun"]]
      ),
      query[["kernelpars"]]
    )
  feature <- do.call(what = apply, args = apply_args)
  if (.verbose) {
    cli::cli_process_done()
    cli::cli_alert_info(cli::col_cyan(glue::glue("Adding {colname} to data.")))
  }
  ref_data[[colname]] <- feature
  return(ref_data)
}
#' @rdname enrich
#' @keywords internal
#'
#' Compute the measure (distance or duration) matrix between the reference
#' geometry and the feature geometry. "spherical" is computed locally with
#' sf; any other measure is delegated to the OSRM-backed `measure_fun`,
#' split into chunks of reference rows when the request would exceed the
#' OSRM limits.
#'
#' @return a matrix with one row per reference geometry and one column per
#'   feature geometry.
measure_matrix <- function(measure_name,
                           measure_fun,
                           ref_geometry,
                           ftr_geometry) {
  # If "spherical" then no call to OSRM necessary
  if (measure_name == "spherical") {
    return(sf::st_distance(ref_geometry, ftr_geometry))
  }
  # Within the OSRM limits a single call is enough.
  if (!check_osrm_limits(src = ref_geometry, dst = ftr_geometry)) {
    return(measure_fun(ref_geometry, ftr_geometry))
  }
  # Otherwise split the request into chunks of reference rows and stack the
  # partial matrices. NOTE(review): the original sized chunks on
  # nrow(ref) * nrow(st_coordinates(ftr)), selected seq_size + 1 overlapping
  # rows per chunk (off-by-one), and never returned the accumulated result;
  # chunking is done over reference rows here — confirm the chunk size
  # against the OSRM "max-table-size" setting.
  print("Splitting main call and creating sub-calls...")
  n_ref <- nrow(ref_geometry)
  chunk_size <- 20000
  result <- NULL
  for (start in seq(1, n_ref, by = chunk_size)) {
    end <- min(start + chunk_size - 1, n_ref)
    part <- measure_fun(ref_geometry[start:end, ], ftr_geometry)
    result <- if (is.null(result)) part else rbind(result, part)
  }
  result
}
#' @rdname enrich
#' @keywords internal
#'
#' Validate and assemble the control settings for OSRM/Overpass requests.
#'
#' @param timeout seconds before a request times out; must be > 0.
#' @param memsize maximum request size in bytes; must be > 0.
#' @return a named list with elements `timeout` and `memsize`.
control_enrich <- function(timeout = 300, memsize = 1073741824) {
  # Both settings must be strictly positive numbers.
  timeout_ok <- is.numeric(timeout) && timeout > 0
  if (!timeout_ok) {
    stop("Value of 'timeout' must be > 0")
  }
  memsize_ok <- is.numeric(memsize) && memsize > 0
  if (!memsize_ok) {
    stop("Value of 'memsize' must be > 0")
  }
  list(timeout = timeout, memsize = memsize)
}
|
/R/enrich_osm.R
|
permissive
|
sodascience/osmenrich
|
R
| false
| false
| 9,562
|
r
|
#' @name enrich
#' @title Enrich `sf` object with OSM data
#' @description Perform enriched query on OSM and add as new column.
#'
#' @param name the column name of the feature to be added
#' @param dataset target `sf` dataset to enrich with this package
#' @param key target OSM feature key to add, see [osmdata::add_osm_feature()]
#' @param value target value for OSM feature key to add, see
#' [osmdata::add_osm_feature()]
#' @param type `character` the osm feature type or types to consider
#' (e.g., points, polygons), see details
#' @param measure `character` the measure metric used, see details
#' @param kernel `function` the kernel function used, see details
#' @param r The search radius used by the `kernel` function.
#' @param reduce_fun The aggregation function used by the `kernel` function to
#' aggregate the retrieved data objects
#' @param control The list with configuration variables for the OSRM server.
#' It contains `timeout`, defining the number of seconds before the request
#' to OSRM times out, and `memsize`, defining the maximum size of the query to
#' OSRM.
#' @param .verbose `bool` whether to print info during enrichment
#' @param ... Additional parameters to be passed into the OSM query, such as
#' a user-defined kernel.
#'
#' @details `Type` represents the feature type to be considered. Usually this
#' would be points, but polygons and multipolygons are also possible. This
#' argument can also be a vector of multiple types. Non-point types will be
#' converted to points using the `st_centroid` function from the `sf` package
#' (NB this does not necessarily work well for all features!).
#' Available options are:
#' - points
#' - lines
#' - polygons
#' - multilines
#' - multipolygons
#'
#' `Measure` represents the metric used to compute the distances or durations
#' between the rows in the dataset and the OSM features. The following metrics
#' are available in this package, assuming that the OSRM server is setup as
#' suggested in our guide at:
#' https://github.com/sodascience/osmenrich_docker:
#' - spherical
#' - distance_by_foot
#' - duration_by_foot
#' - distance_by_car
#' - duration_by_car
#' - distance_by_bike
#' - duration_by_bike
#'
#' `Kernel` indicates the kernel function from the `osmenrich` package to be
#' used to weight the objects retrieved by their distances (or durations) from
#' the reference objects and then convert these vectors into single numbers.
#' The simplest kernel allows the user to count the number of occurrences
#' of reference objects within a radius `r` and is called `kernel_uniform`.
#'
#' For more details see the introductory vignette of `osmenrich`:
#' \code{vignette("introduction", package = "osmenrich")}
#'
#' @examples
#' \dontrun{
#' # Load libraries
#' library(tidyverse)
#' library(sf)
#'
#' # Create example dataset
#' sf_example <-
#' tribble(
#' ~person, ~lat, ~lon,
#' "Alice", 52.12, 5.09,
#' "Bob", 52.13, 5.08,
#' ) %>%
#' sf::st_as_sf(
#' coords = c("lon", "lat"),
#' crs = 4326
#' )
#'
#' # Enrich data creating new column `waste_baskets`
#' sf_enriched <- sf_example %>%
#' enrich_osm(
#' name = "n_waste_baskets",
#' key = "amenity",
#' value = "waste_basket",
#'     type = "points",
#'     measure = "duration_by_foot",
#'     r = 100,
#'     kernel = "uniform",
#' reduce_fun = sum
#' )
#' }
#'
#' @seealso [enrich_opq()]
#' @note If you want to get a large number of objects make sure to set the
#'   `.timeout` (time before request times out) and `.memsize` (maximum
#' size of the request) arguments for the Overpass server and set
#' the "max-table-size" argument correctly when starting the
#' OSRM server(s).
#' @export
enrich_osm <- function(
  dataset,
  name = NULL,
  key = NULL,
  value = NULL,
  type = "points",
  measure = "spherical",
  r = NULL,
  kernel = "uniform",
  reduce_fun = sum,
  control = list(),
  .verbose = TRUE,
  ...) {
  # Exactly one (non-NULL, length-1) query name is required.
  if (is.null(name)) stop("Enter a query name.")
  if (length(name) > 1) {
    stop("You can enrich one query at the time only.")
  } else {
    # Validate timeout/memsize settings for the OSRM/Overpass servers.
    control <- do.call("control_enrich", control)
    # Create query to OSM server
    query <- enrich_opq(
      dataset = dataset,
      name = name, key = key, value = value, type = type,
      measure = measure, r = r, kernel = kernel,
      reduce_fun = reduce_fun, control = control, .verbose = .verbose,
      ...
    )
    # Enrichment call: downloads the features, computes the measure matrix,
    # reduces it with the kernel, and attaches the result as column `name`.
    enriched_data <- data_enrichment(
      ref_data = dataset, query = query, colname = name, .verbose = .verbose
    )
    return(enriched_data)
  }
}
#' @rdname enrich
#' @keywords internal
#'
#' Download the requested OSM features, compute the measure matrix between
#' `ref_data` and the features, reduce each matrix row with the query's
#' kernel function, and attach the reduced vector to `ref_data` as a new
#' column named `colname`.
#'
#' @param ref_data `sf` dataset to enrich.
#' @param query enriched Overpass query (see [enrich_opq()]).
#' @param colname name of the column added to `ref_data`.
#' @param .verbose whether to print progress messages.
#' @return `ref_data` with the additional column `colname`.
data_enrichment <- function(ref_data, query, colname, .verbose = TRUE) {
  # Check inputs
  if (!is(ref_data, "sf")) stop("Data should be sf object.")
  check_enriched_opq(query)
  # Extract the feature points and/or centroids
  # Only download points if only points are requested
  if (length(query[["type"]]) == 1 && query[["type"]] == "points") {
    attr(query, "nodes_only") <- TRUE
  }
  if (.verbose) {
    cli::cli_process_start(
      msg = cli::col_cyan(glue::glue("Downloading data for {colname}...")),
      msg_done = cli::col_green("Downloaded data for {colname}."),
      msg_failed = cli::col_red(glue::glue("Failed to download data for {colname}!"))
    )
  }
  # Retrieve data from OSM server
  ftr_data <- osmdata::osmdata_sf(q = query)
  if (.verbose) {
    cli::cli_process_done()
    # Report the per-geometry-type counts; absent slots count as 0.
    cli::cli_alert_info(cli::col_cyan(sprintf(
      "Downloaded %i points, %i lines, %i polygons, %i mlines, %i mpolygons.",
      if (is.null(ftr_data$osm_points)) 0 else
        nrow(ftr_data$osm_points),
      if (is.null(ftr_data$osm_lines)) 0 else
        nrow(ftr_data$osm_lines),
      if (is.null(ftr_data$osm_polygons)) 0 else
        nrow(ftr_data$osm_polygons),
      if (is.null(ftr_data$osm_multilines)) 0 else
        nrow(ftr_data$osm_multilines),
      if (is.null(ftr_data$osm_multipolygons)) 0 else
        nrow(ftr_data$osm_multipolygons)
    )))
  }
  # Get feature sf::geometry: concatenate the geometry of every requested
  # type into a single vector, reducing non-points to centroids.
  first <- TRUE
  for (type in query$type) {
    geometry <- ftr_data[[paste0("osm_", type)]][["geometry"]]
    if (is.null(geometry)) next
    # Whatever the geometry, as long as not points use centroid
    # Here one could divide it depending on the geometry or choice of user
    if (type != "points") {
      geometry <- sf::st_centroid(geometry) # of_largest_polygon = T
    }
    if (first) {
      ftr_geometry <- geometry
      first <- FALSE
    } else {
      ftr_geometry <- c(ftr_geometry, geometry)
    }
  }
  if (.verbose) {
    cli::cli_process_start(
      msg = cli::col_cyan(glue::glue("Computing measure matrix for {colname}...")),
      msg_done = cli::col_green("Computed measure matrix for {colname}."),
      msg_failed = cli::col_red(glue::glue("Failed to compute measure matrix for {colname}!"))
    )
  }
  # Modify both ftr and ref to 4326
  # NOTE(review): warn is reset to 0 below rather than restored to its
  # previous value, which clobbers any non-default user setting.
  options(warn=-1)
  ref_geometry <- sf::st_transform(ref_data, crs = 4326)
  # This command raises a warning due to different versions of GDAL
  # see: https://github.com/r-spatial/sf/issues/1419
  sf::st_crs(ftr_geometry) <- 4326
  options(warn=0)
  # Create matrix ref <-> ftr
  measure_mat <- measure_matrix(
    measure_name = query[["measure"]],
    measure_fun = query[["measurefun"]],
    ref_geometry = ref_geometry,
    ftr_geometry = ftr_geometry
  )
  # Apply the kernel function over the rows of the measure matrix,
  # forwarding any extra kernel parameters stored on the query.
  apply_args <-
    c(
      list(
        X = measure_mat,
        MARGIN = 1,
        FUN = query[["kernelfun"]]
      ),
      query[["kernelpars"]]
    )
  feature <- do.call(what = apply, args = apply_args)
  if (.verbose) {
    cli::cli_process_done()
    cli::cli_alert_info(cli::col_cyan(glue::glue("Adding {colname} to data.")))
  }
  ref_data[[colname]] <- feature
  return(ref_data)
}
#' @rdname enrich
#' @keywords internal
#'
#' Compute the measure (distance or duration) matrix between the reference
#' geometry and the feature geometry. "spherical" is computed locally with
#' sf; any other measure is delegated to the OSRM-backed `measure_fun`,
#' split into chunks of reference rows when the request would exceed the
#' OSRM limits.
#'
#' @return a matrix with one row per reference geometry and one column per
#'   feature geometry.
measure_matrix <- function(measure_name,
                           measure_fun,
                           ref_geometry,
                           ftr_geometry) {
  # If "spherical" then no call to OSRM necessary
  if (measure_name == "spherical") {
    return(sf::st_distance(ref_geometry, ftr_geometry))
  }
  # Within the OSRM limits a single call is enough.
  if (!check_osrm_limits(src = ref_geometry, dst = ftr_geometry)) {
    return(measure_fun(ref_geometry, ftr_geometry))
  }
  # Otherwise split the request into chunks of reference rows and stack the
  # partial matrices. NOTE(review): the original sized chunks on
  # nrow(ref) * nrow(st_coordinates(ftr)), selected seq_size + 1 overlapping
  # rows per chunk (off-by-one), and never returned the accumulated result;
  # chunking is done over reference rows here — confirm the chunk size
  # against the OSRM "max-table-size" setting.
  print("Splitting main call and creating sub-calls...")
  n_ref <- nrow(ref_geometry)
  chunk_size <- 20000
  result <- NULL
  for (start in seq(1, n_ref, by = chunk_size)) {
    end <- min(start + chunk_size - 1, n_ref)
    part <- measure_fun(ref_geometry[start:end, ], ftr_geometry)
    result <- if (is.null(result)) part else rbind(result, part)
  }
  result
}
#' @rdname enrich
#' @keywords internal
#'
#' Build the control list used for OSRM/Overpass requests.
#'
#' @param timeout seconds before a request times out; must be > 0.
#' @param memsize maximum request size in bytes (default 1073741824, i.e.
#'   1 GiB); must be > 0.
#' @return a named list with elements `timeout` and `memsize`.
control_enrich <- function(timeout = 300, memsize = 1073741824) {
  if (!is.numeric(timeout) || timeout <= 0) {
    stop("Value of 'timeout' must be > 0")
  }
  if (!is.numeric(memsize) || memsize <= 0) {
    stop("Value of 'memsize' must be > 0")
  }
  list(timeout = timeout, memsize = memsize)
}
|
#' Natural logarithm with a finite floor for non-positive input.
#'
#' Returns log(rValue) when rValue is strictly positive; otherwise returns
#' -20, avoiding -Inf/NaN when parameter ramps start at zero.
#' @export
safe_log <- function(rValue)
{
  # Guard clause: non-positive input maps to the floor value -20.
  if (rValue <= 0) {
    return(-20)
  }
  log(rValue)
}
#' Run a Marxan parameter sensitivity test in parallel.
#'
#' Ramps one calibration parameter ("BLM", "SPF" or "Targ") across `iCores`
#' values between `rMin` and `rMax` (log-spaced for BLM/SPF via `safe_log`,
#' linear for Targ), runs `iRepsPerCore` Marxan repetitions per value on a
#' parallel PSOCK cluster, and writes per-value and combined summary CSV
#' files under `sMarxanDir/output`.
#'
#' Relies on globals not passed as arguments: `iCores`, `iRepsPerCore`,
#' `sMarxanDir`, `sShinyDataPath`, `sExecutable`, `fWindows`. Must be
#' called from a Shiny session (uses `withProgress`). Side effects: creates
#' `core<i>` directories, copies the Marxan executable, writes
#' spec/input/summary files, changes the working directory inside workers,
#' and assigns `sAppendSummary` in the global environment via `<<-`.
#'
#' @export
ExecuteMarxan_paramtest <- function(sParam,rMin,rMax,rUserBLM,rUserSPF,rUserTarg)
{
    cat(paste0("ExecuteMarxan_paramtest start\n"))
    withProgress(message="Run parameter test",value=0,
    {
        withProgress(message=sParam,value=0,
        {
            # Compute the ramp endpoints: BLM and SPF ramp on a log scale
            # (safe_log floors non-positive input at -20), Targ linearly.
            if (sParam == "BLM")
            {
                rMinimum <- safe_log(rMin)
                rMaximum <- safe_log(rMax)
                rInterval <- (rMaximum - rMinimum) / (iCores-1)
                rValue <- rMin
            }
            if (sParam == "SPF")
            {
                rMinimum <- safe_log(rMin)
                rMaximum <- safe_log(rMax)
                rInterval <- (rMaximum - rMinimum) / (iCores-1)
                rValue <- rMin
            }
            if (sParam == "Targ")
            {
                rMinimum <- rMin
                rMaximum <- rMax
                rInterval <- (rMaximum - rMinimum) / (iCores-1)
                rValue <- rMin
            }
            # create the ramped value file (one parameter value per core)
            write(paste0('i,',sParam),file=paste0(sMarxanDir,"/",sParam,".csv"))
            write(paste0(1,",",rValue),file=paste0(sMarxanDir,"/",sParam,".csv"),append=TRUE)
            for (i in 2:iCores)
            {
                if (sParam == "Targ")
                {
                    rValue <- rMinimum+((i-1)*rInterval) # linear ramping for Target
                } else {
                    rValue <- exp(rMinimum+((i-1)*rInterval)) # exponential ramping for BLM, SPF and Cost
                }
                write(paste0(i,",",rValue),file=paste0(sMarxanDir,"/",sParam,".csv"),append=TRUE)
            }
            # initialise the parameter summary file
            sSummary <- paste0(sMarxanDir,"/output/output_",sParam,"summary.csv")
            if (sParam == "BLM") { write("i,BLM,cost,boundary length",file=sSummary) }
            if (sParam == "SPF") { write("i,SPF,cost,shortfall",file=sSummary) }
            if (sParam == "Targ") { write('i,Targ,cost',file=sSummary) }
            # load the ramped value file
            VALUEcsv <- read.csv(paste0(sMarxanDir,"/",sParam,".csv"))
            randomseeds <- round(runif(10)*100000)
            #if (fWindows) { registerDoParallel(makeCluster(iCores,type="PSOCK")) }
            registerDoParallel(makeCluster(iCores,type="PSOCK"))
            # need to export objects not in local environment
            export_list <- c('fWindows','sMarxanDir','sShinyDataPath','sExecutable','iRepsPerCore')
            # prepare the Marxan input files (one core<i> directory per value)
            foreach(i=1:iCores,.export=export_list) %dopar%
            {
                dir.create(paste0(sMarxanDir,"/core",i))
                file.copy(paste0(sShinyDataPath,"/",sExecutable),paste0(sMarxanDir,"/core",i,"/",sExecutable))
                if (!fWindows) { system(paste0("chmod +x ",sMarxanDir,"/core",i,"/",sExecutable)) }
                # read input.dat and edit parameters
                inputdat <- readLines(paste0(sMarxanDir,"/input.dat"))
                # locate the parameter lines in input.dat (match at line start)
                iINPUTDIRparam <- which(regexpr("INPUTDIR",inputdat)==1)
                iOUTPUTDIRparam <- which(regexpr("OUTPUTDIR",inputdat)==1)
                iBLMparam <- which(regexpr("BLM",inputdat)==1)
                iSCENNAMEparam <- which(regexpr("SCENNAME",inputdat)==1)
                iNUMREPSparam <- which(regexpr("NUMREPS",inputdat)==1)
                iSPECNAMEparam <- which(regexpr("SPECNAME",inputdat)==1)
                iPUNAMEparam <- which(regexpr("PUNAME",inputdat)==1)
                iRANDSEEDparam <- which(regexpr("RANDSEED",inputdat)==1)
                # read spec.dat
                specdat <- read.csv(paste0(sMarxanDir,"/input/spec.dat"))
                # The ramped parameter takes the i-th value from VALUEcsv;
                # the other two parameters keep the user-supplied values.
                if (sParam == "BLM")
                {
                    inputdat[iBLMparam] <- paste0("BLM ",VALUEcsv[i,2])
                    specdat$spf <- rUserSPF
                    specdat$prop <- rUserTarg
                }
                if (sParam == "SPF")
                {
                    inputdat[iBLMparam] <- paste0("BLM ",rUserBLM)
                    specdat$spf <- VALUEcsv[i,2]
                    specdat$prop <- rUserTarg
                }
                if (sParam == "Targ")
                {
                    inputdat[iBLMparam] <- paste0("BLM ",rUserBLM)
                    specdat$spf <- rUserSPF
                    specdat$prop <- VALUEcsv[i,2]
                }
                # save spec.dat
                write.csv(specdat,paste0(sMarxanDir,"/input/spec",sParam,i,".dat"),quote=FALSE,row.names=FALSE)
                # edit parameters
                inputdat[iINPUTDIRparam] <- paste0("INPUTDIR ",sMarxanDir,"/input")
                inputdat[iOUTPUTDIRparam] <- paste0("OUTPUTDIR ",sMarxanDir,"/output")
                inputdat[iSPECNAMEparam] <- paste0("SPECNAME spec",sParam,i,".dat")
                inputdat[iSCENNAMEparam] <- paste0("SCENNAME output",sParam,i)
                inputdat[iNUMREPSparam] <- paste0("NUMREPS ",iRepsPerCore)
                inputdat[iRANDSEEDparam] <- paste0("RANDSEED ",randomseeds[i])
                # save input.dat
                writeLines(inputdat,paste0(sMarxanDir,"/core",i,"/input",sParam,i,".dat"))
            }
            cat("ExecuteMarxan_paramtest before run Marxan\n")
            export_list <- c('fWindows','sMarxanDir','sExecutable')
            # run Marxan
            foreach(i=1:iCores, .export=export_list) %dopar%
            {
                # Each worker runs Marxan from its own core<i> directory.
                setwd(paste0(sMarxanDir,"/core",i))
                if (fWindows)
                {
                    system2(sExecutable,paste0("-s input",sParam,i,".dat"),wait=T)
                } else {
                    system(paste0("./",sExecutable," -s input",sParam,i,".dat"))
                }
                #system(paste0("./",sExecutable," -s input",sParam,i,".dat"))
                # read the Marxan summary file
                sumfile <- read.csv(paste0(sMarxanDir,"/output/output",sParam,i,"_sum.csv"))
                # write to the parameter summary file
                sSummaryI <- paste0(sMarxanDir,"/output/output_",sParam,"summary",i,".csv")
                if (sParam == "BLM") { write(paste(i,VALUEcsv[i,2],mean(sumfile$Cost),mean(sumfile$Connectivity),sep=","),file=sSummaryI) }
                if (sParam == "SPF") { write(paste(i,VALUEcsv[i,2],mean(sumfile$Cost),mean(sumfile$Shortfall),sep=","),file=sSummaryI) }
                if (sParam == "Targ") { write(paste(i,VALUEcsv[i,2],mean(sumfile$Cost),sep=","),file=sSummaryI) }
            }
            #if (fWindows) { registerDoSEQ() }
            registerDoSEQ()
            # compose parameter summary table across all parallel runs
            for (i in 1:iCores)
            {
                sSummaryI <- paste0(sMarxanDir,"/output/output_",sParam,"summary",i,".csv")
                if (sParam == "BLM") { write(readLines(con=sSummaryI),file=sSummary,append=TRUE) }
                if (sParam == "SPF") { write(readLines(con=sSummaryI),file=sSummary,append=TRUE) }
                if (sParam == "Targ") { write(readLines(con=sSummaryI),file=sSummary,append=TRUE) }
            }
            # compose parameter summary table where values are cumulatively added during workflow
            if (sParam == "BLM") { sAppendSummary <<- paste0(sMarxanDir,"/output/output_BLMsummary_SPF",rUserSPF,"_Targ",rUserTarg,".csv") }
            if (sParam == "SPF") { sAppendSummary <<- paste0(sMarxanDir,"/output/output_SPFsummary_BLM",rUserBLM,"_Targ",rUserTarg,".csv") }
            if (sParam == "Targ") { sAppendSummary <<- paste0(sMarxanDir,"/output/output_Targsummary_BLM",rUserBLM,"_SPF",rUserSPF,".csv") }
            if (file.exists(sAppendSummary))
            {
                # ignore header row in sSummary if sAppendSummary exists
                sBuffer <- readLines(con=sSummary)
                write(sBuffer[-1],file=sAppendSummary,append=TRUE)
            } else {
                write(readLines(con=sSummary),file=sAppendSummary,append=FALSE)
            }
        })
    })
    cat(paste0("ExecuteMarxan_paramtest end\n"))
}
#' Run a Marxan parameter test with hard-coded default ramping ranges.
#'
#' Picks the default [rMin, rMax] ramp for the chosen parameter and
#' delegates to ExecuteMarxan_paramtest() with fixed user values
#' (BLM = 0, SPF = 1, Targ = 0.3).
#' @export
RunMarxan_paramtest <- function(sParam)
{
    # Default ramping range per parameter: BLM/SPF span many orders of
    # magnitude, Targ is a proportion in [0, 1].
    ramp <- switch(sParam,
                   BLM  = c(0, 10000000000000),
                   SPF  = c(0.0001, 10000000000000),
                   Targ = c(0, 1))
    ExecuteMarxan_paramtest(sParam=sParam,rMin=ramp[1],rMax=ramp[2],
                            rUserBLM=0,rUserSPF=1,rUserTarg=0.3)
}
#' Run a Marxan parameter test with ranges taken from app-level globals.
#'
#' Reads the ramping limits (rRampBLMmin/max, rRampSPFmin/max,
#' rtargetmin/max) and the user values (ruserblm, ruserspf, rusertarg)
#' from the calling environment, then delegates to
#' ExecuteMarxan_paramtest().
#' @export
RunMarxan_paramtest_app <- function(sParam)
{
    # set min, max for value ramping; switch() evaluates only the branch
    # for the selected parameter.
    limits <- switch(sParam,
                     BLM  = c(rRampBLMmin, rRampBLMmax),
                     SPF  = c(rRampSPFmin, rRampSPFmax),
                     Targ = c(rtargetmin, rtargetmax))
    ExecuteMarxan_paramtest(sParam=sParam,rMin=limits[1],rMax=limits[2],
                            rUserBLM=ruserblm,rUserSPF=ruserspf,rUserTarg=rusertarg)
}
|
/R/prepare_param_test.R
|
no_license
|
dondealban/marxanui
|
R
| false
| false
| 9,204
|
r
|
#' Natural logarithm with a finite floor for non-positive input: log() of a
#' non-positive number would be -Inf/NaN, so -20 is returned instead (used
#' when ramping parameters whose range starts at 0).
#' @export
safe_log <- function(rValue)
{
  if (rValue > 0)
  {
    return(log(rValue))
  } else {
    # Floor value for rValue <= 0.
    return(-20)
  }
}
#' Run a Marxan parameter sensitivity test in parallel.
#'
#' Ramps one calibration parameter ("BLM", "SPF" or "Targ") across `iCores`
#' values between `rMin` and `rMax` (log-spaced for BLM/SPF via `safe_log`,
#' linear for Targ), runs `iRepsPerCore` Marxan repetitions per value on a
#' parallel PSOCK cluster, and writes per-value and combined summary CSV
#' files under `sMarxanDir/output`.
#'
#' Relies on globals not passed as arguments: `iCores`, `iRepsPerCore`,
#' `sMarxanDir`, `sShinyDataPath`, `sExecutable`, `fWindows`. Must be
#' called from a Shiny session (uses `withProgress`). Side effects: creates
#' `core<i>` directories, copies the Marxan executable, writes
#' spec/input/summary files, changes the working directory inside workers,
#' and assigns `sAppendSummary` in the global environment via `<<-`.
#'
#' @export
ExecuteMarxan_paramtest <- function(sParam,rMin,rMax,rUserBLM,rUserSPF,rUserTarg)
{
    cat(paste0("ExecuteMarxan_paramtest start\n"))
    withProgress(message="Run parameter test",value=0,
    {
        withProgress(message=sParam,value=0,
        {
            # Compute the ramp endpoints: BLM and SPF ramp on a log scale
            # (safe_log floors non-positive input at -20), Targ linearly.
            if (sParam == "BLM")
            {
                rMinimum <- safe_log(rMin)
                rMaximum <- safe_log(rMax)
                rInterval <- (rMaximum - rMinimum) / (iCores-1)
                rValue <- rMin
            }
            if (sParam == "SPF")
            {
                rMinimum <- safe_log(rMin)
                rMaximum <- safe_log(rMax)
                rInterval <- (rMaximum - rMinimum) / (iCores-1)
                rValue <- rMin
            }
            if (sParam == "Targ")
            {
                rMinimum <- rMin
                rMaximum <- rMax
                rInterval <- (rMaximum - rMinimum) / (iCores-1)
                rValue <- rMin
            }
            # create the ramped value file (one parameter value per core)
            write(paste0('i,',sParam),file=paste0(sMarxanDir,"/",sParam,".csv"))
            write(paste0(1,",",rValue),file=paste0(sMarxanDir,"/",sParam,".csv"),append=TRUE)
            for (i in 2:iCores)
            {
                if (sParam == "Targ")
                {
                    rValue <- rMinimum+((i-1)*rInterval) # linear ramping for Target
                } else {
                    rValue <- exp(rMinimum+((i-1)*rInterval)) # exponential ramping for BLM, SPF and Cost
                }
                write(paste0(i,",",rValue),file=paste0(sMarxanDir,"/",sParam,".csv"),append=TRUE)
            }
            # initialise the parameter summary file
            sSummary <- paste0(sMarxanDir,"/output/output_",sParam,"summary.csv")
            if (sParam == "BLM") { write("i,BLM,cost,boundary length",file=sSummary) }
            if (sParam == "SPF") { write("i,SPF,cost,shortfall",file=sSummary) }
            if (sParam == "Targ") { write('i,Targ,cost',file=sSummary) }
            # load the ramped value file
            VALUEcsv <- read.csv(paste0(sMarxanDir,"/",sParam,".csv"))
            randomseeds <- round(runif(10)*100000)
            #if (fWindows) { registerDoParallel(makeCluster(iCores,type="PSOCK")) }
            registerDoParallel(makeCluster(iCores,type="PSOCK"))
            # need to export objects not in local environment
            export_list <- c('fWindows','sMarxanDir','sShinyDataPath','sExecutable','iRepsPerCore')
            # prepare the Marxan input files (one core<i> directory per value)
            foreach(i=1:iCores,.export=export_list) %dopar%
            {
                dir.create(paste0(sMarxanDir,"/core",i))
                file.copy(paste0(sShinyDataPath,"/",sExecutable),paste0(sMarxanDir,"/core",i,"/",sExecutable))
                if (!fWindows) { system(paste0("chmod +x ",sMarxanDir,"/core",i,"/",sExecutable)) }
                # read input.dat and edit parameters
                inputdat <- readLines(paste0(sMarxanDir,"/input.dat"))
                # locate the parameter lines in input.dat (match at line start)
                iINPUTDIRparam <- which(regexpr("INPUTDIR",inputdat)==1)
                iOUTPUTDIRparam <- which(regexpr("OUTPUTDIR",inputdat)==1)
                iBLMparam <- which(regexpr("BLM",inputdat)==1)
                iSCENNAMEparam <- which(regexpr("SCENNAME",inputdat)==1)
                iNUMREPSparam <- which(regexpr("NUMREPS",inputdat)==1)
                iSPECNAMEparam <- which(regexpr("SPECNAME",inputdat)==1)
                iPUNAMEparam <- which(regexpr("PUNAME",inputdat)==1)
                iRANDSEEDparam <- which(regexpr("RANDSEED",inputdat)==1)
                # read spec.dat
                specdat <- read.csv(paste0(sMarxanDir,"/input/spec.dat"))
                # The ramped parameter takes the i-th value from VALUEcsv;
                # the other two parameters keep the user-supplied values.
                if (sParam == "BLM")
                {
                    inputdat[iBLMparam] <- paste0("BLM ",VALUEcsv[i,2])
                    specdat$spf <- rUserSPF
                    specdat$prop <- rUserTarg
                }
                if (sParam == "SPF")
                {
                    inputdat[iBLMparam] <- paste0("BLM ",rUserBLM)
                    specdat$spf <- VALUEcsv[i,2]
                    specdat$prop <- rUserTarg
                }
                if (sParam == "Targ")
                {
                    inputdat[iBLMparam] <- paste0("BLM ",rUserBLM)
                    specdat$spf <- rUserSPF
                    specdat$prop <- VALUEcsv[i,2]
                }
                # save spec.dat
                write.csv(specdat,paste0(sMarxanDir,"/input/spec",sParam,i,".dat"),quote=FALSE,row.names=FALSE)
                # edit parameters
                inputdat[iINPUTDIRparam] <- paste0("INPUTDIR ",sMarxanDir,"/input")
                inputdat[iOUTPUTDIRparam] <- paste0("OUTPUTDIR ",sMarxanDir,"/output")
                inputdat[iSPECNAMEparam] <- paste0("SPECNAME spec",sParam,i,".dat")
                inputdat[iSCENNAMEparam] <- paste0("SCENNAME output",sParam,i)
                inputdat[iNUMREPSparam] <- paste0("NUMREPS ",iRepsPerCore)
                inputdat[iRANDSEEDparam] <- paste0("RANDSEED ",randomseeds[i])
                # save input.dat
                writeLines(inputdat,paste0(sMarxanDir,"/core",i,"/input",sParam,i,".dat"))
            }
            cat("ExecuteMarxan_paramtest before run Marxan\n")
            export_list <- c('fWindows','sMarxanDir','sExecutable')
            # run Marxan
            foreach(i=1:iCores, .export=export_list) %dopar%
            {
                # Each worker runs Marxan from its own core<i> directory.
                setwd(paste0(sMarxanDir,"/core",i))
                if (fWindows)
                {
                    system2(sExecutable,paste0("-s input",sParam,i,".dat"),wait=T)
                } else {
                    system(paste0("./",sExecutable," -s input",sParam,i,".dat"))
                }
                #system(paste0("./",sExecutable," -s input",sParam,i,".dat"))
                # read the Marxan summary file
                sumfile <- read.csv(paste0(sMarxanDir,"/output/output",sParam,i,"_sum.csv"))
                # write to the parameter summary file
                sSummaryI <- paste0(sMarxanDir,"/output/output_",sParam,"summary",i,".csv")
                if (sParam == "BLM") { write(paste(i,VALUEcsv[i,2],mean(sumfile$Cost),mean(sumfile$Connectivity),sep=","),file=sSummaryI) }
                if (sParam == "SPF") { write(paste(i,VALUEcsv[i,2],mean(sumfile$Cost),mean(sumfile$Shortfall),sep=","),file=sSummaryI) }
                if (sParam == "Targ") { write(paste(i,VALUEcsv[i,2],mean(sumfile$Cost),sep=","),file=sSummaryI) }
            }
            #if (fWindows) { registerDoSEQ() }
            registerDoSEQ()
            # compose parameter summary table across all parallel runs
            for (i in 1:iCores)
            {
                sSummaryI <- paste0(sMarxanDir,"/output/output_",sParam,"summary",i,".csv")
                if (sParam == "BLM") { write(readLines(con=sSummaryI),file=sSummary,append=TRUE) }
                if (sParam == "SPF") { write(readLines(con=sSummaryI),file=sSummary,append=TRUE) }
                if (sParam == "Targ") { write(readLines(con=sSummaryI),file=sSummary,append=TRUE) }
            }
            # compose parameter summary table where values are cumulatively added during workflow
            if (sParam == "BLM") { sAppendSummary <<- paste0(sMarxanDir,"/output/output_BLMsummary_SPF",rUserSPF,"_Targ",rUserTarg,".csv") }
            if (sParam == "SPF") { sAppendSummary <<- paste0(sMarxanDir,"/output/output_SPFsummary_BLM",rUserBLM,"_Targ",rUserTarg,".csv") }
            if (sParam == "Targ") { sAppendSummary <<- paste0(sMarxanDir,"/output/output_Targsummary_BLM",rUserBLM,"_SPF",rUserSPF,".csv") }
            if (file.exists(sAppendSummary))
            {
                # ignore header row in sSummary if sAppendSummary exists
                sBuffer <- readLines(con=sSummary)
                write(sBuffer[-1],file=sAppendSummary,append=TRUE)
            } else {
                write(readLines(con=sSummary),file=sAppendSummary,append=FALSE)
            }
        })
    })
    cat(paste0("ExecuteMarxan_paramtest end\n"))
}
#' Run a Marxan parameter test with hard-coded default ramping ranges and
#' fixed user values (BLM = 0, SPF = 1, Targ = 0.3), delegating to
#' ExecuteMarxan_paramtest().
#' @export
RunMarxan_paramtest <- function(sParam)
{
    # Default [min, max] ramp per parameter: BLM/SPF span many orders of
    # magnitude (ramped exponentially downstream), Targ is in [0, 1].
    if (sParam == "BLM")
    {
        rMin <- 0
        rMax <- 10000000000000
    }
    if (sParam == "SPF")
    {
        rMin <- 0.0001
        rMax <- 10000000000000
    }
    if (sParam == "Targ")
    {
        rMin <- 0
        rMax <- 1
    }
    ExecuteMarxan_paramtest(sParam=sParam,rMin=rMin,rMax=rMax,
                            rUserBLM=0,rUserSPF=1,rUserTarg=0.3)
}
#' Run a Marxan parameter test with ranges and user values taken from
#' app-level globals (rRampBLMmin/max, rRampSPFmin/max, rtargetmin/max,
#' ruserblm, ruserspf, rusertarg) — presumably set by the Shiny UI; they
#' are not defined in this file (TODO confirm against the app code).
#' @export
RunMarxan_paramtest_app <- function(sParam)
{
    # set min, max, interval for value ramping
    if (sParam == "BLM")
    {
        rMin <- rRampBLMmin
        rMax <- rRampBLMmax
    }
    if (sParam == "SPF")
    {
        rMin <- rRampSPFmin
        rMax <- rRampSPFmax
    }
    if (sParam == "Targ")
    {
        rMin <- rtargetmin
        rMax <- rtargetmax
    }
    ExecuteMarxan_paramtest(sParam=sParam,rMin=rMin,rMax=rMax,
                            rUserBLM=ruserblm,rUserSPF=ruserspf,rUserTarg=rusertarg)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analytics-dashboard.R
\name{sf_dashboard_set_sticky_filter}
\alias{sf_dashboard_set_sticky_filter}
\title{Set a sticky dashboard filter}
\usage{
sf_dashboard_set_sticky_filter(
dashboard_id,
dashboard_filters = c(character(0))
)
}
\arguments{
\item{dashboard_id}{\code{character}; the Salesforce Id assigned to a created
dashboard. It will start with \code{"01Z"}.}
\item{dashboard_filters}{\code{character}; Dashboard results are always unfiltered, unless you
have specified filter parameters in your request. Use this argument to include
up to three optional filter Ids. You can obtain the list of defined filter Ids
from the dashboard metadata using \link{sf_dashboard_describe}.}
}
\value{
\code{list}
}
\description{
\ifelse{html}{\out{<a href='https://www.tidyverse.org/lifecycle/#experimental'><img src='figures/lifecycle-experimental.svg' alt='Experimental lifecycle'></a>}}{\strong{Experimental}}
Set a default filter value which gets applied to a dashboard when you open
it. The default filter value you specify only applies to you (other people
won’t see it when they open the dashboard). If you change the filter value
while viewing the dashboard, then the filter value you set in the user
interface overwrites the value you set via the API. To set sticky filters for
a dashboard, \code{canUseStickyFilter} must equal true.
Saves any dashboard filters set in the request so that they’re also set the
next time you open the dashboard. NOTE: You can only set dashboard filters for
yourself, not for other users.
}
|
/man/sf_dashboard_set_sticky_filter.Rd
|
permissive
|
carlganz/salesforcer
|
R
| false
| true
| 1,612
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analytics-dashboard.R
\name{sf_dashboard_set_sticky_filter}
\alias{sf_dashboard_set_sticky_filter}
\title{Set a sticky dashboard filter}
\usage{
sf_dashboard_set_sticky_filter(
dashboard_id,
dashboard_filters = c(character(0))
)
}
\arguments{
\item{dashboard_id}{\code{character}; the Salesforce Id assigned to a created
dashboard. It will start with \code{"01Z"}.}
\item{dashboard_filters}{\code{character}; Dashboard results are always unfiltered, unless you
have specified filter parameters in your request. Use this argument to include
up to three optional filter Ids. You can obtain the list of defined filter Ids
from the dashboard metadata using \link{sf_dashboard_describe}.}
}
\value{
\code{list}
}
\description{
\ifelse{html}{\out{<a href='https://www.tidyverse.org/lifecycle/#experimental'><img src='figures/lifecycle-experimental.svg' alt='Experimental lifecycle'></a>}}{\strong{Experimental}}
Set a default filter value which gets applied to a dashboard when you open
it. The default filter value you specify only applies to you (other people
won’t see it when they open the dashboard). If you change the filter value
while viewing the dashboard, then the filter value you set in the user
interface overwrites the value you set via the API. To set sticky filters for
a dashboard, \code{canUseStickyFilter} must equal true.
Saves any dashboard filters set in the request so that they’re also set the
next time you open the dashboard. NOTE: You can only set dashboard filters for
yourself, not for other users.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/autoburnin.R
\name{autoburnin}
\alias{autoburnin}
\title{Automatically calculate and apply burnin value}
\usage{
autoburnin(jags_out, return.burnin = FALSE, ...)
}
\arguments{
\item{jags_out}{JAGS output}
\item{return.burnin}{Logical. If \code{TRUE}, return burnin value in addition to
samples (as list). Default = FALSE.}
\item{...}{Additional arguments for \code{getBurnin}, \code{gelman_diag_mw},
and \code{gelman.diag}.}
}
\description{
Automatically calculate and apply burnin value
}
\examples{
z1 <- coda::mcmc(c(rnorm(2500, 5), rnorm(2500, 0)))
z2 <- coda::mcmc(c(rnorm(2500, -5), rnorm(2500, 0)))
z <- coda::mcmc.list(z1, z2)
z_burned <- autoburnin(z)
}
\author{
Michael Dietze, Alexey Shiklomanov
}
|
/modules/assim.batch/man/autoburnin.Rd
|
permissive
|
PecanProject/pecan
|
R
| false
| true
| 809
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/autoburnin.R
\name{autoburnin}
\alias{autoburnin}
\title{Automatically calculate and apply burnin value}
\usage{
autoburnin(jags_out, return.burnin = FALSE, ...)
}
\arguments{
\item{jags_out}{JAGS output}
\item{return.burnin}{Logical. If \code{TRUE}, return burnin value in addition to
samples (as list). Default = FALSE.}
\item{...}{Additional arguments for \code{getBurnin}, \code{gelman_diag_mw},
and \code{gelman.diag}.}
}
\description{
Automatically calculate and apply burnin value
}
\examples{
z1 <- coda::mcmc(c(rnorm(2500, 5), rnorm(2500, 0)))
z2 <- coda::mcmc(c(rnorm(2500, -5), rnorm(2500, 0)))
z <- coda::mcmc.list(z1, z2)
z_burned <- autoburnin(z)
}
\author{
Michael Dietze, Alexey Shiklomanov
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estims.R
\name{divergence_min_KS}
\alias{divergence_min_KS}
\title{Divergence minimization by Kolmogorov Smirnov}
\usage{
divergence_min_KS(res = res)
}
\arguments{
\item{res}{residuals from a linear model with response variable yt and
explanatory variables x}
}
\value{
differences of supremum
}
\description{
Divergence minimization by Kolmogorov Smirnov
}
\keyword{internal}
|
/man/divergence_min_KS.Rd
|
no_license
|
akreutzmann/trafo
|
R
| false
| true
| 457
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estims.R
\name{divergence_min_KS}
\alias{divergence_min_KS}
\title{Divergence minimization by Kolmogorov Smirnov}
\usage{
divergence_min_KS(res = res)
}
\arguments{
\item{res}{residuals from a linear model with response variable yt and
explanatory variables x}
}
\value{
differences of supremum
}
\description{
Divergence minimization by Kolmogorov Smirnov
}
\keyword{internal}
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Server logic: echo the two text inputs back to the UI as reactive text
# (var1 = first name, var2 = last name). (The stock template comment said
# "draw a histogram", which does not match this code.)
shinyServer(function(input, output) {
output$text1<-renderText(paste("the first name is:" ,input$var1))
output$text2<-renderText(paste("The last name is:" ,input$var2))
})
|
/submitbutton1/server.R
|
no_license
|
shrutiror/Shiny_Web_Applications
|
R
| false
| false
| 463
|
r
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Server logic: echo the two text inputs back to the UI as reactive text
# (var1 = first name, var2 = last name). (The stock template comment said
# "draw a histogram", which does not match this code.)
shinyServer(function(input, output) {
output$text1<-renderText(paste("the first name is:" ,input$var1))
output$text2<-renderText(paste("The last name is:" ,input$var2))
})
|
# Harvey Barnhard
# February 29, 2020
# Last modified on February 29, 2020
# Libraries ====================================================================
library(Rsolnp) # Constrained optimization
library(parallel) # Parallel processing
library(pbapply) # Progress bars for parallel processing
library(TMB)
# NPMLE estimation of a proportional-hazards model with discrete unobserved
# heterogeneity, using a support-point (vertex-direction style) algorithm:
# alternate between (a) optimizing coefficients alpha with heterogeneity
# fixed, (b) scanning a grid for a new heterogeneity support point whose
# gradient is negative, and (c) re-optimizing the probability masses.
#
# Args:
#   Xlist:     list of design matrices; layout is defined by the compiled
#              TMB template "NPMLEsurv" -- TODO confirm per-group structure.
#   censorvec: censoring indicators, forwarded to the TMB objective.
#   thetahat:  initial heterogeneity support point(s).
#   theta_dom: grid of candidate support points scanned in Step 3.
#   numiter:   maximum number of outer iterations (default 8).
#
# Returns: list(alpha, pi, theta, loglik, fisher), where fisher is the
#          Hessian of the final alpha-optimization (used downstream for
#          approximate standard errors).
#
# NOTE(review): the compiled TMB DLL "NPMLEsurv" must already be loaded
# (dyn.load(TMB::dynlib("NPMLEsurv"))) before calling; this function does
# not load it itself.
prop.hazard <- function(Xlist,
                        censorvec,
                        thetahat,
                        theta_dom,
                        numiter=8){
  data <- list(Xlist=Xlist, censorvec=censorvec)
  # Step 1: Initialize coefficient vectors
  alphahat <- rep(0, ncol(Xlist[[1]]))
  pihat <- 1
  loglik <- c()
  i <- 1
  while(i <= numiter){
    cat("========== Iteration ", i, "==========\n")
    # Set parameters for iteration
    parameters <- list(alpha=alphahat,
                       theta=thetahat,
                       pi=pihat)
    # Step 2: Create objective function and optimize keeping heterogeneity
    # static. Mapping theta/pi to factor(NA) tells TMB to hold them fixed,
    # so optim() only searches over alpha.
    map <- list(theta=rep(factor(NA), length(thetahat)),
                pi=rep(factor(NA), length(pihat)))
    obj <- TMB::MakeADFun(data,
                          parameters,
                          DLL="NPMLEsurv",
                          map=map,
                          silent=TRUE)
    obj$method <- "BFGS" # Optimization method
    obj$hessian <- TRUE # Return Hessian?
    optiter <- do.call("optim", obj)
    # If only one iteration is desired, then return initial values of
    # heterogeneity points
    if(numiter==1){
      break
    }
    # Check to see if the negative log-likelihood has decreased by more than
    # 0.5. If not, end process
    if(i > 1){
      if(abs(optiter$value - loglik[length(loglik)]) < 0.5){
        break
      }
    }
    loglik[i] <- optiter$value
    alphahat <- optiter$par
    # Print parameter output
    cat(paste0(alphahat, "\n"))
    # Step 3: Evaluate gradient of a new heterogeneity support point over a
    # preset grid of values. The candidate point enters with zero mass; the
    # extracted gradient component indexes the new point's slot -- assumes
    # TMB orders the (theta, pi) gradient pairwise; verify against template.
    parameters <- list(alpha=alphahat,
                       theta=c(thetahat,0),
                       pi=c(pihat,0))
    map <- list(alpha=rep(factor(NA), length(alphahat)))
    obj <- TMB::MakeADFun(data,
                          parameters,
                          DLL="NPMLEsurv",
                          map=map,
                          silent=TRUE)
    muvec <- sapply(theta_dom[!theta_dom%in%thetahat],
                    function(x) obj$gr(c(thetahat,
                                         x,
                                         c(pihat,0)),
                                       order=1)[2*(length(pihat)+1)])
    # If no candidate point has a negative gradient, the current support
    # cannot be improved: stop.
    if(all(muvec>=0)){
      break
    }
    # Add the best (most negative gradient) candidate and reset masses to
    # uniform before re-optimizing them in Step 4.
    thetahat <- c(thetahat, theta_dom[!theta_dom%in%thetahat][which.min(muvec)])
    pihat <- rep(1/(length(thetahat)), length(thetahat))
    # Step 4: Numerically solve the constrained optimization problem for
    # optimal probabilities: minimize obj$fn over pi subject to
    # sum(pi) == 1 and 0 <= pi <= 1, with alpha and theta held fixed.
    parameters <- list(alpha=alphahat,
                       theta=thetahat,
                       pi=pihat)
    map <- list(alpha=rep(factor(NA), length(alphahat)),
                theta=rep(factor(NA), length(thetahat)))
    obj <- TMB::MakeADFun(data,
                          parameters,
                          DLL="NPMLEsurv",
                          map=map,
                          silent=TRUE)
    eqfun <- function(x) sum(x)
    opt <- solnp(pihat,
                 fun=obj$fn,
                 eqfun = eqfun,
                 eqB =1,
                 LB=rep(0, length(pihat)),
                 UB=rep(1, length(pihat)),
                 control=list(trace=0))
    pihat <- opt$pars
    i <- i+1
  }
  # `optiter` is always assigned (Step 2 runs before any break), so
  # optiter$hessian is the Hessian of the most recent alpha fit.
  return(list(alpha=alphahat,
              pi=pihat,
              theta=thetahat,
              loglik=loglik,
              fisher=optiter$hessian))
}
# Wrapper ======================================================================
# Estimate the NPMLE proportional-hazards model from several random starting
# heterogeneity support points (in parallel) and keep the best fit.
#
# Args:
#   Xlist, censorvec: data, passed through to prop.hazard().
#   theta_dom: grid of candidate heterogeneity support points.
#   numiter:   max iterations per start (forwarded to prop.hazard).
#   clust:     a `parallel` cluster object (from parallel::makeCluster()).
#   theta_num: number of random starting points drawn from theta_dom.
#
# Returns: list(coef, ll) -- coef is a 2-row matrix (point estimates and
#          approximate standard errors), ll the best (minimum) negative
#          log-likelihood across starts.
est.prop.hazard <- function(Xlist, censorvec, theta_dom, numiter=8,
                            clust, theta_num){
  # Heterogeneity support points to start from
  theta_start <- sample(theta_dom, theta_num, replace=FALSE)
  # Prepare cluster: attach required packages on every worker.
  # BUG FIX: the original called library("Rsolnp", "TMB"); library()'s second
  # positional argument is `help`, so TMB was never attached on the workers.
  trash <- clusterEvalQ(clust, {
    library(Rsolnp)
    library(TMB)
  })
  clusterExport(clust,
                c("Xlist", "censorvec", "theta_dom", "theta_start",
                  "prop.hazard", "numiter"),
                envir=environment())
  # Each worker must load the compiled TMB template before estimating.
  trash <- clusterEvalQ(clust, dyn.load(TMB::dynlib("NPMLEsurv")))
  # Estimate: one prop.hazard() run per starting value, with a progress bar.
  results <- pblapply(theta_start, function (x){
    prop.hazard(
      Xlist=Xlist,
      censorvec,
      x,
      theta_dom,
      numiter
    )
  },
  cl=clust)
  # Find the starting value of theta that resulted in the lowest negative
  # log-likelihood
  loglik <- sapply(lapply(results, `[[`, 4), min)
  optresults <- results[[which.min(loglik)]]
  # Approximate standard errors from the Hessian of the best fit.
  # NOTE(review): 1/sqrt(diag(H)) equals sqrt(diag(solve(H))) only when H is
  # diagonal; for correlated estimates the usual delta-method SE would be
  # sqrt(diag(solve(fisher))) -- confirm which is intended before changing.
  alphahat <- optresults$alpha
  fisher <- optresults$fisher
  se <- 1/sqrt(diag(fisher))
  # Name coefficients
  names(alphahat) <- paste0("alpha", seq_along(alphahat))
  names(se) <- names(alphahat)
  # Output results
  output <- list(coef=rbind(alphahat, se),
                 ll=min(loglik))
  return(output)
}
|
/R/ll_fun2.R
|
no_license
|
harveybarnhard/NPMLEsurv
|
R
| false
| false
| 5,146
|
r
|
# Harvey Barnhard
# February 29, 2020
# Last modified on February 29, 2020
# Libraries ====================================================================
library(Rsolnp) # Constrained optimization
library(parallel) # Parallel processing
library(pbapply) # Progress bars for parallel processing
library(TMB)
# NPMLE estimation of a proportional-hazards model with discrete unobserved
# heterogeneity, using a support-point (vertex-direction style) algorithm:
# alternate between (a) optimizing coefficients alpha with heterogeneity
# fixed, (b) scanning a grid for a new heterogeneity support point whose
# gradient is negative, and (c) re-optimizing the probability masses.
#
# Args:
#   Xlist:     list of design matrices; layout is defined by the compiled
#              TMB template "NPMLEsurv" -- TODO confirm per-group structure.
#   censorvec: censoring indicators, forwarded to the TMB objective.
#   thetahat:  initial heterogeneity support point(s).
#   theta_dom: grid of candidate support points scanned in Step 3.
#   numiter:   maximum number of outer iterations (default 8).
#
# Returns: list(alpha, pi, theta, loglik, fisher), where fisher is the
#          Hessian of the final alpha-optimization (used downstream for
#          approximate standard errors).
#
# NOTE(review): the compiled TMB DLL "NPMLEsurv" must already be loaded
# (dyn.load(TMB::dynlib("NPMLEsurv"))) before calling; this function does
# not load it itself.
prop.hazard <- function(Xlist,
                        censorvec,
                        thetahat,
                        theta_dom,
                        numiter=8){
  data <- list(Xlist=Xlist, censorvec=censorvec)
  # Step 1: Initialize coefficient vectors
  alphahat <- rep(0, ncol(Xlist[[1]]))
  pihat <- 1
  loglik <- c()
  i <- 1
  while(i <= numiter){
    cat("========== Iteration ", i, "==========\n")
    # Set parameters for iteration
    parameters <- list(alpha=alphahat,
                       theta=thetahat,
                       pi=pihat)
    # Step 2: Create objective function and optimize keeping heterogeneity
    # static. Mapping theta/pi to factor(NA) tells TMB to hold them fixed,
    # so optim() only searches over alpha.
    map <- list(theta=rep(factor(NA), length(thetahat)),
                pi=rep(factor(NA), length(pihat)))
    obj <- TMB::MakeADFun(data,
                          parameters,
                          DLL="NPMLEsurv",
                          map=map,
                          silent=TRUE)
    obj$method <- "BFGS" # Optimization method
    obj$hessian <- TRUE # Return Hessian?
    optiter <- do.call("optim", obj)
    # If only one iteration is desired, then return initial values of
    # heterogeneity points
    if(numiter==1){
      break
    }
    # Check to see if the negative log-likelihood has decreased by more than
    # 0.5. If not, end process
    if(i > 1){
      if(abs(optiter$value - loglik[length(loglik)]) < 0.5){
        break
      }
    }
    loglik[i] <- optiter$value
    alphahat <- optiter$par
    # Print parameter output
    cat(paste0(alphahat, "\n"))
    # Step 3: Evaluate gradient of a new heterogeneity support point over a
    # preset grid of values. The candidate point enters with zero mass; the
    # extracted gradient component indexes the new point's slot -- assumes
    # TMB orders the (theta, pi) gradient pairwise; verify against template.
    parameters <- list(alpha=alphahat,
                       theta=c(thetahat,0),
                       pi=c(pihat,0))
    map <- list(alpha=rep(factor(NA), length(alphahat)))
    obj <- TMB::MakeADFun(data,
                          parameters,
                          DLL="NPMLEsurv",
                          map=map,
                          silent=TRUE)
    muvec <- sapply(theta_dom[!theta_dom%in%thetahat],
                    function(x) obj$gr(c(thetahat,
                                         x,
                                         c(pihat,0)),
                                       order=1)[2*(length(pihat)+1)])
    # If no candidate point has a negative gradient, the current support
    # cannot be improved: stop.
    if(all(muvec>=0)){
      break
    }
    # Add the best (most negative gradient) candidate and reset masses to
    # uniform before re-optimizing them in Step 4.
    thetahat <- c(thetahat, theta_dom[!theta_dom%in%thetahat][which.min(muvec)])
    pihat <- rep(1/(length(thetahat)), length(thetahat))
    # Step 4: Numerically solve the constrained optimization problem for
    # optimal probabilities: minimize obj$fn over pi subject to
    # sum(pi) == 1 and 0 <= pi <= 1, with alpha and theta held fixed.
    parameters <- list(alpha=alphahat,
                       theta=thetahat,
                       pi=pihat)
    map <- list(alpha=rep(factor(NA), length(alphahat)),
                theta=rep(factor(NA), length(thetahat)))
    obj <- TMB::MakeADFun(data,
                          parameters,
                          DLL="NPMLEsurv",
                          map=map,
                          silent=TRUE)
    eqfun <- function(x) sum(x)
    opt <- solnp(pihat,
                 fun=obj$fn,
                 eqfun = eqfun,
                 eqB =1,
                 LB=rep(0, length(pihat)),
                 UB=rep(1, length(pihat)),
                 control=list(trace=0))
    pihat <- opt$pars
    i <- i+1
  }
  # `optiter` is always assigned (Step 2 runs before any break), so
  # optiter$hessian is the Hessian of the most recent alpha fit.
  return(list(alpha=alphahat,
              pi=pihat,
              theta=thetahat,
              loglik=loglik,
              fisher=optiter$hessian))
}
# Wrapper ======================================================================
# Estimate the NPMLE proportional-hazards model from several random starting
# heterogeneity support points (in parallel) and keep the best fit.
#
# Args:
#   Xlist, censorvec: data, passed through to prop.hazard().
#   theta_dom: grid of candidate heterogeneity support points.
#   numiter:   max iterations per start (forwarded to prop.hazard).
#   clust:     a `parallel` cluster object (from parallel::makeCluster()).
#   theta_num: number of random starting points drawn from theta_dom.
#
# Returns: list(coef, ll) -- coef is a 2-row matrix (point estimates and
#          approximate standard errors), ll the best (minimum) negative
#          log-likelihood across starts.
est.prop.hazard <- function(Xlist, censorvec, theta_dom, numiter=8,
                            clust, theta_num){
  # Heterogeneity support points to start from
  theta_start <- sample(theta_dom, theta_num, replace=FALSE)
  # Prepare cluster: attach required packages on every worker.
  # BUG FIX: the original called library("Rsolnp", "TMB"); library()'s second
  # positional argument is `help`, so TMB was never attached on the workers.
  trash <- clusterEvalQ(clust, {
    library(Rsolnp)
    library(TMB)
  })
  clusterExport(clust,
                c("Xlist", "censorvec", "theta_dom", "theta_start",
                  "prop.hazard", "numiter"),
                envir=environment())
  # Each worker must load the compiled TMB template before estimating.
  trash <- clusterEvalQ(clust, dyn.load(TMB::dynlib("NPMLEsurv")))
  # Estimate: one prop.hazard() run per starting value, with a progress bar.
  results <- pblapply(theta_start, function (x){
    prop.hazard(
      Xlist=Xlist,
      censorvec,
      x,
      theta_dom,
      numiter
    )
  },
  cl=clust)
  # Find the starting value of theta that resulted in the lowest negative
  # log-likelihood
  loglik <- sapply(lapply(results, `[[`, 4), min)
  optresults <- results[[which.min(loglik)]]
  # Approximate standard errors from the Hessian of the best fit.
  # NOTE(review): 1/sqrt(diag(H)) equals sqrt(diag(solve(H))) only when H is
  # diagonal; for correlated estimates the usual delta-method SE would be
  # sqrt(diag(solve(fisher))) -- confirm which is intended before changing.
  alphahat <- optresults$alpha
  fisher <- optresults$fisher
  se <- 1/sqrt(diag(fisher))
  # Name coefficients
  names(alphahat) <- paste0("alpha", seq_along(alphahat))
  names(se) <- names(alphahat)
  # Output results
  output <- list(coef=rbind(alphahat, se),
                 ll=min(loglik))
  return(output)
}
|
# http://www.biostars.org/p/61192/
|
/ExtractRNA-seq.R
|
no_license
|
zhenyisong/CardioTF_Database
|
R
| false
| false
| 34
|
r
|
# http://www.biostars.org/p/61192/
|
# Package test entry point (tests/testthat.R): runs every testthat test
# under tests/testthat/ for the reportr package; invoked by R CMD check.
library(testthat)
library(reportr)
test_check("reportr")
|
/tests/testthat.R
|
no_license
|
jonclayden/reportr
|
R
| false
| false
| 58
|
r
|
# Package test entry point (tests/testthat.R): runs every testthat test
# under tests/testthat/ for the reportr package; invoked by R CMD check.
library(testthat)
library(reportr)
test_check("reportr")
|
### Dogbone Integration
#
# Numeric verification script: closed-form results derived by complex
# contour integration (dogbone / double-keyhole contours, from the linked
# videos) are checked against base R's stats::integrate(). The script is
# meant to be run interactively; each integrate() call and the closed-form
# expression immediately after it should print approximately equal values.
#
# 1. Complex Analysis: Dogbone Contour Example
# https://www.youtube.com/watch?v=UDIKojCQ94U
integrate(function(x) x^(3/4) * (3 - x)^(1/4) / (5 - x), lower=0, upper=3)
# 2. Complex Analysis: Dogbone Contour Example #2
# https://www.youtube.com/watch?v=q1BxM1MWAqA
integrate(function(x) (1 - x)^(-2/3) * (1 + x)^(-1/3) / (4 + x^2), lower=-1, upper=1)
# 3. Complex Analysis: Dogbone Contour Example #3
# https://www.youtube.com/watch?v=-HWcFun7e4k
# - is similar to [2];
integrate(function(x) (1 - x)^(1/2) * (1 + x)^(1/2) / (1 + x^2), lower=-1, upper=1)
# 4. qncubed3: Complex Analysis: Dogbone Contour Generalisation
# https://www.youtube.com/watch?v=w-NIlyXZzqU
p = 1/3; a = 1; b = 4;
integrate(function(x) (x - a)^p * (b - x)^(1 - p), lower=a, upper=b)
# 5. qncubed3: Complex Analysis: Double Keyhole Contour
# https://www.youtube.com/watch?v=LT3jpvWMH2s
integrate(function(x) 1 / (x*sqrt(x^2 - 1)), lower=1, upper=Inf)
##############
### Example 1:
# Complex Analysis: Dogbone Contour Example
# https://www.youtube.com/watch?v=UDIKojCQ94U
integrate(function(x) x^(3/4)*(3 - x)^(1/4) / (5 - x), lower=0, upper=3)
pi*(17/4 - (5^3*2)^(1/4))*sqrt(2)
### Generalizations:
integrate(function(x) x^(3/4)*(3 - x)^(1/4) / (7 - x), lower=0, upper=3)
pi*(7 - 3/4 - (7^3*4)^(1/4))*sqrt(2)
### Gen 1:
k = 11
integrate(function(x) x^(3/4)*(3 - x)^(1/4) / (k - x), lower=0, upper=3)
pi*(k - 3/4 - (k^3*(k - 3))^(1/4))*sqrt(2)
### Gen 2:
k = 11; b = 4;
integrate(function(x) x^(3/4)*(b - x)^(1/4) / (k - x), lower=0, upper=b)
pi*(k - b/4 - (k^3*(k - b))^(1/4))*sqrt(2)
### Gen 3: Full
# Fully generalized: pole at k, branch points at 0 and b, exponents
# (p-1)/p and 1/p summing to 1.
k = 11; b = 4;
p = 5;
integrate(function(x) x^((p-1)/p)*(b - x)^(1/p) / (k - x), lower=0, upper=b)
2i * pi*(k - b/p - (k^(p-1)*(k - b))^(1/p)) * exp(1i*pi/p) / (exp(2i*pi/p) - 1)
pi*(k - b/p - (k^(p-1)*(k - b))^(1/p)) / sin(pi/p)
# Arg - Inf from above: (p+1)/p * pi;
# Arg - Inf from below: -(p-1)/p * pi;
# Continuous: OK
# 2*pi - (p-1)/p * pi => (p+1)/p * pi;
# Note:
# - variation/partitioning of powers can be emulated with fractional p;
### Ex:
p = 5/2;
k = 11; b = 4;
integrate(function(x) x^(3/5)*(b - x)^(2/5) / (k - x), lower=0, upper=b)
#
integrate(function(x) x^((p-1)/p)*(b - x)^(1/p) / (k - x), lower=0, upper=b)
pi*(k - b/p - (k^(p-1)*(k - b))^(1/p)) / sin(pi/p)
######################
### Example 2:
# Complex Analysis: Dogbone Contour Example #2
# https://www.youtube.com/watch?v=q1BxM1MWAqA
integrate(function(x) (1 - x)^(-2/3)*(1 + x)^(-1/3) / (4 + x^2), lower=-1, upper=1)
pi*sin(atan(2)/3 + pi/3) / sin(pi/3) / sqrt(5) / 2
### Gen 1:
k = sqrt(5)
integrate(function(x) (1 - x)^(-2/3)*(1 + x)^(-1/3) / (k^2 + x^2), lower=-1, upper=1)
pi*sin(atan(k)/3 + pi/3) / sin(pi/3) / sqrt(k^2 + 1) / k
### Gen 2:
k = sqrt(3);
p = 5;
integrate(function(x) (1 - x)^(1/p - 1) * (1 + x)^(-1/p) / (k^2 + x^2), lower=-1, upper=1)
pi*sin(atan(k)*(1-2/p) + pi/p) / sin(pi/p) / sqrt(1 + k^2) / k
### Gen 3:
b = 5;
k = sqrt(3);
p = 5;
integrate(function(x) (b - x)^(1/p - 1) * (b + x)^(-1/p) / (k^2 + x^2), lower=-b, upper=b)
pi*sin(atan(k/b)*(1-2/p) + pi/p) / sin(pi/p) / sqrt(b^2 + k^2) / k
### Derivation:
pi*(exp(1i*atan(k)*(1-2/p) + 2i*pi/p) +
	- exp(1i*(2/p-1)*atan(k))) / sqrt(1 + k^2) / (2i*k) / (exp(1i*pi/p) * sin(pi/p))
# Arg - Inf from above: (1/p - 2)*pi;
# Arg - Inf from below: 1/p * pi;
# Continuous: OK
# 2*pi + (1/p - 2)*pi => 1/p * pi;
# (1/p - 1)*2*pi (equivalent to: 2*pi/p); 0;
# (2*pi - fi)*(1/p-1) - fi/p = 2*pi/p + (1 - 2/p)*fi;
### Variant: n = 0
# (b - x)^(1/p - 0)
# Conditions: b > 0
b = sqrt(3);
k = sqrt(7);
p = 5;
integrate(function(x) (b - x)^(1/p) * (b + x)^(-1/p) / (k^2 + x^2), lower=-b, upper=b)
pi*sin(atan(k/b)*(-2/p) + pi/p) / sin(pi/p) / k
### Variant: n1 = -1; n2 = 1;
# - equivalent to substitution:
# 1/p => 1/p - 1 (in Variant 1);
b = sqrt(3);
k = sqrt(7);
p = 5;
integrate(function(x) (b - x)^(1/p - 1) * (b + x)^(1-1/p) / (k^2 + x^2), lower=-b, upper=b)
pi*sin(atan(k/b)*(2-2/p) + pi/p) / sin(pi/p) / k
#########################
#########################
# 4. qncubed3: Complex Analysis: Dogbone Contour Generalisation
# https://www.youtube.com/watch?v=w-NIlyXZzqU
p = 1/3; a = 1; b = 4;
integrate(function(x) (x - a)^p * (b - x)^(1 - p), lower=a, upper=b)
pi/2 * p*(1 - p)*(b - a)^2/sin(pi*p)
### Gen 1:
p = sqrt(3) - sqrt(2)
a = sqrt(5); b = sqrt(7);
integrate(function(x) x * (x - a)^p * (b - x)^(1 - p), lower=a, upper=b)
pi/6 * p*(1-p)*((2-p)*a + (1+p)*b)*(b - a)^2/sin(pi*p)
### Gen 2 & 3: (x - a)^p * (b - x)^q
# Transform: y = b - (b-a)/(x+1);
# New: (b-a)^(p+q+1) * x^p / (x+1)^(p+q+2);
# Interval: [0, 1] => [0, Inf];
### Gen 2:
p = 1/3; a = 1; b = 4;
integrate(function(x) (x - a)^p * (b - x)^p, lower=a, upper=b)
(b-a)^(2*p+1) * gamma(p+1)*gamma(p+1) / gamma(2*p+2)
###
p = sqrt(5) - sqrt(3); a = sqrt(2); b = 4;
integrate(function(x) (x - a)^p * (b - x)^p, lower=a, upper=b)
(b-a)^(2*p+1) * gamma(p+1)*gamma(p+1) / gamma(2*p+2)
### Gen 3: Full
# Beta-function identity: integral of (x-a)^p (b-x)^q over [a, b] equals
# (b-a)^(p+q+1) * B(p+1, q+1), written below via gamma().
p = 1/3; q = 1/5; a = 1; b = 4;
integrate(function(x) (x - a)^p * (b - x)^q, lower=a, upper=b)
(b-a)^(p+q+1) * gamma(p+1)*gamma(q+1) / gamma(p+q+2)
###
p = sqrt(3); q = sqrt(5); a = 1; b = 4;
integrate(function(x) (x - a)^p * (b - x)^q, lower=a, upper=b)
(b-a)^(p+q+1) * gamma(p+1)*gamma(q+1) / gamma(p+q+2)
# Note:
# - based on Convolution & Laplace transform:
# Dr. Peyam: Laplace integral gone BANANAS
# https://www.youtube.com/watch?v=a5l4owYxjRw
### Variant:
p = sqrt(3); q = sqrt(5); a = 1; b = 4;
integrate(function(x) x * (x - a)^p * (b - x)^q, lower=a, upper=b)
b*(b-a)^(p+q+1) * gamma(p+1)*gamma(q+1) / gamma(p+q+2) +
	- (b-a)^(p+q+2) * gamma(p+1)*gamma(q+2) / gamma(p+q+3)
# simplified:
(b*(p+q+2) - (b-a)*(q+1)) * (b-a)^(p+q+1) * gamma(p+1)*gamma(q+1) / gamma(p+q+3)
(b*(p+1) + a*(q+1)) * (b-a)^(p+q+1) * gamma(p+1)*gamma(q+1) / gamma(p+q+3)
### Variant:
p = sqrt(3); q = sqrt(5); a = 1; b = 4;
integrate(function(x) x^2 * (x - a)^p * (b - x)^q, lower=a, upper=b)
b^2*(b-a)^(p+q+1) * gamma(p+1)*gamma(q+1) / gamma(p+q+2) +
	- 2*b*(b-a)^(p+q+2) * gamma(p+1)*gamma(q+2) / gamma(p+q+3) +
	+ (b-a)^(p+q+3) * gamma(p+1)*gamma(q+3) / gamma(p+q+4)
# simplified:
(b^2*(p+q+2)*(p+q+3) - 2*b*(b-a)*(q+1)*(p+q+3) + (b-a)^2*(q+1)*(q+2)) *
	(b-a)^(p+q+1) * gamma(p+1)*gamma(q+1) / gamma(p+q+4)
###########################
###########################
# 5. qncubed3: Complex Analysis: Double Keyhole Contour
# https://www.youtube.com/watch?v=LT3jpvWMH2s
# - Contour: double keyhole (not a dogbone contour);
integrate(function(x) 1 / (x*sqrt(x^2 - 1)), lower=1, upper=Inf)
pi / 2
### Gen 1:
k = 3
integrate(function(x) 1 / (x * (x^2 - 1)^(1/k)), lower=1, upper=Inf)
pi/2 / sin(pi/k)
###
k = sqrt(5)
integrate(function(x) 1 / (x * (x^2 - 1)^(1/k)), lower=1, upper=Inf)
pi/2 / sin(pi/k)
# Res = 2i*pi * exp(-1i*pi/k)
### Gen 2:
n = 3
k = 2
integrate(function(x) 1 / (x * (x^n - 1)^(1/k)), lower=1, upper=Inf)
pi/n / sin(pi/k)
###
n = sqrt(5)
k = sqrt(2)
integrate(function(x) 1 / (x * (x^n - 1)^(1/k)), lower=1, upper=Inf)
pi/n / sin(pi/k)
|
/Math/Integrals.Dogbone.R
|
no_license
|
discoleo/R
|
R
| false
| false
| 7,257
|
r
|
### Dogbone Integration
#
# Numeric verification script: closed-form results derived by complex
# contour integration (dogbone / double-keyhole contours, from the linked
# videos) are checked against base R's stats::integrate(). The script is
# meant to be run interactively; each integrate() call and the closed-form
# expression immediately after it should print approximately equal values.
#
# 1. Complex Analysis: Dogbone Contour Example
# https://www.youtube.com/watch?v=UDIKojCQ94U
integrate(function(x) x^(3/4) * (3 - x)^(1/4) / (5 - x), lower=0, upper=3)
# 2. Complex Analysis: Dogbone Contour Example #2
# https://www.youtube.com/watch?v=q1BxM1MWAqA
integrate(function(x) (1 - x)^(-2/3) * (1 + x)^(-1/3) / (4 + x^2), lower=-1, upper=1)
# 3. Complex Analysis: Dogbone Contour Example #3
# https://www.youtube.com/watch?v=-HWcFun7e4k
# - is similar to [2];
integrate(function(x) (1 - x)^(1/2) * (1 + x)^(1/2) / (1 + x^2), lower=-1, upper=1)
# 4. qncubed3: Complex Analysis: Dogbone Contour Generalisation
# https://www.youtube.com/watch?v=w-NIlyXZzqU
p = 1/3; a = 1; b = 4;
integrate(function(x) (x - a)^p * (b - x)^(1 - p), lower=a, upper=b)
# 5. qncubed3: Complex Analysis: Double Keyhole Contour
# https://www.youtube.com/watch?v=LT3jpvWMH2s
integrate(function(x) 1 / (x*sqrt(x^2 - 1)), lower=1, upper=Inf)
##############
### Example 1:
# Complex Analysis: Dogbone Contour Example
# https://www.youtube.com/watch?v=UDIKojCQ94U
integrate(function(x) x^(3/4)*(3 - x)^(1/4) / (5 - x), lower=0, upper=3)
pi*(17/4 - (5^3*2)^(1/4))*sqrt(2)
### Generalizations:
integrate(function(x) x^(3/4)*(3 - x)^(1/4) / (7 - x), lower=0, upper=3)
pi*(7 - 3/4 - (7^3*4)^(1/4))*sqrt(2)
### Gen 1:
k = 11
integrate(function(x) x^(3/4)*(3 - x)^(1/4) / (k - x), lower=0, upper=3)
pi*(k - 3/4 - (k^3*(k - 3))^(1/4))*sqrt(2)
### Gen 2:
k = 11; b = 4;
integrate(function(x) x^(3/4)*(b - x)^(1/4) / (k - x), lower=0, upper=b)
pi*(k - b/4 - (k^3*(k - b))^(1/4))*sqrt(2)
### Gen 3: Full
# Fully generalized: pole at k, branch points at 0 and b, exponents
# (p-1)/p and 1/p summing to 1.
k = 11; b = 4;
p = 5;
integrate(function(x) x^((p-1)/p)*(b - x)^(1/p) / (k - x), lower=0, upper=b)
2i * pi*(k - b/p - (k^(p-1)*(k - b))^(1/p)) * exp(1i*pi/p) / (exp(2i*pi/p) - 1)
pi*(k - b/p - (k^(p-1)*(k - b))^(1/p)) / sin(pi/p)
# Arg - Inf from above: (p+1)/p * pi;
# Arg - Inf from below: -(p-1)/p * pi;
# Continuous: OK
# 2*pi - (p-1)/p * pi => (p+1)/p * pi;
# Note:
# - variation/partitioning of powers can be emulated with fractional p;
### Ex:
p = 5/2;
k = 11; b = 4;
integrate(function(x) x^(3/5)*(b - x)^(2/5) / (k - x), lower=0, upper=b)
#
integrate(function(x) x^((p-1)/p)*(b - x)^(1/p) / (k - x), lower=0, upper=b)
pi*(k - b/p - (k^(p-1)*(k - b))^(1/p)) / sin(pi/p)
######################
### Example 2:
# Complex Analysis: Dogbone Contour Example #2
# https://www.youtube.com/watch?v=q1BxM1MWAqA
integrate(function(x) (1 - x)^(-2/3)*(1 + x)^(-1/3) / (4 + x^2), lower=-1, upper=1)
pi*sin(atan(2)/3 + pi/3) / sin(pi/3) / sqrt(5) / 2
### Gen 1:
k = sqrt(5)
integrate(function(x) (1 - x)^(-2/3)*(1 + x)^(-1/3) / (k^2 + x^2), lower=-1, upper=1)
pi*sin(atan(k)/3 + pi/3) / sin(pi/3) / sqrt(k^2 + 1) / k
### Gen 2:
k = sqrt(3);
p = 5;
integrate(function(x) (1 - x)^(1/p - 1) * (1 + x)^(-1/p) / (k^2 + x^2), lower=-1, upper=1)
pi*sin(atan(k)*(1-2/p) + pi/p) / sin(pi/p) / sqrt(1 + k^2) / k
### Gen 3:
b = 5;
k = sqrt(3);
p = 5;
integrate(function(x) (b - x)^(1/p - 1) * (b + x)^(-1/p) / (k^2 + x^2), lower=-b, upper=b)
pi*sin(atan(k/b)*(1-2/p) + pi/p) / sin(pi/p) / sqrt(b^2 + k^2) / k
### Derivation:
pi*(exp(1i*atan(k)*(1-2/p) + 2i*pi/p) +
	- exp(1i*(2/p-1)*atan(k))) / sqrt(1 + k^2) / (2i*k) / (exp(1i*pi/p) * sin(pi/p))
# Arg - Inf from above: (1/p - 2)*pi;
# Arg - Inf from below: 1/p * pi;
# Continuous: OK
# 2*pi + (1/p - 2)*pi => 1/p * pi;
# (1/p - 1)*2*pi (equivalent to: 2*pi/p); 0;
# (2*pi - fi)*(1/p-1) - fi/p = 2*pi/p + (1 - 2/p)*fi;
### Variant: n = 0
# (b - x)^(1/p - 0)
# Conditions: b > 0
b = sqrt(3);
k = sqrt(7);
p = 5;
integrate(function(x) (b - x)^(1/p) * (b + x)^(-1/p) / (k^2 + x^2), lower=-b, upper=b)
pi*sin(atan(k/b)*(-2/p) + pi/p) / sin(pi/p) / k
### Variant: n1 = -1; n2 = 1;
# - equivalent to substitution:
# 1/p => 1/p - 1 (in Variant 1);
b = sqrt(3);
k = sqrt(7);
p = 5;
integrate(function(x) (b - x)^(1/p - 1) * (b + x)^(1-1/p) / (k^2 + x^2), lower=-b, upper=b)
pi*sin(atan(k/b)*(2-2/p) + pi/p) / sin(pi/p) / k
#########################
#########################
# 4. qncubed3: Complex Analysis: Dogbone Contour Generalisation
# https://www.youtube.com/watch?v=w-NIlyXZzqU
p = 1/3; a = 1; b = 4;
integrate(function(x) (x - a)^p * (b - x)^(1 - p), lower=a, upper=b)
pi/2 * p*(1 - p)*(b - a)^2/sin(pi*p)
### Gen 1:
p = sqrt(3) - sqrt(2)
a = sqrt(5); b = sqrt(7);
integrate(function(x) x * (x - a)^p * (b - x)^(1 - p), lower=a, upper=b)
pi/6 * p*(1-p)*((2-p)*a + (1+p)*b)*(b - a)^2/sin(pi*p)
### Gen 2 & 3: (x - a)^p * (b - x)^q
# Transform: y = b - (b-a)/(x+1);
# New: (b-a)^(p+q+1) * x^p / (x+1)^(p+q+2);
# Interval: [0, 1] => [0, Inf];
### Gen 2:
p = 1/3; a = 1; b = 4;
integrate(function(x) (x - a)^p * (b - x)^p, lower=a, upper=b)
(b-a)^(2*p+1) * gamma(p+1)*gamma(p+1) / gamma(2*p+2)
###
p = sqrt(5) - sqrt(3); a = sqrt(2); b = 4;
integrate(function(x) (x - a)^p * (b - x)^p, lower=a, upper=b)
(b-a)^(2*p+1) * gamma(p+1)*gamma(p+1) / gamma(2*p+2)
### Gen 3: Full
# Beta-function identity: integral of (x-a)^p (b-x)^q over [a, b] equals
# (b-a)^(p+q+1) * B(p+1, q+1), written below via gamma().
p = 1/3; q = 1/5; a = 1; b = 4;
integrate(function(x) (x - a)^p * (b - x)^q, lower=a, upper=b)
(b-a)^(p+q+1) * gamma(p+1)*gamma(q+1) / gamma(p+q+2)
###
p = sqrt(3); q = sqrt(5); a = 1; b = 4;
integrate(function(x) (x - a)^p * (b - x)^q, lower=a, upper=b)
(b-a)^(p+q+1) * gamma(p+1)*gamma(q+1) / gamma(p+q+2)
# Note:
# - based on Convolution & Laplace transform:
# Dr. Peyam: Laplace integral gone BANANAS
# https://www.youtube.com/watch?v=a5l4owYxjRw
### Variant:
p = sqrt(3); q = sqrt(5); a = 1; b = 4;
integrate(function(x) x * (x - a)^p * (b - x)^q, lower=a, upper=b)
b*(b-a)^(p+q+1) * gamma(p+1)*gamma(q+1) / gamma(p+q+2) +
	- (b-a)^(p+q+2) * gamma(p+1)*gamma(q+2) / gamma(p+q+3)
# simplified:
(b*(p+q+2) - (b-a)*(q+1)) * (b-a)^(p+q+1) * gamma(p+1)*gamma(q+1) / gamma(p+q+3)
(b*(p+1) + a*(q+1)) * (b-a)^(p+q+1) * gamma(p+1)*gamma(q+1) / gamma(p+q+3)
### Variant:
p = sqrt(3); q = sqrt(5); a = 1; b = 4;
integrate(function(x) x^2 * (x - a)^p * (b - x)^q, lower=a, upper=b)
b^2*(b-a)^(p+q+1) * gamma(p+1)*gamma(q+1) / gamma(p+q+2) +
	- 2*b*(b-a)^(p+q+2) * gamma(p+1)*gamma(q+2) / gamma(p+q+3) +
	+ (b-a)^(p+q+3) * gamma(p+1)*gamma(q+3) / gamma(p+q+4)
# simplified:
(b^2*(p+q+2)*(p+q+3) - 2*b*(b-a)*(q+1)*(p+q+3) + (b-a)^2*(q+1)*(q+2)) *
	(b-a)^(p+q+1) * gamma(p+1)*gamma(q+1) / gamma(p+q+4)
###########################
###########################
# 5. qncubed3: Complex Analysis: Double Keyhole Contour
# https://www.youtube.com/watch?v=LT3jpvWMH2s
# - Contour: double keyhole (not a dogbone contour);
integrate(function(x) 1 / (x*sqrt(x^2 - 1)), lower=1, upper=Inf)
pi / 2
### Gen 1:
k = 3
integrate(function(x) 1 / (x * (x^2 - 1)^(1/k)), lower=1, upper=Inf)
pi/2 / sin(pi/k)
###
k = sqrt(5)
integrate(function(x) 1 / (x * (x^2 - 1)^(1/k)), lower=1, upper=Inf)
pi/2 / sin(pi/k)
# Res = 2i*pi * exp(-1i*pi/k)
### Gen 2:
n = 3
k = 2
integrate(function(x) 1 / (x * (x^n - 1)^(1/k)), lower=1, upper=Inf)
pi/n / sin(pi/k)
###
n = sqrt(5)
k = sqrt(2)
integrate(function(x) 1 / (x * (x^n - 1)^(1/k)), lower=1, upper=Inf)
pi/n / sin(pi/k)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.