content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
## Performs out-of-sample error estimation for a BART model via k-fold cross
## validation.  Returns the out-of-fold predictions plus error summaries:
## for regression, L1/L2 error, RMSE and pseudo-R^2; for binary
## classification, the pooled confusion matrix and misclassification error.
## Extra arguments in ... are forwarded to build_bart_machine().
## k_folds = Inf requests leave-one-out; folds_vec may supply a precomputed
## integer fold assignment (one entry per row of X).
k_fold_cv = function(X, y, k_folds = 5, folds_vec = NULL, verbose = FALSE, ...){
#we cannot afford the time sink of serialization during the grid search, so shut it off manually
args = list(...)
args$serialize = FALSE
# --- input validation ---
if (!inherits(X, "data.frame")){
stop("The training data X must be a data frame.")
}
# NOTE(review): class(y) can have length > 1 for classed objects;
# inherits() would be the more robust test here -- confirm before changing.
if (!(class(y) %in% c("numeric", "integer", "factor"))){
stop("Your response must be either numeric, an integer or a factor with two levels.\n")
}
if (!is.null(folds_vec) & !inherits(folds_vec, "integer")){
stop("folds_vec must be an a vector of integers specifying the indexes of each folds.")
}
y_levels = levels(y) # NULL unless y is a factor
if (inherits(y, "numeric") || inherits(y, "integer")){ #if y is numeric, then it's a regression problem
pred_type = "regression"
} else if (inherits(y, "factor") & length(y_levels) == 2){ #if y is a factor and binary, then it's a classification problem
pred_type = "classification"
}
n = nrow(X)
# apply the same preprocessing used at model-build time so folds see the
# same columns the full model would
Xpreprocess = pre_process_training_data(X)$data
p = ncol(Xpreprocess)
#set up k folds
if (is.null(folds_vec)){ ##if folds were not pre-set:
if (k_folds == Inf){ #leave-one-out
k_folds = n
}
if (k_folds <= 1 || k_folds > n){
stop("The number of folds must be at least 2 and less than or equal to n, use \"Inf\" for leave one out")
}
# random fold assignment: draw iid normals and cut them at their
# empirical quantiles into k_folds equal-size bins
temp = rnorm(n)
folds_vec = cut(temp, breaks = quantile(temp, seq(0, 1, length.out = k_folds + 1)),
include.lowest= T, labels = F)
} else {
k_folds = length(unique(folds_vec)) ##otherwise we know the folds, so just get k
}
# --- accumulators for out-of-fold predictions and errors ---
if (pred_type == "regression"){
L1_err = 0
L2_err = 0
yhat_cv = numeric(n) ##store cv
} else {
phat_cv = numeric(n)
# NOTE(review): factor(n, ...) builds a length-1 factor (value NA since n
# is not a level), not a length-n factor; it is then grown by the indexed
# assignments in the loop below -- verify this is the intended behavior.
yhat_cv = factor(n, levels = y_levels)
# 3x3 layout: 2x2 counts plus a margin row/column of error rates
# that is filled in after the loop
confusion_matrix = matrix(0, nrow = 3, ncol = 3)
rownames(confusion_matrix) = c(paste("actual", y_levels), "use errors")
colnames(confusion_matrix) = c(paste("predicted", y_levels), "model errors")
}
Xy = data.frame(Xpreprocess, y) ##set up data
for (k in 1 : k_folds){
cat(".") # progress indicator: one dot per fold
train_idx = which(folds_vec != k)
test_idx = setdiff(1 : n, train_idx)
test_data_k = Xy[test_idx, ]
training_data_k = Xy[train_idx, ]
#build bart object on the k-th training split, forwarding the user's args
bart_machine_cv = do.call(build_bart_machine, c(list(
X = training_data_k[, 1 : p, drop = FALSE],
y = training_data_k[, (p + 1)],
run_in_sample = FALSE,
verbose = verbose), args))
predict_obj = bart_predict_for_test_data(bart_machine_cv, test_data_k[, 1 : p, drop = FALSE], test_data_k[, (p + 1)])
#tabulate errors
if (pred_type == "regression"){
L1_err = L1_err + predict_obj$L1_err
L2_err = L2_err + predict_obj$L2_err
yhat_cv[test_idx] = predict_obj$y_hat
} else {
phat_cv[test_idx] = predict_obj$p_hat
yhat_cv[test_idx] = predict_obj$y_hat
# 2x2 fold table (actual x predicted), added into the running total
tab = table(factor(test_data_k$y, levels = y_levels), factor(predict_obj$y_hat, levels = y_levels))
confusion_matrix[1 : 2, 1 : 2] = confusion_matrix[1 : 2, 1 : 2] + tab
}
}
cat("\n")
if (pred_type == "regression"){
list(y_hat = yhat_cv, L1_err = L1_err, L2_err = L2_err, rmse = sqrt(L2_err / n), PseudoRsq = 1 - L2_err / sum((y - mean(y))^2), folds = folds_vec)
} else {
#calculate the rest of the confusion matrix and return it plus the errors
# column margins: error rate among each predicted class ("use errors")
confusion_matrix[3, 1] = round(confusion_matrix[2, 1] / (confusion_matrix[1, 1] + confusion_matrix[2, 1]), 3)
confusion_matrix[3, 2] = round(confusion_matrix[1, 2] / (confusion_matrix[1, 2] + confusion_matrix[2, 2]), 3)
# row margins: error rate among each actual class ("model errors")
confusion_matrix[1, 3] = round(confusion_matrix[1, 2] / (confusion_matrix[1, 1] + confusion_matrix[1, 2]), 3)
confusion_matrix[2, 3] = round(confusion_matrix[2, 1] / (confusion_matrix[2, 1] + confusion_matrix[2, 2]), 3)
# overall misclassification rate
confusion_matrix[3, 3] = round((confusion_matrix[1, 2] + confusion_matrix[2, 1]) / sum(confusion_matrix[1 : 2, 1 : 2]), 3)
list(y_hat = yhat_cv, phat = phat_cv, confusion_matrix = confusion_matrix, misclassification_error = confusion_matrix[3, 3], folds = folds_vec)
}
}
| /R/bart_package_cross_validation.R | no_license | cran/bartMachine | R | false | false | 4,222 | r | ##performs out-of-sample error estimation for a BART model
## Performs out-of-sample error estimation for a BART model via k-fold cross
## validation.  Returns the out-of-fold predictions plus error summaries:
## for regression, L1/L2 error, RMSE and pseudo-R^2; for binary
## classification, the pooled confusion matrix and misclassification error.
## Extra arguments in ... are forwarded to build_bart_machine().
## k_folds = Inf requests leave-one-out; folds_vec may supply a precomputed
## integer fold assignment (one entry per row of X).
k_fold_cv = function(X, y, k_folds = 5, folds_vec = NULL, verbose = FALSE, ...){
#we cannot afford the time sink of serialization during the grid search, so shut it off manually
args = list(...)
args$serialize = FALSE
# --- input validation ---
if (!inherits(X, "data.frame")){
stop("The training data X must be a data frame.")
}
# NOTE(review): class(y) can have length > 1 for classed objects;
# inherits() would be the more robust test here -- confirm before changing.
if (!(class(y) %in% c("numeric", "integer", "factor"))){
stop("Your response must be either numeric, an integer or a factor with two levels.\n")
}
if (!is.null(folds_vec) & !inherits(folds_vec, "integer")){
stop("folds_vec must be an a vector of integers specifying the indexes of each folds.")
}
y_levels = levels(y) # NULL unless y is a factor
if (inherits(y, "numeric") || inherits(y, "integer")){ #if y is numeric, then it's a regression problem
pred_type = "regression"
} else if (inherits(y, "factor") & length(y_levels) == 2){ #if y is a factor and binary, then it's a classification problem
pred_type = "classification"
}
n = nrow(X)
# apply the same preprocessing used at model-build time so folds see the
# same columns the full model would
Xpreprocess = pre_process_training_data(X)$data
p = ncol(Xpreprocess)
#set up k folds
if (is.null(folds_vec)){ ##if folds were not pre-set:
if (k_folds == Inf){ #leave-one-out
k_folds = n
}
if (k_folds <= 1 || k_folds > n){
stop("The number of folds must be at least 2 and less than or equal to n, use \"Inf\" for leave one out")
}
# random fold assignment: draw iid normals and cut them at their
# empirical quantiles into k_folds equal-size bins
temp = rnorm(n)
folds_vec = cut(temp, breaks = quantile(temp, seq(0, 1, length.out = k_folds + 1)),
include.lowest= T, labels = F)
} else {
k_folds = length(unique(folds_vec)) ##otherwise we know the folds, so just get k
}
# --- accumulators for out-of-fold predictions and errors ---
if (pred_type == "regression"){
L1_err = 0
L2_err = 0
yhat_cv = numeric(n) ##store cv
} else {
phat_cv = numeric(n)
# NOTE(review): factor(n, ...) builds a length-1 factor (value NA since n
# is not a level), not a length-n factor; it is then grown by the indexed
# assignments in the loop below -- verify this is the intended behavior.
yhat_cv = factor(n, levels = y_levels)
# 3x3 layout: 2x2 counts plus a margin row/column of error rates
# that is filled in after the loop
confusion_matrix = matrix(0, nrow = 3, ncol = 3)
rownames(confusion_matrix) = c(paste("actual", y_levels), "use errors")
colnames(confusion_matrix) = c(paste("predicted", y_levels), "model errors")
}
Xy = data.frame(Xpreprocess, y) ##set up data
for (k in 1 : k_folds){
cat(".") # progress indicator: one dot per fold
train_idx = which(folds_vec != k)
test_idx = setdiff(1 : n, train_idx)
test_data_k = Xy[test_idx, ]
training_data_k = Xy[train_idx, ]
#build bart object on the k-th training split, forwarding the user's args
bart_machine_cv = do.call(build_bart_machine, c(list(
X = training_data_k[, 1 : p, drop = FALSE],
y = training_data_k[, (p + 1)],
run_in_sample = FALSE,
verbose = verbose), args))
predict_obj = bart_predict_for_test_data(bart_machine_cv, test_data_k[, 1 : p, drop = FALSE], test_data_k[, (p + 1)])
#tabulate errors
if (pred_type == "regression"){
L1_err = L1_err + predict_obj$L1_err
L2_err = L2_err + predict_obj$L2_err
yhat_cv[test_idx] = predict_obj$y_hat
} else {
phat_cv[test_idx] = predict_obj$p_hat
yhat_cv[test_idx] = predict_obj$y_hat
# 2x2 fold table (actual x predicted), added into the running total
tab = table(factor(test_data_k$y, levels = y_levels), factor(predict_obj$y_hat, levels = y_levels))
confusion_matrix[1 : 2, 1 : 2] = confusion_matrix[1 : 2, 1 : 2] + tab
}
}
cat("\n")
if (pred_type == "regression"){
list(y_hat = yhat_cv, L1_err = L1_err, L2_err = L2_err, rmse = sqrt(L2_err / n), PseudoRsq = 1 - L2_err / sum((y - mean(y))^2), folds = folds_vec)
} else {
#calculate the rest of the confusion matrix and return it plus the errors
# column margins: error rate among each predicted class ("use errors")
confusion_matrix[3, 1] = round(confusion_matrix[2, 1] / (confusion_matrix[1, 1] + confusion_matrix[2, 1]), 3)
confusion_matrix[3, 2] = round(confusion_matrix[1, 2] / (confusion_matrix[1, 2] + confusion_matrix[2, 2]), 3)
# row margins: error rate among each actual class ("model errors")
confusion_matrix[1, 3] = round(confusion_matrix[1, 2] / (confusion_matrix[1, 1] + confusion_matrix[1, 2]), 3)
confusion_matrix[2, 3] = round(confusion_matrix[2, 1] / (confusion_matrix[2, 1] + confusion_matrix[2, 2]), 3)
# overall misclassification rate
confusion_matrix[3, 3] = round((confusion_matrix[1, 2] + confusion_matrix[2, 1]) / sum(confusion_matrix[1 : 2, 1 : 2]), 3)
list(y_hat = yhat_cv, phat = phat_cv, confusion_matrix = confusion_matrix, misclassification_error = confusion_matrix[3, 3], folds = folds_vec)
}
}
|
# Source of data for the project:
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
# Tidy-data script for the UCI HAR dataset.  The working directory must be
# the unzipped "UCI HAR Dataset" folder.  Writes two files:
#   merged_clean_data.txt          -- merged, labelled mean/std measurements
#   data_set_with_the_averages.txt -- per-subject / per-activity averages

# 1. Merge the training and the test sets to create one data set.
X <- rbind(read.table("train/X_train.txt"), read.table("test/X_test.txt"))
S <- rbind(read.table("train/subject_train.txt"), read.table("test/subject_test.txt"))
Y <- rbind(read.table("train/y_train.txt"), read.table("test/y_test.txt"))

# 2. Extract only the measurements on the mean and standard deviation.
features <- read.table("features.txt")
indices_of_good_features <- grep("-mean\\(\\)|-std\\(\\)", features[, 2])
X <- X[, indices_of_good_features]
names(X) <- features[indices_of_good_features, 2]
names(X) <- gsub("\\(|\\)", "", names(X))
names(X) <- tolower(names(X)) # see last slide of the lecture Editing Text Variables (week 4)

# 3. Use descriptive activity names to name the activities in the data set.
activities <- read.table("activity_labels.txt")
activities[, 2] <- gsub("_", "", tolower(as.character(activities[, 2])))
Y[, 1] <- activities[Y[, 1], 2]
names(Y) <- "activity"

# 4. Appropriately label the data set and write it out.
names(S) <- "subject"
cleaned <- cbind(S, Y, X)
write.table(cleaned, "merged_clean_data.txt")

# 5. Create a second, independent tidy data set with the average of each
# variable for each activity and each subject.
# BUG FIX: the original filtered rows with `cleaned$subject == s` (the loop
# index) while labelling the output row with uniqueSubjects[s]; when subject
# ids do not appear in sorted order the label and the averaged data disagree.
# Filter on uniqueSubjects[s] so the label always matches the data.
uniqueSubjects <- unique(S)[, 1]
numSubjects <- length(uniqueSubjects)
numActivities <- nrow(activities)
numCols <- ncol(cleaned)
result <- cleaned[seq_len(numSubjects * numActivities), ]
row <- 1
for (s in seq_len(numSubjects)) {
  for (a in seq_len(numActivities)) {
    result[row, 1] <- uniqueSubjects[s]
    result[row, 2] <- activities[a, 2]
    tmp <- cleaned[cleaned$subject == uniqueSubjects[s] & cleaned$activity == activities[a, 2], ]
    result[row, 3:numCols] <- colMeans(tmp[, 3:numCols])
    row <- row + 1
  }
}
write.table(result, "data_set_with_the_averages.txt")
| /run_analysis.R | no_license | midhunj/getting-and-cleaning-data | R | false | false | 2,048 | r | # Source of data for the project:
# Source of data for the project:
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
# Tidy-data script for the UCI HAR dataset.  The working directory must be
# the unzipped "UCI HAR Dataset" folder.  Writes two files:
#   merged_clean_data.txt          -- merged, labelled mean/std measurements
#   data_set_with_the_averages.txt -- per-subject / per-activity averages

# 1. Merge the training and the test sets to create one data set.
X <- rbind(read.table("train/X_train.txt"), read.table("test/X_test.txt"))
S <- rbind(read.table("train/subject_train.txt"), read.table("test/subject_test.txt"))
Y <- rbind(read.table("train/y_train.txt"), read.table("test/y_test.txt"))

# 2. Extract only the measurements on the mean and standard deviation.
features <- read.table("features.txt")
indices_of_good_features <- grep("-mean\\(\\)|-std\\(\\)", features[, 2])
X <- X[, indices_of_good_features]
names(X) <- features[indices_of_good_features, 2]
names(X) <- gsub("\\(|\\)", "", names(X))
names(X) <- tolower(names(X)) # see last slide of the lecture Editing Text Variables (week 4)

# 3. Use descriptive activity names to name the activities in the data set.
activities <- read.table("activity_labels.txt")
activities[, 2] <- gsub("_", "", tolower(as.character(activities[, 2])))
Y[, 1] <- activities[Y[, 1], 2]
names(Y) <- "activity"

# 4. Appropriately label the data set and write it out.
names(S) <- "subject"
cleaned <- cbind(S, Y, X)
write.table(cleaned, "merged_clean_data.txt")

# 5. Create a second, independent tidy data set with the average of each
# variable for each activity and each subject.
# BUG FIX: the original filtered rows with `cleaned$subject == s` (the loop
# index) while labelling the output row with uniqueSubjects[s]; when subject
# ids do not appear in sorted order the label and the averaged data disagree.
# Filter on uniqueSubjects[s] so the label always matches the data.
uniqueSubjects <- unique(S)[, 1]
numSubjects <- length(uniqueSubjects)
numActivities <- nrow(activities)
numCols <- ncol(cleaned)
result <- cleaned[seq_len(numSubjects * numActivities), ]
row <- 1
for (s in seq_len(numSubjects)) {
  for (a in seq_len(numActivities)) {
    result[row, 1] <- uniqueSubjects[s]
    result[row, 2] <- activities[a, 2]
    tmp <- cleaned[cleaned$subject == uniqueSubjects[s] & cleaned$activity == activities[a, 2], ]
    result[row, 3:numCols] <- colMeans(tmp[, 3:numCols])
    row <- row + 1
  }
}
write.table(result, "data_set_with_the_averages.txt")
|
# Auto-extracted example code exercising kader's (unexported) Epanechnikov
# kernel implementation.
library(kader)
### Name: epanechnikov
### Title: Epanechnikov kernel
### Aliases: epanechnikov
### ** Examples
# Evaluate the kernel at a few interior points and at/just outside
# +/- sqrt(5) -- presumably the boundary of its support; confirm against
# the kader documentation.
kader:::epanechnikov(x = c(-sqrt(6:5), -2:2, sqrt(5:6)))
## No test:
# Draw the kernel over [-sqrt(6), sqrt(6)], extending past its support.
curve(kader:::epanechnikov(x), from = -sqrt(6), to = sqrt(6))
## End(No test)
| /data/genthat_extracted_code/kader/examples/epanechnikov.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 267 | r | library(kader)
# Auto-extracted example code exercising kader's (unexported) Epanechnikov
# kernel implementation (assumes kader is already attached).
### Name: epanechnikov
### Title: Epanechnikov kernel
### Aliases: epanechnikov
### ** Examples
# Evaluate the kernel at a few interior points and at/just outside
# +/- sqrt(5) -- presumably the boundary of its support; confirm against
# the kader documentation.
kader:::epanechnikov(x = c(-sqrt(6:5), -2:2, sqrt(5:6)))
## No test:
# Draw the kernel over [-sqrt(6), sqrt(6)], extending past its support.
curve(kader:::epanechnikov(x), from = -sqrt(6), to = sqrt(6))
## End(No test)
|
# Plotting script for the modals comprehension/evidence experiment.
# Reads preprocessed responses (data frame `r`) from data/r.RData and
# writes PDF figures into graphs/.
# NOTE(review): the hard-coded setwd() makes this machine-specific.
# ci.low, ci.high and summarySE are expected to come from
# rscripts/helpers.r -- confirm they are defined there.
library(ggplot2)
theme_set(theme_bw(18))
setwd("~/webprojects/70_modals_comprehension_evidence/results/")
source("rscripts/helpers.r")
load("data/r.RData")
# --- mean response per modal condition (item_type), with CI error bars ---
agr = aggregate(response ~ item_type,data=r,FUN=mean)
agr$SD = aggregate(response ~ item_type,data=r,FUN=sd)$response
agr
agr$CILow = aggregate(response ~ item_type,data=r, FUN=ci.low)$response
agr$CIHigh = aggregate(response ~ item_type,data=r,FUN=ci.high)$response
agr$YMin = agr$response - agr$CILow
agr$YMax = agr$response + agr$CIHigh
# fix the display order of the modal conditions
agr$Modal = factor(x=as.character(agr$item_type),levels=c("bare","must","probably","might"))
ggplot(agr, aes(x=Modal,y=response)) +
geom_point() +
geom_errorbar(aes(ymin=YMin,ymax=YMax),width=.25)
ggsave("graphs/means.pdf")
# --- same means, faceted by item ---
agr = aggregate(response ~ item_type + item,data=r,FUN=mean)
agr$CILow = aggregate(response ~ item_type+ item,data=r, FUN=ci.low)$response
agr$CIHigh = aggregate(response ~ item_type+ item,data=r,FUN=ci.high)$response
agr$YMin = agr$response - agr$CILow
agr$YMax = agr$response + agr$CIHigh
agr$Modal = factor(x=as.character(agr$item_type),levels=c("bare","must","probably","might"))
ggplot(agr, aes(x=Modal,y=response)) +
geom_point() +
geom_errorbar(aes(ymin=YMin,ymax=YMax),width=.25) +
facet_wrap(~item) +
theme(axis.text.x=element_text(angle=45,vjust=1,hjust=1))
ggsave("graphs/means_byitem.pdf")
# histograms of evidence type (row-wise proportions per modal condition)
t = as.data.frame(prop.table(table(r$item_type,r$evidence),mar=1))
head(t)
colnames(t) = c("Modal","EvidenceType","Proportion")
t$Condition = factor(x=as.character(t$Modal),levels=c("bare","must","probably","might"))
ggplot(t, aes(x=Condition,y=Proportion,fill=EvidenceType)) +
geom_bar(stat="identity")
ggsave("graphs/evidence_dist.pdf")
# histograms of evidence type by item
t = as.data.frame(prop.table(table(r$item,r$item_type,r$evidence),mar=c(1,2)))
head(t)
colnames(t) = c("Item","Modal","EvidenceType","Proportion")
t[t$Item == "coffee" & t$Modal == "must",] # spot-check one item/modal cell
t$Condition = factor(x=as.character(t$Modal),levels=c("bare","must","probably","might"))
ggplot(t, aes(x=Condition,y=Proportion,fill=EvidenceType)) +
geom_bar(stat="identity") +
facet_wrap(~Item) +
theme(axis.text.x=element_text(angle=45,vjust=1,hjust=1))
ggsave("graphs/evidence_dist_byitem.pdf")
# --- relate evidence-type proportions to directness ratings ---
t = as.data.frame(prop.table(table(r$item,r$item_type,r$evidence),mar=c(1,2)))
head(t)
colnames(t) = c("Item","Modal","Evidence","Proportion")
# NOTE(review): `directness` is not defined in this script; presumably it is
# loaded from data/r.RData or defined in helpers.r -- confirm.
t$Directness = directness[paste(t$Item,t$Evidence),]$prob
head(t)
ggplot(t, aes(x=Directness,y=Proportion)) +
geom_point() +
geom_smooth() +
facet_wrap(~Modal)
# Bin by directness with threshold
#dthreshold <- median(t$Directness)
t$Modal <- factor(t$Modal, levels=c("bare", "must", "might", "probably"))
t$directnessBin <- cut(t$Directness, breaks=4)
t.byDirectness <- summarySE(t, measurevar=c("Proportion"), groupvars=c("Modal", "directnessBin"))
ggplot(t.byDirectness, aes(x=directnessBin, y=Proportion, fill=Modal)) +
geom_bar(stat="identity", color="black", position=position_dodge()) +
facet_grid(.~Modal)
| /experiments/70_modals_comprehension_evidence/results/rscripts/plots.R | permissive | thegricean/modals | R | false | false | 3,019 | r | library(ggplot2)
# Plotting script for the modals comprehension/evidence experiment
# (assumes ggplot2 is already attached).  Reads preprocessed responses
# (data frame `r`) from data/r.RData and writes PDF figures into graphs/.
# NOTE(review): the hard-coded setwd() makes this machine-specific.
# ci.low, ci.high and summarySE are expected to come from
# rscripts/helpers.r -- confirm they are defined there.
theme_set(theme_bw(18))
setwd("~/webprojects/70_modals_comprehension_evidence/results/")
source("rscripts/helpers.r")
load("data/r.RData")
# --- mean response per modal condition (item_type), with CI error bars ---
agr = aggregate(response ~ item_type,data=r,FUN=mean)
agr$SD = aggregate(response ~ item_type,data=r,FUN=sd)$response
agr
agr$CILow = aggregate(response ~ item_type,data=r, FUN=ci.low)$response
agr$CIHigh = aggregate(response ~ item_type,data=r,FUN=ci.high)$response
agr$YMin = agr$response - agr$CILow
agr$YMax = agr$response + agr$CIHigh
# fix the display order of the modal conditions
agr$Modal = factor(x=as.character(agr$item_type),levels=c("bare","must","probably","might"))
ggplot(agr, aes(x=Modal,y=response)) +
geom_point() +
geom_errorbar(aes(ymin=YMin,ymax=YMax),width=.25)
ggsave("graphs/means.pdf")
# --- same means, faceted by item ---
agr = aggregate(response ~ item_type + item,data=r,FUN=mean)
agr$CILow = aggregate(response ~ item_type+ item,data=r, FUN=ci.low)$response
agr$CIHigh = aggregate(response ~ item_type+ item,data=r,FUN=ci.high)$response
agr$YMin = agr$response - agr$CILow
agr$YMax = agr$response + agr$CIHigh
agr$Modal = factor(x=as.character(agr$item_type),levels=c("bare","must","probably","might"))
ggplot(agr, aes(x=Modal,y=response)) +
geom_point() +
geom_errorbar(aes(ymin=YMin,ymax=YMax),width=.25) +
facet_wrap(~item) +
theme(axis.text.x=element_text(angle=45,vjust=1,hjust=1))
ggsave("graphs/means_byitem.pdf")
# histograms of evidence type (row-wise proportions per modal condition)
t = as.data.frame(prop.table(table(r$item_type,r$evidence),mar=1))
head(t)
colnames(t) = c("Modal","EvidenceType","Proportion")
t$Condition = factor(x=as.character(t$Modal),levels=c("bare","must","probably","might"))
ggplot(t, aes(x=Condition,y=Proportion,fill=EvidenceType)) +
geom_bar(stat="identity")
ggsave("graphs/evidence_dist.pdf")
# histograms of evidence type by item
t = as.data.frame(prop.table(table(r$item,r$item_type,r$evidence),mar=c(1,2)))
head(t)
colnames(t) = c("Item","Modal","EvidenceType","Proportion")
t[t$Item == "coffee" & t$Modal == "must",] # spot-check one item/modal cell
t$Condition = factor(x=as.character(t$Modal),levels=c("bare","must","probably","might"))
ggplot(t, aes(x=Condition,y=Proportion,fill=EvidenceType)) +
geom_bar(stat="identity") +
facet_wrap(~Item) +
theme(axis.text.x=element_text(angle=45,vjust=1,hjust=1))
ggsave("graphs/evidence_dist_byitem.pdf")
# --- relate evidence-type proportions to directness ratings ---
t = as.data.frame(prop.table(table(r$item,r$item_type,r$evidence),mar=c(1,2)))
head(t)
colnames(t) = c("Item","Modal","Evidence","Proportion")
# NOTE(review): `directness` is not defined in this script; presumably it is
# loaded from data/r.RData or defined in helpers.r -- confirm.
t$Directness = directness[paste(t$Item,t$Evidence),]$prob
head(t)
ggplot(t, aes(x=Directness,y=Proportion)) +
geom_point() +
geom_smooth() +
facet_wrap(~Modal)
# Bin by directness with threshold
#dthreshold <- median(t$Directness)
t$Modal <- factor(t$Modal, levels=c("bare", "must", "might", "probably"))
t$directnessBin <- cut(t$Directness, breaks=4)
t.byDirectness <- summarySE(t, measurevar=c("Proportion"), groupvars=c("Modal", "directnessBin"))
ggplot(t.byDirectness, aes(x=directnessBin, y=Proportion, fill=Modal)) +
geom_bar(stat="identity", color="black", position=position_dodge()) +
facet_grid(.~Modal)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rtMod.R
\docType{class}
\name{rtModCV-class}
\alias{rtModCV-class}
\alias{rtModCV}
\title{\pkg{rtemis} Cross-Validated Supervised Model Class}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
rtModCV
}
\description{
R6 Class for \pkg{rtemis} Cross-Validated Supervised Models
}
\section{Fields}{
\describe{
\item{\code{mod.name}}{Model (algorithm) name}
\item{\code{y.train}}{Training set y data}
\item{\code{y.test}}{Testing set y data}
\item{\code{x.name}}{Name of x data}
\item{\code{y.name}}{Name of y data}
\item{\code{xnames}}{Character vector: Column names of x}
\item{\code{resampler}}{List of settings for \link{resample}. Set using \link{rtset.cv.resample}}
\item{\code{n.repeats}}{Integer: Number of repeats. This is the outermost iterator: i.e. You will run
\code{resampler} this many times.}
\item{\code{mod}}{Trained model}
\item{\code{fitted}}{Fitted values}
\item{\code{se.fit}}{Standard error of the fit}
\item{\code{error.train}}{Training error}
\item{\code{predicted}}{Predicted values}
\item{\code{se.prediction}}{Standard error of the prediction}
\item{\code{error.test}}{Testing error}
\item{\code{question}}{Question the model is hoping to answer}
\item{\code{extra}}{Algorithm-specific output}
}}
\keyword{datasets}
| /man/rtModCV-class.Rd | no_license | bakaibaiazbekov/rtemis | R | false | true | 1,361 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rtMod.R
\docType{class}
\name{rtModCV-class}
\alias{rtModCV-class}
\alias{rtModCV}
\title{\pkg{rtemis} Cross-Validated Supervised Model Class}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
rtModCV
}
\description{
R6 Class for \pkg{rtemis} Cross-Validated Supervised Models
}
\section{Fields}{
\describe{
\item{\code{mod.name}}{Model (algorithm) name}
\item{\code{y.train}}{Training set y data}
\item{\code{y.test}}{Testing set y data}
\item{\code{x.name}}{Name of x data}
\item{\code{y.name}}{Name of y data}
\item{\code{xnames}}{Character vector: Column names of x}
\item{\code{resampler}}{List of settings for \link{resample}. Set using \link{rtset.cv.resample}}
\item{\code{n.repeats}}{Integer: Number of repeats. This is the outermost iterator: i.e. You will run
\code{resampler} this many times.}
\item{\code{mod}}{Trained model}
\item{\code{fitted}}{Fitted values}
\item{\code{se.fit}}{Standard error of the fit}
\item{\code{error.train}}{Training error}
\item{\code{predicted}}{Predicted values}
\item{\code{se.prediction}}{Standard error of the prediction}
\item{\code{error.test}}{Testing error}
\item{\code{question}}{Question the model is hoping to answer}
\item{\code{extra}}{Algorithm-specific output}
}}
\keyword{datasets}
|
# Working directory should be set to UCI HAR Dataset folder
# Tidy-data script for the UCI HAR dataset: merges train/test sets, keeps
# mean/std features, labels activities and writes tidy_data.txt containing
# per-subject / per-activity averages.
# Remember path to working directory
wd <- getwd()
setwd(paste(wd, "/UCI HAR Dataset", sep = ""))
features <- read.table("features.txt")
activity_labels <- read.table("activity_labels.txt")
# Read test set
setwd(paste(wd, "/UCI HAR Dataset/test", sep = ""))
y_test <- read.table("y_test.txt")
x_test <- read.table("X_test.txt")
subject_test <- read.table("subject_test.txt")
# Read training set
setwd(paste(wd, "/UCI HAR Dataset/train", sep = ""))
y_train <- read.table("y_train.txt")
x_train <- read.table("X_train.txt")
subject_train <- read.table("subject_train.txt")
# Change back to original working directory
setwd(wd)
# Merge test and train data sets and give appropriate column names
colnames(x_test) <- features$V2
test_data <- data.frame(y_test, subject_test, x_test)
colnames(test_data)[1:2] <- c("activity", "subject")
colnames(x_train) <- features$V2
train_data <- data.frame(y_train, subject_train, x_train)
colnames(train_data)[1:2] <- c("activity", "subject")
data <- rbind(test_data, train_data)
# Change activity numbers to labels
# NOTE(review): gsub() substitutes digit substrings in the activity column;
# safe here only because the labels themselves contain no digits.
activity_labels$V2 <- tolower(activity_labels$V2) # in order to make it easier to type
currentActivity = 1
for (currentActivityLabel in activity_labels$V2) {
data$activity <- gsub(currentActivity, currentActivityLabel, data$activity)
currentActivity <- currentActivity + 1
}
data$activity <- as.factor(data$activity)
data$subject <- as.factor(data$subject)
# Create vector stating columns to pick
# (the leading two TRUEs keep the activity and subject columns)
columns <- c(TRUE, TRUE)
for (i in 1:length(features$V2)) {
# NOTE(review): the patterns are regexes, so "()" matches any character
# after "-mean"/"-std" rather than literal parentheses; this still picks
# the intended columns but fixed = TRUE would be the exact match.
m <- grepl("-mean()", features$V2[i])
s <- grepl("-std()", features$V2[i])
ms <- m | s
if (grepl("-meanFreq()", features$V2[i]) == FALSE) {
columns <- c(columns, ms)
}
else {columns <- c(columns, FALSE)}
}
# Pick columns with mean and std
data <- data[, columns]
# Create second tidy data set with the average of each variable for each activity and each subject
data2 <- data.frame()
# NOTE(review): activity, subject and data_without_activation_and_subject
# are assigned but never used below -- dead-code candidates.
activity <- data$activity
subject <- data$subject
data_without_activation_and_subject <- data[3:length(data)]
# Averaging the whole frame also "averages" the factor columns (producing
# NA columns); those duplicates are dropped further down.
data2 <- aggregate(data, by = list(activity = data$activity, subject = data$subject), mean)
column_names <-colnames(data2)
# Clean the auto-mangled names: collapse ".." runs into "." and strip a
# trailing dot; two passes handle longer dot sequences.
for (k in 1:2) {
for (i in 1:length(column_names)) {
column_names[i] <- gsub("..", ".", column_names[i], fixed = TRUE)
number_of_full_stops <- length(gregexpr(".", column_names[i], fixed = TRUE)[[1]])
if (gregexpr(".", column_names[i], fixed = TRUE)[[1]][number_of_full_stops] == nchar(column_names[i])) {
column_names[i] <- substr(column_names[i], 1, nchar(column_names[i])- 1)
}
}
}
colnames(data2) <- column_names
# Drop the duplicated (NA) activity/subject columns created by aggregating
# the originals; delete the higher index first so positions stay valid.
data2[, 4] <- NULL
data2[, 3] <- NULL
write.table(data2, "tidy_data.txt", sep = " ", row.names = FALSE)
| /run_analysis.R | no_license | a7n7k7a7/GettingAndCleaningData | R | false | false | 2,765 | r | # Working directory should be set to UCI HAR Dataset folder
# Tidy-data script for the UCI HAR dataset (working directory must contain
# the "UCI HAR Dataset" folder): merges train/test sets, keeps mean/std
# features, labels activities and writes tidy_data.txt containing
# per-subject / per-activity averages.
# Remember path to working directory
wd <- getwd()
setwd(paste(wd, "/UCI HAR Dataset", sep = ""))
features <- read.table("features.txt")
activity_labels <- read.table("activity_labels.txt")
# Read test set
setwd(paste(wd, "/UCI HAR Dataset/test", sep = ""))
y_test <- read.table("y_test.txt")
x_test <- read.table("X_test.txt")
subject_test <- read.table("subject_test.txt")
# Read training set
setwd(paste(wd, "/UCI HAR Dataset/train", sep = ""))
y_train <- read.table("y_train.txt")
x_train <- read.table("X_train.txt")
subject_train <- read.table("subject_train.txt")
# Change back to original working directory
setwd(wd)
# Merge test and train data sets and give appropriate column names
colnames(x_test) <- features$V2
test_data <- data.frame(y_test, subject_test, x_test)
colnames(test_data)[1:2] <- c("activity", "subject")
colnames(x_train) <- features$V2
train_data <- data.frame(y_train, subject_train, x_train)
colnames(train_data)[1:2] <- c("activity", "subject")
data <- rbind(test_data, train_data)
# Change activity numbers to labels
# NOTE(review): gsub() substitutes digit substrings in the activity column;
# safe here only because the labels themselves contain no digits.
activity_labels$V2 <- tolower(activity_labels$V2) # in order to make it easier to type
currentActivity = 1
for (currentActivityLabel in activity_labels$V2) {
data$activity <- gsub(currentActivity, currentActivityLabel, data$activity)
currentActivity <- currentActivity + 1
}
data$activity <- as.factor(data$activity)
data$subject <- as.factor(data$subject)
# Create vector stating columns to pick
# (the leading two TRUEs keep the activity and subject columns)
columns <- c(TRUE, TRUE)
for (i in 1:length(features$V2)) {
# NOTE(review): the patterns are regexes, so "()" matches any character
# after "-mean"/"-std" rather than literal parentheses; this still picks
# the intended columns but fixed = TRUE would be the exact match.
m <- grepl("-mean()", features$V2[i])
s <- grepl("-std()", features$V2[i])
ms <- m | s
if (grepl("-meanFreq()", features$V2[i]) == FALSE) {
columns <- c(columns, ms)
}
else {columns <- c(columns, FALSE)}
}
# Pick columns with mean and std
data <- data[, columns]
# Create second tidy data set with the average of each variable for each activity and each subject
data2 <- data.frame()
# NOTE(review): activity, subject and data_without_activation_and_subject
# are assigned but never used below -- dead-code candidates.
activity <- data$activity
subject <- data$subject
data_without_activation_and_subject <- data[3:length(data)]
# Averaging the whole frame also "averages" the factor columns (producing
# NA columns); those duplicates are dropped further down.
data2 <- aggregate(data, by = list(activity = data$activity, subject = data$subject), mean)
column_names <-colnames(data2)
# Clean the auto-mangled names: collapse ".." runs into "." and strip a
# trailing dot; two passes handle longer dot sequences.
for (k in 1:2) {
for (i in 1:length(column_names)) {
column_names[i] <- gsub("..", ".", column_names[i], fixed = TRUE)
number_of_full_stops <- length(gregexpr(".", column_names[i], fixed = TRUE)[[1]])
if (gregexpr(".", column_names[i], fixed = TRUE)[[1]][number_of_full_stops] == nchar(column_names[i])) {
column_names[i] <- substr(column_names[i], 1, nchar(column_names[i])- 1)
}
}
}
colnames(data2) <- column_names
# Drop the duplicated (NA) activity/subject columns created by aggregating
# the originals; delete the higher index first so positions stay valid.
data2[, 4] <- NULL
data2[, 3] <- NULL
write.table(data2, "tidy_data.txt", sep = " ", row.names = FALSE)
|
## -----------------------------------------------------------------------------
## Retrospective-analysis configuration.
## retro.yrs lists every retrospective year the model will be run for;
## plot.retro.yrs is the subset shown in the plot and table. The squid
## plots are the exception: they plot the full retro.yrs range.
## -----------------------------------------------------------------------------
retro.yrs <- seq_len(20)
plot.retro.yrs <- seq_len(5)
| /R/retrospective-setup.R | no_license | andrew-edwards/hake-assessment | R | false | false | 466 | r | ## -----------------------------------------------------------------------------
## Retrospective-analysis configuration.
## retro.yrs lists every retrospective year the model will be run for;
## plot.retro.yrs is the subset shown in the plot and table. The squid
## plots are the exception: they plot the full retro.yrs range.
## -----------------------------------------------------------------------------
retro.yrs <- seq_len(20)
plot.retro.yrs <- seq_len(5)
|
# testthat fixture: copy the package's example dataset and blank out rows
# 5-8 so mahad() (the Mahalanobis-distance careless-responding check) is
# exercised on data containing missing values.
careless_dataset_na <- careless_dataset
careless_dataset_na[c(5:8),] <- NA
data_careless_maha <- mahad(careless_dataset_na) | /tests/testthat/test-mahad.R | permissive | mronkko/careless | R | false | false | 123 | r | careless_dataset_na <- careless_dataset
careless_dataset_na[c(5:8),] <- NA
data_careless_maha <- mahad(careless_dataset_na) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spec-model.R
\name{ExpectedCorrelation}
\alias{ExpectedCorrelation}
\title{Expected correlation between replicate proxy records and between a proxy record and the true climate}
\usage{
ExpectedCorrelation(pes, spec.pars = NULL)
}
\arguments{
\item{pes}{Object of class proxy.error.spec, e.g. output from \link{ProxyErrorSpectrum}}
\item{spec.pars}{Parameters of the proxy error spectrum, these are taken
from proxy.error.spec if it is a proxy.error.spec object. Can be passed here to
allow calculation on compatible non-proxy.error.spec objects.}
}
\value{
a data.frame / tibble
}
\description{
Expected correlation between replicate proxy records and between a proxy record and the true climate
}
\examples{
spec.pars <- GetSpecPars("Mg_Ca", tau_b = 1000 * 10 / 2, sigma.meas = 1)
spec.obj <- do.call(ProxyErrorSpectrum, spec.pars)
exp.corr <- ExpectedCorrelation(spec.obj)
plot(rho~smoothed.resolution, data = exp.corr, type = "l", log = "x")
}
| /man/ExpectedCorrelation.Rd | permissive | EarthSystemDiagnostics/psem | R | false | true | 1,028 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spec-model.R
\name{ExpectedCorrelation}
\alias{ExpectedCorrelation}
\title{Expected correlation between replicate proxy records and between a proxy record and the true climate}
\usage{
ExpectedCorrelation(pes, spec.pars = NULL)
}
\arguments{
\item{pes}{Object of class proxy.error.spec, e.g. output from \link{ProxyErrorSpectrum}}
\item{spec.pars}{Parameters of the proxy error spectrum, these are taken
from proxy.error.spec if it is a proxy.error.spec object. Can be passed here to
allow calculation on compatible none proxy.error.spec objects.}
}
\value{
a data.frame / tibble
}
\description{
Expected correlation between replicate proxy records and between a proxy record and the true climate
}
\examples{
spec.pars <- GetSpecPars("Mg_Ca", tau_b = 1000 * 10 / 2, sigma.meas = 1)
spec.obj <- do.call(ProxyErrorSpectrum, spec.pars)
exp.corr <- ExpectedCorrelation(spec.obj)
plot(rho~smoothed.resolution, data = exp.corr, type = "l", log = "x")
}
|
#' Printing tibbles
#'
#' @description
#' One of the main features of the `tbl_df` class is the printing:
#'
#' * Tibbles only print as many rows and columns as fit on one screen,
#' supplemented by a summary of the remaining rows and columns.
#' * Tibble reveals the type of each column, which keeps the user informed about
#' whether a variable is, e.g., `<chr>` or `<fct>` (character versus factor).
#'
#' Printing can be tweaked for a one-off call by calling `print()` explicitly
#' and setting arguments like `n` and `width`. More persistent control is
#' available by setting the options described below.
#'
#' As of tibble 3.1.0, printing is handled entirely by the \pkg{pillar} package.
#' If you implement a package that extend tibble,
#' the printed output can be customized in various ways.
#' See `vignette("extending", package = "pillar")` for details.
#'
#' @inheritSection pillar::`pillar-package` Package options
#' @section Package options:
#'
#' The following options are used by the tibble and pillar packages
#' to format and print `tbl_df` objects.
#' Used by the formatting workhorse `trunc_mat()` and, therefore,
#' indirectly, by `print.tbl()`.
#'
#' * `tibble.print_max`: Row number threshold: Maximum number of rows printed.
#' Set to `Inf` to always print all rows. Default: 20.
#' * `tibble.print_min`: Number of rows printed if row number threshold is
#' exceeded. Default: 10.
#' * `tibble.width`: Output width. Default: `NULL` (use `width` option).
#' * `tibble.max_extra_cols`: Number of extra columns printed in reduced form.
#' Default: 100.
#'
#' @param x Object to format or print.
#' @param ... Other arguments passed on to individual methods.
#' @param n Number of rows to show. If `NULL`, the default, will print all rows
#' if less than option `tibble.print_max`. Otherwise, will print
#' `tibble.print_min` rows.
#' @param width Width of text output to generate. This defaults to `NULL`, which
#' means use `getOption("tibble.width")` or (if also `NULL`)
#' `getOption("width")`; the latter displays only the columns that fit on one
#' screen. You can also set `options(tibble.width = Inf)` to override this
#' default and always print all columns, this may be slow for very wide tibbles.
#' @param n_extra Number of extra columns to print abbreviated information for,
#' if the width is too small for the entire tibble. If `NULL`, the default,
#' will print information about at most `tibble.max_extra_cols` extra columns.
#' @examples
#' print(as_tibble(mtcars))
#' print(as_tibble(mtcars), n = 1)
#' print(as_tibble(mtcars), n = 3)
#'
#' print(as_tibble(iris), n = 100)
#'
#' print(mtcars, width = 10)
#'
#' mtcars2 <- as_tibble(cbind(mtcars, mtcars), .name_repair = "unique")
#' print(mtcars2, n = 25, n_extra = 3)
#'
#' @examplesIf requireNamespace("nycflights13", quietly = TRUE)
#' print(nycflights13::flights, n_extra = 2)
#' print(nycflights13::flights, width = Inf)
#'
#' @name formatting
#' @aliases print.tbl format.tbl
NULL
# Only for documentation, doesn't do anything:
# dispatch falls straight through to print.tbl (implemented in pillar).
# The explicit argument list exists so the shared @rdname doc page
# can show the supported arguments.
#' @rdname formatting
print.tbl_df <- function(x, ..., n = NULL, width = NULL, n_extra = NULL) {
  NextMethod()
}
# Only for documentation, doesn't do anything:
# formatting is delegated to format.tbl (implemented in pillar);
# the signature only documents the arguments on the shared doc page.
#' @rdname formatting
format.tbl_df <- function(x, ..., n = NULL, width = NULL, n_extra = NULL) {
  NextMethod()
}
#' Legacy printing
#'
#' @description
#' `r lifecycle::badge("deprecated")`
#' As of tibble 3.1.0, printing is handled entirely by the \pkg{pillar} package.
#' Do not use this function.
#' If you implement a package that extend tibble,
#' the printed output can be customized in various ways.
#' See `vignette("extending", package = "pillar")` for details.
#'
#' @inheritParams formatting
#' @export
#' @keywords internal
trunc_mat <- function(x, n = NULL, width = NULL, n_extra = NULL) {
  # Deprecated since tibble 3.1.0: pillar handles all printing now.
  deprecate_soft("3.1.0", "tibble::trunc_mat()",
    details = "Printing has moved to the pillar package.")
  rows <- nrow(x)
  # Resolve how many rows to show when the caller did not say.
  if (is.null(n) || n < 0) {
    use_min <- is.na(rows) || rows > tibble_opt("print_max")
    n <- if (use_min) tibble_opt("print_min") else rows
  }
  n_extra <- n_extra %||% tibble_opt("max_extra_cols")
  # Materialize the rows to display. When the total row count is
  # unknown (NA, e.g. lazy sources), fetch one extra row to learn
  # whether the source is already exhausted.
  if (is.na(rows)) {
    df <- as.data.frame(head(x, n + 1))
    if (nrow(df) <= n) {
      rows <- nrow(df)  # source exhausted: the count is now known
    } else {
      df <- df[seq_len(n), , drop = FALSE]
    }
  } else {
    df <- as.data.frame(head(x, n))
  }
  shrunk <- shrink_mat(df, rows, n, star = has_rownames(x))
  meta <- list(
    width = width, rows_total = rows, rows_min = nrow(df),
    n_extra = n_extra, summary = tbl_sum(x)
  )
  structure(
    c(shrunk, meta),
    class = c(paste0("trunc_mat_", class(x)), "trunc_mat")
  )
}
shrink_mat <- function(df, rows, n, star) {
  # Row names are rendered separately (via has_row_id), so drop them here.
  df <- remove_rownames(df)
  # Does the printout need a trailing "... more rows" marker?
  # With an unknown total, n + 1 rows were fetched, so nrow(df) >= n
  # means there is at least one more row.
  needs_dots <- if (is.na(rows)) nrow(df) >= n else rows > n
  rows_missing <- if (needs_dots) rows - n else 0L
  mcf <- pillar::colonnade(df, has_row_id = if (star) "*" else TRUE)
  list(mcf = mcf, rows_missing = rows_missing)
}
#' @importFrom pillar style_subtle
#' @export
# Assemble the printed representation of a trunc_mat:
# a subtle-styled header comment, the formatted body, and a
# subtle-styled footer comment.
format.trunc_mat <- function(x, width = NULL, ...) {
  if (is.null(width)) {
    width <- x$width
  }
  width <- tibble_width(width)
  # Header entries (e.g. "A tibble: 10 x 3"); named entries get their
  # name left-justified in front, padded with NBSP so the padding
  # survives line wrapping.
  named_header <- format_header(x)
  if (all(names2(named_header) == "")) {
    header <- named_header
  } else {
    header <- paste0(
      justify(
        paste0(names2(named_header), ":"),
        right = FALSE, space = NBSP
      ),
      # We add a space after the NBSP inserted by justify()
      # so that wrapping occurs at the right location for very narrow outputs
      " ",
      named_header
    )
  }
  comment <- format_comment(header, width = width)
  squeezed <- pillar::squeeze(x$mcf, width = width)
  mcf <- format_body(squeezed)
  # Splitting lines is important, otherwise subtle style may be lost
  # if column names contain spaces.
  footer <- pre_dots(format_footer(x, squeezed))
  footer_comment <- split_lines(format_comment(footer, width = width))
  c(style_subtle(comment), mcf, style_subtle(footer_comment))
}
# Needs to be defined in package code: r-lib/pkgload#85
# Test helper: print `x` with format_body() mocked out to a fixed
# placeholder, so tests do not depend on pillar's exact body rendering.
# Lifecycle warnings (trunc_mat is deprecated) are silenced for the call.
print_with_mocked_format_body <- function(x, ...) {
  scoped_lifecycle_silence()
  mockr::with_mock(
    format_body = function(x, ...) {
      paste0("<body created by pillar>")
    },
    {
      print(x, ...)
    }
  )
}
#' @export
print.trunc_mat <- function(x, ...) {
  # format() builds the full character representation; emit it line by line.
  out <- format(x, ...)
  cli::cat_line(out)
  invisible(x)
}
# Header portion of the printout, as captured from tbl_sum() by trunc_mat().
format_header <- function(x) {
  x$summary
}
# Body portion: delegate to the format() method of the squeezed colonnade.
format_body <- function(x) {
  format(x)
}
format_footer <- function(x, squeezed_colonnade) {
  # Collect the "more rows" and "more variables" fragments, then join
  # them as "with <first>, and <second>, ...".
  hidden_cols <- pillar::extra_cols(squeezed_colonnade, n = x$n_extra)
  extra <- c(format_footer_rows(x), format_footer_cols(x, hidden_cols))
  if (length(extra) == 0) {
    return(character())
  }
  extra[[1]] <- paste0("with ", extra[[1]])
  extra[-1] <- map_chr(extra[-1], function(ex) paste0("and ", ex))
  collapse(extra)
}
format_footer_rows <- function(x) {
  if (length(x$mcf) != 0) {
    # Columns are shown: mention rows that did not fit on screen.
    missing <- x$rows_missing
    if (is.na(missing)) {
      "more rows"  # total row count unknown (lazy source)
    } else if (missing > 0) {
      paste0(big_mark(missing), pluralise_n(" more row(s)", missing))
    }
    # else: nothing hidden -> NULL
  } else if (is.na(x$rows_total) && x$rows_min > 0) {
    # No columns at all, yet rows exist and the total is unknown.
    paste0("at least ", big_mark(x$rows_min), pluralise_n(" row(s) total", x$rows_min))
  }
}
format_footer_cols <- function(x, extra_cols) {
  n_hidden <- length(extra_cols)
  if (n_hidden == 0) {
    return(NULL)
  }
  # Say "more variables" only when some data rows were actually shown;
  # a NULL qualifier is silently dropped by paste0().
  qualifier <- if (!identical(x$rows_total, 0L) && x$rows_min > 0) "more "
  paste0(
    big_mark(n_hidden), " ",
    qualifier,
    pluralise("variable(s)", extra_cols),
    format_extra_vars(extra_cols)
  )
}
format_extra_vars <- function(extra_cols) {
  # extra_cols[1] is NA both for an all-NA vector and for an empty one
  # (indexing past the end yields NA), so this guard covers both cases.
  if (is.na(extra_cols[1])) {
    return("")
  }
  if (anyNA(extra_cols)) {
    # Replace the unknown (NA) names with a single trailing ellipsis.
    known <- extra_cols[!is.na(extra_cols)]
    extra_cols <- c(known, cli::symbol$ellipsis)
  }
  paste0(": ", collapse(extra_cols))
}
format_comment <- function(x, width) {
  # Render each element as a "# "-prefixed, width-wrapped comment line.
  if (length(x) == 0L) {
    return(character())
  }
  effective_width <- min(width, getOption("width"))
  map_chr(x, wrap, prefix = "# ", width = effective_width)
}
pre_dots <- function(x) {
  # Prefix each footer line with an ellipsis; empty input stays empty.
  if (length(x) == 0) {
    return(character())
  }
  paste0(cli::symbol$ellipsis, " ", x)
}
justify <- function(x, right = TRUE, space = " ") {
  # Pad all strings to the width of the widest one, filling with `space`
  # (possibly NBSP, so the padding survives later line wrapping).
  if (length(x) == 0L) {
    return(character())
  }
  widths <- nchar_width(x)
  pad_pool <- paste(rep(space, max(widths)), collapse = "")
  pads <- map_chr(max(widths) - widths, substr, x = pad_pool, start = 1L)
  if (right) paste0(pads, x) else paste0(x, pads)
}
# Explode embedded newlines into one element per line; callers rely on
# this so that styling is applied to whole lines (see format.trunc_mat).
split_lines <- function(x) {
  # Avoid .ptype argument to vec_c()
  if (is_empty(x)) return(character())
  unlist(strsplit(x, "\n", fixed = TRUE))
}
#' knit_print method for trunc mat
#' @keywords internal
#' @export
knit_print.trunc_mat <- function(x, options) {
  # Render the header as plain "name: value" lines.
  header <- format_header(x)
  if (length(header) > 0L) {
    header[names2(header) != ""] <- paste0(names2(header), ": ", header)
    summary <- header
  } else {
    summary <- character()
  }
  # Body as a knitr table; footer in parentheses, or a blank line
  # when there is nothing to report.
  squeezed <- pillar::squeeze(x$mcf, x$width)
  kable <- format_knitr_body(squeezed)
  extra <- format_footer(x, squeezed)
  if (length(extra) > 0) {
    extra <- wrap("(", collapse(extra), ")", width = x$width)
  } else {
    extra <- "\n"
  }
  # Strip ANSI styling before handing the text to knitr.
  res <- paste(c("", "", summary, "", kable, "", extra), collapse = "\n")
  knitr::asis_output(fansi::strip_sgr(res), cacheable = TRUE)
}
# Body rendering for knitr; delegates to pillar's knit_print method.
format_knitr_body <- function(x) {
  knitr::knit_print(x)
}
big_mark <- function(x, ...) {
  # Format integers with a thousands separator: "," normally, but "."
  # when "," already serves as the decimal point (OutDec option).
  decimal_is_comma <- identical(getOption("OutDec"), ",")
  sep <- if (decimal_is_comma) "." else ","
  out <- formatC(x, big.mark = sep, format = "d", ...)
  out[is.na(x)] <- "??"  # formatC renders NA as "NA"; show "??" instead
  out
}
# Multiplication sign used in dimension summaries (e.g. "10 x 3").
mult_sign <- function() {
  "x"
}
# Surround a string with single spaces.
spaces_around <- function(x) {
  paste0(" ", x, " ")
}
# Collapse values into a comma-separated list, quoting character values.
format_n <- function(x) collapse(quote_n(x))
# Quote a value for display; dispatches on the value's class.
quote_n <- function(x) UseMethod("quote_n")
#' @export
quote_n.default <- function(x) as.character(x)
#' @export
quote_n.character <- function(x) tick(x)
# Join elements with ", ".
collapse <- function(x) paste(x, collapse = ", ")
# wrap --------------------------------------------------------------------
# Non-breaking space: used as padding by justify() so strwrap2() does not
# break lines inside the padding; converted back to a plain space below.
NBSP <- "\U00A0"
# Concatenate ..., wrap to `width` (accounting for the prefix width),
# prepend `prefix` to every resulting line, and join with newlines.
wrap <- function(..., indent = 0, prefix = "", width) {
  x <- paste0(..., collapse = "")
  wrapped <- strwrap2(x, width - nchar_width(prefix), indent)
  wrapped <- paste0(prefix, wrapped)
  wrapped <- gsub(NBSP, " ", wrapped)
  paste0(wrapped, collapse = "\n")
}
# strwrap() variant that is aware of ANSI escape sequences (fansi);
# continuation lines are indented two spaces deeper than the first.
strwrap2 <- function(x, width, indent) {
  fansi::strwrap_ctl(x, width = max(width, 0), indent = indent, exdent = indent + 2)
}
| /R/print.R | permissive | tjebo/tibble | R | false | false | 10,622 | r | #' Printing tibbles
#'
#' @description
#' One of the main features of the `tbl_df` class is the printing:
#'
#' * Tibbles only print as many rows and columns as fit on one screen,
#' supplemented by a summary of the remaining rows and columns.
#' * Tibble reveals the type of each column, which keeps the user informed about
#' whether a variable is, e.g., `<chr>` or `<fct>` (character versus factor).
#'
#' Printing can be tweaked for a one-off call by calling `print()` explicitly
#' and setting arguments like `n` and `width`. More persistent control is
#' available by setting the options described below.
#'
#' As of tibble 3.1.0, printing is handled entirely by the \pkg{pillar} package.
#' If you implement a package that extend tibble,
#' the printed output can be customized in various ways.
#' See `vignette("extending", package = "pillar")` for details.
#'
#' @inheritSection pillar::`pillar-package` Package options
#' @section Package options:
#'
#' The following options are used by the tibble and pillar packages
#' to format and print `tbl_df` objects.
#' Used by the formatting workhorse `trunc_mat()` and, therefore,
#' indirectly, by `print.tbl()`.
#'
#' * `tibble.print_max`: Row number threshold: Maximum number of rows printed.
#' Set to `Inf` to always print all rows. Default: 20.
#' * `tibble.print_min`: Number of rows printed if row number threshold is
#' exceeded. Default: 10.
#' * `tibble.width`: Output width. Default: `NULL` (use `width` option).
#' * `tibble.max_extra_cols`: Number of extra columns printed in reduced form.
#' Default: 100.
#'
#' @param x Object to format or print.
#' @param ... Other arguments passed on to individual methods.
#' @param n Number of rows to show. If `NULL`, the default, will print all rows
#' if less than option `tibble.print_max`. Otherwise, will print
#' `tibble.print_min` rows.
#' @param width Width of text output to generate. This defaults to `NULL`, which
#' means use `getOption("tibble.width")` or (if also `NULL`)
#' `getOption("width")`; the latter displays only the columns that fit on one
#' screen. You can also set `options(tibble.width = Inf)` to override this
#' default and always print all columns, this may be slow for very wide tibbles.
#' @param n_extra Number of extra columns to print abbreviated information for,
#' if the width is too small for the entire tibble. If `NULL`, the default,
#' will print information about at most `tibble.max_extra_cols` extra columns.
#' @examples
#' print(as_tibble(mtcars))
#' print(as_tibble(mtcars), n = 1)
#' print(as_tibble(mtcars), n = 3)
#'
#' print(as_tibble(iris), n = 100)
#'
#' print(mtcars, width = 10)
#'
#' mtcars2 <- as_tibble(cbind(mtcars, mtcars), .name_repair = "unique")
#' print(mtcars2, n = 25, n_extra = 3)
#'
#' @examplesIf requireNamespace("nycflights13", quietly = TRUE)
#' print(nycflights13::flights, n_extra = 2)
#' print(nycflights13::flights, width = Inf)
#'
#' @name formatting
#' @aliases print.tbl format.tbl
NULL
# Only for documentation, doesn't do anything
#' @rdname formatting
print.tbl_df <- function(x, ..., n = NULL, width = NULL, n_extra = NULL) {
NextMethod()
}
# Only for documentation, doesn't do anything
#' @rdname formatting
format.tbl_df <- function(x, ..., n = NULL, width = NULL, n_extra = NULL) {
NextMethod()
}
#' Legacy printing
#'
#' @description
#' `r lifecycle::badge("deprecated")`
#' As of tibble 3.1.0, printing is handled entirely by the \pkg{pillar} package.
#' Do not use this function.
#' If you implement a package that extend tibble,
#' the printed output can be customized in various ways.
#' See `vignette("extending", package = "pillar")` for details.
#'
#' @inheritParams formatting
#' @export
#' @keywords internal
trunc_mat <- function(x, n = NULL, width = NULL, n_extra = NULL) {
deprecate_soft("3.1.0", "tibble::trunc_mat()",
details = "Printing has moved to the pillar package.")
rows <- nrow(x)
if (is.null(n) || n < 0) {
if (is.na(rows) || rows > tibble_opt("print_max")) {
n <- tibble_opt("print_min")
} else {
n <- rows
}
}
n_extra <- n_extra %||% tibble_opt("max_extra_cols")
if (is.na(rows)) {
df <- as.data.frame(head(x, n + 1))
if (nrow(df) <= n) {
rows <- nrow(df)
} else {
df <- df[seq_len(n), , drop = FALSE]
}
} else {
df <- as.data.frame(head(x, n))
}
shrunk <- shrink_mat(df, rows, n, star = has_rownames(x))
trunc_info <- list(
width = width, rows_total = rows, rows_min = nrow(df),
n_extra = n_extra, summary = tbl_sum(x)
)
structure(
c(shrunk, trunc_info),
class = c(paste0("trunc_mat_", class(x)), "trunc_mat")
)
}
shrink_mat <- function(df, rows, n, star) {
df <- remove_rownames(df)
if (is.na(rows)) {
needs_dots <- (nrow(df) >= n)
} else {
needs_dots <- (rows > n)
}
if (needs_dots) {
rows_missing <- rows - n
} else {
rows_missing <- 0L
}
mcf <- pillar::colonnade(
df,
has_row_id = if (star) "*" else TRUE
)
list(mcf = mcf, rows_missing = rows_missing)
}
#' @importFrom pillar style_subtle
#' @export
format.trunc_mat <- function(x, width = NULL, ...) {
if (is.null(width)) {
width <- x$width
}
width <- tibble_width(width)
named_header <- format_header(x)
if (all(names2(named_header) == "")) {
header <- named_header
} else {
header <- paste0(
justify(
paste0(names2(named_header), ":"),
right = FALSE, space = NBSP
),
# We add a space after the NBSP inserted by justify()
# so that wrapping occurs at the right location for very narrow outputs
" ",
named_header
)
}
comment <- format_comment(header, width = width)
squeezed <- pillar::squeeze(x$mcf, width = width)
mcf <- format_body(squeezed)
# Splitting lines is important, otherwise subtle style may be lost
# if column names contain spaces.
footer <- pre_dots(format_footer(x, squeezed))
footer_comment <- split_lines(format_comment(footer, width = width))
c(style_subtle(comment), mcf, style_subtle(footer_comment))
}
# Needs to be defined in package code: r-lib/pkgload#85
print_with_mocked_format_body <- function(x, ...) {
scoped_lifecycle_silence()
mockr::with_mock(
format_body = function(x, ...) {
paste0("<body created by pillar>")
},
{
print(x, ...)
}
)
}
#' @export
print.trunc_mat <- function(x, ...) {
cli::cat_line(format(x, ...))
invisible(x)
}
format_header <- function(x) {
x$summary
}
format_body <- function(x) {
format(x)
}
format_footer <- function(x, squeezed_colonnade) {
extra_rows <- format_footer_rows(x)
extra_cols <- format_footer_cols(x, pillar::extra_cols(squeezed_colonnade, n = x$n_extra))
extra <- c(extra_rows, extra_cols)
if (length(extra) >= 1) {
extra[[1]] <- paste0("with ", extra[[1]])
extra[-1] <- map_chr(extra[-1], function(ex) paste0("and ", ex))
collapse(extra)
} else {
character()
}
}
format_footer_rows <- function(x) {
if (length(x$mcf) != 0) {
if (is.na(x$rows_missing)) {
"more rows"
} else if (x$rows_missing > 0) {
paste0(big_mark(x$rows_missing), pluralise_n(" more row(s)", x$rows_missing))
}
} else if (is.na(x$rows_total) && x$rows_min > 0) {
paste0("at least ", big_mark(x$rows_min), pluralise_n(" row(s) total", x$rows_min))
}
}
format_footer_cols <- function(x, extra_cols) {
if (length(extra_cols) == 0) return(NULL)
vars <- format_extra_vars(extra_cols)
paste0(
big_mark(length(extra_cols)), " ",
if (!identical(x$rows_total, 0L) && x$rows_min > 0) "more ",
pluralise("variable(s)", extra_cols), vars
)
}
format_extra_vars <- function(extra_cols) {
# Also covers empty extra_cols vector!
if (is.na(extra_cols[1])) return("")
if (anyNA(extra_cols)) {
extra_cols <- c(extra_cols[!is.na(extra_cols)], cli::symbol$ellipsis)
}
paste0(": ", collapse(extra_cols))
}
format_comment <- function(x, width) {
if (length(x) == 0L) return(character())
map_chr(x, wrap, prefix = "# ", width = min(width, getOption("width")))
}
pre_dots <- function(x) {
if (length(x) > 0) {
paste0(cli::symbol$ellipsis, " ", x)
} else {
character()
}
}
justify <- function(x, right = TRUE, space = " ") {
if (length(x) == 0L) return(character())
width <- nchar_width(x)
max_width <- max(width)
spaces_template <- paste(rep(space, max_width), collapse = "")
spaces <- map_chr(max_width - width, substr, x = spaces_template, start = 1L)
if (right) {
paste0(spaces, x)
} else {
paste0(x, spaces)
}
}
split_lines <- function(x) {
# Avoid .ptype argument to vec_c()
if (is_empty(x)) return(character())
unlist(strsplit(x, "\n", fixed = TRUE))
}
#' knit_print method for trunc mat
#' @keywords internal
#' @export
knit_print.trunc_mat <- function(x, options) {
header <- format_header(x)
if (length(header) > 0L) {
header[names2(header) != ""] <- paste0(names2(header), ": ", header)
summary <- header
} else {
summary <- character()
}
squeezed <- pillar::squeeze(x$mcf, x$width)
kable <- format_knitr_body(squeezed)
extra <- format_footer(x, squeezed)
if (length(extra) > 0) {
extra <- wrap("(", collapse(extra), ")", width = x$width)
} else {
extra <- "\n"
}
res <- paste(c("", "", summary, "", kable, "", extra), collapse = "\n")
knitr::asis_output(fansi::strip_sgr(res), cacheable = TRUE)
}
format_knitr_body <- function(x) {
knitr::knit_print(x)
}
big_mark <- function(x, ...) {
# The thousand separator,
# "," unless it's used for the decimal point, in which case "."
mark <- if (identical(getOption("OutDec"), ",")) "." else ","
ret <- formatC(x, big.mark = mark, format = "d", ...)
ret[is.na(x)] <- "??"
ret
}
mult_sign <- function() {
"x"
}
spaces_around <- function(x) {
paste0(" ", x, " ")
}
format_n <- function(x) collapse(quote_n(x))
quote_n <- function(x) UseMethod("quote_n")
#' @export
quote_n.default <- function(x) as.character(x)
#' @export
quote_n.character <- function(x) tick(x)
collapse <- function(x) paste(x, collapse = ", ")
# wrap --------------------------------------------------------------------
NBSP <- "\U00A0"
wrap <- function(..., indent = 0, prefix = "", width) {
x <- paste0(..., collapse = "")
wrapped <- strwrap2(x, width - nchar_width(prefix), indent)
wrapped <- paste0(prefix, wrapped)
wrapped <- gsub(NBSP, " ", wrapped)
paste0(wrapped, collapse = "\n")
}
strwrap2 <- function(x, width, indent) {
fansi::strwrap_ctl(x, width = max(width, 0), indent = indent, exdent = indent + 2)
}
|
# UI for the datasets module: a tabset with the user's saved datasets
# and an upload form for a new one.
datasetsUI <- function(id) {
  ns <- NS(id)
  my_datasets_tab <- tabPanel(
    "Mis datasets",
    br(),
    uiOutput(ns("mode"))
  )
  new_dataset_tab <- tabPanel(
    "Nuevo conjunto de datos",
    br(),
    sidebarPanel(uploadDatasetUI(ns("upload"))),
    mainPanel(datasetUI(ns("dataset")))
  )
  tagList(tabsetPanel(
    id = ns("tabsetPanel"),
    my_datasets_tab,
    new_dataset_tab
  ))
}
# Server logic for the datasets module: switches between the dataset
# list and the edit view, and wires the upload/preview flow.
datasets <- function (input, output, session) {
  # reload: whether the dataset list should be refreshed;
  # show_edit: whether the edit view replaces the list view.
  values <- reactiveValues(reload = FALSE, show_edit = FALSE)
  path <- callModule(uploadDatasetServer, "upload", reactive(NULL))
  cancel <- callModule(editDatasetServer, "edit", dataset_id)
  # Preview of the uploaded CSV; -1 appears to signal "no file"
  # (assumption -- confirm against uploadDatasetServer).
  callModule(dataset, "dataset", reactive({
    req(path())
    if (path() != -1) {
      read.csv(path())
    }
  }))
  dataset_id <-
    callModule(listServer, "list", loadDatasets, reactive(values$reload))
  # Refresh the list whenever the user leaves the upload tab.
  observeEvent(input$tabsetPanel, {
    if (input$tabsetPanel == "Nuevo conjunto de datos") {
      values$reload <- FALSE
    } else{
      values$reload <- TRUE
    }
  })
  # Selecting a dataset switches to the edit view.
  observeEvent(dataset_id(), {
    values$show_edit <- TRUE
    values$reload <- FALSE
  })
  # Cancelling the edit returns to the (refreshed) list view.
  observeEvent(cancel(), {
    values$show_edit <- FALSE
    values$reload <- TRUE
  })
  # Render either the edit view or the list view into the "mode" slot.
  output$mode <- renderUI({
    if (values$show_edit) {
      editDatasetUI(session$ns("edit"))
    } else {
      listUI(session$ns("list"))
    }
  })
}
} | /modules/datasets/containers/datasets.R | no_license | JulioMh/TFG | R | false | false | 1,321 | r | datasetsUI <- function (id) {
ns <- NS(id)
tagList(tabsetPanel(
id = ns("tabsetPanel"),
tabPanel("Mis datasets",
br(),
uiOutput(ns("mode"))),
tabPanel(
"Nuevo conjunto de datos",
br(),
sidebarPanel(uploadDatasetUI(ns("upload"))),
mainPanel(datasetUI(ns("dataset")))
)
))
}
datasets <- function (input, output, session) {
values <- reactiveValues(reload = FALSE, show_edit = FALSE)
path <- callModule(uploadDatasetServer, "upload", reactive(NULL))
cancel <- callModule(editDatasetServer, "edit", dataset_id)
callModule(dataset, "dataset", reactive({
req(path())
if (path() != -1) {
read.csv(path())
}
}))
dataset_id <-
callModule(listServer, "list", loadDatasets, reactive(values$reload))
observeEvent(input$tabsetPanel, {
if (input$tabsetPanel == "Nuevo conjunto de datos") {
values$reload <- FALSE
} else{
values$reload <- TRUE
}
})
observeEvent(dataset_id(), {
values$show_edit <- TRUE
values$reload <- FALSE
})
observeEvent(cancel(), {
values$show_edit <- FALSE
values$reload <- TRUE
})
output$mode <- renderUI({
if (values$show_edit) {
editDatasetUI(session$ns("edit"))
} else {
listUI(session$ns("list"))
}
})
} |
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/run.R
\name{run}
\alias{run}
\alias{run_flow}
\alias{run_pipe}
\title{Run automated Pipelines}
\usage{
run(x, platform, def, conf, wd = get_opts("flow_run_path"),
flow_run_path = wd, rerun_wd, start_from, execute = FALSE, ...)
run_pipe(x, platform, def, conf, wd = get_opts("flow_run_path"),
flow_run_path = wd, rerun_wd, start_from, execute = FALSE, ...)
}
\arguments{
\item{x}{name of the pipeline to run. This is a function called to create a flow_mat.}
\item{platform}{what platform to use, overrides flowdef}
\item{def}{flow definition}
\item{conf}{a tab-delimited configuration file with path to tools and default parameters. See \link{fetch_pipes}.}
\item{wd}{an alias to flow_run_path}
\item{flow_run_path}{passed onto to_flow. Default it picked up from flowr.conf. Typically this is ~/flowr/runs}
\item{rerun_wd}{if you need to run, supply the previous working dir}
\item{start_from}{the step to start a rerun from. Intuitively, this is ignored in a fresh run and only used in re-running a pipeline.}
\item{execute}{TRUE/FALSE}
\item{...}{passed onto the pipeline function as specified in x}
}
\description{
Run complete pipelines by wrapping several steps into one convenient function:
NOTE: please use flowr version: 0.9.8.9010
Taking \code{sleep_pipe} as a example.
\itemize{
\item Use \link{fetch_pipes} to get paths to a Rscript, flowdef file and optionally a configuration file
with various default options used.
\item Create a flowmat (using the function defined in the Rscript)
\item Create a `flow` object, using flowmat created and flowdef (as fetched using fetch_pipes)
\item Submit the flow to the cluster (using \link{submit_flow})
}
}
\examples{
\dontrun{
## Run a short pipeline (dry run)
run("sleep_pipe")
## Run a short pipeline on the local machine
run("sleep_pipe", platform = "local", execute = TRUE)
## Run a short pipeline on the a torque cluster (qsub)
run("sleep_pipe", platform = "torque", execute = TRUE)
## Run a short pipeline on the a MOAB cluster (msub)
run("sleep_pipe", platform = "moab", execute = TRUE)
## Run a short pipeline on the a IBM (LSF) cluster (bsub)
run("sleep_pipe", platform = "lsf", execute = TRUE)
## Run a short pipeline on the a MOAB cluster (msub)
run("sleep_pipe", platform = "moab", execute = TRUE)
## change parameters of the pipeline
## All extra parameters are passed on to the function function.
run("sleep_pipe", platform = "lsf", execute = TRUE, x = 5)
}
}
| /man/run.Rd | permissive | KillEdision/flowr | R | false | false | 2,548 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/run.R
\name{run}
\alias{run}
\alias{run_flow}
\alias{run_pipe}
\title{Run automated Pipelines}
\usage{
run(x, platform, def, conf, wd = get_opts("flow_run_path"),
flow_run_path = wd, rerun_wd, start_from, execute = FALSE, ...)
run_pipe(x, platform, def, conf, wd = get_opts("flow_run_path"),
flow_run_path = wd, rerun_wd, start_from, execute = FALSE, ...)
}
\arguments{
\item{x}{name of the pipeline to run. This is a function called to create a flow_mat.}
\item{platform}{what platform to use, overrides flowdef}
\item{def}{flow definition}
\item{conf}{a tab-delimited configuration file with path to tools and default parameters. See \link{fetch_pipes}.}
\item{wd}{an alias to flow_run_path}
\item{flow_run_path}{passed onto to_flow. Default it picked up from flowr.conf. Typically this is ~/flowr/runs}
\item{rerun_wd}{if you need to run, supply the previous working dir}
\item{start_from}{the step to start a rerun from. Intitutively, this is ignored in a fresh run and only used in re-running a pipeline.}
\item{execute}{TRUE/FALSE}
\item{...}{passed onto the pipeline function as specified in x}
}
\description{
Run complete pipelines, by wrapping several steps into one convinient function:
NOTE: please use flowr version: 0.9.8.9010
Taking \code{sleep_pipe} as a example.
\itemize{
\item Use \link{fetch_pipes} to get paths to a Rscript, flowdef file and optionally a configuration file
with various default options used.
\item Create a flowmat (using the function defined in the Rscript)
\item Create a `flow` object, using flowmat created and flowdef (as fetched using fetch_pipes)
\item Submit the flow to the cluster (using \link{submit_flow})
}
}
\examples{
\dontrun{
## Run a short pipeline (dry run)
run("sleep_pipe")
## Run a short pipeline on the local machine
run("sleep_pipe", platform = "local", execute = TRUE)
## Run a short pipeline on the a torque cluster (qsub)
run("sleep_pipe", platform = "torque", execute = TRUE)
## Run a short pipeline on the a MOAB cluster (msub)
run("sleep_pipe", platform = "moab", execute = TRUE)
## Run a short pipeline on the a IBM (LSF) cluster (bsub)
run("sleep_pipe", platform = "lsf", execute = TRUE)
## Run a short pipeline on the a MOAB cluster (msub)
run("sleep_pipe", platform = "moab", execute = TRUE)
## change parameters of the pipeline
## All extra parameters are passed on to the function function.
run("sleep_pipe", platform = "lsf", execute = TRUE, x = 5)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/batch_functions.R
\name{render_notebook}
\alias{render_notebook}
\title{Batch-render analysis notebooks for multiple participants}
\usage{
render_notebook(notebook_file, notebook_dir = "analysis",
reports_dir = "reports", params_tibble, force = FALSE)
}
\arguments{
\item{notebook_file}{filename of the template notebook to be run}
\item{notebook_dir}{directory where the template notebook resides}
\item{reports_dir}{directory where reports are written}
\item{params_tibble}{tibble of parameter values with which to run the notebooks}
\item{force}{whether or not to rerun a notebook when its report already exists}
}
\description{
The template notebook is rendered once per row of \code{params_tibble}, writing one report per participant to \code{reports_dir}.
}
| /man/render_notebook.Rd | permissive | bramzandbelt/cmdsddfeitc | R | false | true | 726 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/batch_functions.R
\name{render_notebook}
\alias{render_notebook}
\title{Batch-render analysis notebooks for multiple participants}
\usage{
render_notebook(notebook_file, notebook_dir = "analysis",
reports_dir = "reports", params_tibble, force = FALSE)
}
\arguments{
\item{notebook_file}{filename of the template notebook to be run}
\item{notebook_dir}{directory where the template notebook resides}
\item{reports_dir}{directory where reports are written}
\item{params_tibble}{tibble of parameter values with which to run the notebooks}
\item{force}{whether or note to rerun a notebook when it exists}
}
\description{
A notebook will be run
}
|
#' Run an edgeR GLM likelihood-ratio differential-expression analysis.
#'
#' Normalizes counts (TMM for bulk, scran-derived factors for single cell),
#' estimates dispersions, fits a negative-binomial GLM and performs a
#' likelihood-ratio test, timing each stage separately.
#'
#' NOTE(review): the original @importFrom listed glmQLFit/glmQLFTest,
#' but this function calls glmFit/glmLRT; the directive is corrected here.
#' @importFrom edgeR DGEList calcNormFactors estimateDisp glmFit glmLRT topTags
#' @importFrom stats model.matrix
.run.edgeRglm <- function(dat) {
  start.time.params <- Sys.time()
  ## run edgeR
  dge <- edgeR::DGEList(counts=dat$counts, group=factor(dat$designs))
  if (dat$RNAseq=="bulk") {
    dge <- edgeR::calcNormFactors(dge, method='TMM')
  }
  if (dat$RNAseq=="singlecell") {
    # make sceset and calculate size factors
    sce <- .scran.calc(cnts = dat$counts)
    dge <- .convertToedgeR(sce)
    dge$samples$group <- factor(dat$designs)
  }
  # DE testing
  design.mat <- stats::model.matrix( ~ dat$designs)
  dge <- edgeR::estimateDisp(y=dge, design = design.mat, robust=T)
  end.time.params <- Sys.time()
  start.time.DE <- Sys.time()
  fit.edgeR <- edgeR::glmFit(dge, design = design.mat)
  lrt.edgeR <- edgeR::glmLRT(fit.edgeR)
  res.edgeR <- edgeR::topTags(lrt.edgeR, adjust.method="BH", n=Inf, sort.by = 'none')
  end.time.DE <- Sys.time()
  # mean, disp, dropout: normalized means, tagwise dispersions and
  # per-gene dropout rate (fraction of samples with a zero count)
  start.time.NB <- Sys.time()
  means <- rowMeans(dge$counts / dge$samples$norm.factors)
  dispersion <- dge$tagwise.dispersion
  nsamples <- ncol(dge$counts)
  counts0 <- dge$counts == 0
  nn0 <- rowSums(!counts0)
  p0 <- (nsamples - nn0)/nsamples
  end.time.NB <- Sys.time()
  ## construct results (fdr left NA; filled in downstream)
  result <- data.frame(geneIndex=rownames(res.edgeR$table), means=means, dispersion=dispersion, dropout=p0, pval=res.edgeR$table$PValue, fdr=rep(NA, nrow(res.edgeR$table)), stringsAsFactors = F)
  time.taken.params <- difftime(end.time.params, start.time.params, units="mins")
  time.taken.DE <- difftime(end.time.DE, start.time.DE, units="mins")
  time.taken.NB <- difftime(end.time.NB, start.time.NB, units="mins")
  timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
  res <- list(result=result, timing=timing)
  return(res)
}
#' #' @importFrom edgeR DGEList calcNormFactors estimateDisp glmFit glmLRT topTags
#' #' @importFrom stats model.matrix
#' .run.edgeRql <- function(dat) {
#' start.time.params <- Sys.time()
#' ## run edgeR
#' dge <- edgeR::DGEList(counts=dat$counts, group=factor(dat$designs))
#' if (dat$RNAseq=="bulk") {
#' dge <- edgeR::calcNormFactors(dge)
#' }
#' if (dat$RNAseq=="singlecell") {
#' # make sceset and calculate size factors
#' sce <- .scran.calc(cnts = dat$counts)
#' dge <- .convertToedgeR(sce)
#' dge$samples$group <- factor(dat$designs)
#' }
#'
#' # DE testing
#' design.mat <- stats::model.matrix(~ dat$designs)
#' dge <- edgeR::estimateDisp(y=dge, design = design.mat)
#' end.time.params <- Sys.time()
#' start.time.DE <- Sys.time()
#' fit.edgeR <- edgeR::glmQLFit(dge, design = design.mat, robust=T)
#' Ftest.edgeR <- edgeR::glmQLFTest(fit.edgeR)
#' res.edgeR <- edgeR::topTags(Ftest.edgeR, adjust.method="BH", n=Inf, sort.by = 'none')
#' end.time.DE <- Sys.time()
#'
#' # mean, disp, dropout
#' start.time.NB <- Sys.time()
#' means <- rowMeans(dge$counts / dge$samples$norm.factors)
#' dispersion <- dge$tagwise.dispersion
#' nsamples <- ncol(dge$counts)
#' counts0 <- dge$counts == 0
#' nn0 <- rowSums(!counts0)
#' p0 <- (nsamples - nn0)/nsamples
#' end.time.NB <- Sys.time()
#'
#' ## construct results
#' result <- data.frame(geneIndex=rownames(res.edgeR$table), means=means, dispersion=dispersion, dropout=p0, pval=res.edgeR$table$PValue, fdr=rep(NA, nrow(res.edgeR$table)), stringsAsFactors = F)
#' time.taken.params <- difftime(end.time.params, start.time.params, units="mins")
#' time.taken.DE <- difftime(end.time.DE, start.time.DE, units="mins")
#' time.taken.NB <- difftime(end.time.NB, start.time.NB, units="mins")
#' timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
#' res <- list(result=result, timing=timing)
#' return(res)
#' }
#' @importFrom limma lmFit eBayes voom topTable
#' @importFrom edgeR DGEList calcNormFactors
#' @importFrom stats model.matrix
# Run limma-voom differential expression testing on one simulated data set.
# `dat` carries: counts (gene x sample matrix), designs (group labels),
# RNAseq ("bulk" or "singlecell"), p.DE (expected proportion of DE genes,
# passed to eBayes).  Returns list(result, timing): a per-gene data frame
# (moment estimates of mean/dispersion/dropout plus limma p-values) and the
# per-stage runtimes in minutes.
.run.limma <- function(dat) {
  start.time.params <- Sys.time()
  # build a DGEList and obtain normalisation factors
  y <- edgeR::DGEList(counts = dat$counts, group = factor(dat$designs))
  if (dat$RNAseq == "bulk") {
    y <- edgeR::calcNormFactors(y)
  }
  if (dat$RNAseq == "singlecell") {
    # single cell: size factors via scran, then convert to an edgeR object
    sce <- .scran.calc(cnts = dat$counts)
    y <- .convertToedgeR(sce)
    y$samples$group <- factor(dat$designs)
  }
  # voom transformation followed by the limma linear model fit
  prop.de <- dat$p.DE
  mm <- stats::model.matrix( ~ dat$designs)
  voomed <- limma::voom(y, mm, plot = FALSE)
  end.time.params <- Sys.time()
  start.time.DE <- Sys.time()
  lm.fit <- limma::lmFit(object = voomed, design = mm)
  lm.fit <- limma::eBayes(lm.fit, proportion = prop.de, robust = TRUE)
  tab <- limma::topTable(fit = lm.fit, coef = 2, number = Inf, adjust.method = "BH", sort.by = "none")
  end.time.DE <- Sys.time()
  # method-of-moments estimates of per-gene mean, NB dispersion and dropout
  start.time.NB <- Sys.time()
  nc <- y$counts / y$samples$norm.factors
  ns <- ncol(nc)
  is.zero <- nc == 0
  n.nonzero <- rowSums(!is.zero)
  dropout <- (ns - n.nonzero) / ns
  mu <- rowSums(nc) / ns
  v2 <- rowSums((nc - mu)^2) / (ns - 1)
  nb.size <- mu^2 / (v2 - mu + 1e-04)
  nb.size <- ifelse(nb.size > 0, nb.size, NA)
  phi <- 1 / nb.size
  end.time.NB <- Sys.time()
  ## assemble the per-gene result table and the stage timings
  result <- data.frame(geneIndex = rownames(tab), means = mu, dispersion = phi, dropout = dropout, pval = tab$P.Value, fdr = rep(NA, nrow(tab)), stringsAsFactors = FALSE)
  time.taken.params <- difftime(end.time.params, start.time.params, units = "mins")
  time.taken.DE <- difftime(end.time.DE, start.time.DE, units = "mins")
  time.taken.NB <- difftime(end.time.NB, start.time.NB, units = "mins")
  timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
  res <- list(result = result, timing = timing)
  return(res)
}
#' @importFrom DESeq2 DESeqDataSetFromMatrix estimateSizeFactors DESeq sizeFactors counts results
#' @importFrom BiocParallel MulticoreParam
#' @importFrom scater sizeFactors
#' @importFrom stats model.matrix
# Run a DESeq2 Wald test on one simulated data set.
# `dat` carries: counts (gene x sample matrix), designs (group labels),
# RNAseq ("bulk" or "singlecell"), ncores (NULL for serial execution).
# Returns list(result, timing): per-gene baseMean / gene-wise dispersion /
# dropout plus DESeq2 p-values, and the per-stage runtimes in minutes.
.run.DESeq2 <- function(dat) {
  start.time.params <- Sys.time()
  coldat <- data.frame(design=factor(dat$designs))
  ## run DESeq2
  dds <- DESeq2::DESeqDataSetFromMatrix(dat$counts, coldat, ~design, tidy = FALSE, ignoreRank = FALSE)
  if (dat$RNAseq=="bulk") {
    dds <- DESeq2::estimateSizeFactors(dds)
  }
  if (dat$RNAseq=="singlecell") {
    # make sceset and calculate size factors via scran
    sce <- .scran.calc(cnts = dat$counts)
    out.sf <- scater::sizeFactors(sce)
    # scran can produce negative size factors; clamp to the smallest positive one
    out.sf[out.sf<0] <- min(out.sf[out.sf > 0])
    DESeq2::sizeFactors(dds) <- out.sf
  }
  end.time.params <- Sys.time()
  # DE testing (optionally parallel via BiocParallel)
  start.time.DE <- Sys.time()
  if (is.null(dat$ncores)) {
    fit.DeSeq <- DESeq2::DESeq(dds, test="Wald", quiet = TRUE, parallel=FALSE)
  } else {
    fit.DeSeq <- DESeq2::DESeq(dds, test="Wald", quiet = TRUE, parallel=TRUE, BPPARAM = BiocParallel::MulticoreParam(dat$ncores))
  }
  res.DeSeq <- DESeq2::results(fit.DeSeq)
  end.time.DE <- Sys.time()
  # mean, disp, dropout
  start.time.NB <- Sys.time()
  means <- as.vector(S4Vectors::mcols(fit.DeSeq)[, "baseMean"])
  dispersion <- as.vector(S4Vectors::mcols(fit.DeSeq)[, "dispGeneEst"])
  # BUGFIX: counts() was previously called unqualified, which only resolves when
  # DESeq2 is attached; qualify it so the function also works when DESeq2 is imported.
  nsamples <- ncol(DESeq2::counts(dds))
  counts0 <- DESeq2::counts(dds) == 0
  nn0 <- rowSums(!counts0)
  p0 <- (nsamples - nn0)/nsamples
  end.time.NB <- Sys.time()
  ## construct results
  result <- data.frame(geneIndex=rownames(res.DeSeq), means=means, dispersion=dispersion, dropout=p0, pval=res.DeSeq$pvalue, fdr=rep(NA, nrow(dat$counts)), stringsAsFactors = FALSE)
  time.taken.params <- difftime(end.time.params, start.time.params, units="mins")
  time.taken.DE <- difftime(end.time.DE, start.time.DE, units="mins")
  time.taken.NB <- difftime(end.time.NB, start.time.NB, units="mins")
  timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
  res <- list(result=result, timing=timing)
  return(res)
}
#' @importFrom edgeR DGEList calcNormFactors cpm.DGEList
#' @importFrom ROTS ROTS
# Run ROTS (reproducibility-optimized test statistic) on one simulated data set.
# `dat` carries: counts (gene x sample matrix), designs (group labels),
# RNAseq ("bulk" or "singlecell").  ROTS is run on log2(CPM+1) values.
# Returns list(result, timing): per-gene moment estimates plus ROTS p-values,
# and the per-stage runtimes in minutes.
# Cleanup over the original: consistent `<-` assignment and TRUE/FALSE literals
# (T/F are reassignable), and the ROTS output no longer shares the name `res`
# with the returned list.
.run.ROTS <- function(dat) {
  start.time.params <- Sys.time()
  if (dat$RNAseq=="bulk") {
    dge <- edgeR::DGEList(dat$counts, group = factor(dat$designs))
    dge <- edgeR::calcNormFactors(dge, method='TMM')
  }
  if (dat$RNAseq=="singlecell") {
    # make sceset and calculate size factors
    sce <- .scran.calc(cnts = dat$counts)
    dge <- .convertToedgeR(sce)
    dge$samples$group <- factor(dat$designs)
  }
  # size factor normalised log2(CPM+1) values. Note that the function in scater gave negative values and when cpm.DGEList was allowed to take the log itself all CPMs were nonzero!
  out.cpm <- edgeR::cpm.DGEList(dge, normalized.lib.sizes = TRUE, log = FALSE)
  out.expr <- log2(out.cpm+1)
  end.time.params <- Sys.time()
  # mean, disp, dropout (method-of-moments NB estimates on normalised counts)
  start.time.NB <- Sys.time()
  norm.counts <- dge$counts / dge$samples$norm.factors
  nsamples <- ncol(norm.counts)
  counts0 <- norm.counts == 0
  nn0 <- rowSums(!counts0)
  p0 <- (nsamples - nn0)/nsamples
  means <- rowSums(norm.counts)/nsamples
  s2 <- rowSums((norm.counts - means)^2)/(nsamples - 1)
  size <- means^2/(s2 - means + 1e-04)
  size <- ifelse(size > 0, size, NA)
  dispersion <- 1/size
  end.time.NB <- Sys.time()
  start.time.DE <- Sys.time()
  # run ROTS; B bootstrap resamplings, K top-list sizes up to half the genes
  rots.out <- ROTS::ROTS(data = out.expr, groups = factor(dat$designs) , B = 50, K = floor(nrow(out.expr)/2) , progress=FALSE)
  end.time.DE <- Sys.time()
  # construct result data frame
  result <- data.frame(geneIndex=rownames(rots.out$data), means=means, dispersion=dispersion, dropout=p0, pval=rots.out$pvalue, fdr=rep(NA, nrow(dat$counts)), stringsAsFactors = FALSE)
  time.taken.params <- difftime(end.time.params, start.time.params, units="mins")
  time.taken.DE <- difftime(end.time.DE, start.time.DE, units="mins")
  time.taken.NB <- difftime(end.time.NB, start.time.NB, units="mins")
  timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
  res <- list(result=result, timing=timing)
  return(res)
}
#' @importFrom edgeR DGEList calcNormFactors cpm.DGEList
#' @importFrom snow makeCluster stopCluster
#' @importMethodsFrom baySeq libsizes
#' @importFrom baySeq getPriors.NB getLikelihoods topCounts
# Run baySeq empirical-Bayes differential expression on one simulated data set.
# `dat` carries: counts (gene x sample matrix), designs (group labels coded
# -1/1), RNAseq ("bulk" or "singlecell"), ncores (NULL for serial execution).
# Returns list(result, timing); baySeq reports FDR-like posterior scores
# (FDR.DE), so pval is NA in the result table.
# FIX over original: the snow cluster is released via on.exit(), so it is no
# longer leaked when getPriors.NB/getLikelihoods/topCounts error out.
.run.baySeq <- function(dat) {
  start.time.params <- Sys.time()
  if (dat$RNAseq=="bulk") {
    dge <- edgeR::DGEList(dat$counts, group = factor(dat$designs))
    dge <- edgeR::calcNormFactors(dge)
  }
  if (dat$RNAseq=="singlecell") {
    # make sceset and calculate size factors
    sce <- .scran.calc(cnts = dat$counts)
    dge <- .convertToedgeR(sce)
    dge$samples$group <- factor(dat$designs)
  }
  # set up an optional snow cluster; guarantee teardown even if a later call errors
  cl <- NULL
  if (!is.null(dat$ncores)) {
    cl <- snow::makeCluster(dat$ncores)
    on.exit(snow::stopCluster(cl), add = TRUE)
  }
  # make input data sets for baySeq: NDE model (one group) vs DE model (two groups)
  replicates <- ifelse(dat$designs==-1, "A", "B")
  groups <- list(NDE = c(rep(1, length(dat$designs))),
                 DE = c(ifelse(dat$designs==-1, 1, 2)))
  CD <- methods::new("countData", data = dge$counts, replicates = replicates, groups = groups)
  # fill in library size factors (effective library sizes from edgeR/scran)
  CD@sampleObservables$libsizes <- dge$samples$norm.factors * dge$samples$lib.size
  CD@annotation <- data.frame(name = rownames(dge$counts), stringsAsFactors = FALSE)
  # run prior estimation
  CD <- baySeq::getPriors.NB(CD, samplesize = nrow(dge$counts), estimation = "QL", cl = cl, equalDispersions=TRUE, verbose=FALSE)
  end.time.params <- Sys.time()
  start.time.DE <- Sys.time()
  # run likelihood ratio test
  CD <- baySeq::getLikelihoods(CD, cl = cl, bootStraps = 10, verbose = FALSE)
  # get test results, re-ordered back to the original gene order
  res <- baySeq::topCounts(cD=CD, group="DE", decreasing = FALSE, number = Inf, normaliseData = FALSE)
  res <- res[match(CD@annotation$name, res$annotation),]
  end.time.DE <- Sys.time()
  # mean, disp, dropout (method-of-moments NB estimates on normalised counts)
  start.time.NB <- Sys.time()
  norm.counts <- dge$counts / dge$samples$norm.factors
  nsamples <- ncol(norm.counts)
  counts0 <- norm.counts == 0
  nn0 <- rowSums(!counts0)
  p0 <- (nsamples - nn0)/nsamples
  means <- rowSums(norm.counts)/nsamples
  s2 <- rowSums((norm.counts - means)^2)/(nsamples - 1)
  size <- means^2/(s2 - means + 1e-04)
  size <- ifelse(size > 0, size, NA)
  dispersion <- 1/size
  end.time.NB <- Sys.time()
  # construct result data frame
  result <- data.frame(geneIndex=res$annotation, means=means, dispersion=dispersion, dropout=p0, pval=rep(NA, nrow(dat$counts)), fdr=res$FDR.DE, stringsAsFactors = FALSE)
  time.taken.params <- difftime(end.time.params, start.time.params, units="mins")
  time.taken.DE <- difftime(end.time.DE, start.time.DE, units="mins")
  time.taken.NB <- difftime(end.time.NB, start.time.NB, units="mins")
  timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
  res <- list(result=result, timing=timing)
  return(res)
}
#' @importFrom NOISeq readData noiseqbio
#' @importFrom edgeR DGEList calcNormFactors cpm.DGEList
# Run NOISeqBIO differential expression testing on one simulated data set.
# `dat` carries: counts (gene x sample matrix), designs (group labels),
# RNAseq ("bulk" or "singlecell").  Returns list(result, timing) where
# `result` has per-gene moment estimates (mean/dispersion/dropout) and an
# FDR-like score (1 - NOISeq posterior probability); pval is NA because
# NOISeq does not produce p-values.  `timing` holds per-stage runtimes
# in minutes.  NOTE: timing variables are set inside each branch, so if
# dat$RNAseq is neither "bulk" nor "singlecell" the function fails at the
# difftime calls with undefined variables.
.run.NOISeq <- function(dat) {
  start.time.params <- Sys.time()
  groups <- data.frame(Group=factor(dat$designs))
  if (dat$RNAseq=="bulk") {
    # bulk: TMM normalisation; NOISeq is fed raw counts and normalises internally
    dge <- edgeR::DGEList(dat$counts, group = factor(dat$designs))
    dge <- edgeR::calcNormFactors(dge, method="TMM")
    # make input data set
    in.noiseq <- NOISeq::readData(data = dat$counts, factors = groups)
    end.time.params <- Sys.time()
    start.time.DE <- Sys.time()
    # run DE detection (norm = "tmm": NOISeq applies TMM itself)
    calc.noiseq <- NOISeq::noiseqbio(in.noiseq, k = NULL, norm = "tmm", nclust = 15, plot = FALSE,
                                     factor="Group", conditions = NULL, lc = 0, r = 50, adj = 1.5,
                                     a0per = 0.9, filter = 0)
    res <- calc.noiseq@results[[1]]
    # convert NOISeq's posterior probability of DE into an FDR-like score
    res$fdr <- 1-res$prob
    end.time.DE <- Sys.time()
  }
  if (dat$RNAseq=="singlecell") {
    # make sceset and calculate size factors (scran), then convert to edgeR object
    sce <- .scran.calc(cnts = dat$counts)
    dge <- .convertToedgeR(sce)
    dge$samples$group <- factor(dat$designs)
    # size factor normalised CPM values.
    out.cpm <- edgeR::cpm.DGEList(dge, normalized.lib.sizes = T, log = F)
    # make input data set from pre-normalised CPMs
    in.noiseq <- NOISeq::readData(data = out.cpm, factors = groups)
    end.time.params <- Sys.time()
    start.time.DE <- Sys.time()
    # run DE detection (norm = "n": data already normalised above)
    calc.noiseq <- NOISeq::noiseqbio(in.noiseq, k = NULL, norm = "n", nclust = 15, plot = FALSE,
                                     factor="Group", conditions = NULL, lc = 0, r = 50, adj = 1.5,
                                     a0per = 0.9, filter = 0)
    res <- calc.noiseq@results[[1]]
    res$fdr <- 1-res$prob
    end.time.DE <- Sys.time()
  }
  # mean, disp, dropout: method-of-moments NB estimates on normalised counts
  start.time.NB <- Sys.time()
  norm.counts <- dge$counts / dge$samples$norm.factors
  nsamples <- ncol(norm.counts)
  counts0 <- norm.counts == 0
  nn0 <- rowSums(!counts0)
  p0 <- (nsamples - nn0)/nsamples
  means = rowSums(norm.counts)/nsamples
  s2 = rowSums((norm.counts - means)^2)/(nsamples - 1)
  # NB size from mean/variance; small offset avoids division by ~0, and
  # non-positive estimates (underdispersed genes) are set to NA
  size = means^2/(s2 - means + 1e-04)
  size = ifelse(size > 0, size, NA)
  dispersion = 1/size
  end.time.NB <- Sys.time()
  # construct result data frame
  result=data.frame(geneIndex=rownames(res), means=means, dispersion=dispersion, dropout=p0, pval=rep(NA, nrow(res)), fdr=res$fdr, stringsAsFactors = F)
  time.taken.params <- difftime(end.time.params, start.time.params, units="mins")
  time.taken.DE <- difftime(end.time.DE, start.time.DE, units="mins")
  time.taken.NB <- difftime(end.time.NB, start.time.NB, units="mins")
  timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
  res <- list(result=result, timing=timing)
  return(res)
}
#' @importFrom DSS newSeqCountSet estNormFactors estDispersion waldTest
#' @importFrom splines ns
#' @importFrom edgeR DGEList calcNormFactors
#' @importFrom scater sizeFactors
# Run a DSS Wald test on one simulated data set.
# `dat` carries: counts (gene x sample matrix), designs (group labels coded
# -1/1, recoded here to 0/1 for DSS), RNAseq ("bulk" or "singlecell").
# Returns list(result, timing): per-gene moment estimates plus DSS p-values,
# and per-stage runtimes in minutes.  NOTE: `pval` and the timing variables
# are only defined inside the two branches, so an unexpected dat$RNAseq value
# fails later with undefined variables.
.run.DSS <- function(dat) {
  start.time.params <- Sys.time()
  # make input data set; DSS expects a bare matrix, so strip dimnames
  designs <- ifelse(dat$designs==-1, 0, 1)
  cd <- dat$counts
  rownames(cd) <- NULL
  colnames(cd) <- NULL
  seqData <- DSS::newSeqCountSet(counts = cd, designs = designs)
  if (dat$RNAseq=="bulk") {
    # estimate mean, dispersion (edgeR object kept for the moment estimates below)
    dge <- edgeR::DGEList(dat$counts, group = factor(dat$designs))
    dge <- edgeR::calcNormFactors(dge)
    # estimate size factors and dispersions with DSS's own routines
    seqData <- DSS::estNormFactors(seqData)
    seqData <- DSS::estDispersion(seqData)
    end.time.params <- Sys.time()
    start.time.DE <- Sys.time()
    # run DE detection; waldTest output is re-sorted to original gene order
    res.dss <- suppressWarnings(DSS::waldTest(seqData = seqData,
                                              sampleA = 0, sampleB = 1))
    res.dss <- res.dss[order(res.dss$geneIndex),]
    pval <- res.dss$pval
    end.time.DE <- Sys.time()
  }
  if (dat$RNAseq=="singlecell") {
    # make sceset and calculate size factors (scran), then convert to edgeR object
    sce <- .scran.calc(cnts = dat$counts)
    dge <- .convertToedgeR(sce)
    dge$samples$group <- factor(dat$designs)
    # use scran size factors instead of DSS's; clamp non-positive values to
    # the smallest positive one before writing into the S4 slot
    out.sf <- scater::sizeFactors(sce)
    out.sf[out.sf<0] <- min(out.sf[out.sf > 0])
    seqData@normalizationFactor <- out.sf
    seqData <- DSS::estDispersion(seqData)
    end.time.params <- Sys.time()
    start.time.DE <- Sys.time()
    # run DE detection
    res.dss <- suppressWarnings(DSS::waldTest(seqData = seqData,
                                              sampleA = 0, sampleB = 1))
    res.dss <- res.dss[order(res.dss$geneIndex),]
    pval <- res.dss$pval
    end.time.DE <- Sys.time()
  }
  # mean, disp, dropout: method-of-moments NB estimates on normalised counts
  start.time.NB <- Sys.time()
  norm.counts <- dge$counts / dge$samples$norm.factors
  nsamples <- ncol(norm.counts)
  counts0 <- norm.counts == 0
  nn0 <- rowSums(!counts0)
  p0 <- (nsamples - nn0)/nsamples
  means = rowSums(norm.counts)/nsamples
  s2 = rowSums((norm.counts - means)^2)/(nsamples - 1)
  # non-positive NB size estimates (underdispersed genes) become NA
  size = means^2/(s2 - means + 1e-04)
  size = ifelse(size > 0, size, NA)
  dispersion = 1/size
  end.time.NB <- Sys.time()
  # construct result data frame (gene names taken from the original count matrix)
  result=data.frame(geneIndex=rownames(dat$counts), means=means, dispersion=dispersion, dropout=p0, pval=pval, fdr=rep(NA, nrow(dat$counts)), stringsAsFactors = F)
  time.taken.params <- difftime(end.time.params, start.time.params, units="mins")
  time.taken.DE <- difftime(end.time.DE, start.time.DE, units="mins")
  time.taken.NB <- difftime(end.time.NB, start.time.NB, units="mins")
  timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
  res <- list(result=result, timing=timing)
  return(res)
}
#' @importFrom EBSeq MedianNorm EBTest
#' @importFrom edgeR DGEList calcNormFactors
#' @importFrom scater sizeFactors
# Run EBSeq empirical-Bayes differential expression on one simulated data set.
# `dat` carries: counts (gene x sample matrix), designs (group labels),
# RNAseq ("bulk" or "singlecell").  EBSeq reports posterior probabilities of
# DE, so the result table carries fdr = 1 - PPDE and pval = NA.
# Returns list(result, timing) with per-stage runtimes in minutes.
# NOTE: `groups` is constructed but never used; timing variables are
# branch-local, so an unexpected dat$RNAseq value fails later.
.run.EBSeq <- function(dat) {
  groups <- data.frame(Group=factor(dat$designs))
  if (dat$RNAseq=="bulk") {
    start.time.params <- Sys.time()
    # edgeR object kept only for the moment estimates below;
    # EBSeq itself uses median-norm size factors on the raw counts
    dge <- edgeR::DGEList(dat$counts, group = factor(dat$designs))
    dge <- edgeR::calcNormFactors(dge, method='TMM')
    sf <- EBSeq::MedianNorm(dat$counts)
    end.time.params <- Sys.time()
    # run DE detection
    start.time.DE <- Sys.time()
    calc.ebseq <- suppressMessages(EBSeq::EBTest(Data = dat$counts, NgVector = NULL, Conditions = factor(dat$designs), sizeFactors = sf, maxround = 20, Pool = F, NumBin = 1000, ApproxVal = 10^-10, Alpha = NULL, Beta = NULL, PInput = NULL, RInput = NULL, PoolLower = .25, PoolUpper = .75, Print = F, Qtrm = 1,QtrmCut=0))
    # convert posterior probability of DE into an FDR-like score
    fdr <- 1-calc.ebseq$PPDE
    end.time.DE <- Sys.time()
  }
  if (dat$RNAseq=="singlecell") {
    start.time.params <- Sys.time()
    # make sceset and calculate size factors (scran), then convert to edgeR object
    sce <- .scran.calc(cnts = dat$counts)
    dge <- .convertToedgeR(sce)
    dge$samples$group <- factor(dat$designs)
    # scran size factors for EBSeq; clamp non-positive values to the
    # smallest positive one
    sf <- scater::sizeFactors(sce)
    sf[sf<0] <- min(sf[sf > 0])
    end.time.params <- Sys.time()
    # run DE detection (same EBTest settings as the bulk branch)
    start.time.DE <- Sys.time()
    calc.ebseq <- suppressMessages(EBSeq::EBTest(Data = dat$counts, NgVector = NULL, Conditions = factor(dat$designs), sizeFactors = sf, maxround = 20, Pool = F, NumBin = 1000, ApproxVal = 10^-10, Alpha = NULL, Beta = NULL, PInput = NULL, RInput = NULL, PoolLower = .25, PoolUpper = .75, Print = F, Qtrm = 1,QtrmCut=0))
    fdr <- 1-calc.ebseq$PPDE
    end.time.DE <- Sys.time()
  }
  # mean, disp, dropout: method-of-moments NB estimates on normalised counts
  start.time.NB <- Sys.time()
  norm.counts <- dge$counts / dge$samples$norm.factors
  nsamples <- ncol(norm.counts)
  counts0 <- norm.counts == 0
  nn0 <- rowSums(!counts0)
  p0 <- (nsamples - nn0)/nsamples
  means = rowSums(norm.counts)/nsamples
  s2 = rowSums((norm.counts - means)^2)/(nsamples - 1)
  # non-positive NB size estimates (underdispersed genes) become NA
  size = means^2/(s2 - means + 1e-04)
  size = ifelse(size > 0, size, NA)
  dispersion = 1/size
  end.time.NB <- Sys.time()
  # construct result data frame
  result=data.frame(geneIndex=rownames(dat$counts), means=means, dispersion=dispersion, dropout=p0, pval=rep(NA, nrow(dat$counts)), fdr=fdr, stringsAsFactors = F)
  time.taken.params <- difftime(end.time.params, start.time.params, units="mins")
  time.taken.DE <- difftime(end.time.DE, start.time.DE, units="mins")
  time.taken.NB <- difftime(end.time.NB, start.time.NB, units="mins")
  timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
  res <- list(result=result, timing=timing)
  return(res)
}
#' #' @importFrom NBPSeq nbp.test
#' #' @importFrom edgeR DGEList calcNormFactors
#' .run.NBPSeq <- function(dat) {
#'
#' dge <- edgeR::DGEList(counts=dat$counts, group=factor(dat$designs))
#' if (dat$RNAseq=="bulk") {
#' start.time.params <- Sys.time()
#' dge <- edgeR::calcNormFactors(dge, method='TMM')
#' end.time.params <- Sys.time()
#' start.time.DE <- Sys.time()
#' res <- NBPSeq::nbp.test(counts=dge$counts, grp.ids=dat$designs, grp1=-1, grp2=1, norm.factors = dge$samples$norm.factors, lib.sizes = colSums(dge$counts), model.disp = "NBQ", print.level = 0)
#' end.time.DE <- Sys.time()
#' }
#' if (dat$RNAseq=="singlecell") {
#' # make sceset and calculate size factors
#' start.time.params <- Sys.time()
#' # make sceset and calculate size factors
#' sce <- .scran.calc(cnts = dat$counts)
#' dge <- .convertToedgeR(sce)
#' dge$samples$group <- factor(dat$designs)
#' end.time.params <- Sys.time()
#' start.time.DE <- Sys.time()
#' res <- NBPSeq::nbp.test(counts=dge$counts, grp.ids=dat$designs, grp1=-1, grp2=1, norm.factors = dge$samples$norm.factors, lib.sizes = colSums(dge$counts), model.disp = "NBQ", print.level = 0)
#' end.time.DE <- Sys.time()
#' }
#'
#' # mean, disp, dropout
#' start.time.NB <- Sys.time()
#' norm.counts <- dge$counts / dge$samples$norm.factors
#' nsamples <- ncol(norm.counts)
#' counts0 <- norm.counts == 0
#' nn0 <- rowSums(!counts0)
#' p0 <- (nsamples - nn0)/nsamples
#' means = rowSums(norm.counts)/nsamples
#' s2 = rowSums((norm.counts - means)^2)/(nsamples - 1)
#' size = means^2/(s2 - means + 1e-04)
#' size = ifelse(size > 0, size, NA)
#' dispersion = 1/size
#' end.time.NB <- Sys.time()
#'
#' ## construct results
#' result <- data.frame(geneIndex=rownames(dat$counts), means=means, dispersion=dispersion, dropout=p0, pval=res$pv.alues, fdr=rep(NA, nrow(dat$counts)), stringsAsFactors = F)
#' time.taken.params <- difftime(end.time.params, start.time.params, units="mins")
#' time.taken.DE <- difftime(end.time.DE, start.time.DE, units="mins")
#' time.taken.NB <- difftime(end.time.NB, start.time.NB, units="mins")
#' timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
#' res <- list(result=result, timing=timing)
#' return(res)
#' }
#' #' @importFrom edgeR DGEList calcNormFactors
#' .run.TSPM <- function(dat) {
#'
#' dge <- edgeR::DGEList(counts=dat$counts, group=factor(dat$designs))
#' if (dat$RNAseq=="bulk") {
#' start.time.params <- Sys.time()
#' dge <- edgeR::calcNormFactors(dge)
#' x1 <- ifelse(dat$designs==-1, "A", "B")
#' x0 <- rep(1, times=length(factor(dat$designs)))
#' lib.size <- dge$samples$norm.factors
#' end.time.params <- Sys.time()
#' start.time.DE <- Sys.time()
#' res <- TSPM(dat$counts, x1, x0, lib.size)
#' end.time.DE <- Sys.time()
#' }
#' if (dat$RNAseq=="singlecell") {
#' message("TSPM is developed for bulk RNAseq!")
#' # make sceset and calculate size factors
#' start.time.params <- Sys.time()
#' sce <- .scran.calc(cnts = dat$counts)
#' dge <- .convertToedgeR(sce)
#' dge$samples$group <- factor(dat$designs)
#' x1 <- ifelse(dat$designs==-1, "A", "B")
#' x0 <- rep(1, times=length(factor(dat$designs)))
#' lib.size <- dge$samples$norm.factors
#' end.time.params <- Sys.time()
#' start.time.DE <- Sys.time()
#' res <- TSPM(dat$counts, x1, x0, lib.size)
#' end.time.DE <- Sys.time()
#' }
#' # mean, disp, dropout
#' start.time.NB <- Sys.time()
#' norm.counts <- dge$counts / dge$samples$norm.factors
#' nsamples <- ncol(norm.counts)
#' counts0 <- norm.counts == 0
#' nn0 <- rowSums(!counts0)
#' p0 <- (nsamples - nn0)/nsamples
#' means = rowSums(norm.counts)/nsamples
#' s2 = rowSums((norm.counts - means)^2)/(nsamples - 1)
#' size = means^2/(s2 - means + 1e-04)
#' size = ifelse(size > 0, size, NA)
#' dispersion = 1/size
#' end.time.NB <- Sys.time()
#'
#' ## construct results
#' result <- data.frame(geneIndex=rownames(dat$counts), means=means, dispersion=dispersion, dropout=p0, pval=res$pvalues, fdr=res$padj,stringsAsFactors = F)
#' time.taken.params <- difftime(end.time.params, start.time.params, units="mins")
#' time.taken.DE <- difftime(end.time.DE, start.time.DE, units="mins")
#' time.taken.NB <- difftime(end.time.NB, start.time.NB, units="mins")
#' timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
#' res <- list(result=result, timing=timing)
#' return(res)
#' }
#' @importFrom MAST FromMatrix zlm.SingleCellAssay lrTest
#' @importFrom S4Vectors mcols
#' @importFrom AnnotationDbi as.list
#' @importFrom edgeR DGEList calcNormFactors cpm.DGEList
#' @importFrom data.table data.table
#' @importFrom reshape2 melt
#' @importFrom parallel mclapply
# Run MAST hurdle-model differential expression on one simulated data set.
# `dat` carries: counts (gene x sample matrix), designs (group labels),
# RNAseq ("bulk" or "singlecell"), ncores (NULL for serial execution).
# The model is ~ condition + cngeneson, i.e. the cellular detection rate
# (centred number of detected genes) is used as a covariate.
# Returns list(result, timing): per-gene moment estimates plus hurdle-test
# p-values, and per-stage runtimes in minutes.
.run.MAST <- function(dat) {
  start.time.params <- Sys.time()
  if (dat$RNAseq=="bulk") {
    dge <- edgeR::DGEList(dat$counts, group = factor(dat$designs))
    dge <- edgeR::calcNormFactors(dge)
  }
  if (dat$RNAseq=="singlecell") {
    # make sceset and calculate size factors (scran), then convert to edgeR object
    sce <- .scran.calc(cnts = dat$counts)
    dge <- .convertToedgeR(sce)
    dge$samples$group <- factor(dat$designs)
  }
  # 1. size factor normalised log2(CPM+1) values. Note that the function in scater gave negative values and when cpm.DGEList was allowed to take the log itself all CPMs were nonzero!
  out.cpm <- edgeR::cpm.DGEList(dge, normalized.lib.sizes = T, log = F)
  out.expr <- log2(out.cpm+1)
  # 2.: cell (sample ID, CDR, condition) and gene (gene name) annotation;
  # ngeneson = detected genes per cell, cngeneson = the same value centred
  ids=colnames(out.expr)
  ngeneson=colSums(out.expr>0)
  cngeneson=ngeneson-mean(ngeneson)
  cond=factor(dat$designs)
  cdat <- data.frame(wellKey=ids, ngeneson=ngeneson, cngeneson=cngeneson, condition=cond, stringsAsFactors = F)
  fdat <- data.frame(primerid=rownames(out.expr), stringsAsFactors = F)
  # 3.: construct MAST single cell assay
  sca <- MAST::FromMatrix(class = "SingleCellAssay",
                          exprsArray=out.expr,
                          cData = cdat,
                          fData = fdat)
  end.time.params <- Sys.time()
  # 4.: Model Fit (empirical-Bayes regularised bayesglm hurdle model)
  start.time.DE <- Sys.time()
  if (!is.null(dat$ncores)) {
    # MAST parallelises internally via mc.cores
    options(mc.cores=dat$ncores)
  }
  zlm <- MAST::zlm.SingleCellAssay(~ condition + cngeneson, sca, method = "bayesglm", ebayes = TRUE, ebayesControl = list(method = "MLE", model = "H1"))
  # 5.: LRT on the condition coefficient
  lrt <- MAST::lrTest(zlm, "condition")
  # results table extraction: melt the lrTest array, keep the hurdle-model
  # chi-square p-values (data.table non-standard evaluation on metric/test.type),
  # then re-order rows to match the assay's gene order
  res_gene <- data.table::data.table(reshape2::melt(lrt))
  res_gene_hurdle <- res_gene[metric=="Pr(>Chisq)" & test.type=="hurdle"]
  res <- data.frame(res_gene_hurdle, stringsAsFactors = F)
  res <- res[match(S4Vectors::mcols(sca)$primerid, res$primerid),]
  end.time.DE <- Sys.time()
  # mean, disp, dropout: method-of-moments NB estimates on normalised counts
  start.time.NB <- Sys.time()
  norm.counts <- dge$counts / dge$samples$norm.factors
  nsamples <- ncol(norm.counts)
  counts0 <- norm.counts == 0
  nn0 <- rowSums(!counts0)
  p0 <- (nsamples - nn0)/nsamples
  means = rowSums(norm.counts)/nsamples
  s2 = rowSums((norm.counts - means)^2)/(nsamples - 1)
  # non-positive NB size estimates (underdispersed genes) become NA
  size = means^2/(s2 - means + 1e-04)
  size = ifelse(size > 0, size, NA)
  dispersion = 1/size
  end.time.NB <- Sys.time()
  ## construct results
  result <- data.frame(geneIndex=res$primerid, means=means, dispersion=dispersion, dropout=p0, pval=res$value, fdr=rep(NA, nrow(dat$counts)), stringsAsFactors = F)
  time.taken.params <- difftime(end.time.params, start.time.params, units="mins")
  time.taken.DE <- difftime(end.time.DE, start.time.DE, units="mins")
  time.taken.NB <- difftime(end.time.NB, start.time.NB, units="mins")
  timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
  res <- list(result=result, timing=timing)
  return(res)
}
#' @importFrom scde scde.error.models scde.expression.prior scde.expression.difference
#' @importFrom stats pnorm
# Run SCDE differential expression on one simulated single cell data set.
# `dat` carries: counts (gene x sample matrix), designs (group labels),
# RNAseq (must be "singlecell" — SCDE is single-cell only), ncores (NULL
# for serial execution).  Two-sided p-values are derived from SCDE's Z scores.
# Returns list(result, timing) with per-stage runtimes in minutes.
# BUGFIX over original: names(groups) was assigned colnames(counts), where
# `counts` was an undefined local (resolving to a function from an attached
# package, whose colnames are NULL) — the group vector was silently left
# unnamed.  It now uses colnames(dat$counts).
.run.scde <- function(dat) {
  if (dat$RNAseq=="bulk") {
    stop("scde is only for single cell RNAseq data analysis")
  }
  if (dat$RNAseq=="singlecell") {
    start.time.params <- Sys.time()
    # make sceset and calculate size factors (used only for the moment estimates below)
    sce <- .scran.calc(cnts = dat$counts)
    dge <- .convertToedgeR(sce)
    dge$samples$group <- factor(dat$designs)
    # make group vector, named by cell so SCDE can match cells to groups
    groups <- factor(dat$designs)
    names(groups) <- colnames(dat$counts)
    if (is.null(dat$ncores)) {
      ncores <- 1
    } else {
      ncores <- dat$ncores
    }
    # calculate per-cell error models
    o.ifm <- scde::scde.error.models(counts = dat$counts,
                                     groups = groups,
                                     n.cores = ncores,
                                     min.count.threshold = 1,
                                     threshold.segmentation = TRUE,
                                     save.crossfit.plots = FALSE,
                                     save.model.plots = FALSE,
                                     verbose = 0)
    # estimate gene expression prior
    o.prior <- scde::scde.expression.prior(models = o.ifm,
                                           counts = dat$counts,
                                           length.out = 400,
                                           show.plot = FALSE)
    end.time.params <- Sys.time()
    # run differential expression tests on all genes.
    start.time.DE <- Sys.time()
    ediff <- scde::scde.expression.difference(models=o.ifm, counts=dat$counts, prior=o.prior,
                                              groups = groups,
                                              n.cores = ncores,
                                              n.randomizations = 100,
                                              verbose = 0)
    # two-sided p-value from the (uncorrected) Z score
    pval <- 2 * (1 - stats::pnorm(abs(ediff$Z)))
    end.time.DE <- Sys.time()
    # mean, disp, dropout: method-of-moments NB estimates on normalised counts
    start.time.NB <- Sys.time()
    norm.counts <- dge$counts / dge$samples$norm.factors
    nsamples <- ncol(norm.counts)
    counts0 <- norm.counts == 0
    nn0 <- rowSums(!counts0)
    p0 <- (nsamples - nn0)/nsamples
    means <- rowSums(norm.counts)/nsamples
    s2 <- rowSums((norm.counts - means)^2)/(nsamples - 1)
    size <- means^2/(s2 - means + 1e-04)
    size <- ifelse(size > 0, size, NA)
    dispersion <- 1/size
    end.time.NB <- Sys.time()
    # construct result data frame
    result <- data.frame(geneIndex=rownames(ediff), means=means, dispersion=dispersion, dropout=p0, pval=pval, fdr=rep(NA, nrow(dat$counts)), stringsAsFactors = FALSE)
    time.taken.params <- difftime(end.time.params, start.time.params, units="mins")
    time.taken.DE <- difftime(end.time.DE, start.time.DE, units="mins")
    time.taken.NB <- difftime(end.time.NB, start.time.NB, units="mins")
    timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
    res <- list(result=result, timing=timing)
    return(res)
  }
}
#' @importFrom BPSC BPglm
#' @importFrom edgeR DGEList calcNormFactors cpm.DGEList
#' @importFrom parallel makeCluster stopCluster
#' @importFrom doParallel registerDoParallel
#' @importFrom stats model.matrix
# Run BPSC (beta-Poisson GLM) differential expression on one simulated data set.
# `dat` carries: counts (gene x sample matrix), designs (group labels coded
# -1/1; -1 marks the control group), RNAseq ("bulk" or "singlecell"),
# ncores (NULL for serial execution).  BPglm is run on size factor
# normalised CPM values; the tested coefficient is the group effect (coef 2).
# Returns list(result, timing) with per-stage runtimes in minutes.
# BUGFIX over original: the parallel branch called BPglm() unqualified, which
# failed unless BPSC was attached — it is now called as BPSC::BPglm, matching
# the serial branch.  Cluster teardown is guaranteed via on.exit.
.run.BPSC <- function(dat) {
  start.time.params <- Sys.time()
  if (dat$RNAseq=="bulk") {
    dge <- edgeR::DGEList(dat$counts, group = factor(dat$designs))
    dge <- edgeR::calcNormFactors(dge, method="TMM")
  }
  if (dat$RNAseq=="singlecell") {
    # make sceset and calculate size factors (scran), then convert to edgeR object
    sce <- .scran.calc(cnts = dat$counts)
    dge <- .convertToedgeR(sce)
    dge$samples$group <- factor(dat$designs)
  }
  # size factor normalised CPM values.
  out.cpm <- edgeR::cpm.DGEList(dge, normalized.lib.sizes = TRUE, log = FALSE)
  exprmat <- out.cpm
  group <- dat$designs
  controlIDs <- which(group == -1)
  design.mat <- stats::model.matrix( ~ group)
  coef <- 2
  end.time.params <- Sys.time()
  # DE testing, optionally parallelised via a doParallel backend
  start.time.DE <- Sys.time()
  if (!is.null(dat$ncores)) {
    cl <- parallel::makeCluster(dat$ncores)
    on.exit(parallel::stopCluster(cl), add = TRUE)
    doParallel::registerDoParallel(cl)
    res <- BPSC::BPglm(data = exprmat, controlIds = controlIDs, design = design.mat, coef = coef, useParallel=TRUE)
  } else {
    res <- BPSC::BPglm(data = exprmat, controlIds = controlIDs, design = design.mat, coef = coef)
  }
  end.time.DE <- Sys.time()
  # mean, disp, dropout: method-of-moments NB estimates on normalised counts
  start.time.NB <- Sys.time()
  norm.counts <- dge$counts / dge$samples$norm.factors
  nsamples <- ncol(norm.counts)
  counts0 <- norm.counts == 0
  nn0 <- rowSums(!counts0)
  p0 <- (nsamples - nn0)/nsamples
  means <- rowSums(norm.counts)/nsamples
  s2 <- rowSums((norm.counts - means)^2)/(nsamples - 1)
  size <- means^2/(s2 - means + 1e-04)
  size <- ifelse(size > 0, size, NA)
  dispersion <- 1/size
  end.time.NB <- Sys.time()
  # construct result data frame
  result <- data.frame(geneIndex=rownames(exprmat), means=means, dispersion=dispersion, dropout=p0, pval=res$PVAL, fdr=rep(NA, nrow(dat$counts)), stringsAsFactors = FALSE)
  time.taken.params <- difftime(end.time.params, start.time.params, units="mins")
  time.taken.DE <- difftime(end.time.DE, start.time.DE, units="mins")
  time.taken.NB <- difftime(end.time.NB, start.time.NB, units="mins")
  timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
  res <- list(result=result, timing=timing)
  return(res)
}
#' #' @importFrom monocle newCellDataSet differentialGeneTest
#' #' @importFrom VGAM tobit
#' #' @importFrom edgeR cpm.DGEList
#' #' @importFrom scater sizeFactors
#' #' @importFrom methods new
#' .run.monocle <- function(dat) {
#' if (dat$RNAseq=="bulk") {
#' stop("monocle is only for single cell RNAseq data analysis")
#' }
#' if (dat$RNAseq=="singlecell") {
#' start.time.params <- Sys.time()
#' # make sceset and calculate size factors
#' sce <- .scran.calc(cnts = dat$counts)
#' dge <- .convertToedgeR(sce)
#' dge$samples$group <- factor(dat$designs)
#' out.sf <- scater::sizeFactors(sce)
#' out.sf[out.sf<0] <- min(out.sf[out.sf > 0])
#' out.cpm <- edgeR::cpm.DGEList(dge, normalized.lib.sizes = T, log = F)
#' }
#' # make annotated dataframes for monocle
#' gene.dat <- data.frame(row.names = rownames(dge$counts), biotype=rep("protein_coding", nrow(dge$counts)), num_cells_expressed=rowSums(dge$counts>0))
#' cell.dat <- data.frame(row.names=colnames(dge$counts), Group=dge$samples$group)
#' fd <- new("AnnotatedDataFrame", data = gene.dat)
#' pd <- new("AnnotatedDataFrame", data = cell.dat)
#' ed <- out.cpm
#' # construct cell data set
#' cds <- monocle::newCellDataSet(cellData = ed, phenoData = pd, featureData = fd, expressionFamily = VGAM::tobit())
#' end.time.params <- Sys.time()
#'
#' # run the testing
#' if(!is.null(dat$ncores)) {
#' start.time.DE <- Sys.time()
#' diff_test_res <- monocle::differentialGeneTest(cds, fullModelFormulaStr = "~Group", reducedModelFormulaStr = "~1", relative_expr = FALSE, cores = dat$ncores, verbose = FALSE)
#' }
#' if(is.null(dat$ncores)) {
#' start.time.DE <- Sys.time()
#' diff_test_res <- monocle::differentialGeneTest(cds, fullModelFormulaStr = "~Group", reducedModelFormulaStr = "~1", relative_expr = FALSE, cores = 1, verbose = FALSE)
#' }
#' res <- diff_test_res[match(rownames(dge$counts), rownames(diff_test_res)),]
#' end.time.DE <- Sys.time()
#'
#' # mean, disp, droput
#' start.time.NB <- Sys.time()
#' norm.counts <- dge$counts / dge$samples$norm.factors
#' nsamples <- ncol(norm.counts)
#' counts0 <- norm.counts == 0
#' nn0 <- rowSums(!counts0)
#' p0 <- (nsamples - nn0)/nsamples
#' means = rowSums(norm.counts)/nsamples
#' s2 = rowSums((norm.counts - means)^2)/(nsamples - 1)
#' size = means^2/(s2 - means + 1e-04)
#' size = ifelse(size > 0, size, NA)
#' dispersion = 1/size
#' end.time.NB <- Sys.time()
#'
#' # construct result data frame
#' result=data.frame(geneIndex=rownames(res), means=means, dispersion=dispersion, dropout=p0, pval=res$pval, fdr=rep(NA, nrow(res)), stringsAsFactors = F)
#' time.taken.params <- difftime(end.time.params, start.time.params, units="mins")
#' time.taken.DE <- difftime(end.time.DE, start.time.DE, units="mins")
#' time.taken.NB <- difftime(end.time.NB, start.time.NB, units="mins")
#' timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
#' res <- list(result=result, timing=timing)
#' return(res)
#' }
#' @importFrom scDD scDD
#' @importFrom edgeR cpm.DGEList
#' @importFrom SummarizedExperiment SummarizedExperiment
.run.scDD <- function(dat) {
  # Differential-distribution testing with scDD (single-cell data only).
  #
  # `dat` is a list with at least: counts (gene x cell matrix), designs
  # (group labels, -1/1), RNAseq ("bulk"/"singlecell"), ncores (or NULL).
  # Returns list(result = per-gene data.frame with means/dispersion/dropout/
  # pval/fdr, timing = minutes spent per stage).
  if (dat$RNAseq == "bulk") {
    stop("scDD is only for single cell RNAseq data analysis")
  }
  if (dat$RNAseq == "singlecell") {
    start.time.params <- Sys.time()
    # make sceset and calculate size factors via project helpers
    sce <- .scran.calc(cnts = dat$counts)
    dge <- .convertToedgeR(sce)
    dge$samples$group <- factor(dat$designs)
  }
  # size factor normalised CPM values.
  out.cpm <- edgeR::cpm.DGEList(dge, normalized.lib.sizes = TRUE, log = FALSE)
  # create input data
  exprmat <- out.cpm
  condition <- ifelse(dat$designs == -1, 1, 2)
  cell.dat <- data.frame(row.names = colnames(exprmat), condition = condition)
  SCdat <- SummarizedExperiment::SummarizedExperiment(assays = list('NormCounts' = exprmat), colData = cell.dat)
  end.time.params <- Sys.time()
  # DE testing: parallel when ncores given, serial otherwise (same settings)
  prior <- list(alpha = 0.1, mu0 = 0, s0 = 0.01, a0 = 0.01, b0 = 0.01)
  start.time.DE <- Sys.time()
  if (!is.null(dat$ncores)) {
    res.tmp <- scDD::scDD(SCdat, prior_param = prior, permutations = 0,
                          testZeroes = FALSE, adjust.perms = FALSE,
                          param = BiocParallel::MulticoreParam(dat$ncores),
                          parallelBy = "Genes", condition = "condition")
  } else {
    res.tmp <- scDD::scDD(SCdat, prior_param = prior, permutations = 0,
                          testZeroes = FALSE, adjust.perms = FALSE,
                          parallelBy = "Genes", condition = "condition")
  }
  # BUG FIX: the serial branch previously overwrote end.time.params instead of
  # setting end.time.DE, leaving end.time.DE undefined and both timings wrong.
  end.time.DE <- Sys.time()
  res <- res.tmp$Genes
  # mean, dispersion, dropout: method-of-moments NB estimates on normalised counts
  start.time.NB <- Sys.time()
  norm.counts <- dge$counts / dge$samples$norm.factors
  nsamples <- ncol(norm.counts)
  counts0 <- norm.counts == 0
  nn0 <- rowSums(!counts0)
  p0 <- (nsamples - nn0) / nsamples          # per-gene dropout rate
  means <- rowSums(norm.counts) / nsamples
  s2 <- rowSums((norm.counts - means)^2) / (nsamples - 1)
  size <- means^2 / (s2 - means + 1e-04)     # NB size via moments
  size <- ifelse(size > 0, size, NA)         # underdispersed genes -> NA
  dispersion <- 1 / size
  end.time.NB <- Sys.time()
  # construct result data frame
  result <- data.frame(geneIndex = as.character(res$gene), means = means,
                       dispersion = dispersion, dropout = p0,
                       pval = res$nonzero.pvalue, fdr = rep(NA, nrow(dat$counts)),
                       stringsAsFactors = FALSE)
  time.taken.params <- difftime(end.time.params, start.time.params, units = "mins")
  time.taken.DE <- difftime(end.time.DE, start.time.DE, units = "mins")
  time.taken.NB <- difftime(end.time.NB, start.time.NB, units = "mins")
  timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
  list(result = result, timing = timing)
}
# TODO: Do a system call since D3E is written in python
#' @importFrom edgeR DGEList calcNormFactors estimateDisp glmFit glmLRT topTags
#' @importFrom stats model.matrix
.run.edgeRglm <- function(dat) {
  # Differential expression with edgeR's likelihood-ratio test (glmFit/glmLRT).
  #
  # `dat` is a list with at least: counts (gene x sample matrix), designs
  # (group labels, -1/1), RNAseq ("bulk"/"singlecell").
  # Returns list(result = per-gene data.frame, timing = minutes per stage).
  start.time.params <- Sys.time()
  ## run edgeR
  dge <- edgeR::DGEList(counts = dat$counts, group = factor(dat$designs))
  if (dat$RNAseq == "bulk") {
    dge <- edgeR::calcNormFactors(dge, method = 'TMM')
  }
  if (dat$RNAseq == "singlecell") {
    # make sceset and calculate size factors via project helpers
    sce <- .scran.calc(cnts = dat$counts)
    dge <- .convertToedgeR(sce)
    dge$samples$group <- factor(dat$designs)
  }
  # DE testing
  design.mat <- stats::model.matrix(~ dat$designs)
  dge <- edgeR::estimateDisp(y = dge, design = design.mat, robust = TRUE)
  end.time.params <- Sys.time()
  start.time.DE <- Sys.time()
  fit.edgeR <- edgeR::glmFit(dge, design = design.mat)
  lrt.edgeR <- edgeR::glmLRT(fit.edgeR)
  res.edgeR <- edgeR::topTags(lrt.edgeR, adjust.method = "BH", n = Inf, sort.by = 'none')
  end.time.DE <- Sys.time()
  # mean, dispersion, dropout (edgeR's tagwise dispersion, counts scaled by
  # the norm factors)
  start.time.NB <- Sys.time()
  means <- rowMeans(dge$counts / dge$samples$norm.factors)
  dispersion <- dge$tagwise.dispersion
  nsamples <- ncol(dge$counts)
  counts0 <- dge$counts == 0
  nn0 <- rowSums(!counts0)
  p0 <- (nsamples - nn0) / nsamples
  end.time.NB <- Sys.time()
  ## construct results
  result <- data.frame(geneIndex = rownames(res.edgeR$table), means = means,
                       dispersion = dispersion, dropout = p0,
                       pval = res.edgeR$table$PValue,
                       fdr = rep(NA, nrow(res.edgeR$table)),
                       stringsAsFactors = FALSE)
  time.taken.params <- difftime(end.time.params, start.time.params, units = "mins")
  time.taken.DE <- difftime(end.time.DE, start.time.DE, units = "mins")
  time.taken.NB <- difftime(end.time.NB, start.time.NB, units = "mins")
  timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
  list(result = result, timing = timing)
}
#' #' @importFrom edgeR DGEList calcNormFactors estimateDisp glmFit glmLRT topTags
#' #' @importFrom stats model.matrix
#' .run.edgeRql <- function(dat) {
#' start.time.params <- Sys.time()
#' ## run edgeR
#' dge <- edgeR::DGEList(counts=dat$counts, group=factor(dat$designs))
#' if (dat$RNAseq=="bulk") {
#' dge <- edgeR::calcNormFactors(dge)
#' }
#' if (dat$RNAseq=="singlecell") {
#' # make sceset and calculate size factors
#' sce <- .scran.calc(cnts = dat$counts)
#' dge <- .convertToedgeR(sce)
#' dge$samples$group <- factor(dat$designs)
#' }
#'
#' # DE testing
#' design.mat <- stats::model.matrix(~ dat$designs)
#' dge <- edgeR::estimateDisp(y=dge, design = design.mat)
#' end.time.params <- Sys.time()
#' start.time.DE <- Sys.time()
#' fit.edgeR <- edgeR::glmQLFit(dge, design = design.mat, robust=T)
#' Ftest.edgeR <- edgeR::glmQLFTest(fit.edgeR)
#' res.edgeR <- edgeR::topTags(Ftest.edgeR, adjust.method="BH", n=Inf, sort.by = 'none')
#' end.time.DE <- Sys.time()
#'
#' # mean, disp, dropout
#' start.time.NB <- Sys.time()
#' means <- rowMeans(dge$counts / dge$samples$norm.factors)
#' dispersion <- dge$tagwise.dispersion
#' nsamples <- ncol(dge$counts)
#' counts0 <- dge$counts == 0
#' nn0 <- rowSums(!counts0)
#' p0 <- (nsamples - nn0)/nsamples
#' end.time.NB <- Sys.time()
#'
#' ## construct results
#' result <- data.frame(geneIndex=rownames(res.edgeR$table), means=means, dispersion=dispersion, dropout=p0, pval=res.edgeR$table$PValue, fdr=rep(NA, nrow(res.edgeR$table)), stringsAsFactors = F)
#' time.taken.params <- difftime(end.time.params, start.time.params, units="mins")
#' time.taken.DE <- difftime(end.time.DE, start.time.DE, units="mins")
#' time.taken.NB <- difftime(end.time.NB, start.time.NB, units="mins")
#' timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
#' res <- list(result=result, timing=timing)
#' return(res)
#' }
#' @importFrom limma lmFit eBayes voom topTable
#' @importFrom edgeR DGEList calcNormFactors
#' @importFrom stats model.matrix
.run.limma <- function(dat) {
  # limma-voom DE pipeline; eBayes uses dat$p.DE as the prior proportion of
  # differentially expressed genes.
  #
  # Returns list(result = per-gene data.frame, timing = minutes per stage).
  start.time.params <- Sys.time()
  dge <- edgeR::DGEList(counts = dat$counts, group = factor(dat$designs))
  if (dat$RNAseq == "bulk") {
    dge <- edgeR::calcNormFactors(dge)
  }
  if (dat$RNAseq == "singlecell") {
    # make sceset and calculate size factors via project helpers
    sce <- .scran.calc(cnts = dat$counts)
    dge <- .convertToedgeR(sce)
    dge$samples$group <- factor(dat$designs)
  }
  # linear model fit on voom-transformed counts
  p.DE <- dat$p.DE
  design.mat <- stats::model.matrix(~ dat$designs)
  v <- limma::voom(dge, design.mat, plot = FALSE)
  end.time.params <- Sys.time()
  start.time.DE <- Sys.time()
  fit <- limma::lmFit(object = v, design = design.mat)
  fit <- limma::eBayes(fit, proportion = p.DE, robust = TRUE)
  resT <- limma::topTable(fit = fit, coef = 2, number = Inf, adjust.method = "BH", sort.by = "none")
  end.time.DE <- Sys.time()
  # mean, dispersion, dropout: method-of-moments NB estimates
  start.time.NB <- Sys.time()
  norm.counts <- dge$counts / dge$samples$norm.factors
  nsamples <- ncol(norm.counts)
  counts0 <- norm.counts == 0
  nn0 <- rowSums(!counts0)
  p0 <- (nsamples - nn0) / nsamples          # per-gene dropout rate
  means <- rowSums(norm.counts) / nsamples
  s2 <- rowSums((norm.counts - means)^2) / (nsamples - 1)
  size <- means^2 / (s2 - means + 1e-04)
  size <- ifelse(size > 0, size, NA)         # underdispersed genes -> NA
  dispersion <- 1 / size
  end.time.NB <- Sys.time()
  ## construct results
  result <- data.frame(geneIndex = rownames(resT), means = means,
                       dispersion = dispersion, dropout = p0,
                       pval = resT$P.Value, fdr = rep(NA, nrow(resT)),
                       stringsAsFactors = FALSE)
  time.taken.params <- difftime(end.time.params, start.time.params, units = "mins")
  time.taken.DE <- difftime(end.time.DE, start.time.DE, units = "mins")
  time.taken.NB <- difftime(end.time.NB, start.time.NB, units = "mins")
  timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
  list(result = result, timing = timing)
}
#' @importFrom DESeq2 DESeqDataSetFromMatrix estimateSizeFactors DESeq sizeFactors results
#' @importFrom BiocParallel MulticoreParam
#' @importFrom scater sizeFactors
#' @importFrom stats model.matrix
.run.DESeq2 <- function(dat) {
  # DE testing with DESeq2's Wald test. For single-cell data, scran-derived
  # size factors replace DESeq2's own estimates; negative factors are clamped
  # to the smallest positive one since DESeq2 requires positive size factors.
  #
  # Returns list(result = per-gene data.frame, timing = minutes per stage).
  start.time.params <- Sys.time()
  coldat <- data.frame(design = factor(dat$designs))
  ## run DESeq2
  dds <- DESeq2::DESeqDataSetFromMatrix(dat$counts, coldat, ~design, tidy = FALSE, ignoreRank = FALSE)
  if (dat$RNAseq == "bulk") {
    dds <- DESeq2::estimateSizeFactors(dds)
  }
  if (dat$RNAseq == "singlecell") {
    # make sceset and calculate size factors via project helpers
    sce <- .scran.calc(cnts = dat$counts)
    out.sf <- scater::sizeFactors(sce)
    out.sf[out.sf < 0] <- min(out.sf[out.sf > 0])
    DESeq2::sizeFactors(dds) <- out.sf
  }
  end.time.params <- Sys.time()
  start.time.DE <- Sys.time()
  if (is.null(dat$ncores)) {
    fit.DeSeq <- DESeq2::DESeq(dds, test = "Wald", quiet = TRUE, parallel = FALSE)
  } else {
    fit.DeSeq <- DESeq2::DESeq(dds, test = "Wald", quiet = TRUE, parallel = TRUE,
                               BPPARAM = BiocParallel::MulticoreParam(dat$ncores))
  }
  res.DeSeq <- DESeq2::results(fit.DeSeq)
  end.time.DE <- Sys.time()
  # mean, dispersion, dropout taken from DESeq2's per-gene metadata
  start.time.NB <- Sys.time()
  means <- as.vector(S4Vectors::mcols(fit.DeSeq)[, "baseMean"])
  dispersion <- as.vector(S4Vectors::mcols(fit.DeSeq)[, "dispGeneEst"])
  # namespace the counts() accessor explicitly; the previous bare call only
  # resolved when DESeq2/BiocGenerics happened to be attached
  nsamples <- ncol(DESeq2::counts(dds))
  counts0 <- DESeq2::counts(dds) == 0
  nn0 <- rowSums(!counts0)
  p0 <- (nsamples - nn0) / nsamples
  end.time.NB <- Sys.time()
  ## construct results
  result <- data.frame(geneIndex = rownames(res.DeSeq), means = means,
                       dispersion = dispersion, dropout = p0,
                       pval = res.DeSeq$pvalue, fdr = rep(NA, nrow(dat$counts)),
                       stringsAsFactors = FALSE)
  time.taken.params <- difftime(end.time.params, start.time.params, units = "mins")
  time.taken.DE <- difftime(end.time.DE, start.time.DE, units = "mins")
  time.taken.NB <- difftime(end.time.NB, start.time.NB, units = "mins")
  timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
  list(result = result, timing = timing)
}
#' @importFrom edgeR DGEList calcNormFactors cpm.DGEList
#' @importFrom ROTS ROTS
.run.ROTS <- function(dat) {
  # Reproducibility-optimised test statistic (ROTS) on log2(CPM + 1) values.
  #
  # Returns list(result = per-gene data.frame, timing = minutes per stage).
  start.time.params <- Sys.time()
  if (dat$RNAseq == "bulk") {
    dge <- edgeR::DGEList(dat$counts, group = factor(dat$designs))
    dge <- edgeR::calcNormFactors(dge, method = 'TMM')
  }
  if (dat$RNAseq == "singlecell") {
    # make sceset and calculate size factors via project helpers
    sce <- .scran.calc(cnts = dat$counts)
    dge <- .convertToedgeR(sce)
    dge$samples$group <- factor(dat$designs)
  }
  # size factor normalised log2(CPM+1) values. Note that the function in scater
  # gave negative values and when cpm.DGEList was allowed to take the log itself
  # all CPMs were nonzero!
  out.cpm <- edgeR::cpm.DGEList(dge, normalized.lib.sizes = TRUE, log = FALSE)
  out.expr <- log2(out.cpm + 1)
  end.time.params <- Sys.time()
  # mean, dispersion, dropout: method-of-moments NB estimates
  start.time.NB <- Sys.time()
  norm.counts <- dge$counts / dge$samples$norm.factors
  nsamples <- ncol(norm.counts)
  counts0 <- norm.counts == 0
  nn0 <- rowSums(!counts0)
  p0 <- (nsamples - nn0) / nsamples          # per-gene dropout rate
  means <- rowSums(norm.counts) / nsamples
  s2 <- rowSums((norm.counts - means)^2) / (nsamples - 1)
  size <- means^2 / (s2 - means + 1e-04)
  size <- ifelse(size > 0, size, NA)         # underdispersed genes -> NA
  dispersion <- 1 / size
  end.time.NB <- Sys.time()
  start.time.DE <- Sys.time()
  # run ROTS (B bootstraps; K top genes capped at half the gene count)
  res <- ROTS::ROTS(data = out.expr, groups = factor(dat$designs), B = 50, K = floor(nrow(out.expr) / 2), progress = FALSE)
  end.time.DE <- Sys.time()
  # construct result data frame
  result <- data.frame(geneIndex = rownames(res$data), means = means,
                       dispersion = dispersion, dropout = p0,
                       pval = res$pvalue, fdr = rep(NA, nrow(dat$counts)),
                       stringsAsFactors = FALSE)
  time.taken.params <- difftime(end.time.params, start.time.params, units = "mins")
  time.taken.DE <- difftime(end.time.DE, start.time.DE, units = "mins")
  time.taken.NB <- difftime(end.time.NB, start.time.NB, units = "mins")
  timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
  list(result = result, timing = timing)
}
#' @importFrom edgeR DGEList calcNormFactors cpm.DGEList
#' @importFrom snow makeCluster stopCluster
#' @importMethodsFrom baySeq libsizes
#' @importFrom baySeq getPriors.NB getLikelihoods topCounts
.run.baySeq <- function(dat) {
  # Empirical Bayes DE detection with baySeq. baySeq reports FDR (FDR.DE),
  # so the pval column stays NA.
  #
  # Returns list(result = per-gene data.frame, timing = minutes per stage).
  start.time.params <- Sys.time()
  if (dat$RNAseq == "bulk") {
    dge <- edgeR::DGEList(dat$counts, group = factor(dat$designs))
    dge <- edgeR::calcNormFactors(dge)
  }
  if (dat$RNAseq == "singlecell") {
    # make sceset and calculate size factors via project helpers
    sce <- .scran.calc(cnts = dat$counts)
    dge <- .convertToedgeR(sce)
    dge$samples$group <- factor(dat$designs)
  }
  # optional snow cluster; on.exit guarantees teardown even if baySeq errors
  # (previously the cluster leaked on error)
  cl <- NULL
  if (!is.null(dat$ncores)) {
    cl <- snow::makeCluster(dat$ncores)
    on.exit(snow::stopCluster(cl), add = TRUE)
  }
  # model groups: NDE = all samples in one group, DE = the two-group design
  replicates <- ifelse(dat$designs == -1, "A", "B")
  groups <- list(NDE = c(rep(1, length(dat$designs))),
                 DE = c(ifelse(dat$designs == -1, 1, 2)))
  CD <- new("countData", data = dge$counts, replicates = replicates, groups = groups)
  # fill in library size factors
  CD@sampleObservables$libsizes <- dge$samples$norm.factors * dge$samples$lib.size
  CD@annotation <- data.frame(name = rownames(dge$counts), stringsAsFactors = FALSE)
  # run prior estimation
  CD <- baySeq::getPriors.NB(CD, samplesize = nrow(dge$counts), estimation = "QL", cl = cl, equalDispersions = TRUE, verbose = FALSE)
  end.time.params <- Sys.time()
  start.time.DE <- Sys.time()
  # run likelihood ratio test
  CD <- baySeq::getLikelihoods(CD, cl = cl, bootStraps = 10, verbose = FALSE)
  # get test results; topCounts sorts by likelihood, so restore input order
  res <- baySeq::topCounts(cD = CD, group = "DE", decreasing = FALSE, number = Inf, normaliseData = FALSE)
  res <- res[match(CD@annotation$name, res$annotation), ]
  end.time.DE <- Sys.time()
  # mean, dispersion, dropout: method-of-moments NB estimates
  start.time.NB <- Sys.time()
  norm.counts <- dge$counts / dge$samples$norm.factors
  nsamples <- ncol(norm.counts)
  counts0 <- norm.counts == 0
  nn0 <- rowSums(!counts0)
  p0 <- (nsamples - nn0) / nsamples          # per-gene dropout rate
  means <- rowSums(norm.counts) / nsamples
  s2 <- rowSums((norm.counts - means)^2) / (nsamples - 1)
  size <- means^2 / (s2 - means + 1e-04)
  size <- ifelse(size > 0, size, NA)         # underdispersed genes -> NA
  dispersion <- 1 / size
  end.time.NB <- Sys.time()
  # construct result data frame
  result <- data.frame(geneIndex = res$annotation, means = means,
                       dispersion = dispersion, dropout = p0,
                       pval = rep(NA, nrow(dat$counts)), fdr = res$FDR.DE,
                       stringsAsFactors = FALSE)
  time.taken.params <- difftime(end.time.params, start.time.params, units = "mins")
  time.taken.DE <- difftime(end.time.DE, start.time.DE, units = "mins")
  time.taken.NB <- difftime(end.time.NB, start.time.NB, units = "mins")
  timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
  list(result = result, timing = timing)
}
#' @importFrom NOISeq readData noiseqbio
#' @importFrom edgeR DGEList calcNormFactors cpm.DGEList
.run.NOISeq <- function(dat) {
  # NOISeqBIO DE detection. NOISeq reports posterior probabilities, so the
  # fdr column is 1 - prob and pval stays NA.
  #
  # Returns list(result = per-gene data.frame, timing = minutes per stage).
  start.time.params <- Sys.time()
  groups <- data.frame(Group = factor(dat$designs))
  if (dat$RNAseq == "bulk") {
    dge <- edgeR::DGEList(dat$counts, group = factor(dat$designs))
    dge <- edgeR::calcNormFactors(dge, method = "TMM")
    # raw counts go in; noiseqbio applies TMM itself
    in.noiseq <- NOISeq::readData(data = dat$counts, factors = groups)
    norm.method <- "tmm"
  }
  if (dat$RNAseq == "singlecell") {
    # make sceset and calculate size factors via project helpers
    sce <- .scran.calc(cnts = dat$counts)
    dge <- .convertToedgeR(sce)
    dge$samples$group <- factor(dat$designs)
    # size factor normalised CPM values are already normalised -> norm = "n"
    out.cpm <- edgeR::cpm.DGEList(dge, normalized.lib.sizes = TRUE, log = FALSE)
    in.noiseq <- NOISeq::readData(data = out.cpm, factors = groups)
    norm.method <- "n"
  }
  end.time.params <- Sys.time()
  start.time.DE <- Sys.time()
  # run DE detection; identical settings for both data types except `norm`
  # (previously the whole call was duplicated in both branches)
  calc.noiseq <- NOISeq::noiseqbio(in.noiseq, k = NULL, norm = norm.method, nclust = 15, plot = FALSE,
                                   factor = "Group", conditions = NULL, lc = 0, r = 50, adj = 1.5,
                                   a0per = 0.9, filter = 0)
  res <- calc.noiseq@results[[1]]
  res$fdr <- 1 - res$prob
  end.time.DE <- Sys.time()
  # mean, dispersion, dropout: method-of-moments NB estimates
  start.time.NB <- Sys.time()
  norm.counts <- dge$counts / dge$samples$norm.factors
  nsamples <- ncol(norm.counts)
  counts0 <- norm.counts == 0
  nn0 <- rowSums(!counts0)
  p0 <- (nsamples - nn0) / nsamples          # per-gene dropout rate
  means <- rowSums(norm.counts) / nsamples
  s2 <- rowSums((norm.counts - means)^2) / (nsamples - 1)
  size <- means^2 / (s2 - means + 1e-04)
  size <- ifelse(size > 0, size, NA)         # underdispersed genes -> NA
  dispersion <- 1 / size
  end.time.NB <- Sys.time()
  # construct result data frame
  result <- data.frame(geneIndex = rownames(res), means = means,
                       dispersion = dispersion, dropout = p0,
                       pval = rep(NA, nrow(res)), fdr = res$fdr,
                       stringsAsFactors = FALSE)
  time.taken.params <- difftime(end.time.params, start.time.params, units = "mins")
  time.taken.DE <- difftime(end.time.DE, start.time.DE, units = "mins")
  time.taken.NB <- difftime(end.time.NB, start.time.NB, units = "mins")
  timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
  list(result = result, timing = timing)
}
#' @importFrom DSS newSeqCountSet estNormFactors estDispersion waldTest
#' @importFrom splines ns
#' @importFrom edgeR DGEList calcNormFactors
#' @importFrom scater sizeFactors
.run.DSS <- function(dat) {
  # DSS Wald test for a two-group comparison (groups recoded to 0/1).
  #
  # Returns list(result = per-gene data.frame, timing = minutes per stage).
  start.time.params <- Sys.time()
  # DSS wants a bare count matrix (no dimnames) and a 0/1 design vector
  designs <- ifelse(dat$designs == -1, 0, 1)
  cd <- dat$counts
  rownames(cd) <- NULL
  colnames(cd) <- NULL
  seqData <- DSS::newSeqCountSet(counts = cd, designs = designs)
  if (dat$RNAseq == "bulk") {
    # edgeR object only feeds the NB moment estimates below
    dge <- edgeR::DGEList(dat$counts, group = factor(dat$designs))
    dge <- edgeR::calcNormFactors(dge)
    seqData <- DSS::estNormFactors(seqData)
  }
  if (dat$RNAseq == "singlecell") {
    # make sceset and calculate size factors via project helpers
    sce <- .scran.calc(cnts = dat$counts)
    dge <- .convertToedgeR(sce)
    dge$samples$group <- factor(dat$designs)
    # inject scran size factors; clamp negatives to smallest positive value
    out.sf <- scater::sizeFactors(sce)
    out.sf[out.sf < 0] <- min(out.sf[out.sf > 0])
    seqData@normalizationFactor <- out.sf
  }
  seqData <- DSS::estDispersion(seqData)
  end.time.params <- Sys.time()
  # run DE detection (previously duplicated verbatim in both branches)
  start.time.DE <- Sys.time()
  res.dss <- suppressWarnings(DSS::waldTest(seqData = seqData,
                                            sampleA = 0, sampleB = 1))
  res.dss <- res.dss[order(res.dss$geneIndex), ]   # restore input gene order
  pval <- res.dss$pval
  end.time.DE <- Sys.time()
  # mean, dispersion, dropout: method-of-moments NB estimates
  start.time.NB <- Sys.time()
  norm.counts <- dge$counts / dge$samples$norm.factors
  nsamples <- ncol(norm.counts)
  counts0 <- norm.counts == 0
  nn0 <- rowSums(!counts0)
  p0 <- (nsamples - nn0) / nsamples          # per-gene dropout rate
  means <- rowSums(norm.counts) / nsamples
  s2 <- rowSums((norm.counts - means)^2) / (nsamples - 1)
  size <- means^2 / (s2 - means + 1e-04)
  size <- ifelse(size > 0, size, NA)         # underdispersed genes -> NA
  dispersion <- 1 / size
  end.time.NB <- Sys.time()
  # construct result data frame
  result <- data.frame(geneIndex = rownames(dat$counts), means = means,
                       dispersion = dispersion, dropout = p0,
                       pval = pval, fdr = rep(NA, nrow(dat$counts)),
                       stringsAsFactors = FALSE)
  time.taken.params <- difftime(end.time.params, start.time.params, units = "mins")
  time.taken.DE <- difftime(end.time.DE, start.time.DE, units = "mins")
  time.taken.NB <- difftime(end.time.NB, start.time.NB, units = "mins")
  timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
  list(result = result, timing = timing)
}
#' @importFrom EBSeq MedianNorm EBTest
#' @importFrom edgeR DGEList calcNormFactors
#' @importFrom scater sizeFactors
.run.EBSeq <- function(dat) {
  # EBSeq empirical Bayes DE test. Reports fdr = 1 - PPDE (posterior
  # probability of DE); pval stays NA.
  #
  # Returns list(result = per-gene data.frame, timing = minutes per stage).
  start.time.params <- Sys.time()
  if (dat$RNAseq == "bulk") {
    dge <- edgeR::DGEList(dat$counts, group = factor(dat$designs))
    dge <- edgeR::calcNormFactors(dge, method = 'TMM')
    sf <- EBSeq::MedianNorm(dat$counts)
  }
  if (dat$RNAseq == "singlecell") {
    # make sceset and calculate size factors via project helpers
    sce <- .scran.calc(cnts = dat$counts)
    dge <- .convertToedgeR(sce)
    dge$samples$group <- factor(dat$designs)
    sf <- scater::sizeFactors(sce)
    sf[sf < 0] <- min(sf[sf > 0])   # EBSeq needs positive size factors
  }
  end.time.params <- Sys.time()
  # run DE detection (previously duplicated verbatim in both branches;
  # only the size factors differ between data types)
  start.time.DE <- Sys.time()
  calc.ebseq <- suppressMessages(EBSeq::EBTest(Data = dat$counts, NgVector = NULL, Conditions = factor(dat$designs), sizeFactors = sf, maxround = 20, Pool = FALSE, NumBin = 1000, ApproxVal = 10^-10, Alpha = NULL, Beta = NULL, PInput = NULL, RInput = NULL, PoolLower = .25, PoolUpper = .75, Print = FALSE, Qtrm = 1, QtrmCut = 0))
  fdr <- 1 - calc.ebseq$PPDE
  end.time.DE <- Sys.time()
  # mean, dispersion, dropout: method-of-moments NB estimates
  start.time.NB <- Sys.time()
  norm.counts <- dge$counts / dge$samples$norm.factors
  nsamples <- ncol(norm.counts)
  counts0 <- norm.counts == 0
  nn0 <- rowSums(!counts0)
  p0 <- (nsamples - nn0) / nsamples          # per-gene dropout rate
  means <- rowSums(norm.counts) / nsamples
  s2 <- rowSums((norm.counts - means)^2) / (nsamples - 1)
  size <- means^2 / (s2 - means + 1e-04)
  size <- ifelse(size > 0, size, NA)         # underdispersed genes -> NA
  dispersion <- 1 / size
  end.time.NB <- Sys.time()
  # construct result data frame
  result <- data.frame(geneIndex = rownames(dat$counts), means = means,
                       dispersion = dispersion, dropout = p0,
                       pval = rep(NA, nrow(dat$counts)), fdr = fdr,
                       stringsAsFactors = FALSE)
  time.taken.params <- difftime(end.time.params, start.time.params, units = "mins")
  time.taken.DE <- difftime(end.time.DE, start.time.DE, units = "mins")
  time.taken.NB <- difftime(end.time.NB, start.time.NB, units = "mins")
  timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
  list(result = result, timing = timing)
}
#' #' @importFrom NBPSeq nbp.test
#' #' @importFrom edgeR DGEList calcNormFactors
#' .run.NBPSeq <- function(dat) {
#'
#' dge <- edgeR::DGEList(counts=dat$counts, group=factor(dat$designs))
#' if (dat$RNAseq=="bulk") {
#' start.time.params <- Sys.time()
#' dge <- edgeR::calcNormFactors(dge, method='TMM')
#' end.time.params <- Sys.time()
#' start.time.DE <- Sys.time()
#' res <- NBPSeq::nbp.test(counts=dge$counts, grp.ids=dat$designs, grp1=-1, grp2=1, norm.factors = dge$samples$norm.factors, lib.sizes = colSums(dge$counts), model.disp = "NBQ", print.level = 0)
#' end.time.DE <- Sys.time()
#' }
#' if (dat$RNAseq=="singlecell") {
#' # make sceset and calculate size factors
#' start.time.params <- Sys.time()
#' # make sceset and calculate size factors
#' sce <- .scran.calc(cnts = dat$counts)
#' dge <- .convertToedgeR(sce)
#' dge$samples$group <- factor(dat$designs)
#' end.time.params <- Sys.time()
#' start.time.DE <- Sys.time()
#' res <- NBPSeq::nbp.test(counts=dge$counts, grp.ids=dat$designs, grp1=-1, grp2=1, norm.factors = dge$samples$norm.factors, lib.sizes = colSums(dge$counts), model.disp = "NBQ", print.level = 0)
#' end.time.DE <- Sys.time()
#' }
#'
#' # mean, disp, dropout
#' start.time.NB <- Sys.time()
#' norm.counts <- dge$counts / dge$samples$norm.factors
#' nsamples <- ncol(norm.counts)
#' counts0 <- norm.counts == 0
#' nn0 <- rowSums(!counts0)
#' p0 <- (nsamples - nn0)/nsamples
#' means = rowSums(norm.counts)/nsamples
#' s2 = rowSums((norm.counts - means)^2)/(nsamples - 1)
#' size = means^2/(s2 - means + 1e-04)
#' size = ifelse(size > 0, size, NA)
#' dispersion = 1/size
#' end.time.NB <- Sys.time()
#'
#' ## construct results
#' result <- data.frame(geneIndex=rownames(dat$counts), means=means, dispersion=dispersion, dropout=p0, pval=res$pv.alues, fdr=rep(NA, nrow(dat$counts)), stringsAsFactors = F)
#' time.taken.params <- difftime(end.time.params, start.time.params, units="mins")
#' time.taken.DE <- difftime(end.time.DE, start.time.DE, units="mins")
#' time.taken.NB <- difftime(end.time.NB, start.time.NB, units="mins")
#' timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
#' res <- list(result=result, timing=timing)
#' return(res)
#' }
#' #' @importFrom edgeR DGEList calcNormFactors
#' .run.TSPM <- function(dat) {
#'
#' dge <- edgeR::DGEList(counts=dat$counts, group=factor(dat$designs))
#' if (dat$RNAseq=="bulk") {
#' start.time.params <- Sys.time()
#' dge <- edgeR::calcNormFactors(dge)
#' x1 <- ifelse(dat$designs==-1, "A", "B")
#' x0 <- rep(1, times=length(factor(dat$designs)))
#' lib.size <- dge$samples$norm.factors
#' end.time.params <- Sys.time()
#' start.time.DE <- Sys.time()
#' res <- TSPM(dat$counts, x1, x0, lib.size)
#' end.time.DE <- Sys.time()
#' }
#' if (dat$RNAseq=="singlecell") {
#' message("TSPM is developed for bulk RNAseq!")
#' # make sceset and calculate size factors
#' start.time.params <- Sys.time()
#' sce <- .scran.calc(cnts = dat$counts)
#' dge <- .convertToedgeR(sce)
#' dge$samples$group <- factor(dat$designs)
#' x1 <- ifelse(dat$designs==-1, "A", "B")
#' x0 <- rep(1, times=length(factor(dat$designs)))
#' lib.size <- dge$samples$norm.factors
#' end.time.params <- Sys.time()
#' start.time.DE <- Sys.time()
#' res <- TSPM(dat$counts, x1, x0, lib.size)
#' end.time.DE <- Sys.time()
#' }
#' # mean, disp, dropout
#' start.time.NB <- Sys.time()
#' norm.counts <- dge$counts / dge$samples$norm.factors
#' nsamples <- ncol(norm.counts)
#' counts0 <- norm.counts == 0
#' nn0 <- rowSums(!counts0)
#' p0 <- (nsamples - nn0)/nsamples
#' means = rowSums(norm.counts)/nsamples
#' s2 = rowSums((norm.counts - means)^2)/(nsamples - 1)
#' size = means^2/(s2 - means + 1e-04)
#' size = ifelse(size > 0, size, NA)
#' dispersion = 1/size
#' end.time.NB <- Sys.time()
#'
#' ## construct results
#' result <- data.frame(geneIndex=rownames(dat$counts), means=means, dispersion=dispersion, dropout=p0, pval=res$pvalues, fdr=res$padj,stringsAsFactors = F)
#' time.taken.params <- difftime(end.time.params, start.time.params, units="mins")
#' time.taken.DE <- difftime(end.time.DE, start.time.DE, units="mins")
#' time.taken.NB <- difftime(end.time.NB, start.time.NB, units="mins")
#' timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
#' res <- list(result=result, timing=timing)
#' return(res)
#' }
#' @importFrom MAST FromMatrix zlm.SingleCellAssay lrTest
#' @importFrom S4Vectors mcols
#' @importFrom AnnotationDbi as.list
#' @importFrom edgeR DGEList calcNormFactors cpm.DGEList
#' @importFrom data.table data.table
#' @importFrom reshape2 melt
#' @importFrom parallel mclapply
.run.MAST <- function(dat) {
  # MAST hurdle-model DE test on log2(CPM+1) with the centred cellular
  # detection rate (cngeneson) as covariate.
  #
  # Returns list(result = per-gene data.frame, timing = minutes per stage).
  start.time.params <- Sys.time()
  if (dat$RNAseq == "bulk") {
    dge <- edgeR::DGEList(dat$counts, group = factor(dat$designs))
    dge <- edgeR::calcNormFactors(dge)
  }
  if (dat$RNAseq == "singlecell") {
    # make sceset and calculate size factors via project helpers
    sce <- .scran.calc(cnts = dat$counts)
    dge <- .convertToedgeR(sce)
    dge$samples$group <- factor(dat$designs)
  }
  # 1. size factor normalised log2(CPM+1) values. Note that the function in
  # scater gave negative values and when cpm.DGEList was allowed to take the
  # log itself all CPMs were nonzero!
  out.cpm <- edgeR::cpm.DGEList(dge, normalized.lib.sizes = TRUE, log = FALSE)
  out.expr <- log2(out.cpm + 1)
  # 2. cell (sample ID, CDR, condition) and gene (gene name) annotation
  ids <- colnames(out.expr)
  ngeneson <- colSums(out.expr > 0)
  cngeneson <- ngeneson - mean(ngeneson)   # centred detection rate
  cond <- factor(dat$designs)
  cdat <- data.frame(wellKey = ids, ngeneson = ngeneson, cngeneson = cngeneson, condition = cond, stringsAsFactors = FALSE)
  fdat <- data.frame(primerid = rownames(out.expr), stringsAsFactors = FALSE)
  # 3. construct MAST single cell assay
  sca <- MAST::FromMatrix(class = "SingleCellAssay",
                          exprsArray = out.expr,
                          cData = cdat,
                          fData = fdat)
  end.time.params <- Sys.time()
  # 4. model fit
  start.time.DE <- Sys.time()
  if (!is.null(dat$ncores)) {
    # BUG FIX: previously options(mc.cores=...) permanently clobbered the
    # caller's session option; save and restore it on exit
    old.opts <- options(mc.cores = dat$ncores)
    on.exit(options(old.opts), add = TRUE)
  }
  zlm <- MAST::zlm.SingleCellAssay(~ condition + cngeneson, sca, method = "bayesglm", ebayes = TRUE, ebayesControl = list(method = "MLE", model = "H1"))
  # 5. likelihood-ratio test on the condition coefficient
  lrt <- MAST::lrTest(zlm, "condition")
  # results table extraction: keep only hurdle-model chi-square p-values,
  # reordered to the assay's gene order
  res_gene <- data.table::data.table(reshape2::melt(lrt))
  res_gene_hurdle <- res_gene[metric == "Pr(>Chisq)" & test.type == "hurdle"]
  res <- data.frame(res_gene_hurdle, stringsAsFactors = FALSE)
  res <- res[match(S4Vectors::mcols(sca)$primerid, res$primerid), ]
  end.time.DE <- Sys.time()
  # mean, dispersion, dropout: method-of-moments NB estimates
  start.time.NB <- Sys.time()
  norm.counts <- dge$counts / dge$samples$norm.factors
  nsamples <- ncol(norm.counts)
  counts0 <- norm.counts == 0
  nn0 <- rowSums(!counts0)
  p0 <- (nsamples - nn0) / nsamples          # per-gene dropout rate
  means <- rowSums(norm.counts) / nsamples
  s2 <- rowSums((norm.counts - means)^2) / (nsamples - 1)
  size <- means^2 / (s2 - means + 1e-04)
  size <- ifelse(size > 0, size, NA)         # underdispersed genes -> NA
  dispersion <- 1 / size
  end.time.NB <- Sys.time()
  ## construct results
  result <- data.frame(geneIndex = res$primerid, means = means,
                       dispersion = dispersion, dropout = p0,
                       pval = res$value, fdr = rep(NA, nrow(dat$counts)),
                       stringsAsFactors = FALSE)
  time.taken.params <- difftime(end.time.params, start.time.params, units = "mins")
  time.taken.DE <- difftime(end.time.DE, start.time.DE, units = "mins")
  time.taken.NB <- difftime(end.time.NB, start.time.NB, units = "mins")
  timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
  list(result = result, timing = timing)
}
#' @importFrom scde scde.error.models scde.expression.prior scde.expression.difference
#' @importFrom stats pnorm
.run.scde <- function(dat) {
  # scde error-model-based DE testing (single-cell only). Two-sided p-values
  # are derived from the Z score: p = 2 * (1 - pnorm(|Z|)).
  #
  # Returns list(result = per-gene data.frame, timing = minutes per stage).
  if (dat$RNAseq == "bulk") {
    stop("scde is only for single cell RNAseq data analysis")
  }
  if (dat$RNAseq == "singlecell") {
    start.time.params <- Sys.time()
    # make sceset and calculate size factors via project helpers
    sce <- .scran.calc(cnts = dat$counts)
    dge <- .convertToedgeR(sce)
    dge$samples$group <- factor(dat$designs)
    # make group vector
    groups <- factor(dat$designs)
    # BUG FIX: this previously read colnames(counts) -- `counts` resolved to
    # a function in the search path, so the names were silently NULL
    names(groups) <- colnames(dat$counts)
    if (is.null(dat$ncores)) {
      ncores <- 1
    } else {
      ncores <- dat$ncores
    }
    # calculate error models
    o.ifm <- scde::scde.error.models(counts = dat$counts,
                                     groups = groups,
                                     n.cores = ncores,
                                     min.count.threshold = 1,
                                     threshold.segmentation = TRUE,
                                     save.crossfit.plots = FALSE,
                                     save.model.plots = FALSE,
                                     verbose = 0)
    # estimate gene expression prior
    o.prior <- scde::scde.expression.prior(models = o.ifm,
                                           counts = dat$counts,
                                           length.out = 400,
                                           show.plot = FALSE)
    end.time.params <- Sys.time()
    # run differential expression tests on all genes.
    start.time.DE <- Sys.time()
    ediff <- scde::scde.expression.difference(models = o.ifm, counts = dat$counts, prior = o.prior,
                                              groups = groups,
                                              n.cores = ncores,
                                              n.randomizations = 100,
                                              verbose = 0)
    pval <- 2 * (1 - stats::pnorm(abs(ediff$Z)))
    end.time.DE <- Sys.time()
    # mean, dispersion, dropout: method-of-moments NB estimates
    start.time.NB <- Sys.time()
    norm.counts <- dge$counts / dge$samples$norm.factors
    nsamples <- ncol(norm.counts)
    counts0 <- norm.counts == 0
    nn0 <- rowSums(!counts0)
    p0 <- (nsamples - nn0) / nsamples        # per-gene dropout rate
    means <- rowSums(norm.counts) / nsamples
    s2 <- rowSums((norm.counts - means)^2) / (nsamples - 1)
    size <- means^2 / (s2 - means + 1e-04)
    size <- ifelse(size > 0, size, NA)       # underdispersed genes -> NA
    dispersion <- 1 / size
    end.time.NB <- Sys.time()
    result <- data.frame(geneIndex = rownames(ediff), means = means,
                         dispersion = dispersion, dropout = p0,
                         pval = pval, fdr = rep(NA, nrow(dat$counts)),
                         stringsAsFactors = FALSE)
    time.taken.params <- difftime(end.time.params, start.time.params, units = "mins")
    time.taken.DE <- difftime(end.time.DE, start.time.DE, units = "mins")
    time.taken.NB <- difftime(end.time.NB, start.time.NB, units = "mins")
    timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
    res <- list(result = result, timing = timing)
    return(res)
  }
}
#' @importFrom BPSC BPglm
#' @importFrom edgeR DGEList calcNormFactors cpm.DGEList
#' @importFrom parallel makeCluster stopCluster
#' @importFrom doParallel registerDoParallel
#' @importFrom stats model.matrix
.run.BPSC <- function(dat) {
  # Beta-Poisson GLM (BPSC) on size-factor-normalised CPM values.
  #
  # Returns list(result = per-gene data.frame, timing = minutes per stage).
  start.time.params <- Sys.time()
  if (dat$RNAseq == "bulk") {
    dge <- edgeR::DGEList(dat$counts, group = factor(dat$designs))
    dge <- edgeR::calcNormFactors(dge, method = "TMM")
  }
  if (dat$RNAseq == "singlecell") {
    # make sceset and calculate size factors via project helpers
    sce <- .scran.calc(cnts = dat$counts)
    dge <- .convertToedgeR(sce)
    dge$samples$group <- factor(dat$designs)
  }
  # size factor normalised CPM values.
  out.cpm <- edgeR::cpm.DGEList(dge, normalized.lib.sizes = TRUE, log = FALSE)
  exprmat <- out.cpm
  group <- dat$designs
  controlIDs <- which(group == -1)   # cells of the reference group
  design.mat <- stats::model.matrix(~ group)
  coef <- 2                          # test the group coefficient
  end.time.params <- Sys.time()
  start.time.DE <- Sys.time()
  if (!is.null(dat$ncores)) {
    cl <- parallel::makeCluster(dat$ncores)
    doParallel::registerDoParallel(cl)
    # on.exit guarantees worker teardown even if BPglm fails
    # (previously the cluster leaked on error)
    on.exit(parallel::stopCluster(cl), add = TRUE)
    # consistently namespace BPglm (the parallel branch previously relied on
    # the bare name being importFrom'd)
    res <- BPSC::BPglm(data = exprmat, controlIds = controlIDs, design = design.mat, coef = coef, useParallel = TRUE)
  } else {
    res <- BPSC::BPglm(data = exprmat, controlIds = controlIDs, design = design.mat, coef = coef)
  }
  end.time.DE <- Sys.time()
  # mean, dispersion, dropout: method-of-moments NB estimates
  start.time.NB <- Sys.time()
  norm.counts <- dge$counts / dge$samples$norm.factors
  nsamples <- ncol(norm.counts)
  counts0 <- norm.counts == 0
  nn0 <- rowSums(!counts0)
  p0 <- (nsamples - nn0) / nsamples          # per-gene dropout rate
  means <- rowSums(norm.counts) / nsamples
  s2 <- rowSums((norm.counts - means)^2) / (nsamples - 1)
  size <- means^2 / (s2 - means + 1e-04)
  size <- ifelse(size > 0, size, NA)         # underdispersed genes -> NA
  dispersion <- 1 / size
  end.time.NB <- Sys.time()
  # construct result data frame
  result <- data.frame(geneIndex = rownames(exprmat), means = means,
                       dispersion = dispersion, dropout = p0,
                       pval = res$PVAL, fdr = rep(NA, nrow(dat$counts)),
                       stringsAsFactors = FALSE)
  time.taken.params <- difftime(end.time.params, start.time.params, units = "mins")
  time.taken.DE <- difftime(end.time.DE, start.time.DE, units = "mins")
  time.taken.NB <- difftime(end.time.NB, start.time.NB, units = "mins")
  timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
  list(result = result, timing = timing)
}
#' #' @importFrom monocle newCellDataSet differentialGeneTest
#' #' @importFrom VGAM tobit
#' #' @importFrom edgeR cpm.DGEList
#' #' @importFrom scater sizeFactors
#' #' @importFrom methods new
#' .run.monocle <- function(dat) {
#' if (dat$RNAseq=="bulk") {
#' stop("monocle is only for single cell RNAseq data analysis")
#' }
#' if (dat$RNAseq=="singlecell") {
#' start.time.params <- Sys.time()
#' # make sceset and calculate size factors
#' sce <- .scran.calc(cnts = dat$counts)
#' dge <- .convertToedgeR(sce)
#' dge$samples$group <- factor(dat$designs)
#' out.sf <- scater::sizeFactors(sce)
#' out.sf[out.sf<0] <- min(out.sf[out.sf > 0])
#' out.cpm <- edgeR::cpm.DGEList(dge, normalized.lib.sizes = T, log = F)
#' }
#' # make annotated dataframes for monocle
#' gene.dat <- data.frame(row.names = rownames(dge$counts), biotype=rep("protein_coding", nrow(dge$counts)), num_cells_expressed=rowSums(dge$counts>0))
#' cell.dat <- data.frame(row.names=colnames(dge$counts), Group=dge$samples$group)
#' fd <- new("AnnotatedDataFrame", data = gene.dat)
#' pd <- new("AnnotatedDataFrame", data = cell.dat)
#' ed <- out.cpm
#' # construct cell data set
#' cds <- monocle::newCellDataSet(cellData = ed, phenoData = pd, featureData = fd, expressionFamily = VGAM::tobit())
#' end.time.params <- Sys.time()
#'
#' # run the testing
#' if(!is.null(dat$ncores)) {
#' start.time.DE <- Sys.time()
#' diff_test_res <- monocle::differentialGeneTest(cds, fullModelFormulaStr = "~Group", reducedModelFormulaStr = "~1", relative_expr = FALSE, cores = dat$ncores, verbose = FALSE)
#' }
#' if(is.null(dat$ncores)) {
#' start.time.DE <- Sys.time()
#' diff_test_res <- monocle::differentialGeneTest(cds, fullModelFormulaStr = "~Group", reducedModelFormulaStr = "~1", relative_expr = FALSE, cores = 1, verbose = FALSE)
#' }
#' res <- diff_test_res[match(rownames(dge$counts), rownames(diff_test_res)),]
#' end.time.DE <- Sys.time()
#'
#' # mean, disp, droput
#' start.time.NB <- Sys.time()
#' norm.counts <- dge$counts / dge$samples$norm.factors
#' nsamples <- ncol(norm.counts)
#' counts0 <- norm.counts == 0
#' nn0 <- rowSums(!counts0)
#' p0 <- (nsamples - nn0)/nsamples
#' means = rowSums(norm.counts)/nsamples
#' s2 = rowSums((norm.counts - means)^2)/(nsamples - 1)
#' size = means^2/(s2 - means + 1e-04)
#' size = ifelse(size > 0, size, NA)
#' dispersion = 1/size
#' end.time.NB <- Sys.time()
#'
#' # construct result data frame
#' result=data.frame(geneIndex=rownames(res), means=means, dispersion=dispersion, dropout=p0, pval=res$pval, fdr=rep(NA, nrow(res)), stringsAsFactors = F)
#' time.taken.params <- difftime(end.time.params, start.time.params, units="mins")
#' time.taken.DE <- difftime(end.time.DE, start.time.DE, units="mins")
#' time.taken.NB <- difftime(end.time.NB, start.time.NB, units="mins")
#' timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
#' res <- list(result=result, timing=timing)
#' return(res)
#' }
#' @importFrom scDD scDD
#' @importFrom edgeR cpm.DGEList
#' @importFrom SummarizedExperiment SummarizedExperiment
.run.scDD <- function(dat) {
  # Differential distribution testing with scDD (single-cell data only).
  #
  # dat is a list with counts (gene x cell matrix), designs (-1/other group
  # labels), RNAseq ("bulk"/"singlecell") and ncores (NULL = serial).
  # Returns list(result = per-gene data frame, timing = per-stage run times in minutes).
  if (dat$RNAseq == "bulk") {
    stop("scDD is only for single cell RNAseq data analysis")
  }
  if (dat$RNAseq == "singlecell") {
    start.time.params <- Sys.time()
    # make sceset and calculate size factors (scran-based helpers defined elsewhere in this file)
    sce <- .scran.calc(cnts = dat$counts)
    dge <- .convertToedgeR(sce)
    dge$samples$group <- factor(dat$designs)
  }
  # size factor normalised CPM values
  out.cpm <- edgeR::cpm.DGEList(dge, normalized.lib.sizes = TRUE, log = FALSE)
  # create input data: condition 1 = control (-1), condition 2 = treatment
  exprmat <- out.cpm
  condition <- ifelse(dat$designs == -1, 1, 2)
  cell.dat <- data.frame(row.names = colnames(exprmat), condition = condition)
  SCdat <- SummarizedExperiment::SummarizedExperiment(assays = list('NormCounts' = exprmat), colData = cell.dat)
  # SCdat <- Biobase::ExpressionSet(assayData=exprmat, phenoData=as(cell.dat, "AnnotatedDataFrame"))
  end.time.params <- Sys.time()
  # DE testing: parallel when ncores is given, serial otherwise
  if (!is.null(dat$ncores)) {
    start.time.DE <- Sys.time()
    res.tmp <- scDD::scDD(SCdat, prior_param = list(alpha = 0.1, mu0 = 0, s0 = 0.01, a0 = 0.01, b0 = 0.01), permutations = 0, testZeroes = FALSE, adjust.perms = FALSE, param = BiocParallel::MulticoreParam(dat$ncores), parallelBy = "Genes", condition = "condition")
    end.time.DE <- Sys.time()
  } else {
    start.time.DE <- Sys.time()
    res.tmp <- scDD::scDD(SCdat, prior_param = list(alpha = 0.1, mu0 = 0, s0 = 0.01, a0 = 0.01, b0 = 0.01), permutations = 0, testZeroes = FALSE, adjust.perms = FALSE, parallelBy = "Genes", condition = "condition")
    # BUG FIX: this branch previously reassigned end.time.params instead of
    # setting end.time.DE, which left end.time.DE undefined on the serial path
    # and made the difftime() call below fail.
    end.time.DE <- Sys.time()
  }
  res <- res.tmp$Genes
  # moment estimates of per-gene mean, dispersion and dropout
  start.time.NB <- Sys.time()
  # NOTE(review): a matrix divided by a length-ncol vector recycles element-wise
  # down the rows (column-major order); confirm this is the intended scaling.
  norm.counts <- dge$counts / dge$samples$norm.factors
  nsamples <- ncol(norm.counts)
  counts0 <- norm.counts == 0
  nn0 <- rowSums(!counts0)
  p0 <- (nsamples - nn0) / nsamples         # dropout fraction per gene
  means <- rowSums(norm.counts) / nsamples
  s2 <- rowSums((norm.counts - means)^2) / (nsamples - 1)
  # method-of-moments NB size; the small offset guards against division by ~0
  size <- means^2 / (s2 - means + 1e-04)
  size <- ifelse(size > 0, size, NA)
  dispersion <- 1 / size
  end.time.NB <- Sys.time()
  # construct result data frame
  # NOTE(review): assumes scDD returns genes in the same order as the input
  # matrix rows used for means/dispersion/p0 -- confirm.
  result <- data.frame(geneIndex = as.character(res$gene), means = means, dispersion = dispersion, dropout = p0, pval = res$nonzero.pvalue, fdr = rep(NA, nrow(dat$counts)), stringsAsFactors = FALSE)
  time.taken.params <- difftime(end.time.params, start.time.params, units = "mins")
  time.taken.DE <- difftime(end.time.DE, start.time.DE, units = "mins")
  time.taken.NB <- difftime(end.time.NB, start.time.NB, units = "mins")
  timing <- rbind(time.taken.params, time.taken.DE, time.taken.NB)
  res <- list(result = result, timing = timing)
  return(res)
}
# TODO: Do a system call since D3E is written in python
|
# Survival analysis of stomach-cancer expression data.
# Reads an expression matrix and clinical table, draws Kaplan-Meier curves
# overall and stratified by GPX3 expression, and fits a Cox model for GPX3.
DIR <- getwd()
setwd("E:\\Rcode\\data")
library(survival)
library(ggplot2)
library(ggpubr)
library(survminer)
# expression matrix: genes x samples; restore '-' in sample names (read.table turned them into '.')
expr_data <- read.table("expr_all_stomach.txt")
names(expr_data) <- gsub("\\.", "-", names(expr_data))
expr_data_log <- log10(expr_data)
write.table(expr_data_log, "expr_all_stomach_log.txt", sep = "\t")
clin_data <- read.table("clinical_matrix.txt", header = TRUE)
# os_matrix<-read.table("OS_result.matrix.txt",blank.lines.skip=F,header = T)
# time <- as.numeric(os_matrix[,1])
# status <- os_matrix[,2]
# keep patients with overall survival greater than 30 days
dat <- clin_data[clin_data$OS > 30, ]
write.csv(dat, "OS_dat.csv", row.names = FALSE)
table(dat$vital_status)
attach(dat)
ggplot(dat,
       aes(x = OS, group = vital_status, colour = vital_status,
           fill = vital_status
       )) + geom_density(alpha = 0.5)
## Kaplan-Meier survival analysis (original comment was mis-encoded; reconstructed)
my.surv <- Surv(OS, vital_status == 'dead')
## The status indicator, normally 0=alive, 1=dead.
## Other choices are TRUE/FALSE (TRUE = death) or 1/2 (2=death).
kmfit <- survfit(my.surv ~ 1)
summary(kmfit)
plot(kmfit, main = 'Overall Survival', xlab = 'Days', ylab = 'Percent Survival')
detach(dat)
# append per-gene expression (transposed) for the retained patients
dat_expr <- cbind(dat, t(expr_data[, dat$submitter_id]))
# dat_expr <- na.omit(dat_expr)
# dat_expr <- dat_expr[!is.na(dat_expr$vital_status),]
dat_expr$vital_status <- as.character(dat_expr$vital_status)
write.csv(dat_expr, "OS_dat_expr.csv", row.names = FALSE)
attach(dat_expr)
ggplot(dat_expr, aes(x = vital_status, y = GPX3)) + geom_boxplot()
p <- ggboxplot(dat_expr, x = "vital_status", y = "GPX3", color = "vital_status",
               palette = "jco", add = "jitter")
p + stat_compare_means(method = "t.test")
# dichotomise GPX3 expression at its median
GPX3_group <- ifelse(GPX3 > median(GPX3), 'high', 'low')
GPX3_group <- as.factor(GPX3_group)
table(GPX3_group)
kmfit1 <- survfit(my.surv ~ GPX3_group, data = dat_expr)
summary(kmfit1)
plot(kmfit1, col = rainbow(2), main = 'Overall Survival GPX3 ', xlab = 'Days', ylab = 'Percent Survival')
legend("topright", legend = c(levels(GPX3_group)), col = rainbow(2), lwd = 2)
ggsurvplot(kmfit1, conf.int = FALSE, pval = TRUE,
           ggtheme = theme_bw())
ggsurvplot(kmfit1, conf.int = FALSE, pval = TRUE, risk.table = TRUE, ncensor.plot = TRUE, fun = "event",
           ggtheme = theme_bw())
str(dat_expr, no.list = TRUE, vec.len = 2)
# Cox proportional hazards model on continuous GPX3 expression
m <- coxph(my.surv ~ GPX3, data = dat_expr)
ggsurvplot(survfit(m, data = dat_expr), palette = "#2E9FDF",
           ggtheme = theme_minimal())
beta <- coef(m)
se <- sqrt(diag(vcov(m)))
HR <- exp(beta)
HRse <- HR * se
p_value <- summary(m)$sctest[3]
# log-rank test
# NOTE(review): survdiff on the continuous GPX3 treats every distinct value as
# its own group; GPX3_group may have been intended here -- confirm.
diff <- survdiff(my.surv ~ GPX3, data = dat_expr)
# pvalue <- 1-pchisq(diff$chisq,df=1)
# BUG FIX: 'data.survdiff' was never defined; the degrees of freedom come from
# the survdiff fit itself (number of groups minus one).
pvalue <- 1 - pchisq(diff$chisq, length(diff$n) - 1)
detach(dat_expr)
save.image("Surv.RData")
load("Surv.RData")
| /Surv_analysis.R | no_license | Bigbug4/Rcode | R | false | false | 2,649 | r | DIR=getwd()
# Survival analysis of stomach-cancer expression data.
# Reads an expression matrix and clinical table, draws Kaplan-Meier curves
# overall and stratified by GPX3 expression, and fits a Cox model for GPX3.
setwd("E:\\Rcode\\data")
library(survival)
library(ggplot2)
library(ggpubr)
library(survminer)
# expression matrix: genes x samples; restore '-' in sample names (read.table turned them into '.')
expr_data <- read.table("expr_all_stomach.txt")
names(expr_data) <- gsub("\\.", "-", names(expr_data))
expr_data_log <- log10(expr_data)
write.table(expr_data_log, "expr_all_stomach_log.txt", sep = "\t")
clin_data <- read.table("clinical_matrix.txt", header = TRUE)
# os_matrix<-read.table("OS_result.matrix.txt",blank.lines.skip=F,header = T)
# time <- as.numeric(os_matrix[,1])
# status <- os_matrix[,2]
# keep patients with overall survival greater than 30 days
dat <- clin_data[clin_data$OS > 30, ]
write.csv(dat, "OS_dat.csv", row.names = FALSE)
table(dat$vital_status)
attach(dat)
ggplot(dat,
       aes(x = OS, group = vital_status, colour = vital_status,
           fill = vital_status
       )) + geom_density(alpha = 0.5)
## Kaplan-Meier survival analysis (original comment was mis-encoded; reconstructed)
my.surv <- Surv(OS, vital_status == 'dead')
## The status indicator, normally 0=alive, 1=dead.
## Other choices are TRUE/FALSE (TRUE = death) or 1/2 (2=death).
kmfit <- survfit(my.surv ~ 1)
summary(kmfit)
plot(kmfit, main = 'Overall Survival', xlab = 'Days', ylab = 'Percent Survival')
detach(dat)
# append per-gene expression (transposed) for the retained patients
dat_expr <- cbind(dat, t(expr_data[, dat$submitter_id]))
# dat_expr <- na.omit(dat_expr)
# dat_expr <- dat_expr[!is.na(dat_expr$vital_status),]
dat_expr$vital_status <- as.character(dat_expr$vital_status)
write.csv(dat_expr, "OS_dat_expr.csv", row.names = FALSE)
attach(dat_expr)
ggplot(dat_expr, aes(x = vital_status, y = GPX3)) + geom_boxplot()
p <- ggboxplot(dat_expr, x = "vital_status", y = "GPX3", color = "vital_status",
               palette = "jco", add = "jitter")
p + stat_compare_means(method = "t.test")
# dichotomise GPX3 expression at its median
GPX3_group <- ifelse(GPX3 > median(GPX3), 'high', 'low')
GPX3_group <- as.factor(GPX3_group)
table(GPX3_group)
kmfit1 <- survfit(my.surv ~ GPX3_group, data = dat_expr)
summary(kmfit1)
plot(kmfit1, col = rainbow(2), main = 'Overall Survival GPX3 ', xlab = 'Days', ylab = 'Percent Survival')
legend("topright", legend = c(levels(GPX3_group)), col = rainbow(2), lwd = 2)
ggsurvplot(kmfit1, conf.int = FALSE, pval = TRUE,
           ggtheme = theme_bw())
ggsurvplot(kmfit1, conf.int = FALSE, pval = TRUE, risk.table = TRUE, ncensor.plot = TRUE, fun = "event",
           ggtheme = theme_bw())
str(dat_expr, no.list = TRUE, vec.len = 2)
# Cox proportional hazards model on continuous GPX3 expression
m <- coxph(my.surv ~ GPX3, data = dat_expr)
ggsurvplot(survfit(m, data = dat_expr), palette = "#2E9FDF",
           ggtheme = theme_minimal())
beta <- coef(m)
se <- sqrt(diag(vcov(m)))
HR <- exp(beta)
HRse <- HR * se
p_value <- summary(m)$sctest[3]
# log-rank test
# NOTE(review): survdiff on the continuous GPX3 treats every distinct value as
# its own group; GPX3_group may have been intended here -- confirm.
diff <- survdiff(my.surv ~ GPX3, data = dat_expr)
# pvalue <- 1-pchisq(diff$chisq,df=1)
# BUG FIX: 'data.survdiff' was never defined; the degrees of freedom come from
# the survdiff fit itself (number of groups minus one).
pvalue <- 1 - pchisq(diff$chisq, length(diff$n) - 1)
detach(dat_expr)
save.image("Surv.RData")
load("Surv.RData")
|
# Some experimenting to see if I could implement what I've learned about the laplacian matrix and stochastic/ transfer matrices.
# ======================
# = Load Sim Functions =
# ======================
# Source every helper script in the simulation-functions directory
# (this is where cardinal(), used below, is presumably defined -- confirm).
sim.location <- "~/Documents/School&Work/pinskyPost/trawl/Scripts/SimFunctions"
invisible(sapply(paste(sim.location, list.files(sim.location), sep="/"), source, .GlobalEnv))
# =================
# = Load Packages =
# =================
library(fields)
# ======================================
# = Experimented with Laplacian Matrix =
# ======================================
# Grid dimensions: M rows x N columns.
M <- 10
N <- 5
# Initial biomass field: 50 units placed along the two diagonals rising from
# the bottom row (bottom-left to upper-right and bottom-right to upper-left).
C0 <- matrix(0, nrow=M, ncol=N)
# C0[(M-1):M, c(1,N)] <- 50
C0[cbind(M:(M-N+1), 1:N)] <- 50
C0[cbind(M:(M-N+1), N:1)] <- 50
# Flatten to a column vector so one transfer-matrix multiply advances a step.
C0 <- matrix(C0,ncol=1)
disp <- 0.1 # 10% of the biomass in each vertex will disperse at each time step; i.e., the non-self-connecting edges would be 1/D where D is the degree of the vertex from which an edge is leaving. This would be a directed graph.
# Dinv <- solve(Deg) # inverse of the degree matrix
w <- sqrt(2)/(2*sqrt(2)+1)/2 # just a weighting for diagonal movement vs rook movement (based on the idea that moving between corners is a factor of sqrt(2) greater distance than moving between edges)
# Build the (M*N) x (M*N) transfer matrix. cardinal() is used as a matrix
# index, so it appears to return 2-column (row, col) index pairs linking each
# cell to its neighbour in the given compass direction -- TODO confirm.
Trans00 <- matrix(0, nrow=M*N, ncol=M*N)
Trans00[cardinal(M, N, "north")] <- 1 - w*2
Trans00[cardinal(M, N, "northwest")] <- w
Trans00[cardinal(M, N, "northeast")] <- w
Trans0 <- Trans00
# Scale the outgoing edge weights by the dispersing fraction.
Trans0 <- Trans0*disp
Trans <- Trans0
# Non-dispersing biomass stays put; afterwards every column sums to 1
# (column-stochastic transfer matrix).
diag(Trans) <- 1 - colSums(Trans)
# Plot the initial state plus 39 simulated steps on a 5x8 panel grid (40 panels).
dev.new(width=8, height=6)
par(mfrow=c(5,8), mar=c(1,1,0.1,0.1))
image.plot(t(matrix(C0, nrow=M)), zlim=c(0,60), ylim=c(1.05,0))
C.old <- C0
for(i in 1:39){
	C.t <- matrix(Trans%*%C.old, nrow=M)
	C.old <- matrix(C.t, ncol=1)
	image.plot(t(C.t), zlim=c(0,60), ylim=c(1.05,0))
}
# ==================
# = End Expt w/ LM =
# ==================
# ================== | /Scripts/Simulation/moveXnorth.R | no_license | rBatt/trawl | R | false | false | 1,839 | r |
# Some experimenting to see if I could implement what I've learned about the laplacian matrix and stochastic/ transfer matrices.
# ======================
# = Load Sim Functions =
# ======================
# Source every helper script in the simulation-functions directory
# (this is where cardinal(), used below, is presumably defined -- confirm).
sim.location <- "~/Documents/School&Work/pinskyPost/trawl/Scripts/SimFunctions"
invisible(sapply(paste(sim.location, list.files(sim.location), sep="/"), source, .GlobalEnv))
# =================
# = Load Packages =
# =================
library(fields)
# ======================================
# = Experimented with Laplacian Matrix =
# ======================================
# Grid dimensions: M rows x N columns.
M <- 10
N <- 5
# Initial biomass field: 50 units placed along the two diagonals rising from
# the bottom row (bottom-left to upper-right and bottom-right to upper-left).
C0 <- matrix(0, nrow=M, ncol=N)
# C0[(M-1):M, c(1,N)] <- 50
C0[cbind(M:(M-N+1), 1:N)] <- 50
C0[cbind(M:(M-N+1), N:1)] <- 50
# Flatten to a column vector so one transfer-matrix multiply advances a step.
C0 <- matrix(C0,ncol=1)
disp <- 0.1 # 10% of the biomass in each vertex will disperse at each time step; i.e., the non-self-connecting edges would be 1/D where D is the degree of the vertex from which an edge is leaving. This would be a directed graph.
# Dinv <- solve(Deg) # inverse of the degree matrix
w <- sqrt(2)/(2*sqrt(2)+1)/2 # just a weighting for diagonal movement vs rook movement (based on the idea that moving between corners is a factor of sqrt(2) greater distance than moving between edges)
# Build the (M*N) x (M*N) transfer matrix. cardinal() is used as a matrix
# index, so it appears to return 2-column (row, col) index pairs linking each
# cell to its neighbour in the given compass direction -- TODO confirm.
Trans00 <- matrix(0, nrow=M*N, ncol=M*N)
Trans00[cardinal(M, N, "north")] <- 1 - w*2
Trans00[cardinal(M, N, "northwest")] <- w
Trans00[cardinal(M, N, "northeast")] <- w
Trans0 <- Trans00
# Scale the outgoing edge weights by the dispersing fraction.
Trans0 <- Trans0*disp
Trans <- Trans0
# Non-dispersing biomass stays put; afterwards every column sums to 1
# (column-stochastic transfer matrix).
diag(Trans) <- 1 - colSums(Trans)
# Plot the initial state plus 39 simulated steps on a 5x8 panel grid (40 panels).
dev.new(width=8, height=6)
par(mfrow=c(5,8), mar=c(1,1,0.1,0.1))
image.plot(t(matrix(C0, nrow=M)), zlim=c(0,60), ylim=c(1.05,0))
C.old <- C0
for(i in 1:39){
	C.t <- matrix(Trans%*%C.old, nrow=M)
	C.old <- matrix(C.t, ncol=1)
	image.plot(t(C.t), zlim=c(0,60), ylim=c(1.05,0))
}
# ==================
# = End Expt w/ LM =
# ==================
\name{BclimMixSer}
\alias{BclimMixSer}
\title{
Serial version of Bclim mixture analysis
}
\description{
Function to approximate marginal data posteriors as mixtures of Gaussians
}
\usage{
BclimMixSer(MDP, G = 10, mixwarnings = FALSE)
}
\arguments{
\item{MDP}{
A set of marginal data posteriors, as produced by \code{\link{BclimLayer}}
}
\item{G}{
The number of Gaussian groups required for each layer to be partitioned into. The default of 10 is usually fine.
}
\item{mixwarnings}{
Whether to suppress mixture warnings (default) or not.
}
}
\details{
This function approximates marginal data posteriors (MDPs) as mixtures of Gaussians. The mixture algorithm is taken from the Mclust package which is a required installation for this to run. This is the serial version, i.e. it only uses one processor, as opposed to the \code{\link{BclimMixPar}} parallel version which will run much faster but requires extra packages to be installed and a multi-core machine.
}
\value{
Outputs a list containing the following objects:
\item{MDP }{A nsamples x n x m array (these values are described below) }
\item{n }{The number of layers}
\item{m }{The number of climate dimensions (always 3)}
\item{n.samp }{The number of samples given in \code{\link{BclimLayer}}}
\item{ScMean}{The raw climate means (used for standardisation purposes)}
\item{ScVar}{The raw climate variances (used for standardisation purposes)}
\item{G }{The number of mixture groups (as above)}
\item{mu.mat }{An estimate of the Gaussian mixture mean components}
\item{tau.mat }{An estimate of the Gaussian mixture precision components}
\item{p.mat }{An estimate of the Gaussian mixture proportions}
}
\references{
See the arXiv paper at http://arxiv.org/abs/1206.5009.
}
\author{
Andrew Parnell <andrew.parnell@ucd.ie>
}
\seealso{
The output here can be used as an input to \code{\link{BclimMCMC}}. See the main \code{\link{BclimRun}} function for more details of the other stages you might need to run.
}
\examples{
\dontrun{
# Set the working directory using setwd (not shown)
# Download and load in the response surfaces:
url1 <- 'http://mathsci.ucd.ie/~parnell_a/required.data3D.RData'
download.file(url1,'required_data3D.RData')
# and now the pollen
url2 <- 'http://mathsci.ucd.ie/~parnell_a/SlugganPollen.txt'
download.file(url2,'SlugganPollen.txt')
# and finally the chronologies
url3 <- 'http://mathsci.ucd.ie/~parnell_a/Sluggan_2chrons.txt'
download.file(url3,'Slugganchrons.txt')
# Create variables which state the locations of the pollen and chronologies
pollen.loc <- paste(getwd(),'/SlugganPollen.txt',sep='')
chron.loc <- paste(getwd(),'/Slugganchrons.txt',sep='')
# Load in the response surfaces
load('required.data3D.RData')
## note that all of these functions have further options you can change
step1 <- BclimLayer(pollen.loc,required.data3D=required.data3D)
step2 <- BclimMixSer(step1)
# See also the parallelised version BclimMixPar if you have doMC and foreach installed
step3 <- BclimMCMC(step2,chron.loc)
# You should probably do some convergence checking after this step
step4 <- BclimInterp(step2,step3)
results <- BclimCompile(step1,step2,step3,step4,core.name="Sluggan Moss")
# Create a plot of MTCO (dim=2)
plotBclim(results,dim=2)
# Create a volatility plot
plotBclimVol(results,dim=2)
}
}
\keyword{ model }
\keyword{ multivariate }
\keyword{ smooth }
| /man/BclimMixSer.Rd | no_license | uberstig/Bclim | R | false | false | 3,379 | rd | \name{BclimMixSer}
\alias{BclimMixSer}
\title{
Serial version of Bclim mixture analysis
}
\description{
Function to approximate marginal data posteriors as mixtures of Gaussians
}
\usage{
BclimMixSer(MDP, G = 10, mixwarnings = FALSE)
}
\arguments{
\item{MDP}{
A set of marginal data posteriors, as produced by \code{\link{BclimLayer}}
}
\item{G}{
The number of Gaussian groups required for each layer to be partitioned into. The default of 10 is usually fine.
}
\item{mixwarnings}{
Whether to suppress mixture warnings (default) or not.
}
}
\details{
This function approximates marginal data posteriors (MDPs) as mixtures of Gaussians. The mixture algorithm is taken from the Mclust package which is a required installation for this to run. This is the serial version, i.e. it only uses one processor, as opposed to the \code{\link{BclimMixPar}} parallel version which will run much faster but requires extra packages to be installed and a multi-core machine.
}
\value{
Outputs a list containing the following objects:
\item{MDP }{A nsamples x n x m array (these values are described below) }
\item{n }{The number of layers}
\item{m }{The number of climate dimensions (always 3)}
\item{n.samp }{The number of samples given in \code{\link{BclimLayer}}}
\item{ScMean}{The raw climate means (used for standardisation purposes)}
\item{ScVar}{The raw climate variances (used for standardisation purposes)}
\item{G }{The number of mixture groups (as above)}
\item{mu.mat }{An estimate of the Gaussian mixture mean components}
\item{tau.mat }{An estimate of the Gaussian mixture precision components}
\item{p.mat }{An estimate of the Gaussian mixture proportions}
}
\references{
See the arXiv paper at http://arxiv.org/abs/1206.5009.
}
\author{
Andrew Parnell <andrew.parnell@ucd.ie>
}
\seealso{
The output here can be used as an input to \code{\link{BclimMCMC}}. See the main \code{\link{BclimRun}} function for more details of the other stages you might need to run.
}
\examples{
\dontrun{
# Set the working directory using setwd (not shown)
# Download and load in the response surfaces:
url1 <- 'http://mathsci.ucd.ie/~parnell_a/required.data3D.RData'
download.file(url1,'required_data3D.RData')
# and now the pollen
url2 <- 'http://mathsci.ucd.ie/~parnell_a/SlugganPollen.txt'
download.file(url2,'SlugganPollen.txt')
# and finally the chronologies
url3 <- 'http://mathsci.ucd.ie/~parnell_a/Sluggan_2chrons.txt'
download.file(url3,'Slugganchrons.txt')
# Create variables which state the locations of the pollen and chronologies
pollen.loc <- paste(getwd(),'/SlugganPollen.txt',sep='')
chron.loc <- paste(getwd(),'/Slugganchrons.txt',sep='')
# Load in the response surfaces
load('required.data3D.RData')
## note that all of these functions have further options you can change
step1 <- BclimLayer(pollen.loc,required.data3D=required.data3D)
step2 <- BclimMixSer(step1)
# See also the parallelised version BclimMixPar if you have doMC and foreach installed
step3 <- BclimMCMC(step2,chron.loc)
# You should probably do some convergence checking after this step
step4 <- BclimInterp(step2,step3)
results <- BclimCompile(step1,step2,step3,step4,core.name="Sluggan Moss")
# Create a plot of MTCO (dim=2)
plotBclim(results,dim=2)
# Create a volatility plot
plotBclimVol(results,dim=2)
}
}
\keyword{ model }
\keyword{ multivariate }
\keyword{ smooth }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/esearch.R
\name{entrez_uid-class}
\alias{entrez_uid-class}
\title{Class \code{"entrez_uid"}}
\description{
A container for UIDs returned by a call to \code{\link{esearch}}.
It is essentially a character vector of UIDs supplemented with a number
of attributes:
\describe{
\item{\code{retmax}:}{Total number of hits retrieved from the Entrez server.}
\item{\code{retstart}:}{Index of the first hit retrieved from the Entrez server.}
\item{\code{count}:}{Total number of hits for a search query.}
\item{\code{query_translation}:}{Details of how Entrez translated the query.}
\item{\code{querykey}:}{If \code{usehistory = TRUE}, the query key,
otherwise \code{NA}.}
\item{\code{webenv}:}{If \code{usehistory = TRUE}, the Web environment string,
otherwise \code{NA}.}
\item{\code{database}:}{Name of the queried database.}
}
}
\examples{
###
}
\keyword{classes}
\keyword{internal}
| /man/entrez_uid-class.Rd | no_license | cran/reutils | R | false | true | 981 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/esearch.R
\name{entrez_uid-class}
\alias{entrez_uid-class}
\title{Class \code{"entrez_uid"}}
\description{
A container for UIDs returned by a call to \code{\link{esearch}}.
It is essentially a character vector of UIDs supplemented with a number
of attributes:
\describe{
\item{\code{retmax}:}{Total number of hits retrieved from the Entrez server.}
\item{\code{retstart}:}{Index of the first hit retrieved from the Entrez server.}
\item{\code{count}:}{Total number of hits for a search query.}
\item{\code{query_translation}:}{Details of how Entrez translated the query.}
\item{\code{querykey}:}{If \code{usehistory = TRUE}, the query key,
otherwise \code{NA}.}
\item{\code{webenv}:}{If \code{usehistory = TRUE}, the Web environment string,
otherwise \code{NA}.}
\item{\code{database}:}{Name of the queried database.}
}
}
\examples{
###
}
\keyword{classes}
\keyword{internal}
|
# Scrape Yahoo Finance headlines for AAPL and score each word's sentiment.
library(tidyverse)
library(rvest)
# Generate the URL
url <- "https://finance.yahoo.com/quote/AAPL"
# Read in the webpage data
page <- read_html(url)
# Extract just the article titles
article_titles <- page %>%
  html_nodes("#Main") %>%
  html_nodes("h3") %>%
  html_text() %>%
  as_tibble_col(column_name = "title")
# Compute the sentiment for each word
library(tidytext)
# One row per (headline, word); words absent from the sentiment lexicon are
# dropped by the inner join. The explicit factor levels ensure both sentiment
# categories are always present in later tabulations.
word_sentiment <- article_titles %>%
  rowid_to_column() %>%
  unnest_tokens(word, title) %>%
  inner_join(sentiments, by = "word") %>%
  mutate(sentiment = factor(sentiment, levels = c("positive", "negative")))
# Decide whether to buy or sell.
# Takes a factor of word sentiments (levels "positive"/"negative") and returns
# "BUY" when strictly more words are positive than negative; otherwise
# (including ties) returns "SELL".
buy_or_sell <- function(sentiment) {
  counts <- table(sentiment)
  n_pos <- counts[["positive"]]
  n_neg <- counts[["negative"]]
  if (n_pos > n_neg) "BUY" else "SELL"
}
# Print the trading signal derived from the scraped headlines.
buy_or_sell(word_sentiment$sentiment)
| /scrape.R | permissive | dorbuandrew/stock-picker | R | false | false | 805 | r | library(tidyverse)
library(rvest)
# Generate the URL
url <- "https://finance.yahoo.com/quote/AAPL"
# Read in the webpage data
page <- read_html(url)
# Extract just the article titles
article_titles <- page %>%
html_nodes("#Main") %>%
html_nodes("h3") %>%
html_text() %>%
as_tibble_col(column_name = "title")
# Compute the sentiment for each word
library(tidytext)
word_sentiment <- article_titles %>%
rowid_to_column() %>%
unnest_tokens(word, title) %>%
inner_join(sentiments, by = "word") %>%
mutate(sentiment = factor(sentiment, levels = c("positive", "negative")))
# Decide whether to buy or sell
buy_or_sell <- function(sentiment) {
tb <- table(sentiment)
if (tb[["positive"]] > tb[["negative"]]) {
"BUY"
} else {
"SELL"
}
}
buy_or_sell(word_sentiment$sentiment)
|
#
# This test file has been generated by kwb.test::create_test_files()
#
# Auto-generated smoke test: calling import_analytics_basel() with no
# arguments is expected to raise an error.
test_that("import_analytics_basel() works", {
  expect_error(aquanes.report:::import_analytics_basel())
})
| /tests/testthat/test-function-import_analytics_basel.R | permissive | KWB-R/aquanes.report | R | false | false | 183 | r | #
# This test file has been generated by kwb.test::create_test_files()
#
# Auto-generated smoke test: calling import_analytics_basel() with no
# arguments is expected to raise an error.
test_that("import_analytics_basel() works", {
  expect_error(aquanes.report:::import_analytics_basel())
})
|
#===================================================================================================
#' Guess separator
#' @param x character vector or SummarizedExperiment
#' @param var svar or fvar
#' @param possible_separators character vector with possible separators to look for
#' @param verbose logical
#' @param ... used for proper S3 method dispatch
#' @return separator (string) or NULL (if no separator could be identified)
#' @examples
#' require(magrittr)
#'
#' # charactervector
#' x <- c('PERM_NON.R1[H/L]', 'PERM_NON.R2[H/L]', 'PERM_NON.R3[H/L]', 'PERM_NON.R4[H/L]')
#' x %>% guess_sep()
#'
#' x <- c('WT untreated 1', 'WT untreated 2', 'WT treated 1')
#' x %>% guess_sep()
#'
#' x <- c('group1', 'group2', 'group3.R1')
#' x %>% guess_sep()
#'
#' # SummarizedExperiment
#' if (require(autonomics.data)) autonomics.data::glutaminase %>%
#' guess_sep()
#'
#' if (require(autonomics.data)) autonomics.data::stemcomp.proteinratios %>%
#' guess_sep()
#' @export
# S3 generic: dispatches on the class of x (methods for character, factor and
# SummarizedExperiment are defined in this file).
guess_sep <- function (x, ...) {
  UseMethod("guess_sep")
}
#' @rdname guess_sep
#' @importFrom magrittr %>%
#' @export
guess_sep.character <- function(
x,
possible_separators = c('.', ' ', '_'),
verbose = FALSE,
...
){
# Silence R CMD check notes about the magrittr '.' placeholder.
. <- NULL
# For each candidate separator: split every string on it, keep only candidates
# that yield the SAME number of fields across all strings, and record that
# common field count.
sep_freqs <- Map(function(y) stringi::stri_split_fixed(x, y), possible_separators) %>%
lapply(function(y) y %>% vapply(length, integer(1))) %>%
magrittr::extract( vapply(., autonomics.support::has_identical_values, logical(1))) %>%
vapply(unique, integer(1))
# No separator detected - return NULL
# (every consistent candidate produced a single field, i.e. never occurred)
if (all(sep_freqs==1)){
if (verbose) autonomics.support::cmessage('%s: no (consistent) separator. Returning NULL', x[1])
return(NULL) # no separator detected
}
# Find best separator: among candidates that actually split (count > 1),
# take the one producing the most fields.
best_sep <- sep_freqs %>%
magrittr::extract(.!=1) %>%
magrittr::extract(autonomics.support::is_max(vapply(., magrittr::extract, integer(1), 1))) %>%
names()
# Ambiguous separator - take first from tail
# (tie between candidates: keep whichever occurs last in the first string)
if (length(best_sep)>1){
pattern <- best_sep %>% paste0(collapse='') %>% paste0('[', ., ']')
best_sep <- x[1] %>% stringi::stri_extract_last_regex(pattern)
}
# Separator identified - return
if (verbose) autonomics.support::cmessage("\t\tGuess sep: '%s'", best_sep)
return(best_sep)
}
#' @rdname guess_sep
#' @importFrom magrittr %>%
#' @export
guess_sep.factor <- function(x, ...) guess_sep.character(levels(x))
#' @rdname guess_sep
#' @importFrom magrittr %>%
#' @export
guess_sep.SummarizedExperiment <- function(
x,
var = 'sample_id',
possible_separators = c('.', '_', ' '),# if (contains_ratios(x)) c('.', ' ') else c('.', '_', ' ')
verbose = FALSE,
...
){
# var must exist as either a sample variable (svar) or a feature variable (fvar)
assertive.sets::assert_is_subset(var, c(svars(x), fvars(x)))
# Pick the levels from whichever side the variable lives on, then defer to the
# character method for the actual guessing.
(if (var %in% svars(x)) slevels(x, var) else flevels(x, var)) %>%
guess_sep(possible_separators = possible_separators,
verbose = verbose)
}
# Deprecated alias: superseded by guess_sep(); forwards all arguments unchanged.
infer_design_sep <- function(...){
   .Deprecated('guess_sep')
   guess_sep(...)
}
#' @rdname guess_sep
#' @importFrom magrittr %>%
#' @export
# Deprecated alias: superseded by guess_sep(); forwards all arguments unchanged.
ssep <- function(...){
   .Deprecated('guess_sep')
   guess_sep(...)
}
#' @rdname guess_sep
#' @importFrom magrittr %>%
#' @export
# Deprecated alias: superseded by guess_sep(); forwards all arguments unchanged.
subgroup_sep <- function(...){
   .Deprecated('guess_sep')
   guess_sep(...)
}
#=======================================================================
# Drop the final `sep`-delimited component of each element and re-join the
# remainder with `sep` (e.g. "A_B_R1" with sep "_" -> "A_B").
# NOTE(review): for an element containing no separator, length(y) == 1 and the
# index 1:(length(y)-1) evaluates to c(1, 0), which keeps the whole string
# rather than returning "" -- presumably intentional; confirm before changing.
extract_first_components <- function(x, sep){
   x %>%
      stringi::stri_split_fixed(sep) %>%
      vapply(function(y) y %>%
                           magrittr::extract(1:(length(y)-1)) %>%
                           paste0(collapse = sep), character(1))
}
# Return the substring after the final occurrence of `sep` in each element
# (the whole element when `sep` does not occur in it).
extract_last_component <- function(x, sep){
   parts <- stringi::stri_split_fixed(x, sep)
   vapply(parts, function(p) p[[length(p)]], character(1))
}
#' Guess subgroup values
#' @param x charactervector, SummarizedExperiment
#' @param sep character(1)
#' @param invert FALSE (default) or TRUE: whether to guess "non-subgroup" component
#' @param verbose logical(1)
#' @param ... used for proper S3 method dispatch
#' @return character(n)
#' @examples
#' require(magrittr)
#'
#' # charactervector
#' # No sep: subgroup = x
#' x <- c("EM00", "EM01", "EM02")
#' x %>% guess_subgroup_values()
#'
#' # Sep: subgroup = head components of x
#' x <- c("UT_10h_R1", "UT_10h_R2", "UT_10h_R3")
#' x %>% guess_subgroup_values()
#' x %>% guess_subgroup_values(invert = TRUE)
#'
#' x <- c("EM00_STD.R1", "EM01_STD.R1", "EM01_EM00.R1")
#' x %>% guess_subgroup_values()
#' x %>% guess_subgroup_values(invert = TRUE)
#'
#' @export
# S3 generic: dispatch on the class of `x`.
guess_subgroup_values <- function(x, ...) UseMethod("guess_subgroup_values", x)
#' @rdname guess_subgroup_values
#' @importFrom magrittr %>%
#' @export
guess_subgroup_values.character <- function(
   x,
   sep = guess_sep(x),
   invert = FALSE,
   verbose = FALSE,
   ...
){
   # No separator: each value is its own subgroup.
   if (is.null(sep)){
      subgroup_values <- x
   # Inverted: keep only the trailing (e.g. replicate) component.
   } else if (invert){
      subgroup_values <- extract_last_component(x, sep)
   # Default: keep everything before the final separator.
   } else {
      subgroup_values <- extract_first_components(x, sep)
   }
   if (verbose) autonomics.support::cmessage('\t\tGuess subgroup values: %s => %s', x[1], subgroup_values[1])
   subgroup_values
}
#' @rdname guess_subgroup_values
#' @importFrom magrittr %>%
#' @export
guess_subgroup_values.SummarizedExperiment <- function(
   x,
   sep = x %>% guess_sep(),
   invert = FALSE,
   verbose = FALSE,
   ...
){
   # Prefer an explicit 'subgroup' sample variable when x already carries one.
   if ('subgroup' %in% svars(x)){
      if (verbose) autonomics.support::cmessage("\t\tUse 'subgroup' values in x ")
      return(sdata(x)$subgroup)
   }
   # Otherwise guess the subgroup from the sample-id values.
   x %>% sampleid_values() %>% guess_subgroup_values(sep = sep, invert = invert, verbose = verbose)
}
#==========================================================
# guess_subject_values <- function (x, ...) {
# UseMethod("guess_subject_values", x)
# }
#
# guess_subject_values.character(
# x,
# sep = guess_sep(x),
# verbose = FALSE
# ){
# NULL
# }
| /autonomics.import/R/guess.R | no_license | bhagwataditya/autonomics0 | R | false | false | 6,514 | r | #===================================================================================================
#' Guess separator
#' @param x character vector or SummarizedExperiment
#' @param var svar or fvar
#' @param possible_separators character vector with possible separators to look for
#' @param verbose logical
#' @param ... used for proper S3 method dispatch
#' @return separator (string) or NULL (if no separator could be identified)
#' @examples
#' require(magrittr)
#'
#' # charactervector
#' x <- c('PERM_NON.R1[H/L]', 'PERM_NON.R2[H/L]', 'PERM_NON.R3[H/L]', 'PERM_NON.R4[H/L]')
#' x %>% guess_sep()
#'
#' x <- c('WT untreated 1', 'WT untreated 2', 'WT treated 1')
#' x %>% guess_sep()
#'
#' x <- c('group1', 'group2', 'group3.R1')
#' x %>% guess_sep()
#'
#' # SummarizedExperiment
#' if (require(autonomics.data)) autonomics.data::glutaminase %>%
#' guess_sep()
#'
#' if (require(autonomics.data)) autonomics.data::stemcomp.proteinratios %>%
#' guess_sep()
#' @export
# S3 generic: dispatch on the class of `x` (character, factor, SummarizedExperiment).
guess_sep <- function(x, ...) UseMethod("guess_sep", x)
#' @rdname guess_sep
#' @importFrom magrittr %>%
#' @export
guess_sep.character <- function(
x,
possible_separators = c('.', ' ', '_'),
verbose = FALSE,
...
){
. <- NULL
sep_freqs <- Map(function(y) stringi::stri_split_fixed(x, y), possible_separators) %>%
lapply(function(y) y %>% vapply(length, integer(1))) %>%
magrittr::extract( vapply(., autonomics.support::has_identical_values, logical(1))) %>%
vapply(unique, integer(1))
# No separator detected - return NULL
if (all(sep_freqs==1)){
if (verbose) autonomics.support::cmessage('%s: no (consistent) separator. Returning NULL', x[1])
return(NULL) # no separator detected
}
# Find best separator
best_sep <- sep_freqs %>%
magrittr::extract(.!=1) %>%
magrittr::extract(autonomics.support::is_max(vapply(., magrittr::extract, integer(1), 1))) %>%
names()
# Ambiguous separator - take first from tail
if (length(best_sep)>1){
pattern <- best_sep %>% paste0(collapse='') %>% paste0('[', ., ']')
best_sep <- x[1] %>% stringi::stri_extract_last_regex(pattern)
}
# Separator identified - return
if (verbose) autonomics.support::cmessage("\t\tGuess sep: '%s'", best_sep)
return(best_sep)
}
#' @rdname guess_sep
#' @importFrom magrittr %>%
#' @export
# Factor method: guess the separator from the factor's level labels.
guess_sep.factor <- function(x, ...) guess_sep.character(levels(x))
#' @rdname guess_sep
#' @importFrom magrittr %>%
#' @export
guess_sep.SummarizedExperiment <- function(
x,
var = 'sample_id',
possible_separators = c('.', '_', ' '),# if (contains_ratios(x)) c('.', ' ') else c('.', '_', ' '),
verbose = FALSE,
...
){
assertive.sets::assert_is_subset(var, c(svars(x), fvars(x)))
(if (var %in% svars(x)) slevels(x, var) else flevels(x, var)) %>%
guess_sep(possible_separators = possible_separators,
verbose = verbose)
}
infer_design_sep <- function(...){
.Deprecated('guess_sep')
guess_sep(...)
}
#' @rdname guess_sep
#' @importFrom magrittr %>%
#' @export
ssep <- function(...){
.Deprecated('guess_sep')
guess_sep(...)
}
#' @rdname guess_sep
#' @importFrom magrittr %>%
#' @export
subgroup_sep <- function(...){
.Deprecated('guess_sep')
guess_sep(...)
}
#=======================================================================
extract_first_components <- function(x, sep){
x %>%
stringi::stri_split_fixed(sep) %>%
vapply(function(y) y %>%
magrittr::extract(1:(length(y)-1)) %>%
paste0(collapse = sep), character(1))
}
# Return the substring after the final occurrence of `sep` in each element
# (the whole element when `sep` does not occur in it).
extract_last_component <- function(x, sep){
   parts <- stringi::stri_split_fixed(x, sep)
   vapply(parts, function(p) p[[length(p)]], character(1))
}
#' Guess subgroup values
#' @param x charactervector, SummarizedExperiment
#' @param sep character(1)
#' @param invert FALSE (default) or TRUE: whether to guess "non-subgroup" component
#' @param verbose logical(1)
#' @param ... used for proper S3 method dispatch
#' @return character(n)
#' @examples
#' require(magrittr)
#'
#' # charactervector
#' # No sep: subgroup = x
#' x <- c("EM00", "EM01", "EM02")
#' x %>% guess_subgroup_values()
#'
#' # Sep: subgroup = head components of x
#' x <- c("UT_10h_R1", "UT_10h_R2", "UT_10h_R3")
#' x %>% guess_subgroup_values()
#' x %>% guess_subgroup_values(invert = TRUE)
#'
#' x <- c("EM00_STD.R1", "EM01_STD.R1", "EM01_EM00.R1")
#' x %>% guess_subgroup_values()
#' x %>% guess_subgroup_values(invert = TRUE)
#'
#' @export
# S3 generic: dispatch on the class of `x`.
guess_subgroup_values <- function(x, ...) UseMethod("guess_subgroup_values", x)
#' @rdname guess_subgroup_values
#' @importFrom magrittr %>%
#' @export
guess_subgroup_values.character <- function(
   x,
   sep = guess_sep(x),
   invert = FALSE,
   verbose = FALSE,
   ...
){
   # No separator: each value is its own subgroup.
   if (is.null(sep)){
      subgroup_values <- x
   # Inverted: keep only the trailing (e.g. replicate) component.
   } else if (invert){
      subgroup_values <- extract_last_component(x, sep)
   # Default: keep everything before the final separator.
   } else {
      subgroup_values <- extract_first_components(x, sep)
   }
   if (verbose) autonomics.support::cmessage('\t\tGuess subgroup values: %s => %s', x[1], subgroup_values[1])
   subgroup_values
}
#' @rdname guess_subgroup_values
#' @importFrom magrittr %>%
#' @export
guess_subgroup_values.SummarizedExperiment <- function(
x,
sep = x %>% guess_sep(),
invert = FALSE,
verbose = FALSE,
...
){
# already in x
if ('subgroup' %in% svars(x)){
if (verbose) autonomics.support::cmessage("\t\tUse 'subgroup' values in x ")
return(sdata(x)$subgroup)
}
# guess from sampleid values
x %>% sampleid_values() %>% guess_subgroup_values(sep = sep, invert = invert, verbose = verbose)
}
#==========================================================
# guess_subject_values <- function (x, ...) {
# UseMethod("guess_subject_values", x)
# }
#
# guess_subject_values.character(
# x,
# sep = guess_sep(x),
# verbose = FALSE
# ){
# NULL
# }
|
#' Tests if the functions in \code{fmat} and \code{gmat} are equal in distribution
#' @param fmat Matrix of functions. Each column is a function.
#' @param gmat Matrix of functions. Each column is a function. Need to be same length as fmat.
#'
#' @return Value of the KD statistic and the associated p value under the null, as a vector.
#'
#' @export
kstat = function(fmat, gmat) {
  # Integrated data depth of each curve w.r.t. its own and the other sample.
  depth.ff = int_depth(fmat, fmat)
  depth.fg = int_depth(fmat, gmat)
  depth.gg = int_depth(gmat, gmat)
  depth.gf = int_depth(gmat, fmat)
  # KS-type distances between the empirical CDFs of the depth values,
  # each pair evaluated at the within-sample depths.
  ksf = max(abs(ecdf(depth.ff)(depth.ff) - ecdf(depth.gf)(depth.ff)))
  ksg = max(abs(ecdf(depth.gg)(depth.gg) - ecdf(depth.fg)(depth.gg)))
  kd = max(ksf, ksg)
  # Asymptotic scaling for the Kolmogorov null distribution.
  nf = ncol(fmat)
  ng = ncol(gmat)
  rate = sqrt((ng * nf) / (ng + nf))
  # Statistic and its (approximate) p value under the null.
  c(kd, 1 - ks_cdf(rate * kd))
}
#' Calculates the probability of the value \code{x} under the Kolmogorov Distribution.
#'
#' @param x A positive number.
#' @param n A positive integer. Number of terms to include in the Kolmogorov distribution. Defaults to 20.
#'
#' @return Probability of x.
#'
#' @export
# CDF of the Kolmogorov distribution, via its alternating series truncated
# after n terms; below 0.05 the mass is effectively zero (and the truncated
# series is unreliable), so 0 is returned directly.
ks_cdf = function(x, n = 20) {
  if (x < 0.05) {
    return(0)
  }
  k = seq_len(n)
  1 - 2 * sum((-1)^(k - 1) * exp(-2 * k^2 * x^2))
}
#' Tests if the functions in \code{fmat} and \code{gmat} are equal in distribution
#' @param fmat Matrix of functions. Each column is a function.
#' @param gmat Matrix of functions. Each column is a function. Need to be same length as fmat.
#' @param perms Positive integer. Number of permutations to construct the approximate permutation distribution.
#'
#'
#' @return Value of the KD statistic and the associated p value under the null, as a vector, using a permutation distribution
#'
#' @export
# Permutation version of the KD test: shuffle the pooled columns of fmat and
# gmat, recompute KD on each random split, and compare the observed statistic
# against that null distribution.
kstat_perm = function(fmat, gmat, perms = 500) {
  # Observed KD statistic.
  kd = kstat(fmat, gmat)[1]
  # Pool the two samples; each permutation reassigns columns to the groups.
  hmat = cbind(fmat, gmat)
  hn = ncol(hmat)
  fn = ncol(fmat)
  # vapply is type-stable (always numeric); the dead rep(0, perms)
  # preallocation that was immediately overwritten has been removed.
  kd.dist = vapply(seq_len(perms), function(i) {
    hstar = hmat[, sample(hn)]
    kstat(hstar[, 1:fn], hstar[, -(1:fn)])[1]
  }, numeric(1))
  # Return KD and its permutation p value.
  # NOTE(review): strict ">" matches the original; many references use
  # (1 + sum(kd.dist >= kd)) / (1 + perms) to avoid zero p values.
  c(kd, mean(kd.dist > kd))
}
| /R/stat.R | no_license | trevor-harris/kstat | R | false | false | 2,277 | r | #' Tests if the functions in \code{fmat} and \code{gmat} are equal in distribution
#' @param fmat Matrix of functions. Each column is a function.
#' @param gmat Matrix of functions. Each column is a function. Need to be same length as fmat.
#'
#' @return Value of the KD statistic and the associated p value under the null, as a vector.
#'
#' @export
kstat = function(fmat, gmat) {
  # Integrated data depth of each curve w.r.t. its own and the other sample.
  depth.ff = int_depth(fmat, fmat)
  depth.fg = int_depth(fmat, gmat)
  depth.gg = int_depth(gmat, gmat)
  depth.gf = int_depth(gmat, fmat)
  # KS-type distances between the empirical CDFs of the depth values,
  # each pair evaluated at the within-sample depths.
  ksf = max(abs(ecdf(depth.ff)(depth.ff) - ecdf(depth.gf)(depth.ff)))
  ksg = max(abs(ecdf(depth.gg)(depth.gg) - ecdf(depth.fg)(depth.gg)))
  kd = max(ksf, ksg)
  # Asymptotic scaling for the Kolmogorov null distribution.
  nf = ncol(fmat)
  ng = ncol(gmat)
  rate = sqrt((ng * nf) / (ng + nf))
  # Statistic and its (approximate) p value under the null.
  c(kd, 1 - ks_cdf(rate * kd))
}
#' Calculates the probability of the value \code{x} under the Kolmogorov Distribution.
#'
#' @param x A positive number.
#' @param n A positive integer. Number of terms to include in the Kolmogorov distribution. Defaults to 20.
#'
#' @return Probability of x.
#'
#' @export
# CDF of the Kolmogorov distribution, via its alternating series truncated
# after n terms; below 0.05 the mass is effectively zero (and the truncated
# series is unreliable), so 0 is returned directly.
ks_cdf = function(x, n = 20) {
  if (x < 0.05) {
    return(0)
  }
  k = seq_len(n)
  1 - 2 * sum((-1)^(k - 1) * exp(-2 * k^2 * x^2))
}
#' Tests if the functions in \code{fmat} and \code{gmat} are equal in distribution
#' @param fmat Matrix of functions. Each column is a function.
#' @param gmat Matrix of functions. Each column is a function. Need to be same length as fmat.
#' @param perms Positive integer. Number of permutations to construct the approximate permutation distribution.
#'
#'
#' @return Value of the KD statistic and the associated p value under the null, as a vector, using a permutation distribution
#'
#' @export
# Permutation version of the KD test: shuffle the pooled columns of fmat and
# gmat, recompute KD on each random split, and compare the observed statistic
# against that null distribution.
kstat_perm = function(fmat, gmat, perms = 500) {
  # Observed KD statistic.
  kd = kstat(fmat, gmat)[1]
  # Pool the two samples; each permutation reassigns columns to the groups.
  hmat = cbind(fmat, gmat)
  hn = ncol(hmat)
  fn = ncol(fmat)
  # vapply is type-stable (always numeric); the dead rep(0, perms)
  # preallocation that was immediately overwritten has been removed.
  kd.dist = vapply(seq_len(perms), function(i) {
    hstar = hmat[, sample(hn)]
    kstat(hstar[, 1:fn], hstar[, -(1:fn)])[1]
  }, numeric(1))
  # Return KD and its permutation p value.
  # NOTE(review): strict ">" matches the original; many references use
  # (1 + sum(kd.dist >= kd)) / (1 + perms) to avoid zero p values.
  c(kd, mean(kd.dist > kd))
}
|
#' plotGC Function
#'
#' @param ph phage dataset
#' @param tr bacterial tRNAs
#' @param cd bacterial CDS
#' @param output plot or write the graph
#' @keywords reformat the phage data
#' @export
#' @examples
#' plotGC()
# Overlay GC-content density curves: phage genomes (red), bacterial tRNAs
# (blue) and bacterial CDS (green). The tRNA table carries its GC values in
# column V2 or V3 depending on how many columns it has.
plotGC <- function(ph = phage, tr = tRNA, cd = CDS, output = "plot"){
  col_count = ncol(tr)
  if(col_count == 2){
    p <- ggplot() +
      geom_density(data = ph, aes(genome_gc), colour="red") +
      geom_density(data = tr, aes(V2), colour="blue") +
      geom_density(data = cd, aes(V2), colour="green")
  }else{
    p <- ggplot() +
      geom_density(data = ph, aes(genome_gc), colour="red") +
      geom_density(data = tr, aes(V3), colour="blue") +
      geom_density(data = cd, aes(V2), colour="green")
  }
  if(output == "plot"){
    p
  }else if(output == "write"){
    # message() instead of print() for status output (suppressible, goes to stderr).
    message("Saving png.")
    # NOTE(review): 'input_out_name' is read from the calling environment,
    # not a parameter -- confirm it is defined wherever this is used.
    ggsave(filename = paste0(input_out_name, "_gc.png"),
           plot = p, device = "png", width = 15, height = 10)
  }else{
    # Signal a real error instead of printing a message and returning invisibly.
    stop("Select 'plot' or 'write' for the output option", call. = FALSE)
  }
}
| /R/plotGC.R | no_license | TJN25/comparativeSRA | R | false | false | 1,028 | r | #' plotGC Function
#'
#' @param ph phage dataset
#' @param tr bacterial tRNAs
#' @param cd bacterial CDS
#' @param output plot or write the graph
#' @keywords reformat the phage data
#' @export
#' @examples
#' plotGC()
# Overlay GC-content density curves: phage genomes (red), bacterial tRNAs
# (blue) and bacterial CDS (green). The tRNA table carries its GC values in
# column V2 or V3 depending on how many columns it has.
plotGC <- function(ph = phage, tr = tRNA, cd = CDS, output = "plot"){
  col_count = ncol(tr)
  if(col_count == 2){
    p <- ggplot() +
      geom_density(data = ph, aes(genome_gc), colour="red") +
      geom_density(data = tr, aes(V2), colour="blue") +
      geom_density(data = cd, aes(V2), colour="green")
  }else{
    p <- ggplot() +
      geom_density(data = ph, aes(genome_gc), colour="red") +
      geom_density(data = tr, aes(V3), colour="blue") +
      geom_density(data = cd, aes(V2), colour="green")
  }
  if(output == "plot"){
    p
  }else if(output == "write"){
    print("Saving png.")
    # NOTE(review): 'input_out_name' is read from the calling environment,
    # not a parameter -- confirm it is defined wherever this is used.
    ggsave(filename = paste(input_out_name, "_gc.png", sep = ""), plot = p, device = "png", width = 15, height = 10)
  }else{
    print("Error: Select 'plot' or 'write' for the output option" )
  }
}
|
# Get timing of images for AOP data
# adapted from
# https://www.neonscience.org/resources/learning-hub/tutorials/neon-api-usage
# use IMAGEDATETIME in digital camera file names
library(tidyverse)
library(httr)
library(jsonlite)
library(glue)
library(lubridate)
## table of flight dates
aop_dates <- read_csv('~/Documents/data/NEON/meta/flight.dates.AOP.csv')
aop_dates <- aop_dates %>%
mutate(year = substr(YearSiteVisit, 1, 4),
siteid = substr(YearSiteVisit, 6, 9),
flightdate = ymd(substr(FlightDate, 1, 8)))
aop_dates %>% write_csv('results/all_aop_dates.csv')
aop_dates <- read_csv('results/all_aop_dates.csv')
sites_x_aop <- read_csv('results/sites_x_aop.csv')
sites_x_aop_sub <- sites_x_aop %>%
dplyr::select(domanID, siteID, domainName, flightbxID, aop_site_id) %>%
distinct()
aop_dates %>%
left_join(sites_x_aop_sub, by = c('siteid' = 'aop_site_id')) %>%
dplyr::filter(!is.na(siteID)) %>%
write_csv('results/aquatic-sites-aop-dates.csv')
sites_join_aop_dates <- sites_x_aop %>% left_join(aop_dates, by = c("aop_site_id" = "siteid"))
sites_join_aop_dates %>% write_csv('results/sites_join_aop_dates.csv')
# aquatic to aop sites
aop_dates <- read_csv('results/sites_join_aop_dates.csv')
# siteID is the aquatic site
# Flight dates of the AOP sites paired with the given aquatic site IDs,
# sorted chronologically, one row per distinct site/date combination.
get_aop_dates <- function(aq_siteids){
  read_csv('results/sites_join_aop_dates.csv') %>%
    dplyr::filter(siteID %in% aq_siteids) %>%
    dplyr::select(siteID, aop_site_id, flightdate) %>%
    arrange(flightdate) %>%
    distinct()
}
get_aop_dates('CARI')
get_aop_dates('KING')
### Or from API ###
base_url <- 'http://data.neonscience.org/api/v0/'
# hs_data_id <- 'DP3.30010.001'
data_id <- 'DP1.30010.001' # digital camera 10cm imagery
req_aop <- GET(glue('{base_url}/products/{data_id}'))
avail_aop <- content(req_aop, as = 'text') %>%
# readLines() %>%
fromJSON(simplifyDataFrame = TRUE, flatten = TRUE)
# List of products by site code with month
# eg ABBY/2017-06
data_urls_list <- avail_aop$data$siteCodes$availableDataUrls
data_urls <- data_urls_list %>% unlist()
# make this into a table
avail_df <- data_urls_list %>%
purrr::map(~str_sub(.x, 56, 67)) %>%
unlist() %>% as.data.frame() %>%
mutate(siteid = str_sub(., 1, 4)) %>%
mutate(month = str_sub(., 6, 12)) %>%
dplyr::select(siteid, month)
my_url <- data_urls[1]
# actual files available
# Query one NEON data-availability URL and summarise when camera images were
# captured there (site, month, first and last capture time).
get_img_datetimes <- function(my_url){
  data_files_req <- GET(my_url)
  data_files <- content(data_files_req, as = "text") %>% fromJSON()
  # filter to just the tifs (orthorectified camera images end in "ort.tif")
  imgs <- data_files$data$files$name %>% fs::path_filter("*ort.tif")
  # extract image dates from parenthesis -- file names embed the capture
  # time as "(YYYYMMDDHHmmSS)"
  img_datetimes <- imgs %>%
    str_match_all("(?<=\\().+?(?=\\))") %>%
    unlist() %>% sort() %>% lubridate::as_datetime()
  # one row data frame of results
  meta <- data.frame(siteid = data_files$data$siteCode,
                     month = data_files$data$month,
                     first_img = head(img_datetimes, 1),
                     last_img = tail(img_datetimes, 1))
  return(meta)
}
get_img_datetimes(data_urls[13])
poss_get_img_datetimes <- purrr::possibly(get_img_datetimes, otherwise = NULL)
aop_meta_df <- data_urls %>% purrr::map_df(~poss_get_img_datetimes(.x))
aop_meta_df %>% write_csv('results/aop_meta_df.csv')
# data_files_req <- GET(data_urls[1])
# data_files <- content(data_files_req, as = "text") %>% fromJSON()
#
# data_files$data$siteCode
# data_files$data$month
# data_files$data$files$name[1]
# Digital camera: FLHTSTRT_EHCCCCCC(IMAGEDATETIME)-NNNN_ort.tif
# IMAGEDATETIME: Date and time of image capture, YYYYMMDDHHmmSS
# make a table of flight dates JOIN all sensor positions
| /03-aop-dates.R | no_license | khondula/neon-aquatics | R | false | false | 3,635 | r | # Get timing of images for AOP data
# adapted from
# https://www.neonscience.org/resources/learning-hub/tutorials/neon-api-usage
# use IMAGEDATETIME in digital camera file names
library(tidyverse)
library(httr)
library(jsonlite)
library(glue)
library(lubridate)
## table of flight dates
aop_dates <- read_csv('~/Documents/data/NEON/meta/flight.dates.AOP.csv')
aop_dates <- aop_dates %>%
mutate(year = substr(YearSiteVisit, 1, 4),
siteid = substr(YearSiteVisit, 6, 9),
flightdate = ymd(substr(FlightDate, 1, 8)))
aop_dates %>% write_csv('results/all_aop_dates.csv')
aop_dates <- read_csv('results/all_aop_dates.csv')
sites_x_aop <- read_csv('results/sites_x_aop.csv')
sites_x_aop_sub <- sites_x_aop %>%
dplyr::select(domanID, siteID, domainName, flightbxID, aop_site_id) %>%
distinct()
aop_dates %>%
left_join(sites_x_aop_sub, by = c('siteid' = 'aop_site_id')) %>%
dplyr::filter(!is.na(siteID)) %>%
write_csv('results/aquatic-sites-aop-dates.csv')
sites_join_aop_dates <- sites_x_aop %>% left_join(aop_dates, by = c("aop_site_id" = "siteid"))
sites_join_aop_dates %>% write_csv('results/sites_join_aop_dates.csv')
# aquatic to aop sites
aop_dates <- read_csv('results/sites_join_aop_dates.csv')
# siteID is the aquatic site
# Flight dates of the AOP sites paired with the given aquatic site IDs,
# sorted chronologically, one row per distinct site/date combination.
get_aop_dates <- function(aq_siteids){
  read_csv('results/sites_join_aop_dates.csv') %>%
    dplyr::filter(siteID %in% aq_siteids) %>%
    dplyr::select(siteID, aop_site_id, flightdate) %>%
    arrange(flightdate) %>%
    distinct()
}
get_aop_dates('CARI')
get_aop_dates('KING')
### Or from API ###
base_url <- 'http://data.neonscience.org/api/v0/'
# hs_data_id <- 'DP3.30010.001'
data_id <- 'DP1.30010.001' # digital camera 10cm imagery
req_aop <- GET(glue('{base_url}/products/{data_id}'))
avail_aop <- content(req_aop, as = 'text') %>%
# readLines() %>%
fromJSON(simplifyDataFrame = TRUE, flatten = TRUE)
# List of products by site code with month
# eg ABBY/2017-06
data_urls_list <- avail_aop$data$siteCodes$availableDataUrls
data_urls <- data_urls_list %>% unlist()
# make this into a table
avail_df <- data_urls_list %>%
purrr::map(~str_sub(.x, 56, 67)) %>%
unlist() %>% as.data.frame() %>%
mutate(siteid = str_sub(., 1, 4)) %>%
mutate(month = str_sub(., 6, 12)) %>%
dplyr::select(siteid, month)
my_url <- data_urls[1]
# actual files available
# Query one NEON data-availability URL and summarise when camera images were
# captured there (site, month, first and last capture time).
get_img_datetimes <- function(my_url){
  data_files_req <- GET(my_url)
  data_files <- content(data_files_req, as = "text") %>% fromJSON()
  # filter to just the tifs (orthorectified camera images end in "ort.tif")
  imgs <- data_files$data$files$name %>% fs::path_filter("*ort.tif")
  # extract image dates from parenthesis -- file names embed the capture
  # time as "(YYYYMMDDHHmmSS)"
  img_datetimes <- imgs %>%
    str_match_all("(?<=\\().+?(?=\\))") %>%
    unlist() %>% sort() %>% lubridate::as_datetime()
  # one row data frame of results
  meta <- data.frame(siteid = data_files$data$siteCode,
                     month = data_files$data$month,
                     first_img = head(img_datetimes, 1),
                     last_img = tail(img_datetimes, 1))
  return(meta)
}
get_img_datetimes(data_urls[13])
poss_get_img_datetimes <- purrr::possibly(get_img_datetimes, otherwise = NULL)
aop_meta_df <- data_urls %>% purrr::map_df(~poss_get_img_datetimes(.x))
aop_meta_df %>% write_csv('results/aop_meta_df.csv')
# data_files_req <- GET(data_urls[1])
# data_files <- content(data_files_req, as = "text") %>% fromJSON()
#
# data_files$data$siteCode
# data_files$data$month
# data_files$data$files$name[1]
# Digital camera: FLHTSTRT_EHCCCCCC(IMAGEDATETIME)-NNNN_ort.tif
# IMAGEDATETIME: Date and time of image capture, YYYYMMDDHHmmSS
# make a table of flight dates JOIN all sensor positions
|
setwd("~/Google Drive/Study/child-name-popularity/")
# Read the headerless all-states baby-name file from the working directory
# and label its columns. file.path() replaces hand-rolled paste(sep = '/').
loadDataset <- function() {
  data.all <- read.csv(file.path(getwd(), "all_states.csv"), header = FALSE)
  colnames(data.all) <- c("State", "Gender", "Year", "Name", "Count")
  data.all
}
#dataForState <- function(ds, year) {
# data.state <- subset(ds, Year == year)
# data.state <- data.state[order(-data.state$Count),]
# data.state
#}
#dataForStateAndYear <- function(ds, state, year) {
# data.state <- subset(ds, Year == year & State == state)
# data.state <- data.state[order(-data.state$Count),]
# data.state
#}
#topN <- function(ds, year, numberOfNames) {
# data.state <- dataForState(ds, year)
# data.top <- as.character(data.state[1:numberOfNames,]$Name)
# data.top
#}
#topNPerState <- function(ds, state, year, numberOfNames) {
# data.state <- dataForStateAndYear(ds, state, year)
# data.top <- as.character(data.state[1:numberOfNames,]$Name)
# data.top
#}
# Most popular names overall: sum counts per name across the whole dataset,
# keep the top `numNames`, and report each name's share of the grand total.
topOverall <- function(ds, numNames) {
  # Fix: aggregate by ds$Name -- the original grouped by the global
  # `data.all$Name`, which broke the function for any other input and could
  # silently misalign groups.
  data.popular <- aggregate(ds$Count, by = list(Name = ds$Name), FUN = sum)
  colnames(data.popular) <- c("Name", "Count")
  total <- sum(data.popular$Count)
  data.popular <- data.popular[order(-data.popular$Count), ]
  data.popular <- data.popular[seq_len(numNames), ]
  data.popular$percentOf <- round(data.popular$Count / total * 100, 2)
  data.popular
}
| /global.R | no_license | hegrobler/child-name-popularity | R | false | false | 1,375 | r | setwd("~/Google Drive/Study/child-name-popularity/")
# Read the headerless all-states baby-name file from the working directory
# and label its columns. file.path() replaces hand-rolled paste(sep = '/').
loadDataset <- function() {
  data.all <- read.csv(file.path(getwd(), "all_states.csv"), header = FALSE)
  colnames(data.all) <- c("State", "Gender", "Year", "Name", "Count")
  data.all
}
#dataForState <- function(ds, year) {
# data.state <- subset(ds, Year == year)
# data.state <- data.state[order(-data.state$Count),]
# data.state
#}
#dataForStateAndYear <- function(ds, state, year) {
# data.state <- subset(ds, Year == year & State == state)
# data.state <- data.state[order(-data.state$Count),]
# data.state
#}
#topN <- function(ds, year, numberOfNames) {
# data.state <- dataForState(ds, year)
# data.top <- as.character(data.state[1:numberOfNames,]$Name)
# data.top
#}
#topNPerState <- function(ds, state, year, numberOfNames) {
# data.state <- dataForStateAndYear(ds, state, year)
# data.top <- as.character(data.state[1:numberOfNames,]$Name)
# data.top
#}
# Most popular names overall: sum counts per name across the whole dataset,
# keep the top `numNames`, and report each name's share of the grand total.
topOverall <- function(ds, numNames) {
  # Fix: aggregate by ds$Name -- the original grouped by the global
  # `data.all$Name`, which broke the function for any other input and could
  # silently misalign groups.
  data.popular <- aggregate(ds$Count, by = list(Name = ds$Name), FUN = sum)
  colnames(data.popular) <- c("Name", "Count")
  total <- sum(data.popular$Count)
  data.popular <- data.popular[order(-data.popular$Count), ]
  data.popular <- data.popular[seq_len(numNames), ]
  data.popular$percentOf <- round(data.popular$Count / total * 100, 2)
  data.popular
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/loadDataPack.R
\name{readSheet}
\alias{readSheet}
\title{Read data from a DataPack object}
\usage{
readSheet(
d,
sheet = 1,
range = NULL,
col_names = TRUE,
col_types = "text",
na = "",
guess_max = 1000,
progress = readxl::readxl_progress(),
.name_repair = "minimal"
)
}
\arguments{
\item{d}{DataPack object, created via \code{loadDataPack}.}
\item{sheet}{Sheet to read. Either a string (the name of a sheet), or an
integer (the position of the sheet). Ignored if the sheet is specified via
\code{range}. If neither argument specifies the sheet, defaults to the first
sheet.}
\item{range}{A cell range to read from, as described in \link[readxl]{cell-specification}.
Includes typical Excel ranges like "B3:D87", possibly including the sheet
name like "Budget!B2:G14", and more. Interpreted strictly, even if the
range forces the inclusion of leading or trailing empty rows or columns.
Takes precedence over \code{skip}, \code{n_max} and \code{sheet}.}
\item{col_names}{\code{TRUE} to use the first row as column names, \code{FALSE} to get
default names, or a character vector giving a name for each column. If user
provides \code{col_types} as a vector, \code{col_names} can have one entry per
column, i.e. have the same length as \code{col_types}, or one entry per
unskipped column.}
\item{col_types}{Either \code{NULL} to guess all from the spreadsheet or a
character vector containing one entry per column from these options:
"skip", "guess", "logical", "numeric", "date", "text" or "list". If exactly
one \code{col_type} is specified, it will be recycled. The content of a cell in
a skipped column is never read and that column will not appear in the data
frame output. A list cell loads a column as a list of length 1 vectors,
which are typed using the type guessing logic from \code{col_types = NULL}, but
on a cell-by-cell basis.}
\item{na}{Character vector of strings to interpret as missing values. By
default, readxl treats blank cells as missing data.}
\item{guess_max}{Maximum number of data rows to use for guessing column
types.}
\item{progress}{Display a progress spinner? By default, the spinner appears
only in an interactive session, outside the context of knitting a document,
and when the call is likely to run for several seconds or more. See
\code{\link[readxl:readxl_progress]{readxl_progress()}} for more details.}
\item{.name_repair}{Handling of column names. Passed along to
\code{\link[tibble:as_tibble]{tibble::as_tibble()}}. readxl's default is `.name_repair = "unique"`, which
ensures column names are not empty and are unique.}
}
\value{
A \link[tibble:tibble-package]{tibble}
}
\description{
Reads data from a sheet in a DataPack object. This function is
essentially a wrapper for \code{readxl}'s \code{read_excel} function, but with additional
support for selecting default parameters per DataPack setup.
}
\author{
Scott Jackson
}
| /man/readSheet.Rd | permissive | jason-p-pickering/datapackr | R | false | true | 2,967 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/loadDataPack.R
\name{readSheet}
\alias{readSheet}
\title{Read data from a DataPack object}
\usage{
readSheet(
d,
sheet = 1,
range = NULL,
col_names = TRUE,
col_types = "text",
na = "",
guess_max = 1000,
progress = readxl::readxl_progress(),
.name_repair = "minimal"
)
}
\arguments{
\item{d}{DataPack object, created via \code{loadDataPack}.}
\item{sheet}{Sheet to read. Either a string (the name of a sheet), or an
integer (the position of the sheet). Ignored if the sheet is specified via
\code{range}. If neither argument specifies the sheet, defaults to the first
sheet.}
\item{range}{A cell range to read from, as described in \link[readxl]{cell-specification}.
Includes typical Excel ranges like "B3:D87", possibly including the sheet
name like "Budget!B2:G14", and more. Interpreted strictly, even if the
range forces the inclusion of leading or trailing empty rows or columns.
Takes precedence over \code{skip}, \code{n_max} and \code{sheet}.}
\item{col_names}{\code{TRUE} to use the first row as column names, \code{FALSE} to get
default names, or a character vector giving a name for each column. If user
provides \code{col_types} as a vector, \code{col_names} can have one entry per
column, i.e. have the same length as \code{col_types}, or one entry per
unskipped column.}
\item{col_types}{Either \code{NULL} to guess all from the spreadsheet or a
character vector containing one entry per column from these options:
"skip", "guess", "logical", "numeric", "date", "text" or "list". If exactly
one \code{col_type} is specified, it will be recycled. The content of a cell in
a skipped column is never read and that column will not appear in the data
frame output. A list cell loads a column as a list of length 1 vectors,
which are typed using the type guessing logic from \code{col_types = NULL}, but
on a cell-by-cell basis.}
\item{na}{Character vector of strings to interpret as missing values. By
default, readxl treats blank cells as missing data.}
\item{guess_max}{Maximum number of data rows to use for guessing column
types.}
\item{progress}{Display a progress spinner? By default, the spinner appears
only in an interactive session, outside the context of knitting a document,
and when the call is likely to run for several seconds or more. See
\code{\link[readxl:readxl_progress]{readxl_progress()}} for more details.}
\item{.name_repair}{Handling of column names. Passed along to
\code{\link[tibble:as_tibble]{tibble::as_tibble()}}. readxl's default is `.name_repair = "unique"`, which
ensures column names are not empty and are unique.}
}
\value{
A \link[tibble:tibble-package]{tibble}
}
\description{
Reads data from a sheet in a DataPack object. This function is
essentially a wrapper for \code{readxl}'s \code{read_excel} function, but with additional
support for selecting default parameters per DataPack setup.
}
\author{
Scott Jackson
}
|
testlist <- list(data = structure(0, .Dim = c(1L, 1L)), x = structure(c(8.05951547075097e+282, 127919.372550964, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 1L)))
result <- do.call(distr6:::C_EmpiricalMVCdf,testlist)
str(result) | /distr6/inst/testfiles/C_EmpiricalMVCdf/libFuzzer_C_EmpiricalMVCdf/C_EmpiricalMVCdf_valgrind_files/1610383534-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 224 | r | testlist <- list(data = structure(0, .Dim = c(1L, 1L)), x = structure(c(8.05951547075097e+282, 127919.372550964, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 1L)))
result <- do.call(distr6:::C_EmpiricalMVCdf,testlist)
str(result) |
library(here)
i_am(
"R/ordTbl.R"
)
library(tidyverse)
#ordinate
#' Organize ordination attributes into plot-able tibble
#'
#' Runs an unconstrained PCA (\code{vegan::rda} with no predictors), extracts
#' the site (sample) scores as x/y coordinates, computes each axis'
#' explained-variance proportion, and binds both onto the supplied sample
#' metadata.
#'
#' @param ...commTbl community table (samples x species) accepted by
#' \code{vegan::rda}; rows must align with \code{...metaTbl}
#' @param ...metaTbl tbl of site info as cols, one row per sample
#'
#' @return tibble of coordinates and sample attributes; one \code{*_prop}
#' column per axis holds that axis' explained-variance proportion
#' @export
#' @import tidyverse, vegan
#' @examples
getOrdVarTbl <- function(
...commTbl,
...metaTbl
) {
#PCA----
# rda() without constraints is a plain principal components analysis
ord <- vegan::rda(
...commTbl
)
#xy----
# site scores = sample coordinates on the retained axes
ordTbl <- vegan::scores(
ord
)$sites %>%
as_tibble()
#percent----
# summary()$cont$importance: row 2 is "Proportion Explained" per axis
# (row 1 = eigenvalue, row 3 = cumulative proportion)
ordAxes <- ord %>%
summary() %>%
.$cont %>%
.$importance %>%
as_tibble(.) %>%
# select(
# PC1, PC2
# ) %>%
filter(
row_number() == 2
) %>%
round(4)
# as.numeric()
#join----
# bind metadata to coordinates, then attach the one-row proportions tibble
# as a packed column and unnest, yielding one "<axis>_prop" column per axis
cbind(
...metaTbl,
ordTbl
) %>%
as_tibble() %>%
mutate(
ordAxes = ordAxes %>%
rename_with(
~ paste0(
.,
"_prop"
)
)
) %>%
unnest(
ordAxes
# names_repair = "universal"
) %>%
as_tibble() %>%
return()
}
#ggplot::scale_color_brewer()
#experimental----
#' Get stats from ordination
#'
#' Computes a dissimilarity matrix from the community table (Bray-Curtis,
#' the \code{vegdist} default) and runs a PERMANOVA (\code{adonis}) testing
#' whether the dissimilarities are explained by the main grouping variable.
#'
#' @param ...commTbl community table (samples x species) for \code{vegdist}
#' @param ...cleanData tbl of sample metadata, one row per observation
#' @param ...uniqueLevels character vector of metadata column names that
#'   jointly identify each sample (used to collapse to one row per sample)
#' @param ...mainVar character scalar: name of the grouping variable to test
#'
#' @return the fitted \code{adonis} object
#' @export
#' @import vegan, tidyverse
#' @examples
getOrdStatTbl <- function(
  ...commTbl,
  ...cleanData,
  ...uniqueLevels,
  ...mainVar
) {
  # Bray-Curtis dissimilarities between samples
  # (bug fix: previously referenced `commTbl`, which does not exist)
  distMat <- vegdist(
    ...commTbl
  )
  # one metadata row per sample: distinct combinations of the id columns
  # (bug fix: previously passed a quoted symbol, not the named columns)
  metaTbl <- ...cleanData %>%
    distinct(
      across(all_of(...uniqueLevels))
    )
  # build the formula `distMat ~ <mainVar>` from the supplied column name
  # (bug fix: `ordModel` was used but its definition was commented out)
  ordModel <- reformulate(
    ...mainVar,
    response = "distMat"
  )
  # PERMANOVA; 99 permutations keeps this fast — raise for final p-values
  ordStat <- adonis(
    ordModel,
    data = metaTbl,
    permutations = 99
  )
  return(ordStat)
}
| /R/ordTbl.R | permissive | nmedina17/oir | R | false | false | 1,718 | r | library(here)
i_am(
"R/ordTbl.R"
)
library(tidyverse)
#ordinate
#' Organize ordination attributes into plot-able tibble
#'
#' @param ...commTbl
#' @param ...metaTbl tbl of site info as cols
#'
#' @return tibble of coordinates and sample attributes
#' @export
#' @import tidyverse, vegan
#' @examples
getOrdVarTbl <- function(
...commTbl,
...metaTbl
) {
#PCA----
ord <- vegan::rda(
...commTbl
)
#xy----
ordTbl <- vegan::scores(
ord
)$sites %>%
as_tibble()
#percent----
ordAxes <- ord %>%
summary() %>%
.$cont %>%
.$importance %>%
as_tibble(.) %>%
# select(
# PC1, PC2
# ) %>%
filter(
row_number() == 2
) %>%
round(4)
# as.numeric()
#join----
cbind(
...metaTbl,
ordTbl
) %>%
as_tibble() %>%
mutate(
ordAxes = ordAxes %>%
rename_with(
~ paste0(
.,
"_prop"
)
)
) %>%
unnest(
ordAxes
# names_repair = "universal"
) %>%
as_tibble() %>%
return()
}
#ggplot::scale_color_brewer()
#experimental----
#' Get stats from ordination
#'
#' Computes a dissimilarity matrix from the community table (Bray-Curtis,
#' the \code{vegdist} default) and runs a PERMANOVA (\code{adonis}) testing
#' whether the dissimilarities are explained by the main grouping variable.
#'
#' @param ...commTbl community table (samples x species) for \code{vegdist}
#' @param ...cleanData tbl of sample metadata, one row per observation
#' @param ...uniqueLevels character vector of metadata column names that
#'   jointly identify each sample (used to collapse to one row per sample)
#' @param ...mainVar character scalar: name of the grouping variable to test
#'
#' @return the fitted \code{adonis} object
#' @export
#' @import vegan, tidyverse
#' @examples
getOrdStatTbl <- function(
  ...commTbl,
  ...cleanData,
  ...uniqueLevels,
  ...mainVar
) {
  # Bray-Curtis dissimilarities between samples
  # (bug fix: previously referenced `commTbl`, which does not exist)
  distMat <- vegdist(
    ...commTbl
  )
  # one metadata row per sample: distinct combinations of the id columns
  # (bug fix: previously passed a quoted symbol, not the named columns)
  metaTbl <- ...cleanData %>%
    distinct(
      across(all_of(...uniqueLevels))
    )
  # build the formula `distMat ~ <mainVar>` from the supplied column name
  # (bug fix: `ordModel` was used but its definition was commented out)
  ordModel <- reformulate(
    ...mainVar,
    response = "distMat"
  )
  # PERMANOVA; 99 permutations keeps this fast — raise for final p-values
  ordStat <- adonis(
    ordModel,
    data = metaTbl,
    permutations = 99
  )
  return(ordStat)
}
|
ames_train <- read.csv(file.choose())
ames_test <- read.csv(file.choose())
View(ames_train)
str(ames_test)
###conbining traing and test data set
df.combined <- rbind(within(ames_train,rm("Id","SalePrice")),within(ames_test,rm("Id")))
dim(df.combined)
summary(ames_train$SalePrice)
library(e1071) ### skewness is present in e1071
skewness(ames_train$SalePrice)
#### sales price is right skewed.
hist(ames_train$SalePrice)
### log transform of saleprice to improve linearity of data
skewness(log(ames_train$SalePrice))
hist(log(ames_train$SalePrice)) ### data is normally distributed
#### finding out the data type of the independent variables
sapply(ames_train,class)
##################################### Replacing the NA values ###########################
###finding out the number of NAs
na.cols <- which(colSums(is.na(df.combined)) > 0)
sort(colSums(sapply(df.combined[na.cols],is.na)),decreasing=TRUE)
paste('There are', length(na.cols), 'columns with missing values')
# helper function for plotting categoric data for easier data visualization
# For each column in `cols`, draws a frequency-ordered bar chart of `df`'s
# category counts with count labels above the bars.
# (bug fix: the ordering line previously read the global `df.combined`
# instead of the `df` argument; every call site passes df.combined, so
# existing behavior is unchanged, but the function now honors its argument.)
plot.categoric <- function(cols, df){
  for (col in cols) {
    # category levels ordered by decreasing frequency in the plotted data
    order.cols <- names(sort(table(df[,col]), decreasing = TRUE))
    num.plot <- qplot(df[,col]) +
      geom_bar(fill = 'cornflowerblue') +
      geom_text(aes(label = ..count..), stat='count', vjust=-0.5) +
      theme_minimal() +
      # 10% headroom so the count labels are not clipped
      scale_y_continuous(limits = c(0,max(table(df[,col]))*1.1)) +
      scale_x_discrete(limits = order.cols) +
      xlab(col) +
      theme(axis.text.x = element_text(angle = 30, size=12))
    print(num.plot)
  }
}
################### PoolQC: Pool quality
table(df.combined$PoolQC)
plot.categoric('PoolQC', df.combined)
## finding out rows where pool area >0 and pool quality is n.a.. for these rows n.a values should be replaced
##by non zero values
df.combined[(df.combined$PoolArea>0)& (is.na(df.combined$PoolQC)),c("PoolArea","PoolQC")]
## finding the avg. pool area of the 3 categories of pool quality
tapply(df.combined$PoolArea,df.combined$PoolQC,mean)
### the assigning the category closest to avg. value of the pool areas in those categories
df.combined[2421,'PoolQC'] <- 'Ex'
df.combined[2504,'PoolQC'] <- 'Ex'
df.combined[2600,'PoolQC'] <- 'Fa'
df.combined$PoolQC <- as.character(df.combined$PoolQC) ##to add none as a factor
df.combined$PoolQC[is.na(df.combined$PoolQC)] <- 'None'
df.combined$PoolQC <- as.factor(df.combined$PoolQC)
summary(df.combined$PoolQC)
############# Garage features
### GarageYrBlt: Year garage was built
length(which(df.combined$YearBuilt==df.combined$GarageYrBlt)) ##tells us 2216 of the 2919 houses have same year for for GarageYrBlt and YearBuit
### replacing the NA values with the year the house was built
idx <- which(is.na(df.combined$GarageYrBlt))
df.combined$GarageYrBlt[idx] <- df.combined$YearBuilt[idx]
##### checking for 'GarageQual', 'GarageFinish', 'GarageCond', 'GarageType'
garage.cols <- c('GarageArea', 'GarageCars', 'GarageQual', 'GarageFinish', 'GarageCond', 'GarageType')
df.combined[is.na(df.combined$GarageCond),garage.cols]
idx <- which(((df.combined$GarageArea < 370) & (df.combined$GarageArea > 350)) & (df.combined$GarageCars == 1))
names(sapply(df.combined[idx, garage.cols], function(x) sort(table(x), decreasing=TRUE)[1]))
##assigning the most frequent values
df.combined[2127,'GarageQual'] = 'TA'
df.combined[2127, 'GarageFinish'] = 'Unf'
df.combined[2127, 'GarageCond'] = 'TA'
str(df.combined[idx,garage.cols])
df.combined$GarageFinish <- as.character(df.combined$GarageFinish)
df.combined$GarageFinish[is.na(df.combined$GarageFinish)] <- 'None'
df.combined$GarageFinish <- as.factor(df.combined$GarageFinish)
df.combined$GarageCond <- as.character(df.combined$GarageCond)
df.combined$GarageCond[is.na(df.combined$GarageCond)] <- 'None'
df.combined$GarageCond <- as.factor(df.combined$GarageCond)
df.combined$GarageType <- as.character(df.combined$GarageType)
df.combined$GarageType[is.na(df.combined$GarageType)] <- 'None'
df.combined$GarageType <- as.factor(df.combined$GarageType)
df.combined$GarageQual <- as.character(df.combined$GarageQual)
df.combined$GarageQual[is.na(df.combined$GarageQual)] <- 'None'
df.combined$GarageQual <- as.factor(df.combined$GarageQual)
df.combined$GarageArea[2577] <- 0
df.combined$GarageCars[2577] <- 0
##############KitchenQual: Kitchen quality and Electrical: Electrical system
## replacing NA with most frequent value. (only 1 na present for both)
table(df.combined$KitchenQual)
df.combined$KitchenQual[is.na(df.combined$KitchenQual)] = 'TA'
table(df.combined$Electrical)
df.combined$Electrical[is.na(df.combined$Electrical)] = 'SBrkr'
###############Basement features
install.packages("stringr")
library(stringr) ##for str_detect() funtion
### locating the NA rows of all the basement features
bsmt.cols <- names(df.combined)[sapply(names(df.combined), function(x) str_detect(x, 'Bsmt'))]
str(df.combined[is.na(df.combined$BsmtExposure),bsmt.cols])
###no is the most frequent value.
table(df.combined[,"BsmtExposure"])
df.combined[c(949, 1488, 2349), 'BsmtExposure'] = 'No'
## giving the value of None to the other rows
df.combined$BsmtQual <- as.character(df.combined$BsmtQual)
df.combined$BsmtQual[is.na(df.combined$BsmtQual)] <- 'None'
df.combined$BsmtQual <- as.factor(df.combined$BsmtQual)
df.combined$BsmtCond <- as.character(df.combined$BsmtCond)
df.combined$BsmtCond[is.na(df.combined$BsmtCond)] <- 'None'
df.combined$BsmtCond <- as.factor(df.combined$BsmtCond)
df.combined$BsmtExposure <- as.character(df.combined$BsmtExposure)
df.combined$BsmtExposure[is.na(df.combined$BsmtExposure)] <- 'None'
df.combined$BsmtExposure <- as.factor(df.combined$BsmtExposure)
df.combined$BsmtFinType1 <- as.character(df.combined$BsmtFinType1)
df.combined$BsmtFinType1[is.na(df.combined$BsmtFinType1)] <- 'None'
df.combined$BsmtFinType1 <- as.factor(df.combined$BsmtFinType1)
df.combined$BsmtFinType2 <- as.character(df.combined$BsmtFinType2)
df.combined$BsmtFinType2[is.na(df.combined$BsmtFinType2)] <- 'None'
df.combined$BsmtFinType2 <- as.factor(df.combined$BsmtFinType2)
for (col in bsmt.cols){
if (sapply(df.combined[col], is.numeric) == TRUE){
df.combined[sapply(df.combined[col], is.na),col] = 0
}
}
########### Exterior features
table(df.combined$Exterior1st)
table(df.combined$Exterior2nd)
#### since only 1 N.A value for each.. we are replacing them with "other" as NA is likely due to having an exterior cover that is not listed.
df.combined$Exterior1st <- as.character(df.combined$Exterior1st)
df.combined$Exterior1st[is.na(df.combined$Exterior1st)] <- "Other"
df.combined$Exterior1st <- as.factor(df.combined$Exterior1st)
df.combined$Exterior2nd <- as.character(df.combined$Exterior2nd)
df.combined$Exterior2nd[is.na(df.combined$Exterior2nd)] <- "Other"
df.combined$Exterior2nd <- as.factor(df.combined$Exterior2nd)
########### Sale type
### sale type and sale condition are related to each other
## finding the sale condition for the sale type = N.A
df.combined[which(is.na(df.combined$SaleType)),"SaleCondition"]
##### finding out the most frequent sale type for sale condition=Normal
table(df.combined$SaleCondition,df.combined$SaleType)
##replacing NA with WD
df.combined$SaleType[is.na(df.combined$SaleType)] = 'WD'
#################Functional
df.combined[which(is.na(df.combined$Functional)),"OverallCond"]
table(df.combined$OverallCond,df.combined$Functional)
df.combined$Functional[2217] = 'Typ'
df.combined$Functional[2474] = 'Maj1'
#####################Utilities
## all are PUB values except for 1
table(df.combined$Utilities)
## the only non PUB value belongs to the training set
which(df.combined$Utilities=="NoSeWa")
## dropping the utilities column.. as it shows no variation
utilities.drop <- "Utilities"
df.combined <- df.combined[,!names(df.combined) %in% c("Utilities") ]
################# MSZoning feature
### MSZoning is realted to MS Sub class
df.combined[which(is.na(df.combined$MSZoning)),c("MSZoning","MSSubClass")]
table(df.combined$MSZoning,df.combined$MSSubClass)
### gving the values of higest fequency appropriately
df.combined$MSZoning[1916] <- "RM"
df.combined$MSZoning[2217] <- "RL"
df.combined$MSZoning[2251] <- "RM"
df.combined$MSZoning[2905] <- "RL"
############# MasVnrType: Masonry veneer type and MasVnrArea: Masonry veneer area in square feet
### checking if the NA values for both are for the same rows in the data set
### (bug fix: the second condition previously re-tested MasVnrType, so rows
### with a missing area but known type were never shown)
df.combined[(is.na(df.combined$MasVnrType)) | (is.na(df.combined$MasVnrArea)),c("MasVnrType","MasVnrArea")]
### find the avg area for each type (na.rm so remaining NAs don't poison the means)
tapply(df.combined$MasVnrArea,df.combined$MasVnrType,mean,na.rm=TRUE)
df.combined[2611,"MasVnrType"] <- "BrkCmn"
## assigning 0 to the remaining areas and None to the remaining types
df.combined$MasVnrArea[is.na(df.combined$MasVnrArea)] <- 0
df.combined$MasVnrType[is.na(df.combined$MasVnrType)] = 'None'
############################ LotFrontage: Linear feet of street connected to property
tapply(df.combined$LotFrontage,df.combined$Neighborhood,median,na.rm=T)
library(dplyr) ### for group_by function
df.combined['Nbrh.factor'] <- factor(df.combined$Neighborhood, levels = unique(df.combined$Neighborhood))
lot.by.nbrh <- df.combined[,c('Neighborhood','LotFrontage')] %>%
group_by(Neighborhood) %>%
summarise(median = median(LotFrontage, na.rm = TRUE))
(lot.by.nbrh)
idx = which(is.na(df.combined$LotFrontage))
for (i in idx){
lot.median <- lot.by.nbrh[lot.by.nbrh$Neighborhood == df.combined$Neighborhood[i],'median']
df.combined[i,'LotFrontage'] <- lot.median[[1]]
}
############ Fence: Fence quality and misc. feature
#We can replace any missing vlues for Fence and MiscFeature with 'None'
#as they probably don't have this feature with their property.
df.combined$Fence <- as.character(df.combined$Fence)
df.combined$Fence[is.na(df.combined$Fence)] <- "None"
df.combined$Fence <- as.factor(df.combined$Fence)
df.combined$MiscFeature <- as.character(df.combined$MiscFeature)
df.combined$MiscFeature[is.na(df.combined$MiscFeature)] <- "None"
df.combined$MiscFeature <- as.factor(df.combined$MiscFeature)
###########Fireplaces: Number of fireplaces and FireplaceQu: Fireplace quality
table(df.combined$Fireplaces,df.combined$FireplaceQu)
### no such combination is there
which((df.combined$Fireplaces > 0) & (is.na(df.combined$FireplaceQu)))
df.combined$FireplaceQu <- as.character(df.combined$FireplaceQu)
df.combined$FireplaceQu[is.na(df.combined$FireplaceQu)] = 'None'
df.combined$FireplaceQu <- as.factor(df.combined$FireplaceQu)
########## Alley
df.combined$Alley <- as.character(df.combined$Alley)
df.combined$Alley[is.na(df.combined$Alley)] = 'None'
df.combined$Alley <- as.factor(df.combined$Alley)
#################################
paste('There are', sum(sapply(df.combined, is.na)), 'missing values left')
################################ separating numeric and categorical features
num_features <- names(which(sapply(df.combined, is.numeric)))
cat_features <- names(which(sapply(df.combined, is.factor)))
cat_features
df.numeric <- df.combined[num_features]
###############################converting ordinal data into numeric
sapply(df.combined,class)
##splitting into train data
group.df <- df.combined[1:1460,]
group.df$SalePrice <- ames_train$SalePrice
dim(group.df)
install.packages("ggplot2")
library(ggplot2)
install.packages("magrittr")
library(magrittr)
install.packages("scales")
library(scales)
library(dplyr)
# Summarize mean OverallQual / mean SalePrice per level of a categoric
# column of the global `group.df`, plot mean price by level (bars ordered
# by decreasing mean price), and return the summary as a data.frame.
# `col` is a character scalar naming the column.
group.prices <- function(col) {
  group.table <- group.df[,c(col, 'SalePrice', 'OverallQual')] %>%
    # group_by_() is deprecated; select the column by its character name
    group_by(across(all_of(col))) %>%
    summarise(mean.Quality = round(mean(OverallQual),2),
              mean.Price = mean(SalePrice), n = n()) %>%
    arrange(mean.Quality)

  print(qplot(x=reorder(group.table[[col]], -group.table[['mean.Price']]), y=group.table[['mean.Price']]) +
          geom_bar(stat='identity', fill='cornflowerblue') +
          theme_minimal() +
          scale_y_continuous(labels = dollar) +
          labs(x=col, y='Mean SalePrice') +
          theme(axis.text.x = element_text(angle = 45)))

  return(data.frame(group.table))
}
## function to compute the mean overall quality for each level of a
## categoric column (character name) of the global `df.combined`;
## returns a data.frame sorted by ascending mean quality
quality.mean <- function(col) {
  group.table <- df.combined[,c(col, 'OverallQual')] %>%
    # group_by_() is deprecated; select the column by its character name
    group_by(across(all_of(col))) %>%
    summarise(mean.qual = mean(OverallQual)) %>%
    arrange(mean.qual)

  return(data.frame(group.table))
}
# function that maps a categoric value to its corresponding numeric value
# and returns that column attached to the data frame.
# `cols`: character vector of column names to translate;
# `map.list`: named vector mapping category label -> numeric code;
# `df`: data frame receiving the numeric columns (values are looked up in
# the global `df.combined`, by design of this script).
map.fcn <- function(cols, map.list, df){
  for (column in cols) {
    codes <- map.list[df.combined[, column]]
    df[column] <- as.numeric(codes)
  }
  df
}
###Any of the columns with the suffix 'Qual' or 'Cond' denote the quality or condition of that specific feature.
###Each of these columns have the potential values: TA, Fa, Gd, None, Ex, Po.
###We'll compute the mean house prices for these unique values to get a better sense of what their abbreviations mean.
qual.cols <- c('ExterQual', 'ExterCond', 'GarageQual', 'GarageCond', 'FireplaceQu', 'KitchenQual', 'HeatingQC', 'BsmtQual')
group.prices('FireplaceQu')
group.prices('BsmtQual')
group.prices('KitchenQual')
###From seeing the mean saleprices from a few of the quality and condition features we can infer that the abbreviations mean poor, fair, typical/average, good and excelent.
###We'll map numeric values from 0-5 to their corresponding categoric values (including 0 for None) and combine that to our dataframe.
##Note: we will set 'None' = 0 for all categories as None signifies that the house does not have that particular quality/condition to rank
###and regardless of the houses overall quality or sale price we will keep 'None' = 0 for consistency.
qual.list <- c('None' = 0, 'Po' = 1, 'Fa' = 2, 'TA' = 3, 'Gd' = 4, 'Ex' = 5)
df.numeric <- map.fcn(qual.cols, qual.list, df.numeric)
group.prices('BsmtExposure')
bsmt.list <- c('None' = 0, 'No' = 1, 'Mn' = 2, 'Av' = 3, 'Gd' = 4)
df.numeric = map.fcn(c('BsmtExposure'), bsmt.list, df.numeric)
group.prices('BsmtFinType1')
# visualization for BsmtFinTyp2 instead of another table
df.combined[,c('BsmtFinType1', 'BsmtFinSF1')] %>%
group_by(BsmtFinType1) %>%
summarise(medianArea = median(BsmtFinSF1), counts = n()) %>%
arrange(medianArea) %>%
ggplot(aes(x=reorder(BsmtFinType1,-medianArea), y=medianArea)) +
geom_bar(stat = 'identity', fill='cornflowerblue') +
labs(x='BsmtFinType2', y='Median of BsmtFinSF2') +
geom_text(aes(label = sort(medianArea)), vjust = -0.5) +
scale_y_continuous(limits = c(0,850)) +
theme_minimal()
##Through investigating the relationships between the basement quality and areas we an see the true order of qualities of each basement to be
##'None' < 'Unf' < 'LwQ' < 'BLQ' < 'Rec' < 'ALQ' < 'GLQ'.
bsmt.fin.list <- c('None' = 0, 'Unf' = 1, 'LwQ' = 2,'Rec'= 3, 'BLQ' = 4, 'ALQ' = 5, 'GLQ' = 6)
df.numeric <- map.fcn(c('BsmtFinType1','BsmtFinType2'), bsmt.fin.list, df.numeric)
group.prices('Functional')
functional.list <- c('None' = 0, 'Sal' = 1, 'Sev' = 2, 'Maj2' = 3, 'Maj1' = 4, 'Mod' = 5, 'Min2' = 6, 'Min1' = 7, 'Typ'= 8)
df.numeric['Functional'] <- as.numeric(functional.list[df.combined$Functional])
group.prices('GarageFinish')
garage.fin.list <- c('None' = 0,'Unf' = 1, 'RFn' = 2, 'Fin' = 3)
df.numeric['GarageFinish'] <- as.numeric(garage.fin.list[df.combined$GarageFinish])
group.prices('Fence')
fence.list <- c('None' = 0, 'MnWw' = 1, 'GdWo' = 2, 'MnPrv' = 3, 'GdPrv' = 5)
df.numeric['Fence'] <- as.numeric(fence.list[df.combined$Fence])
MSdwelling.list <- c('20' = 1, '30'= 0, '40' = 0, '45' = 0,'50' = 0, '60' = 1, '70' = 0, '75' = 0, '80' = 0, '85' = 0, '90' = 0, '120' = 1, '150' = 0, '160' = 0, '180' = 0, '190' = 0)
df.numeric['NewerDwelling'] <- as.numeric(MSdwelling.list[as.character(df.combined$MSSubClass)])
######### calculating the correlation between sale price and the categorical variables(which have been converted to ordinal variables)
library(corrplot)
# need the SalePrice column
corr.df <- cbind(df.numeric[1:1460,], ames_train['SalePrice'])
# only using the first 1460 rows - training data
correlations <- cor(corr.df)
# only want the columns that show strong correlations with SalePrice
corr.SalePrice <- as.matrix(sort(correlations[,'SalePrice'], decreasing = TRUE))
corr.idx <- names(which(apply(corr.SalePrice, 1, function(x) (x > 0.5 | x < -0.5))))
corrplot(as.matrix(correlations[corr.idx,corr.idx]), type = 'upper', method='color', addCoef.col = 'black', tl.cex = .7,cl.cex = .7, number.cex=.7)
###matrix of scatter plots to see what these relationships look like under the hood
###to get a better sense of whats going on.
install.packages("GGally")
library(GGally)
# ggpairs panel builder: scatter plot overlaid with a loess fit (red) and a
# straight-line lm fit (blue) so curvature relative to linear is visible.
# `data`/`mapping`/`...` follow the GGally custom-panel signature.
lm.plt <- function(data, mapping, ...){
plt <- ggplot(data = data, mapping = mapping) +
geom_point(shape = 20, alpha = 0.7, color = 'darkseagreen') +
# local polynomial (loess) fit — highlights non-linear trends
geom_smooth(method=loess, fill="red", color="red") +
# simple linear fit for comparison
geom_smooth(method=lm, fill="blue", color="blue") +
theme_minimal()
return(plt)
}
#The blue lines in the scatter plots represent a simple linear regression fit while the red lines represent a local polynomial fit.
#We can see both OverallQual and GrLivArea and TotalBsmtSF follow a linear model, but have some outliers we may want to look into.
#For instance, there are multiple houses with an overall quality of 10, but have suspisciously low prices.
#We can see similar behavior in GrLivArea and TotalBsmtSF. GarageCars and GarageArea both follow more of a quadratic fit.
#It seems that having a 4 car garage does not result in a higher house price and same with an extremely large area.
ggpairs(corr.df, corr.idx[1:6], lower = list(continuous = lm.plt))
ggpairs(corr.df, corr.idx[c(1,7:11)], lower = list(continuous = lm.plt))
##############################################
##########Nominal Variables
#LotShape has 3 values for having an irregular shape and only 1 for regular.
#We can create a binary column that returns 1 for houses with a regular lot shape and 0 for houses with any of the 3 irregular lot shapes.
#Using this method of turning a categoric feature into a binary column will ultimately help our data
#train better through boosted models without using numeric placeholders on nominal data.
plot.categoric('LotShape', df.combined)
df.numeric['RegularLotShape'] <- (df.combined$LotShape == 'Reg') * 1
table(df.numeric$RegularLotShape)
table(df.combined$LotShape)
# Same process is applied to the other nominal variables as well
plot.categoric('LandContour', df.combined)
df.numeric['LandLeveled'] <- (df.combined$LandContour == 'Lvl') * 1
plot.categoric('LandSlope', df.combined)
df.numeric['LandSlopeGentle'] <- (df.combined$LandSlope == 'Gtl') * 1
plot.categoric('Electrical', df.combined)
df.numeric['ElectricalSB'] <- (df.combined$Electrical == 'SBrkr') * 1
plot.categoric('GarageType', df.combined)
df.numeric['GarageDetchd'] <- (df.combined$GarageType == 'Detchd') * 1
plot.categoric('PavedDrive', df.combined)
df.numeric['HasPavedDrive'] <- (df.combined$PavedDrive == 'Y') * 1
df.numeric['HasWoodDeck'] <- (df.combined$WoodDeckSF > 0) * 1
df.numeric['Has2ndFlr'] <- (df.combined$X2ndFlrSF > 0) * 1
df.numeric['HasMasVnr'] <- (df.combined$MasVnrArea > 0) * 1
table(df.combined$WoodDeckSF)
plot.categoric('MiscFeature', df.combined)
#For MiscFeature the only feature with a significant amount of houses having it is Shed.
#We can one-hot encode houses that have Sheds vs those who do not.
df.numeric['HasShed'] <- (df.combined$MiscFeature == 'Shed') * 1
################# feature engineering
#Many of the houses recorded the same year for YearBuilt and YearRemodAdd.
#We can create a new column that records that a house was remodelled
#if the year it was built is different than the remodel year. This
df.numeric['Remodeled'] <- (df.combined$YearBuilt != df.combined$YearRemodAdd) * 1
#We can also create a column that seperates which houses have been recently remodelled vs those who are not.
#Houses that have been remodelled after the year they were sold will fall into this category.
df.numeric['RecentRemodel'] <- (df.combined$YearRemodAdd >= df.combined$YrSold) * 1
#There can be potential value to homes who were sold the same year they were built as this could be an indicator
#that these houses were hot in the marke
df.numeric['NewHouse'] <- (df.combined$YearBuilt == df.combined$YrSold) * 1
#What about the houses with area based features equal to 0? Houses with 0 square footage for a columnshows that the house does not have that feature at all.
#We add a one-hot encoded column for returning 1 for any house with an area greater than 0
#since this means that the house does have this feature and 0 for those who do not
cols.binary <- c('X2ndFlrSF', 'MasVnrArea', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', 'X3SsnPorch', 'ScreenPorch')
for (col in cols.binary){
df.numeric[str_c('Has',col)] <- (df.combined[,col] != 0) * 1
}
### see how houses sold month wise
ggplot(df.combined, aes(x=MoSold)) +
geom_bar(fill = 'cornflowerblue') +
geom_text(aes(label=..count..), stat='count', vjust = -.5) +
theme_minimal() +
scale_x_continuous(breaks = 1:12)
#The largest proportion of houses sold is during the summer months: May, June, July.
#Let's add a column that seperates the the summer houses from the rest.
df.numeric['HighSeason'] <- (df.combined$MoSold %in% c(5,6,7)) * 1
### some neighbourhoods are more expensive than others
ames_train[,c('Neighborhood','SalePrice')] %>%
group_by(Neighborhood) %>%
summarise(median.price = median(SalePrice, na.rm = TRUE)) %>%
arrange(median.price) %>%
mutate(nhbr.sorted = factor(Neighborhood, levels=Neighborhood)) %>%
ggplot(aes(x=nhbr.sorted, y=median.price)) +
geom_point() +
geom_text(aes(label = median.price, angle = 45), vjust = 2) +
theme_minimal() +
labs(x='Neighborhood', y='Median price') +
theme(text = element_text(size=12),
axis.text.x = element_text(angle=45))
library(dplyr) ### needed for group_by function
#StoneBr, NoRidge, NridgHt have a large gap between them versus the rest of the median prices from any of the other neighborhods.
#It would be wise of us to check if this is from outliers or if these houses are much pricier as a whole.
other.nbrh <- unique(df.combined$Neighborhood)[!unique(df.combined$Neighborhood) %in% c('StoneBr', 'NoRidge','NridgHt')]
ggplot(ames_train, aes(x=SalePrice, y=GrLivArea, colour=Neighborhood)) +
geom_point(shape=16, alpha=.8, size=4) +
scale_color_manual(limits = c(other.nbrh, 'StoneBr', 'NoRidge', 'NridgHt'), values = c(rep('black', length(other.nbrh)), 'indianred',
'cornflowerblue', 'darkseagreen')) +
theme_minimal() +
scale_x_continuous(label=dollar)
#lets one-hot encode the more expensive neighborhoods and add that to our dataframe
# bug fix: 'Somerst, Timber' was a single malformed string (two neighborhoods
# fused) and 'NridgeHt' is a typo — the level in the data is 'NridgHt'
# (cf. nbrh.map below), so three neighborhoods silently never matched
nbrh.rich <- c('Crawfor', 'Somerst', 'Timber', 'StoneBr', 'NoRidge', 'NridgHt')
df.numeric['NbrhRich'] <- (df.combined$Neighborhood %in% nbrh.rich) *1
group.prices('Neighborhood')
nbrh.map <- c('MeadowV' = 0, 'IDOTRR' = 1, 'Sawyer' = 1, 'BrDale' = 1, 'OldTown' = 1, 'Edwards' = 1,
'BrkSide' = 1, 'Blueste' = 1, 'SWISU' = 2, 'NAmes' = 2, 'NPkVill' = 2, 'Mitchel' = 2,
'SawyerW' = 2, 'Gilbert' = 2, 'NWAmes' = 2, 'Blmngtn' = 2, 'CollgCr' = 2, 'ClearCr' = 3,
'Crawfor' = 3, 'Veenker' = 3, 'Somerst' = 3, 'Timber' = 3, 'StoneBr' = 4, 'NoRidge' = 4,
'NridgHt' = 4)
df.numeric['NeighborhoodBin'] <- as.numeric(nbrh.map[df.combined$Neighborhood])
### sale condition
group.prices('SaleCondition')
df.numeric['PartialPlan'] <- (df.combined$SaleCondition == 'Partial') * 1
group.prices('HeatingQC')
heating.list <- c('Po' = 0, 'Fa' = 1, 'TA' = 2, 'Gd' = 3, 'Ex' = 4)
df.numeric['HeatingScale'] <- as.numeric(heating.list[df.combined$HeatingQC])
area.cols <- c('LotFrontage', 'LotArea', 'MasVnrArea', 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF',
'TotalBsmtSF', 'X1stFlrSF', 'X2ndFlrSF', 'GrLivArea', 'GarageArea', 'WoodDeckSF',
'OpenPorchSF', 'EnclosedPorch', 'X3SsnPorch', 'ScreenPorch', 'LowQualFinSF', 'PoolArea')
df.numeric['TotalArea'] <- as.numeric(rowSums(df.combined[,area.cols]))
df.numeric['AreaInside'] <- as.numeric(df.combined$X1stFlrSF + df.combined$X2ndFlrSF)
#We've seen how strong of an effect the year of a house built has on the house price,
#therefore, as this dataset collects houses up until 2010
#we can determine how old a house is and how long ago the house was sold:
df.numeric['Age'] <- as.numeric(2010 - df.combined$YearBuilt)
df.numeric['TimeSinceSold'] <- as.numeric(2010 - df.combined$YrSold)
# how many years since the house was remodelled and sold
df.numeric['YearSinceRemodel'] <- as.numeric(df.combined$YrSold - df.combined$YearRemodAdd)
#####################################
###Correlation plot with OverallQual
library(corrplot)
corr.OverallQual <- as.matrix(sort(correlations[,'OverallQual'], decreasing = TRUE))
corr.idx <- names(which(apply(corr.OverallQual, 1, function(x) (x > 0.5 | x < -0.5))))
corrplot(as.matrix(correlations[corr.idx, corr.idx]), type = 'upper',
method = 'color', addCoef.col = 'black', tl.cex =.7, cl.cex = .7,
number.cex = .7)
############ outliers
train.test.df <- rbind(dplyr::select(ames_train,-SalePrice), ames_test)
train.test.df$type <- c(rep('train',1460),rep('test',1459))
ggplot(ames_train, aes(x=GrLivArea)) +
geom_histogram(fill='lightblue',color='white') +
theme_minimal()
outlier_values <- boxplot.stats(ames_train$GrLivArea)$out # outlier values.
boxplot(ames_train$GrLivArea, main="GrLivArea", boxwex=0.1)
mtext(paste("Outliers: ", paste(outlier_values[outlier_values>4000], collapse=", ")), cex=0.6)
ggplot(train.test.df, aes(x=type, y=GrLivArea, fill=type)) +
geom_boxplot() +
theme_minimal() +
scale_fill_manual(breaks = c("test", "train"), values = c("indianred", "lightblue"))
idx.outliers <- which(ames_train$GrLivArea > 4000)
df.numeric <- df.numeric[!1:nrow(df.numeric) %in% idx.outliers,]
df.combined <- df.combined[!1:nrow(df.combined) %in% idx.outliers,]
dim(df.numeric)
################################### Preprocessing
############### checking for normality of independent variable and standardizing the independent variables
###### normality check:skewness,kurtosis, Kolmogorov-Smirnof test
### log(x+1) is taken for higly skewed values
View(df.numeric)
library(moments)
library(psych)
# linear models assume normality from dependant variables
# transform any skewed data into normal
skewed <- apply(df.numeric, 2, skewness)
skewed <- skewed[(skewed > 0.8) | (skewed < -0.8)]
skewed
kurtosi <- apply(df.numeric, 2, kurtosis)
# bug fix: subset the computed vector `kurtosi`; the previous code indexed
# the `kurtosis` *function* itself, which raises an error
kurtosi <- kurtosi[(kurtosi > 3.0) | (kurtosi < -3.0)]
kurtosi
# not very useful in our case
ks.p.val <- NULL
for (i in 1:length(df.numeric)) {
test.stat <- ks.test(df.numeric[i], rnorm(1000))
ks.p.val[i] <- test.stat$p.value
}
ks.p.val
for(col in names(skewed)){
if(0 %in% df.numeric[, col]) {
df.numeric[,col] <- log(1+df.numeric[,col])
}
else {
df.numeric[,col] <- log(df.numeric[,col])
}
}
# normalize the data
library(caret)
scaler <- preProcess(df.numeric)
df.numeric <- predict(scaler, df.numeric)
#### For the rest of the categoric features we can one-hot encode each value to get as many splits in the data as possible
# one hot encoding for categorical data
# sparse data performs better for trees/xgboost
dummy <- dummyVars(" ~ ." , data=df.combined[,cat_features])
df.categoric <- data.frame(predict(dummy,newdata=df.combined[,cat_features]))
str(df.combined)
# Bin a year column of the global `df.combined` into fixed-width windows,
# writing the bin index into `col.name` of the global `df.categoric`
# (a local copy is modified and returned; caller reassigns it).
# Defaults reproduce the original behavior: 7 bins of 20 years starting at
# 1871 (data range is 1871..2010), now exposed as parameters so other
# ranges/widths can reuse the helper.
year.map = function(col.combined, col.name, first.year = 1871, n.bins = 7, bin.width = 20) {
  for (i in seq_len(n.bins)) {
    # years covered by the i-th window, inclusive on both ends
    year.seq = seq(first.year + (i-1)*bin.width, first.year + i*bin.width - 1)
    idx = which(df.combined[,col.combined] %in% year.seq)
    df.categoric[idx,col.name] = i
  }
  return(df.categoric)
}
df.categoric['GarageYrBltBin'] = 0
df.categoric <- year.map('GarageYrBlt', 'GarageYrBltBin')
df.categoric['YearBuiltBin'] = 0
df.categoric <- year.map('YearBuilt','YearBuiltBin')
df.categoric['YearRemodAddBin'] = 0
df.categoric <- year.map('YearRemodAdd', 'YearRemodAddBin')
bin.cols <- c('GarageYrBltBin', 'YearBuiltBin', 'YearRemodAddBin')
for (col in bin.cols) {
df.categoric <- cbind(df.categoric, model.matrix(~.-1, df.categoric[col]))
}
# lets drop the orginal 'GarageYrBltBin', 'YearBuiltBin', 'YearRemodAddBin' from our dataframe
df.categoric <- df.categoric[,!names(df.categoric) %in% bin.cols]
### combining into a single df
# Final modelling frame: engineered numeric features + one-hot categoric features.
df <- cbind(df.numeric, df.categoric)
str(df)
### distribution of housing prices
install.packages("WVPlots")
library(WVPlots)
# SalePrice for the training rows that survived the GrLivArea outlier filter.
y.true <- ames_train$SalePrice[which(!1:1460 %in% idx.outliers)]
# Histogram + kernel density of raw SalePrice, with a normal curve overlaid
# (same mean/sd as the data) to show how far from normal the raw target is.
qplot(y.true, geom='density') +# +(train, aes(x=SalePrice)) +
geom_histogram(aes(y=..density..), color='white',
fill='lightblue', alpha=.5, bins = 60) +
geom_line(aes(y=..density..), color='cornflowerblue', lwd = 1, stat = 'density') +
stat_function(fun = dnorm, colour = 'indianred', lwd = 1, args =
list(mean(ames_train$SalePrice), sd(ames_train$SalePrice))) +
scale_x_continuous(breaks = seq(0,800000,100000), labels = dollar) +
scale_y_continuous(labels = comma) +
theme_minimal() +
annotate('text', label = paste('skewness =', signif(skewness(ames_train$SalePrice),4)),
x=500000,y=7.5e-06)
qqnorm(ames_train$SalePrice)
qqline(ames_train$SalePrice)
#We can see from the histogram and the quantile-quantile plot that the distribution of sale prices is right-skewed and does not follow a normal distribution.
#Lets make a log-transformation and see how our data looks
# log1p-style transform; this is the regression target used downstream.
y_train <- log(y.true+1)
qplot(y_train, geom = 'density') +
geom_histogram(aes(y=..density..), color = 'white', fill = 'lightblue', alpha = .5, bins = 60) +
scale_x_continuous(breaks = seq(0,800000,100000), labels = comma) +
geom_line(aes(y=..density..), color='dodgerblue4', lwd = 1, stat = 'density') +
stat_function(fun = dnorm, colour = 'indianred', lwd = 1, args =
list(mean(y_train), sd(y_train))) +
#scale_x_continuous(breaks = seq(0,800000,100000), labels = dollar) +
scale_y_continuous(labels = comma) +
theme_minimal() +
annotate('text', label = paste('skewness =', signif(skewness(y_train),4)),
x=13,y=1) +
labs(x = 'log(SalePrice + 1)')
qqnorm(y_train)
qqline(y_train)
paste('The dataframe has', dim(df)[1], 'rows and', dim(df)[2], 'columns')
| /AMES.R | no_license | suvirmulky/House-Prices-Advanced-Regression-Techniques | R | false | false | 31,452 | r | ames_train <- read.csv(file.choose())
# Load the test split interactively and inspect both frames.
ames_test <- read.csv(file.choose())
View(ames_train)
str(ames_test)
###combining training and test data set
# Stack train+test (minus Id / SalePrice) so imputation and encoding are
# applied consistently across both splits.
df.combined <- rbind(within(ames_train,rm("Id","SalePrice")),within(ames_test,rm("Id")))
dim(df.combined)
summary(ames_train$SalePrice)
library(e1071) ### skewness is present in e1071
skewness(ames_train$SalePrice)
#### sales price is right skewed.
hist(ames_train$SalePrice)
### log transform of saleprice to improve linearity of data
skewness(log(ames_train$SalePrice))
hist(log(ames_train$SalePrice)) ### data is normally distributed
#### finding out the data type of the independent variables
sapply(ames_train,class)
##################################### Replacing the NA values ###########################
###finding out the number of NAs
# Columns in the combined frame containing at least one NA, with counts.
na.cols <- which(colSums(is.na(df.combined)) > 0)
sort(colSums(sapply(df.combined[na.cols],is.na)),decreasing=TRUE)
paste('There are', length(na.cols), 'columns with missing values')
# Helper to plot the distribution of one or more categoric features as
# labelled bar charts, ordered by descending frequency.
#
# Args:
#   cols: character vector of column names to plot.
#   df:   data frame containing those columns.
#
# Side effect: prints one ggplot per requested column.
plot.categoric <- function(cols, df){
for (col in cols) {
# Order the x-axis by frequency computed from `df` itself.
# (Fixed: the original read the global df.combined here, ignoring the
# `df` argument — callers always passed df.combined so behavior is the
# same for existing calls, but the function now honours its parameter.)
order.cols <- names(sort(table(df[,col]), decreasing = TRUE))
num.plot <- qplot(df[,col]) +
geom_bar(fill = 'cornflowerblue') +
geom_text(aes(label = ..count..), stat='count', vjust=-0.5) +
theme_minimal() +
# leave ~10% headroom above the tallest bar for the count labels
scale_y_continuous(limits = c(0,max(table(df[,col]))*1.1)) +
scale_x_discrete(limits = order.cols) +
xlab(col) +
theme(axis.text.x = element_text(angle = 30, size=12))
print(num.plot)
}
}
################### PoolQC: Pool quality
# Impute PoolQC: rows with a positive PoolArea but missing quality get the
# quality category whose mean pool area is closest; remaining NAs mean the
# house has no pool and become the explicit level 'None'.
table(df.combined$PoolQC)
plot.categoric('PoolQC', df.combined)
## finding out rows where pool area >0 and pool quality is n.a.. for these rows n.a values should be replaced
##by non zero values
df.combined[(df.combined$PoolArea>0)& (is.na(df.combined$PoolQC)),c("PoolArea","PoolQC")]
## finding the avg. pool area of the 3 categories of pool quality
tapply(df.combined$PoolArea,df.combined$PoolQC,mean)
### assigning the category closest to avg. value of the pool areas in those categories
df.combined[2421,'PoolQC'] <- 'Ex'
df.combined[2504,'PoolQC'] <- 'Ex'
df.combined[2600,'PoolQC'] <- 'Fa'
# round-trip through character so 'None' can be added as a new factor level
df.combined$PoolQC <- as.character(df.combined$PoolQC) ##to add none as a factor
df.combined$PoolQC[is.na(df.combined$PoolQC)] <- 'None'
df.combined$PoolQC <- as.factor(df.combined$PoolQC)
summary(df.combined$PoolQC)
############# Garage features
### GarageYrBlt: Year garage was built
length(which(df.combined$YearBuilt==df.combined$GarageYrBlt)) ##tells us 2216 of the 2919 houses have same year for GarageYrBlt and YearBuilt
### replacing the NA values with the year the house was built
idx <- which(is.na(df.combined$GarageYrBlt))
df.combined$GarageYrBlt[idx] <- df.combined$YearBuilt[idx]
##### checking for 'GarageQual', 'GarageFinish', 'GarageCond', 'GarageType'
garage.cols <- c('GarageArea', 'GarageCars', 'GarageQual', 'GarageFinish', 'GarageCond', 'GarageType')
df.combined[is.na(df.combined$GarageCond),garage.cols]
# Row 2127 has a real garage (area ~360, 1 car) but missing quality fields;
# impute with the mode among similar-sized 1-car garages.
idx <- which(((df.combined$GarageArea < 370) & (df.combined$GarageArea > 350)) & (df.combined$GarageCars == 1))
names(sapply(df.combined[idx, garage.cols], function(x) sort(table(x), decreasing=TRUE)[1]))
##assigning the most frequent values
df.combined[2127,'GarageQual'] = 'TA'
df.combined[2127, 'GarageFinish'] = 'Unf'
df.combined[2127, 'GarageCond'] = 'TA'
str(df.combined[idx,garage.cols])
# Remaining NAs = no garage at all: add an explicit 'None' level to each
# categoric garage column (character round-trip adds the new level safely).
df.combined$GarageFinish <- as.character(df.combined$GarageFinish)
df.combined$GarageFinish[is.na(df.combined$GarageFinish)] <- 'None'
df.combined$GarageFinish <- as.factor(df.combined$GarageFinish)
df.combined$GarageCond <- as.character(df.combined$GarageCond)
df.combined$GarageCond[is.na(df.combined$GarageCond)] <- 'None'
df.combined$GarageCond <- as.factor(df.combined$GarageCond)
df.combined$GarageType <- as.character(df.combined$GarageType)
df.combined$GarageType[is.na(df.combined$GarageType)] <- 'None'
df.combined$GarageType <- as.factor(df.combined$GarageType)
df.combined$GarageQual <- as.character(df.combined$GarageQual)
df.combined$GarageQual[is.na(df.combined$GarageQual)] <- 'None'
df.combined$GarageQual <- as.factor(df.combined$GarageQual)
# Row 2577 has no garage: zero out the numeric garage columns.
df.combined$GarageArea[2577] <- 0
df.combined$GarageCars[2577] <- 0
##############KitchenQual: Kitchen quality and Electrical: Electrical system
## replacing NA with most frequent value. (only 1 na present for both)
table(df.combined$KitchenQual)
df.combined$KitchenQual[is.na(df.combined$KitchenQual)] = 'TA'
table(df.combined$Electrical)
df.combined$Electrical[is.na(df.combined$Electrical)] = 'SBrkr'
###############Basement features
install.packages("stringr")
library(stringr) ##for str_detect() function
### locating the NA rows of all the basement features
# All columns whose name contains 'Bsmt'.
bsmt.cols <- names(df.combined)[sapply(names(df.combined), function(x) str_detect(x, 'Bsmt'))]
str(df.combined[is.na(df.combined$BsmtExposure),bsmt.cols])
###no is the most frequent value.
table(df.combined[,"BsmtExposure"])
# These three rows have a basement but missing exposure: impute with the mode.
df.combined[c(949, 1488, 2349), 'BsmtExposure'] = 'No'
## giving the value of None to the other rows
# Remaining NAs = no basement: add an explicit 'None' level to each
# categoric basement column via a character round-trip.
df.combined$BsmtQual <- as.character(df.combined$BsmtQual)
df.combined$BsmtQual[is.na(df.combined$BsmtQual)] <- 'None'
df.combined$BsmtQual <- as.factor(df.combined$BsmtQual)
df.combined$BsmtCond <- as.character(df.combined$BsmtCond)
df.combined$BsmtCond[is.na(df.combined$BsmtCond)] <- 'None'
df.combined$BsmtCond <- as.factor(df.combined$BsmtCond)
df.combined$BsmtExposure <- as.character(df.combined$BsmtExposure)
df.combined$BsmtExposure[is.na(df.combined$BsmtExposure)] <- 'None'
df.combined$BsmtExposure <- as.factor(df.combined$BsmtExposure)
df.combined$BsmtFinType1 <- as.character(df.combined$BsmtFinType1)
df.combined$BsmtFinType1[is.na(df.combined$BsmtFinType1)] <- 'None'
df.combined$BsmtFinType1 <- as.factor(df.combined$BsmtFinType1)
df.combined$BsmtFinType2 <- as.character(df.combined$BsmtFinType2)
df.combined$BsmtFinType2[is.na(df.combined$BsmtFinType2)] <- 'None'
df.combined$BsmtFinType2 <- as.factor(df.combined$BsmtFinType2)
# Zero-fill NAs in the numeric basement columns (no basement = zero area).
for (col in bsmt.cols){
if (sapply(df.combined[col], is.numeric) == TRUE){
df.combined[sapply(df.combined[col], is.na),col] = 0
}
}
########### Exterior features
table(df.combined$Exterior1st)
table(df.combined$Exterior2nd)
#### since only 1 N.A value for each.. we are replacing them with "other" as NA is likely due to having an exterior cover that is not listed.
df.combined$Exterior1st <- as.character(df.combined$Exterior1st)
df.combined$Exterior1st[is.na(df.combined$Exterior1st)] <- "Other"
df.combined$Exterior1st <- as.factor(df.combined$Exterior1st)
df.combined$Exterior2nd <- as.character(df.combined$Exterior2nd)
df.combined$Exterior2nd[is.na(df.combined$Exterior2nd)] <- "Other"
df.combined$Exterior2nd <- as.factor(df.combined$Exterior2nd)
########### Sale type
### sale type and sale condition are related to each other
## finding the sale condition for the sale type = N.A
df.combined[which(is.na(df.combined$SaleType)),"SaleCondition"]
##### finding out the most frequent sale type for sale condition=Normal
table(df.combined$SaleCondition,df.combined$SaleType)
##replacing NA with WD
df.combined$SaleType[is.na(df.combined$SaleType)] = 'WD'
#################Functional
# Impute Functional from its cross-tab with OverallCond.
df.combined[which(is.na(df.combined$Functional)),"OverallCond"]
table(df.combined$OverallCond,df.combined$Functional)
df.combined$Functional[2217] = 'Typ'
df.combined$Functional[2474] = 'Maj1'
#####################Utilities
## all are PUB values except for 1
table(df.combined$Utilities)
## the only non PUB value belongs to the training set
which(df.combined$Utilities=="NoSeWa")
## dropping the utilities column.. as it shows no variation
utilities.drop <- "Utilities"
df.combined <- df.combined[,!names(df.combined) %in% c("Utilities") ]
################# MSZoning feature
### MSZoning is related to MS Sub class
df.combined[which(is.na(df.combined$MSZoning)),c("MSZoning","MSSubClass")]
table(df.combined$MSZoning,df.combined$MSSubClass)
### giving the values of highest frequency appropriately
df.combined$MSZoning[1916] <- "RM"
df.combined$MSZoning[2217] <- "RL"
df.combined$MSZoning[2251] <- "RM"
df.combined$MSZoning[2905] <- "RL"
############# MasVnrType: Masonry veneer type and MasVnrArea: Masonry veneer area in square feet
### checking if the NA values for both are for the same rows in the data set
# (Fixed: the second condition originally repeated is.na(MasVnrType), so rows
# with a missing area but known type were never inspected.)
df.combined[(is.na(df.combined$MasVnrType)) | (is.na(df.combined$MasVnrArea)),c("MasVnrType","MasVnrArea")]
### find the avg area for each type
# na.rm = TRUE so the per-type means are not poisoned by the NA areas.
tapply(df.combined$MasVnrArea,df.combined$MasVnrType,mean,na.rm=TRUE)
# Row 2611 has an area but missing type: assign the type whose mean area is closest.
df.combined[2611,"MasVnrType"] <- "BrkCmn"
## assigning 0 to the remaining areas and none to the remaining types
df.combined$MasVnrArea[is.na(df.combined$MasVnrArea)] <- 0
df.combined$MasVnrType[is.na(df.combined$MasVnrType)] = 'None'
############################ LotFrontage: Linear feet of street connected to property
# Impute LotFrontage with the median frontage of the house's neighborhood.
tapply(df.combined$LotFrontage,df.combined$Neighborhood,median,na.rm=T)
library(dplyr) ### for group_by function
df.combined['Nbrh.factor'] <- factor(df.combined$Neighborhood, levels = unique(df.combined$Neighborhood))
lot.by.nbrh <- df.combined[,c('Neighborhood','LotFrontage')] %>%
group_by(Neighborhood) %>%
summarise(median = median(LotFrontage, na.rm = TRUE))
(lot.by.nbrh)
idx = which(is.na(df.combined$LotFrontage))
for (i in idx){
lot.median <- lot.by.nbrh[lot.by.nbrh$Neighborhood == df.combined$Neighborhood[i],'median']
df.combined[i,'LotFrontage'] <- lot.median[[1]]
}
############ Fence: Fence quality and misc. feature
#We can replace any missing values for Fence and MiscFeature with 'None'
#as they probably don't have this feature with their property.
df.combined$Fence <- as.character(df.combined$Fence)
df.combined$Fence[is.na(df.combined$Fence)] <- "None"
df.combined$Fence <- as.factor(df.combined$Fence)
df.combined$MiscFeature <- as.character(df.combined$MiscFeature)
df.combined$MiscFeature[is.na(df.combined$MiscFeature)] <- "None"
df.combined$MiscFeature <- as.factor(df.combined$MiscFeature)
###########Fireplaces: Number of fireplaces and FireplaceQu: Fireplace quality
table(df.combined$Fireplaces,df.combined$FireplaceQu)
### no house has fireplaces yet a missing quality, so all NAs mean 'no fireplace'
which((df.combined$Fireplaces > 0) & (is.na(df.combined$FireplaceQu)))
df.combined$FireplaceQu <- as.character(df.combined$FireplaceQu)
df.combined$FireplaceQu[is.na(df.combined$FireplaceQu)] = 'None'
df.combined$FireplaceQu <- as.factor(df.combined$FireplaceQu)
########## Alley
df.combined$Alley <- as.character(df.combined$Alley)
df.combined$Alley[is.na(df.combined$Alley)] = 'None'
df.combined$Alley <- as.factor(df.combined$Alley)
#################################
# Sanity check: all NAs should now be imputed.
paste('There are', sum(sapply(df.combined, is.na)), 'missing values left')
################################ separating numeric and categorical features
num_features <- names(which(sapply(df.combined, is.numeric)))
cat_features <- names(which(sapply(df.combined, is.factor)))
cat_features
# df.numeric accumulates all numeric + ordinal-encoded features from here on.
df.numeric <- df.combined[num_features]
###############################converting ordinal data into numeric
sapply(df.combined,class)
##splitting into train data
# group.df = training rows with SalePrice re-attached, used by group.prices().
group.df <- df.combined[1:1460,]
group.df$SalePrice <- ames_train$SalePrice
dim(group.df)
install.packages("ggplot2")
library(ggplot2)
install.packages("magrittr")
library(magrittr)
install.packages("scales")
library(scales)
library(dplyr)
# Summarise mean SalePrice / mean OverallQual per level of a categoric column
# (training rows only, via the global group.df), plot mean price per level as
# a bar chart ordered by descending price, and return the summary table.
#
# Args:
#   col: name (string) of the column in group.df to group by.
#
# Returns: data.frame with one row per level: mean.Quality, mean.Price, n.
group.prices <- function(col) {
group.table <- group.df[,c(col, 'SalePrice', 'OverallQual')] %>%
# group_by(across(all_of(col))) replaces the deprecated group_by_(col)
# for grouping by a column named in a character variable
group_by(across(all_of(col))) %>%
summarise(mean.Quality = round(mean(OverallQual),2),
mean.Price = mean(SalePrice), n = n()) %>%
arrange(mean.Quality)
print(qplot(x=reorder(group.table[[col]], -group.table[['mean.Price']]), y=group.table[['mean.Price']]) +
geom_bar(stat='identity', fill='cornflowerblue') +
theme_minimal() +
scale_y_continuous(labels = dollar) +
labs(x=col, y='Mean SalePrice') +
theme(axis.text.x = element_text(angle = 45)))
return(data.frame(group.table))
}
## function to compute the mean overall quality for each level of a categoric column
# Args:
#   col: name (string) of the column in df.combined to group by.
# Returns: data.frame of levels with their mean OverallQual, ascending.
quality.mean <- function(col) {
group.table <- df.combined[,c(col, 'OverallQual')] %>%
# group_by(across(all_of(col))) replaces the deprecated group_by_(col)
group_by(across(all_of(col))) %>%
summarise(mean.qual = mean(OverallQual)) %>%
arrange(mean.qual)
return(data.frame(group.table))
}
# Maps each categoric value of df.combined[,col] to its numeric rank via the
# named vector map.list, and stores the result as column `col` of `df`.
#
# Args:
#   cols:     character vector of column names (looked up in df.combined).
#   map.list: named numeric vector, e.g. c('None' = 0, 'Po' = 1, ...).
#   df:       data frame to receive the mapped numeric columns.
#
# Returns: df with the mapped columns added/overwritten.
map.fcn <- function(cols, map.list, df){
for (col in cols){
# Index by character, not by the raw column: subsetting a named vector
# with a FACTOR uses the factor's integer level codes, silently
# mis-mapping the values (cf. the explicit as.character() used for
# MSdwelling.list later in this script).
df[col] <- as.numeric(map.list[as.character(df.combined[,col])])
}
return(df)
}
###Any of the columns with the suffix 'Qual' or 'Cond' denote the quality or condition of that specific feature.
###Each of these columns have the potential values: TA, Fa, Gd, None, Ex, Po.
###We'll compute the mean house prices for these unique values to get a better sense of what their abbreviations mean.
qual.cols <- c('ExterQual', 'ExterCond', 'GarageQual', 'GarageCond', 'FireplaceQu', 'KitchenQual', 'HeatingQC', 'BsmtQual')
group.prices('FireplaceQu')
group.prices('BsmtQual')
group.prices('KitchenQual')
###From seeing the mean saleprices from a few of the quality and condition features we can infer that the abbreviations mean poor, fair, typical/average, good and excelent.
###We'll map numeric values from 0-5 to their corresponding categoric values (including 0 for None) and combine that to our dataframe.
##Note: we will set 'None' = 0 for all categories as None signifies that the house does not have that particular quality/condition to rank
###and regardless of the houses overall quality or sale price we will keep 'None' = 0 for consistency.
qual.list <- c('None' = 0, 'Po' = 1, 'Fa' = 2, 'TA' = 3, 'Gd' = 4, 'Ex' = 5)
df.numeric <- map.fcn(qual.cols, qual.list, df.numeric)
group.prices('BsmtExposure')
bsmt.list <- c('None' = 0, 'No' = 1, 'Mn' = 2, 'Av' = 3, 'Gd' = 4)
df.numeric = map.fcn(c('BsmtExposure'), bsmt.list, df.numeric)
group.prices('BsmtFinType1')
# visualization for BsmtFinTyp2 instead of another table
df.combined[,c('BsmtFinType1', 'BsmtFinSF1')] %>%
group_by(BsmtFinType1) %>%
summarise(medianArea = median(BsmtFinSF1), counts = n()) %>%
arrange(medianArea) %>%
ggplot(aes(x=reorder(BsmtFinType1,-medianArea), y=medianArea)) +
geom_bar(stat = 'identity', fill='cornflowerblue') +
labs(x='BsmtFinType2', y='Median of BsmtFinSF2') +
geom_text(aes(label = sort(medianArea)), vjust = -0.5) +
scale_y_continuous(limits = c(0,850)) +
theme_minimal()
##Through investigating the relationships between the basement quality and areas we an see the true order of qualities of each basement to be
##'None' < 'Unf' < 'LwQ' < 'BLQ' < 'Rec' < 'ALQ' < 'GLQ'.
bsmt.fin.list <- c('None' = 0, 'Unf' = 1, 'LwQ' = 2,'Rec'= 3, 'BLQ' = 4, 'ALQ' = 5, 'GLQ' = 6)
df.numeric <- map.fcn(c('BsmtFinType1','BsmtFinType2'), bsmt.fin.list, df.numeric)
group.prices('Functional')
functional.list <- c('None' = 0, 'Sal' = 1, 'Sev' = 2, 'Maj2' = 3, 'Maj1' = 4, 'Mod' = 5, 'Min2' = 6, 'Min1' = 7, 'Typ'= 8)
# NOTE(review): if Functional is a factor, indexing the named vector with it
# uses the factor's integer codes rather than the labels — confirm, and
# compare the as.character() wrapper used for MSdwelling.list below.
df.numeric['Functional'] <- as.numeric(functional.list[df.combined$Functional])
group.prices('GarageFinish')
garage.fin.list <- c('None' = 0,'Unf' = 1, 'RFn' = 2, 'Fin' = 3)
# NOTE(review): same factor-indexing caveat as Functional above.
df.numeric['GarageFinish'] <- as.numeric(garage.fin.list[df.combined$GarageFinish])
group.prices('Fence')
fence.list <- c('None' = 0, 'MnWw' = 1, 'GdWo' = 2, 'MnPrv' = 3, 'GdPrv' = 5)
# NOTE(review): same factor-indexing caveat as Functional above.
df.numeric['Fence'] <- as.numeric(fence.list[df.combined$Fence])
# 1 for the newer dwelling subclasses (1-story/2-story 1946+ and PUDs), 0 otherwise.
MSdwelling.list <- c('20' = 1, '30'= 0, '40' = 0, '45' = 0,'50' = 0, '60' = 1, '70' = 0, '75' = 0, '80' = 0, '85' = 0, '90' = 0, '120' = 1, '150' = 0, '160' = 0, '180' = 0, '190' = 0)
df.numeric['NewerDwelling'] <- as.numeric(MSdwelling.list[as.character(df.combined$MSSubClass)])
######### calculating the correlation between sale price and the categorical variables(which have been converted to ordinal variables)
library(corrplot)
# need the SalePrice column
corr.df <- cbind(df.numeric[1:1460,], ames_train['SalePrice'])
# only using the first 1460 rows - training data
correlations <- cor(corr.df)
# only want the columns that show strong correlations with SalePrice
corr.SalePrice <- as.matrix(sort(correlations[,'SalePrice'], decreasing = TRUE))
# keep features with |correlation| > 0.5 against SalePrice
corr.idx <- names(which(apply(corr.SalePrice, 1, function(x) (x > 0.5 | x < -0.5))))
corrplot(as.matrix(correlations[corr.idx,corr.idx]), type = 'upper', method='color', addCoef.col = 'black', tl.cex = .7,cl.cex = .7, number.cex=.7)
###matrix of scatter plots to see what these relationships look like under the hood
###to get a better sense of whats going on.
install.packages("GGally")
library(GGally)
# Panel builder for GGally::ggpairs lower triangle: scatter points with two
# overlaid smoothers — a local polynomial (loess, red) and a straight linear
# fit (lm, blue) — on a minimal theme. Returns the assembled ggplot.
lm.plt <- function(data, mapping, ...){
base.plot <- ggplot(data = data, mapping = mapping)
base.plot <- base.plot + geom_point(shape = 20, alpha = 0.7, color = 'darkseagreen')
base.plot <- base.plot + geom_smooth(method=loess, fill="red", color="red")
base.plot <- base.plot + geom_smooth(method=lm, fill="blue", color="blue")
base.plot + theme_minimal()
}
#The blue lines in the scatter plots represent a simple linear regression fit while the red lines represent a local polynomial fit.
#We can see both OverallQual and GrLivArea and TotalBsmtSF follow a linear model, but have some outliers we may want to look into.
#For instance, there are multiple houses with an overall quality of 10, but have suspisciously low prices.
#We can see similar behavior in GrLivArea and TotalBsmtSF. GarageCars and GarageArea both follow more of a quadratic fit.
#It seems that having a 4 car garage does not result in a higher house price and same with an extremely large area.
ggpairs(corr.df, corr.idx[1:6], lower = list(continuous = lm.plt))
ggpairs(corr.df, corr.idx[c(1,7:11)], lower = list(continuous = lm.plt))
##############################################
##########Nominal Variables
#LotShape has 3 values for having an irregular shape and only 1 for regular.
#We can create a binary column that returns 1 for houses with a regular lot shape and 0 for houses with any of the 3 irregular lot shapes.
#Using this method of turning a categoric feature into a binary column will ultimately help our data
#train better through boosted models without using numeric placeholders on nominal data.
plot.categoric('LotShape', df.combined)
# (logical == comparison) * 1 yields a 0/1 indicator column
df.numeric['RegularLotShape'] <- (df.combined$LotShape == 'Reg') * 1
table(df.numeric$RegularLotShape)
table(df.combined$LotShape)
# Same process is applied to the other nominal variables as well
plot.categoric('LandContour', df.combined)
df.numeric['LandLeveled'] <- (df.combined$LandContour == 'Lvl') * 1
plot.categoric('LandSlope', df.combined)
df.numeric['LandSlopeGentle'] <- (df.combined$LandSlope == 'Gtl') * 1
plot.categoric('Electrical', df.combined)
df.numeric['ElectricalSB'] <- (df.combined$Electrical == 'SBrkr') * 1
plot.categoric('GarageType', df.combined)
df.numeric['GarageDetchd'] <- (df.combined$GarageType == 'Detchd') * 1
plot.categoric('PavedDrive', df.combined)
df.numeric['HasPavedDrive'] <- (df.combined$PavedDrive == 'Y') * 1
df.numeric['HasWoodDeck'] <- (df.combined$WoodDeckSF > 0) * 1
df.numeric['Has2ndFlr'] <- (df.combined$X2ndFlrSF > 0) * 1
df.numeric['HasMasVnr'] <- (df.combined$MasVnrArea > 0) * 1
table(df.combined$WoodDeckSF)
plot.categoric('MiscFeature', df.combined)
#For MiscFeature the only feature with a significant amount of houses having it is Shed.
#We can one-hot encode houses that have Sheds vs those who do not.
df.numeric['HasShed'] <- (df.combined$MiscFeature == 'Shed') * 1
################# feature engineering
#Many of the houses recorded the same year for YearBuilt and YearRemodAdd.
#We can create a new column that records that a house was remodelled
#if the year it was built is different than the remodel year. This
df.numeric['Remodeled'] <- (df.combined$YearBuilt != df.combined$YearRemodAdd) * 1
#We can also create a column that seperates which houses have been recently remodelled vs those who are not.
#Houses that have been remodelled after the year they were sold will fall into this category.
df.numeric['RecentRemodel'] <- (df.combined$YearRemodAdd >= df.combined$YrSold) * 1
#There can be potential value to homes who were sold the same year they were built as this could be an indicator
#that these houses were hot in the market
df.numeric['NewHouse'] <- (df.combined$YearBuilt == df.combined$YrSold) * 1
#What about the houses with area based features equal to 0? Houses with 0 square footage for a columnshows that the house does not have that feature at all.
#We add a one-hot encoded column for returning 1 for any house with an area greater than 0
#since this means that the house does have this feature and 0 for those who do not
cols.binary <- c('X2ndFlrSF', 'MasVnrArea', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', 'X3SsnPorch', 'ScreenPorch')
for (col in cols.binary){
df.numeric[str_c('Has',col)] <- (df.combined[,col] != 0) * 1
}
### see how houses sold month wise
ggplot(df.combined, aes(x=MoSold)) +
geom_bar(fill = 'cornflowerblue') +
geom_text(aes(label=..count..), stat='count', vjust = -.5) +
theme_minimal() +
scale_x_continuous(breaks = 1:12)
#The largest proportion of houses sold is during the summer months: May, June, July.
#Let's add a column that seperates the the summer houses from the rest.
df.numeric['HighSeason'] <- (df.combined$MoSold %in% c(5,6,7)) * 1
### some neighbourhoods are more expensive than others
# Median SalePrice per neighborhood, plotted in ascending order.
ames_train[,c('Neighborhood','SalePrice')] %>%
group_by(Neighborhood) %>%
summarise(median.price = median(SalePrice, na.rm = TRUE)) %>%
arrange(median.price) %>%
mutate(nhbr.sorted = factor(Neighborhood, levels=Neighborhood)) %>%
ggplot(aes(x=nhbr.sorted, y=median.price)) +
geom_point() +
geom_text(aes(label = median.price, angle = 45), vjust = 2) +
theme_minimal() +
labs(x='Neighborhood', y='Median price') +
theme(text = element_text(size=12),
axis.text.x = element_text(angle=45))
library(dplyr) ### needed for group_by function
#StoneBr, NoRidge, NridgHt have a large gap between them versus the rest of the median prices from any of the other neighborhods.
#It would be wise of us to check if this is from outliers or if these houses are much pricier as a whole.
other.nbrh <- unique(df.combined$Neighborhood)[!unique(df.combined$Neighborhood) %in% c('StoneBr', 'NoRidge','NridgHt')]
# Highlight the three expensive neighborhoods in color, everything else black.
ggplot(ames_train, aes(x=SalePrice, y=GrLivArea, colour=Neighborhood)) +
geom_point(shape=16, alpha=.8, size=4) +
scale_color_manual(limits = c(other.nbrh, 'StoneBr', 'NoRidge', 'NridgHt'), values = c(rep('black', length(other.nbrh)), 'indianred',
'cornflowerblue', 'darkseagreen')) +
theme_minimal() +
scale_x_continuous(label=dollar)
#lets one-hot encode the more expensive neighborhoods and add that to our dataframe
# (Fixed: 'Somerst, Timber' was a single malformed string that could never
# match a neighborhood level, and 'NridgeHt' was a misspelling of the actual
# level 'NridgHt' — see nbrh.map below and the plot above, which both use
# 'Somerst', 'Timber' and 'NridgHt'.)
nbrh.rich <- c('Crawfor', 'Somerst', 'Timber', 'StoneBr', 'NoRidge', 'NridgHt')
df.numeric['NbrhRich'] <- (df.combined$Neighborhood %in% nbrh.rich) *1
group.prices('Neighborhood')
# Ordinal neighborhood tiers (0 = cheapest ... 4 = most expensive) derived
# from the per-neighborhood mean prices printed above.
nbrh.map <- c('MeadowV' = 0, 'IDOTRR' = 1, 'Sawyer' = 1, 'BrDale' = 1, 'OldTown' = 1, 'Edwards' = 1,
'BrkSide' = 1, 'Blueste' = 1, 'SWISU' = 2, 'NAmes' = 2, 'NPkVill' = 2, 'Mitchel' = 2,
'SawyerW' = 2, 'Gilbert' = 2, 'NWAmes' = 2, 'Blmngtn' = 2, 'CollgCr' = 2, 'ClearCr' = 3,
'Crawfor' = 3, 'Veenker' = 3, 'Somerst' = 3, 'Timber' = 3, 'StoneBr' = 4, 'NoRidge' = 4,
'NridgHt' = 4)
# NOTE(review): if Neighborhood is a factor, this indexes nbrh.map by level
# codes rather than labels — confirm (compare the as.character() wrapper used
# for MSdwelling.list earlier).
df.numeric['NeighborhoodBin'] <- as.numeric(nbrh.map[df.combined$Neighborhood])
### sale condition
group.prices('SaleCondition')
df.numeric['PartialPlan'] <- (df.combined$SaleCondition == 'Partial') * 1
group.prices('HeatingQC')
heating.list <- c('Po' = 0, 'Fa' = 1, 'TA' = 2, 'Gd' = 3, 'Ex' = 4)
# NOTE(review): same factor-indexing caveat as NeighborhoodBin above.
df.numeric['HeatingScale'] <- as.numeric(heating.list[df.combined$HeatingQC])
# Aggregate area features: total of every square-footage column, and the
# above-ground interior area (1st + 2nd floor).
area.cols <- c('LotFrontage', 'LotArea', 'MasVnrArea', 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF',
'TotalBsmtSF', 'X1stFlrSF', 'X2ndFlrSF', 'GrLivArea', 'GarageArea', 'WoodDeckSF',
'OpenPorchSF', 'EnclosedPorch', 'X3SsnPorch', 'ScreenPorch', 'LowQualFinSF', 'PoolArea')
df.numeric['TotalArea'] <- as.numeric(rowSums(df.combined[,area.cols]))
df.numeric['AreaInside'] <- as.numeric(df.combined$X1stFlrSF + df.combined$X2ndFlrSF)
#We've seen how strong of an effect the year of a house built has on the house price,
#therefore, as this dataset collects houses up until 2010
#we can determine how old a house is and how long ago the house was sold:
df.numeric['Age'] <- as.numeric(2010 - df.combined$YearBuilt)
df.numeric['TimeSinceSold'] <- as.numeric(2010 - df.combined$YrSold)
# how many years since the house was remodelled and sold
df.numeric['YearSinceRemodel'] <- as.numeric(df.combined$YrSold - df.combined$YearRemodAdd)
#####################################
###Correlation plot with OverallQual
library(corrplot)
corr.OverallQual <- as.matrix(sort(correlations[,'OverallQual'], decreasing = TRUE))
# keep features with |correlation| > 0.5 against OverallQual
corr.idx <- names(which(apply(corr.OverallQual, 1, function(x) (x > 0.5 | x < -0.5))))
corrplot(as.matrix(correlations[corr.idx, corr.idx]), type = 'upper',
method = 'color', addCoef.col = 'black', tl.cex =.7, cl.cex = .7,
number.cex = .7)
############ outliers
# Side-by-side look at GrLivArea in train vs test to spot train-only outliers.
train.test.df <- rbind(dplyr::select(ames_train,-SalePrice), ames_test)
train.test.df$type <- c(rep('train',1460),rep('test',1459))
ggplot(ames_train, aes(x=GrLivArea)) +
geom_histogram(fill='lightblue',color='white') +
theme_minimal()
outlier_values <- boxplot.stats(ames_train$GrLivArea)$out # outlier values.
boxplot(ames_train$GrLivArea, main="GrLivArea", boxwex=0.1)
mtext(paste("Outliers: ", paste(outlier_values[outlier_values>4000], collapse=", ")), cex=0.6)
ggplot(train.test.df, aes(x=type, y=GrLivArea, fill=type)) +
geom_boxplot() +
theme_minimal() +
scale_fill_manual(breaks = c("test", "train"), values = c("indianred", "lightblue"))
# Drop the extreme GrLivArea training rows (> 4000 sq ft) from both frames.
idx.outliers <- which(ames_train$GrLivArea > 4000)
df.numeric <- df.numeric[!1:nrow(df.numeric) %in% idx.outliers,]
df.combined <- df.combined[!1:nrow(df.combined) %in% idx.outliers,]
dim(df.numeric)
################################### Preprocessing
############### checking for normality of independent variable and standardizing the independent variables
###### normality check: skewness, kurtosis, Kolmogorov-Smirnov test
### log(x+1) is taken for highly skewed values
View(df.numeric)
library(moments)
library(psych)
# linear models assume normality from dependant variables
# transform any skewed data into normal
# Per-column skewness; keep only columns with |skewness| > 0.8 for log transform.
skewed <- apply(df.numeric, 2, skewness)
skewed <- skewed[(skewed > 0.8) | (skewed < -0.8)]
skewed
# Per-column kurtosis; keep only columns with |kurtosis| > 3.
kurtosi <- apply(df.numeric, 2, kurtosis)
# (Fixed: the original subset `kurtosis` — the moments FUNCTION — instead of
# the `kurtosi` result vector, which would error at runtime.)
kurtosi <- kurtosi[(kurtosi > 3.0) | (kurtosi < -3.0)]
kurtosi
# not very useful in our case
# Kolmogorov-Smirnov test of each column against a standard-normal sample.
ks.p.val <- NULL
for (i in 1:length(df.numeric)) {
test.stat <- ks.test(df.numeric[i], rnorm(1000))
ks.p.val[i] <- test.stat$p.value
}
ks.p.val
# Log-transform the skewed columns; use log(1+x) when the column contains
# zeros (plain log would produce -Inf).
for(col in names(skewed)){
if(0 %in% df.numeric[, col]) {
df.numeric[,col] <- log(1+df.numeric[,col])
}
else {
df.numeric[,col] <- log(df.numeric[,col])
}
}
# normalize the data
library(caret)
# caret::preProcess centers and scales by default.
scaler <- preProcess(df.numeric)
df.numeric <- predict(scaler, df.numeric)
#### For the rest of the categoric features we can one-hot encode each value to get as many splits in the data as possible
# one hot encoding for categorical data
# sparse data performs better for trees/xgboost
dummy <- dummyVars(" ~ ." , data=df.combined[,cat_features])
df.categoric <- data.frame(predict(dummy,newdata=df.combined[,cat_features]))
str(df.combined)
# Assigns each year in df.combined[, col.combined] to a fixed-width bin and
# writes the bin index into df.categoric[, col.name].
#
# Args:
#   col.combined: column in the global df.combined holding the year values.
#   col.name:     destination column in the global df.categoric.
#   start.year:   first year of bin 1 (default 1871, the dataset minimum).
#   bin.width:    number of years per bin (default 20).
#   n.bins:       number of bins (default 7, covering 1871-2010).
#
# Returns: the updated copy of df.categoric (caller must reassign it).
year.map = function(col.combined, col.name, start.year = 1871, bin.width = 20, n.bins = 7) {
for (i in seq_len(n.bins)) {
# bin i covers [start.year + (i-1)*width, start.year + i*width - 1]
year.seq = seq(start.year + (i-1)*bin.width, start.year + i*bin.width - 1)
idx = which(df.combined[,col.combined] %in% year.seq)
df.categoric[idx,col.name] = i
}
return(df.categoric)
}
# Bin GarageYrBlt / YearBuilt / YearRemodAdd into the 20-year bins defined by
# year.map(), one-hot encode each bin column, then drop the raw bin columns.
df.categoric['GarageYrBltBin'] = 0
df.categoric <- year.map('GarageYrBlt', 'GarageYrBltBin')
df.categoric['YearBuiltBin'] = 0
df.categoric <- year.map('YearBuilt','YearBuiltBin')
df.categoric['YearRemodAddBin'] = 0
df.categoric <- year.map('YearRemodAdd', 'YearRemodAddBin')
bin.cols <- c('GarageYrBltBin', 'YearBuiltBin', 'YearRemodAddBin')
for (col in bin.cols) {
# NOTE(review): the bin columns are numeric, so model.matrix(~.-1) yields a
# single numeric column rather than per-bin indicators — to truly one-hot
# encode, the column would need to be a factor first. Confirm intent.
df.categoric <- cbind(df.categoric, model.matrix(~.-1, df.categoric[col]))
}
# drop the original 'GarageYrBltBin', 'YearBuiltBin', 'YearRemodAddBin' from our dataframe
df.categoric <- df.categoric[,!names(df.categoric) %in% bin.cols]
### combining into a single df
# Final modelling frame: engineered numeric features + one-hot categoric features.
df <- cbind(df.numeric, df.categoric)
str(df)
### distribution of housing prices
install.packages("WVPlots")
library(WVPlots)
# SalePrice for the training rows that survived the GrLivArea outlier filter.
y.true <- ames_train$SalePrice[which(!1:1460 %in% idx.outliers)]
# Histogram + kernel density of raw SalePrice with a matching normal curve
# overlaid, to show how far from normal the raw target is.
qplot(y.true, geom='density') +# +(train, aes(x=SalePrice)) +
geom_histogram(aes(y=..density..), color='white',
fill='lightblue', alpha=.5, bins = 60) +
geom_line(aes(y=..density..), color='cornflowerblue', lwd = 1, stat = 'density') +
stat_function(fun = dnorm, colour = 'indianred', lwd = 1, args =
list(mean(ames_train$SalePrice), sd(ames_train$SalePrice))) +
scale_x_continuous(breaks = seq(0,800000,100000), labels = dollar) +
scale_y_continuous(labels = comma) +
theme_minimal() +
annotate('text', label = paste('skewness =', signif(skewness(ames_train$SalePrice),4)),
x=500000,y=7.5e-06)
qqnorm(ames_train$SalePrice)
qqline(ames_train$SalePrice)
#We can see from the histogram and the quantile-quantile plot that the distribution of sale prices is right-skewed and does not follow a normal distribution.
#Lets make a log-transformation and see how our data looks
# log1p-style transform; this is the regression target used downstream.
y_train <- log(y.true+1)
qplot(y_train, geom = 'density') +
geom_histogram(aes(y=..density..), color = 'white', fill = 'lightblue', alpha = .5, bins = 60) +
scale_x_continuous(breaks = seq(0,800000,100000), labels = comma) +
geom_line(aes(y=..density..), color='dodgerblue4', lwd = 1, stat = 'density') +
stat_function(fun = dnorm, colour = 'indianred', lwd = 1, args =
list(mean(y_train), sd(y_train))) +
#scale_x_continuous(breaks = seq(0,800000,100000), labels = dollar) +
scale_y_continuous(labels = comma) +
theme_minimal() +
annotate('text', label = paste('skewness =', signif(skewness(y_train),4)),
x=13,y=1) +
labs(x = 'log(SalePrice + 1)')
qqnorm(y_train)
qqline(y_train)
paste('The dataframe has', dim(df)[1], 'rows and', dim(df)[2], 'columns')
|
##' Function to remove non-existing record
##'
##' Sometimes non-existing records (observation status flag and method flag and
##' value all NA) are returned by the database, or can be created by
##' denormalising the data. This function removes these records.
##'
##' @param data The data containing non-existing record
##' @param areaVar The column name corresponding to the geographic
##' area.
##' @param itemVar The column name corresponding to the commodity
##' item.
##' @param elementVar The column name corresponding to the measured
##' element.
##' @param yearVar The column name corresponding to the year.
##' @param flagObsVar The column name corresponding to the observation
##' status flag.
##' @param flagMethodVar The column name corresponding to the method
##' flag.
##' @param valueVar The column name corresponding to the value.
##'
##' @return Data with non-existing records omitted.
##'
##' @export
##'
removeNonExistingRecord = function(data,
                                   areaVar = "geographicAreaM49",
                                   itemVar = "measuredItemCPC",
                                   elementVar = "measuredElement",
                                   yearVar = "timePointYears",
                                   flagObsVar = "flagObservationStatus",
                                   flagMethodVar = "flagMethod",
                                   valueVar = "Value"){
    ## Work on a data.table copy so the caller's object is never modified
    ## by reference.
    workingData = copy(data)
    ## Every key, flag and value column must be present, i.e. the data must
    ## be in normalised (long) form.
    neededColumns = c(areaVar, itemVar, elementVar, yearVar,
                      flagObsVar, flagMethodVar, valueVar)
    if(!all(neededColumns %in% colnames(workingData)))
        stop("Required column not in data, data has to be normalised!")
    ## A record "exists" only when both the observation-status flag and the
    ## method flag are non-missing; keep exactly those rows.
    obsFlagPresent = !is.na(workingData[[flagObsVar]])
    methodFlagPresent = !is.na(workingData[[flagMethodVar]])
    workingData[obsFlagPresent & methodFlagPresent, ]
}
| /R/removeNonExistingRecord.R | no_license | SWS-Methodology/faoswsProcessing | R | false | false | 1,800 | r | ##' Function to remove non-existing record
##'
##' Sometimes non-existing records (observation status flag and method flag and
##' value all NA) are returned by the database, or can be created by
##' denormalising the data. This function removes these records.
##'
##' @param data The data containing non-existing record
##' @param areaVar The column name corresponding to the geographic
##' area.
##' @param itemVar The column name corresponding to the commodity
##' item.
##' @param elementVar The column name corresponding to the measured
##' element.
##' @param yearVar The column name corresponding to the year.
##' @param flagObsVar The column name corresponding to the observation
##' status flag.
##' @param flagMethodVar The column name corresponding to the method
##' flag.
##' @param valueVar The column name corresponding to the value.
##'
##' @return Data with non-existing records omitted.
##'
##' @export
##'
removeNonExistingRecord = function(data,
                                   areaVar = "geographicAreaM49",
                                   itemVar = "measuredItemCPC",
                                   elementVar = "measuredElement",
                                   yearVar = "timePointYears",
                                   flagObsVar = "flagObservationStatus",
                                   flagMethodVar = "flagMethod",
                                   valueVar = "Value"){
    ## data.table copy() protects the caller's table from by-reference
    ## modification -- assumes `data` is a data.table; TODO confirm.
    dataCopy = copy(data)
    ## The data must be normalised (long form): all key/flag/value columns
    ## present.
    requiredColumn = c(areaVar, itemVar, elementVar, yearVar,
                       flagObsVar, flagMethodVar, valueVar)
    if(!all(requiredColumn %in% colnames(dataCopy)))
        stop("Required column not in data, data has to be normalised!")
    ## Keep rows where BOTH flags are non-missing.  NOTE(review): the roxygen
    ## description mentions Value being NA as well, but only the two flag
    ## columns are actually tested here.
    dataCopy[!is.na(dataCopy[[flagObsVar]]) & !is.na(dataCopy[[flagMethodVar]]), ]
}
|
## Standard testthat entry point: run by `R CMD check` to execute the unit
## tests under tests/testthat/ for the MANOVA.RM package.
library(testthat)
library(MANOVA.RM)
test_check("MANOVA.RM")
| /tests/testthat.R | no_license | smn74/MANOVA.RM | R | false | false | 62 | r | library(testthat)
library(MANOVA.RM)
test_check("MANOVA.RM")
|
############################################################################################
# Project: Path Optimization using Simulated Annealing
# Will Daewook Kwon - will.dw.kwon@gmail.com
#
# Description:
# 10 Cities and their distance to each other are given. Using the Metropolis-Hastings
# algorith, we will generate more often those path with shorter distance. As the number
# of iteration grows, the generated path will converge to the shortest path possible.
############################################################################################
## NOTE(review): rm(list = ls()) wipes the user's workspace as a side effect;
## scripts should avoid this.
rm(list=ls())
## The Distance Matrix
## Pairwise city distances, given as the upper triangle only (one vector per
## city row); symmetrised below via distance + t(distance).
## NOTE(review): the names F and T shadow the FALSE/TRUE shortcuts for the
## rest of this session -- another reason to spell out TRUE/FALSE.
A<-c(0,587,1212,701,1936,604,748,2139,2182,543)
B<-c(0,0,920,940,1745,1188,713,1858,1737,597)
C<-c(0,0,0,879,831,1726,1631,949,1021,1494)
D<-c(0,0,0,0,1374,968,1420,1645,1891,1220)
E<-c(0,0,0,0,0,2339,2451,347,959,2300)
F<-c(0,0,0,0,0,0,1092,2594,2734,923)
G<-c(0,0,0,0,0,0,0,2571,2408,205)
H<-c(0,0,0,0,0,0,0,0,678,2442)
I<-c(0,0,0,0,0,0,0,0,0,2329)
J<-c(0,0,0,0,0,0,0,0,0,0)
distance<-cbind(A,B,C,D,E,F,G,H,I,J)
rownames(distance)=c("A","B","C","D","E","F","G","H","I","J")
## Make the matrix symmetric: d(i, j) = d(j, i).
distance=distance+t(distance)
## Distance Calculator: V(x)
## Total length of a path: sum of distances between consecutive cities,
## looked up in the global `distance` matrix defined above.
## Improvements over the original: the tour length is no longer hard-coded
## to 10 cities (the loop ran over 1:9), and the grow-in-a-loop `vec <- c()`
## pattern is replaced by a preallocating vapply().  For a length-10 path
## the result is identical.
DistCal <- function(vector) {
  n_stops <- length(vector)
  legs <- vapply(seq_len(n_stops - 1),
                 function(i) distance[vector[i], vector[i + 1]],
                 numeric(1))
  sum(legs)
}
## y Generator: proposal function
## The number of neighbours are the same with 45=choose(10,2))
## Pick two arbitary indices and switch
## Propose a neighbouring path by swapping two randomly chosen positions.
## Each path therefore has choose(n, 2) neighbours (45 for n = 10).
## Generalized from the original's hard-coded sample(1:10, 2) to any path
## length via seq_along(); for n = 10 the RNG draw and result are unchanged.
Proposal <- function(vector) {
  swap_idx <- sample(seq_along(vector), 2)
  vector[swap_idx] <- vector[rev(swap_idx)]
  vector
}
## Set x0 (By letting A=1,B=2....K=10)
x0=c(1,2,3,4,5,6,7,8,9,10)
## Simulate the Markov Chain
## Run the simulated-annealing Markov chain for N Metropolis-Hastings steps
## starting from path x0.  The cooling schedule is lambda = log(1 + i) and
## the target favours short tours via exp(-lambda * DistCal(.) / 1000).
## Returns the list of states visited, one per iteration.
Simulator <- function(N, x0) {
  trajectory <- vector("list", N)  # preallocated, one slot per step
  current <- x0
  for (i in 1:N) {
    lambda <- 1 * log(1 + i)
    candidate <- Proposal(current)
    ## Metropolis acceptance probability for the proposed swap.
    accept <- min(1, exp(lambda * (-DistCal(candidate) / 1000)) /
                     exp(lambda * (-DistCal(current) / 1000)))
    if (runif(1) <= accept) {
      current <- candidate
    }
    trajectory[[i]] <- current
  }
  trajectory
}
## Simulation
## Run 10 000 annealing steps from the identity tour (A, B, ..., J).
sim<-Simulator(10000,x0)
## Calculate distances of each vector in MC and ovserve distance reduction
vec<-c()
for(i in 1:10000){
z<-sim[[i]]
vec[i]<-DistCal(z)
}
## Trace of tour length over iterations: should decrease and stabilise.
plot(ts(vec))
## The Limiting Distribution and the Maximum Value (Shortest Path)
## The last chain state is taken as the (approximate) optimum.
optimal<-sim[length(sim)];optimal
DistCal(optimal[[1]])
| /My Favorites/PathOptimization.R | no_license | WillKwon/R_College_Projects | R | false | false | 2,313 | r | ############################################################################################
# Project: Path Optimization using Simulated Annealing
# Will Daewook Kwon - will.dw.kwon@gmail.com
#
# Description:
# 10 Cities and their distance to each other are given. Using the Metropolis-Hastings
# algorith, we will generate more often those path with shorter distance. As the number
# of iteration grows, the generated path will converge to the shortest path possible.
############################################################################################
rm(list=ls())
## The Distance Matrix
A<-c(0,587,1212,701,1936,604,748,2139,2182,543)
B<-c(0,0,920,940,1745,1188,713,1858,1737,597)
C<-c(0,0,0,879,831,1726,1631,949,1021,1494)
D<-c(0,0,0,0,1374,968,1420,1645,1891,1220)
E<-c(0,0,0,0,0,2339,2451,347,959,2300)
F<-c(0,0,0,0,0,0,1092,2594,2734,923)
G<-c(0,0,0,0,0,0,0,2571,2408,205)
H<-c(0,0,0,0,0,0,0,0,678,2442)
I<-c(0,0,0,0,0,0,0,0,0,2329)
J<-c(0,0,0,0,0,0,0,0,0,0)
distance<-cbind(A,B,C,D,E,F,G,H,I,J)
rownames(distance)=c("A","B","C","D","E","F","G","H","I","J")
distance=distance+t(distance)
## Distance Calculator: V(x)
## Total length of a path: sum of distances between consecutive cities,
## looked up in the global `distance` matrix defined above.
## Improvements over the original: the tour length is no longer hard-coded
## to 10 cities (the loop ran over 1:9), and the grow-in-a-loop `vec <- c()`
## pattern is replaced by a preallocating vapply().  For a length-10 path
## the result is identical.
DistCal <- function(vector) {
  n_stops <- length(vector)
  legs <- vapply(seq_len(n_stops - 1),
                 function(i) distance[vector[i], vector[i + 1]],
                 numeric(1))
  sum(legs)
}
## y Generator: proposal function
## The number of neighbours are the same with 45=choose(10,2))
## Pick two arbitary indices and switch
## Propose a neighbouring path by swapping two randomly chosen positions.
## Each path therefore has choose(n, 2) neighbours (45 for n = 10).
## Generalized from the original's hard-coded sample(1:10, 2) to any path
## length via seq_along(); for n = 10 the RNG draw and result are unchanged.
Proposal <- function(vector) {
  swap_idx <- sample(seq_along(vector), 2)
  vector[swap_idx] <- vector[rev(swap_idx)]
  vector
}
## Set x0 (By letting A=1,B=2....K=10)
x0=c(1,2,3,4,5,6,7,8,9,10)
## Simulate the Markov Chain
## Run the simulated-annealing Markov chain for N Metropolis-Hastings steps
## starting from path x0.  The cooling schedule is lambda = log(1 + i) and
## the target favours short tours via exp(-lambda * DistCal(.) / 1000).
## Returns the list of states visited, one per iteration.
Simulator <- function(N, x0) {
  trajectory <- vector("list", N)  # preallocated, one slot per step
  current <- x0
  for (i in 1:N) {
    lambda <- 1 * log(1 + i)
    candidate <- Proposal(current)
    ## Metropolis acceptance probability for the proposed swap.
    accept <- min(1, exp(lambda * (-DistCal(candidate) / 1000)) /
                     exp(lambda * (-DistCal(current) / 1000)))
    if (runif(1) <= accept) {
      current <- candidate
    }
    trajectory[[i]] <- current
  }
  trajectory
}
## Simulation
sim<-Simulator(10000,x0)
## Calculate distances of each vector in MC and ovserve distance reduction
vec<-c()
for(i in 1:10000){
z<-sim[[i]]
vec[i]<-DistCal(z)
}
plot(ts(vec))
## The Limiting Distribution and the Maximum Value (Shortest Path)
optimal<-sim[length(sim)];optimal
DistCal(optimal[[1]])
|
#' @export
#' @title Unpack data from Data Pack sheets.
#'
#' @description
#' Loops through all critical sheets in a submitted Data Pack
#' and extracts data, then compiles into single flat dataframe.
#'
#' @inheritParams datapackr_params
#' @param check_sheets Logical. Should sheet data be validated?
#' @param separate_datasets Logical. Should datasets be separated?
#'
#' @return d
#'
unPackSheets <- function(d,
                         sheets = NULL,
                         check_sheets = TRUE,
                         separate_datasets = TRUE) {
  interactive_print("Unpacking sheets...")
  ## Only Data Pack tools are supported by this unpacker.
  if (d$info$tool != "Data Pack") {
    stop("Cannot process that kind of tool. :(")
  }
  # Check sheets param provided
  # If sheets parameter not provided, use names of sheets in d$sheets
  if (is.null(d$sheets)) {
    d <- loadSheets(d)
  }
  ## Default to every loaded sheet EXCEPT the PSNUxIM sheet, which has its
  ## own processing path.
  sheets <- sheets %||% grep("PSNUxIM", names(d$sheets), value = TRUE, invert = TRUE)
  sheets <- checkSheets(sheets = sheets,
                        cop_year = d$info$cop_year,
                        tool = d$info$tool,
                        all_sheets = FALSE,
                        psnuxim = FALSE)
  # Check sheets against actual sheets found in d$sheets
  ## Requested-but-absent sheets are dropped with a warning rather than an
  ## error, so processing continues on the sheets that do exist.
  if (!all(sheets %in% names(d$sheets))) {
    invalid_sheets <- unique(sheets[!sheets %in% names(d$sheets)])
    sheets <- sheets[sheets %in% names(d$sheets)]
    interactive_warning(
      paste0("You've asked us to unpack the following sheets, which do not ",
             "appear in your submission.: -> \n\t* ",
             paste(invalid_sheets, collapse = "\n\t* "),
             "\n"))
  }
  # Don't proceed with any sheets where *any* index columns are missing (PSNU,
  # Age, Sex, KeyPop), or no rows of data
  ## checkToolEmptySheets populates d$tests; the offending sheet names are
  ## then excluded from further processing.
  d <- checkToolEmptySheets(d, sheets = sheets)
  no_data <- c(d$tests$missing_index_columns$sheet_name,
               d$tests$no_rows_data$sheet_name) %>%
    unique()
  sheets <- sheets[!sheets %in% no_data]
  # Check sheet data
  if (check_sheets) {
    d <- checkSheetData(d, sheets = sheets)
  }
  # Unpack Sheet Data ----
  targets <- unPackDataPackSheet(d, sheets)
  # Separate Sheet Data ----
  ## Either split the flat target data into MER vs SUBNAT/IMPATT datasets,
  ## or store it unsplit under d$data$targets.
  if (separate_datasets) {
    interactive_print("Separating datasets...")
    datasets <- separateDataSets(data = targets,
                                 cop_year = d$info$cop_year,
                                 tool = d$info$tool)
    d$data$MER <- datasets$MER
    d$data$SUBNAT_IMPATT <- datasets$SUBNAT_IMPATT
  } else {
    d$data$targets <- targets
  }
  return(d)
}
| /R/unPackSheets.R | permissive | jason-p-pickering/datapackr | R | false | false | 2,530 | r | #' @export
#' @title Unpack data from Data Pack sheets.
#'
#' @description
#' Loops through all critical sheets in a submitted Data Pack
#' and extracts data, then compiles into single flat dataframe.
#'
#' @inheritParams datapackr_params
#' @param check_sheets Logical. Should sheet data be validated?
#' @param separate_datasets Logical. Should datasets be separated?
#'
#' @return d
#'
unPackSheets <- function(d,
sheets = NULL,
check_sheets = TRUE,
separate_datasets = TRUE) {
interactive_print("Unpacking sheets...")
if (d$info$tool != "Data Pack") {
stop("Cannot process that kind of tool. :(")
}
# Check sheets param provided
# If sheets parameter not provided, use names of sheets in d$sheets
if (is.null(d$sheets)) {
d <- loadSheets(d)
}
sheets <- sheets %||% grep("PSNUxIM", names(d$sheets), value = TRUE, invert = TRUE)
sheets <- checkSheets(sheets = sheets,
cop_year = d$info$cop_year,
tool = d$info$tool,
all_sheets = FALSE,
psnuxim = FALSE)
# Check sheets against actual sheets found in d$sheets
if (!all(sheets %in% names(d$sheets))) {
invalid_sheets <- unique(sheets[!sheets %in% names(d$sheets)])
sheets <- sheets[sheets %in% names(d$sheets)]
interactive_warning(
paste0("You've asked us to unpack the following sheets, which do not ",
"appear in your submission.: -> \n\t* ",
paste(invalid_sheets, collapse = "\n\t* "),
"\n"))
}
# Don't proceed with any sheets where *any* index columns are missing (PSNU,
# Age, Sex, KeyPop), or no rows of data
d <- checkToolEmptySheets(d, sheets = sheets)
no_data <- c(d$tests$missing_index_columns$sheet_name,
d$tests$no_rows_data$sheet_name) %>%
unique()
sheets <- sheets[!sheets %in% no_data]
# Check sheet data
if (check_sheets) {
d <- checkSheetData(d, sheets = sheets)
}
# Unpack Sheet Data ----
targets <- unPackDataPackSheet(d, sheets)
# Separate Sheet Data ----
if (separate_datasets) {
interactive_print("Separating datasets...")
datasets <- separateDataSets(data = targets,
cop_year = d$info$cop_year,
tool = d$info$tool)
d$data$MER <- datasets$MER
d$data$SUBNAT_IMPATT <- datasets$SUBNAT_IMPATT
} else {
d$data$targets <- targets
}
return(d)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/R.installation.qualification.R
\name{.getDependencyList}
\alias{.getDependencyList}
\title{Title}
\usage{
.getDependencyList()
}
\value{
data.frame containing the dependencies for PMDatR
}
\description{
Title
}
| /man/dot-getDependencyList.Rd | no_license | qPharmetra/PMDatR | R | false | true | 289 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/R.installation.qualification.R
\name{.getDependencyList}
\alias{.getDependencyList}
\title{Title}
\usage{
.getDependencyList()
}
\value{
data.frame containing the dependencies for PMDatR
}
\description{
Title
}
|
library(tidyverse)
## Fueling stations with their containing block group; Emeryville is
## excluded (reason not stated here -- presumably handled elsewhere).
stations <- read_rds("Data/stations_with_bg.rds") %>%
  filter(name != "Emeryville")
distances <- read_rds("Data/all_station_bg_dists.rds")
# only want distances between block groups that have fueling stations
distances_sta <- distances %>%
  semi_join(stations, by = c("T_GEOID" = "nearest_bg"))
# get nearest three relevant bgs for each bg with a station, compare to euclidean distance
## NOTE(review): despite the comment above, slice_min(..., n = 1) keeps only
## the single nearest station per id, not three.
stations_xy <- stations %>%
  select(id, name, address, nearest_bg, X, Y)
potential_matches <- distances_sta %>%
  left_join(stations_xy, c("STA_GEOID" = "nearest_bg")) %>%
  left_join(stations_xy, c("T_GEOID" = "nearest_bg"), suffix = c("", "_nearest")) %>%
  filter(id != id_nearest) %>%
  with_groups(id, slice_min, d_meters, n = 1) %>%
  ## Floor the network distance at the straight-line distance: a network
  ## route can never be shorter than the Euclidean distance.
  mutate(d_meters_euclidean = sqrt((X - X_nearest)^2 + (Y - Y_nearest)^2),
         d_meters = pmax(d_meters_euclidean, d_meters)) %>%
  select(-d_meters_euclidean)
nearest_sta <- potential_matches %>%
  select(id, name, address, d_meters, id_nearest, name_nearest, address_nearest)
nearest_sta %>%
  write_csv("Data/fcev_station_to_nearest_station.csv")
| /FCEV_04_station-station-distances.R | no_license | davisadamw/fcev-map | R | false | false | 1,122 | r | library(tidyverse)
stations <- read_rds("Data/stations_with_bg.rds") %>%
filter(name != "Emeryville")
distances <- read_rds("Data/all_station_bg_dists.rds")
# only want distances between block groups that have fueling stations
distances_sta <- distances %>%
semi_join(stations, by = c("T_GEOID" = "nearest_bg"))
# get nearest three relevant bgs for each bg with a station, compare to euclidean distance
stations_xy <- stations %>%
select(id, name, address, nearest_bg, X, Y)
potential_matches <- distances_sta %>%
left_join(stations_xy, c("STA_GEOID" = "nearest_bg")) %>%
left_join(stations_xy, c("T_GEOID" = "nearest_bg"), suffix = c("", "_nearest")) %>%
filter(id != id_nearest) %>%
with_groups(id, slice_min, d_meters, n = 1) %>%
mutate(d_meters_euclidean = sqrt((X - X_nearest)^2 + (Y - Y_nearest)^2),
d_meters = pmax(d_meters_euclidean, d_meters)) %>%
select(-d_meters_euclidean)
nearest_sta <- potential_matches %>%
select(id, name, address, d_meters, id_nearest, name_nearest, address_nearest)
nearest_sta %>%
write_csv("Data/fcev_station_to_nearest_station.csv")
|
## Print the number of ranges in every serialized GRanges object in the
## current directory, one "file length" line each.
suppressPackageStartupMessages(library(GenomicRanges))
## NOTE(review): the second argument of list.files() is a regex, so "." here
## matches any character (e.g. "grangesXrds" would also match); an anchored
## pattern like "granges\\.rds$" would be stricter.
files <- list.files(".", "granges.rds")
for(f in files) {
  message(f, " ", appendLF=FALSE)
  ## updateObject() migrates objects saved under older class definitions.
  f.gr <- updateObject(readRDS(f))
  message(length(f.gr))
}
| /pipeline/count_granges.r | no_license | zeitlingerlab/he_johnston_nbt_2014 | R | false | false | 211 | r | suppressPackageStartupMessages(library(GenomicRanges))
files <- list.files(".", "granges.rds")
for(f in files) {
message(f, " ", appendLF=FALSE)
f.gr <- updateObject(readRDS(f))
message(length(f.gr))
}
|
#######
# amend precrec for each graph with spiecResid
#######
library(ROCR)
## Grid of simulation-setting values, one vector per varying parameter:
## d (dimension), n (sample size), prob (edge probability), r, dens
## (cluster settings) -- presumably mirroring the grids used when the
## simulations were generated; confirm against the generation scripts.
parameters<-list(c(seq(10,30,2)),c(seq(10,120,10)), c(seq(0.5,5,0.5)/20),
                 c(seq(1,30,5)),c(seq(0.1,0.4,0.05)))
names(parameters)<-c("d","n","prob","r","dens")
## Number of replicate graphs per setting.
Bgraph<-100
#####
#precrec simple
#####
## For every graph topology, varying parameter, parameter value and replicate
## graph, compute per-graph precision/recall curves (via ROCR) for each
## inference method, and save one data.frame per (topology, variable, value)
## under .../precrec/.  Results are read from and written to a hard-coded
## local simulation directory.
build_precrec<-function(){
  for(type in c("erdos","tree","scale-free","cluster")){
    cparam<-switch(type,"erdos"=c("n","prob"),"tree"=c("n"),
                   "cluster"=c("n","dens","r"),"scale-free"=c("n"))
    path<-paste0("/Users/raphaellemomal/simulations/Simu/PLNcov/")
    ### variable
    for(variable in cparam){
      ### value of the variable
      sapply(parameters[[variable]],function(x){
        print(paste0(type," // ", variable," // ",parameters[[variable]]))
        precrec<-data.frame(prec=double(),rec=double(),method=character(),
                            var=double(),B=integer(),param=integer(),
                            stringsAsFactors=FALSE)
        colnames(precrec)<-c("prec","rec","method","var","B","param")
        path2<-paste0(path,type,"/",variable)
        ### graph number
        sapply(1:Bgraph, function(nbgraph){
          ### inference method used
          sapply(c("_treeggm_","_spiecResid","_oracle"), function(method){
            vec<-build_vec(path2, nbgraph,x,variable,method)
            #####
            vec_pred<-vec[[1]]
            vec_obs<-vec[[2]]
            prediction<-prediction(vec_pred,vec_obs)
            ROC_precision<-performance(prediction,"prec")
            ROC_recal<-performance(prediction,"rec")
            #####
            tmp<-data.frame(ROC_precision@y.values,ROC_recal@y.values,method,x,B=1,nbgraph)
            colnames(tmp)<-c("prec","rec","method","var","B","param")
            ## `<<-` appends into the `precrec` accumulator defined in the
            ## enclosing function(x) environment -- the inner sapply()s are
            ## used purely for their side effects here.
            precrec<<- rbind(precrec,tmp)
          })
        })
        saveRDS(precrec,paste0(path,type,"/",variable,"/precrec/precrec","_",x,".rds"))
      })
    }
  }
}
## Run the full per-graph precision/recall computation.
build_precrec()
#####
# courbes moyennes superosΓ©e au nuage de points
#####
path<-"/Users/raphaellemomal/simulations/Simu/PLNcov/"
## Plot the per-graph precision/recall point clouds for the EM ("_treeggm_")
## and SpiecResid methods, with the pooled (mean) curves superimposed.
## Reads both the per-graph and the pooled precrec files for the given
## (type, variable, x) setting.
## Fixes over the original: (1) `||` was used inside dplyr::filter() on
## length > 1 vectors, which is an error from R 4.3 onwards -- replaced with
## the vectorised `%in%` / `==`; (2) the first readRDS() now honours the
## `path` argument instead of a duplicated hard-coded path (identical
## behaviour for the default).  The redundant group_by()/ungroup() around the
## filters is dropped; the selected rows are unchanged.
visu_precrec <- function(type, variable, x,
                         path = "/Users/raphaellemomal/simulations/Simu/PLNcov/") {
  precrec <- readRDS(paste0(path, type,
                            "/", variable, "/precrec/precrec", "_", x, ".rds"))
  indices <- which(precrec$method == "_treeggm_" | precrec$method == "_spiecResid")
  precrecpool <- readRDS(paste0(path, type, "/", variable, "/precrec_pool/precrec_", x, ".rds"))
  indices2 <- which(precrecpool$method == "_treeggm_" | precrecpool$method == "_spiecResid")
  ## Tag pooled rows with a "pool" suffix so both sets can share one frame.
  precrecpool$method <- paste0(precrecpool$method, "pool")
  superimpose <- rbind(precrec[indices, c("prec", "rec", "method")],
                       precrecpool[indices2, c("prec", "rec", "method")])
  ## Split the data so the point clouds and the mean curves can be styled
  ## separately in the plot.
  d_filtered <- superimpose %>%
    filter(method %in% c("_treeggm_pool", "_spiecResidpool", "_oraclepool"))
  d_treeggm <- superimpose %>%
    filter(method == "_treeggm_")
  d_spiecResid <- superimpose %>%
    filter(method == "_spiecResid")
  ggplot(superimpose) +
    theme_bw() +
    geom_point(aes(rec, prec, group = method), data = d_spiecResid,
               colour = alpha("#ff7f0e", 0.3), size = 0.5) +
    geom_point(aes(rec, prec, group = method), data = d_treeggm,
               colour = alpha("#d62728", 0.3), size = 0.5) +
    # colourise only the filtered (pooled) data
    geom_line(aes(rec, prec, colour = method), data = d_filtered, size = 1) +
    scale_color_manual(values = c("#d62728", "#ff7f0e", "#fc5f94"),
                       breaks = c("_treeggm_pool", "_spiecResidpool", "_oraclepool"),
                       labels = c("EM ", "SpiecResid", "Oracle" )) +
    theme(legend.position = "bottom", legend.title = element_blank(),
          legend.text = element_text(size = 12)) +
    labs(x = "Recall", y = "Precision")
}
## Render one precision/recall figure to a PDF file, named after the setting.
## Uses the global `path` defined above for the output directory.
makegraph <- function(type, variable, x) {
  out_file <- paste0(path, "images/SansOracle", type, "_", variable, x, ".pdf")
  pdf(out_file, width = 6, height = 4, onefile = TRUE)
  print(visu_precrec(type, variable, x))
  dev.off()
}
## Render figures only for the two extreme values (min and max) of each
## varying parameter, for every graph topology.
for(type in c("erdos","cluster","scale-free","tree")){
  cparam<-switch(type,"erdos"=c("n","prob"),"tree"=c("n"),
                 "cluster"=c("n","dens","r"),"scale-free"=c("n"))
  for(variable in cparam){
    min_max<-c(min(parameters[[variable]]),max(parameters[[variable]]))
    for(x in min_max){
      makegraph(type, variable, x)
    }
  }
}
#################################################################################
# POOL
##############
## Flatten the upper triangles of a true precision matrix (`obs`) and a score
## matrix (`pred`) into the (predictions, labels) pair expected by
## ROCR::prediction().  An edge is labelled 1 when |obs_ij| is above
## numerical noise (1e-16), 0 otherwise.
## Fixes over the original: removes the unused local `nvar` and a leftover
## browser() comment, and collapses the two-step "zero out tiny values, then
## test == 0" into a single tolerance test (equivalent, since |0| < 1e-16).
vec_obs_pred <- function(obs, pred) {
  label <- matrix(as.numeric(abs(obs) >= 1e-16),
                  nrow = nrow(obs), ncol = ncol(obs))
  vec_pred <- pred[upper.tri(pred)]
  vec_obs <- label[upper.tri(label)]
  list(vec_pred, vec_obs)
}
## Load the true precision matrix and the method's edge scores for one
## replicate graph, and return them flattened via vec_obs_pred().
## File layout assumptions (confirm against the simulation scripts): when the
## varying parameter is "n" the Sets_param file is not suffixed with the
## value x; EM ("_treeggm_") and oracle score files store the scores under
## the "probaCond" element, the others store the matrix directly.
build_vec<-function(path2, nbgraph,x,variable,method){
  if(variable=="n"){
    obs<-readRDS(paste0(path2,"/Sets_param/Graph",nbgraph,".rds"))$omega
  }else{
    obs<-readRDS(paste0(path2,"/Sets_param/Graph",nbgraph,"_",x,".rds"))$omega
  }
  if(method=="_treeggm_"|| method=="_oracle"){
    pred<- readRDS(paste0(path2,"/Scores/Graph",nbgraph,method,x,".rds"))[["probaCond"]]
  }else{
    pred<- readRDS(paste0(path2,"/Scores/Graph",nbgraph,method,x,".rds"))
  }
  return(vec_obs_pred(obs,pred))
}
## Pooled precision/recall: for each (topology, variable, value, method),
## concatenate the prediction/label vectors over ALL replicate graphs before
## computing a single ROCR precision/recall curve, then save one tibble per
## (topology, variable, value) under .../precrec_pool/.
build_precrecPool<-function(){
  for(type in c("erdos","tree","scale-free","cluster")){
    # path<-paste0(getwd(),"/Simu/PLNcov/")
    cparam<-switch(type,"erdos"=c("n","prob"),"tree"=c("n"),
                   "cluster"=c("n","dens","r"),"scale-free"=c("n"))
    path<-paste0("/Users/raphaellemomal/simulations/Simu/PLNcov/")
    for(variable in cparam){
      #fill df
      sapply(parameters[[variable]],function(x){
        print(paste0("type: ", type," // var: ", variable," // valeur :",x))
        fatListe_methodes<-lapply(c("_treeggm_","_spiecResid","_oracle"),function(method){#c("treeggm","one_step","glasso","oracle","spiecResid")
          grosseListe<-lapply(1:Bgraph,function(nbgraph){
            # browser()
            path2<-paste0(path,type,"/",variable)
            build_vec(path2,nbgraph,x,variable,method=method)
          })
          # browser()
          # The scores for each type*variable*value*method combination are
          # transformed and concatenated over all generated graphs, giving
          # two long vectors of predictions and observations...
          vec_pred<-do.call(c,lapply(grosseListe,function(x) x[[1]]))
          vec_obs<-do.call(c,lapply(grosseListe,function(x) x[[2]]))
          prediction<-prediction(vec_pred,vec_obs)
          # ...from which the precision and recall statistics are computed.
          ROC_precision<-performance(prediction,"prec")
          ROC_recal<-performance(prediction,"rec")
          precrec<-data.frame(ROC_precision@y.values,ROC_recal@y.values,prediction@cutoffs ,method)
          colnames(precrec)<-c("prec","rec","cut","method")
          return(precrec)
        })
        # The per-method frames are concatenated for plotting.
        fatfatListe<-as_tibble(do.call(rbind,fatListe_methodes)) #~42e3 rows
        saveRDS(fatfatListe,paste0(path,type,"/",variable,"/precrec_pool/precrec_",x,".rds"))
      })
    }
  }
}
## Run the pooled precision/recall computation.
build_precrecPool()
##### LOOK PERFORMANCES
# type<-"cluster"
# variable<-"n"
# x<-100
# path<-"/Users/raphaellemomal/simulations/Simu/PLNcov/"
# precrecpool<- readRDS(paste0(path,type,"/",variable,"/precrec_",x,".rds"))
# indices2<-which(precrecpool$method=="treeggm" | precrecpool$method=="glasso")
# ggplot(precrecpool[indices2,],aes(rec,prec,colour=method,shape=method))+
# geom_point()+
# scale_shape_manual(values=c(16,15,9,8,17),
# breaks=c("treeggm","one_step", "glasso","spiecResid" ,"oracle"),
# labels=c("EM ","1 step","SpiecEasi","spiecResid", "oracle" ) )+
# scale_color_manual(values=c("#E69F00","#076443", "#8037c9","#fc5f94" ,"#56B4E9"),
# breaks=c("treeggm","one_step", "glasso","spiecResid" ,"oracle"),
# labels=c("EM ","1 step","SpiecEasi","spiecResid", "oracle" )
# )+
# guides(shape = guide_legend(override.aes = list(size = 3)))+
# labs(title="")+
# scale_y_continuous(limits = c(0,1))+
# theme_bw()
#
# ggplot(precrec,aes(rec,prec,colour=method,linetype=method))+
# geom_line(size=1)+
# # scale_type_manual(values=c(16,15,9,8,17),
# # breaks=c("treeggm","one_step", "glasso","spiecResid" ,"oracle"),
# # labels=c("EM ","1 step","SpiecEasi","spiecResid", "oracle" ) )+
# scale_linetype_manual(values=c("twodash", "solid", "dashed", "dotted", "dotdash" ),breaks=c("treeggm","one_step", "glasso","spiecResid" ,"oracle"),
# labels=c("EM ","1 step","SpiecEasi","SpiecResid", "Oracle" ) )+
# scale_color_manual(values=c("#E69F00","#076443", "#8037c9","#fc5f94" ,"#56B4E9"),
# breaks=c("treeggm","one_step", "glasso","spiecResid" ,"oracle"),
# labels=c("EM ","1 step","SpiecEasi","SpiecResid", "Oracle" )
# )+
# guides(shape = guide_legend(override.aes = list(size = 2)))+
# labs(title="")+
# scale_y_continuous(limits = c(0,1))+
# theme_bw()
#
#
| /R/codes/precrecPool.R | no_license | Rmomal/these | R | false | false | 9,454 | r |
#######
# amend precrec for each graph with spiecResid
#######
library(ROCR)
parameters<-list(c(seq(10,30,2)),c(seq(10,120,10)), c(seq(0.5,5,0.5)/20),
c(seq(1,30,5)),c(seq(0.1,0.4,0.05)))
names(parameters)<-c("d","n","prob","r","dens")
Bgraph<-100
#####
#precrec simple
#####
build_precrec<-function(){
for(type in c("erdos","tree","scale-free","cluster")){
cparam<-switch(type,"erdos"=c("n","prob"),"tree"=c("n"),
"cluster"=c("n","dens","r"),"scale-free"=c("n"))
path<-paste0("/Users/raphaellemomal/simulations/Simu/PLNcov/")
### variable
for(variable in cparam){
### valeur de la variable
sapply(parameters[[variable]],function(x){
print(paste0(type," // ", variable," // ",parameters[[variable]]))
precrec<-data.frame(prec=double(),rec=double(),method=character(),
var=double(),B=integer(),param=integer(),
stringsAsFactors=FALSE)
colnames(precrec)<-c("prec","rec","method","var","B","param")
path2<-paste0(path,type,"/",variable)
### le numero du graph
sapply(1:Bgraph, function(nbgraph){
### la methode utilisee
sapply(c("_treeggm_","_spiecResid","_oracle"), function(method){
vec<-build_vec(path2, nbgraph,x,variable,method)
#####
vec_pred<-vec[[1]]
vec_obs<-vec[[2]]
prediction<-prediction(vec_pred,vec_obs)
ROC_precision<-performance(prediction,"prec")
ROC_recal<-performance(prediction,"rec")
#####
tmp<-data.frame(ROC_precision@y.values,ROC_recal@y.values,method,x,B=1,nbgraph)
colnames(tmp)<-c("prec","rec","method","var","B","param")
precrec<<- rbind(precrec,tmp)
})
})
saveRDS(precrec,paste0(path,type,"/",variable,"/precrec/precrec","_",x,".rds"))
})
}
}
}
build_precrec()
#####
# courbes moyennes superosΓ©e au nuage de points
#####
path<-"/Users/raphaellemomal/simulations/Simu/PLNcov/"
visu_precrec<-function(type,variable,x,path="/Users/raphaellemomal/simulations/Simu/PLNcov/"){
precrec <- readRDS(paste0("/Users/raphaellemomal/simulations/Simu/PLNcov/",type,
"/",variable,"/precrec/precrec","_",x,".rds"))
indices<-which(precrec$method=="_treeggm_" | precrec$method=="_spiecResid")
precrecpool<- readRDS(paste0(path,type,"/",variable,"/precrec_pool/precrec_",x,".rds"))
indices2<-which(precrecpool$method=="_treeggm_" | precrecpool$method=="_spiecResid")
precrecpool$method<-paste0(precrecpool$method,"pool")
superimpose<-rbind(precrec[indices,c("prec","rec","method")],precrecpool[indices2,c("prec","rec","method")])
# triche : crΓ©er des filtres pour gΓ©rer les nuages de points sΓ©parΓ©ment des courbes moyennes
d_filtered <- superimpose %>%
group_by(method) %>%
filter(method=="_treeggm_pool" || method=="_spiecResidpool" || method=="_oraclepool") %>%
ungroup()
d_treeggm <- superimpose %>%
group_by(method) %>%
filter(method=="_treeggm_") %>%
ungroup()
d_spiecResid<- superimpose %>%
group_by(method) %>%
filter(method=="_spiecResid") %>%
ungroup()
ggplot(superimpose) +
theme_bw()+
geom_point(aes(rec,prec, group = method),data=d_spiecResid,colour = alpha("#ff7f0e", 0.3),size=0.5) +
geom_point(aes(rec,prec, group = method),data=d_treeggm,colour = alpha("#d62728", 0.3),size=0.5) +
# colourise only the filtered data
geom_line(aes(rec,prec, colour = method), data = d_filtered, size=1)+
scale_color_manual(values=c("#d62728","#ff7f0e","#fc5f94"), #jaune,bleu, rose
breaks=c("_treeggm_pool","_spiecResidpool","_oraclepool"),
labels=c("EM ","SpiecResid","Oracle" ))+
theme(legend.position="bottom", legend.title = element_blank(),
legend.text = element_text(size=12))+
labs(x="Recall",y="Precision")
}
makegraph<-function(type, variable, x){
pdf(paste0(path,"images/SansOracle",type,"_",variable,x,".pdf"),
width=6,
height=4,onefile=TRUE)
print(visu_precrec(type,variable,x))
dev.off()
}
for(type in c("erdos","cluster","scale-free","tree")){
cparam<-switch(type,"erdos"=c("n","prob"),"tree"=c("n"),
"cluster"=c("n","dens","r"),"scale-free"=c("n"))
for(variable in cparam){
min_max<-c(min(parameters[[variable]]),max(parameters[[variable]]))
for(x in min_max){
makegraph(type, variable, x)
}
}
}
#################################################################################
# POOL
##############
vec_obs_pred<-function(obs, pred){
# browser()
nvar<-ncol(obs)
obs[which(abs(obs)<1e-16)]<-0
indices_nuls<-which(obs==0)
label<-matrix(1,nrow=nrow(obs),ncol=ncol(obs))
label[indices_nuls]<-0
vec_pred<-as.vector(pred[upper.tri(pred)])
vec_obs<-as.vector(label[upper.tri(label)])
return(list(vec_pred,vec_obs))
}
build_vec<-function(path2, nbgraph,x,variable,method){
if(variable=="n"){
obs<-readRDS(paste0(path2,"/Sets_param/Graph",nbgraph,".rds"))$omega
}else{
obs<-readRDS(paste0(path2,"/Sets_param/Graph",nbgraph,"_",x,".rds"))$omega
}
if(method=="_treeggm_"|| method=="_oracle"){
pred<- readRDS(paste0(path2,"/Scores/Graph",nbgraph,method,x,".rds"))[["probaCond"]]
}else{
pred<- readRDS(paste0(path2,"/Scores/Graph",nbgraph,method,x,".rds"))
}
return(vec_obs_pred(obs,pred))
}
# Build pooled precision/recall data for every simulation setting.
# For each graph type and each varying parameter value, the edge scores of all
# Bgraph simulated graphs are concatenated ("pooled") per method, and a single
# precision/recall curve is computed on the pooled vectors with ROCR.
# Side effects only: one RDS file is written per type*variable*value crossing.
# Relies on globals: `parameters` (named list of parameter values) and
# `Bgraph` (number of simulated graphs per setting).
build_precrecPool <- function(){
  for (type in c("erdos", "tree", "scale-free", "cluster")) {
    # path<-paste0(getwd(),"/Simu/PLNcov/")
    cparam <- switch(type,
                     "erdos" = c("n", "prob"), "tree" = c("n"),
                     "cluster" = c("n", "dens", "r"), "scale-free" = c("n"))
    # NOTE(review): hard-coded absolute path kept for behavior; the commented
    # getwd()-based path above would be more portable.
    path <- paste0("/Users/raphaellemomal/simulations/Simu/PLNcov/")
    for (variable in cparam) {
      # Plain for loop: this iteration is for side effects only (saveRDS),
      # so sapply's collected return value was never used.
      for (x in parameters[[variable]]) {
        print(paste0("type: ", type, " // var: ", variable, " // valeur :", x))
        fatListe_methodes <- lapply(c("_treeggm_", "_spiecResid", "_oracle"), function(method){
          grosseListe <- lapply(seq_len(Bgraph), function(nbgraph){
            path2 <- paste0(path, type, "/", variable)
            build_vec(path2, nbgraph, x, variable, method = method)
          })
          # Scores for each type*variable*value*method crossing are pooled
          # over all generated graphs, yielding one long vector of
          # predictions and one of observations...
          vec_pred <- do.call(c, lapply(grosseListe, function(g) g[[1]]))
          vec_obs <- do.call(c, lapply(grosseListe, function(g) g[[2]]))
          # Renamed from `prediction` so the ROCR::prediction() function is
          # not shadowed by a variable of the same name.
          pred_obj <- prediction(vec_pred, vec_obs)
          # ...on which the precision and recall stats are finally computed.
          ROC_precision <- performance(pred_obj, "prec")
          ROC_recal <- performance(pred_obj, "rec")
          precrec <- data.frame(ROC_precision@y.values, ROC_recal@y.values,
                                pred_obj@cutoffs, method)
          colnames(precrec) <- c("prec", "rec", "cut", "method")
          return(precrec)
        })
        # Frames for the different methods are concatenated for plotting.
        fatfatListe <- as_tibble(do.call(rbind, fatListe_methodes)) # ~42e3 rows
        saveRDS(fatfatListe,
                paste0(path, type, "/", variable, "/precrec_pool/precrec_", x, ".rds"))
      }
    }
  }
}
build_precrecPool()
##### LOOK PERFORMANCES
# type<-"cluster"
# variable<-"n"
# x<-100
# path<-"/Users/raphaellemomal/simulations/Simu/PLNcov/"
# precrecpool<- readRDS(paste0(path,type,"/",variable,"/precrec_pool/precrec_",x,".rds"))
# indices2<-which(precrecpool$method=="treeggm" | precrecpool$method=="glasso")
# ggplot(precrecpool[indices2,],aes(rec,prec,colour=method,shape=method))+
# geom_point()+
# scale_shape_manual(values=c(16,15,9,8,17),
# breaks=c("treeggm","one_step", "glasso","spiecResid" ,"oracle"),
# labels=c("EM ","1 step","SpiecEasi","spiecResid", "oracle" ) )+
# scale_color_manual(values=c("#E69F00","#076443", "#8037c9","#fc5f94" ,"#56B4E9"),
# breaks=c("treeggm","one_step", "glasso","spiecResid" ,"oracle"),
# labels=c("EM ","1 step","SpiecEasi","spiecResid", "oracle" )
# )+
# guides(shape = guide_legend(override.aes = list(size = 3)))+
# labs(title="")+
# scale_y_continuous(limits = c(0,1))+
# theme_bw()
#
# ggplot(precrec,aes(rec,prec,colour=method,linetype=method))+
# geom_line(size=1)+
# # scale_type_manual(values=c(16,15,9,8,17),
# # breaks=c("treeggm","one_step", "glasso","spiecResid" ,"oracle"),
# # labels=c("EM ","1 step","SpiecEasi","spiecResid", "oracle" ) )+
# scale_linetype_manual(values=c("twodash", "solid", "dashed", "dotted", "dotdash" ),breaks=c("treeggm","one_step", "glasso","spiecResid" ,"oracle"),
# labels=c("EM ","1 step","SpiecEasi","SpiecResid", "Oracle" ) )+
# scale_color_manual(values=c("#E69F00","#076443", "#8037c9","#fc5f94" ,"#56B4E9"),
# breaks=c("treeggm","one_step", "glasso","spiecResid" ,"oracle"),
# labels=c("EM ","1 step","SpiecEasi","SpiecResid", "Oracle" )
# )+
# guides(shape = guide_legend(override.aes = list(size = 2)))+
# labs(title="")+
# scale_y_continuous(limits = c(0,1))+
# theme_bw()
#
#
|
# generate perturbed alphas/lambdas
library(tidyverse)
source("R/gradient_asymmetry.R")
source("R/gradient_fitness_diff.R")
source("R/gradient_niche_diff.R")
source("R/gradient_strength_dist.R")
# read data ---------------------------------------------------------------
lambda <- read.csv2("./results/lambda.csv", stringsAsFactors = FALSE)
alpha.df <- read.csv2("results/alpha.csv", stringsAsFactors = FALSE,
                      row.names = 1)
alpha.matrix <- as.matrix(alpha.df)
# as the set of present species varies from plot to plot/year to year,
# I need to load the observed abundances in order to constrain the communities
# in addition, competition file tells me about the focal sp present
abund <- read.csv2("../Caracoles/data/abundances.csv",
                   header = TRUE, stringsAsFactors = FALSE)
sp.rates <- read.csv2("../Caracoles/data/plant_species_traits.csv",
                      header = TRUE, stringsAsFactors = FALSE)
sp.valid <- sp.rates$species.code[which(!is.na(sp.rates$germination.rate))]
base.abund <- abund %>%
  filter(species %in% sp.valid & species %in% rownames(alpha.matrix))
years <- sort(unique(base.abund$year))
plots <- sort(unique(base.abund$plot))
# some constants ----------------------------------------------------------
steps <- 10
types <- c("obs","nd","fd","ia","id")

# Build the (abundances, lambda, alpha) entry for one perturbation type.
# When fewer than two species are present a community cannot be perturbed,
# so a list of three NAs is returned (same names, so downstream code can
# test the entry uniformly).
build_entry <- function(type, abund.obs, lambda.obs, alpha.obs, n.sp, steps) {
  if (n.sp <= 1) {
    entry <- list(NA, NA, NA)
  } else if (type == "obs") {
    # observed community, unperturbed
    entry <- list(abund.obs, lambda.obs, alpha.obs)
  } else if (type == "nd") {
    # niche differences: perturb the interaction matrix
    entry <- list(abund.obs, lambda.obs,
                  gradient_niche_diff(A = alpha.obs, steps = steps))
  } else if (type == "fd") {
    # fitness differences: perturb lambdas, one data.frame per step
    lambda.fd <- gradient_fitness_diff(lambda = lambda.obs$lambda,
                                       steps = steps)
    lambda.fd.list <- lapply(seq_along(lambda.fd), function(i.step) {
      data.frame(sp = lambda.obs$sp, lambda = lambda.fd[[i.step]])
    })
    entry <- list(abund.obs, lambda.fd.list, alpha.obs)
  } else if (type == "ia") {
    # interaction asymmetry: perturb the interaction matrix
    entry <- list(abund.obs, lambda.obs,
                  gradient_asymmetry(A = alpha.obs, steps = steps))
  } else if (type == "id") {
    # interaction strength distribution: perturb the interaction matrix
    entry <- list(abund.obs, lambda.obs,
                  gradient_strength_dist(A = alpha.obs, steps = steps))
  }
  names(entry) <- c("abundances", "lambda", "alpha")
  entry
}

communities <- list()
# generate perturbed values -----------------------------------------------
for (i.year in seq_along(years)) {
  communities[[i.year]] <- list()
  for (i.plot in seq_along(plots)) {
    # subset present species for this year*plot
    present.sp <- sort(unique(base.abund$species[base.abund$year == years[i.year] &
                                                   base.abund$plot == plots[i.plot] &
                                                   base.abund$individuals > 0]))
    # sum observed abundances per species
    abund.obs <- base.abund %>%
      filter(year == years[i.year] &
               plot == plots[i.plot] &
               species %in% present.sp) %>%
      group_by(species) %>%
      summarise(abundance = sum(individuals))
    lambda.obs <- lambda[lambda$sp %in% present.sp, ]
    lambda.obs <- arrange(lambda.obs, sp)
    # interaction submatrix of present species; missing coefficients -> 0
    alpha.obs <- alpha.matrix[present.sp, present.sp]
    alpha.obs[which(is.na(alpha.obs))] <- 0
    # one entry per perturbation type, built by the shared helper
    communities[[i.year]][[i.plot]] <- lapply(
      types, build_entry,
      abund.obs = abund.obs, lambda.obs = lambda.obs, alpha.obs = alpha.obs,
      n.sp = length(present.sp), steps = steps
    )
    names(communities[[i.year]][[i.plot]]) <- types
  }# for i.plot
}# for i.year
names(communities) <- years
# store results -----------------------------------------------------------
save(communities, file = "results/communities.Rdata")
| /R/generate_perturbed_communities.R | no_license | garciacallejas/MCT_SAD | R | false | false | 6,135 | r |
# generate perturbed alphas/lambdas
library(tidyverse)
source("R/gradient_asymmetry.R")
source("R/gradient_fitness_diff.R")
source("R/gradient_niche_diff.R")
source("R/gradient_strength_dist.R")
# read data ---------------------------------------------------------------
lambda <- read.csv2("./results/lambda.csv", stringsAsFactors = FALSE)
alpha.df <- read.csv2("results/alpha.csv", stringsAsFactors = FALSE,
                      row.names = 1)
alpha.matrix <- as.matrix(alpha.df)
# as the set of present species varies from plot to plot/year to year,
# I need to load the observed abundances in order to constrain the communities
# in addition, competition file tells me about the focal sp present
abund <- read.csv2("../Caracoles/data/abundances.csv",
                   header = TRUE, stringsAsFactors = FALSE)
sp.rates <- read.csv2("../Caracoles/data/plant_species_traits.csv",
                      header = TRUE, stringsAsFactors = FALSE)
sp.valid <- sp.rates$species.code[which(!is.na(sp.rates$germination.rate))]
base.abund <- abund %>%
  filter(species %in% sp.valid & species %in% rownames(alpha.matrix))
years <- sort(unique(base.abund$year))
plots <- sort(unique(base.abund$plot))
# some constants ----------------------------------------------------------
steps <- 10
types <- c("obs","nd","fd","ia","id")

# Build the (abundances, lambda, alpha) entry for one perturbation type.
# When fewer than two species are present a community cannot be perturbed,
# so a list of three NAs is returned (same names, so downstream code can
# test the entry uniformly).
build_entry <- function(type, abund.obs, lambda.obs, alpha.obs, n.sp, steps) {
  if (n.sp <= 1) {
    entry <- list(NA, NA, NA)
  } else if (type == "obs") {
    # observed community, unperturbed
    entry <- list(abund.obs, lambda.obs, alpha.obs)
  } else if (type == "nd") {
    # niche differences: perturb the interaction matrix
    entry <- list(abund.obs, lambda.obs,
                  gradient_niche_diff(A = alpha.obs, steps = steps))
  } else if (type == "fd") {
    # fitness differences: perturb lambdas, one data.frame per step
    lambda.fd <- gradient_fitness_diff(lambda = lambda.obs$lambda,
                                       steps = steps)
    lambda.fd.list <- lapply(seq_along(lambda.fd), function(i.step) {
      data.frame(sp = lambda.obs$sp, lambda = lambda.fd[[i.step]])
    })
    entry <- list(abund.obs, lambda.fd.list, alpha.obs)
  } else if (type == "ia") {
    # interaction asymmetry: perturb the interaction matrix
    entry <- list(abund.obs, lambda.obs,
                  gradient_asymmetry(A = alpha.obs, steps = steps))
  } else if (type == "id") {
    # interaction strength distribution: perturb the interaction matrix
    entry <- list(abund.obs, lambda.obs,
                  gradient_strength_dist(A = alpha.obs, steps = steps))
  }
  names(entry) <- c("abundances", "lambda", "alpha")
  entry
}

communities <- list()
# generate perturbed values -----------------------------------------------
for (i.year in seq_along(years)) {
  communities[[i.year]] <- list()
  for (i.plot in seq_along(plots)) {
    # subset present species for this year*plot
    present.sp <- sort(unique(base.abund$species[base.abund$year == years[i.year] &
                                                   base.abund$plot == plots[i.plot] &
                                                   base.abund$individuals > 0]))
    # sum observed abundances per species
    abund.obs <- base.abund %>%
      filter(year == years[i.year] &
               plot == plots[i.plot] &
               species %in% present.sp) %>%
      group_by(species) %>%
      summarise(abundance = sum(individuals))
    lambda.obs <- lambda[lambda$sp %in% present.sp, ]
    lambda.obs <- arrange(lambda.obs, sp)
    # interaction submatrix of present species; missing coefficients -> 0
    alpha.obs <- alpha.matrix[present.sp, present.sp]
    alpha.obs[which(is.na(alpha.obs))] <- 0
    # one entry per perturbation type, built by the shared helper
    communities[[i.year]][[i.plot]] <- lapply(
      types, build_entry,
      abund.obs = abund.obs, lambda.obs = lambda.obs, alpha.obs = alpha.obs,
      n.sp = length(present.sp), steps = steps
    )
    names(communities[[i.year]][[i.plot]]) <- types
  }# for i.plot
}# for i.year
names(communities) <- years
# store results -----------------------------------------------------------
save(communities, file = "results/communities.Rdata")
|
## Copyright (C) 2012, 2013 Bitergia
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##
## This file is a part of the vizGrimoire.R package
## http://vizgrimoire.bitergia.org/
##
## Analyze and extract metrics data gathered by Bicho tool
## http://metricsgrimoire.github.com/Bicho
##
## Authors:
## Daniel Izquierdo Cortazar <dizquierdo@bitergia.com>
## Alvaro del Castillo <acs@bitergia.com>
##
##
## Usage:
## R --vanilla --args -d dbname < scr-analysis.R
## or
## R CMD BATCH scm-analysis.R
##
library("vizgrimoire")
library("ISOweek")
options(stringsAsFactors = FALSE) # avoid merge factors for toJSON
# Read command-line configuration and open the database channel.
conf <- ConfFromOptParse()
SetDBChannel (database = conf$database, user = conf$dbuser, password = conf$dbpassword)
# Map the requested granularity to a period name and its length in days;
# switch() replaces the repetitive if/else chain and still aborts on an
# unknown granularity via the unnamed default.
period <- switch(conf$granularity,
                 'years'  = 'year',
                 'months' = 'month',
                 'weeks'  = 'week',
                 'days'   = 'day',
                 stop(paste("Incorrect period:", conf$granularity)))
nperiod <- switch(conf$granularity,
                  'years' = 365, 'months' = 31, 'weeks' = 7, 'days' = 1)
# destination directory
destdir <- conf$destination
# types of analysis requested (comma-separated list)
reports <- strsplit(conf$reports, ",", fixed = TRUE)[[1]]
# BOTS filtered
# WARNING: info specific for the wikimedia case, this should be removed for other communities
# or in the case that bots are required to be in the analysis
bots <- c('wikibugs','gerrit-wm','wikibugs_','wm-bot','','Translation updater bot','jenkins-bot')
#########
#EVOLUTIONARY DATA
########
print ("ANALYSIS PER TYPE OF REVIEW")
# Every evolutionary metric is queried the same way and aligned on the common
# period ids, so the former 30 lines of copy-pasted query+merge calls are
# replaced by a list of metric functions folded with an outer merge.
# The order of the list only determines column order in the resulting JSON.
evol_metrics <- list(
  # Reviews info
  EvolReviewsSubmitted, EvolReviewsOpened, EvolReviewsNew,
  EvolReviewsInProgress, EvolReviewsClosed, EvolReviewsMerged,
  EvolReviewsAbandoned,
  # Patches info
  EvolPatchesVerified, EvolPatchesApproved, EvolPatchesCodeReview,
  EvolPatchesSent,
  # Waiting for actions info
  EvolWaiting4Reviewer, EvolWaiting4Submitter,
  # Reviewers info
  EvolReviewers
)
evol_series <- lapply(evol_metrics, function(metric) {
  completePeriodIds(metric(period, conf$startdate, conf$enddate),
                    conf$granularity, conf)
})
# Outer merge (all = TRUE), exactly as the original sequential merges did.
reviews.evol <- Reduce(function(acc, series) merge(acc, series, all = TRUE),
                       evol_series)
createJSON(reviews.evol, paste(destdir,"/scr-evolutionary.json", sep=''))
#########
#STATIC DATA
#########
# Static (aggregate) counterparts of the evolutionary metrics; each query
# returns a one-row summary, and the summaries are merged column-wise into a
# single frame — same fold pattern as the evolutionary section, but with the
# default inner merge, matching the original sequential merge() calls.
static_metrics <- list(
  # Reviews info
  StaticReviewsSubmitted, StaticReviewsOpened, StaticReviewsNew,
  StaticReviewsInProgress, StaticReviewsClosed, StaticReviewsMerged,
  StaticReviewsAbandoned,
  # Patches info
  StaticPatchesVerified, StaticPatchesApproved, StaticPatchesCodeReview,
  StaticPatchesSent,
  # Waiting for actions info
  StaticWaiting4Reviewer, StaticWaiting4Submitter,
  # Reviewers info
  StaticReviewers
)
static_values <- lapply(static_metrics,
                        function(metric) metric(period, conf$startdate, conf$enddate))
reviews.static <- Reduce(merge, static_values)
createJSON(reviews.static, paste(destdir,"/scr-static.json", sep=''))
########
#ANALYSIS PER REPOSITORY
########
print("ANALYSIS PER REPOSITORY BASIC")
if ('repositories' %in% reports) {
  # repos <- GetReposSCRName(conf$startdate, conf$enddate, 30)
  repo_names <- GetReposSCRName(conf$startdate, conf$enddate)$name
  createJSON(gsub("/", "_", repo_names), paste(destdir,"/scr-repos.json", sep=''))
  # missing information from the rest of type of reviews, patches and
  # number of patches waiting for reviewer and submitter
  for (repo in repo_names) {
    print (repo)
    safe_name <- gsub("/", "_", repo)
    type_analysis <- list('repository', repo)
    # Evolutionary series (submitted / merged / abandoned), aligned on periods.
    submitted <- completePeriodIds(
      EvolReviewsSubmitted(period, conf$startdate, conf$enddate, type_analysis),
      conf$granularity, conf)
    merged <- completePeriodIds(
      EvolReviewsMerged(period, conf$startdate, conf$enddate, type_analysis),
      conf$granularity, conf)
    abandoned <- completePeriodIds(
      EvolReviewsAbandoned(period, conf$startdate, conf$enddate, type_analysis),
      conf$granularity, conf)
    evol <- completePeriodIds(
      merge(merge(submitted, merged, all = TRUE), abandoned, all = TRUE),
      conf$granularity, conf)
    createJSON(evol, paste(destdir, "/", safe_name, "-scr-evolutionary.json", sep=''))
    # Static totals for the same three metrics.
    static <- merge(
      merge(StaticReviewsSubmitted(period, conf$startdate, conf$enddate, type_analysis),
            StaticReviewsMerged(period, conf$startdate, conf$enddate, type_analysis)),
      StaticReviewsAbandoned(period, conf$startdate, conf$enddate, type_analysis))
    createJSON(static, paste(destdir, "/", safe_name, "-scr-static.json", sep=''))
  }
}
########
#ANALYSIS PER COMPANY
########
print("ANALYSIS PER COMPANY BASIC")
if ('companies' %in% reports) {
  company_names <- GetCompaniesSCRName(conf$startdate, conf$enddate, conf$identities_db)$name
  createJSON(gsub("/", "_", company_names), paste(destdir,"/scr-companies.json", sep=''))
  # missing information from the rest of type of reviews, patches and
  # number of patches waiting for reviewer and submitter
  for (company in company_names) {
    print(company)
    safe_name <- gsub("/", "_", company)
    type_analysis <- list('company', company)
    # Evolutionary series (submitted / merged / abandoned), aligned on periods.
    submitted <- completePeriodIds(
      EvolReviewsSubmitted(period, conf$startdate, conf$enddate, type_analysis, conf$identities_db),
      conf$granularity, conf)
    merged <- completePeriodIds(
      EvolReviewsMerged(period, conf$startdate, conf$enddate, type_analysis, conf$identities_db),
      conf$granularity, conf)
    abandoned <- completePeriodIds(
      EvolReviewsAbandoned(period, conf$startdate, conf$enddate, type_analysis, conf$identities_db),
      conf$granularity, conf)
    evol <- completePeriodIds(
      merge(merge(submitted, merged, all = TRUE), abandoned, all = TRUE),
      conf$granularity, conf)
    createJSON(evol, paste(destdir, "/", safe_name, "-scr-evolutionary.json", sep=''))
    # Static totals for the same three metrics.
    static <- merge(
      merge(StaticReviewsSubmitted(period, conf$startdate, conf$enddate, type_analysis, conf$identities_db),
            StaticReviewsMerged(period, conf$startdate, conf$enddate, type_analysis, conf$identities_db)),
      StaticReviewsAbandoned(period, conf$startdate, conf$enddate, type_analysis, conf$identities_db))
    createJSON(static, paste(destdir, "/", safe_name, "-scr-static.json", sep=''))
  }
}
########
# PEOPLE
########
if ('people' %in% reports) {
  print("PEOPLE ANALYSIS")
  people <- GetPeopleListSCR(conf$startdate, conf$enddate)$id
  # Keep at most the first 60 ids. head() is safe for short or empty lists,
  # unlike the former people[1:limit], which produced a spurious NA entry
  # when the id list was empty (1:0 indexes element 1 of an empty vector).
  people <- head(people, 60)
  createJSON(people, paste(destdir,"/scr-people.json",sep=''))
  for (upeople_id in people){
    # Per-person evolutionary series, aligned on periods; missing periods
    # count as zero activity.
    evol <- GetPeopleEvolSCR(upeople_id, period, conf$startdate, conf$enddate)
    evol <- completePeriodIds(evol, conf$granularity, conf)
    evol[is.na(evol)] <- 0
    createJSON(evol, paste(destdir,"/people-",upeople_id,"-scr-evolutionary.json", sep=''))
    # Per-person aggregate totals.
    static <- GetPeopleStaticSCR(upeople_id, conf$startdate, conf$enddate)
    createJSON(static, paste(destdir,"/people-",upeople_id,"-scr-static.json", sep=''))
  }
}
# Tops
# NOTE(review): the list keys are inconsistently named ('reviewers' vs
# 'openers.', 'last year' vs 'last_month'); they are kept verbatim because
# downstream JSON consumers may rely on the exact keys — confirm before
# normalizing.
top_reviewers <- list(
  'reviewers'            = GetTopReviewersSCR(0,   conf$startdate, conf$enddate, conf$identities_db, bots),
  'reviewers.last year'  = GetTopReviewersSCR(365, conf$startdate, conf$enddate, conf$identities_db, bots),
  'reviewers.last month' = GetTopReviewersSCR(31,  conf$startdate, conf$enddate, conf$identities_db, bots)
)
# Top openers
top_openers <- list(
  'openers.'           = GetTopOpenersSCR(0,   conf$startdate, conf$enddate, conf$identities_db, bots),
  'openers.last year'  = GetTopOpenersSCR(365, conf$startdate, conf$enddate, conf$identities_db, bots),
  'openers.last_month' = GetTopOpenersSCR(31,  conf$startdate, conf$enddate, conf$identities_db, bots)
)
# Top mergers
top_mergers <- list(
  'mergers.'           = GetTopMergersSCR(0,   conf$startdate, conf$enddate, conf$identities_db, bots),
  'mergers.last year'  = GetTopMergersSCR(365, conf$startdate, conf$enddate, conf$identities_db, bots),
  'mergers.last_month' = GetTopMergersSCR(31,  conf$startdate, conf$enddate, conf$identities_db, bots)
)
createJSON (c(top_reviewers, top_openers, top_mergers), paste(destdir,"/scr-top.json", sep=''))
| /vizGrimoireJS/scr-analysis.R | no_license | aaparrui/VizGrimoireR | R | false | false | 12,239 | r | ## Copyright (C) 2012, 2013 Bitergia
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##
## This file is a part of the vizGrimoire.R package
## http://vizgrimoire.bitergia.org/
##
## Analyze and extract metrics data gathered by Bicho tool
## http://metricsgrimoire.github.com/Bicho
##
## Authors:
## Daniel Izquierdo Cortazar <dizquierdo@bitergia.com>
## Alvaro del Castillo <acs@bitergia.com>
##
##
## Usage:
## R --vanilla --args -d dbname < scr-analysis.R
## or
## R CMD BATCH scm-analysis.R
##
library("vizgrimoire")
library("ISOweek")
options(stringsAsFactors = FALSE) # avoid merge factors for toJSON
# Read command-line configuration and open the database channel.
conf <- ConfFromOptParse()
SetDBChannel (database = conf$database, user = conf$dbuser, password = conf$dbpassword)
# Map the requested granularity to a period name and its length in days;
# switch() replaces the repetitive if/else chain and still aborts on an
# unknown granularity via the unnamed default.
period <- switch(conf$granularity,
                 'years'  = 'year',
                 'months' = 'month',
                 'weeks'  = 'week',
                 'days'   = 'day',
                 stop(paste("Incorrect period:", conf$granularity)))
nperiod <- switch(conf$granularity,
                  'years' = 365, 'months' = 31, 'weeks' = 7, 'days' = 1)
# destination directory
destdir <- conf$destination
# types of analysis requested (comma-separated list)
reports <- strsplit(conf$reports, ",", fixed = TRUE)[[1]]
# BOTS filtered
# WARNING: info specific for the wikimedia case, this should be removed for other communities
# or in the case that bots are required to be in the analysis
bots <- c('wikibugs','gerrit-wm','wikibugs_','wm-bot','','Translation updater bot','jenkins-bot')
#########
#EVOLUTIONARY DATA
########
print ("ANALYSIS PER TYPE OF REVIEW")
# Every evolutionary metric is queried the same way and aligned on the common
# period ids, so the former 30 lines of copy-pasted query+merge calls are
# replaced by a list of metric functions folded with an outer merge.
# The order of the list only determines column order in the resulting JSON.
evol_metrics <- list(
  # Reviews info
  EvolReviewsSubmitted, EvolReviewsOpened, EvolReviewsNew,
  EvolReviewsInProgress, EvolReviewsClosed, EvolReviewsMerged,
  EvolReviewsAbandoned,
  # Patches info
  EvolPatchesVerified, EvolPatchesApproved, EvolPatchesCodeReview,
  EvolPatchesSent,
  # Waiting for actions info
  EvolWaiting4Reviewer, EvolWaiting4Submitter,
  # Reviewers info
  EvolReviewers
)
evol_series <- lapply(evol_metrics, function(metric) {
  completePeriodIds(metric(period, conf$startdate, conf$enddate),
                    conf$granularity, conf)
})
# Outer merge (all = TRUE), exactly as the original sequential merges did.
reviews.evol <- Reduce(function(acc, series) merge(acc, series, all = TRUE),
                       evol_series)
createJSON(reviews.evol, paste(destdir,"/scr-evolutionary.json", sep=''))
#########
#STATIC DATA
#########
# Static (aggregate) counterparts of the evolutionary metrics; each query
# returns a one-row summary, and the summaries are merged column-wise into a
# single frame — same fold pattern as the evolutionary section, but with the
# default inner merge, matching the original sequential merge() calls.
static_metrics <- list(
  # Reviews info
  StaticReviewsSubmitted, StaticReviewsOpened, StaticReviewsNew,
  StaticReviewsInProgress, StaticReviewsClosed, StaticReviewsMerged,
  StaticReviewsAbandoned,
  # Patches info
  StaticPatchesVerified, StaticPatchesApproved, StaticPatchesCodeReview,
  StaticPatchesSent,
  # Waiting for actions info
  StaticWaiting4Reviewer, StaticWaiting4Submitter,
  # Reviewers info
  StaticReviewers
)
static_values <- lapply(static_metrics,
                        function(metric) metric(period, conf$startdate, conf$enddate))
reviews.static <- Reduce(merge, static_values)
createJSON(reviews.static, paste(destdir,"/scr-static.json", sep=''))
########
#ANALYSIS PER REPOSITORY
########
print("ANALYSIS PER REPOSITORY BASIC")
if ('repositories' %in% reports) {
  # repos <- GetReposSCRName(conf$startdate, conf$enddate, 30)
  repo_names <- GetReposSCRName(conf$startdate, conf$enddate)$name
  createJSON(gsub("/", "_", repo_names), paste(destdir,"/scr-repos.json", sep=''))
  # missing information from the rest of type of reviews, patches and
  # number of patches waiting for reviewer and submitter
  for (repo in repo_names) {
    print (repo)
    safe_name <- gsub("/", "_", repo)
    type_analysis <- list('repository', repo)
    # Evolutionary series (submitted / merged / abandoned), aligned on periods.
    submitted <- completePeriodIds(
      EvolReviewsSubmitted(period, conf$startdate, conf$enddate, type_analysis),
      conf$granularity, conf)
    merged <- completePeriodIds(
      EvolReviewsMerged(period, conf$startdate, conf$enddate, type_analysis),
      conf$granularity, conf)
    abandoned <- completePeriodIds(
      EvolReviewsAbandoned(period, conf$startdate, conf$enddate, type_analysis),
      conf$granularity, conf)
    evol <- completePeriodIds(
      merge(merge(submitted, merged, all = TRUE), abandoned, all = TRUE),
      conf$granularity, conf)
    createJSON(evol, paste(destdir, "/", safe_name, "-scr-evolutionary.json", sep=''))
    # Static totals for the same three metrics.
    static <- merge(
      merge(StaticReviewsSubmitted(period, conf$startdate, conf$enddate, type_analysis),
            StaticReviewsMerged(period, conf$startdate, conf$enddate, type_analysis)),
      StaticReviewsAbandoned(period, conf$startdate, conf$enddate, type_analysis))
    createJSON(static, paste(destdir, "/", safe_name, "-scr-static.json", sep=''))
  }
}
########
#ANALYSIS PER COMPANY
########
print("ANALYSIS PER COMPANY BASIC")
if ('companies' %in% reports) {
  company_names <- GetCompaniesSCRName(conf$startdate, conf$enddate, conf$identities_db)$name
  createJSON(gsub("/", "_", company_names), paste(destdir,"/scr-companies.json", sep=''))
  # missing information from the rest of type of reviews, patches and
  # number of patches waiting for reviewer and submitter
  for (company in company_names) {
    print(company)
    safe_name <- gsub("/", "_", company)
    type_analysis <- list('company', company)
    # Evolutionary series (submitted / merged / abandoned), aligned on periods.
    submitted <- completePeriodIds(
      EvolReviewsSubmitted(period, conf$startdate, conf$enddate, type_analysis, conf$identities_db),
      conf$granularity, conf)
    merged <- completePeriodIds(
      EvolReviewsMerged(period, conf$startdate, conf$enddate, type_analysis, conf$identities_db),
      conf$granularity, conf)
    abandoned <- completePeriodIds(
      EvolReviewsAbandoned(period, conf$startdate, conf$enddate, type_analysis, conf$identities_db),
      conf$granularity, conf)
    evol <- completePeriodIds(
      merge(merge(submitted, merged, all = TRUE), abandoned, all = TRUE),
      conf$granularity, conf)
    createJSON(evol, paste(destdir, "/", safe_name, "-scr-evolutionary.json", sep=''))
    # Static totals for the same three metrics.
    static <- merge(
      merge(StaticReviewsSubmitted(period, conf$startdate, conf$enddate, type_analysis, conf$identities_db),
            StaticReviewsMerged(period, conf$startdate, conf$enddate, type_analysis, conf$identities_db)),
      StaticReviewsAbandoned(period, conf$startdate, conf$enddate, type_analysis, conf$identities_db))
    createJSON(static, paste(destdir, "/", safe_name, "-scr-static.json", sep=''))
  }
}
########
# PEOPLE
########
if ('people' %in% reports) {
print("PEOPLE ANALYSIS")
people = GetPeopleListSCR(conf$startdate, conf$enddate)
people = people$id
limit = 60
if (length(people)<limit) limit = length(people);
people = people[1:limit]
createJSON(people, paste(destdir,"/scr-people.json",sep=''))
for (upeople_id in people){
evol = GetPeopleEvolSCR(upeople_id, period, conf$startdate, conf$enddate)
evol <- completePeriodIds(evol, conf$granularity, conf)
evol[is.na(evol)] <- 0
createJSON(evol, paste(destdir,"/people-",upeople_id,"-scr-evolutionary.json", sep=''))
static <- GetPeopleStaticSCR(upeople_id, conf$startdate, conf$enddate)
createJSON(static, paste(destdir,"/people-",upeople_id,"-scr-static.json", sep=''))
}
}
# Tops
top_reviewers <- list()
top_reviewers[['reviewers']] <- GetTopReviewersSCR(0, conf$startdate, conf$enddate, conf$identities_db, bots)
top_reviewers[['reviewers.last year']]<- GetTopReviewersSCR(365, conf$startdate, conf$enddate, conf$identities_db, bots)
top_reviewers[['reviewers.last month']]<- GetTopReviewersSCR(31, conf$startdate, conf$enddate, conf$identities_db, bots)
# Top openers
top_openers <- list()
top_openers[['openers.']]<-GetTopOpenersSCR(0, conf$startdate, conf$enddate,conf$identities_db, bots)
top_openers[['openers.last year']]<-GetTopOpenersSCR(365, conf$startdate, conf$enddate,conf$identities_db, bots)
top_openers[['openers.last_month']]<-GetTopOpenersSCR(31, conf$startdate, conf$enddate,conf$identities_db, bots)
# Top mergers
top_mergers <- list()
top_mergers[['mergers.']]<-GetTopMergersSCR(0, conf$startdate, conf$enddate,conf$identities_db, bots)
top_mergers[['mergers.last year']]<-GetTopMergersSCR(365, conf$startdate, conf$enddate,conf$identities_db, bots)
top_mergers[['mergers.last_month']]<-GetTopMergersSCR(31, conf$startdate, conf$enddate,conf$identities_db, bots)
createJSON (c(top_reviewers, top_openers, top_mergers), paste(destdir,"/scr-top.json", sep=''))
|
# Initial exploration and cleaning of the built-in `starwars` dataset,
# saved at the end as an .rds file for later analysis.
library(tidyverse)
df <- starwars
# Interactive overview of every variable (requires skimr and a viewer, e.g. RStudio).
skimr::skim(df) %>% View()
# Frequency tables for the two demographic variables of interest.
df %>%
count(sex)
df %>%
count(gender)
# Keep only rows where both sex and gender are known.
df <- df %>%
filter(!is.na(sex) & !is.na(gender))
# NOTE(review): assumes a "data/" directory exists relative to the working
# directory -- confirm before running non-interactively.
saveRDS(df, "data/starwars.rds")
| /data-raw/script_inicial.R | no_license | rodrigoest93/Analise_starwars | R | false | false | 196 | r | library(tidyverse)
df <- starwars
skimr::skim(df) %>% View()
df %>%
count(sex)
df %>%
count(gender)
df <- df %>%
filter(!is.na(sex) & !is.na(gender))
saveRDS(df, "data/starwars.rds")
|
#' Plot the outliers of a variable
#'
#' Just like \code{gf_boxplot()}, but draws only the outliers, not the box
#' (the box is hidden by drawing it with a transparent fill).
#'
#' @param ... Like other gg_formula functions, the first argument can optionally
#'   be a gg object (usually via a pipe). There must always be a formula,
#'   again as in other gg_formula commands. The optional named arguments
#'   \code{size}, \code{color}, and \code{alpha} style the outlier points as
#'   in other ggplot2 geoms; any remaining arguments are passed on to
#'   \code{gf_boxplot()}.
#'
#' @export
gf_outlier <- function(...) {
  args <- list(...)
  Prev <- tilde <- NULL
  # Optional leading gg object (typically supplied via a pipe).
  if (inherits(args[[1]], "gg")) {
    Prev <- args[[1]]
    args <- args[-1] # take it off the list
  }
  if (!inherits(args[[1]], "formula"))
    stop("Must provide a tilde expression")
  tilde <- args[[1]]
  args <- args[-1] # take it off the list
  # Extract the styling arguments (with defaults), removing them from `args`
  # so they are not forwarded a second time below.
  if ("color" %in% names(args)) {
    color <- args[["color"]]
    args <- args[names(args) != "color"]
  } else {
    color <- "blue"
  }
  if ("alpha" %in% names(args)) {
    alpha <- args[["alpha"]]
    args <- args[names(args) != "alpha"]
  } else {
    alpha <- 1.0
  }
  if ("size" %in% names(args)) {
    size <- args[["size"]]
    # Fixed: compare against the *name* "size"; the original compared against
    # the value of `size`, so the argument was never removed.
    args <- args[names(args) != "size"]
  } else {
    size <- 0.5
  }
  # Fixed: forward the *cleaned* argument list instead of the raw `...`,
  # which still contained the gg object, the formula, and the styling
  # arguments already handled above (causing duplicated arguments).
  suppressWarnings(
    do.call(gf_boxplot,
            c(list(Prev, tilde, outlier.color = color, outlier.alpha = alpha,
                   color = color, fill = NA, outlier.size = size,
                   outlier.fill = color),
              args))
  )
}
| /R/gf_outlier.R | no_license | dtkaplan/SDSdata | R | false | false | 1,328 | r | #' Plot the outliers of a variable
#'
#' @param ... Like other gg_formula functions, the first argument can optionally be a gg object (usually via a pipe). There must always
#' be a formula, again as in other gg_formula commands.
#' @param size as in other ggplot2 geoms
#' @param color ditto
#' @param alpha ditto
#'
#'
#' This is just like gf_boxplot(), but doesn't draw the box!
#'
#' @export
# Just like gf_boxplot(), but draws only the outliers, not the box (the box
# is hidden with a transparent fill).  The first argument may optionally be a
# gg object (usually via a pipe); there must always be a formula.  Optional
# named arguments size/color/alpha style the outlier points; any remaining
# arguments are passed on to gf_boxplot().
gf_outlier <- function(...) {
  args <- list(...)
  Prev <- tilde <- NULL
  # Optional leading gg object (typically supplied via a pipe).
  if (inherits(args[[1]], "gg")) {
    Prev <- args[[1]]
    args <- args[-1] # take it off the list
  }
  if (!inherits(args[[1]], "formula"))
    stop("Must provide a tilde expression")
  tilde <- args[[1]]
  args <- args[-1] # take it off the list
  # Extract the styling arguments (with defaults), removing them from `args`
  # so they are not forwarded a second time below.
  if ("color" %in% names(args)) {
    color <- args[["color"]]
    args <- args[names(args) != "color"]
  } else {
    color <- "blue"
  }
  if ("alpha" %in% names(args)) {
    alpha <- args[["alpha"]]
    args <- args[names(args) != "alpha"]
  } else {
    alpha <- 1.0
  }
  if ("size" %in% names(args)) {
    size <- args[["size"]]
    # Fixed: compare against the *name* "size"; the original compared against
    # the value of `size`, so the argument was never removed.
    args <- args[names(args) != "size"]
  } else {
    size <- 0.5
  }
  # Fixed: forward the *cleaned* argument list instead of the raw `...`,
  # which still contained the gg object, the formula, and the styling
  # arguments already handled above (causing duplicated arguments).
  suppressWarnings(
    do.call(gf_boxplot,
            c(list(Prev, tilde, outlier.color = color, outlier.alpha = alpha,
                   color = color, fill = NA, outlier.size = size,
                   outlier.fill = color),
              args))
  )
}
|
library(ggplot2)
library(dplyr)
# MA-style plot of DESeq2 results: log2 fold change vs. mean expression,
# with significant genes coloured and the most extreme ones labelled.
#
# Args:
#   deseq.res.df: data frame of DESeq2 results; must contain the columns
#     baseMean, log2FoldChange, padj and gene_name.
#   title:        plot title.
#   sigThreshold: adjusted p-value cutoff for calling a gene significant.
#   xlim, ylim:   optional axis limits (applied via coord_cartesian).
#   maxLabels:    maximum number of gene labels to draw.
#   labelSize:    text size for the gene labels.
#
# Returns: a ggplot object.
plotDE = function(deseq.res.df, title, sigThreshold=0.01, xlim=NULL, ylim=NULL, maxLabels=100, labelSize=2.2) {
  p = ggplot(deseq.res.df %>% mutate(sig=(padj < sigThreshold)), aes(x=baseMean, y=log2FoldChange, col=sig)) +
    geom_point(size=0.5) +
    scale_color_manual(values=c("black", "red", "dodgerblue")) +
    theme_bw(14) +
    scale_x_log10() + xlab("DESeq2 baseMean expression") +
    theme(legend.position="none") +
    ggtitle(title)
  # Determine the points to label by fitting a spline to the points and
  # adjusting it until we get the desired number of points beyond the line
  deseq.res.sig = deseq.res.df %>% filter(padj < sigThreshold) %>%
    arrange(baseMean)
  deseq.res.sig.neg = deseq.res.sig %>% filter(log2FoldChange < 0)
  deseq.res.sig.pos = deseq.res.sig %>% filter(log2FoldChange > 0)
  fit.neg <- smooth.spline(log10(deseq.res.sig.neg$baseMean), deseq.res.sig.neg$log2FoldChange, df=7)
  fit.pos <- smooth.spline(log10(deseq.res.sig.pos$baseMean), deseq.res.sig.pos$log2FoldChange, df=7)
  if (nrow(deseq.res.sig) <= maxLabels) {
    # Few enough significant genes: label them all.
    deseq.res.sig.plot = deseq.res.sig
  } else {
    # Fit splines to the significant genes, and adjust these to get the
    # desired number of points labelled.  `fct` scales the spline and
    # `offset` shifts it outwards (renamed from `factor`, which shadowed
    # base::factor).
    fct = 0.9
    offset = -0.2
    numlabels = sum(deseq.res.sig.pos$log2FoldChange > (predict(fit.pos, log10(deseq.res.sig.pos$baseMean))$y * fct + offset)) +
      sum(deseq.res.sig.neg$log2FoldChange < (predict(fit.neg, log10(deseq.res.sig.neg$baseMean))$y * fct - offset))
    # Relax the threshold until no more than maxLabels genes lie beyond it.
    while (numlabels > maxLabels) {
      offset = offset + 0.1
      fct = fct * 1.04
      numlabels = sum(deseq.res.sig.pos$log2FoldChange > (predict(fit.pos, log10(deseq.res.sig.pos$baseMean))$y * fct + offset)) +
        sum(deseq.res.sig.neg$log2FoldChange < (predict(fit.neg, log10(deseq.res.sig.neg$baseMean))$y * fct - offset))
    }
    deseq.res.sig.plot = rbind(deseq.res.sig.pos[deseq.res.sig.pos$log2FoldChange > (predict(fit.pos, log10(deseq.res.sig.pos$baseMean))$y * fct + offset),],
                               deseq.res.sig.neg[deseq.res.sig.neg$log2FoldChange < (predict(fit.neg, log10(deseq.res.sig.neg$baseMean))$y * fct - offset),])
  }
  p = p + annotate(geom="text", x=deseq.res.sig.plot$baseMean, y=deseq.res.sig.plot$log2FoldChange,
                   label=deseq.res.sig.plot$gene_name, col="blue", hjust = -0.2, size=labelSize)
  #p = p + geom_line(aes(10^x, y*fct - offset), data=as.data.frame(fit.neg[c("x","y")]), col="grey90") +
  #    geom_line(aes(10^x, y*fct + offset), data=as.data.frame(fit.pos[c("x","y")]), col="grey90")
  # Fixed: use scalar `||` rather than vectorized `|` in a length-one condition.
  if (!is.null(xlim) || !is.null(ylim)) {
    p = p + coord_cartesian(xlim=xlim, ylim=ylim)
  }
  return(p)
}
| /R/plotDE.R | no_license | Jeremy37/ot | R | false | false | 2,748 | r | library(ggplot2)
library(dplyr)
# MA-style plot of DESeq2 results: log2 fold change vs. mean expression,
# with significant genes coloured and the most extreme ones labelled.
#
# Args:
#   deseq.res.df: data frame of DESeq2 results; must contain the columns
#     baseMean, log2FoldChange, padj and gene_name.
#   title:        plot title.
#   sigThreshold: adjusted p-value cutoff for calling a gene significant.
#   xlim, ylim:   optional axis limits (applied via coord_cartesian).
#   maxLabels:    maximum number of gene labels to draw.
#   labelSize:    text size for the gene labels.
#
# Returns: a ggplot object.
plotDE = function(deseq.res.df, title, sigThreshold=0.01, xlim=NULL, ylim=NULL, maxLabels=100, labelSize=2.2) {
  p = ggplot(deseq.res.df %>% mutate(sig=(padj < sigThreshold)), aes(x=baseMean, y=log2FoldChange, col=sig)) +
    geom_point(size=0.5) +
    scale_color_manual(values=c("black", "red", "dodgerblue")) +
    theme_bw(14) +
    scale_x_log10() + xlab("DESeq2 baseMean expression") +
    theme(legend.position="none") +
    ggtitle(title)
  # Determine the points to label by fitting a spline to the points and
  # adjusting it until we get the desired number of points beyond the line
  deseq.res.sig = deseq.res.df %>% filter(padj < sigThreshold) %>%
    arrange(baseMean)
  deseq.res.sig.neg = deseq.res.sig %>% filter(log2FoldChange < 0)
  deseq.res.sig.pos = deseq.res.sig %>% filter(log2FoldChange > 0)
  fit.neg <- smooth.spline(log10(deseq.res.sig.neg$baseMean), deseq.res.sig.neg$log2FoldChange, df=7)
  fit.pos <- smooth.spline(log10(deseq.res.sig.pos$baseMean), deseq.res.sig.pos$log2FoldChange, df=7)
  if (nrow(deseq.res.sig) <= maxLabels) {
    # Few enough significant genes: label them all.
    deseq.res.sig.plot = deseq.res.sig
  } else {
    # Fit splines to the significant genes, and adjust these to get the
    # desired number of points labelled.  `fct` scales the spline and
    # `offset` shifts it outwards (renamed from `factor`, which shadowed
    # base::factor).
    fct = 0.9
    offset = -0.2
    numlabels = sum(deseq.res.sig.pos$log2FoldChange > (predict(fit.pos, log10(deseq.res.sig.pos$baseMean))$y * fct + offset)) +
      sum(deseq.res.sig.neg$log2FoldChange < (predict(fit.neg, log10(deseq.res.sig.neg$baseMean))$y * fct - offset))
    # Relax the threshold until no more than maxLabels genes lie beyond it.
    while (numlabels > maxLabels) {
      offset = offset + 0.1
      fct = fct * 1.04
      numlabels = sum(deseq.res.sig.pos$log2FoldChange > (predict(fit.pos, log10(deseq.res.sig.pos$baseMean))$y * fct + offset)) +
        sum(deseq.res.sig.neg$log2FoldChange < (predict(fit.neg, log10(deseq.res.sig.neg$baseMean))$y * fct - offset))
    }
    deseq.res.sig.plot = rbind(deseq.res.sig.pos[deseq.res.sig.pos$log2FoldChange > (predict(fit.pos, log10(deseq.res.sig.pos$baseMean))$y * fct + offset),],
                               deseq.res.sig.neg[deseq.res.sig.neg$log2FoldChange < (predict(fit.neg, log10(deseq.res.sig.neg$baseMean))$y * fct - offset),])
  }
  p = p + annotate(geom="text", x=deseq.res.sig.plot$baseMean, y=deseq.res.sig.plot$log2FoldChange,
                   label=deseq.res.sig.plot$gene_name, col="blue", hjust = -0.2, size=labelSize)
  #p = p + geom_line(aes(10^x, y*fct - offset), data=as.data.frame(fit.neg[c("x","y")]), col="grey90") +
  #    geom_line(aes(10^x, y*fct + offset), data=as.data.frame(fit.pos[c("x","y")]), col="grey90")
  # Fixed: use scalar `||` rather than vectorized `|` in a length-one condition.
  if (!is.null(xlim) || !is.null(ylim)) {
    p = p + coord_cartesian(xlim=xlim, ylim=ylim)
  }
  return(p)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/h2otools_models.R
\name{h2o.auc}
\alias{h2o.auc}
\title{h2o.auc}
\usage{
h2o.auc(x, y)
}
| /man/h2o.auc.Rd | no_license | rocalabern/h2otools | R | false | true | 168 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/h2otools_models.R
\name{h2o.auc}
\alias{h2o.auc}
\title{h2o.auc}
\usage{
h2o.auc(x, y)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/taskqueue_objects.R
\name{Task}
\alias{Task}
\title{TaskQueue API Objects
Accesses a Google App Engine Pull Task Queue over REST.}
\usage{
Task(enqueueTimestamp = NULL, id = NULL, leaseTimestamp = NULL,
payloadBase64 = NULL, queueName = NULL, retry_count = NULL,
tag = NULL)
}
\arguments{
\item{enqueueTimestamp}{Time (in seconds since the epoch) at which the task was enqueued}
\item{id}{Name of the task}
\item{leaseTimestamp}{Time (in seconds since the epoch) at which the task lease will expire}
\item{payloadBase64}{A bag of bytes which is the task payload}
\item{queueName}{Name of the queue that the task is in}
\item{retry_count}{The number of leases applied to this task}
\item{tag}{Tag for the task, could be used later to lease tasks grouped by a specific tag}
}
\value{
Task object
}
\description{
Auto-generated code by googleAuthR::gar_create_api_objects
at 2016-09-03 23:49:25
filename: /Users/mark/dev/R/autoGoogleAPI/googletaskqueuev1beta2.auto/R/taskqueue_objects.R
api_json: api_json
}
\details{
Objects for use by the functions created by googleAuthR::gar_create_api_skeleton
Task Object
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
\seealso{
Other Task functions: \code{\link{tasks.insert}},
\code{\link{tasks.patch}}, \code{\link{tasks.update}}
}
| /googletaskqueuev1beta2.auto/man/Task.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 1,404 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/taskqueue_objects.R
\name{Task}
\alias{Task}
\title{TaskQueue API Objects
Accesses a Google App Engine Pull Task Queue over REST.}
\usage{
Task(enqueueTimestamp = NULL, id = NULL, leaseTimestamp = NULL,
payloadBase64 = NULL, queueName = NULL, retry_count = NULL,
tag = NULL)
}
\arguments{
\item{enqueueTimestamp}{Time (in seconds since the epoch) at which the task was enqueued}
\item{id}{Name of the task}
\item{leaseTimestamp}{Time (in seconds since the epoch) at which the task lease will expire}
\item{payloadBase64}{A bag of bytes which is the task payload}
\item{queueName}{Name of the queue that the task is in}
\item{retry_count}{The number of leases applied to this task}
\item{tag}{Tag for the task, could be used later to lease tasks grouped by a specific tag}
}
\value{
Task object
}
\description{
Auto-generated code by googleAuthR::gar_create_api_objects
at 2016-09-03 23:49:25
filename: /Users/mark/dev/R/autoGoogleAPI/googletaskqueuev1beta2.auto/R/taskqueue_objects.R
api_json: api_json
}
\details{
Objects for use by the functions created by googleAuthR::gar_create_api_skeleton
Task Object
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
\seealso{
Other Task functions: \code{\link{tasks.insert}},
\code{\link{tasks.patch}}, \code{\link{tasks.update}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/models.R
\name{JAGSmodel}
\alias{JAGSmodel}
\title{Return the appropriate JAGS model to use}
\usage{
JAGSmodel(
withInfectTimes = TRUE,
delayAdjust = TRUE,
withMissing = TRUE,
single = FALSE
)
}
\arguments{
\item{withInfectTimes}{Use extrapolated infection times for estimating R.}
\item{delayAdjust}{Make a delay adjustment, based on the distribution of
times from symptom onset to diagnosis, or infection to diagnosis.}
\item{withMissing}{Does the symptom onset dates vector contain missing
values? If so, dates of diagnosis should also be supplied.}
\item{single}{Do we work only with symptom onset dates? (No diagnosis
dates). If \code{TRUE}, then arguments \code{delayAdjust} and
\code{withMissing} are ignored (no delay adjustment possible and no
missing symptom onset dates allowed).}
}
\value{
An character vector of length 1, containing the JAGS model to use
}
\description{
This function formats and returns the appropriate JAGS model according
to the details of the estimation desired, i.e. whether the infection times
or symptom times are used, whether a delay adjustment is desired, whether
missing values exist in the symptom onset dates vector, and whether we
work only with symptom onset dates and not diagnosis dates.
}
| /man/JAGSmodel.Rd | no_license | furqan915/bayEStim | R | false | true | 1,333 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/models.R
\name{JAGSmodel}
\alias{JAGSmodel}
\title{Return the appropriate JAGS model to use}
\usage{
JAGSmodel(
withInfectTimes = TRUE,
delayAdjust = TRUE,
withMissing = TRUE,
single = FALSE
)
}
\arguments{
\item{withInfectTimes}{Use extrapolated infection times for estimating R.}
\item{delayAdjust}{Make a delay adjustment, based on the distribution of
times from symptom onset to diagnosis, or infection to diagnosis.}
\item{withMissing}{Does the symptom onset dates vector contain missing
values? If so, dates of diagnosis should also be supplied.}
\item{single}{Do we work only with symptom onset dates? (No diagnosis
dates). If \code{TRUE}, then arguments \code{delayAdjust} and
\code{withMissing} are ignored (no delay adjustment possible and no
missing symptom onset dates allowed).}
}
\value{
An character vector of length 1, containing the JAGS model to use
}
\description{
This function formats and returns the appropriate JAGS model according
to the details of the estimation desired, i.e. whether the infection times
or symptom times are used, whether a delay adjustment is desired, whether
missing values exist in the symptom onset dates vector, and whether we
work only with symptom onset dates and not diagnosis dates.
}
|
library(stringr)
library(purrr)
# Normalise an issuer column: lower-case each value and keep only the text
# that comes before the first '*', ',', '/' or '-' separator.
padroniza_emissor <- function(col){
  partes <- str_split(tolower(col), "[*,/-]")
  unlist(map(partes, 1))
}
# Filter news items whose text in column `secao` matches (or does not match)
# a regular expression.
#
# Args:
#   data:             data frame of news items.
#   pattern:          regular expression matched against the lower-cased text.
#   secao:            name of the column to search (string).
#   get_from_pattern: if TRUE keep the matching rows, otherwise keep the
#                     non-matching rows.  Rows whose match is NA are dropped
#                     in both cases (filter() removes NA conditions).
#
# Returns: the filtered data frame.
noticias_tema <- function(data, pattern, secao, get_from_pattern = T){
# Lower-case the selected column and test each element against the pattern.
true_false_vector <- data %>% select(secao) %>%
unlist() %>% as.vector() %>%
tolower() %>% str_detect(pattern)
if(get_from_pattern){
noticias <- data %>% filter(true_false_vector == TRUE)
} else {
noticias <- data %>% filter(true_false_vector == FALSE)
}
return(noticias)
}
# Builds a single normalised text from all the news items: lower-cased, with
# punctuation, Portuguese stopwords and numbers removed, and whitespace
# collapsed.
#
# NOTE(review): with tm >= 0.6, base transformations such as `tolower` are
# normally wrapped in content_transformer(); confirm the tm version in use.
build_corpus <- function(conteudo){
texto <- Corpus(VectorSource(conteudo))
texto <- tm_map(texto, tolower)
#texto <- tm_map(texto, stemDocument, language="pt")
texto <- tm_map(texto, removePunctuation, preserve_intra_word_dashes = TRUE)
texto <- tm_map(texto, removeWords, stopwords("pt"))
texto <- tm_map(texto, removeNumbers)
texto <- tm_map(texto, stripWhitespace)
texto <- tm_map(texto, PlainTextDocument)
# Collapse the (single-document) corpus back into one character string.
texto <- paste(strwrap(texto[[1]]), collapse = " ")
return(texto)
}
# Builds a word-frequency table from a text: same tm cleaning pipeline as
# build_corpus() plus stemming, then counts term occurrences.
#
# Returns: a data.frame with columns `word` and `freq`, sorted by
# decreasing frequency.
gera_tabela_frequencias <- function(texto){
texto <- Corpus(VectorSource(texto))
texto <- tm_map(texto, tolower)
texto <- tm_map(texto, removePunctuation, preserve_intra_word_dashes = TRUE)
texto <- tm_map(texto, removeWords, stopwords("pt"))
texto <- tm_map(texto, removeNumbers)
texto <- tm_map(texto, stripWhitespace)
texto <- tm_map(texto, stemDocument)
texto <- tm_map(texto, PlainTextDocument)
dtm <- TermDocumentMatrix(texto)
matriz <- as.matrix(dtm)
# Total count of each term across documents, most frequent first.
vector <- sort(rowSums(matriz),decreasing=TRUE)
data <- data.frame(word = names(vector),freq=vector)
return(data)
} | /word-association/utils/utils.R | no_license | allansales/bias-in-brazilian-elections | R | false | false | 1,677 | r | library(stringr)
library(purrr)
# Normalise an issuer column: lower-case each value and keep only the text
# that comes before the first '*', ',', '/' or '-' separator.
padroniza_emissor <- function(col){
  partes <- str_split(tolower(col), "[*,/-]")
  unlist(map(partes, 1))
}
# Filter news items whose text in column `secao` matches (or does not match)
# a regular expression.
#
# Args:
#   data:             data frame of news items.
#   pattern:          regular expression matched against the lower-cased text.
#   secao:            name of the column to search (string).
#   get_from_pattern: if TRUE keep the matching rows, otherwise keep the
#                     non-matching rows.  Rows whose match is NA are dropped
#                     in both cases (filter() removes NA conditions).
#
# Returns: the filtered data frame.
noticias_tema <- function(data, pattern, secao, get_from_pattern = T){
# Lower-case the selected column and test each element against the pattern.
true_false_vector <- data %>% select(secao) %>%
unlist() %>% as.vector() %>%
tolower() %>% str_detect(pattern)
if(get_from_pattern){
noticias <- data %>% filter(true_false_vector == TRUE)
} else {
noticias <- data %>% filter(true_false_vector == FALSE)
}
return(noticias)
}
# Builds a single normalised text from all the news items: lower-cased, with
# punctuation, Portuguese stopwords and numbers removed, and whitespace
# collapsed.
#
# NOTE(review): with tm >= 0.6, base transformations such as `tolower` are
# normally wrapped in content_transformer(); confirm the tm version in use.
build_corpus <- function(conteudo){
texto <- Corpus(VectorSource(conteudo))
texto <- tm_map(texto, tolower)
#texto <- tm_map(texto, stemDocument, language="pt")
texto <- tm_map(texto, removePunctuation, preserve_intra_word_dashes = TRUE)
texto <- tm_map(texto, removeWords, stopwords("pt"))
texto <- tm_map(texto, removeNumbers)
texto <- tm_map(texto, stripWhitespace)
texto <- tm_map(texto, PlainTextDocument)
# Collapse the (single-document) corpus back into one character string.
texto <- paste(strwrap(texto[[1]]), collapse = " ")
return(texto)
}
# Builds a word-frequency table from a text: same tm cleaning pipeline as
# build_corpus() plus stemming, then counts term occurrences.
#
# Returns: a data.frame with columns `word` and `freq`, sorted by
# decreasing frequency.
gera_tabela_frequencias <- function(texto){
texto <- Corpus(VectorSource(texto))
texto <- tm_map(texto, tolower)
texto <- tm_map(texto, removePunctuation, preserve_intra_word_dashes = TRUE)
texto <- tm_map(texto, removeWords, stopwords("pt"))
texto <- tm_map(texto, removeNumbers)
texto <- tm_map(texto, stripWhitespace)
texto <- tm_map(texto, stemDocument)
texto <- tm_map(texto, PlainTextDocument)
dtm <- TermDocumentMatrix(texto)
matriz <- as.matrix(dtm)
# Total count of each term across documents, most frequent first.
vector <- sort(rowSums(matriz),decreasing=TRUE)
data <- data.frame(word = names(vector),freq=vector)
return(data)
} |
#include "AEConfig.h"
#include "AE_EffectVers.h"
#ifndef AE_OS_WIN
#include <AE_General.r>
#endif
/* After Effects plug-in property list (PiPL) for the "UWV" sample effect. */
resource 'PiPL' (16000) {
{ /* array properties: 12 elements */
/* [1] */
Kind {
AEEffect
},
/* [2] */
Name {
"UWV"
},
/* [3] */
Category {
"Sample Plug-ins"
},
/* Entry point per platform/architecture. */
#ifdef AE_OS_WIN
#ifdef AE_PROC_INTELx64
CodeWin64X86 {"EntryPointFunc"},
#else
CodeWin32X86 {"EntryPointFunc"},
#endif
#else
#ifdef AE_OS_MAC
CodeMachOPowerPC {"EntryPointFunc"},
CodeMacIntel32 {"EntryPointFunc"},
CodeMacIntel64 {"EntryPointFunc"},
#endif
#endif
/* [6] */
AE_PiPL_Version {
2,
0
},
/* [7] */
AE_Effect_Spec_Version {
PF_PLUG_IN_VERSION,
PF_PLUG_IN_SUBVERS
},
/* [8] */
AE_Effect_Version {
524289 /* 1.0 */
},
/* [9] */
AE_Effect_Info_Flags {
0
},
/* [10] */
AE_Effect_Global_OutFlags {
0x06000600 /* = 100664832; stale comment said 100663808 (0x06000200) */
},
AE_Effect_Global_OutFlags_2 {
0x00000008 //8
},
/* [11] */
AE_Effect_Match_Name {
"ADBE UWV"
},
/* [12] */
AE_Reserved_Info {
0
}
}
};
| /UWVPiPL.r | permissive | ShaoBJ/UWV | R | false | false | 1,048 | r | #include "AEConfig.h"
#include "AE_EffectVers.h"
#ifndef AE_OS_WIN
#include <AE_General.r>
#endif
/* After Effects plug-in property list (PiPL) for the "UWV" sample effect. */
resource 'PiPL' (16000) {
{ /* array properties: 12 elements */
/* [1] */
Kind {
AEEffect
},
/* [2] */
Name {
"UWV"
},
/* [3] */
Category {
"Sample Plug-ins"
},
/* Entry point per platform/architecture. */
#ifdef AE_OS_WIN
#ifdef AE_PROC_INTELx64
CodeWin64X86 {"EntryPointFunc"},
#else
CodeWin32X86 {"EntryPointFunc"},
#endif
#else
#ifdef AE_OS_MAC
CodeMachOPowerPC {"EntryPointFunc"},
CodeMacIntel32 {"EntryPointFunc"},
CodeMacIntel64 {"EntryPointFunc"},
#endif
#endif
/* [6] */
AE_PiPL_Version {
2,
0
},
/* [7] */
AE_Effect_Spec_Version {
PF_PLUG_IN_VERSION,
PF_PLUG_IN_SUBVERS
},
/* [8] */
AE_Effect_Version {
524289 /* 1.0 */
},
/* [9] */
AE_Effect_Info_Flags {
0
},
/* [10] */
AE_Effect_Global_OutFlags {
0x06000600 /* = 100664832; stale comment said 100663808 (0x06000200) */
},
AE_Effect_Global_OutFlags_2 {
0x00000008 //8
},
/* [11] */
AE_Effect_Match_Name {
"ADBE UWV"
},
/* [12] */
AE_Reserved_Info {
0
}
}
};
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/aggregateData.R
\name{aggregateData}
\alias{aggregateData}
\title{Averages photographs of the same type}
\usage{
aggregateData(projectName, varFunc = "se", replicate = c("line", "type"),
overwrite = TRUE, save = TRUE)
}
\arguments{
\item{projectName}{the short name in use for the project.}
\item{varFunc}{what type of variation measurement to perform. Currently supports \code{varFunc} = "se" to calculate the standard error, \code{varFunc} = "cv" to calculate the coefficient of variation, or any built-in R function (e.g., sd).}
\item{replicate}{a character vector indicating the column names that contain the factors to use. Defaults to c("line", "type"). Note that if the typeVector name was changed in \code{createDataframe} this should be reflected here.}
\item{overwrite}{a logical value indicating whether to overwrite existing aggregate dataframe for the same project name. This allows you to save different dataframes averaging across different factors or using different variance measures}
\item{save}{denotes whether to overwrite the existing .csv file or just update the .df in the R global environment. Defaults to TRUE.}
}
\value{
A dataframe "projectName.ag" is saved to the global environment and a .csv file "projectName_ag.csv" is exported to the "parameter_files" directory.
}
\description{
Uses a user-supplied variance measure (currently supported: standard error, coefficient of variation, built-in R functions (e.g., sd) to calculate variance among photographs of the same type
}
\examples{
\dontrun{
aggregateData("myProject")
aggregateData("myProject", varFunc= "sd", replicate = c("line", "drugAmt"), overwrite = FALSE)
}
}
\seealso{
\code{\link{addType}} if there multiple factors in your experiment. Add whatever the new factor is called (default: "type2") to the replicate vector if this is appropriate.
}
| /man/aggregateData.Rd | no_license | cran/diskImageR | R | false | false | 1,935 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/aggregateData.R
\name{aggregateData}
\alias{aggregateData}
\title{Averages photographs of the same type}
\usage{
aggregateData(projectName, varFunc = "se", replicate = c("line", "type"),
overwrite = TRUE, save = TRUE)
}
\arguments{
\item{projectName}{the short name in use for the project.}
\item{varFunc}{what type of variation measurement to perform. Currently supports \code{varFunc} = "se" to calculate the standard error, \code{varFunc} = "cv" to calculate the coefficient of variation, or any built-in R function (e.g., sd).}
\item{replicate}{a character vector indicating the column names that contain the factors to use. Defaults to c("line", "type"). Note that if the typeVector name was changed in \code{createDataframe} this should be reflected here.}
\item{overwrite}{a logical value indicating whether to overwrite existing aggregate dataframe for the same project name. This allows you to save different dataframes averaging across different factors or using different variance measures}
\item{save}{denotes whether to overwrite the existing .csv file or just update the .df in the R global environment. Defaults to TRUE.}
}
\value{
A dataframe "projectName.ag" is saved to the global environment and a .csv file "projectName_ag.csv" is exported to the "parameter_files" directory.
}
\description{
Uses a user-supplied variance measure (currently supported: standard error, coefficient of variation, built-in R functions (e.g., sd) to calculate variance among photographs of the same type
}
\examples{
\dontrun{
aggregateData("myProject")
aggregateData("myProject", varFunc= "sd", replicate = c("line", "drugAmt"), overwrite = FALSE)
}
}
\seealso{
\code{\link{addType}} if there multiple factors in your experiment. Add whatever the new factor is called (default: "type2") to the replicate vector if this is appropriate.
}
|
assign_group_id <- function(points, polygons, use_col){
# to test visually
# plot(polygons, reset = F)
# plot(st_geometry(points), add = TRUE)
# drop CRS so that sf treats these like rectangles instead of curved shapes for the purposes of overlap/intersection
polygons <- sf::st_set_crs(polygons, NA)
points <- sf::st_set_crs(points, NA)
box_subset <- polygons %>% st_drop_geometry()
points %>% mutate(group_id = {st_intersects(x = points, y = polygons) %>% unlist %>% polygons$group_id[.]}) %>%
st_drop_geometry() %>%
left_join(box_subset, by = 'group_id') %>%
select(group_id, group_bbox, !!use_col)
}
| /src/group_utils.R | permissive | SimonTopp/lakesurf-data-release | R | false | false | 637 | r |
assign_group_id <- function(points, polygons, use_col){
# to test visually
# plot(polygons, reset = F)
# plot(st_geometry(points), add = TRUE)
# drop CRS so that sf treats these like rectangles instead of curved shapes for the purposes of overlap/intersection
polygons <- sf::st_set_crs(polygons, NA)
points <- sf::st_set_crs(points, NA)
box_subset <- polygons %>% st_drop_geometry()
points %>% mutate(group_id = {st_intersects(x = points, y = polygons) %>% unlist %>% polygons$group_id[.]}) %>%
st_drop_geometry() %>%
left_join(box_subset, by = 'group_id') %>%
select(group_id, group_bbox, !!use_col)
}
|
#-------------------------------------------------------------------------------
#
# Union of classes numeric and logical
#
#-------------------------------------------------------------------------------
setClassUnion( "numericORlogical", c( "numeric", "logical" ))
#
# TT.Params Class
#
# Formal (S4) class holding the tuning parameters for TT model fitting.
# Slot ranges below reflect the validation performed by the TT.Params()
# constructor.
setClass(
Class="TT.Params",
representation(
mtry="numeric", # validated to [2, 10] by the constructor
ntree="numeric", # validated to [5, 10000] by the constructor
feature.select="logical",
min.probes="numeric", # validated to [1, 250] by the constructor
cor.thresh="numeric", # validated to [-1, 1] by the constructor
OOB="logical",
quantreg="logical",
tune.cor.P="numericORlogical" # numeric cutoff, or NA (the default)
)
)
#
# TT.Params constructor
#
# Construct a validated TT.Params object.
#
# Validated arguments (the remaining ones are stored as given):
#   mtry:       must lie in [2, 10]
#   ntree:      must lie in [5, 10000]
#   min.probes: must lie in [1, 250]
#   cor.thresh: must lie in [-1, 1]
#
# Returns: a new "TT.Params" S4 object; stops with an error on invalid input.
TT.Params <- function( mtry=2, ntree=1000, feature.select=TRUE, min.probes=15, cor.thresh=0, OOB=FALSE, quantreg=FALSE, tune.cor.P=NA )
{
# VALIDATION (scalar conditions, so the short-circuit `||` is used)
# 2 <= mtry <= 10  (comment fixed: the old "1 <= mtry" did not match the check)
if ( mtry < 2 || mtry > 10 )
stop( "Invalid value for 'mtry'. Should be between 2 and 10 inclusive." )
# 5 <= ntree <= 10000  (comment fixed: the old "1 <= ntree <= 100000" did not match the check)
if ( ntree < 5 || ntree > 10000 )
stop( "Invalid value for 'ntree'. Should be between 5 and 10000 inclusive." )
# 1 <= min.probes <= 250
if ( min.probes < 1 || min.probes > 250 )
stop( "Invalid value for 'min.probes'. Should be between 1 and 250 inclusive." )
# -1 <= cor.thresh <= 1
if ( cor.thresh < -1 || cor.thresh > 1 )
stop( "Invalid value for 'cor.thresh'. Should be between -1 and 1 inclusive." )
# now it's safe to build the object
object <- new( "TT.Params", mtry=mtry, ntree=ntree, feature.select=feature.select, min.probes=min.probes, cor.thresh=cor.thresh, OOB=OOB, quantreg=quantreg, tune.cor.P=tune.cor.P )
return( object )
}
#
# TT.Params show method
#
# show() method: pretty-print a TT.Params object, one "name = value" line
# per slot (keeps the exact cat() formatting relied on by callers/logs).
setMethod(
f="show",
signature( object="TT.Params" ),
function( object )
{
cat( "mtry =", object@mtry, "\n" )
cat( "ntree =", object@ntree, "\n" )
cat( "feature.select =", object@feature.select, "\n" )
cat( "min.probes =", object@min.probes, "\n" )
cat( "cor.thresh =", object@cor.thresh, "\n" )
cat( "OOB =", object@OOB, "\n" )
cat( "QuantReg =", object@quantreg, "\n" )
cat( "Tune (OOB cor.P) =", object@tune.cor.P, "\n" )
}
)
| /R/TT_Params.R | no_license | hjanime/MaLTE | R | false | false | 2,065 | r | #-------------------------------------------------------------------------------
#
# Union of classes numeric and logical
#
#-------------------------------------------------------------------------------
setClassUnion( "numericORlogical", c( "numeric", "logical" ))
#
# TT.Params Class
#
# Formal (S4) class holding the tuning parameters for TT model fitting.
# Slot ranges below reflect the validation performed by the TT.Params()
# constructor.
setClass(
Class="TT.Params",
representation(
mtry="numeric", # validated to [2, 10] by the constructor
ntree="numeric", # validated to [5, 10000] by the constructor
feature.select="logical",
min.probes="numeric", # validated to [1, 250] by the constructor
cor.thresh="numeric", # validated to [-1, 1] by the constructor
OOB="logical",
quantreg="logical",
tune.cor.P="numericORlogical" # numeric cutoff, or NA (the default)
)
)
#
# TT.Params constructor
#
# Construct a validated TT.Params object.
#
# Validated arguments (the remaining ones are stored as given):
#   mtry:       must lie in [2, 10]
#   ntree:      must lie in [5, 10000]
#   min.probes: must lie in [1, 250]
#   cor.thresh: must lie in [-1, 1]
#
# Returns: a new "TT.Params" S4 object; stops with an error on invalid input.
TT.Params <- function( mtry=2, ntree=1000, feature.select=TRUE, min.probes=15, cor.thresh=0, OOB=FALSE, quantreg=FALSE, tune.cor.P=NA )
{
# VALIDATION (scalar conditions, so the short-circuit `||` is used)
# 2 <= mtry <= 10  (comment fixed: the old "1 <= mtry" did not match the check)
if ( mtry < 2 || mtry > 10 )
stop( "Invalid value for 'mtry'. Should be between 2 and 10 inclusive." )
# 5 <= ntree <= 10000  (comment fixed: the old "1 <= ntree <= 100000" did not match the check)
if ( ntree < 5 || ntree > 10000 )
stop( "Invalid value for 'ntree'. Should be between 5 and 10000 inclusive." )
# 1 <= min.probes <= 250
if ( min.probes < 1 || min.probes > 250 )
stop( "Invalid value for 'min.probes'. Should be between 1 and 250 inclusive." )
# -1 <= cor.thresh <= 1
if ( cor.thresh < -1 || cor.thresh > 1 )
stop( "Invalid value for 'cor.thresh'. Should be between -1 and 1 inclusive." )
# now it's safe to build the object
object <- new( "TT.Params", mtry=mtry, ntree=ntree, feature.select=feature.select, min.probes=min.probes, cor.thresh=cor.thresh, OOB=OOB, quantreg=quantreg, tune.cor.P=tune.cor.P )
return( object )
}
#
# TT.Params show method
#
# show() method: pretty-print a TT.Params object, one "name = value" line
# per slot (keeps the exact cat() formatting relied on by callers/logs).
setMethod(
f="show",
signature( object="TT.Params" ),
function( object )
{
cat( "mtry =", object@mtry, "\n" )
cat( "ntree =", object@ntree, "\n" )
cat( "feature.select =", object@feature.select, "\n" )
cat( "min.probes =", object@min.probes, "\n" )
cat( "cor.thresh =", object@cor.thresh, "\n" )
cat( "OOB =", object@OOB, "\n" )
cat( "QuantReg =", object@quantreg, "\n" )
cat( "Tune (OOB cor.P) =", object@tune.cor.P, "\n" )
}
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lmm_funcions.R
\name{reproduce_lmm_COMPLETENESS_6}
\alias{reproduce_lmm_COMPLETENESS_6}
\title{Reproduce alternative lmm model 6 for Completeness}
\usage{
reproduce_lmm_COMPLETENESS_6()
}
\value{
an object of class lme which represents the Model 6 for Completeness
}
\description{
Reproduce alternative lmm model 6 for Completeness
}
\examples{
reproduce_lmm_COMPLETENESS_6()
}
| /man/reproduce_lmm_COMPLETENESS_6.Rd | no_license | karacitir/reproducerTaskGra | R | false | true | 456 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lmm_funcions.R
\name{reproduce_lmm_COMPLETENESS_6}
\alias{reproduce_lmm_COMPLETENESS_6}
\title{Reproduce alternative lmm model 6 for Completeness}
\usage{
reproduce_lmm_COMPLETENESS_6()
}
\value{
an object of class lme which represents the Model 6 for Completeness
}
\description{
Reproduce alternative lmm model 6 for Completeness
}
\examples{
reproduce_lmm_COMPLETENESS_6()
}
|
\name{write.distance}
\alias{write.distance}
\title{Write distance matrices in NBI format}
\usage{
write.distance(dm, file = NULL, gzip = T)
}
\arguments{
\item{dm}{A distance object or matrix}
\item{file}{string containing path to file}
\item{gzip}{logical value indicating if output should be
compressed in gzip format}
}
\description{
Write distance matrices in NBI format
}
\examples{
\dontrun{
x <- data.frame(S1=1:4,S2=2:5,S3=3:6,row.names=letters[1:4])
dm <- dist(x)
write.distance(dm,"test.distance.nbi",gzip=F) # by default gzip=T
}
}
\seealso{
\code{\link{read.distance}, \link{write.nbi}}
}
| /data/cibm.utils/man/write.distance.Rd | no_license | vfpimenta/corruption-profiler | R | false | false | 619 | rd | \name{write.distance}
\alias{write.distance}
\title{Write distance matrices in NBI format}
\usage{
write.distance(dm, file = NULL, gzip = T)
}
\arguments{
\item{dm}{A distance object or matrix}
\item{file}{string containing path to file}
\item{gzip}{logical value indicating if output should be
compressed in gzip format}
}
\description{
Write distance matrices in NBI format
}
\examples{
\dontrun{
x <- data.frame(S1=1:4,S2=2:5,S3=3:6,row.names=letters[1:4])
dm <- dist(x)
write.distance(dm,"test.distance.nbi",gzip=F) # by default gzip=T
}
}
\seealso{
\code{\link{read.distance}, \link{write.nbi}}
}
|
#!/usr/bin/env Rscript
# Simulation driver "sim2": builds the parameter settings (three effect
# sizes) via get_simparm() and runs the simulation via runsim(), both
# defined in simfuncs.R.  Command-line arguments are forwarded as-is.
args <- commandArgs(trailingOnly=TRUE)
source("simfuncs.R")
# candidate values for the two treatment indicators
a1s <- c(1,-1)
a2s <- c(1,-1)
# fixed-effect coefficient vectors; the three versions differ only in the
# third coefficient (0.067 / 0.37 / 0.68), giving small/medium/large effects
alpha_small <- c(1, 0.1, 0.067, 0.15, 0.1, 0.25, -0.4)
alpha_med <- c(1, 0.1, 0.37, 0.15, 0.1, 0.25, -0.4)
alpha_large <- c(1, 0.1, 0.68, 0.15, 0.1, 0.25, -0.4)
effsizenames <- c('small','med','large')
alphalist <- setNames(list(alpha_small, alpha_med, alpha_large),
                      effsizenames)
# psi values keyed by treatment arm (names '1' and '-1')
psi <- c('1'= 0.1, '-1' = -0.1)
theta <- -0.2
# measurement occasions; the knot is the 4th occasion (t = 2)
tvec <- c(0, 0.5, 1.5, 2, 2.25, 2.5, 3)
knot <- tvec[4]
sigma <- 1
cutoff <- 1.1
# random-effects generating formula and 2x2 covariance matrix G
ff_Zgen <- Y ~ 1 + time
G <- matrix(c(1, -0.4, -0.4, 2),
            nrow=2,byrow=T)
covfunc_epsilon <- NULL
simparm <- get_simparm(args, a1s, a2s, alphalist, effsizenames,
                       psi, theta, tvec, knot, sigma, cutoff,
                       ff_Zgen, G, covfunc_epsilon)
cat("effect sizes: \n")
print(simparm$effsizelist)
runsim(simparm, 'sim2')
args <- commandArgs(trailingOnly=TRUE)
source("simfuncs.R")
a1s <- c(1,-1)
a2s <- c(1,-1)
alpha_small <- c(1, 0.1, 0.067, 0.15, 0.1, 0.25, -0.4)
alpha_med <- c(1, 0.1, 0.37, 0.15, 0.1, 0.25, -0.4)
alpha_large <- c(1, 0.1, 0.68, 0.15, 0.1, 0.25, -0.4)
effsizenames <- c('small','med','large')
alphalist <- setNames(list(alpha_small, alpha_med, alpha_large),
effsizenames)
psi <- c('1'= 0.1, '-1' = -0.1)
theta <- -0.2
tvec <- c(0, 0.5, 1.5, 2, 2.25, 2.5, 3)
knot <- tvec[4]
sigma <- 1
cutoff <- 1.1
ff_Zgen <- Y ~ 1 + time
G <- matrix(c(1, -0.4, -0.4, 2),
nrow=2,byrow=T)
covfunc_epsilon <- NULL
simparm <- get_simparm(args, a1s, a2s, alphalist, effsizenames,
psi, theta, tvec, knot, sigma, cutoff,
ff_Zgen, G, covfunc_epsilon)
cat("effect sizes: \n")
print(simparm$effsizelist)
runsim(simparm, 'sim2') |
\name{harmonise.owin}
\alias{harmonise.owin}
\alias{harmonize.owin}
\title{Make Windows Compatible}
\description{
Convert several windows to a common pixel raster.
}
\usage{
\method{harmonise}{owin}(\dots)
\method{harmonize}{owin}(\dots)
}
\arguments{
\item{\dots}{
Any number of windows (objects of class \code{"owin"})
or data which can be converted to windows by \code{\link{as.owin}}.
}
}
\details{
This function makes any number of windows compatible,
by converting them all to a common pixel grid.
This only has an effect if one of the windows is a binary mask.
If all the windows are rectangular or polygonal, they are
returned unchanged.
The command \code{\link{harmonise}} is generic. This is the
method for objects of class \code{"owin"}.
Each argument must be a window (object of class \code{"owin"}),
or data that can be converted to a window by \code{\link{as.owin}}.
The common pixel grid is determined by inspecting all the windows
in the argument list, computing the bounding box of all the
windows, then finding the binary mask with the finest spatial resolution,
and extending its pixel grid to cover the bounding box.
The return value is a list with entries corresponding to the input
arguments.
If the arguments were named (\code{name=value}) then the return value
also carries these names.
If you just want to determine the appropriate pixel resolution,
without converting the windows, use \code{\link{commonGrid}}.
}
\value{
A list of windows, of length equal to the number of arguments
\code{\dots}. The list belongs to the class \code{"solist"}.
}
\author{\adrian
and \rolf
}
\examples{
harmonise(X=letterR,
Y=grow.rectangle(Frame(letterR), 0.2),
Z=as.mask(letterR, eps=0.1),
V=as.mask(letterR, eps=0.07))
}
\seealso{
\code{\link{commonGrid}},
\code{\link{harmonise.im}},
\code{\link{as.owin}}
}
\keyword{spatial}
\keyword{manip}
| /man/harmonise.owin.Rd | no_license | rubak/spatstat | R | false | false | 1,983 | rd | \name{harmonise.owin}
\alias{harmonise.owin}
\alias{harmonize.owin}
\title{Make Windows Compatible}
\description{
Convert several windows to a common pixel raster.
}
\usage{
\method{harmonise}{owin}(\dots)
\method{harmonize}{owin}(\dots)
}
\arguments{
\item{\dots}{
Any number of windows (objects of class \code{"owin"})
or data which can be converted to windows by \code{\link{as.owin}}.
}
}
\details{
This function makes any number of windows compatible,
by converting them all to a common pixel grid.
This only has an effect if one of the windows is a binary mask.
If all the windows are rectangular or polygonal, they are
returned unchanged.
The command \code{\link{harmonise}} is generic. This is the
method for objects of class \code{"owin"}.
Each argument must be a window (object of class \code{"owin"}),
or data that can be converted to a window by \code{\link{as.owin}}.
The common pixel grid is determined by inspecting all the windows
in the argument list, computing the bounding box of all the
windows, then finding the binary mask with the finest spatial resolution,
and extending its pixel grid to cover the bounding box.
The return value is a list with entries corresponding to the input
arguments.
If the arguments were named (\code{name=value}) then the return value
also carries these names.
If you just want to determine the appropriate pixel resolution,
without converting the windows, use \code{\link{commonGrid}}.
}
\value{
A list of windows, of length equal to the number of arguments
\code{\dots}. The list belongs to the class \code{"solist"}.
}
\author{\adrian
and \rolf
}
\examples{
harmonise(X=letterR,
Y=grow.rectangle(Frame(letterR), 0.2),
Z=as.mask(letterR, eps=0.1),
V=as.mask(letterR, eps=0.07))
}
\seealso{
\code{\link{commonGrid}},
\code{\link{harmonise.im}},
\code{\link{as.owin}}
}
\keyword{spatial}
\keyword{manip}
|
#' This is not magrittr's pipe (actually it is)
#' @name %>%
#' @importFrom magrittr %>%
#' @export
#' @keywords internal
NULL
#' @importFrom methods setClass setGeneric setMethod as is
#' callNextMethod new validObject
#' @importFrom grDevices palette
#' @importFrom graphics axis boxplot legend lines par plot points segments text
#' title
#' @importFrom stats as.formula loess predict sd rnorm
#' @importFrom utils installed.packages
#' @importFrom parallel makePSOCKcluster clusterExport parLapplyLB stopCluster
#' clusterExport clusterEvalQ
NULL
| /R/import_from.R | no_license | jacobbien/simulator | R | false | false | 553 | r |
#' This is not magrittr's pipe (actually it is)
#' @name %>%
#' @importFrom magrittr %>%
#' @export
#' @keywords internal
NULL
#' @importFrom methods setClass setGeneric setMethod as is
#' callNextMethod new validObject
#' @importFrom grDevices palette
#' @importFrom graphics axis boxplot legend lines par plot points segments text
#' title
#' @importFrom stats as.formula loess predict sd rnorm
#' @importFrom utils installed.packages
#' @importFrom parallel makePSOCKcluster clusterExport parLapplyLB stopCluster
#' clusterExport clusterEvalQ
NULL
|
#' The Phylogeny class
#'
#' An S4 base class representing a phylogeny
#' @slot Name Object of class \code{\link{character}} representing the name of the phylogeny
#' @slot NbSNV Object of class integer, attribute of the class Phylogeny representing the number of single nucleotide variations (SNVs).
#' @slot NbSCNAs Object of class integer, attribute of the class Phylogeny representing the number of somatic copy number alterations (SCNAs)
# #' @slot NbSNVClusters Object of class integer, attribute of the class Phylogeny representing the number of SNVs clusters.
#' @slot snv_ids Object of class list, attribute of the class Phylogeny representing the identifiers or names of the NbSNVs SNVs clusters.
# #' @slot snvclusters_ids Object of class list, attribute of the class Phylogeny representing the identifiers or names of the NbSNVClusters SNV clusters.
# #' @slot snv_clutsers Object of class list, attribute of the class Phylogeny giving the clusters assigned to each SNV
#' @slot scna_list Object of class list, attribute of the class Phylogeny representing the list of SCNA given in the form list(scna_1_name=scna_1_attibutes, scna_2_name=scna_2_attibutes,...,scna_NbSCNAs_name=scna_NbSCNAs_attibutes ).
#' Each SCNA attribute is a list containing 2 fields :
#' \describe{
#' \item{CN}{A pair of integer representing the major and minor copies numbers of the SCNA given in the form c(major, minor)}
#' \item{LOC}{ The location or genomic region affected by the SCNA, represented in terms of the list of SNVs spanned by the SCNA. LOC is given in the form of a vector (a_1, a_2, ..., a_NbSNV). Each a_i takes values in {0,1,2,3,4} as follows:
#' \describe{
#' \item{a_i=0}{the scna do not span the locus of the SNV i}
#' \item{a_i=1}{the SCNA span the SNV i locus, and the SNV is harbored by all the copies of the major copy number chromosome}
#' \item{a_i=2}{the SCNA span the SNV i locus, and the SNV is harbored by all the copies of the minor copy number chromosome}
#' \item{a_i=3}{ the SCNA span the SNV i locus, and the SNV is harbored by one copy of the major copy number chromosome}
#' \item{a_i=4}{the SCNA span the SNV i locus, and the SNV is harbored by one copy of the minor copy number chromosome}
#' }
#' }
#' }
#' @slot Clones Object of class list, attribute of the Class Phylogeny containing the list of the clones given in the form list(clone_1_name=clone_1_attibutes, clone_2_name=clone_2_attibutes,...,clone_NbClones_name=clone_NbClones_attibutes). The Germline clones do not need to be list, it will be deduced. Each clone's attribute is a list containing the following fields :
#' \describe{
#' \item{snv}{ID list of the SNVs harbored by cells of the clone}
#' \item{scna}{ID list of the SCNAs affecting the cells of the clone}
#' \item{prev}{Cellular prevalence of the clone}
#' }
#'
#'
#' @seealso \code{\link{simulation.Phylogeny}},
#' @export Phylogeny
#' @exportClass Phylogeny
#'
Phylogeny <- setClass("Phylogeny",
                   slots = c(
                     Name = "character",
                     NbSNVs = "numeric",
                     NbSCNAs = "numeric",
                     NbSNVClusters = "numeric",
                     snv_ids = "character",
                     scna_list = "list",
                     Clones = "list"),
                   # Prototype fixes: the original named a nonexistent slot
                   # 'NbSNV' (the slot is 'NbSNVs') and used NULL for the
                   # character/list slots, which does not match the slot types.
                   prototype = list(Name = "NormalSample",
                                    NbSNVs = 0,
                                    NbSCNAs = 0,
                                    NbSNVClusters = 0,
                                    snv_ids = character(0),
                                    scna_list = list(),
                                    Clones = list()
                   ),
                   # Validity: must return TRUE when valid, otherwise a single
                   # character string describing the problem.  The original
                   # referenced bare names (NbSNVs, snv_ids, ...) instead of
                   # object@ slots -- silently picking up same-named globals --
                   # and used multi-argument return() calls, which are errors.
                   validity=function(object)
                   {
                     # helper: TRUE for whole numbers stored as doubles
                     is.wholenumber <-
                       function(x, tol = .Machine$double.eps^0.5) abs(x - round(x)) < tol
                     # counts must be non-negative whole numbers
                     if(!is.wholenumber(object@NbSNVs) || !is.wholenumber(object@NbSCNAs)
                        || object@NbSNVs < 0 || object@NbSCNAs < 0)
                       return("The attributes NbSNVs and NbSCNAs should all be positive integers. Check their values.")
                     # exactly as many SNV identifiers as declared SNVs
                     if(length(object@snv_ids) != object@NbSNVs)
                       return(paste(object@NbSNVs, "SNV IDs expected but", length(object@snv_ids), "provided. Check your parameter snv_ids"))
                     # exactly as many SCNA entries as declared SCNAs
                     if(length(object@scna_list) != object@NbSCNAs)
                       return(paste(object@NbSCNAs, "SCNAs expected but", length(object@scna_list), "provided. Check your parameter scna_list"))
                     # every SCNA needs copy numbers (CN) and a genomic location (LOC)
                     for(scna in object@scna_list){
                       if(!("CN" %in% names(scna)))
                         return("Parameter CN (copy numbers) is missing for an SCNA.")
                       if(!("LOC" %in% names(scna)))
                         return("Parameter LOC (Genomic Location) is missing for an SCNA.")
                     }
                     # clones may only use the fields snv/scna/prev, and may only
                     # reference SNV/SCNA identifiers declared above
                     for(clone in object@Clones){
                       unknown <- setdiff(names(clone), c("snv","scna","prev"))
                       if(length(unknown) > 0)
                         return(paste("Unknown parameters to clones:", paste(unknown, collapse=", ")))
                       if(!is.null(clone$snv) && !all(clone$snv %in% object@snv_ids))
                         return(paste("Unknown snv provided:", paste(setdiff(clone$snv, object@snv_ids), collapse=", ")))
                       if(!is.null(clone$scna) && !all(clone$scna %in% names(object@scna_list)))
                         return(paste("Unknown scna provided:", paste(setdiff(clone$scna, names(object@scna_list)), collapse=", ")))
                     }
                     TRUE
                   }
)
# Examples ----
# Build an example phylogeny ("phylogeny11"): 5 SNVs (M1-M5), 2 SCNAs
# (M6, M7) and six clones whose prevalences sum to 1.0.
phylogeny="phylogeny11"
# Number of SNVs and Number of SCNAs
NbSNVs= 5
NbSCNAs= 2
#NbSNVClusters = 5
snv_ids = c("M1","M2","M3","M4","M5")
#snvclusters_ids = c("M1","M2","M3","M4","M5")
#snv_clusters = c("M1","M2","M3","M4","M5")
# SCNA definitions: CN = c(major, minor) copy numbers; LOC = per-SNV code
# vector (see the class documentation for the meaning of codes 0..4)
scna_list = list(
  "M6"=list(CN=c(2,1),LOC=c(0,0,1,0,0) ),
  "M7"=list(CN=c(2,0),LOC=c(0,0,0,0,1))
)
# Clones: snv = SNV ids carried, scna = SCNA ids, prev = cellular prevalence
Clones = list(
  # "Germline" = list(snv=c(0,0,0,0,0),0p=0.0 ),
  "CloneA" = list(snv="M1",
                  prev=0.1),
  "CloneB" = list(snv=c("M1","M2"),
                  prev=0.3),
  "CloneC" = list(snv=c("M1","M2","M3"),
                  prev=0.1),
  "CloneD" = list(snv=c("M1","M2","M3"),
                  scna="M6",
                  prev=0.15),
  "CloneE" = list(snv=c("M1","M2","M3","M4"),
                  scna="M6",
                  prev=0.15),
  "CloneF" = list(snv=c("M1","M2","M5"),
                  scna="M7",
                  prev=0.20)
)
phylogeny11=Phylogeny(Name="phylogeny11",NbSNVs=NbSNVs,NbSCNAs=NbSCNAs, snv_ids=snv_ids, scna_list=scna_list, Clones=Clones)
| /R/simulation.R | no_license | cwcyau/OncoPhase-1 | R | false | false | 8,219 | r |
#' The Phylogeny class
#'
#' An S4 base class representing a phylogeny
#' @slot Name Object of class \code{\link{character}} representing the name of the phylogeny
#' @slot NbSNV Object of class integer, attribute of the class Phylogeny representing the number of single nucleotide variations (SNVs).
#' @slot NbSCNAs Object of class integer, attribute of the class Phylogeny representing the number of somatic copy number alterations (SCNAs)
# #' @slot NbSNVClusters Object of class integer, attribute of the class Phylogeny representing the number of SNVs clusters.
#' @slot snv_ids Object of class list, attribute of the class Phylogeny representing the identifiers or names of the NbSNVs SNVs clusters.
# #' @slot snvclusters_ids Object of class list, attribute of the class Phylogeny representing the identifiers or names of the NbSNVClusters SNV clusters.
# #' @slot snv_clutsers Object of class list, attribute of the class Phylogeny giving the clusters assigned to each SNV
#' @slot scna_list Object of class list, attribute of the class Phylogeny representing the list of SCNA given in the form list(scna_1_name=scna_1_attibutes, scna_2_name=scna_2_attibutes,...,scna_NbSCNAs_name=scna_NbSCNAs_attibutes ).
#' Each SCNA attribute is a list containing 2 fields :
#' \describe{
#' \item{CN}{A pair of integer representing the major and minor copies numbers of the SCNA given in the form c(major, minor)}
#' \item{LOC}{ The location or genomic region affected by the SCNA represented in term of the list of SNVs spanned by the SCNA. LOC is given inform of a vector (a_1, a_2, ..., a_NbSNV\]). Each a_i takes values in {0,1,2,3,4} as follow:
#' \describe{
#' \item{a_i=0}{the scna do not span the locus of the SNV i}
#' \item{a_i=1}{the SCNA span the SNV i locus, and the SNV is harbored by all the copies of the major copy number chromosome}
#' \item{a_i=2}{the SCNA span the SNV i locus, and the SNV is harbored by all the copies of the minor copy number chromosome}
#' \item{a_i=3}{ the SCNA span the SNV i locus, and the SNV is harbored by one copy of the major copy number chromosome}
#' \item{a_i=4}{the SCNA span the SNV i locus, and the SNV is harbored by one copy of the minor copy number chromosome}
#' }
#' }
#' }
#' @slot Clones Object of class list, attribute of the Class Phylogeny containing the list of the clones given in the form list(clone_1_name=clone_1_attibutes, clone_2_name=clone_2_attibutes,...,clone_NbClones_name=clone_NbClones_attibutes). The Germline clones do not need to be list, it will be deduced. Each clone's attribute is a list containing the following fields :
#' \describe{
#' \item{snv}{ID list of the SNVs harbored by cells of the clone}
#' \item{scna}{ID list of the SCNAs affecting the cells of the clone}
#' \item{prev}{Cellular prevalence of the clone}
#' }
#'
#'
#' @seealso \code{\link{simulation.Phylogeny}},
#' @export Phylogeny
#' @exportClass Phylogeny
#'
Phylogeny <- setClass("Phylogeny",
slots = c(
Name = "character",
NbSNVs = "numeric",
NbSCNAs = "numeric",
NbSNVClusters = "numeric",
snv_ids = "character",
# snvclusters_ids = "list",
# snv_clusters = "list",
scna_list = "list",
Clones = "list"),
prototype = list(Name = "NormalSample",
NbSNV = 0,
NbSCNAs = 0,
NbSNVClusters = 0,
snv_ids = NULL,
# snvclusters_ids = list(),
# snv_clusters = list(),
scna_list = NULL,
Clones = NULL
),
validity=function(object)
{
is.wholenumber <-
function(x, tol = .Machine$double.eps^0.5) abs(x - round(x)) < tol
if(!is.wholenumber(NbSNVs) || !is.wholenumber(NbSCNAs) #|| !is.integer(NbSNVClusters)
||NbSNVs<0 || NbSCNAs <0 #|| NbSNVClusters <0
)
return("The attributes NbSNVs and NbSCNA should all be positive integer. Check their values ")
if(length(snv_ids) != NbSNVs)
return(NbSNVs, "SNV IDs expected but ", lenght(snv_ids) , "provided. Check your parameter snv_ids")
#if(length(snvclusters_ids) != NbSNVClusters)
# return(NbSNVClusters, "SNV IDs expected but ", lenght(snvclusters_ids) , "provided. Check your parameter snvclusters_ids")
# if(length(snv_clusters) != NbSNVs)
# return(NbSNVs, "clusters IDs expected but ", lenght(snv_clutsers) , "provided. Check your parameter snv_clutsers")
if(length(scna_list) != NbSCNAs)
return(NbSCNAs, "SNV IDs expected but ", lenght(scna_list) , " provided. Check your parameter snv_ids")
# if(!(unique(snv_clusters) %in% snvclusters_ids))
# return(setdiff(unique(snv_clusters), snvclusters_ids), " are unknown cluster IDs")
for(scna in scna_list){
if(!("CN" %in% names(scna)))
return("Parameter CN (copy numbers) is missing for this SCNA.")
if(!("LOC" %in% names(scna)))
return("Parameter LOC (Genomic Location) is missing for this SCNA.")
}
for(clone in Clones){
if(sum(!(names(clone) %in% c("snv","scna","prev" )))>1)
return(paste("Unknown parameters to clones : ", setdiff(names(clone), c("snv","scna","prev" ))))
# if(!("scna" %in% names(clone)))
# return("Parameter scna is missing for this clone.")
if(!is.null(clone$snv))
if(sum(!(clone$snv %in% snv_ids ))>0)
return( "Unknown snv provided :",setdiff(cluster,snvclusters_ids ))
if(!is.null(clone$scna))
if(sum(!(clone$scna %in% names(scna_list) ))>0)
return( "Unknown scna provided :",setdiff(cna,names(scna_list) ))
}
}
)
#Examples
#Examples
#load a configuration
phylogeny="phylogeny11"
#Number of SNVs and Number of SCNAs
NbSNVs= 5
NbSCNAs= 2
#NbSNVClusters = 5
snv_ids = c("M1","M2","M3","M4","M5")
#snvclusters_ids = c("M1","M2","M3","M4","M5")
#snv_clusters = c("M1","M2","M3","M4","M5")
scna_list = list(
"M6"=list(CN=c(2,1),LOC=c(0,0,1,0,0) ),
"M7"=list(CN=c(2,0),LOC=c(0,0,0,0,1))
)
Clones = list(
# "Germline" = list(snv=c(0,0,0,0,0),0p=0.0 ),
"CloneA" = list(snv="M1",
prev=0.1),
"CloneB" = list(snv=c("M1","M2"),
prev=0.3),
"CloneC" = list(snv=c("M1","M2","M3"),
prev=0.1),
"CloneD" = list(snv=c("M1","M2","M3"),
scna="M6",
prev=0.15),
"CloneE" = list(snv=c("M1","M2","M3","M4"),
scna="M6",
prev=0.15),
"CloneF" = list(snv=c("M1","M2","M5"),
scna="M7",
prev=0.20)
)
phylogeny11=Phylogeny(Name="phylogeny11",NbSNVs=NbSNVs,NbSCNAs=NbSCNAs, snv_ids=snv_ids, scna_list=scna_list, Clones=Clones)
|
# Auto-generated (AFL fuzzing) regression input for the internal routine
# CNull:::communities_individual_based_sampling_alpha: replays a captured
# argument list and prints the structure of the result.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.54838728247968e+147, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
#' @useDynLib distinct, .registration=TRUE
#' @importFrom scater sumCountsAcrossCells
#' @importFrom limma is.fullrank
#' @importFrom Rcpp evalCpp
#' @importFrom stats p.adjust
#' @importFrom methods is
#' @import SingleCellExperiment
#' @importFrom SummarizedExperiment assays
#' @importFrom SummarizedExperiment colData
#' @importFrom Matrix Matrix
#' @importFrom Matrix rowSums
#' @importFrom Matrix t
#' @importFrom foreach foreach
#' @importFrom foreach '%dopar%'
#' @importFrom parallel makeCluster
#' @importFrom parallel stopCluster
#' @importFrom doParallel registerDoParallel
#' @importFrom doParallel stopImplicitCluster
#' @importFrom doRNG '%dorng%'
#' @importFrom ggplot2 aes
#' @importFrom ggplot2 element_blank
#' @importFrom ggplot2 geom_hline
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 stat_ecdf
#' @importFrom ggplot2 geom_density
#' @importFrom ggplot2 stat_density
#' @importFrom ggplot2 theme
#' @importFrom ggplot2 theme_bw
#' @importFrom ggplot2 labs
NULL
| /R/roxygen_tags.R | no_license | SimoneTiberi/distinct | R | false | false | 990 | r | #' @useDynLib distinct, .registration=TRUE
#' @importFrom scater sumCountsAcrossCells
#' @importFrom limma is.fullrank
#' @importFrom Rcpp evalCpp
#' @importFrom stats p.adjust
#' @importFrom methods is
#' @import SingleCellExperiment
#' @importFrom SummarizedExperiment assays
#' @importFrom SummarizedExperiment colData
#' @importFrom Matrix Matrix
#' @importFrom Matrix rowSums
#' @importFrom Matrix t
#' @importFrom foreach foreach
#' @importFrom foreach '%dopar%'
#' @importFrom parallel makeCluster
#' @importFrom parallel stopCluster
#' @importFrom doParallel registerDoParallel
#' @importFrom doParallel stopImplicitCluster
#' @importFrom doRNG '%dorng%'
#' @importFrom ggplot2 aes
#' @importFrom ggplot2 element_blank
#' @importFrom ggplot2 geom_hline
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 stat_ecdf
#' @importFrom ggplot2 geom_density
#' @importFrom ggplot2 stat_density
#' @importFrom ggplot2 theme
#' @importFrom ggplot2 theme_bw
#' @importFrom ggplot2 labs
NULL
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{sir_init}
\alias{sir_init}
\title{sir_init}
\usage{
sir_init(B, X, Y, bw, ncore)
}
\description{
sir initial value function
}
\keyword{internal}
| /orthoDr/man/sir_init.Rd | no_license | vincentskywalkers/orthoDr | R | false | true | 255 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{sir_init}
\alias{sir_init}
\title{sir_init}
\usage{
sir_init(B, X, Y, bw, ncore)
}
\description{
sir initial value function
}
\keyword{internal}
|
##
## Read the complete power consumption file, then subset to select only
## the 2 days to be plotted (01Feb2007 - 02Feb2007).
## "?" marks missing values; fields are semicolon-separated.
##
power <- read.table("data/household_power_consumption.txt", na.strings = "?",
                    stringsAsFactors = FALSE, sep = ";", header = TRUE,
                    colClasses = c("character","character","numeric",
                                   "numeric","numeric","numeric","numeric",
                                   "numeric","numeric"), comment.char = "")
power <- power[(power$Date == "1/2/2007" | power$Date == "2/2/2007"),]
## Combine Date and Time into a single timestamp (GMT) for the x-axis
power$DateTime <- strptime(paste(power$Date, power$Time), "%d/%m/%Y %H:%M:%S", tz="GMT")
##
## Generate the line plot to the PNG graphics device file as required
##
png(filename = "../datasciencecoursera/ExData_Plotting1/plot2.png")
plot(power$DateTime, power$Global_active_power, type="l", xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
## Read the complete power consumption file, then subset to select only
## the 2 days to be plotted (01Feb2007 - 02Feb2007)
##
power <- read.table("data/household_power_consumption.txt", na.strings = "?",
stringsAsFactors = FALSE, sep = ";", header = TRUE,
colClasses = c("character","character","numeric",
"numeric","numeric","numeric","numeric",
"numeric","numeric"), comment.char = "")
power <- power[(power$Date == "1/2/2007" | power$Date == "2/2/2007"),]
power$DateTime <- strptime(paste(power$Date, power$Time), "%d/%m/%Y %H:%M:%S", tz="GMT")
##
## Generate the line plot to the PNG graphics device file as required
##
png(filename = "../datasciencecoursera/ExData_Plotting1/plot2.png")
plot(power$DateTime, power$Global_active_power, type="l", xlab = "",
ylab = "Global Active Power (kilowatts)")
dev.off() |
library(shiny)
library(ggplot2)
library(tidyverse)
library(sf)
library(leaflet)
# read in data: ----
# Catch/discard tables and the SeaAroundUs EEZ id lookup (tab-separated files)
df_total_catch <- read.delim("df_total_catch")
df_discards <- read.delim("df_discards")
df_eez <-read.delim("df_eez")
sau_id <-read.delim("sau_id")
# EEZ polygons (World EEZ v8, 2014) read via sf
eez_shp <- st_read("World_EEZ_v8_2014.shp")
# Add the SeaAroundUs EEZ Id to the shapefile: ----
eez_shp_sau <- merge(eez_shp, sau_id, by="Country", all.x=T)
# Define UI for seaaroundus app ----
ui <- fluidPage(
  # App title ----
  titlePanel("seaaroundus"),
  # Sidebar layout with input and output definitions ----
  sidebarLayout(
    # Sidebar panel for inputs ----
    sidebarPanel(
      # Input: Select-option for type of information in plots ----
      selectInput(inputId = "type",
                  label = "Select type",
                  choices = c("total catch", "discards")),
      # Input: Slider for range of years to be displayed ----
      sliderInput(inputId = "range",
                  label = "Select time span",
                  min = 1950,
                  max = 2014,
                  sep = "",
                  value = c(1950, 2014)),
      # Input: Select-option for number of countries to be displayed in plots
      selectInput(inputId = "number",
                  label = "Select number of countries in Graph",
                  choices = c(1:12),
                  selected = 6),
      # Input: Select-option for number of entries to be listed in tables
      # (the value 197 stands for "all"; note radioButtons delivers the
      #  selected value to the server as a character string)
      radioButtons(inputId = "table_len",
                   label = "Select number of entries in Table",
                   inline=T,
                   choiceNames = c("5", "10", "15", "20", "all"),
                   choiceValues = c(5, 10, 15, 20, 197))
    ),
    # Main panel for displaying outputs with options ----
    mainPanel(
      # Output: Three tab panels (graph, table, map) ----
      tabsetPanel(type="tabs",
                  tabPanel("Graph", plotOutput(outputId = "tabPlot")),
                  tabPanel("Table", tableOutput("tabTable")),
                  tabPanel("Map", leafletOutput(outputId = "tabMap"))
      )
    )
  )
)
# Define server logic required to draw plots and show tables ----
server <- function(input, output) {
# First reactive function returning basis for plots and tables (output 1-3)
dataInput <- reactive({
data <- switch(input$type,
"total catch" = df_total_catch,
"discards" = df_discards)
range <- input$range
number <- input$number
#Filter the dataframe (total_catch or discards) depending on the selected time range:
data_plot <- data %>% filter(years>=range[1], years<=range[2])
if(input$type == "total catch"){
#average total catch for every country:
data_table <- data_plot %>% gather(., key="country", value="tonnage", -c(years)) %>%
group_by(country) %>% summarise(avg=sum(tonnage))
#average total catch for every EEZ:
data_map <- df_eez %>%
filter(years>=range[1], years<=range[2]) %>% group_by(sau_id) %>%
summarise(avg=mean(landings+discards))
#summarise Russia's EEZs:
data_map$avg[which(data_map$sau_id == 648)] <-sum(data_map$avg[which(data_map$sau_id %in% c(648,645,647,649,912,913))])
#summarise USA's EEZs
data_map$avg[which(data_map$sau_id == 953)] <-sum(data_map$avg[which(data_map$sau_id %in% c(953,954,956))])
#title for EEZ-map
title_map <- "Total catch in tons"
}
else {
#average percentage of discards for every country:
data_table <- data %>%
mutate(perc_disc = (discards/(landings+discards))*100) %>%
group_by(country) %>% summarise(avg=mean(perc_disc))
#average percentage of discards for every EEZ:
data_map <- df_eez %>%
filter(years>=range[1], years<=range[2]) %>% group_by(sau_id) %>%
summarise(avg=mean(discards/(landings+discards))*100)
#summarise Russia's EEZs:
data_map$avg[which(data_map$sau_id == 648)] <-mean(data_map$avg[which(data_map$sau_id %in% c(648,645,647,649,912,913))])
#summarise USA's EEZs:
data_map$avg[which(data_map$sau_id == 953)] <-mean(data_map$avg[which(data_map$sau_id %in% c(953,954,956))])
#title for EEZ-map
title_map <- "Discards in %"
}
return(list("data_plot"=data_plot, "data_table"=data_table, "data_map"=data_map,
"number"=number, "title_map"=title_map))
})
# Second reactive expression returning values for tables (output 2) ----
  # Sorts the per-country averages descending and truncates to the
  # requested table length; the two branches differ only in the header
  # given to the value column.
  calcTable <- reactive({
    data_table <- dataInput()$data_table
    #sorting the tables by average values (from highest to lowest), take only the selected table length:
    # NOTE(review): if input$table_len ever exceeds the number of rows,
    # c(1:input$table_len) indexing produces NA-filled rows -- confirm
    # the "all" option (197) always matches the row count.
    if(input$type=="total catch"){
      data_table <- arrange(data_table, desc(avg))[c(1:input$table_len),]
      colnames(data_table) <- c("country", "average catch per year in tons")
    }
    else{
      data_table <- arrange(data_table, desc(avg))[c(1:input$table_len),]
      colnames(data_table) <- c("country", "average share of discards in total catch per year in %")
    }
    return(data_table)
  })
# Output 1: Plots ----
  # Output 1: time-series plot.  Total catch is drawn as a stacked area
  # chart of the top-N countries plus an "Others" bucket; discards are
  # drawn as per-country percentage lines for the top-N countries.
  output$tabPlot <- renderPlot({
    data <- dataInput()$data_plot
    number <- as.integer(dataInput()$number) #the number of countries shown in the graph
    # - prepare the data frames for ggplot, summarise the countries with lower averages to "others"
    # - plot as stacked plot
    if (input$type == "total catch"){
      data <- data %>% gather(., key="country", value="tonnage", -c(years))
      data_arranged <- data %>% group_by(country) %>% summarise(avg=mean(tonnage)) %>% arrange(., desc(avg))
      data_high <- data %>% filter(country %in% data_arranged[c(1:number),]$country)
      # NOTE(review): the upper index uses nrow(data) (the long table)
      # where nrow(data_arranged) (one row per country) appears intended;
      # the out-of-range rows are NA and drop out of the %in% filter, so
      # the result seems unaffected -- confirm and simplify.
      data_low <- data %>% filter(country %in% data_arranged[c((number +1):nrow(data)),]$country) %>% group_by(years) %>%
        summarise(tonnage = sum(tonnage)) %>% mutate(country = "Others") %>%
        select(years,country,tonnage)
      data <- bind_rows(data_high, data_low)
      ggplot(data=data, aes(x= years, y=tonnage))+
        geom_area(aes(fill=factor(country, levels=c(data_arranged[c(1:number),]$country, "Others"))))+
        theme(legend.position = "right")+
        guides(fill=guide_legend(title="countries"))+
        labs(title = "Total catch grouped by country (ordered descendingly by average)")
    }
    else {
      data <- data %>%
        mutate(perc_disc = (discards/(landings+discards))*100)
      data1 <- data %>%
        group_by(country) %>%
        summarise(avg=mean(perc_disc)) %>%
        arrange(., desc(avg)) %>%
        top_n(., n=number)
      data_high <- data %>% filter(country %in% data1$country)
      ggplot(data=data_high, aes(x=years, y=perc_disc, colour=country))+
        geom_line(size=1.3)+
        theme(legend.position = "right")+
        labs(title = "Share of discards in total catch (grouped by country, ordered descendingly by average)", y = "percentage")
    }
  })
# Output 2: Table ----
  # Output 2: render the sorted/truncated country table.
  output$tabTable <- renderTable({
    calcTable()
  })
# Output 3: Map ----
  # Output 3: choropleth of the per-EEZ averages on an interactive
  # leaflet map, with HTML hover labels and a numeric colour legend.
  output$tabMap <- renderLeaflet({
    data_map <- dataInput()$data_map
    title_map <- dataInput()$title_map
    #merge the average values (total catch or percentage of discards) and the shapefile:
    eez_merge <- merge(eez_shp_sau, data_map, by="sau_id", all.x=T)
    #define color-scale and labels for map
    pal <- colorNumeric("Reds", domain = eez_merge$avg)
    labels <- sprintf(
      "<strong>%s</strong><br/>%g",
      eez_merge$EEZ, eez_merge$avg) %>% lapply(htmltools::HTML)
    #create interactive map with leaflet
    leaflet(options = leafletOptions(minZoom = 2, maxZoom = 10)) %>%
      addTiles() %>%
      setView(lng = 0, lat = 40, zoom = 2) %>%
      addProviderTiles("Stamen.TerrainBackground") %>%
      addPolygons(data=eez_merge, stroke = TRUE, color = ~pal(eez_merge$avg),
                  fillOpacity = 0.6, smoothFactor = 1, weight = 0.5,
                  highlightOptions = highlightOptions(color = "black", weight = 2, bringToFront = TRUE),
                  label = labels) %>%
      addLegend("bottomright", pal=pal, values= eez_merge$avg, title = title_map)
  })
}
# Create Shiny app ----
# Launches the application from the ui/server pair defined above.
shinyApp(ui = ui, server = server)
| /shiny_final.R | no_license | lg132/gdbv | R | false | false | 8,429 | r | library(shiny)
library(ggplot2)
library(tidyverse)
library(sf)
library(leaflet)
#read in data: ----
# Tab-separated inputs prepared upstream; paths are relative to the app
# directory.  df_total_catch is wide (one column per country); df_discards
# and df_eez carry landings/discards columns.
# NOTE(review): column layout inferred from how the server uses the data
# frames -- confirm against the actual files.
df_total_catch <- read.delim("df_total_catch")
df_discards <- read.delim("df_discards")
df_eez <-read.delim("df_eez")
sau_id <-read.delim("sau_id")
eez_shp <- st_read("World_EEZ_v8_2014.shp")
#Add the SeeAroundUs EEZ Id to the shapefile: ----
eez_shp_sau <- merge(eez_shp, sau_id, by="Country", all.x=T)
# Define UI for seaaroundus app ----
# Page layout: sidebar with four inputs (data type, year range, number of
# countries for the graph, number of rows for the table) and a main panel
# with three tabs (graph, table, leaflet map).
ui <- fluidPage(
  # App title ----
  titlePanel("seaaroundus"),
  # Sidebar layout with input and output definitions ----
  sidebarLayout(
    # Sidebar panel for inputs ----
    sidebarPanel(
      # Input: Select-option for type of information in plots ----
      selectInput(inputId = "type",
                  label = "Select type",
                  choices = c("total catch", "discards")),
      # Input: Slider for range of years to be displayed ----
      sliderInput(inputId = "range",
                  label = "Select time span",
                  min = 1950,
                  max = 2014,
                  sep = "",
                  value = c(1950, 2014)),
      # Input: Select-option for number of countries to be displayed in plots
      selectInput(inputId = "number",
                  label = "Select number of countries in Graph",
                  choices = c(1:12),
                  selected = 6),
      # Input: Select-option for number of entries to be listed in tables
      # (choiceValue 197 stands for the "all" option)
      radioButtons(inputId = "table_len",
                   label = "Select number of entries in Table",
                   inline=T,
                   choiceNames = c("5", "10", "15", "20", "all"),
                   choiceValues = c(5, 10, 15, 20, 197))
    ),
    # Main panel for displaying outputs with options ----
    mainPanel(
      # Output: Two tab panels: ----
      # NOTE(review): the comment above says two panels but three tabs
      # are defined below.
      tabsetPanel(type="tabs",
                  tabPanel("Graph", plotOutput(outputId = "tabPlot")),
                  tabPanel("Table", tableOutput("tabTable")),
                  tabPanel("Map", leafletOutput(outputId = "tabMap"))
      )
    )
  )
)
# Define server logic required to draw plots and show tables ----
server <- function(input, output) {
# First reactive function returning basis for plots and tables (output 1-3)
  # Reactive data source shared by the plot, table, and map outputs.
  # Returns a named list: data_plot (year-filtered country data),
  # data_table (per-country aggregate), data_map (per-EEZ aggregate),
  # number (countries to show in the graph), title_map (legend title).
  dataInput <- reactive({
    # Pick the wide (total catch) or long (discards) source data frame.
    data <- switch(input$type,
                   "total catch" = df_total_catch,
                   "discards" = df_discards)
    range <- input$range
    number <- input$number
    #Filter the dataframe (total_catch or discards) depending on the selected time range:
    data_plot <- data %>% filter(years>=range[1], years<=range[2])
    if(input$type == "total catch"){
      #average total catch for every country:
      # NOTE(review): summarise() uses sum(), not mean(), although this
      # comment and the table header speak of an "average" -- confirm.
      data_table <- data_plot %>% gather(., key="country", value="tonnage", -c(years)) %>%
        group_by(country) %>% summarise(avg=sum(tonnage))
      #average total catch for every EEZ:
      data_map <- df_eez %>%
        filter(years>=range[1], years<=range[2]) %>% group_by(sau_id) %>%
        summarise(avg=mean(landings+discards))
      #summarise Russia's EEZs:
      data_map$avg[which(data_map$sau_id == 648)] <-sum(data_map$avg[which(data_map$sau_id %in% c(648,645,647,649,912,913))])
      #summarise USA's EEZs
      data_map$avg[which(data_map$sau_id == 953)] <-sum(data_map$avg[which(data_map$sau_id %in% c(953,954,956))])
      #title for EEZ-map
      title_map <- "Total catch in tons"
    }
    else {
      #average percentage of discards for every country:
      # NOTE(review): this branch aggregates the unfiltered `data`, while
      # the "total catch" branch uses the year-filtered `data_plot`; the
      # discards table therefore ignores the selected time span -- confirm.
      data_table <- data %>%
        mutate(perc_disc = (discards/(landings+discards))*100) %>%
        group_by(country) %>% summarise(avg=mean(perc_disc))
      #average percentage of discards for every EEZ:
      data_map <- df_eez %>%
        filter(years>=range[1], years<=range[2]) %>% group_by(sau_id) %>%
        summarise(avg=mean(discards/(landings+discards))*100)
      #summarise Russia's EEZs:
      data_map$avg[which(data_map$sau_id == 648)] <-mean(data_map$avg[which(data_map$sau_id %in% c(648,645,647,649,912,913))])
      #summarise USA's EEZs:
      data_map$avg[which(data_map$sau_id == 953)] <-mean(data_map$avg[which(data_map$sau_id %in% c(953,954,956))])
      #title for EEZ-map
      title_map <- "Discards in %"
    }
    return(list("data_plot"=data_plot, "data_table"=data_table, "data_map"=data_map,
                "number"=number, "title_map"=title_map))
  })
# Second reactive expression returning values for tables (output 2) ----
  # Sorts the per-country averages descending and truncates to the
  # requested table length; the two branches differ only in the header
  # given to the value column.
  calcTable <- reactive({
    data_table <- dataInput()$data_table
    #sorting the tables by average values (from highest to lowest), take only the selected table length:
    # NOTE(review): if input$table_len ever exceeds the number of rows,
    # c(1:input$table_len) indexing produces NA-filled rows -- confirm
    # the "all" option (197) always matches the row count.
    if(input$type=="total catch"){
      data_table <- arrange(data_table, desc(avg))[c(1:input$table_len),]
      colnames(data_table) <- c("country", "average catch per year in tons")
    }
    else{
      data_table <- arrange(data_table, desc(avg))[c(1:input$table_len),]
      colnames(data_table) <- c("country", "average share of discards in total catch per year in %")
    }
    return(data_table)
  })
# Output 1: Plots ----
  # Output 1: time-series plot.  Total catch is drawn as a stacked area
  # chart of the top-N countries plus an "Others" bucket; discards are
  # drawn as per-country percentage lines for the top-N countries.
  output$tabPlot <- renderPlot({
    data <- dataInput()$data_plot
    number <- as.integer(dataInput()$number) #the number of countries shown in the graph
    # - prepare the data frames for ggplot, summarise the countries with lower averages to "others"
    # - plot as stacked plot
    if (input$type == "total catch"){
      data <- data %>% gather(., key="country", value="tonnage", -c(years))
      data_arranged <- data %>% group_by(country) %>% summarise(avg=mean(tonnage)) %>% arrange(., desc(avg))
      data_high <- data %>% filter(country %in% data_arranged[c(1:number),]$country)
      # NOTE(review): the upper index uses nrow(data) (the long table)
      # where nrow(data_arranged) (one row per country) appears intended;
      # the out-of-range rows are NA and drop out of the %in% filter, so
      # the result seems unaffected -- confirm and simplify.
      data_low <- data %>% filter(country %in% data_arranged[c((number +1):nrow(data)),]$country) %>% group_by(years) %>%
        summarise(tonnage = sum(tonnage)) %>% mutate(country = "Others") %>%
        select(years,country,tonnage)
      data <- bind_rows(data_high, data_low)
      ggplot(data=data, aes(x= years, y=tonnage))+
        geom_area(aes(fill=factor(country, levels=c(data_arranged[c(1:number),]$country, "Others"))))+
        theme(legend.position = "right")+
        guides(fill=guide_legend(title="countries"))+
        labs(title = "Total catch grouped by country (ordered descendingly by average)")
    }
    else {
      data <- data %>%
        mutate(perc_disc = (discards/(landings+discards))*100)
      data1 <- data %>%
        group_by(country) %>%
        summarise(avg=mean(perc_disc)) %>%
        arrange(., desc(avg)) %>%
        top_n(., n=number)
      data_high <- data %>% filter(country %in% data1$country)
      ggplot(data=data_high, aes(x=years, y=perc_disc, colour=country))+
        geom_line(size=1.3)+
        theme(legend.position = "right")+
        labs(title = "Share of discards in total catch (grouped by country, ordered descendingly by average)", y = "percentage")
    }
  })
# Output 2: Table ----
  # Output 2: render the sorted/truncated country table.
  output$tabTable <- renderTable({
    calcTable()
  })
# Output 3: Map ----
  # Output 3: choropleth of the per-EEZ averages on an interactive
  # leaflet map, with HTML hover labels and a numeric colour legend.
  output$tabMap <- renderLeaflet({
    data_map <- dataInput()$data_map
    title_map <- dataInput()$title_map
    #merge the average values (total catch or percentage of discards) and the shapefile:
    eez_merge <- merge(eez_shp_sau, data_map, by="sau_id", all.x=T)
    #define color-scale and labels for map
    pal <- colorNumeric("Reds", domain = eez_merge$avg)
    labels <- sprintf(
      "<strong>%s</strong><br/>%g",
      eez_merge$EEZ, eez_merge$avg) %>% lapply(htmltools::HTML)
    #create interactive map with leaflet
    leaflet(options = leafletOptions(minZoom = 2, maxZoom = 10)) %>%
      addTiles() %>%
      setView(lng = 0, lat = 40, zoom = 2) %>%
      addProviderTiles("Stamen.TerrainBackground") %>%
      addPolygons(data=eez_merge, stroke = TRUE, color = ~pal(eez_merge$avg),
                  fillOpacity = 0.6, smoothFactor = 1, weight = 0.5,
                  highlightOptions = highlightOptions(color = "black", weight = 2, bringToFront = TRUE),
                  label = labels) %>%
      addLegend("bottomright", pal=pal, values= eez_merge$avg, title = title_map)
  })
}
# Create Shiny app ----
# Launches the application from the ui/server pair defined above.
shinyApp(ui = ui, server = server)
|
#' Sets the default breaks for a time axis
#'
#' \code{xgx_breaks_time} sets the default breaks for a time axis,
#' given the units of the data and the units of the plot.
#' It is inspired by scales::extended_breaks
#'
#' for the extended breaks function,
#' Q is a set of nice increments
#' w is a set of 4 weights for
#' \enumerate{
#' \item simplicity - how early in the Q order are you
#' \item coverage - labelings that don't extend outside the data: range(data)/range(labels)
#' \item density (previously granularity) - how close to the number of ticks do you get (default is 5)
#' \item legibility - has to do with fontsize and formatting to prevent label overlap
#' }
#'
#' @references Talbot, Justin, Sharon Lin, and Pat Hanrahan. "An extension of Wilkinson's
#' algorithm for positioning tick labels on axes." IEEE Transactions on visualization and
#' computer graphics 16.6 (2010): 1036-1043.
#'
#'
#' @param data.range range of the data
#' @param units.plot units to use in the plot
#'
#' @export
#'
#' xgx_breaks_time
#'
#' @examples
#' library(ggplot2)
#' xgx_breaks_time(c(0,5),"h")
#' xgx_breaks_time(c(0,6),"h")
#' xgx_breaks_time(c(-3,5),"h")
#' xgx_breaks_time(c(0,24),"h")
#' xgx_breaks_time(c(0,12),"h")
#' xgx_breaks_time(c(1,4),"d")
#' xgx_breaks_time(c(1,12),"d")
#' xgx_breaks_time(c(1,14),"d")
#' xgx_breaks_time(c(1,50),"d")
#' xgx_breaks_time(c(1000,3000),"d")
#' xgx_breaks_time(c(-21,100),"d")
#' xgx_breaks_time(c(-1,10),"w")
xgx_breaks_time <- function(data.range,units.plot){
  dmin = min(data.range)
  dmax = max(data.range)
  dspan = dmax - dmin   # width of the data range, in plot units
  m = 5 #number of breaks to aim for
  # NOTE(review): Q.default contains 1 twice; scales::extended_breaks
  # uses Q = c(1, 5, 2, 2.5, 4, 3), so the duplicate may be a typo -- confirm.
  Q.default = c(1, 5, 2, 4, 3,1) #default Q (spacing)
  w.default = c(0.25, 0.2, 0.5, 0.05)
  w.simple = c(1,.2,.5,.05)
  # Choose candidate spacings by plot unit and span: hour/minute axes get
  # divisors of 24, day axes spanning >= 12 days get week multiples, and
  # everything else falls back to the generic defaults.
  if (units.plot %in% c("h","m") && dspan >= 48) {
    Q = c(24,12,6,3)
    w = w.simple
  } else if (units.plot %in% c("h","m") && dspan >= 24) {
    Q = c(3,12,6,2)
    w = w.simple
  } else if (units.plot %in% c("h","m") && dspan < 24) {
    Q = c(6,3,2,1)
    w = w.simple
  } else if (units.plot == "d" && dspan >= 12) {
    Q = c(7,14,28)
    w = w.simple
  } else {
    Q = Q.default
    w = w.default
  }
  # Delegate the optimisation to Talbot et al.'s extended algorithm.
  breaks = labeling::extended(dmin,dmax,m,Q=Q,w=w)
  return(breaks)
}
| /Rlib/xgxr-master/R/xgx_breaks_time.R | no_license | concertris/xgx | R | false | false | 2,292 | r | #' Sets the default breaks for a time axis
#'
#' \code{xgx_breaks_time} sets the default breaks for a time axis,
#' given the units of the data and the units of the plot.
#' It is inspired by scales::extended_breaks
#'
#' for the extended breaks function,
#' Q is a set of nice increments
#' w is a set of 4 weights for
#' \enumerate{
#' \item simplicity - how early in the Q order are you
#' \item coverage - labelings that don't extend outside the data: range(data)/range(labels)
#' \item density (previously granuality) - how cloes to the number of ticks do you get (default is 5)
#' \item legibility - has to do with fontsize and formatting to prevent label overlap
#' }
#'
#' @references Talbot, Justin, Sharon Lin, and Pat Hanrahan. "An extension of Wilkinsonβs
#' algorithm for positioning tick labels on axes." IEEE Transactions on visualization and
#' computer graphics 16.6 (2010): 1036-1043.
#'
#'
#' @param data.range range of the data
#' @param units.plot units to use in the plot
#'
#' @export
#'
#' xgx_breaks_time
#'
#' @examples
#' library(ggplot2)
#' xgx_breaks_time(c(0,5),"h")
#' xgx_breaks_time(c(0,6),"h")
#' xgx_breaks_time(c(-3,5),"h")
#' xgx_breaks_time(c(0,24),"h")
#' xgx_breaks_time(c(0,12),"h")
#' xgx_breaks_time(c(1,4),"d")
#' xgx_breaks_time(c(1,12),"d")
#' xgx_breaks_time(c(1,14),"d")
#' xgx_breaks_time(c(1,50),"d")
#' xgx_breaks_time(c(1000,3000),"d")
#' xgx_breaks_time(c(-21,100),"d")
#' xgx_breaks_time(c(-1,10),"w")
xgx_breaks_time <- function(data.range, units.plot) {
  # Default break positions for a time axis, delegating the final
  # optimisation to Talbot et al.'s extended-Wilkinson algorithm.
  span_min <- min(data.range)
  span_max <- max(data.range)
  span <- span_max - span_min  # width of the data range, in plot units
  n_target <- 5                # number of breaks to aim for

  # Candidate spacings (Q) and scoring weights (w) depend on the plot
  # units and on how wide the data range is.
  w_simple <- c(1, 0.2, 0.5, 0.05)
  if (units.plot %in% c("h", "m")) {
    # Hour/minute axes: prefer spacings that divide a day cleanly.
    if (span >= 48) {
      Q <- c(24, 12, 6, 3)
    } else if (span >= 24) {
      Q <- c(3, 12, 6, 2)
    } else {
      Q <- c(6, 3, 2, 1)
    }
    w <- w_simple
  } else if (units.plot == "d" && span >= 12) {
    # Day axes spanning 12+ days: week-based spacings.
    Q <- c(7, 14, 28)
    w <- w_simple
  } else {
    Q <- c(1, 5, 2, 4, 3, 1)      # default spacings
    w <- c(0.25, 0.2, 0.5, 0.05)  # default weights
  }
  labeling::extended(span_min, span_max, n_target, Q = Q, w = w)
}
|
# Pedotransfer functions
#' Calculate wilting point
#' @param sand Fraction of sand
#' @param clay Fraction of clay
#' @param soc Soil organic matter percent
#' @return Wilting point at 1500 kpa
#' @keywords internal
#' @export
wilt_point <- function(sand, clay, soc) {
  # First-solution estimate of the 1500 kPa (wilting point) moisture
  # content from texture fractions and organic matter (Saxton & Rawls
  # 2006, Table 1).
  first_pass <- 0.031 +
    -0.024 * sand +
    0.487 * clay +
    0.006 * soc +
    0.005 * sand * soc -
    0.013 * clay * soc +
    0.068 * sand * clay
  # Published linear correction applied to the first-solution value.
  first_pass + (0.14 * first_pass - 0.02)
}
#' Calculates field capacity
#' @param sand Fraction of sand
#' @param clay Fraction of clay
#' @param soc soil organic matter percent
#' @return Field capacity at 33 kpa
#' @keywords internal
#' @export
field_cap <- function(sand, clay, soc) {
  # First-solution estimate of the 33 kPa (field capacity) moisture
  # content (Saxton & Rawls 2006, Table 1), split into linear and
  # interaction terms for readability.
  linear_part <- 0.299 - 0.251 * sand + 0.195 * clay + 0.011 * soc
  cross_part <- 0.006 * sand * soc - 0.027 * clay * soc + 0.452 * sand * clay
  first_pass <- linear_part + cross_part
  # Published quadratic correction applied to the first-solution value.
  first_pass + (1.283 * first_pass^2 - 0.374 * first_pass - 0.015)
}
#' Calculates saturated moisture content, requires function field_cap
#' @param sand Fraction of sand
#' @param clay Fraction of clay
#' @param soc soil organic matter percent
#' @return Saturated moisture content (0 kPa)
#' @keywords internal
#' @export
theta_s <- function(sand, clay, soc) {
  # First-solution estimate of the SAT-33 kPa moisture difference
  # (Saxton & Rawls 2006).
  thetas_33t <- 0.278 * sand + 0.034 * clay + 0.022 * soc -
    (0.018 * sand * soc) - (0.027 * clay * soc) - (0.584 * sand * clay) + 0.078
  # Published linear adjustment of the first-solution value.
  thetas_33 <- thetas_33t + (0.636 * thetas_33t - 0.107)
  theta33 <- field_cap(sand, clay, soc)
  # Saturation = field capacity + (SAT-33 kPa difference) plus a sand
  # correction term.
  thetas <- theta33 + thetas_33 - 0.097 * sand + 0.043
  return(thetas)
}
#' Matric density accounting for compaction
#' @param thetas Saturation water content
#' @param DF Density factor between 0.9 and 1.3, normal (default) at 1
#' @return Matric density
#' @keywords internal
#' @export
ro_df <- function(thetas, DF = 1) {
  # Fraction of the volume occupied by solids, times the mineral particle
  # density (2.65 g/cm^3), scaled by the compaction/density factor DF.
  solids <- 1 - thetas
  solids * 2.65 * DF
}
#' Bulk density accounting for compaction plus gravel
#' @param thetas Saturation water content (without compaction)
#' @param DF Density factor between 0.9 and 1.3, normal (default) at 1
#' @param gravel Gravel percent by weight
#' @keywords internal
#' @export
bdens <- function(thetas, DF = 1, gravel = 0) {
  # Matric (fine-earth) density after applying the compaction factor.
  rodf <- ro_df(thetas, DF)
  # Convert the gravel weight fraction to a volume fraction (2.65 =
  # particle density of rock, g/cm^3).
  gravel_pctv <- ((rodf / 2.65 ) * gravel) / (1 - gravel * ( 1 - rodf / 2.65))
  # Bulk density is the volume-weighted mean of rock and matric density.
  ro_b <- gravel_pctv * 2.65 + (1 - gravel_pctv) * rodf
  return(ro_b)
}
#' Calculates saturated water content, accounting for compaction
#' @param sand Fraction of sand
#' @param clay Fraction of clay
#' @param soc Soil organic matter percent
#' @param DF Density factor between 0.9 and 1.3, no effect if set to 1
#' @keywords internal
#' @export
theta_sdf <- function(sand, clay, soc, DF) {
  thetas <- theta_s(sand, clay, soc)
  rodf <- ro_df(thetas, DF)
  # Porosity of the compacted soil: 1 - (matric density / particle density).
  thetasdf <- 1 - (rodf / 2.65)
  return(thetasdf)
}
#' Calculated field capacity accounting for compaction
#' @param sand Fraction of sand
#' @param clay Fraction of clay
#' @param soc Soil organic matter percent
#' @param DF Density factor between 0.9 and 1.3, normal (default) at 1
#' @keywords internal
#' @export
field_cap_df <- function(sand, clay, soc, DF) {
  thetas <- theta_sdf(sand, clay, soc, DF = 1) # Normal theta_s
  thetasdf <- theta_sdf(sand, clay, soc, DF) # theta_s with compaction
  # Field capacity shifts by 20% of the compaction-induced porosity change.
  fcdf <- field_cap(sand, clay, soc) - 0.2 * (thetas - thetasdf)
  return(fcdf)
}
#' Saturated hydraulic conductivity, including gravel effects.
#' @param sand Fraction of sand
#' @param clay Fraction of clay
#' @param soc Soil organic matter percent
#' @param DF Density factor between 0.9 and 1.3, normal (default) at 1
#' @param gravel Gravel percent by weight (0 by default)
#' @keywords internal
#' @export
ksat <- function(sand, clay, soc, DF = 1, gravel = 0) {
  fcdf <- field_cap_df(sand, clay, soc, DF)
  wp <- wilt_point(sand, clay, soc)
  # Slope of the log-moisture / log-tension curve between 33 and 1500 kPa.
  lambda <- (log(fcdf) - log(wp)) / (log(1500) - log(33)) # = 1/Beta
  thetas <- theta_s(sand, clay, soc) # theta_sdf no density effects
  mdens <- bdens(thetas, DF, gravel = 0) # BD no gravel to get matric density
  thetasdf <- theta_sdf(sand, clay, soc, DF = DF) # ThetaSDF w/density effects
  theta_sdf_fcdf <- thetasdf - fcdf
  theta_sdf_fcdf <- ifelse(theta_sdf_fcdf < 0, 0, theta_sdf_fcdf) # FC ! > por.
  # kbks: conductivity correction for the volume occupied by gravel.
  kbks <- (1 - gravel) / (1 - gravel * (1 - 1.5 * (mdens / 2.65)))
  # Saturated conductivity from the porosity in excess of field capacity;
  # units follow Saxton & Rawls (2006) eq. 16 (mm/h) -- TODO confirm.
  ks <- 1930 * (theta_sdf_fcdf)^(3 - lambda) * kbks
  return(ks)
}
#' Plant available water, adjusted for gravel and density effects.
#' @param sand Fraction of sand
#' @param clay Fraction of clay
#' @param soc Soil organic matter percent
#' @param DF Density factor between 0.9 and 1.3, normal (default) at 1
#' @param gravel Gravel percent by weight (0 by default)
#' @keywords internal
#' @export
paw <- function(sand, clay, soc, DF = 1, gravel = 0) {
  thetas <- theta_sdf(sand, clay, soc, DF = 1)   # porosity, no compaction
  thetasdf <- theta_sdf(sand, clay, soc, DF = DF) # porosity, with compaction
  rodf <- ro_df(thetas, DF)
  # Gravel volume fraction from its weight fraction.
  gravel_pctv <- ((rodf / 2.65 ) * gravel) / (1 - gravel * ( 1 - rodf / 2.65))
  # Compaction-corrected field capacity (same formula as field_cap_df()).
  fcdf <- field_cap(sand, clay, soc) - 0.2 * (thetas - thetasdf)
  wp <- wilt_point(sand, clay, soc)
  # Plant-available water = (FC - WP) scaled by the fine-earth fraction.
  paw <- (fcdf - wp) * (1 - gravel_pctv)
  return(paw)
}
#' Calculates various soil hydraulic properties, following Saxton & Rawls, 2006
#' @param sand Fraction of sand
#' @param clay Fraction of clay
#' @param soc Soil organic matter percent
#' @param DF Density factor between 0.9 and 1.3, normal (default) at 1
#' @param gravel Gravel percent by weight (0 by default)
#' @param digits Number of significant digits (4 by default)
#' @param PAW Gravel and density adjusted plant available water (TRUE or FALSE)
#' @details A single function producing estimates of wilting point,
#' field capacity, saturated water content, bulk density, and saturdated
#' hydraulic conductivity, account for soil density and gravel effects, based on
#' methods described by Saxton and Rawls (2006). Internal functions for each
#' variables can be also used separately, as needed. Per Saxton & Rawls (2006),
#' these functions are only valid for SOC <= 8% clay <= 60%. Functions were
#' checked against equations available for download with SPAW model,
#' downloadable at http://hrsl.arsusda.gov/SPAW/SPAWDownload.html.
#' @references
#' Saxton, K.E. & Rawls, W.J. (2006) Soil water characteristic estimates by
#' texture and organic matter for hydrologic solutions. Soil Sci Soc Am J, 70,
#' 1569β1578.
#' @examples
#' soil_hydraulics(sand = 0.29, clay = 0.32, soc = 3.51, DF = 1, gravel = 0)
#' soil_hydraulics(sand = 0.29, clay = 0.32, soc = 3.51, DF = 0.8, gravel = 0)
#' soil_hydraulics(sand = 0.29, clay = 0.32, soc = 3.51, DF = 1, gravel = 0.2)
#' soil_hydraulics(sand = 0.29, clay = 0.32, soc = 3.51, DF = 0.8, gravel = 0.2)
#' @export
soil_hydraulics <- function(sand, clay, soc, DF = 1, gravel = 0, digits = 4,
                            PAW = TRUE) {
  # sand and clay are fractions (0-1); soc is a percentage.  These are
  # scalar checks, so the short-circuiting || operator is used.
  if ((sand > 1) || (clay > 1)) {
    stop("Sand & clay must be fractions, soc a percentage", call. = FALSE)
  }
  # Saxton & Rawls (2006) calibrated these functions for clay <= 60% and
  # SOC <= 8%; warn (but still compute) outside that envelope.
  # BUG FIX: the message previously referred to "sand fractions > 0.8",
  # which did not match the tested condition (clay > 0.6 or soc > 8).
  if ((clay > 0.6) || soc > 8) {
    warning(paste("Validity of results questionable for clay fractions > 0.6",
                  "or SOC percentage > 8"))
  }
  # pedotransfer functions
  wp <- wilt_point(sand, clay, soc) # Wilting point (1500 kPa)
  fcdf <- field_cap_df(sand, clay, soc, DF) # Field capacity, w/density
  thetas <- theta_s(sand, clay, soc) # Saturated moisture content, no density
  thetasdf <- theta_sdf(sand, clay, soc, DF) # Saturated moisture content, density
  bd <- bdens(thetas, DF, gravel) # Bulk density
  ks <- ksat(sand, clay, soc, DF, gravel) # KSat, w/density and gravel
  # output (the uncorrected field capacity was computed but never reported
  # by the original, so that dead call has been dropped)
  out <- c("fc" = fcdf, "wp" = wp, "sat" = thetasdf, "bd" = bd, "ksat" = ks)
  if (isTRUE(PAW)) {
    # Gravel- and density-adjusted plant available water; mirrors paw()
    # but reuses the intermediates computed above.
    rodf <- ro_df(thetas, DF)
    gravel_pctv <- ((rodf / 2.65) * gravel) / (1 - gravel * (1 - rodf / 2.65))
    PAW <- (fcdf - wp) * (1 - gravel_pctv)
    out <- c(out, "PAW" = PAW)
  }
  return(round(out, digits))
}
| /R/pedotransfer.R | no_license | gcostaneto/rcropmod | R | false | false | 7,955 | r | # Pedotransfer functions
#' Calculate wilting point
#' @param sand Fraction of sand
#' @param clay Fraction of clay
#' @param soc Soil organic matter percent
#' @return Wilting point at 1500 kpa
#' @keywords internal
#' @export
wilt_point <- function(sand, clay, soc) {
  # First-solution estimate of the 1500 kPa (wilting point) moisture
  # content from texture fractions and organic matter (Saxton & Rawls
  # 2006, Table 1).
  first_pass <- 0.031 +
    -0.024 * sand +
    0.487 * clay +
    0.006 * soc +
    0.005 * sand * soc -
    0.013 * clay * soc +
    0.068 * sand * clay
  # Published linear correction applied to the first-solution value.
  first_pass + (0.14 * first_pass - 0.02)
}
#' Calculates field capacity
#' @param sand Fraction of sand
#' @param clay Fraction of clay
#' @param soc soil organic matter percent
#' @return Field capacity at 33 kpa
#' @keywords internal
#' @export
field_cap <- function(sand, clay, soc) {
  # First-solution estimate of the 33 kPa (field capacity) moisture
  # content (Saxton & Rawls 2006, Table 1), split into linear and
  # interaction terms for readability.
  linear_part <- 0.299 - 0.251 * sand + 0.195 * clay + 0.011 * soc
  cross_part <- 0.006 * sand * soc - 0.027 * clay * soc + 0.452 * sand * clay
  first_pass <- linear_part + cross_part
  # Published quadratic correction applied to the first-solution value.
  first_pass + (1.283 * first_pass^2 - 0.374 * first_pass - 0.015)
}
#' Calculates saturated moisture content, requires function field_cap
#' @param sand Fraction of sand
#' @param clay Fraction of clay
#' @param soc soil organic matter percent
#' @return Saturated moisture content (0 kPa)
#' @keywords internal
#' @export
theta_s <- function(sand, clay, soc) {
  # First-solution estimate of the SAT-33 kPa moisture difference
  # (Saxton & Rawls 2006).
  thetas_33t <- 0.278 * sand + 0.034 * clay + 0.022 * soc -
    (0.018 * sand * soc) - (0.027 * clay * soc) - (0.584 * sand * clay) + 0.078
  # Published linear adjustment of the first-solution value.
  thetas_33 <- thetas_33t + (0.636 * thetas_33t - 0.107)
  theta33 <- field_cap(sand, clay, soc)
  # Saturation = field capacity + (SAT-33 kPa difference) plus a sand
  # correction term.
  thetas <- theta33 + thetas_33 - 0.097 * sand + 0.043
  return(thetas)
}
#' Matric density accounting for compaction
#' @param thetas Saturation water content
#' @param DF Density factor between 0.9 and 1.3, normal (default) at 1
#' @return Matric density
#' @keywords internal
#' @export
ro_df <- function(thetas, DF = 1) {
  # Fraction of the volume occupied by solids, times the mineral particle
  # density (2.65 g/cm^3), scaled by the compaction/density factor DF.
  solids <- 1 - thetas
  solids * 2.65 * DF
}
#' Bulk density accounting for compaction plus gravel
#' @param thetas Saturation water content (without compaction)
#' @param DF Density factor between 0.9 and 1.3, normal (default) at 1
#' @param gravel Gravel percent by weight
#' @keywords internal
#' @export
bdens <- function(thetas, DF = 1, gravel = 0) {
  # Matric (fine-earth) density after applying the compaction factor.
  rodf <- ro_df(thetas, DF)
  # Convert the gravel weight fraction to a volume fraction (2.65 =
  # particle density of rock, g/cm^3).
  gravel_pctv <- ((rodf / 2.65 ) * gravel) / (1 - gravel * ( 1 - rodf / 2.65))
  # Bulk density is the volume-weighted mean of rock and matric density.
  ro_b <- gravel_pctv * 2.65 + (1 - gravel_pctv) * rodf
  return(ro_b)
}
#' Calculates saturated water content, accounting for compaction
#' @param sand Fraction of sand
#' @param clay Fraction of clay
#' @param soc Soil organic matter percent
#' @param DF Density factor between 0.9 and 1.3, no effect if set to 1
#' @keywords internal
#' @export
theta_sdf <- function(sand, clay, soc, DF) {
  thetas <- theta_s(sand, clay, soc)
  rodf <- ro_df(thetas, DF)
  # Porosity of the compacted soil: 1 - (matric density / particle density).
  thetasdf <- 1 - (rodf / 2.65)
  return(thetasdf)
}
#' Calculated field capacity accounting for compaction
#' @param sand Fraction of sand
#' @param clay Fraction of clay
#' @param soc Soil organic matter percent
#' @param DF Density factor between 0.9 and 1.3, normal (default) at 1
#' @keywords internal
#' @export
field_cap_df <- function(sand, clay, soc, DF) {
  thetas <- theta_sdf(sand, clay, soc, DF = 1) # Normal theta_s
  thetasdf <- theta_sdf(sand, clay, soc, DF) # theta_s with compaction
  # Field capacity shifts by 20% of the compaction-induced porosity change.
  fcdf <- field_cap(sand, clay, soc) - 0.2 * (thetas - thetasdf)
  return(fcdf)
}
#' Saturated hydraulic conductivity, including gravel effects.
#' @param sand Fraction of sand
#' @param clay Fraction of clay
#' @param soc Soil organic matter percent
#' @param DF Density factor between 0.9 and 1.3, normal (default) at 1
#' @param gravel Gravel percent by weight (0 by default)
#' @keywords internal
#' @export
ksat <- function(sand, clay, soc, DF = 1, gravel = 0) {
  fcdf <- field_cap_df(sand, clay, soc, DF)
  wp <- wilt_point(sand, clay, soc)
  # Slope of the log-moisture / log-tension curve between 33 and 1500 kPa.
  lambda <- (log(fcdf) - log(wp)) / (log(1500) - log(33)) # = 1/Beta
  thetas <- theta_s(sand, clay, soc) # theta_sdf no density effects
  mdens <- bdens(thetas, DF, gravel = 0) # BD no gravel to get matric density
  thetasdf <- theta_sdf(sand, clay, soc, DF = DF) # ThetaSDF w/density effects
  theta_sdf_fcdf <- thetasdf - fcdf
  theta_sdf_fcdf <- ifelse(theta_sdf_fcdf < 0, 0, theta_sdf_fcdf) # FC ! > por.
  # kbks: conductivity correction for the volume occupied by gravel.
  kbks <- (1 - gravel) / (1 - gravel * (1 - 1.5 * (mdens / 2.65)))
  # Saturated conductivity from the porosity in excess of field capacity;
  # units follow Saxton & Rawls (2006) eq. 16 (mm/h) -- TODO confirm.
  ks <- 1930 * (theta_sdf_fcdf)^(3 - lambda) * kbks
  return(ks)
}
#' Plant available water, adjusted for gravel and density effects.
#' @param sand Fraction of sand
#' @param clay Fraction of clay
#' @param soc Soil organic matter percent
#' @param DF Density factor between 0.9 and 1.3, normal (default) at 1
#' @param gravel Gravel percent by weight (0 by default)
#' @keywords internal
#' @export
paw <- function(sand, clay, soc, DF = 1, gravel = 0) {
  thetas <- theta_sdf(sand, clay, soc, DF = 1)   # porosity, no compaction
  thetasdf <- theta_sdf(sand, clay, soc, DF = DF) # porosity, with compaction
  rodf <- ro_df(thetas, DF)
  # Gravel volume fraction from its weight fraction.
  gravel_pctv <- ((rodf / 2.65 ) * gravel) / (1 - gravel * ( 1 - rodf / 2.65))
  # Compaction-corrected field capacity (same formula as field_cap_df()).
  fcdf <- field_cap(sand, clay, soc) - 0.2 * (thetas - thetasdf)
  wp <- wilt_point(sand, clay, soc)
  # Plant-available water = (FC - WP) scaled by the fine-earth fraction.
  paw <- (fcdf - wp) * (1 - gravel_pctv)
  return(paw)
}
#' Calculates various soil hydraulic properties, following Saxton & Rawls, 2006
#' @param sand Fraction of sand
#' @param clay Fraction of clay
#' @param soc Soil organic matter percent
#' @param DF Density factor between 0.9 and 1.3, normal (default) at 1
#' @param gravel Gravel percent by weight (0 by default)
#' @param digits Number of significant digits (4 by default)
#' @param PAW Gravel and density adjusted plant available water (TRUE or FALSE)
#' @details A single function producing estimates of wilting point,
#' field capacity, saturated water content, bulk density, and saturdated
#' hydraulic conductivity, account for soil density and gravel effects, based on
#' methods described by Saxton and Rawls (2006). Internal functions for each
#' variables can be also used separately, as needed. Per Saxton & Rawls (2006),
#' these functions are only valid for SOC <= 8% clay <= 60%. Functions were
#' checked against equations available for download with SPAW model,
#' downloadable at http://hrsl.arsusda.gov/SPAW/SPAWDownload.html.
#' @references
#' Saxton, K.E. & Rawls, W.J. (2006) Soil water characteristic estimates by
#' texture and organic matter for hydrologic solutions. Soil Sci Soc Am J, 70,
#' 1569β1578.
#' @examples
#' soil_hydraulics(sand = 0.29, clay = 0.32, soc = 3.51, DF = 1, gravel = 0)
#' soil_hydraulics(sand = 0.29, clay = 0.32, soc = 3.51, DF = 0.8, gravel = 0)
#' soil_hydraulics(sand = 0.29, clay = 0.32, soc = 3.51, DF = 1, gravel = 0.2)
#' soil_hydraulics(sand = 0.29, clay = 0.32, soc = 3.51, DF = 0.8, gravel = 0.2)
#' @export
soil_hydraulics <- function(sand, clay, soc, DF = 1, gravel = 0, digits = 4,
                            PAW = TRUE) {
  # sand and clay are fractions (0-1); soc is a percentage.  These are
  # scalar checks, so the short-circuiting || operator is used.
  if ((sand > 1) || (clay > 1)) {
    stop("Sand & clay must be fractions, soc a percentage", call. = FALSE)
  }
  # Saxton & Rawls (2006) calibrated these functions for clay <= 60% and
  # SOC <= 8%; warn (but still compute) outside that envelope.
  # BUG FIX: the message previously referred to "sand fractions > 0.8",
  # which did not match the tested condition (clay > 0.6 or soc > 8).
  if ((clay > 0.6) || soc > 8) {
    warning(paste("Validity of results questionable for clay fractions > 0.6",
                  "or SOC percentage > 8"))
  }
  # pedotransfer functions
  wp <- wilt_point(sand, clay, soc) # Wilting point (1500 kPa)
  fcdf <- field_cap_df(sand, clay, soc, DF) # Field capacity, w/density
  thetas <- theta_s(sand, clay, soc) # Saturated moisture content, no density
  thetasdf <- theta_sdf(sand, clay, soc, DF) # Saturated moisture content, density
  bd <- bdens(thetas, DF, gravel) # Bulk density
  ks <- ksat(sand, clay, soc, DF, gravel) # KSat, w/density and gravel
  # output (the uncorrected field capacity was computed but never reported
  # by the original, so that dead call has been dropped)
  out <- c("fc" = fcdf, "wp" = wp, "sat" = thetasdf, "bd" = bd, "ksat" = ks)
  if (isTRUE(PAW)) {
    # Gravel- and density-adjusted plant available water; mirrors paw()
    # but reuses the intermediates computed above.
    rodf <- ro_df(thetas, DF)
    gravel_pctv <- ((rodf / 2.65) * gravel) / (1 - gravel * (1 - rodf / 2.65))
    PAW <- (fcdf - wp) * (1 - gravel_pctv)
    out <- c(out, "PAW" = PAW)
  }
  return(round(out, digits))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/met.process.R
\name{db.site.lat.lon}
\alias{db.site.lat.lon}
\title{db.site.lat.lon}
\usage{
db.site.lat.lon(site.id, con)
}
\author{
Betsy Cowdery
}
| /modules/data.atmosphere/man/db.site.lat.lon.Rd | permissive | yogeshdarji/pecan | R | false | true | 229 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/met.process.R
\name{db.site.lat.lon}
\alias{db.site.lat.lon}
\title{db.site.lat.lon}
\usage{
db.site.lat.lon(site.id, con)
}
\author{
Betsy Cowdery
}
|
# assemble each individual counts matrix into one combined count matrix
#
# Usage: Rscript buildCountMatrix.R <input.folder> <output.file>
#   input.folder: directory containing per-sample *counts.txt.gz files
#   output.file : path of the gzip-compressed, tab-separated output matrix
args <- commandArgs(trailingOnly = TRUE)
input.folder <- args[1]
output.file <- args[2]
# Set the working directory so the per-sample files can be read by name
setwd(input.folder)
# list.files() takes a regular expression, not a shell glob; anchor it so
# only files ending in "counts.txt.gz" match.
filelist <- list.files(pattern = "counts\\.txt\\.gz$")
print(filelist)
# assuming tab separated values with a header; first column = row names
datalist <- lapply(filelist, function(x) read.table(gzfile(x), header = TRUE, row.names = 1, fill = FALSE))
cts <- do.call("cbind", datalist)
# remove last 5 rows which contain summary info
cts <- cts[seq_len(nrow(cts) - 5), ]
# BUG FIX: the original called gz(), which does not exist in base R;
# gzfile() opens the gzip-compressed output connection.
write.table(cts, gzfile(output.file), append = FALSE, quote = TRUE, sep = "\t")
| /scripts/analysis/buildCountMatrix.R | no_license | andrewf5201/scrna-seq-custom | R | false | false | 578 | r | # assemble each individual counts matrix
args <- commandArgs(trailingOnly = TRUE)
input.folder<- args[1]
output.file<- args[2]
# Set the working directory
setwd(input.folder)
filelist = list.files(pattern="*counts.txt.gz")
print(filelist)
#assuming tab separated values with a header
datalist = lapply(filelist, function(x)read.table(gzfile(x), header=TRUE, row.names=1, fill = FALSE))
cts = do.call("cbind", datalist)
#remove last 5 rows which contains summary info
cts <- cts[1:(nrow(cts)-5),]
write.table(cts, gz(output.file), append = FALSE, quote = TRUE, sep="\t")
|
# plot1.R: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
# NOTE(review): absolute setwd() path is machine-specific -- consider removing.
setwd("D:/Profiles/dverhann/Desktop/R/Course_Exploratory_Data_Analysis/week1")
library(sqldf)
# Source file and SQL filter: keep only the two target dates (d/m/Y format)
DataFileLoc="./exdata_data_household_power_consumption/household_power_consumption.txt"
SqlStatement="select Date,Time,Global_active_power from file WHERE Date='1/2/2007' OR Date='2/2/2007'"
# read.csv.sql applies the filter while reading, so the full file never loads
mydata <- read.csv.sql(DataFileLoc, sql = SqlStatement,header = TRUE, sep= ';')
mydata$Date <- as.Date(mydata$Date , "%d/%m/%Y")
# Red histogram of household active power in kilowatts
hist(mydata$Global_active_power, xlab="Global Active Power (kilowatts)", main="Global Active Power", col="Red")
# Copy the current plotting device to a 480x480 PNG
dev.copy(png,'plot1.png', width=480, height=480)
dev.off() | /plot1.R | no_license | dverhann/ExData_Plotting1 | R | false | false | 590 | r | setwd("D:/Profiles/dverhann/Desktop/R/Course_Exploratory_Data_Analysis/week1")
library(sqldf)
DataFileLoc="./exdata_data_household_power_consumption/household_power_consumption.txt"
SqlStatement="select Date,Time,Global_active_power from file WHERE Date='1/2/2007' OR Date='2/2/2007'"
mydata <- read.csv.sql(DataFileLoc, sql = SqlStatement,header = TRUE, sep= ';')
mydata$Date <- as.Date(mydata$Date , "%d/%m/%Y")
hist(mydata$Global_active_power, xlab="Global Active Power (kilowatts)", main="Global Active Power", col="Red")
dev.copy(png,'plot1.png', width=480, height=480)
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidy_lab_pe.R
\name{tidy_lab_pe}
\alias{tidy_lab_pe}
\title{Tidy Provide Enterprise laboratory data.}
\usage{
tidy_lab_pe(x)
}
\arguments{
\item{x}{A data-frame.}
}
\value{
A tibble.
}
\description{
Tidy the Provide Enterprise "Test Results by Client With ID" report.
}
| /man/tidy_lab_pe.Rd | no_license | zhaoy/zhaoy | R | false | true | 348 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidy_lab_pe.R
\name{tidy_lab_pe}
\alias{tidy_lab_pe}
\title{Tidy Provide Enterprise laboratory data.}
\usage{
tidy_lab_pe(x)
}
\arguments{
\item{x}{A data-frame.}
}
\value{
A tibble.
}
\description{
Tidy the Provide Enterprise "Test Results by Client With ID" report.
}
|
# Simple Linear Regressor
dataset = read.csv('Salary_Data.csv')
library(caTools)
# split dataset into train and test (fixed seed for reproducibility)
set.seed(123)
split = sample.split(dataset$YearsExperience, SplitRatio = 1/3)
training_set = subset(dataset, split == TRUE)
# BUG FIX: the test set must be the complement of the training set;
# the original used split == TRUE for both, making them identical.
test_set = subset(dataset, split == FALSE)
# BUG FIX: the original was a syntax error (missing comma before `data`)
# and misspelled the column ('YearExperience' vs 'YearsExperience').
# NOTE(review): Salary ~ YearsExperience may be the intended model for
# this dataset -- confirm which variable is the response.
regressor = lm(formula = YearsExperience ~ .,
               data = training_set)
| /Regression/Simple Linear Regression/Simple Linear Regression_R.R | no_license | dhirajwagh1612/Machine_Learning_Models | R | false | false | 366 | r | # Simple Linear Regressor
# Simple Linear Regressor
dataset = read.csv('Salary_Data.csv')
library(caTools)
# split dataset into train and test (fixed seed for reproducibility)
set.seed(123)
split = sample.split(dataset$YearsExperience, SplitRatio = 1/3)
training_set = subset(dataset, split == TRUE)
# BUG FIX: the test set must be the complement of the training set;
# the original used split == TRUE for both, making them identical.
test_set = subset(dataset, split == FALSE)
# BUG FIX: the original was a syntax error (missing comma before `data`)
# and misspelled the column ('YearExperience' vs 'YearsExperience').
# NOTE(review): Salary ~ YearsExperience may be the intended model for
# this dataset -- confirm which variable is the response.
regressor = lm(formula = YearsExperience ~ .,
               data = training_set)
|
#####################################################
#################### KEYNESS ###################
#####################################################
## Note: This file subsets the dataframe (not the corpus object)
# Holds the latest keyness result plus a flag telling the plot output
# whether a valid result is available.
keyness_container <- reactiveValues(keyness = NULL,
                                    keyness_check = FALSE)
# Triggered by the "get keyness" button: validate the inputs, then run
# quanteda's textstat_keyness on the dfm grouped by the chosen docvar.
observeEvent(input$get_keyness, {
  # A dfm must have been created before keyness can be estimated.
  if(dfm_container$dfm_check == FALSE){
    shinyalert::shinyalert("Error!",
                           "Keyness requires a dfm of a corpus. Please create a dfm first after having uploaded your corpus (see menu on the left).",
                           type = "error")
    keyness_container$keyness <- NULL
    keyness_container$keyness_check <- FALSE
  # The grouping variable must be a document variable of the corpus.
  } else if(!(input$keyness_variable %in% colnames(docvars(corpus_container$corp)))){
    shinyalert::shinyalert("Error!",
                           "Keyness variable does not exist in your data.",
                           type = "error")
    keyness_container$keyness <- NULL
    keyness_container$keyness_check <- FALSE
  # The target level must be one of the values of that variable.
  } else if(!(input$keyness_target %in% unique(docvars(corpus_container$corp, input$keyness_variable)))){
    shinyalert::shinyalert("Error!",
                           "Keyness target does not exist in your keyness variable.",
                           type = "error")
    keyness_container$keyness <- NULL
    keyness_container$keyness_check <- FALSE
  } else{
    # All checks passed: group the dfm by the chosen docvar and estimate
    # keyness of the target group with the selected association measure.
    withProgress(message = 'Estimating Keyness...', value = 0.5, {
      dfm <- dfm_container$dfm
      dfm_grouped <- dfm_group(dfm, groups = docvars(corpus_container$corp, input$keyness_variable))
      keyness_result <- textstat_keyness(dfm_grouped, target = input$keyness_target, measure = input$keyness_measure)
      keyness_container$keyness <- keyness_result
      keyness_container$keyness_check <- TRUE
    })
  }
})
output$keyness_plot <- renderPlot({
validate(need(input$get_keyness, "The plot will be displayed here once you have estimated the Keyness model."))
validate(need(dfm_container$dfm_check, "Please create a dfm."))
validate(need(keyness_container$keyness_check, "Please estimate Keyness."))
textplot_keyness(keyness_container$keyness, show_reference = input$keyness_ref == "show")
}) | /R/keyness_server.R | no_license | stefan-mueller/tada | R | false | false | 2,310 | r | #####################################################
#################### KEYNESS ###################
#####################################################
## Note: This file subsets the dataframe (not the corpus object)
# Holds the latest keyness result plus a flag telling the plot output
# whether a valid result is available.
keyness_container <- reactiveValues(keyness = NULL,
                                    keyness_check = FALSE)
# Triggered by the "get keyness" button: validate the inputs, then run
# quanteda's textstat_keyness on the dfm grouped by the chosen docvar.
observeEvent(input$get_keyness, {
  # A dfm must have been created before keyness can be estimated.
  if(dfm_container$dfm_check == FALSE){
    shinyalert::shinyalert("Error!",
                           "Keyness requires a dfm of a corpus. Please create a dfm first after having uploaded your corpus (see menu on the left).",
                           type = "error")
    keyness_container$keyness <- NULL
    keyness_container$keyness_check <- FALSE
  # The grouping variable must be a document variable of the corpus.
  } else if(!(input$keyness_variable %in% colnames(docvars(corpus_container$corp)))){
    shinyalert::shinyalert("Error!",
                           "Keyness variable does not exist in your data.",
                           type = "error")
    keyness_container$keyness <- NULL
    keyness_container$keyness_check <- FALSE
  # The target level must be one of the values of that variable.
  } else if(!(input$keyness_target %in% unique(docvars(corpus_container$corp, input$keyness_variable)))){
    shinyalert::shinyalert("Error!",
                           "Keyness target does not exist in your keyness variable.",
                           type = "error")
    keyness_container$keyness <- NULL
    keyness_container$keyness_check <- FALSE
  } else{
    # All checks passed: group the dfm by the chosen docvar and estimate
    # keyness of the target group with the selected association measure.
    withProgress(message = 'Estimating Keyness...', value = 0.5, {
      dfm <- dfm_container$dfm
      dfm_grouped <- dfm_group(dfm, groups = docvars(corpus_container$corp, input$keyness_variable))
      keyness_result <- textstat_keyness(dfm_grouped, target = input$keyness_target, measure = input$keyness_measure)
      keyness_container$keyness <- keyness_result
      keyness_container$keyness_check <- TRUE
    })
  }
})
# Render the keyness plot once a valid result exists; show informative
# placeholder messages until then.
output$keyness_plot <- renderPlot({
  validate(need(input$get_keyness, "The plot will be displayed here once you have estimated the Keyness model."))
  validate(need(dfm_container$dfm_check, "Please create a dfm."))
  validate(need(keyness_container$keyness_check, "Please estimate Keyness."))
  textplot_keyness(keyness_container$keyness, show_reference = input$keyness_ref == "show")
})
/r-course/data/tema1/scripts/04-analisis.fwf.R | permissive | miguel-ossa/CursoR | R | false | false | 815 | r | ||
## Depth power fit that is regionally specific
library(ggplot2)
gridCells = read.csv("metadata_by_grid.csv",stringsAsFactors = FALSE)
GreenlandFID = c(3774:3776,3759:3762,3737:3742,3711:3715,3681:3685,3639:3643,3584:3588,3522:3525,3452:3454,3378:3380)
AntarcFID = 3791:4163 # These are FID_1 on TC's documents already corrected in CM
#gridCells$rechargeFull = factor(gridCells$rechargeVolumeFull)
gridCells$rechargeType = gridCells$rechargeDepth
gridCells$combined = factor(paste(gridCells$rechargeVolume,gridCells$crustScheme2))
gridCells$rechargeVolume = factor(gridCells$rechargeVolume)
gridCells$rechargeType = factor(gridCells$rechargeDepth)
gridCells$crustScheme2 = factor(gridCells$crustScheme2)
gridCells$rechargeFull = factor(gridCells$Descriptio)
gridCells$combined_cv = factor(gridCells$combined_cv)
gridCells$combined_cr = factor(gridCells$combined_cr)
gridCells$rechargeShort = factor(gridCells$rechargeShort)
df.trimmed = read.csv("cores_with_PCR.csv")
# Select direct measurements only
all = df.trimmed[which(df.trimmed$MethodCM=="direct"),]
# makes sure all parameters are in correct format
all$Depth = as.numeric(all$Depth) # in meters
all$cellsPer = as.numeric(all$cellsPer) # in cell cm-3
# load indices
myIndices = as.matrix(read.csv("1000_indices_for_bootstrap.csv",header=FALSE))
bootstraps = nrow(myIndices)
depthsToIterate = gridCells$Z122_Med_HF_km*1000 # in meters
plot(log10(all$Depth),log10(all$cellsPer))
AntarcFID = 3791:4163 # These are FID_1 on TC's documents already corrected in CM
GreenlandFID = c(3774:3776,3759:3762,3737:3742,3711:3715,3681:3685,3639:3643,3584:3588,3522:3525,3452:3454,3378:3380)
# Fit per-group log10-log10 (power-law) depth-abundance regressions and
# integrate them over grid cells to estimate total biomass.
#
# Args:
#   var:   grouping factor for the training/test rows (e.g. crust scheme)
#   map:   grouping factor for each row of the global `gridCells` table
#   train: data frame with columns c (log10 cells cm^-3) and d (log10 depth m)
#   test:  held-out rows with the same columns, used for the out-of-sample MSE
#
# Relies on globals: gridCells, depthsToIterate, GreenlandFID, AntarcFID.
#
# Returns a list with:
#   estimate: c(total biomass, test MSE)
#   grid:     per-grid-cell biomass vector
#   params:   intercept, slope and R^2 for each group (3 columns per group)
linearModel = function(var,map,train,test){
  biomass = 0
  error = matrix(nrow=0,ncol=2)   # rows of (observed, predicted) pairs
  univar = sort(unique(var))
  loopAB=matrix(nrow=1,ncol=3*length(univar))
  gridVec = vector(length=nrow(gridCells))
  for (i in 1:length(univar)){
    # Fit log10(cells) ~ log10(depth) within this group
    powerFit = lm(c~d,train[which(var==univar[i]),])
    pf.test = test[which(var==univar[i]),]
    a = powerFit$coefficients[1]
    b = powerFit$coefficients[2]
    loopAB[,(i-1)*3+1]=a
    loopAB[,(i-1)*3+2]=b
    loopAB[,(i-1)*3+3]=summary(powerFit)$r.squared
    powerResult = predict(powerFit,pf.test)
    error = rbind(error, cbind(pf.test$c, powerResult))
    for (g in which(map==univar[i])){
      if (gridCells$FID[g]%in%GreenlandFID){
        # Greenland cells: fixed power fit plus an additive constant
        integralFun = function(x) {(10^7.73)*x^-0.66}
        gridVec[g] = gridCells$grid_area_m2[g]*100*100*(15000000000+integrate(integralFun,1,depthsToIterate[g])$value*100)
        biomass = biomass + gridVec[g]
      }
      else if (gridCells$FID[g]%in%AntarcFID) {
        # Antarctic cells: fixed power fit plus an additive constant
        integralFun = function(x) {(10^6)*x^-0.66}
        gridVec[g] = gridCells$grid_area_m2[g]*100*100*(2150000000+integrate(integralFun,1,depthsToIterate[g])$value*100)
        biomass = biomass + gridVec[g]
      }
      else{
        # All other cells: integrate the group-specific fitted power law
        integralFun = function(x) {(10^a)*x^b}
        gridVec[g] = gridCells$grid_area_m2[g]*100*100*integrate(integralFun,1,depthsToIterate[g])$value*100
        biomass = biomass + gridVec[g]
      }
    }
  }
  # BUG FIX: the original computed mean((error[1]-error[2])^2), i.e. the
  # squared difference of the first two *elements* of the matrix, not the
  # MSE. The MSE must compare the observed (col 1) and predicted (col 2)
  # columns across all held-out rows.
  mse = mean((error[,1]-error[,2])^2)
  biomassAndError = t(matrix(c(biomass,mse)))[1,]
  # Reject degenerate bootstrap fits: implausibly large or exactly-zero MSE
  if(mse>2 || mse==0){
    myList = list(estimate=rep(NA,2),grid=rep(NA,length=nrow(gridCells)),params=rep(NA,length(univar)*3))
  } else {
    myList = list(estimate=biomassAndError,grid = gridVec,params=loopAB)
  }
  return(myList)
}
all$d = log10(all$Depth)
all$c = log10(all$cellsPer)
index=myIndices
newEstimate = matrix(nrow=nrow(index), ncol=2)
gridValues = data.frame(matrix(nrow=nrow(gridCells),ncol=nrow(index)))
parameters = matrix(nrow=nrow(index), ncol=3*5)
for (n in 1:nrow(index)){
if(n%%10==0){print(n)}
trainSet = all[index[n,],]
testSet = all[-index[n,],]
if(sum(table(trainSet$crustScheme2)<4)>0){print(n); newEstimate[n]=NA}
else{
output = linearModel(trainSet$crustScheme2,gridCells$crustScheme2,trainSet,testSet)
newEstimate[n,]=output$estimate
gridValues[,n]=output$grid
parameters[n,]=output$params
}
}
crustTypes = sort(unique(trainSet$crustScheme2))
median_params = apply(parameters,2,median,na.rm=TRUE)
colors = c("blue","green","red","purple","orange")
par(mfrow=c(3,2))
for (i in 1:length(crustTypes)){
tmp = all[which(all$crustScheme2==crustTypes[i]),]
plot(tmp$d,tmp$c,col=colors[i],xlim=c(-1,4),ylim=c(3,10),
main=paste(paste(crustTypes[i],"; R-squared = ",sep=""),toString(round(median_params[(i-1)*3+3],digits = 2))))
#print(summary(lm(tmp$c~tmp$d))$r.squared)
#print(lm(tmp$c~tmp$d)$coefficients)
tmp.lm = lm(tmp$c~tmp$d)
lwr = predict(tmp.lm,data.frame(tmp$d), interval="predict")[,2]
upr = predict(tmp.lm,data.frame(tmp$d), interval="predict")[,3]
abline(a=median_params[(i-1)*3+1],b=median_params[(i-1)*3+2],col=colors[i])
lines(tmp$d,lwr)
lines(tmp$d,upr)
print(mean(upr-lwr))
}
totdat.lm = lm(all$c~all$d)
lwr = predict(totdat.lm,data.frame(all$d), interval="predict")[,2]
upr = predict(totdat.lm,data.frame(all$d), interval="predict")[,3]
plot(all$d,all$c,main="Total Dataset; R-squared = 0.19")
abline(a=coef(totdat.lm)[1],b=coef(totdat.lm)[2],col="gray" )
lines(all$d,lwr,col="gray")
lines(all$d,upr,col="gray")
print(mean(upr-lwr))
| /Crust_Specific_Fits.R | no_license | cmagnabosco/Subsurface_Biomass_and_Biodiversity | R | false | false | 5,817 | r | ## Depth power fit that is regionally specific
library(ggplot2)
gridCells = read.csv("metadata_by_grid.csv",stringsAsFactors = FALSE)
GreenlandFID = c(3774:3776,3759:3762,3737:3742,3711:3715,3681:3685,3639:3643,3584:3588,3522:3525,3452:3454,3378:3380)
AntarcFID = 3791:4163 # These are FID_1 on TC's documents already corrected in CM
#gridCells$rechargeFull = factor(gridCells$rechargeVolumeFull)
gridCells$rechargeType = gridCells$rechargeDepth
gridCells$combined = factor(paste(gridCells$rechargeVolume,gridCells$crustScheme2))
gridCells$rechargeVolume = factor(gridCells$rechargeVolume)
gridCells$rechargeType = factor(gridCells$rechargeDepth)
gridCells$crustScheme2 = factor(gridCells$crustScheme2)
gridCells$rechargeFull = factor(gridCells$Descriptio)
gridCells$combined_cv = factor(gridCells$combined_cv)
gridCells$combined_cr = factor(gridCells$combined_cr)
gridCells$rechargeShort = factor(gridCells$rechargeShort)
df.trimmed = read.csv("cores_with_PCR.csv")
# Select direct measurements only
all = df.trimmed[which(df.trimmed$MethodCM=="direct"),]
# makes sure all parameters are in correct format
all$Depth = as.numeric(all$Depth) # in meters
all$cellsPer = as.numeric(all$cellsPer) # in cell cm-3
# load indices
myIndices = as.matrix(read.csv("1000_indices_for_bootstrap.csv",header=FALSE))
bootstraps = nrow(myIndices)
depthsToIterate = gridCells$Z122_Med_HF_km*1000 # in meters
plot(log10(all$Depth),log10(all$cellsPer))
AntarcFID = 3791:4163 # These are FID_1 on TC's documents already corrected in CM
GreenlandFID = c(3774:3776,3759:3762,3737:3742,3711:3715,3681:3685,3639:3643,3584:3588,3522:3525,3452:3454,3378:3380)
# Fit per-group log10-log10 (power-law) depth-abundance regressions and
# integrate them over grid cells to estimate total biomass.
#
# Args:
#   var:   grouping factor for the training/test rows (e.g. crust scheme)
#   map:   grouping factor for each row of the global `gridCells` table
#   train: data frame with columns c (log10 cells cm^-3) and d (log10 depth m)
#   test:  held-out rows with the same columns, used for the out-of-sample MSE
#
# Relies on globals: gridCells, depthsToIterate, GreenlandFID, AntarcFID.
#
# Returns a list with:
#   estimate: c(total biomass, test MSE)
#   grid:     per-grid-cell biomass vector
#   params:   intercept, slope and R^2 for each group (3 columns per group)
linearModel = function(var,map,train,test){
  biomass = 0
  error = matrix(nrow=0,ncol=2)   # rows of (observed, predicted) pairs
  univar = sort(unique(var))
  loopAB=matrix(nrow=1,ncol=3*length(univar))
  gridVec = vector(length=nrow(gridCells))
  for (i in 1:length(univar)){
    # Fit log10(cells) ~ log10(depth) within this group
    powerFit = lm(c~d,train[which(var==univar[i]),])
    pf.test = test[which(var==univar[i]),]
    a = powerFit$coefficients[1]
    b = powerFit$coefficients[2]
    loopAB[,(i-1)*3+1]=a
    loopAB[,(i-1)*3+2]=b
    loopAB[,(i-1)*3+3]=summary(powerFit)$r.squared
    powerResult = predict(powerFit,pf.test)
    error = rbind(error, cbind(pf.test$c, powerResult))
    for (g in which(map==univar[i])){
      if (gridCells$FID[g]%in%GreenlandFID){
        # Greenland cells: fixed power fit plus an additive constant
        integralFun = function(x) {(10^7.73)*x^-0.66}
        gridVec[g] = gridCells$grid_area_m2[g]*100*100*(15000000000+integrate(integralFun,1,depthsToIterate[g])$value*100)
        biomass = biomass + gridVec[g]
      }
      else if (gridCells$FID[g]%in%AntarcFID) {
        # Antarctic cells: fixed power fit plus an additive constant
        integralFun = function(x) {(10^6)*x^-0.66}
        gridVec[g] = gridCells$grid_area_m2[g]*100*100*(2150000000+integrate(integralFun,1,depthsToIterate[g])$value*100)
        biomass = biomass + gridVec[g]
      }
      else{
        # All other cells: integrate the group-specific fitted power law
        integralFun = function(x) {(10^a)*x^b}
        gridVec[g] = gridCells$grid_area_m2[g]*100*100*integrate(integralFun,1,depthsToIterate[g])$value*100
        biomass = biomass + gridVec[g]
      }
    }
  }
  # BUG FIX: the original computed mean((error[1]-error[2])^2), i.e. the
  # squared difference of the first two *elements* of the matrix, not the
  # MSE. The MSE must compare the observed (col 1) and predicted (col 2)
  # columns across all held-out rows.
  mse = mean((error[,1]-error[,2])^2)
  biomassAndError = t(matrix(c(biomass,mse)))[1,]
  # Reject degenerate bootstrap fits: implausibly large or exactly-zero MSE
  if(mse>2 || mse==0){
    myList = list(estimate=rep(NA,2),grid=rep(NA,length=nrow(gridCells)),params=rep(NA,length(univar)*3))
  } else {
    myList = list(estimate=biomassAndError,grid = gridVec,params=loopAB)
  }
  return(myList)
}
all$d = log10(all$Depth)
all$c = log10(all$cellsPer)
index=myIndices
newEstimate = matrix(nrow=nrow(index), ncol=2)
gridValues = data.frame(matrix(nrow=nrow(gridCells),ncol=nrow(index)))
parameters = matrix(nrow=nrow(index), ncol=3*5)
for (n in 1:nrow(index)){
if(n%%10==0){print(n)}
trainSet = all[index[n,],]
testSet = all[-index[n,],]
if(sum(table(trainSet$crustScheme2)<4)>0){print(n); newEstimate[n]=NA}
else{
output = linearModel(trainSet$crustScheme2,gridCells$crustScheme2,trainSet,testSet)
newEstimate[n,]=output$estimate
gridValues[,n]=output$grid
parameters[n,]=output$params
}
}
crustTypes = sort(unique(trainSet$crustScheme2))
median_params = apply(parameters,2,median,na.rm=TRUE)
colors = c("blue","green","red","purple","orange")
par(mfrow=c(3,2))
for (i in 1:length(crustTypes)){
tmp = all[which(all$crustScheme2==crustTypes[i]),]
plot(tmp$d,tmp$c,col=colors[i],xlim=c(-1,4),ylim=c(3,10),
main=paste(paste(crustTypes[i],"; R-squared = ",sep=""),toString(round(median_params[(i-1)*3+3],digits = 2))))
#print(summary(lm(tmp$c~tmp$d))$r.squared)
#print(lm(tmp$c~tmp$d)$coefficients)
tmp.lm = lm(tmp$c~tmp$d)
lwr = predict(tmp.lm,data.frame(tmp$d), interval="predict")[,2]
upr = predict(tmp.lm,data.frame(tmp$d), interval="predict")[,3]
abline(a=median_params[(i-1)*3+1],b=median_params[(i-1)*3+2],col=colors[i])
lines(tmp$d,lwr)
lines(tmp$d,upr)
print(mean(upr-lwr))
}
totdat.lm = lm(all$c~all$d)
lwr = predict(totdat.lm,data.frame(all$d), interval="predict")[,2]
upr = predict(totdat.lm,data.frame(all$d), interval="predict")[,3]
plot(all$d,all$c,main="Total Dataset; R-squared = 0.19")
abline(a=coef(totdat.lm)[1],b=coef(totdat.lm)[2],col="gray" )
lines(all$d,lwr,col="gray")
lines(all$d,upr,col="gray")
print(mean(upr-lwr))
|
# Draw the per-segment output KPI: smoothed spline curve with sign-coloured
# points, a zero baseline, and dashed guide lines every 5 segments.
plotOutput <- function(session) {
  .validateRunSession(session)
  segments <- session$pace
  kpi <- .kpiOutput(session)
  smoothed <- spline(kpi)
  axis.limit <- ceiling(max(abs(range(kpi))))
  # Empty canvas first; curve, points and guides are layered on afterwards.
  plot(
    smoothed,
    type = "n",
    xlab = "Segment",
    ylab = "Output",
    ylim = c(-axis.limit, axis.limit),
    xaxt = "n",
    cex.lab = .8,
    main = "Output"
  )
  # Custom x axis: ticks at each segment, labelled from zero
  axis(1, at = 1:length(segments), labels = 0:(length(segments) - 1), cex.axis = .8)
  lines(smoothed, col = .colours$black)
  # Negative values in red, non-negative in green
  point.colours <- ifelse(smoothed$y < 0, .colours$red, .colours$green)
  points(smoothed, pch = 19, col = point.colours)
  abline(h = 0, lty = "dashed", col = .colours$black)
  abline(v = seq(6, to = length(segments), by = 5), col = .colours$black, lty = "dashed")
}
| /R/plotOutput.R | permissive | michelcaradec/runR | R | false | false | 745 | r | plotOutput <- function(session) {
.validateRunSession(session)
pace <- session$pace
output <- .kpiOutput(session)
output.spline <- spline(output)
y.limit <- ceiling(max(abs(range(output))))
plot(
output.spline,
type = "n",
xlab = "Segment",
ylab = "Output",
ylim = c(-y.limit, y.limit),
xaxt = "n",
cex.lab = .8,
main = "Output"
)
axis(1, at = 1:length(pace), labels = 0:(length(pace) - 1), cex.axis = .8)
lines(output.spline, col = .colours$black)
points(output.spline, pch = 19, col = ifelse(output.spline$y < 0, .colours$red, .colours$green))
abline(h = 0, lty = "dashed", col = .colours$black)
abline(v = seq(6, to = length(pace), by = 5), col = .colours$black, lty = "dashed")
}
|
#' Get Traits
#'
#' Retrieves life history traits from FishLife
#'
#' This function returns the mean un-logged life history traits for the closest match to the
#' supplied taxonomic information.
#'
#' @param Class Character input for taxonomic class
#' @param Order Character input for taxonomic class
#' @param Family Character input for taxonomic class
#' @param Genus Character input for taxonomic class
#' @param Species Character input for taxonomic class
#' @param verbose logical where TRUE prints closest match, FALSE does not
#'
#' @return a dataframe of mean trait values
#' @export
#'
#' @examples
#' \dontrun{
#' life_traits <- Get_traits(Genus = "Lutjanus", Species = "campechanus")
#' }
Get_traits_az <- function( Class="predictive", Order="predictive", Family="predictive", Genus="predictive", Species="predictive",verbose = FALSE) {
Genus="Pagellus"
Species="bogaraveo"
closest_match <- FishLife::Search_species(Class = Class, Order = Order, Family = Family, Genus = Genus, Species = Species)# Database="FishBase" )
trait_table <- as.data.frame(t(FishLife::FishBase$ParHat$beta_gj[closest_match$GroupNum[[1]],]))
trait_table[colnames(trait_table) != 'Temperature'] <-
exp(trait_table[colnames(trait_table) != 'Temperature'])
return(trait_table)
} | /R/get_traits_az.R | permissive | lennon-thomas/spasm_azores | R | false | false | 1,282 | r | #' Get Traits
#'
#' Retrieves life history traits from FishLife
#'
#' This function returns the mean un-logged life history traits for the closest match to the
#' supplied taxonomic information.
#'
#' @param Class Character input for taxonomic class
#' @param Order Character input for taxonomic class
#' @param Family Character input for taxonomic class
#' @param Genus Character input for taxonomic class
#' @param Species Character input for taxonomic class
#' @param verbose logical where TRUE prints closest match, FALSE does not
#'
#' @return a dataframe of mean trait values
#' @export
#'
#' @examples
#' \dontrun{
#' life_traits <- Get_traits(Genus = "Lutjanus", Species = "campechanus")
#' }
Get_traits_az <- function( Class="predictive", Order="predictive", Family="predictive", Genus="predictive", Species="predictive",verbose = FALSE) {
Genus="Pagellus"
Species="bogaraveo"
closest_match <- FishLife::Search_species(Class = Class, Order = Order, Family = Family, Genus = Genus, Species = Species)# Database="FishBase" )
trait_table <- as.data.frame(t(FishLife::FishBase$ParHat$beta_gj[closest_match$GroupNum[[1]],]))
trait_table[colnames(trait_table) != 'Temperature'] <-
exp(trait_table[colnames(trait_table) != 'Temperature'])
return(trait_table)
} |
##Creates PNG line graph of submeters for 02/01/2007-02/02/2007
#Read data into data table, make data frame
powCon <- fread("household_power_consumption.txt", nrows=2880, skip="1/2/2007")
powConNames <- names(fread("household_power_consumption.txt", nrows=1))
setnames(powCon, powConNames)
powCon <- as.data.frame(powCon)
#Convert date/time, new column
powCon$datetime <- as.POSIXct(paste(powCon$Date, powCon$Time), format="%d/%m/%Y %H:%M:%S")
#Create graph from subsets of data
png(filename="plot3.png")
with(powCon, plot(datetime, Sub_metering_1, type="l",
ylab="Energy sub metering", xlab=""))
lines(powCon$datetime, powCon$Sub_metering_2, col="red")
lines(powCon$datetime, powCon$Sub_metering_3, col="blue")
legend("topright", lwd=1, col=c("black", "red", "blue"),
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off() | /plot3.R | no_license | viator753/ExData_Plotting1 | R | false | false | 870 | r | ##Creates PNG line graph of submeters for 02/01/2007-02/02/2007
## Creates PNG line graph of submeters for 2007-02-01 to 2007-02-02.
# FIX: fread() and setnames() belong to data.table, which was never loaded;
# without this the script fails with 'could not find function "fread"'.
library(data.table)
# Read data into data table, make data frame (skip to the first target date)
powCon <- fread("household_power_consumption.txt", nrows=2880, skip="1/2/2007")
powConNames <- names(fread("household_power_consumption.txt", nrows=1))
setnames(powCon, powConNames)
powCon <- as.data.frame(powCon)
# Convert date/time into a single POSIXct column for the x axis
powCon$datetime <- as.POSIXct(paste(powCon$Date, powCon$Time), format="%d/%m/%Y %H:%M:%S")
# Create graph: one line per sub-meter, written straight to the PNG device
png(filename="plot3.png")
with(powCon, plot(datetime, Sub_metering_1, type="l",
                  ylab="Energy sub metering", xlab=""))
lines(powCon$datetime, powCon$Sub_metering_2, col="red")
lines(powCon$datetime, powCon$Sub_metering_3, col="blue")
legend("topright", lwd=1, col=c("black", "red", "blue"),
       legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
#-------------------------------------------------
# Project: Coral_Reef_Distribution
#
# Date: 2021-10-29
# Author: Lewis A. Jones
# Copyright (c) Lewis A. Jones, 2021
# Email: LewisA.Jones@outlook.com
#
# Script name:
# prepare_fossil_data.R
#
# Script description:
# Prepare fossil occurrence data
#
#-------------------------------------------------
#Load libraries, functions and analyses options
library(dplyr)
library(stringr)
library(raster)
library(chronosphere)
source("./R/options.R")
source("./R/functions/bin_assignment.R")
#-------------------------------------------------
#load stage bins
bins <- read.csv("./data/stage_bins.csv")
#round mid age
bins$mid_ma <- round(bins$mid_ma, 3)
#Load PARED data
PARED <- read.csv("./data/occurrences/PARED_06_10_2021.csv")
collections <- as.integer(PARED$collection)
collections <- na.omit(collections)
r_number <- as.integer(PARED$r_number)
PBDB <- read.csv("./data/occurrences/PBDB_data_12_10_2021.csv")
#-------------------------------------------------
#PREPARE PARED DATA
#retain only coral reefs
PARED <- subset(PARED, biota_main_t == "Corals" | biota_sec_text == "Corals")
#retain only outcropping reefs
PARED <- subset(PARED, subsurface_text == "Outcropping reef")
#retain only true reefs
PARED <- subset(PARED, type_text == "True reef")
#remove cold water/temperate coral reefs
PARED <- subset(PARED, tropical_text == "Tropical or unknown")
#create empty cells for populating
PARED$mid_ma <- NA
PARED$prob <- NA
#assign bin based on probability duration
for(i in 1:nrow(PARED)){
print(round(i/nrow(PARED)*100)) #print percentage
tmp <- assign_bins_prob(max = PARED$max_ma[i], min = PARED$min_ma[i], bins = bins$min_ma) #assign bins based on age duration
PARED$mid_ma[i] <- as.numeric(tmp$mid_ma)
PARED$prob[i] <- as.numeric(tmp$prob)
}
#round mid age
PARED$mid_ma <- round(PARED$mid_ma, 3)
#retain data with 0.5 probability of being in assigned stage
PARED <- subset(PARED, prob >= 0.5)
#drop columns to avoid duplication
PARED <- subset(PARED, select=-c(max_ma, min_ma))
#join stage names based on assigned mid age
PARED <- inner_join(x = PARED, y = bins, by = c("mid_ma"))
#remove data younger than 2.588 Ma
PARED <- subset(PARED, min_ma >= 2.588)
#remove data older than 247.2 Ma
PARED <- subset(PARED, max_ma <= 247.2)
#load PARED rotations
rotations <- read.csv("/Users/lewis/Documents/Data/Rotations/PARED_rotated_02_11_2021.csv")
#add id columns for joining data
rotations$join <- paste(rotations$r_number, rotations$stage, sep = "_")
PARED$join <- paste(PARED$r_number, PARED$interval_name, sep = "_")
#join data to Getech rotations
PARED <- left_join(x = PARED, rotations[,c("join", "P.Long", "P.Lat")], by = "join")
#remove data without GETECH palaeocoordinates
PARED <- subset(PARED, !is.na(P.Long) & !is.na(P.Lat))
#-------------------------------------------------
#PREPARE PBDB DATA
#retain only true reefs based on environment and lithology
PBDB <- PBDB %>% filter(!environment %in% c("perireef or subreef"))
PBDB <- PBDB %>% filter(!lithology1 %in% c("shale",
"marl",
"claystone",
"wackestone",
"breccia",
"phosphorite",
"volcaniclastic",
"tuff",
"siliciclastic",
"conglomerate",
"sandstone",
"siltstone",
"not reported"))
#filter by reef number for pre-removed reef data
remove <- r_number[!r_number %in% PARED$r_number]
remove <- paste("Reef ", remove, sep = "")
PBDB <- PBDB %>% filter(!collection_aka %in% remove)
PBDB$mid_ma <- NA
PBDB$prob <- NA
#assign bin based on probability duration
for(i in 1:nrow(PBDB)){
print(round(i/nrow(PBDB)*100)) #print percentage
tmp <- assign_bins_prob(max = PBDB$max_ma[i], min = PBDB$min_ma[i], bins = bins$min_ma) #assign bins based on age duration
PBDB$mid_ma[i] <- tmp$mid_ma
PBDB$prob[i] <- tmp$prob
}
#round mid age
PBDB$mid_ma <- round(PBDB$mid_ma, 3)
#retain data with 0.5 probability of being in assigned stage
PBDB <- subset(PBDB, prob >= 0.5)
#drop columns to avoid duplication
PBDB <- subset(PBDB, select=-c(max_ma, min_ma))
#join stage names based on assigned mid age
PBDB <- inner_join(x = PBDB, y = bins, by = c("mid_ma"))
#remove data younger than 2.588 Ma
PBDB <- subset(PBDB, min_ma >= 2.588)
#remove data older than 247.2 Ma
PBDB <- subset(PBDB, max_ma <= 247.2)
#load PBDB rotations
rotations <- read.csv("/Users/lewis/Documents/Data/Rotations/Getech_rotated_collections_04_10_2021.csv")
#add id columns for joining data
rotations$join <- paste(rotations$collection_no, rotations$stage_bin, sep = "_")
PBDB$join <- paste(PBDB$collection_no, PBDB$interval_name, sep = "_")
#join data to Getech rotations
PBDB <- left_join(x = PBDB, rotations[,c("join", "P.Long", "P.Lat")], by = "join")
#remove data without GETECH palaeocoordinates
PBDB <- subset(PBDB, !is.na(P.Long) & !is.na(P.Lat))
#-------------------------------------------------
#subset and tidy data
PBDB$r_number <- NA
PBDB <- PBDB[,c("collection_no","r_number", "interval_name", "max_ma","mid_ma", "min_ma", "lng", "lat", "P.Long", "P.Lat")]
PBDB$data_source <- c("PaleoBioDB")
PARED$collection_no <- PARED$collection
PARED$collection_no[PARED$collection_no == ""] <- NA
PARED$collection_no[PARED$collection_no == 0] <- NA
PARED$lng <- PARED$longit
PARED$lat <- PARED$lat
PARED <- PARED[,c("collection_no", "r_number", "interval_name", "max_ma", "mid_ma", "min_ma", "lng", "lat", "P.Long", "P.Lat")]
PARED$data_source <- c("PaleoReefDB")
#bind data
PARED <- rbind.data.frame(PARED, PBDB)
#-------------------------------------------------
#Rotate data with PALEOMAP for sensitivity testing
#get plate model
pm <- fetch("paleomap", "model", datadir="./data/model/") #download plate model
PARED$paleolng <- NA
PARED$paleolat <- NA
pb <- txtProgressBar(min = 0, # Minimum value of the progress bar
max = nrow(PARED), # Maximum value of the progress bar
style = 3, # Progress bar style (also available style = 1 and style = 2)
width = 50, # Progress bar width. Defaults to getOption("width")
char = "=") # Character used to create the bar
for(i in 1:nrow(PARED)){
coords <- reconstruct(x = PARED[i, c("lng", "lat")], #coordinates of data
age = PARED$mid_ma[i], #age of data
model=pm, #plate model
dir = "./data/model/", #directory of plate model
#path.gplates="/Volumes/GPlates-2.2.0-Darwin-x86_64/",
cleanup = TRUE)
PARED$paleolng[i] <- coords[,c("lng")]
PARED$paleolat[i] <- coords[,c("lat")]
setTxtProgressBar(pb, i)
}
#write data
write.csv(PARED, "./data/occurrences/PARED_cleaned.csv", row.names = FALSE)
#-------------------------------------------------
#filter data on continents
stages <- unique(PARED$interval_name)
#create empty dataframe
master <- data.frame()
#run for loop
for(i in stages){
tmp <- subset(PARED, interval_name == i)
DEM <- raster(paste("./data/enm/layers/", i, "/dem.asc", sep = ""))
ext <- extract(x = DEM, y = tmp[,c("P.Long","P.Lat")], df = TRUE)
ext <- which(!is.na(ext$dem))
tmp <- tmp[ext,]
master <- rbind.data.frame(master, tmp)
}
#write data
write.csv(master, "./data/occurrences/PARED_clip.csv", row.names = FALSE)
#assign data
PARED <- master
#spatially subsample data
# One record per grid cell per stage: rasterise occurrences onto a global
# grid of resolution `res` (defined in the project's ./R/options.R) and keep
# the centres of occupied cells.
r <- raster(res = res)
#create empty data frame
master <- data.frame()
#get unique stages of data
stages <- unique(PARED$interval_name)
#run for loop
for(i in stages){
interval_name <- i
df <- subset(PARED, interval_name == i)
# mark every cell that contains at least one occurrence
ras <- rasterize(x = df[,c("P.Long", "P.Lat")], y = r, field = 1)
# cell centres of occupied cells
pts_ras <- data.frame(rasterToPoints(ras))
pts_ras <- data.frame(pts_ras[,c("x","y")])
# re-attach the stage name
pts_ras <- cbind.data.frame(pts_ras, interval_name)
master <- rbind.data.frame(master, pts_ras)
}
#update column names
colnames(master) <- c("P.Long", "P.Lat", "interval_name")
#write data
write.csv(master, "./data/occurrences/PARED_subsampled.csv", row.names = FALSE)
#notification
beepr::beep(2)
| /R/subscripts/prepare_fossil_reef_data.R | no_license | LewisAJones/Coral_Reef_Distribution | R | false | false | 8,497 | r | #-------------------------------------------------
# Project: Coral_Reef_Distribution
#
# Date: 2021-10-29
# Author: Lewis A. Jones
# Copyright (c) Lewis A. Jones, 2021
# Email: LewisA.Jones@outlook.com
#
# Script name:
# prepare_fossil_data.R
#
# Script description:
# Prepare fossil occurrence data
#
#-------------------------------------------------
#Load libraries, functions and analyses options
library(dplyr)
library(stringr)
library(raster)
library(chronosphere)
source("./R/options.R")                  # analysis options (e.g. grid `res`)
source("./R/functions/bin_assignment.R") # provides assign_bins_prob()
#-------------------------------------------------
#load stage bins
bins <- read.csv("./data/stage_bins.csv")
#round mid age to 3 decimals so the later joins on mid_ma match exactly
bins$mid_ma <- round(bins$mid_ma, 3)
#Load PARED (PaleoReefs Database) and PBDB (Paleobiology Database) data
PARED <- read.csv("./data/occurrences/PARED_06_10_2021.csv")
# reef numbers are used later to drop PBDB collections whose reef record was
# filtered out of PARED (dead `collections` variable removed: it was computed
# from PARED$collection but never read anywhere in this script)
r_number <- as.integer(PARED$r_number)
PBDB <- read.csv("./data/occurrences/PBDB_data_12_10_2021.csv")
#-------------------------------------------------
#PREPARE PARED DATA
#retain only coral reefs
PARED <- subset(PARED, biota_main_t == "Corals" | biota_sec_text == "Corals")
#retain only outcropping reefs
PARED <- subset(PARED, subsurface_text == "Outcropping reef")
#retain only true reefs
PARED <- subset(PARED, type_text == "True reef")
#remove cold water/temperate coral reefs
PARED <- subset(PARED, tropical_text == "Tropical or unknown")
#create empty cells for populating
PARED$mid_ma <- NA
PARED$prob <- NA
#assign bin based on probability duration
# assign_bins_prob() comes from ./R/functions/bin_assignment.R (sourced above)
for(i in 1:nrow(PARED)){
print(round(i/nrow(PARED)*100)) #print percentage
tmp <- assign_bins_prob(max = PARED$max_ma[i], min = PARED$min_ma[i], bins = bins$min_ma) #assign bins based on age duration
PARED$mid_ma[i] <- as.numeric(tmp$mid_ma)
PARED$prob[i] <- as.numeric(tmp$prob)
}
#round mid age
# 3 decimals, matching the rounding applied to bins$mid_ma for the join below
PARED$mid_ma <- round(PARED$mid_ma, 3)
#retain data with 0.5 probability of being in assigned stage
PARED <- subset(PARED, prob >= 0.5)
#drop columns to avoid duplication
PARED <- subset(PARED, select=-c(max_ma, min_ma))
#join stage names based on assigned mid age
PARED <- inner_join(x = PARED, y = bins, by = c("mid_ma"))
#remove data younger than 2.588 Ma
PARED <- subset(PARED, min_ma >= 2.588)
#remove data older than 247.2 Ma
PARED <- subset(PARED, max_ma <= 247.2)
#load PARED rotations
# NOTE(review): machine-specific absolute path; breaks on any other machine
rotations <- read.csv("/Users/lewis/Documents/Data/Rotations/PARED_rotated_02_11_2021.csv")
#add id columns for joining data
# composite key: reef number + stage name
rotations$join <- paste(rotations$r_number, rotations$stage, sep = "_")
PARED$join <- paste(PARED$r_number, PARED$interval_name, sep = "_")
#join data to Getech rotations
PARED <- left_join(x = PARED, rotations[,c("join", "P.Long", "P.Lat")], by = "join")
#remove data without GETECH palaeocoordinates
PARED <- subset(PARED, !is.na(P.Long) & !is.na(P.Lat))
#-------------------------------------------------
#PREPARE PBDB DATA
#retain only true reefs based on environment and lithology
PBDB <- PBDB %>% filter(!environment %in% c("perireef or subreef"))
# exclude lithologies inconsistent with in-place reef deposits
PBDB <- PBDB %>% filter(!lithology1 %in% c("shale",
"marl",
"claystone",
"wackestone",
"breccia",
"phosphorite",
"volcaniclastic",
"tuff",
"siliciclastic",
"conglomerate",
"sandstone",
"siltstone",
"not reported"))
#filter by reef number for pre-removed reef data
# r_number holds all PARED reef numbers read before filtering; anything no
# longer in PARED$r_number was removed above, so drop its PBDB collections too
remove <- r_number[!r_number %in% PARED$r_number]
remove <- paste("Reef ", remove, sep = "")
PBDB <- PBDB %>% filter(!collection_aka %in% remove)
PBDB$mid_ma <- NA
PBDB$prob <- NA
#assign bin based on probability duration
# same binning procedure as applied to PARED above
for(i in 1:nrow(PBDB)){
print(round(i/nrow(PBDB)*100)) #print percentage
tmp <- assign_bins_prob(max = PBDB$max_ma[i], min = PBDB$min_ma[i], bins = bins$min_ma) #assign bins based on age duration
PBDB$mid_ma[i] <- tmp$mid_ma
PBDB$prob[i] <- tmp$prob
}
#round mid age
PBDB$mid_ma <- round(PBDB$mid_ma, 3)
#retain data with 0.5 probability of being in assigned stage
PBDB <- subset(PBDB, prob >= 0.5)
#drop columns to avoid duplication
PBDB <- subset(PBDB, select=-c(max_ma, min_ma))
#join stage names based on assigned mid age
PBDB <- inner_join(x = PBDB, y = bins, by = c("mid_ma"))
#remove data younger than 2.588 Ma
PBDB <- subset(PBDB, min_ma >= 2.588)
#remove data older than 247.2 Ma
PBDB <- subset(PBDB, max_ma <= 247.2)
#load PBDB rotations
# NOTE(review): machine-specific absolute path; breaks on any other machine
rotations <- read.csv("/Users/lewis/Documents/Data/Rotations/Getech_rotated_collections_04_10_2021.csv")
#add id columns for joining data
rotations$join <- paste(rotations$collection_no, rotations$stage_bin, sep = "_")
PBDB$join <- paste(PBDB$collection_no, PBDB$interval_name, sep = "_")
#join data to Getech rotations
PBDB <- left_join(x = PBDB, rotations[,c("join", "P.Long", "P.Lat")], by = "join")
#remove data without GETECH palaeocoordinates
PBDB <- subset(PBDB, !is.na(P.Long) & !is.na(P.Lat))
#-------------------------------------------------
#subset and tidy data
# Harmonise the two sources to a common column set before row-binding.
PBDB$r_number <- NA
PBDB <- PBDB[,c("collection_no","r_number", "interval_name", "max_ma","mid_ma", "min_ma", "lng", "lat", "P.Long", "P.Lat")]
PBDB$data_source <- c("PaleoBioDB")
PARED$collection_no <- PARED$collection
# empty/zero collection numbers mean "no PBDB collection": recode as NA
PARED$collection_no[PARED$collection_no == ""] <- NA
PARED$collection_no[PARED$collection_no == 0] <- NA
PARED$lng <- PARED$longit
# NOTE(review): self-assignment is a no-op; presumably the PARED latitude
# column is already named "lat" (unlike longitude, "longit"), but confirm
# this was not meant to copy a differently named column.
PARED$lat <- PARED$lat
PARED <- PARED[,c("collection_no", "r_number", "interval_name", "max_ma", "mid_ma", "min_ma", "lng", "lat", "P.Long", "P.Lat")]
PARED$data_source <- c("PaleoReefDB")
#bind data
PARED <- rbind.data.frame(PARED, PBDB)
#-------------------------------------------------
#Rotate data with PALEOMAP for sensitivity testing
#get plate model
# chronosphere::fetch downloads the PaleoMAP plate model into ./data/model/
pm <- fetch("paleomap", "model", datadir="./data/model/") #download plate model
PARED$paleolng <- NA
PARED$paleolat <- NA
# Progress bar for the per-row PALEOMAP rotation loop below.
pb <- txtProgressBar(min = 0, # Minimum value of the progress bar
max = nrow(PARED), # Maximum value of the progress bar
style = 3, # Progress bar style (also available style = 1 and style = 2)
width = 50, # Progress bar width. Defaults to getOption("width")
char = "=") # Character used to create the bar
# Rotate each occurrence to palaeocoordinates with chronosphere::reconstruct.
# Done row-by-row because each occurrence can have a different target age.
# NOTE(review): per-row calls are slow; reconstruct() may accept vectorised
# input grouped by age -- worth confirming against the chronosphere docs.
for(i in 1:nrow(PARED)){
coords <- reconstruct(x = PARED[i, c("lng", "lat")], #coordinates of data
age = PARED$mid_ma[i], #age of data
model=pm, #plate model
dir = "./data/model/", #directory of plate model
#path.gplates="/Volumes/GPlates-2.2.0-Darwin-x86_64/",
cleanup = TRUE)
# assumes reconstruct() returns an object with "lng"/"lat" columns -- TODO
# confirm against chronosphere's documented return value
PARED$paleolng[i] <- coords[,c("lng")]
PARED$paleolat[i] <- coords[,c("lat")]
setTxtProgressBar(pb, i)
}
#write data
write.csv(PARED, "./data/occurrences/PARED_cleaned.csv", row.names = FALSE)
#-------------------------------------------------
#filter data on continents
# Keep only occurrences whose palaeocoordinates fall on emergent land: a row
# is retained when the stage-specific DEM raster has a non-NA value at its
# (P.Long, P.Lat) cell.
stages <- unique(PARED$interval_name)
#create empty dataframe
master <- data.frame()
#run for loop
for(i in stages){
tmp <- subset(PARED, interval_name == i)
# stage-specific digital elevation model (raster::raster)
DEM <- raster(paste("./data/enm/layers/", i, "/dem.asc", sep = ""))
# raster::extract -> data frame with one DEM value per occurrence
ext <- extract(x = DEM, y = tmp[,c("P.Long","P.Lat")], df = TRUE)
# row indices that have a DEM value, i.e. fall on land
ext <- which(!is.na(ext$dem))
tmp <- tmp[ext,]
master <- rbind.data.frame(master, tmp)
}
#write data
write.csv(master, "./data/occurrences/PARED_clip.csv", row.names = FALSE)
#assign data
PARED <- master
#spatially subsample data
# One record per grid cell per stage: rasterise occurrences onto a global
# grid of resolution `res` (defined in the project's ./R/options.R) and keep
# the centres of occupied cells.
r <- raster(res = res)
#create empty data frame
master <- data.frame()
#get unique stages of data
stages <- unique(PARED$interval_name)
#run for loop
for(i in stages){
interval_name <- i
df <- subset(PARED, interval_name == i)
# mark every cell that contains at least one occurrence
ras <- rasterize(x = df[,c("P.Long", "P.Lat")], y = r, field = 1)
# cell centres of occupied cells
pts_ras <- data.frame(rasterToPoints(ras))
pts_ras <- data.frame(pts_ras[,c("x","y")])
# re-attach the stage name
pts_ras <- cbind.data.frame(pts_ras, interval_name)
master <- rbind.data.frame(master, pts_ras)
}
#update column names
colnames(master) <- c("P.Long", "P.Lat", "interval_name")
#write data
write.csv(master, "./data/occurrences/PARED_subsampled.csv", row.names = FALSE)
#notification
beepr::beep(2)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shiny.R
\name{render_gt}
\alias{render_gt}
\title{A \pkg{gt} display table render function for use in Shiny}
\usage{
render_gt(expr, env = parent.frame(), quoted = FALSE,
outputArgs = list())
}
\arguments{
\item{expr}{an expression that creates a \code{gt} table object.}
\item{env}{the environment in which to evaluate the \code{expr}.}
\item{quoted}{is expr a quoted expression (with \code{quote()})? This is
useful if you want to save an expression in a variable.}
\item{outputArgs}{A list of arguments to be passed through to the implicit
call to \code{\link{gt_output}()} when \code{render_gt} is used in an
interactive R Markdown document.}
}
\description{
A \pkg{gt} display table render function for use in Shiny
}
\seealso{
\link{gt_output}()
Other Shiny functions: \code{\link{gt_output}}
}
\concept{Shiny functions}
| /man/render_gt.Rd | permissive | yyzeng/gt | R | false | true | 911 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shiny.R
\name{render_gt}
\alias{render_gt}
\title{A \pkg{gt} display table render function for use in Shiny}
\usage{
render_gt(expr, env = parent.frame(), quoted = FALSE,
outputArgs = list())
}
\arguments{
\item{expr}{an expression that creates a \code{gt} table object.}
\item{env}{the environment in which to evaluate the \code{expr}.}
\item{quoted}{is expr a quoted expression (with \code{quote()})? This is
useful if you want to save an expression in a variable.}
\item{outputArgs}{A list of arguments to be passed through to the implicit
call to \code{\link{gt_output}()} when \code{render_gt} is used in an
interactive R Markdown document.}
}
\description{
A \pkg{gt} display table render function for use in Shiny
}
\seealso{
\link{gt_output}()
Other Shiny functions: \code{\link{gt_output}}
}
\concept{Shiny functions}
|
# Platelet-count modelling with penalised regression (glmnet + parcor).
library(glmnet)
library(broom)
library(parcor)  # library() rather than require(): fail loudly if missing
# Design matrix: all predictors (drop the response, column 44).
x <- data.matrix(platelets_NoCAD_Numeric[, -44])
y <- platelets_NoCAD_Numeric$Platelet_1000.mL
y.log <- log(platelets_NoCAD_Numeric$Platelet_1000.mL)
# cross-validated LASSO fit using GLMNET and leave-one-out CV (n = 85)
set.seed(1)
cvfit <- cv.glmnet(x = x, y = y, family = "gaussian", alpha = 1, nfolds = 85)
model.lasso <- cvfit$glmnet.fit
lambda.lasso <- cvfit$lambda.min
coef(cvfit, cvfit$lambda.min)
# LASSO on the log-transformed response (10-fold CV)
cvfit.log <- cv.glmnet(x = x, y = y.log, family = "gaussian", alpha = 1, nfolds = 10)
model.lasso.log <- cvfit.log$glmnet.fit
# BUG FIX: was `cvfit$lambda.min`, i.e. the lambda of the *untransformed*
# lasso fit above; the log-model's own CV-optimal lambda lives in cvfit.log.
lambda.lasso.log <- cvfit.log$lambda.min
# Ridge regression (alpha = 0), again leave-one-out CV; cvfit is reused.
cvfit <- cv.glmnet(x = x, y = y, family = "gaussian", alpha = 0, nfolds = 85)
model.ridge <- cvfit$glmnet.fit
lambda.ridge <- cvfit$lambda.min
coef(cvfit, 5.26)  # coefficients at lambda = 5.26 (see note at end of file)
coef(cvfit, cvfit$lambda.min)
# (Intercept) 439.401508083
# Age -1.268798208
# Height -0.215169538
# Sex 12.654680974
# Current_Smoker -12.346828591
# Airway_Disease -8.677982402
# Pulse_rate_PPM -0.181281384
# Nonanginal_Chest_Pain 0.947833032
# Creatine_mg.dL -8.628501496
# Hemoglobin_g.dL -5.207754592
# Potassium_mEq.lit -10.562789284
# White_Blood_Cells.mL 0.002909197
# Platelet count goes down with current smoking... this is not the direction of effect found by
# other studies. Is it confounded by sex? Surely not, as it's in the model
# So what is going on?
# Are people who smoke in this data set somehow exceptional?
# Impossible to say without knowing what the selection criteria were
# leave-one-out (k = 85) cross-validated lasso path via parcor::mylars
model.lars <- parcor::mylars(X=x, y=y, k = 85)
# CV error vs lambda; dashed grey vertical line marks the optimal lambda
plot(model.lars$lambda,model.lars$cv,type='l',xlab='lambda',ylab='SSR.n')
abline(v=model.lars$lambda.opt,lwd=2,lty=2,col='darkgray')
# above gives lambda of 5.26
| /Platelets/lasso.r | no_license | blynock/r-studies | R | false | false | 1,906 | r | library(glmnet)
# Platelet-count modelling with penalised regression (glmnet + parcor).
library(broom)
library(parcor)  # library() rather than require(): fail loudly if missing
# Design matrix: all predictors (drop the response, column 44).
x <- data.matrix(platelets_NoCAD_Numeric[, -44])
y <- platelets_NoCAD_Numeric$Platelet_1000.mL
y.log <- log(platelets_NoCAD_Numeric$Platelet_1000.mL)
# cross-validated LASSO fit using GLMNET and leave-one-out CV (n = 85)
set.seed(1)
cvfit <- cv.glmnet(x = x, y = y, family = "gaussian", alpha = 1, nfolds = 85)
model.lasso <- cvfit$glmnet.fit
lambda.lasso <- cvfit$lambda.min
coef(cvfit, cvfit$lambda.min)
# LASSO on the log-transformed response (10-fold CV)
cvfit.log <- cv.glmnet(x = x, y = y.log, family = "gaussian", alpha = 1, nfolds = 10)
model.lasso.log <- cvfit.log$glmnet.fit
# BUG FIX: was `cvfit$lambda.min`, i.e. the lambda of the *untransformed*
# lasso fit above; the log-model's own CV-optimal lambda lives in cvfit.log.
lambda.lasso.log <- cvfit.log$lambda.min
# Ridge regression (alpha = 0), again leave-one-out CV; cvfit is reused.
cvfit <- cv.glmnet(x = x, y = y, family = "gaussian", alpha = 0, nfolds = 85)
model.ridge <- cvfit$glmnet.fit
lambda.ridge <- cvfit$lambda.min
coef(cvfit, 5.26)  # coefficients at lambda = 5.26 (see note at end of file)
coef(cvfit, cvfit$lambda.min)
# (Intercept) 439.401508083
# Age -1.268798208
# Height -0.215169538
# Sex 12.654680974
# Current_Smoker -12.346828591
# Airway_Disease -8.677982402
# Pulse_rate_PPM -0.181281384
# Nonanginal_Chest_Pain 0.947833032
# Creatine_mg.dL -8.628501496
# Hemoglobin_g.dL -5.207754592
# Potassium_mEq.lit -10.562789284
# White_Blood_Cells.mL 0.002909197
# Platelet count goes down with current smoking... this is not the direction of effect found by
# other studies. Is it confounded by sex? Surely not, as it's in the model
# So what is going on?
# Are people who smoke in this data set somehow exceptional?
# Impossible to say without knowing what the selection criteria were
# leave-one-out (k = 85) cross-validated lasso path via parcor::mylars
model.lars <- parcor::mylars(X=x, y=y, k = 85)
# CV error vs lambda; dashed grey vertical line marks the optimal lambda
plot(model.lars$lambda,model.lars$cv,type='l',xlab='lambda',ylab='SSR.n')
abline(v=model.lars$lambda.opt,lwd=2,lty=2,col='darkgray')
# above gives lambda of 5.26
|
# Build the per-protein interaction-degree table from the physical
# interaction edge list (columns INTERACTOR_A / INTERACTOR_B).
plant <- read.table('~/Documents/arabidopsis_thaliana_physical')
plant <- data.frame(plant$INTERACTOR_A, plant$INTERACTOR_B)
plant_uniq <- unique(plant) # drop duplicate edges
library(plyr)
# occurrence counts of each protein in the A and B columns
col1 <- count(plant_uniq, 'plant.INTERACTOR_A')
col2 <- count(plant_uniq, 'plant.INTERACTOR_B')
colnames(col1) <- c("protein", "freq")
colnames(col2) <- c("protein", "freq")
# BUG FIX: the original inner merge dropped every protein that appears only
# as interactor A or only as interactor B, so their node degree was lost.
# A full outer merge keeps them; missing counts become 0.
colBoth <- merge(col1, col2, by = c("protein"), all = TRUE)
colBoth$freq.x[is.na(colBoth$freq.x)] <- 0
colBoth$freq.y[is.na(colBoth$freq.y)] <- 0
# node degree = appearances as A + appearances as B
colBoth$count <- (colBoth$freq.x + colBoth$freq.y)
#Find Top 25 Physical
# data.frame() with unnamed selections yields columns colBoth.protein /
# colBoth.count, hence the dotted names used below
top25 <- data.frame(colBoth$protein, colBoth$count)
top25 <- top25[order(-top25$colBoth.count),]
list25 <- data.frame(top25$colBoth.protein[1:25], top25$colBoth.count[1:25])
write.table(list25, "~/Desktop/top_plant.txt", sep="\t")
#Log-log plotting
# degree distribution: number of nodes per connection count
plant_interactions <- data.frame(colBoth$protein, colBoth$count)
total_count <- data.frame(plant_interactions$colBoth.count)
plant_dist <- count(total_count, 'plant_interactions.colBoth.count')
plot(plant_dist$plant_interactions.colBoth.count, (plant_dist$freq), log="xy", xlim=c(1,max(plant_dist$plant_interactions.colBoth.count)), ylim=c(1,max(plant_dist$freq)), main="plant Node Degree Distribution (Log Scale)", xlab="Number of Connections", ylab="Number of Nodes", pch=19)
#Linear Fitting
# power-law check: linear fit of log(frequency) on log(degree)
plot(log(plant_dist$freq) ~ log(plant_dist$plant_interactions.colBoth.count), main="plant Node Degree Distribution (Log Scale)", xlab="Number of Connections", ylab="Number of Nodes")
fit <- lm(log(plant_dist$freq) ~ log(plant_dist$plant_interactions.colBoth.count))
coef(fit)
abline(coef(fit)[1], coef(fit)[2]) | /Scripts/ArabidopsisProcessing.R | no_license | nrflynn2/EFR | R | false | false | 1,521 | r | plant <- read.table('~/Documents/arabidopsis_thaliana_physical')
# Build the per-protein interaction-degree table from the physical
# interaction edge list (columns INTERACTOR_A / INTERACTOR_B).
plant <- data.frame(plant$INTERACTOR_A, plant$INTERACTOR_B)
plant_uniq <- unique(plant) # drop duplicate edges
library(plyr)
# occurrence counts of each protein in the A and B columns
col1 <- count(plant_uniq, 'plant.INTERACTOR_A')
col2 <- count(plant_uniq, 'plant.INTERACTOR_B')
colnames(col1) <- c("protein", "freq")
colnames(col2) <- c("protein", "freq")
# BUG FIX: the original inner merge dropped every protein that appears only
# as interactor A or only as interactor B, so their node degree was lost.
# A full outer merge keeps them; missing counts become 0.
colBoth <- merge(col1, col2, by = c("protein"), all = TRUE)
colBoth$freq.x[is.na(colBoth$freq.x)] <- 0
colBoth$freq.y[is.na(colBoth$freq.y)] <- 0
# node degree = appearances as A + appearances as B
colBoth$count <- (colBoth$freq.x + colBoth$freq.y)
#Find Top 25 Physical
# data.frame() with unnamed selections yields columns colBoth.protein /
# colBoth.count, hence the dotted names used below
top25 <- data.frame(colBoth$protein, colBoth$count)
top25 <- top25[order(-top25$colBoth.count),]
list25 <- data.frame(top25$colBoth.protein[1:25], top25$colBoth.count[1:25])
write.table(list25, "~/Desktop/top_plant.txt", sep="\t")
#Log-log plotting
# degree distribution: number of nodes per connection count
plant_interactions <- data.frame(colBoth$protein, colBoth$count)
total_count <- data.frame(plant_interactions$colBoth.count)
plant_dist <- count(total_count, 'plant_interactions.colBoth.count')
plot(plant_dist$plant_interactions.colBoth.count, (plant_dist$freq), log="xy", xlim=c(1,max(plant_dist$plant_interactions.colBoth.count)), ylim=c(1,max(plant_dist$freq)), main="plant Node Degree Distribution (Log Scale)", xlab="Number of Connections", ylab="Number of Nodes", pch=19)
#Linear Fitting
# power-law check: linear fit of log(frequency) on log(degree)
plot(log(plant_dist$freq) ~ log(plant_dist$plant_interactions.colBoth.count), main="plant Node Degree Distribution (Log Scale)", xlab="Number of Connections", ylab="Number of Nodes")
fit <- lm(log(plant_dist$freq) ~ log(plant_dist$plant_interactions.colBoth.count))
coef(fit)
abline(coef(fit)[1], coef(fit)[2]) |
#' @include utilities.R ggplot2.customize.R
NULL
#' Easy stripchart plot with R package ggplot2
#'
#' @param data data.frame or a numeric vector. Columns are variables and rows
#' are observations.
#' @param xName The name of column containing x variable (i.e groups). Default
#' value is NULL.
#' @param yName The name of column containing y variable. If yName=NULL, data
#' should be a numeric vector.
#' @param groupName The name of column containing group variable. This variable
#' is used to color plot according to the group.
#' @param position The position adjustment to use for overlappling points.
#' Default value is position_jitter(0.2).
#' @param addMean if TRUE, the mean point is added on the plot for each group.
#' Default value is FALSE.
#' @param meanPointShape The shape of mean point.
#' @param meanPointSize The size of mean point
#' @param meanPointColor Border color of the mean point. Default value is
#' "black".
#' @param meanPointFill Fill color of mean point. This parameter is used only
#' when meanPointShape=21 to 25. Default value is "blue"
#' @param addBoxplot If TRUE, boxplot is added on the dotplot. Default value is
#' FALSE.
#' @param boxplotFill Fill color of the boxplot. Default value is white.
#' @param boxplotColor Boxplot line color. Default value is black.
#' @param boxplotLineWeight Boxplot line weight. Default value is 0.5.
#' @param groupColors Color of groups. groupColors should have the same length
#' as groups.
#' @param brewerPalette This can be also used to indicate group colors. In this
#' case the parameter groupColors should be NULL. e.g: brewerPalette="Paired".
#' @param \dots Other arguments passed on to ggplot2.customize custom function
#' or to geom_dotplot functions from ggplot2 package.
#' @return a ggplot
#' @author Alboukadel Kassambara <alboukadel.kassambara@@gmail.com>
#' @seealso \code{\link{ggplot2.boxplot}, \link{ggplot2.violinplot},
#' \link{ggplot2.dotplot}, \link{ggplot2.density}, \link{ggplot2.histogram},
#' \link{ggplot2.customize}}
#' @references http://www.sthda.com
#' @examples
#'
#' df <- ToothGrowth
#' ggplot2.stripchart(data=df, xName='dose',yName='len',
#' mainTitle="Plot of length according\n to the dose",
#' xtitle="Dose (mg)", ytitle="Length")
#'
#' #Or use this
#' plot<-ggplot2.stripchart(data=df, xName='dose',yName='len')
#' plot<-ggplot2.customize(plot, mainTitle="Plot of length according\n to the dose",
#' xtitle="Dose (mg)", ytitle="Length")
#' print(plot)
#' @name ggplot2.stripchart
#' @rdname ggplot2.stripchart
#' @export ggplot2.stripchart
ggplot2.stripchart <- function(data, xName = NULL, yName = NULL, groupName = NULL,
                               position = position_jitter(0.2),
                               addMean = FALSE, meanPointShape = 5, meanPointSize = 4,
                               meanPointColor = "black", meanPointFill = "blue",
                               addBoxplot = FALSE, boxplotFill = "white",
                               boxplotColor = "black", boxplotLineWeight = 0.5,
                               groupColors = NULL, brewerPalette = NULL, ...)
{
  # Boxplot-specific parameters (e.g. notch) extracted from `...`.
  bxpms <- .boxplot_params(...)
  # If yName is missing or NULL, `data` itself must be a numeric vector.
  # FIX: use the scalar, short-circuiting `&&` in this scalar if() condition
  # instead of the vectorised `&`.
  if (is.null(yName) && !is.numeric(data)) {
    stop("yName is missing or NULL. In this case data should be a numeric vector")
  } else if (is.numeric(data)) {
    # data is a numeric vector: wrap it as a single-group data set
    data <- cbind(y = data, x = rep(1, length(data)))
    xName <- "x"
    yName <- "y"
  }
  # xName missing or NULL => a single stripchart of the whole variable;
  # bind a constant grouping column to the data
  if (is.null(xName)) {
    data <- cbind(data, x = rep(1, nrow(data)))
    xName <- "x"
  }
  # data
  data <- data.frame(data)
  data[, xName] <- factor(data[, xName])
  if (is.null(groupName)) {
    p <- ggplot(data = data, aes_string(x = xName, y = yName))
  } else {
    data[, groupName] <- factor(data[, groupName]) # transform groupName to factor
    p <- ggplot(data = data,
                aes_string(x = xName, y = yName, fill = groupName,
                           shape = groupName, colour = groupName))
  }
  # Optionally underlay a boxplot; outliers are hidden because the jittered
  # points already show every observation.
  if (addBoxplot) {
    if (is.null(boxplotFill)) {
      p <- p + geom_boxplot(colour = boxplotColor, position = position_dodge(0.8),
                            size = boxplotLineWeight, outlier.shape = NA,
                            notch = bxpms$notch)
    } else {
      p <- p + geom_boxplot(fill = boxplotFill, colour = boxplotColor,
                            position = position_dodge(0.8),
                            size = boxplotLineWeight, outlier.shape = NA,
                            notch = bxpms$notch)
    }
  }
  # Stripchart points (jittered); standard point parameters come from `...`.
  spms <- .standard_params(...)
  if (!is.null(groupName)) {
    p <- p + geom_jitter(position = position)
  } else {
    p <- p + geom_jitter(position = position, color = spms$color,
                         shape = spms$shape, fill = spms$fill)
  }
  # Optionally mark each group's mean.
  # NOTE(review): fun.y is the pre-ggplot2-3.3 spelling (deprecated in newer
  # ggplot2); kept for compatibility with the versions this package targets.
  if (addMean) {
    p <- p + stat_summary(fun.y = mean, geom = 'point', shape = meanPointShape,
                          size = meanPointSize, colour = meanPointColor,
                          fill = meanPointFill)
  }
  # Group colours: explicit values take precedence over a brewer palette.
  if (!is.null(groupColors)) {
    p <- p + scale_fill_manual(values = groupColors)
    p <- p + scale_colour_manual(values = groupColors)
  } else if (!is.null(brewerPalette)) {
    p <- p + scale_fill_brewer(palette = brewerPalette)
    p <- p + scale_colour_brewer(palette = brewerPalette, guide = "none")
  }
  # ggplot2.customize : titles, colors, background, legend, ....
  p <- ggplot2.customize(p, ...)
  p
}
#' @rdname ggplot2.stripchart
#' @export
ggplot2.jitter<-function(...)
{
# Alias for ggplot2.stripchart(); all arguments are forwarded unchanged.
ggplot2.stripchart(...)
}
| /easyGgplot2/R/ggplot2.stripchart.R | no_license | nkapoor11/R-work | R | false | false | 5,366 | r | #' @include utilities.R ggplot2.customize.R
NULL
#' Easy stripchart plot with R package ggplot2
#'
#' @param data data.frame or a numeric vector. Columns are variables and rows
#' are observations.
#' @param xName The name of column containing x variable (i.e groups). Default
#' value is NULL.
#' @param yName The name of column containing y variable. If yName=NULL, data
#' should be a numeric vector.
#' @param groupName The name of column containing group variable. This variable
#' is used to color plot according to the group.
#' @param position The position adjustment to use for overlappling points.
#' Default value is position_jitter(0.2).
#' @param addMean if TRUE, the mean point is added on the plot for each group.
#' Default value is FALSE.
#' @param meanPointShape The shape of mean point.
#' @param meanPointSize The size of mean point
#' @param meanPointColor Border color of the mean point. Default value is
#' "black".
#' @param meanPointFill Fill color of mean point. This parameter is used only
#' when meanPointShape=21 to 25. Default value is "blue"
#' @param addBoxplot If TRUE, boxplot is added on the dotplot. Default value is
#' FALSE.
#' @param boxplotFill Fill color of the boxplot. Default value is white.
#' @param boxplotColor Boxplot line color. Default value is black.
#' @param boxplotLineWeight Boxplot line weight. Default value is 0.5.
#' @param groupColors Color of groups. groupColors should have the same length
#' as groups.
#' @param brewerPalette This can be also used to indicate group colors. In this
#' case the parameter groupColors should be NULL. e.g: brewerPalette="Paired".
#' @param \dots Other arguments passed on to ggplot2.customize custom function
#' or to geom_dotplot functions from ggplot2 package.
#' @return a ggplot
#' @author Alboukadel Kassambara <alboukadel.kassambara@@gmail.com>
#' @seealso \code{\link{ggplot2.boxplot}, \link{ggplot2.violinplot},
#' \link{ggplot2.dotplot}, \link{ggplot2.density}, \link{ggplot2.histogram},
#' \link{ggplot2.customize}}
#' @references http://www.sthda.com
#' @examples
#'
#' df <- ToothGrowth
#' ggplot2.stripchart(data=df, xName='dose',yName='len',
#' mainTitle="Plot of length according\n to the dose",
#' xtitle="Dose (mg)", ytitle="Length")
#'
#' #Or use this
#' plot<-ggplot2.stripchart(data=df, xName='dose',yName='len')
#' plot<-ggplot2.customize(plot, mainTitle="Plot of length according\n to the dose",
#' xtitle="Dose (mg)", ytitle="Length")
#' print(plot)
#' @name ggplot2.stripchart
#' @rdname ggplot2.stripchart
#' @export ggplot2.stripchart
ggplot2.stripchart <- function(data, xName = NULL, yName = NULL, groupName = NULL,
                               position = position_jitter(0.2),
                               addMean = FALSE, meanPointShape = 5, meanPointSize = 4,
                               meanPointColor = "black", meanPointFill = "blue",
                               addBoxplot = FALSE, boxplotFill = "white",
                               boxplotColor = "black", boxplotLineWeight = 0.5,
                               groupColors = NULL, brewerPalette = NULL, ...)
{
  # Boxplot-specific parameters (e.g. notch) extracted from `...`.
  bxpms <- .boxplot_params(...)
  # If yName is missing or NULL, `data` itself must be a numeric vector.
  # FIX: use the scalar, short-circuiting `&&` in this scalar if() condition
  # instead of the vectorised `&`.
  if (is.null(yName) && !is.numeric(data)) {
    stop("yName is missing or NULL. In this case data should be a numeric vector")
  } else if (is.numeric(data)) {
    # data is a numeric vector: wrap it as a single-group data set
    data <- cbind(y = data, x = rep(1, length(data)))
    xName <- "x"
    yName <- "y"
  }
  # xName missing or NULL => a single stripchart of the whole variable;
  # bind a constant grouping column to the data
  if (is.null(xName)) {
    data <- cbind(data, x = rep(1, nrow(data)))
    xName <- "x"
  }
  # data
  data <- data.frame(data)
  data[, xName] <- factor(data[, xName])
  if (is.null(groupName)) {
    p <- ggplot(data = data, aes_string(x = xName, y = yName))
  } else {
    data[, groupName] <- factor(data[, groupName]) # transform groupName to factor
    p <- ggplot(data = data,
                aes_string(x = xName, y = yName, fill = groupName,
                           shape = groupName, colour = groupName))
  }
  # Optionally underlay a boxplot; outliers are hidden because the jittered
  # points already show every observation.
  if (addBoxplot) {
    if (is.null(boxplotFill)) {
      p <- p + geom_boxplot(colour = boxplotColor, position = position_dodge(0.8),
                            size = boxplotLineWeight, outlier.shape = NA,
                            notch = bxpms$notch)
    } else {
      p <- p + geom_boxplot(fill = boxplotFill, colour = boxplotColor,
                            position = position_dodge(0.8),
                            size = boxplotLineWeight, outlier.shape = NA,
                            notch = bxpms$notch)
    }
  }
  # Stripchart points (jittered); standard point parameters come from `...`.
  spms <- .standard_params(...)
  if (!is.null(groupName)) {
    p <- p + geom_jitter(position = position)
  } else {
    p <- p + geom_jitter(position = position, color = spms$color,
                         shape = spms$shape, fill = spms$fill)
  }
  # Optionally mark each group's mean.
  # NOTE(review): fun.y is the pre-ggplot2-3.3 spelling (deprecated in newer
  # ggplot2); kept for compatibility with the versions this package targets.
  if (addMean) {
    p <- p + stat_summary(fun.y = mean, geom = 'point', shape = meanPointShape,
                          size = meanPointSize, colour = meanPointColor,
                          fill = meanPointFill)
  }
  # Group colours: explicit values take precedence over a brewer palette.
  if (!is.null(groupColors)) {
    p <- p + scale_fill_manual(values = groupColors)
    p <- p + scale_colour_manual(values = groupColors)
  } else if (!is.null(brewerPalette)) {
    p <- p + scale_fill_brewer(palette = brewerPalette)
    p <- p + scale_colour_brewer(palette = brewerPalette, guide = "none")
  }
  # ggplot2.customize : titles, colors, background, legend, ....
  p <- ggplot2.customize(p, ...)
  p
}
#' @rdname ggplot2.stripchart
#' @export
ggplot2.jitter<-function(...)
{
# Alias for ggplot2.stripchart(); all arguments are forwarded unchanged.
ggplot2.stripchart(...)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_GeoTIFF.R
\name{write_GeoTIFF}
\alias{write_GeoTIFF}
\title{write_GeoTIFF}
\usage{
write_GeoTIFF(raster_obj, ..., suffix = "names", overwrite = TRUE)
}
\arguments{
\item{raster_obj}{A \code{\link[raster:Raster-class]{Raster*}} object}
\item{...}{passed to \code{\link[=write_raster]{write_raster()}}}
}
\value{
result of \code{write_raster()}
}
\description{
Thin wrapper around \code{\link[=write_raster]{write_raster()}}
}
\seealso{
\itemize{
\item \code{\link[=write_raster]{write_raster()}}
}
}
| /man/write_GeoTIFF.Rd | no_license | BAAQMD/geotools | R | false | true | 576 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_GeoTIFF.R
\name{write_GeoTIFF}
\alias{write_GeoTIFF}
\title{write_GeoTIFF}
\usage{
write_GeoTIFF(raster_obj, ..., suffix = "names", overwrite = TRUE)
}
\arguments{
\item{raster_obj}{A \code{\link[raster:Raster-class]{Raster*}} object}
\item{...}{passed to \code{\link[=write_raster]{write_raster()}}}
}
\value{
result of \code{write_raster()}
}
\description{
Thin wrapper around \code{\link[=write_raster]{write_raster()}}
}
\seealso{
\itemize{
\item \code{\link[=write_raster]{write_raster()}}
}
}
|
## Load data
# household_power_consumption.txt is ";"-separated, hence read.csv2().
# Columns arrive as text and are coerced with as.numeric() below, so any "?"
# missing-value codes become NA (with a coercion warning).
household_data <- read.csv2(file = "./data/household_power_consumption.txt")
# keep only the two target days: 1-2 February 2007
inds <- household_data$Date == "1/2/2007" | household_data$Date == "2/2/2007"
target_data <- household_data[inds,]
head(target_data)
tail(target_data)
rm(household_data) # free the (large) full data set
# combine Date + Time into timestamps for the x axis
dates <- strptime(paste(target_data$Date,target_data$Time), "%d/%m/%Y %H:%M:%S")
# Plot 4: 2x2 panel of power / voltage / sub-metering time series
db1 <- as.numeric(matrix(target_data$Global_active_power))
db2 <- as.numeric(matrix(target_data$Voltage))
db4 <- as.numeric(matrix(target_data$Global_reactive_power))
temp1 <- as.numeric(matrix(target_data$Sub_metering_1))
temp2 <- as.numeric(matrix(target_data$Sub_metering_2))
temp3 <- as.numeric(matrix(target_data$Sub_metering_3))
name1 <- "Sub_metering_1"
name2 <- "Sub_metering_2"
name3 <- "Sub_metering_3"
par(mfrow = c(2,2), mar = c(2,2,2,2))
# subplot 1: global active power
plot(dates, db1, ylab = "Global Active Power", type = "l")
# subplot 2: voltage
plot(dates, db2, xlab = "datetime", ylab = "Voltage", type = "l")
# subplot 3: the three sub-metering series on one panel
plot(dates, temp1, ylab = "Energy sub metering", type = "n")
points(dates, temp1, ylab = "Energy sub metering", type = "l", col = "green")
points(dates, temp2, ylab = "Energy sub metering", type = "l", col = "red")
points(dates, temp3, ylab = "Energy sub metering", type = "l", col = "blue")
# BUG FIX: the legend colours were c("green", "blue", "red"), which swapped
# the labels of Sub_metering_2 (drawn red) and Sub_metering_3 (drawn blue).
legend("topright", legend = c(name1, name2, name3),
       col = c("green", "red", "blue"),
       lty = c(1,1,1))
# subplot 4: global reactive power
plot(dates, db4, xlab = "datetime", ylab = "Global Reactive Power", type = "l")
## Save plot: copy the screen device to a PNG file
dev.copy(png, file = "plot4.png")
dev.off()
| /plot4.R | no_license | wangben9/ExData_Plotting1 | R | false | false | 1,532 | r | ## Load data
# household_power_consumption.txt is ";"-separated, hence read.csv2().
# Columns arrive as text and are coerced with as.numeric() below, so any "?"
# missing-value codes become NA (with a coercion warning).
household_data <- read.csv2(file = "./data/household_power_consumption.txt")
# keep only the two target days: 1-2 February 2007
inds <- household_data$Date == "1/2/2007" | household_data$Date == "2/2/2007"
target_data <- household_data[inds,]
head(target_data)
tail(target_data)
rm(household_data) # free the (large) full data set
# combine Date + Time into timestamps for the x axis
dates <- strptime(paste(target_data$Date,target_data$Time), "%d/%m/%Y %H:%M:%S")
# Plot 4: 2x2 panel of power / voltage / sub-metering time series
db1 <- as.numeric(matrix(target_data$Global_active_power))
db2 <- as.numeric(matrix(target_data$Voltage))
db4 <- as.numeric(matrix(target_data$Global_reactive_power))
temp1 <- as.numeric(matrix(target_data$Sub_metering_1))
temp2 <- as.numeric(matrix(target_data$Sub_metering_2))
temp3 <- as.numeric(matrix(target_data$Sub_metering_3))
name1 <- "Sub_metering_1"
name2 <- "Sub_metering_2"
name3 <- "Sub_metering_3"
par(mfrow = c(2,2), mar = c(2,2,2,2))
# subplot 1: global active power
plot(dates, db1, ylab = "Global Active Power", type = "l")
# subplot 2: voltage
plot(dates, db2, xlab = "datetime", ylab = "Voltage", type = "l")
# subplot 3: the three sub-metering series on one panel
plot(dates, temp1, ylab = "Energy sub metering", type = "n")
points(dates, temp1, ylab = "Energy sub metering", type = "l", col = "green")
points(dates, temp2, ylab = "Energy sub metering", type = "l", col = "red")
points(dates, temp3, ylab = "Energy sub metering", type = "l", col = "blue")
# BUG FIX: the legend colours were c("green", "blue", "red"), which swapped
# the labels of Sub_metering_2 (drawn red) and Sub_metering_3 (drawn blue).
legend("topright", legend = c(name1, name2, name3),
       col = c("green", "red", "blue"),
       lty = c(1,1,1))
# subplot 4: global reactive power
plot(dates, db4, xlab = "datetime", ylab = "Global Reactive Power", type = "l")
## Save plot: copy the screen device to a PNG file
dev.copy(png, file = "plot4.png")
dev.off()
|
# and matrix-representation specific functions:
# - computeSigmaHat
# - computeMuHat
# - derivative.F
# initital version: YR 2011-01-21: LISREL stuff
# updates: YR 2011-12-01: group specific extraction
# YR 2012-05-17: thresholds
representation.LISREL <- function(partable=NULL, target=NULL,
                                  extra=FALSE, remove.nonexisting=TRUE) {
    # Map each row of a lavaan parameter table onto a cell (matrix name,
    # row, col) of the all-y LISREL matrix representation (lambda, theta,
    # psi, beta, nu, alpha, tau, delta, gamma, gw).
    #
    # partable: lavaan parameter table (lhs/op/rhs/group columns)
    # target:   the table whose rows are mapped; defaults to partable
    # extra:    if TRUE, also attach per-group matrix meta-data as attributes
    # remove.nonexisting: currently has no effect — the pruning code near the
    #                     bottom is commented out

    # prepare target list
    if(is.null(target)) target <- partable
    # prepare output: one (mat, row, col) triplet per target row
    N <- length(target$lhs)
    tmp.mat <- character(N); tmp.row <- integer(N); tmp.col <- integer(N)
    # global settings
    meanstructure <- any(partable$op == "~1")
    categorical <- any(partable$op == "|")
    # NOTE(review): group.w.free is computed here but never used below
    group.w.free <- any(partable$lhs == "group" & partable$op == "%")
    # categorical implies a gamma (regression on exogenous x) matrix
    gamma <- categorical
    # number of groups
    ngroups <- max(partable$group)
    ov.dummy.names.nox <- vector("list", ngroups)
    ov.dummy.names.x <- vector("list", ngroups)
    if(extra) {
        # per-group meta-data about the model matrices
        REP.mmNames <- vector("list", ngroups)
        REP.mmNumber <- vector("list", ngroups)
        REP.mmRows <- vector("list", ngroups)
        REP.mmCols <- vector("list", ngroups)
        REP.mmDimNames <- vector("list", ngroups)
        REP.mmSymmetric <- vector("list", ngroups)
    }
    for(g in 1:ngroups) {
        # info from user model per group
        if(gamma) {
            ov.names <- vnames(partable, "ov.nox", group=g)
        } else {
            ov.names <- vnames(partable, "ov", group=g)
        }
        nvar <- length(ov.names)
        lv.names <- vnames(partable, "lv", group=g); nfac <- length(lv.names)
        ov.th <- vnames(partable, "th", group=g); nth <- length(ov.th)
        ov.names.x <- vnames(partable, "ov.x",group=g); nexo <- length(ov.names.x)
        ov.names.nox <- vnames(partable, "ov.nox",group=g)
        # in this representation, we need to create 'phantom/dummy' latent
        # variables for all `x' and `y' variables not in lv.names
        # (only y if categorical)
        # regression dummies: observed variables appearing in ~ / <~ rows
        if(categorical) {
            tmp.names <-
                unique( partable$lhs[(partable$op == "~" |
                                      partable$op == "<~") &
                                     partable$group == g] )
        } else {
            tmp.names <-
                unique( c(partable$lhs[(partable$op == "~" |
                                        partable$op == "<~") &
                                       partable$group == g],
                          partable$rhs[(partable$op == "~" |
                                        partable$op == "<~") &
                                       partable$group == g]) )
        }
        dummy.names1 <- tmp.names[ !tmp.names %in% lv.names ]
        # covariances involving dummies pull in further variables
        dummy.cov.idx <- which(partable$op == "~~" & partable$group == g &
                               (partable$lhs %in% dummy.names1 |
                                partable$rhs %in% dummy.names1))
        dummy.names2 <- unique( c(partable$lhs[dummy.cov.idx],
                                  partable$rhs[dummy.cov.idx]) )
        # collect all dummy variables
        dummy.names <- unique(c(dummy.names1, dummy.names2))
        if(length(dummy.names)) {
            # make sure order is the same as ov.names
            ov.dummy.names.nox[[g]] <-
                ov.names.nox[ ov.names.nox %in% dummy.names ]
            ov.dummy.names.x[[g]] <-
                ov.names.x[ ov.names.x %in% dummy.names ]
            # combine them, make sure order is identical to ov.names
            tmp <- ov.names[ ov.names %in% dummy.names ]
            # extend lv.names: dummies are appended AFTER the regular lv's
            lv.names <- c(lv.names, tmp)
            nfac <- length(lv.names)
            # add 'dummy' =~ entries
            # NOTE(review): dummy.mat is assigned but never used afterwards
            dummy.mat <- rep("lambda", length(dummy.names))
        } else {
            ov.dummy.names.nox[[g]] <- character(0)
            ov.dummy.names.x[[g]] <- character(0)
        }
        # 1a. "=~" regular indicators -> lambda[ov, lv]
        idx <- which(target$group == g &
                     target$op == "=~" & !(target$rhs %in% lv.names))
        tmp.mat[idx] <- "lambda"
        tmp.row[idx] <- match(target$rhs[idx], ov.names)
        tmp.col[idx] <- match(target$lhs[idx], lv.names)
        # 1b. "=~" regular higher-order lv indicators -> beta[lv, lv]
        idx <- which(target$group == g &
                     target$op == "=~" & !(target$rhs %in% ov.names))
        tmp.mat[idx] <- "beta"
        tmp.row[idx] <- match(target$rhs[idx], lv.names)
        tmp.col[idx] <- match(target$lhs[idx], lv.names)
        # 1c. "=~" indicators that are both in ov and lv (dummies) -> beta
        idx <- which(target$group == g &
                     target$op == "=~" & target$rhs %in% ov.names
                     & target$rhs %in% lv.names)
        tmp.mat[idx] <- "beta"
        tmp.row[idx] <- match(target$rhs[idx], lv.names)
        tmp.col[idx] <- match(target$lhs[idx], lv.names)
        # 2. "~" regressions
        if(categorical) {
            # gamma: regressions on exogenous x
            idx <- which(target$rhs %in% ov.names.x &
                         target$group == g & (target$op == "~" |
                                              target$op == "<~") )
            tmp.mat[idx] <- "gamma"
            tmp.row[idx] <- match(target$lhs[idx], lv.names)
            tmp.col[idx] <- match(target$rhs[idx], ov.names.x)
            # beta: all remaining regressions
            idx <- which(!target$rhs %in% ov.names.x &
                         target$group == g & (target$op == "~" |
                                              target$op == "<~") )
            tmp.mat[idx] <- "beta"
            tmp.row[idx] <- match(target$lhs[idx], lv.names)
            tmp.col[idx] <- match(target$rhs[idx], lv.names)
        } else {
            idx <- which(target$group == g & (target$op == "~" |
                                              target$op == "<~") )
            tmp.mat[idx] <- "beta"
            tmp.row[idx] <- match(target$lhs[idx], lv.names)
            tmp.col[idx] <- match(target$rhs[idx], lv.names)
        }
        # 3a. "~~" ov -> theta
        idx <- which(target$group == g &
                     target$op == "~~" & !(target$lhs %in% lv.names))
        tmp.mat[idx] <- "theta"
        tmp.row[idx] <- match(target$lhs[idx], ov.names)
        tmp.col[idx] <- match(target$rhs[idx], ov.names)
        # 3b. "~~" lv -> psi
        idx <- which(target$group == g &
                     target$op == "~~" & target$rhs %in% lv.names)
        tmp.mat[idx] <- "psi"
        tmp.row[idx] <- match(target$lhs[idx], lv.names)
        tmp.col[idx] <- match(target$rhs[idx], lv.names)
        # 4a. "~1" ov -> nu (column vector; col is always 1)
        idx <- which(target$group == g &
                     target$op == "~1" & !(target$lhs %in% lv.names))
        tmp.mat[idx] <- "nu"
        tmp.row[idx] <- match(target$lhs[idx], ov.names)
        tmp.col[idx] <- 1L
        # 4b. "~1" lv -> alpha (column vector)
        idx <- which(target$group == g &
                     target$op == "~1" & target$lhs %in% lv.names)
        tmp.mat[idx] <- "alpha"
        tmp.row[idx] <- match(target$lhs[idx], lv.names)
        tmp.col[idx] <- 1L
        # 5. "|" thresholds -> tau; rows matched via "lhs|rhs" labels
        LABEL <- paste(target$lhs, target$op, target$rhs, sep="")
        idx <- which(target$group == g &
                     target$op == "|" & LABEL %in% ov.th)
        TH <- paste(target$lhs[idx], "|", target$rhs[idx], sep="")
        tmp.mat[idx] <- "tau"
        tmp.row[idx] <- match(TH, ov.th)
        tmp.col[idx] <- 1L
        # 6. "~*~" scales -> delta (column vector)
        idx <- which(target$group == g &
                     target$op == "~*~")
        tmp.mat[idx] <- "delta"
        tmp.row[idx] <- match(target$lhs[idx], ov.names)
        tmp.col[idx] <- 1L
        # new 0.5-12: catch lower-elements in theta/psi and mirror them to
        # the upper triangle (row <= col) so symmetric entries are canonical
        idx.lower <- which(tmp.mat %in% c("theta","psi") & tmp.row > tmp.col)
        if(length(idx.lower) > 0L) {
            tmp <- tmp.row[idx.lower]
            tmp.row[idx.lower] <- tmp.col[idx.lower]
            tmp.col[idx.lower] <- tmp
        }
        # new 0.5-16: group weights -> gw (1x1)
        idx <- which(target$group == g & target$lhs == "group" &
                     target$op == "%")
        tmp.mat[idx] <- "gw"
        tmp.row[idx] <- 1L
        tmp.col[idx] <- 1L
        if(extra) {
            # row counts per model matrix
            mmRows <- list(tau = nth,
                           delta = nvar,
                           nu = nvar,
                           lambda = nvar,
                           theta = nvar,
                           alpha = nfac,
                           beta = nfac,
                           gamma = nfac,
                           gw = 1L,
                           psi = nfac)
            # column counts per model matrix
            mmCols <- list(tau = 1L,
                           delta = 1L,
                           nu = 1L,
                           lambda = nfac,
                           theta = nvar,
                           alpha = 1L,
                           beta = nfac,
                           gamma = nexo,
                           gw = 1L,
                           psi = nfac)
            # dimNames for LISREL model matrices
            mmDimNames <- list(tau = list( ov.th, "threshold"),
                               delta = list( ov.names, "scales"),
                               nu = list( ov.names, "intercept"),
                               lambda = list( ov.names, lv.names),
                               theta = list( ov.names, ov.names),
                               alpha = list( lv.names, "intercept"),
                               beta = list( lv.names, lv.names),
                               gamma = list( lv.names, ov.names.x),
                               gw = list( "group", "weight"),
                               psi = list( lv.names, lv.names))
            # isSymmetric flags per model matrix
            mmSymmetric <- list(tau = FALSE,
                                delta = FALSE,
                                nu = FALSE,
                                lambda = FALSE,
                                theta = TRUE,
                                alpha = FALSE,
                                beta = FALSE,
                                gamma = FALSE,
                                gw = FALSE,
                                psi = TRUE)
            # which mm's do we need? (always include lambda, theta and psi)
            mmNames <- c("lambda", "theta", "psi")
            if("beta" %in% tmp.mat) mmNames <- c(mmNames, "beta")
            if(meanstructure) mmNames <- c(mmNames, "nu", "alpha")
            if("tau" %in% tmp.mat) mmNames <- c(mmNames, "tau")
            if("delta" %in% tmp.mat) mmNames <- c(mmNames, "delta")
            if("gamma" %in% tmp.mat) mmNames <- c(mmNames, "gamma")
            if("gw" %in% tmp.mat) mmNames <- c(mmNames, "gw")
            REP.mmNames[[g]] <- mmNames
            REP.mmNumber[[g]] <- length(mmNames)
            REP.mmRows[[g]] <- unlist(mmRows[ mmNames ])
            REP.mmCols[[g]] <- unlist(mmCols[ mmNames ])
            REP.mmDimNames[[g]] <- mmDimNames[ mmNames ]
            REP.mmSymmetric[[g]] <- unlist(mmSymmetric[ mmNames ])
        } # extra
    } # ngroups
    REP <- list(mat = tmp.mat,
                row = tmp.row,
                col = tmp.col)
    # remove non-existing (NAs)?
    # here we remove `non-existing' parameters; this depends on the matrix
    # representation (eg in LISREL rep, there is no ~~ between lv and ov)
    # NOTE(review): this block is commented out, so remove.nonexisting is
    # currently a no-op (and the commented code lacks its closing brace)
    #if(remove.nonexisting) {
    #    idx <- which( nchar(REP$mat) > 0L &
    #                  !is.na(REP$row) & REP$row > 0L &
    #                  !is.na(REP$col) & REP$col > 0L )
    #    # but keep ==, :=, etc.
    #    idx <- c(idx, which(partable$op %in% c("==", ":=", "<", ">")))
    #    REP$mat <- REP$mat[idx]
    #    REP$row <- REP$row[idx]
    #    REP$col <- REP$col[idx]
    #
    # always add 'ov.dummy.*.names' attributes
    attr(REP, "ov.dummy.names.nox") <- ov.dummy.names.nox
    attr(REP, "ov.dummy.names.x") <- ov.dummy.names.x
    if(extra) {
        attr(REP, "mmNames") <- REP.mmNames
        attr(REP, "mmNumber") <- REP.mmNumber
        attr(REP, "mmRows") <- REP.mmRows
        attr(REP, "mmCols") <- REP.mmCols
        attr(REP, "mmDimNames") <- REP.mmDimNames
        attr(REP, "mmSymmetric") <- REP.mmSymmetric
    }
    REP
}
# ETA:
# 1) EETA
# 2) EETAx
# 3) VETA
# 4) VETAx
# 1) EETA
# compute E(ETA): expected value of latent variables (marginal over x)
# - if no eXo (and GAMMA):
# E(ETA) = (I-B)^-1 ALPHA
# - if eXo and GAMMA:
# E(ETA) = (I-B)^-1 ALPHA + (I-B)^-1 GAMMA mean.x
computeEETA.LISREL <- function(MLIST=NULL, mean.x=NULL,
                               sample.mean=NULL,
                               ov.y.dummy.ov.idx=NULL,
                               ov.x.dummy.ov.idx=NULL,
                               ov.y.dummy.lv.idx=NULL,
                               ov.x.dummy.lv.idx=NULL) {
    # Returns E(ETA) as a plain numeric vector.
    # BETA and GAMMA may be absent (NULL) from MLIST.
    # (fix: removed an unused local copy of MLIST$lambda)
    BETA <- MLIST$beta; GAMMA <- MLIST$gamma
    # ALPHA? (reconstruct when absent, but no 'fix' of dummy entries)
    ALPHA <- .internal_get_ALPHA(MLIST = MLIST, sample.mean = sample.mean,
                                 ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                                 ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                                 ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                                 ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    # BETA? premultiply by (I-B)^-1 when a structural part is present
    if(!is.null(BETA)) {
        IB.inv <- .internal_get_IB.inv(MLIST = MLIST)
        # GAMMA? add the contribution of the exogenous means
        if(!is.null(GAMMA)) {
            eeta <- as.vector(IB.inv %*% ALPHA + IB.inv %*% GAMMA %*% mean.x)
        } else {
            eeta <- as.vector(IB.inv %*% ALPHA)
        }
    } else {
        # GAMMA?
        if(!is.null(GAMMA)) {
            eeta <- as.vector(ALPHA + GAMMA %*% mean.x)
        } else {
            eeta <- as.vector(ALPHA)
        }
    }
    eeta
}
# 2) EETAx
# compute E(ETA|x_i): conditional expected value of latent variable,
# given specific value of x_i
# - if no eXo (and GAMMA):
# E(ETA) = (I-B)^-1 ALPHA
# we return a matrix of size [nobs x nfac] replicating E(ETA)
# - if eXo and GAMMA:
# E(ETA|x_i) = (I-B)^-1 ALPHA + (I-B)^-1 GAMMA x_i
# we return a matrix of size [nobs x nfac]
#
computeEETAx.LISREL <- function(MLIST=NULL, eXo=NULL, N=nrow(eXo),
                                sample.mean=NULL,
                                ov.y.dummy.ov.idx=NULL,
                                ov.x.dummy.ov.idx=NULL,
                                ov.y.dummy.lv.idx=NULL,
                                ov.x.dummy.lv.idx=NULL) {
    # Returns an [N x nfac] matrix: E(ETA|x_i), one row per observation.
    LAMBDA <- MLIST$lambda; BETA <- MLIST$beta; GAMMA <- MLIST$gamma
    nfac <- ncol(LAMBDA)
    # if eXo, N must be nrow(eXo)
    if(!is.null(eXo)) {
        N <- nrow(eXo)
    }
    # ALPHA? (reconstructed when absent; no 'fixing' of dummy entries)
    ALPHA <- .internal_get_ALPHA(MLIST = MLIST, sample.mean = sample.mean,
                                 ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                                 ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                                 ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                                 ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    # construct [nobs x nfac] matrix (repeating ALPHA)
    EETA <- matrix(ALPHA, N, nfac, byrow=TRUE)
    # put back eXo values if dummy; note this happens BEFORE the (I-B)^-1
    # multiplication below, so the order of these steps matters
    if(length(ov.x.dummy.lv.idx) > 0L) {
        EETA[,ov.x.dummy.lv.idx] <- eXo
    }
    # BETA? apply the reduced form: rows become rows %*% t((I-B)^-1)
    if(!is.null(BETA)) {
        IB.inv <- .internal_get_IB.inv(MLIST = MLIST)
        EETA <- EETA %*% t(IB.inv)
    }
    # GAMMA? add the per-observation exogenous contribution
    if(!is.null(GAMMA)) {
        if(!is.null(BETA)) {
            # IB.inv was computed in the BETA branch above
            EETA <- EETA + eXo %*% t(IB.inv %*% GAMMA)
        } else {
            EETA <- EETA + eXo %*% t(GAMMA)
        }
    }
    EETA
}
# 3) VETA
# compute V(ETA): variances/covariances of latent variables
# - if no eXo (and GAMMA)
# V(ETA) = (I-B)^-1 PSI (I-B)^-T
# - if eXo and GAMMA: (cfr lisrel submodel 3a with ksi=x)
# V(ETA) = (I-B)^-1 [ GAMMA cov.x t(GAMMA) + PSI] (I-B)^-T
computeVETA.LISREL <- function(MLIST=NULL, cov.x=NULL) {
    # Returns V(ETA) = (I-B)^-1 [ GAMMA cov.x GAMMA' + PSI ] (I-B)^-T
    # (the bracketed GAMMA part only when exogenous covariates are present).
    # (fix: removed unused local copies of MLIST$lambda/nrow and MLIST$theta)
    PSI <- MLIST$psi
    BETA <- MLIST$beta
    GAMMA <- MLIST$gamma
    if(!is.null(GAMMA)) {
        stopifnot(!is.null(cov.x))
        # we treat 'x' as 'ksi' in the LISREL model; cov.x is PHI
        PSI <- tcrossprod(GAMMA %*% cov.x, GAMMA) + PSI
    }
    # beta? wrap with (I-B)^-1 ... (I-B)^-T when a structural part exists
    if(is.null(BETA)) {
        VETA <- PSI
    } else {
        IB.inv <- .internal_get_IB.inv(MLIST = MLIST)
        VETA <- tcrossprod(IB.inv %*% PSI, IB.inv)
    }
    VETA
}
# 4) VETAx
# compute V(ETA|x_i): variances/covariances of latent variables
# V(ETA) = (I-B)^-1 PSI (I-B)^-T + remove dummies
computeVETAx.LISREL <- function(MLIST=NULL, lv.dummy.idx=NULL) {
    # V(ETA|x_i) = (I-B)^-1 PSI (I-B)^-T, then drop dummy lv rows/cols.
    out <- MLIST$psi
    # with a structural part, wrap PSI with (I-B)^-1 ... (I-B)^-T
    if(!is.null(MLIST$beta)) {
        ib.inv <- .internal_get_IB.inv(MLIST = MLIST)
        out <- tcrossprod(ib.inv %*% out, ib.inv)
    }
    # remove rows/columns of the phantom/dummy latent variables, if any
    if(!is.null(lv.dummy.idx)) {
        out <- out[-lv.dummy.idx, -lv.dummy.idx, drop=FALSE]
    }
    out
}
# Y
# 1) EY
# 2) EYx
# 3) EYetax
# 4) VY
# 5) VYx
# 6) VYetax
# 1) EY
# compute E(Y): expected value of observed
# E(Y) = NU + LAMBDA %*% E(eta)
# = NU + LAMBDA %*% (IB.inv %*% ALPHA) # no exo, no GAMMA
# = NU + LAMBDA %*% (IB.inv %*% ALPHA + IB.inv %*% GAMMA %*% mean.x) # eXo
# if DELTA -> E(Y) = delta * E(Y)
#
# this is similar to computeMuHat but:
# - we ALWAYS compute NU+ALPHA, even if meanstructure=FALSE
# - never used if GAMMA, since we then have categorical variables, and the
# 'part 1' structure contains the (thresholds +) intercepts, not
# the means
computeEY.LISREL <- function(MLIST=NULL, mean.x = NULL, sample.mean = NULL,
                             ov.y.dummy.ov.idx=NULL,
                             ov.x.dummy.ov.idx=NULL,
                             ov.y.dummy.lv.idx=NULL,
                             ov.x.dummy.lv.idx=NULL) {
    # E(Y) = NU + LAMBDA %*% E(ETA), optionally scaled by delta.
    # intercepts: reconstructed when MLIST$nu is absent (no 'fixing')
    intercepts <- .internal_get_NU(MLIST = MLIST, sample.mean = sample.mean,
                                   ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                                   ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                                   ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                                   ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    # marginal latent means E(ETA)
    lv.means <- computeEETA.LISREL(MLIST = MLIST, sample.mean = sample.mean,
                                   mean.x = mean.x,
                                   ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                                   ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                                   ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                                   ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    ey <- as.vector(intercepts) + as.vector(MLIST$lambda %*% lv.means)
    # delta scaling (categorical case)
    if(!is.null(MLIST$delta)) {
        ey <- ey * as.vector(MLIST$delta)
    }
    ey
}
# 2) EYx
# compute E(Y|x_i): expected value of observed, conditional on x_i
# E(Y|x_i) = NU + LAMBDA %*% E(eta|x_i)
# - if no eXo (and GAMMA):
# E(ETA|x_i) = (I-B)^-1 ALPHA
# we return a matrix of size [nobs x nfac] replicating E(ETA)
# - if eXo and GAMMA:
# E(ETA|x_i) = (I-B)^-1 ALPHA + (I-B)^-1 GAMMA x_i
# we return a matrix of size [nobs x nfac]
#
# - we ALWAYS compute NU+ALPHA, even if meanstructure=FALSE
# - never used if GAMMA, since we then have categorical variables, and the
# 'part 1' structure contains the (thresholds +) intercepts, not
# the means
computeEYx.LISREL <- function(MLIST = NULL,
                              eXo = NULL,
                              N = nrow(eXo),
                              sample.mean = NULL,
                              ov.y.dummy.ov.idx = NULL,
                              ov.x.dummy.ov.idx = NULL,
                              ov.y.dummy.lv.idx = NULL,
                              ov.x.dummy.lv.idx = NULL) {
    # E(Y|x_i) = NU + LAMBDA %*% E(ETA|x_i), returned as [N x nvar].
    # intercepts: reconstructed when MLIST$nu is absent (no 'fixing')
    intercepts <- .internal_get_NU(MLIST = MLIST,
                                   sample.mean = sample.mean,
                                   ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                                   ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                                   ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                                   ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    # conditional latent means, one row per observation
    lv.cond.means <- computeEETAx.LISREL(MLIST = MLIST,
                                         eXo = eXo,
                                         N = N,
                                         sample.mean = sample.mean,
                                         ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                                         ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                                         ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                                         ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    # add the intercepts column-wise
    out <- sweep(tcrossprod(lv.cond.means, MLIST$lambda), 2L,
                 STATS = intercepts, FUN = "+")
    # delta scaling (categorical case)
    if(!is.null(MLIST$delta)) {
        out <- sweep(out, 2L, STATS = MLIST$delta, FUN = "*")
    }
    out
}
# 3) EYetax
# compute E(Y|eta_i,x_i): conditional expected value of observed variable
# given specific value of eta_i AND x_i
#
# E(y*_i|eta_i, x_i) = NU + LAMBDA eta_i + KAPPA x_i
#
# where eta_i = predict(fit) = factor scores OR specific values for eta_i
# (as in GH integration)
#
# if nexo = 0, and eta_i is single row, YHAT is the same for each observation
# in this case, we return a single row, unless Nobs > 1L, in which case
# we return Nobs identical rows
#
# NOTE: we assume that any effect of x_i on eta_i has already been taken
# care off
# categorical version
computeEYetax.LISREL <- function(MLIST = NULL,
                                 eXo = NULL,
                                 ETA = NULL,
                                 N = nrow(eXo),
                                 sample.mean = NULL,
                                 ov.y.dummy.ov.idx = NULL,
                                 ov.x.dummy.ov.idx = NULL,
                                 ov.y.dummy.lv.idx = NULL,
                                 ov.x.dummy.lv.idx = NULL) {
    # Returns E(Y|eta_i, x_i) as a matrix with one row per observation.
    LAMBDA <- MLIST$lambda
    BETA <- MLIST$beta
    # determine the number of rows N: eXo wins, then the N argument, else 1
    if(!is.null(eXo)) {
        N <- nrow(eXo)
    } else if(!is.null(N)) {
        # nothing to do
    } else {
        N <- 1L
    }
    # create ETA matrix: replicate a single row N times
    if(nrow(ETA) == 1L) {
        ETA <- matrix(ETA, N, ncol(ETA), byrow=TRUE)
    }
    # always augment ETA with 'dummy values' (0 for ov.y, eXo for ov.x)
    #ndummy <- length(c(ov.y.dummy.lv.idx, ov.x.dummy.lv.idx))
    #if(ndummy > 0L) {
    #    ETA2 <- cbind(ETA, matrix(0, N, ndummy))
    #} else {
        ETA2 <- ETA
    #}
    # only if we have dummy ov.y, we need to compute the 'yhat' values
    # beforehand; the sequence below (insert eXo, zero ov.y, apply the
    # structural part, restore eXo, restore regular lv values) is
    # order-sensitive
    if(length(ov.y.dummy.lv.idx) > 0L) {
        # insert eXo values
        if(length(ov.x.dummy.lv.idx) > 0L) {
            ETA2[,ov.x.dummy.lv.idx] <- eXo
        }
        # zero ov.y values
        if(length(ov.y.dummy.lv.idx) > 0L) {
            ETA2[,ov.y.dummy.lv.idx] <- 0
        }
        # ALPHA? (reconstruct, but no 'fix')
        ALPHA <- .internal_get_ALPHA(MLIST = MLIST, sample.mean = sample.mean,
                                     ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                                     ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                                     ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                                     ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
        # BETA? one structural step: ETA2 <- ETA2 B' + ALPHA'
        if(!is.null(BETA)) {
            ETA2 <- sweep(tcrossprod(ETA2, BETA), 2L, STATS = ALPHA, FUN = "+")
        } else {
            ETA2 <- sweep(ETA2, 2L, STATS = ALPHA, FUN = "+")
        }
        # put back eXo values
        if(length(ov.x.dummy.lv.idx) > 0L) {
            ETA2[,ov.x.dummy.lv.idx] <- eXo
        }
        # put back ETA values for the 'real' latent variables
        # (assumes dummies occupy the trailing columns of ETA)
        dummy.idx <- c(ov.x.dummy.lv.idx, ov.y.dummy.lv.idx)
        if(length(dummy.idx) > 0L) {
            lv.regular.idx <- seq_len( min(dummy.idx) - 1L )
            ETA2[, lv.regular.idx] <- ETA[,lv.regular.idx, drop = FALSE]
        }
    }
    # get NU, but do not 'fix'
    NU <- .internal_get_NU(MLIST = MLIST,
                           sample.mean = sample.mean,
                           ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                           ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                           ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                           ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    # EYetax = NU + LAMBDA eta_i (measurement part)
    EYetax <- sweep(tcrossprod(ETA2, LAMBDA), 2L, STATS = NU, FUN = "+")
    # if delta, scale (categorical case)
    if(!is.null(MLIST$delta)) {
        EYetax <- sweep(EYetax, 2L, STATS = MLIST$delta, FUN = "*")
    }
    EYetax
}
# unconditional version
computeEYetax2.LISREL <- function(MLIST = NULL,
                                  ETA = NULL,
                                  sample.mean = NULL,
                                  ov.y.dummy.ov.idx = NULL,
                                  ov.x.dummy.ov.idx = NULL,
                                  ov.y.dummy.lv.idx = NULL,
                                  ov.x.dummy.lv.idx = NULL) {
    # Unconditional version of computeEYetax: E(Y|eta_i), one row per obs.
    y.idx <- ov.y.dummy.lv.idx
    # with dummy ov.y latent variables, their 'yhat' values must be
    # computed first and imputed into ETA[, ov.y]
    if(length(y.idx) > 0L) {
        # ALPHA: reconstructed when absent (no 'fixing')
        ALPHA <- .internal_get_ALPHA(MLIST = MLIST, sample.mean = sample.mean,
                                     ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                                     ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                                     ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                                     ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
        # everything except the ov.y columns
        eta.noy <- ETA[,-y.idx, drop = FALSE]
        # structural coefficients: ov.y rows, non-ov.y columns
        beta.y <- MLIST$beta[y.idx,-y.idx, drop = FALSE]
        # ov.y intercepts
        alpha.y <- ALPHA[y.idx,, drop=FALSE]
        # impute ov.y values in ETA
        ETA[,y.idx] <-
            sweep(tcrossprod(eta.noy, beta.y), 2L, STATS = alpha.y, FUN = "+")
    }
    # intercepts: reconstructed when absent (no 'fixing')
    NU <- .internal_get_NU(MLIST = MLIST,
                           sample.mean = sample.mean,
                           ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                           ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                           ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                           ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    # measurement part: NU + LAMBDA eta_i
    out <- sweep(tcrossprod(ETA, MLIST$lambda), 2L, STATS = NU, FUN = "+")
    # delta scaling (categorical case)
    if(!is.null(MLIST$delta)) {
        out <- sweep(out, 2L, STATS = MLIST$delta, FUN = "*")
    }
    out
}
# unconditional version
computeEYetax3.LISREL <- function(MLIST = NULL,
                                  ETA = NULL,
                                  sample.mean = NULL,
                                  mean.x = NULL,
                                  ov.y.dummy.ov.idx = NULL,
                                  ov.x.dummy.ov.idx = NULL,
                                  ov.y.dummy.lv.idx = NULL,
                                  ov.x.dummy.lv.idx = NULL) {
    # Unconditional version: project (centered) factor scores back to the
    # observed scale via model-implied means, ignoring the structural part
    # for the dummy latent variables.
    LAMBDA <- MLIST$lambda
    # special case: empty lambda -> every row is just the sample means
    if(ncol(LAMBDA) == 0L) {
        return( matrix(sample.mean,
                       nrow(ETA), length(sample.mean), byrow=TRUE) )
    }
    # lv idx: dummies are assumed to occupy the trailing columns
    dummy.idx <- c(ov.y.dummy.lv.idx, ov.x.dummy.lv.idx)
    if(length(dummy.idx) > 0L) {
        nondummy.idx <- seq_len( min(dummy.idx) - 1L )
    } else {
        nondummy.idx <- seq_len( ncol(MLIST$lambda) )
    }
    # beta?
    if(is.null(MLIST$beta) || length(ov.y.dummy.lv.idx) == 0L ||
       length(nondummy.idx) == 0L) {
        LAMBDA..IB.inv <- LAMBDA
    } else {
        # only keep those columns of BETA that correspond to the
        # the `regular' latent variables
        # (ie. ignore the structural part altogether)
        MLIST2 <- MLIST
        MLIST2$beta[,dummy.idx] <- 0
        IB.inv <- .internal_get_IB.inv(MLIST = MLIST2)
        LAMBDA..IB.inv <- LAMBDA %*% IB.inv
    }
    # compute model-implied means of the observed variables
    EY <- computeEY.LISREL(MLIST = MLIST, mean.x = mean.x,
                           sample.mean = sample.mean,
                           ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                           ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                           ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                           ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    # ... and of the latent variables
    EETA <- computeEETA.LISREL(MLIST = MLIST, sample.mean = sample.mean,
                               mean.x = mean.x,
                               ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                               ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                               ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                               ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    # center regular lv only
    ETA[,nondummy.idx] <- sweep(ETA[,nondummy.idx,drop = FALSE], 2L,
                                STATS = EETA[nondummy.idx], FUN = "-")
    # project from lv to ov, if we have any lv
    if(length(nondummy.idx) > 0) {
        EYetax <- sweep(tcrossprod(ETA[,nondummy.idx,drop=FALSE],
                                   LAMBDA..IB.inv[,nondummy.idx,drop=FALSE]),
                        2L, STATS = EY, FUN = "+")
    } else {
        EYetax <- ETA
    }
    # put back eXo variables
    if(length(ov.x.dummy.lv.idx) > 0L) {
        EYetax[,ov.x.dummy.ov.idx] <- ETA[,ov.x.dummy.lv.idx, drop = FALSE]
    }
    # if delta, scale (categorical case)
    if(!is.null(MLIST$delta)) {
        EYetax <- sweep(EYetax, 2L, STATS = MLIST$delta, FUN = "*")
    }
    EYetax
}
# 4) VY
# compute the *un*conditional variance of y: V(Y) or V(Y*)
# 'unconditional' model-implied variances
# - same as diag(Sigma.hat) if all Y are continuous
# - 1.0 (or delta^2) if categorical
# - if also Gamma, cov.x is used (only if categorical)
# only in THIS case, VY is different from diag(VYx)
#
# V(Y) = LAMBDA V(ETA) t(LAMBDA) + THETA
computeVY.LISREL <- function(MLIST=NULL, cov.x=NULL) {
    # V(Y) = diag( LAMBDA V(ETA) LAMBDA' + THETA ); returns variances only.
    lv.var <- computeVETA.LISREL(MLIST = MLIST, cov.x = cov.x)
    lambda <- MLIST$lambda
    full.cov <- tcrossprod(lambda %*% lv.var, lambda) + MLIST$theta
    # variances only
    diag(full.cov)
}
# 5) VYx
# compute V(Y*|x_i) == model-implied covariance matrix
# this equals V(Y*) if no (explicit) eXo no GAMMA
computeVYx.LISREL <- computeSigmaHat.LISREL <- function(MLIST = NULL,
                                                        delta = TRUE) {
    # Model-implied covariance matrix:
    # Sigma = LAMBDA (I-B)^-1 PSI (I-B)^-T LAMBDA' + THETA,
    # optionally pre/post-scaled by diag(delta).
    lambda <- MLIST$lambda
    nvar <- nrow(lambda)
    # reduced-form loadings
    if(is.null(MLIST$beta)) {
        lam.ib <- lambda
    } else {
        lam.ib <- lambda %*% .internal_get_IB.inv(MLIST = MLIST)
    }
    sigma <- tcrossprod(lam.ib %*% MLIST$psi, lam.ib) + MLIST$theta
    # delta scaling (categorical case)
    if(delta && !is.null(MLIST$delta)) {
        D <- diag(MLIST$delta[,1L], nrow=nvar, ncol=nvar)
        sigma <- D %*% sigma %*% D
    }
    sigma
}
# 6) VYetax
# V(Y | eta_i, x_i) = THETA
computeVYetax.LISREL <- function(MLIST = NULL, delta = TRUE) {
    # Conditional on eta_i and x_i only measurement error remains:
    # V(Y|eta_i, x_i) = THETA, optionally scaled by diag(delta) on both sides.
    out <- MLIST$theta
    if(delta && !is.null(MLIST$delta)) {
        p <- nrow(MLIST$theta)
        D <- diag(MLIST$delta[,1L], nrow=p, ncol=p)
        out <- D %*% out %*% D
    }
    out
}
### compute model-implied sample statistics
#
# 1) MuHat (similar to EY, but continuous only)
# 2) TH
# 3) PI
# 4) SigmaHat == VYx
# compute MuHat for a single group -- only for the continuous case (no eXo)
#
# this is a special case of E(Y) where
# - we have no (explicit) eXogenous variables
# - only continuous
computeMuHat.LISREL <- function(MLIST=NULL) {
    # Model-implied means: Mu = NU + LAMBDA (I-B)^-1 ALPHA.
    # Continuous-only case, no exogenous covariates.
    # no meanstructure at all -> implied means are all zero
    if(is.null(MLIST$alpha) || is.null(MLIST$nu)) {
        return(matrix(0, nrow(MLIST$lambda), 1L))
    }
    # reduced-form loadings
    if(is.null(MLIST$beta)) {
        lam.ib <- MLIST$lambda
    } else {
        lam.ib <- MLIST$lambda %*% .internal_get_IB.inv(MLIST = MLIST)
    }
    MLIST$nu + lam.ib %*% MLIST$alpha
}
# compute TH for a single group
computeTH.LISREL <- function(MLIST=NULL, th.idx=NULL) {
    # Compute the model-implied thresholds for a single group.
    # th.idx: maps each threshold slot to its observed variable; 0 marks a
    #         slot belonging to a numeric (non-categorical) variable.
    LAMBDA <- MLIST$lambda; nvar <- nrow(LAMBDA); nfac <- ncol(LAMBDA)
    BETA <- MLIST$beta
    TAU <- MLIST$tau; nth <- nrow(TAU)
    # missing alpha: fall back to zero latent intercepts
    if(is.null(MLIST$alpha)) {
        ALPHA <- matrix(0, nfac, 1L)
    } else {
        ALPHA <- MLIST$alpha
    }
    # missing nu: fall back to zero observed intercepts
    if(is.null(MLIST$nu)) {
        NU <- matrix(0, nvar, 1L)
    } else {
        NU <- MLIST$nu
    }
    if(is.null(th.idx)) {
        # one threshold per variable; K_nu is the identity
        th.idx <- seq_len(nth)
        nlev <- rep(1L, nvar)
        K_nu <- diag(nvar)
    } else {
        # nlev[v]: number of threshold slots for variable v (at least 1)
        nlev <- tabulate(th.idx, nbins=nvar); nlev[nlev == 0L] <- 1L
        # K_nu expands the nvar intercepts to one entry per threshold slot
        K_nu <- matrix(0, sum(nlev), nvar)
        K_nu[ cbind(seq_len(sum(nlev)), rep(seq_len(nvar), times=nlev)) ] <- 1.0
    }
    # shortcut: no thresholds in the model at all
    if(is.null(TAU)) return(matrix(0, length(th.idx), 1L))
    # beta? reduced-form loadings
    if(is.null(BETA)) {
        LAMBDA..IB.inv <- LAMBDA
    } else {
        IB.inv <- .internal_get_IB.inv(MLIST = MLIST)
        LAMBDA..IB.inv <- LAMBDA %*% IB.inv
    }
    # compute pi0: the model-implied observed intercepts
    pi0 <- NU + LAMBDA..IB.inv %*% ALPHA
    # interleave th's with zeros where we have numeric variables
    th <- numeric( length(th.idx) )
    th[ th.idx > 0L ] <- TAU[,1L]
    # compute TH: thresholds minus the (expanded) intercepts
    TH <- th - (K_nu %*% pi0)
    # if delta, scale each slot by its variable's delta (categorical case)
    if(!is.null(MLIST$delta)) {
        DELTA.diag <- MLIST$delta[,1L]
        DELTA.star.diag <- rep(DELTA.diag, times=nlev)
        TH <- TH * DELTA.star.diag
    }
    as.vector(TH)
}
# compute PI for a single group
computePI.LISREL <- function(MLIST=NULL) {
    # Slopes of the observed variables on the exogenous covariates:
    # PI = LAMBDA (I-B)^-1 GAMMA, row-scaled by delta when present.
    # no exogenous covariates -> empty (nvar x 0) matrix
    if(is.null(MLIST$gamma)) {
        return(matrix(0, nrow(MLIST$lambda), 0L))
    }
    # reduced-form loadings
    if(is.null(MLIST$beta)) {
        lam.ib <- MLIST$lambda
    } else {
        lam.ib <- MLIST$lambda %*% .internal_get_IB.inv(MLIST = MLIST)
    }
    out <- lam.ib %*% MLIST$gamma
    # delta scaling of the rows (categorical case)
    if(!is.null(MLIST$delta)) {
        out <- out * MLIST$delta[,1L]
    }
    out
}
computeLAMBDA.LISREL <- function(MLIST = NULL,
                                 ov.y.dummy.ov.idx = NULL,
                                 ov.x.dummy.ov.idx = NULL,
                                 ov.y.dummy.lv.idx = NULL,
                                 ov.x.dummy.lv.idx = NULL,
                                 remove.dummy.lv = FALSE) {
    # 'Fix' LAMBDA: the loadings of dummy ov.y variables live in BETA.
    out <- MLIST$lambda
    if(length(ov.y.dummy.ov.idx) > 0L) {
        out[ov.y.dummy.ov.idx,] <- MLIST$beta[ov.y.dummy.lv.idx,]
    }
    # optionally drop the columns of all phantom/dummy latent variables
    lv.dummy.idx <- c(ov.y.dummy.lv.idx, ov.x.dummy.lv.idx)
    if(remove.dummy.lv && length(lv.dummy.idx) > 0L) {
        out <- out[,-lv.dummy.idx,drop=FALSE]
    }
    out
}
computeTHETA.LISREL <- function(MLIST=NULL,
                                ov.y.dummy.ov.idx=NULL,
                                ov.x.dummy.ov.idx=NULL,
                                ov.y.dummy.lv.idx=NULL,
                                ov.x.dummy.lv.idx=NULL) {
    # 'Fix' THETA: the residual (co)variances of the dummy variables are
    # stored in PSI, so copy that block back into THETA.
    out <- MLIST$theta
    ov.idx <- c(ov.y.dummy.ov.idx, ov.x.dummy.ov.idx)
    lv.idx <- c(ov.y.dummy.lv.idx, ov.x.dummy.lv.idx)
    if(length(ov.idx) > 0L) {
        out[ov.idx, ov.idx] <- MLIST$psi[lv.idx, lv.idx]
    }
    out
}
# compute IB.inv
.internal_get_IB.inv <- function(MLIST = NULL) {
    # Compute (I - B)^-1; the identity when there is no structural part.
    nr <- nrow(MLIST$psi)
    if(is.null(MLIST$beta)) {
        return(diag(nr))
    }
    IB <- -MLIST$beta
    IB[lav_matrix_diag_idx(nr)] <- 1
    solve(IB)
}
# only if ALPHA=NULL but we need it anyway
# we 'reconstruct' ALPHA here (including dummy entries), no fixing
#
# without any dummy variables, this is just the zero vector
# but if we have dummy variables, we need to fill in their values
#
#
.internal_get_ALPHA <- function(MLIST = NULL, sample.mean = NULL,
                                ov.y.dummy.ov.idx = NULL,
                                ov.x.dummy.ov.idx = NULL,
                                ov.y.dummy.lv.idx = NULL,
                                ov.x.dummy.lv.idx = NULL) {
    # Reconstruct ALPHA when it is absent from MLIST (no 'fixing').
    # Without dummy variables this is just a zero column vector; with
    # dummies, their entries are solved from the sample means.
    if(!is.null(MLIST$alpha)) return(MLIST$alpha)
    nfac <- ncol(MLIST$lambda)
    ALPHA <- matrix(0, nfac, 1L)
    ov.dummy.idx <- c(ov.y.dummy.ov.idx, ov.x.dummy.ov.idx)
    lv.dummy.idx <- c(ov.y.dummy.lv.idx, ov.x.dummy.lv.idx)
    if(length(ov.dummy.idx) > 0L) {
        # Note: instead of sample.mean, we need 'intercepts':
        # sample.mean = NU + LAMBDA..IB.inv %*% ALPHA, hence
        # ALPHA = solve(LAMBDA..IB.inv) %*% (sample.mean - NU);
        # restricted to the dummy block, LAMBDA..IB.inv is square and the
        # NU elements are not needed (not in ov.dummy.idx)
        IB.inv <- .internal_get_IB.inv(MLIST = MLIST)
        lam.ib.dummy <- (MLIST$lambda %*% IB.inv)[ov.dummy.idx, lv.dummy.idx]
        ALPHA[lv.dummy.idx] <- solve(lam.ib.dummy) %*% sample.mean[ov.dummy.idx]
    }
    ALPHA
}
# only if NU=NULL but we need it anyway
#
# since we have no meanstructure, we can assume NU is unrestricted
# and contains either:
# 1) the sample means (if not eXo)
# 2) the intercepts, if we have exogenous covariates
# since sample.mean = NU + LAMBDA %*% E(eta)
# we have NU = sample.mean - LAMBDA %*% E(eta)
.internal_get_NU <- function(MLIST = NULL, sample.mean = NULL,
                             ov.y.dummy.ov.idx = NULL,
                             ov.x.dummy.ov.idx = NULL,
                             ov.y.dummy.lv.idx = NULL,
                             ov.x.dummy.lv.idx = NULL) {
    # Reconstruct NU when absent: the (unrestricted) sample means without
    # exogenous covariates, the intercepts when covariates are present.
    if(!is.null(MLIST$nu)) return(MLIST$nu)
    if( length(ov.x.dummy.ov.idx) == 0L ) {
        # unrestricted mean
        return(sample.mean)
    }
    # with exogenous covariates: NU = sample.mean - LAMBDA %*% E(eta)
    EETA <- computeEETA.LISREL(MLIST, mean.x=NULL,
                               sample.mean=sample.mean,
                               ov.y.dummy.ov.idx=ov.y.dummy.ov.idx,
                               ov.x.dummy.ov.idx=ov.x.dummy.ov.idx,
                               ov.y.dummy.lv.idx=ov.y.dummy.lv.idx,
                               ov.x.dummy.lv.idx=ov.x.dummy.lv.idx)
    NU <- sample.mean - MLIST$lambda %*% EETA
    # force exact zeroes for all dummy entries
    NU[c(ov.y.dummy.ov.idx,ov.x.dummy.ov.idx)] <- 0
    NU
}
.internal_get_KAPPA <- function(MLIST = NULL,
                                ov.y.dummy.ov.idx = NULL,
                                ov.x.dummy.ov.idx = NULL,
                                ov.y.dummy.lv.idx = NULL,
                                ov.x.dummy.lv.idx = NULL,
                                nexo = NULL) {
    # Build the [nvar x nexo] KAPPA matrix; its non-zero rows come either
    # from GAMMA (categorical case) or from the ov.x block of BETA.
    # determine the number of exogenous covariates
    if(!is.null(MLIST$gamma)) {
        n.exo <- ncol(MLIST$gamma)
    } else if(!is.null(nexo)) {
        n.exo <- nexo
    } else {
        stop("nexo not known")
    }
    KAPPA <- matrix(0, nrow(MLIST$lambda), n.exo)
    if(!is.null(MLIST$gamma)) {
        KAPPA[ov.y.dummy.ov.idx,] <- MLIST$gamma[ov.y.dummy.lv.idx,,drop=FALSE]
    } else if(length(ov.x.dummy.ov.idx) > 0L) {
        KAPPA[ov.y.dummy.ov.idx,] <- MLIST$beta[ov.y.dummy.lv.idx,
                                                ov.x.dummy.lv.idx, drop=FALSE]
    }
    KAPPA
}
# old version of computeEYetax (using 'fixing')
computeYHATetax.LISREL <- function(MLIST=NULL, eXo=NULL, ETA=NULL,
                                   sample.mean=NULL,
                                   ov.y.dummy.ov.idx=NULL,
                                   ov.x.dummy.ov.idx=NULL,
                                   ov.y.dummy.lv.idx=NULL,
                                   ov.x.dummy.lv.idx=NULL,
                                   Nobs = 1L) {
    # Old version of computeEYetax, using explicit 'fixing' of NU and
    # LAMBDA: E(y*_i|eta_i, x_i) = NU + LAMBDA eta_i + KAPPA x_i.
    LAMBDA <- MLIST$lambda; nvar <- nrow(LAMBDA); nfac <- ncol(LAMBDA)
    lv.dummy.idx <- c(ov.y.dummy.lv.idx, ov.x.dummy.lv.idx)
    ov.dummy.idx <- c(ov.y.dummy.ov.idx, ov.x.dummy.ov.idx)
    # exogenous variables?
    if(is.null(eXo)) {
        nexo <- 0L
    } else {
        nexo <- ncol(eXo)
        # check ETA rows: either one row (replicated) or one per observation
        if(!(nrow(ETA) == 1L || nrow(ETA) == nrow(eXo))) {
            stop("lavaan ERROR: !(nrow(ETA) == 1L || nrow(ETA) == nrow(eXo))")
        }
    }
    # get NU (reconstructed when absent)
    NU <- .internal_get_NU(MLIST = MLIST, sample.mean = sample.mean,
                           ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                           ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                           ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                           ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    # ALPHA? (reconstruct, but no 'fix')
    ALPHA <- .internal_get_ALPHA(MLIST = MLIST, sample.mean = sample.mean,
                                 ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                                 ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                                 ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                                 ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    # fix NU: dummy observed variables take their intercepts from ALPHA
    if(length(lv.dummy.idx) > 0L) {
        NU[ov.dummy.idx, 1L] <- ALPHA[lv.dummy.idx, 1L]
    }
    # fix LAMBDA (remove dummies) ## FIXME -- needed?
    LAMBDA <- MLIST$lambda
    if(length(lv.dummy.idx) > 0L) {
        LAMBDA <- LAMBDA[, -lv.dummy.idx, drop=FALSE]
        nfac <- ncol(LAMBDA)
        # dummy ov.y rows load via BETA on the regular latent variables
        LAMBDA[ov.y.dummy.ov.idx,] <-
            MLIST$beta[ov.y.dummy.lv.idx, seq_len(nfac), drop=FALSE]
    }
    # compute YHAT: measurement part NU + LAMBDA eta_i
    YHAT <- sweep(ETA %*% t(LAMBDA), MARGIN=2, NU, "+")
    # Kappa + eXo?
    # note: Kappa elements are either in Gamma or in Beta
    if(nexo > 0L) {
        # create KAPPA
        KAPPA <- .internal_get_KAPPA(MLIST = MLIST,
                                     ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                                     ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                                     ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                                     ov.x.dummy.lv.idx = ov.x.dummy.lv.idx,
                                     nexo = nexo)
        # expand YHAT if ETA only has 1 row
        if(nrow(YHAT) == 1L) {
            YHAT <- sweep(eXo %*% t(KAPPA), MARGIN=2, STATS=YHAT, FUN="+")
        } else {
            # add fixed part
            YHAT <- YHAT + (eXo %*% t(KAPPA))
        }
        # put back eXo
        if(length(ov.x.dummy.ov.idx) > 0L) {
            YHAT[, ov.x.dummy.ov.idx] <- eXo
        }
    } else {
        # duplicate? replicate the single row Nobs times
        if(is.numeric(Nobs) && Nobs > 1L && nrow(YHAT) == 1L) {
            YHAT <- matrix(YHAT, Nobs, nvar, byrow=TRUE)
            # YHAT <- YHAT[ rep(1L, Nobs), ]
        }
    }
    # delta?
    # FIXME: not used here?
    #if(!is.null(DELTA)) {
    #    YHAT <- sweep(YHAT, MARGIN=2, DELTA, "*")
    #}
    YHAT
}
# deal with 'dummy' OV.X latent variables
# exogenous observed covariates are (temporarily) represented as latent
# variables; this function removes them again:
# - the ov.x columns of BETA become a new GAMMA matrix
# - all ov.x related rows/columns are removed from the other matrices
#
# Arguments:
#   MLIST             - list of LISREL model matrices (lambda, theta, psi,
#                       beta, nu, alpha, ...)
#   ov.x.dummy.ov.idx - observed-variable indices of the ov.x dummies
#   ov.x.dummy.lv.idx - latent-variable indices of the same ov.x dummies
# Returns: the reduced model-matrix list (MLISTX); MLIST is returned
# unchanged when there are no ov.x dummy latent variables
MLIST2MLISTX <- function(MLIST=NULL,
                         ov.x.dummy.ov.idx = NULL,
                         ov.x.dummy.lv.idx = NULL) {
    lv.idx <- ov.x.dummy.lv.idx
    ov.idx <- ov.x.dummy.ov.idx
    # nothing to do?
    if(length(lv.idx) == 0L) return(MLIST)
    # copy
    MLISTX <- MLIST
    # fix LAMBDA:
    # - remove all ov.x related columns/rows
    MLISTX$lambda <- MLIST$lambda[-ov.idx, -lv.idx, drop=FALSE]
    # fix THETA:
    # - remove ov.x related columns/rows
    MLISTX$theta <- MLIST$theta[-ov.idx, -ov.idx, drop=FALSE]
    # fix PSI:
    # - remove ov.x related columns/rows
    MLISTX$psi <- MLIST$psi[-lv.idx, -lv.idx, drop=FALSE]
    # create GAMMA: regressions on the dummy lv's become regressions
    # on the exogenous covariates
    MLISTX$gamma <- MLIST$beta[-lv.idx, lv.idx, drop=FALSE]
    # fix BETA (remove if empty)
    if(!is.null(MLIST$beta)) {
        MLISTX$beta <- MLIST$beta[-lv.idx, -lv.idx, drop=FALSE]
        if(ncol(MLISTX$beta) == 0L) MLISTX$beta <- NULL
    }
    # fix NU
    if(!is.null(MLIST$nu)) {
        MLISTX$nu <- MLIST$nu[-ov.idx, 1L, drop=FALSE]
    }
    # fix ALPHA
    if(!is.null(MLIST$alpha)) {
        MLISTX$alpha <- MLIST$alpha[-lv.idx, 1L, drop=FALSE]
    }
    MLISTX
}
# create MLIST from MLISTX: re-introduce the 'dummy' OV.X latent variables
# - every matrix grows by `ndum` rows/columns of zeros
# - each dummy lv loads (weight 1) on its own observed variable
# - covariate (co)variances go into PSI (from cov.x), means into ALPHA
#   (from mean.x), and GAMMA moves back into BETA
MLISTX2MLIST <- function(MLISTX=NULL,
                         ov.x.dummy.ov.idx = NULL,
                         ov.x.dummy.lv.idx = NULL,
                         mean.x=NULL,
                         cov.x=NULL) {
    lv.idx <- ov.x.dummy.lv.idx; ndum <- length(lv.idx)
    ov.idx <- ov.x.dummy.ov.idx
    if(length(lv.idx) == 0L) return(MLISTX)
    stopifnot(!is.null(cov.x), !is.null(mean.x))
    nvar <- nrow(MLISTX$lambda); nfac <- ncol(MLISTX$lambda)
    # helper: append `n` zero rows and `n` zero columns to a matrix
    grow <- function(M, n) {
        rbind(cbind(M, matrix(0, nrow(M), n)),
              matrix(0, n, ncol(M) + n))
    }
    # start from a copy
    MLIST <- MLISTX
    # resize the square/rectangular matrices
    MLIST$lambda <- grow(MLISTX$lambda, ndum)
    MLIST$psi    <- grow(MLISTX$psi,    ndum)
    MLIST$theta  <- grow(MLISTX$theta,  ndum)
    if(!is.null(MLISTX$beta)) {
        MLIST$beta <- grow(MLISTX$beta, ndum)
    }
    # resize the column vectors
    if(!is.null(MLISTX$alpha)) {
        MLIST$alpha <- rbind(MLISTX$alpha, matrix(0, ndum, 1))
    }
    if(!is.null(MLISTX$nu)) {
        MLIST$nu <- rbind(MLISTX$nu, matrix(0, ndum, 1))
    }
    # fix LAMBDA: unit loading of each dummy lv on its observed variable
    MLIST$lambda[ cbind(ov.idx, lv.idx) ] <- 1
    # fix PSI: move cov.x elements to PSI
    MLIST$psi[lv.idx, lv.idx] <- cov.x
    # move (ov.x.dummy elements of) GAMMA back into BETA
    MLIST$beta[seq_len(nfac), lv.idx] <- MLISTX$gamma
    MLIST$gamma <- NULL
    # fix ALPHA: dummy lv means are the covariate means
    if(!is.null(MLIST$alpha)) {
        MLIST$alpha[lv.idx] <- mean.x
    }
    MLIST
}
# if DELTA parameterization, compute residual elements (in theta, or psi)
# of observed categorical variables, as a function of other model parameters
#
# Rationale: under the delta parameterization the *total* variance of each
# categorical indicator is fixed at 1/delta^2, so the residual variance is
# whatever remains after subtracting the model-explained variance.
# For 'dummy' ov.y latent variables the residual lives in PSI rather than
# THETA, so those elements are moved there at the end.
#
# Arguments:
#   MLIST             - list of LISREL model matrices (lambda, theta, psi, ...)
#   num.idx           - indices of *numeric* observed variables; their theta
#                       elements are left untouched
#   ov.y.dummy.ov.idx - observed-variable indices of ov.y dummies
#   ov.y.dummy.lv.idx - latent-variable indices of the same ov.y dummies
# Returns: MLIST with recomputed theta (and psi) residual elements
setResidualElements.LISREL <- function(MLIST=NULL,
num.idx=NULL,
ov.y.dummy.ov.idx=NULL,
ov.y.dummy.lv.idx=NULL) {
# remove num.idx from ov.y.dummy.*: numeric dummies keep their own residual
if(length(num.idx) > 0L && length(ov.y.dummy.ov.idx) > 0L) {
n.idx <- which(ov.y.dummy.ov.idx %in% num.idx)
if(length(n.idx) > 0L) {
ov.y.dummy.ov.idx <- ov.y.dummy.ov.idx[-n.idx]
ov.y.dummy.lv.idx <- ov.y.dummy.lv.idx[-n.idx]
}
}
# force non-numeric theta elements to be zero, so that Sigma.hat below
# contains only the model-explained part of the variance
if(length(num.idx) > 0L) {
diag(MLIST$theta)[-num.idx] <- 0.0
} else {
diag(MLIST$theta) <- 0.0
}
if(length(ov.y.dummy.ov.idx) > 0L) {
MLIST$psi[ cbind(ov.y.dummy.lv.idx, ov.y.dummy.lv.idx) ] <- 0.0
}
# special case: PSI=0, and lambda=I (eg ex3.12)
# temporarily set diag(psi)=1, then subtract that unit contribution again
if(ncol(MLIST$psi) > 0L &&
sum(diag(MLIST$psi)) == 0.0 && all(diag(MLIST$lambda) == 1)) {
### FIXME: more elegant/general solution??
diag(MLIST$psi) <- 1
Sigma.hat <- computeSigmaHat.LISREL(MLIST = MLIST, delta=FALSE)
diag.Sigma <- diag(Sigma.hat) - 1.0
} else if(ncol(MLIST$psi) == 0L) {
# no latent variables at all: nothing is explained
diag.Sigma <- rep(0, ncol(MLIST$theta))
} else {
Sigma.hat <- computeSigmaHat.LISREL(MLIST = MLIST, delta=FALSE)
diag.Sigma <- diag(Sigma.hat)
}
# delta defaults to 1 (total variance 1) when absent
if(is.null(MLIST$delta)) {
delta <- rep(1, length(diag.Sigma))
} else {
delta <- MLIST$delta
}
# theta = DELTA^(-2) - diag( LAMBDA (I-B)^-1 PSI (I-B)^-T t(LAMBDA) )
RESIDUAL <- as.vector(1/(delta*delta) - diag.Sigma)
if(length(num.idx) > 0L) {
diag(MLIST$theta)[-num.idx] <- RESIDUAL[-num.idx]
} else {
diag(MLIST$theta) <- RESIDUAL
}
# move ov.y.dummy 'RESIDUAL' elements from THETA to PSI
if(length(ov.y.dummy.ov.idx) > 0L) {
MLIST$psi[cbind(ov.y.dummy.lv.idx, ov.y.dummy.lv.idx)] <-
MLIST$theta[cbind(ov.y.dummy.ov.idx, ov.y.dummy.ov.idx)]
MLIST$theta[cbind(ov.y.dummy.ov.idx, ov.y.dummy.ov.idx)] <- 0.0
}
MLIST
}
# if THETA parameterization, compute delta elements
# of observed categorical variables, as a function of other model parameters:
# (1/delta^2) = diag( LAMBDA (I-B)^-1 PSI (I-B)^-T t(LAMBDA) ) + THETA
# numeric variables keep delta == 1
setDeltaElements.LISREL <- function(MLIST=NULL, num.idx=NULL) {
    # model-implied total variances, without delta scaling
    total.var <- diag(computeSigmaHat.LISREL(MLIST = MLIST, delta=FALSE))
    # negative variances cannot be scaled: flag them as NA
    total.var[total.var < 0] <- as.numeric(NA)
    MLIST$delta[, 1L] <- sqrt(1/total.var)
    # numeric delta's stay 1.0
    if(length(num.idx) > 0L) {
        MLIST$delta[num.idx] <- 1.0
    }
    MLIST
}
# compute Sigma/ETA: variances/covariances of BOTH observed and latent variables
# in one (nvar+nlat) x (nvar+nlat) matrix; the observed block comes first,
# the latent block second
#
# Arguments:
#   MLIST - list of LISREL model matrices
#   cov.x - covariance matrix of the exogenous covariates (required when
#           MLIST contains a gamma matrix)
#   delta - if TRUE and MLIST$delta is present, rescale the observed block
computeCOV.LISREL <- function(MLIST=NULL, cov.x=NULL, delta=TRUE) {
LAMBDA <- MLIST$lambda; nvar <- nrow(LAMBDA)
PSI <- MLIST$psi; nlat <- nrow(PSI)
THETA <- MLIST$theta
BETA <- MLIST$beta
# 'extend' matrices: stacking [LAMBDA; I] yields both the observed and the
# latent part from a single product; THETA is padded with zeros because the
# latent part carries no measurement error
LAMBDA2 <- rbind(LAMBDA, diag(nlat))
THETA2 <- bdiag(THETA, matrix(0,nlat,nlat))
# beta? if present, premultiply with (I - B)^-1
if(is.null(BETA)) {
LAMBDA..IB.inv <- LAMBDA2
} else {
IB.inv <- .internal_get_IB.inv(MLIST = MLIST)
LAMBDA..IB.inv <- LAMBDA2 %*% IB.inv
}
# compute augment COV matrix
COV <- tcrossprod(LAMBDA..IB.inv %*% PSI, LAMBDA..IB.inv) + THETA2
# if delta, scale the observed block only
if(delta && !is.null(MLIST$delta)) {
DELTA <- diag(MLIST$delta[,1L], nrow=nvar, ncol=nvar)
COV[seq_len(nvar),seq_len(nvar)] <-
DELTA %*% COV[seq_len(nvar),seq_len(nvar)] %*% DELTA
}
# if GAMMA, also x part: add covariate-induced (co)variance
# note: IB.inv is only defined when BETA is non-NULL; the is.null(BETA)
# branch below avoids it on purpose
# NOTE(review): only the latent block is augmented here -- presumably the
# observed block is handled elsewhere; confirm with callers
GAMMA <- MLIST$gamma
if(!is.null(GAMMA)) {
stopifnot(!is.null(cov.x))
if(is.null(BETA)) {
SX <- tcrossprod(GAMMA %*% cov.x, GAMMA)
} else {
IB.inv..GAMMA <- IB.inv %*% GAMMA
SX <- tcrossprod(IB.inv..GAMMA %*% cov.x, IB.inv..GAMMA)
}
COV[(nvar+1):(nvar+nlat),(nvar+1):(nvar+nlat)] <-
COV[(nvar+1):(nvar+nlat),(nvar+1):(nvar+nlat)] + SX
}
COV
}
# derivative of the objective function
# returns, for each LISREL model matrix, the gradient of the fit function
# with respect to that matrix, given pre-computed weight matrices:
#   Omega    - weight matrix for the covariance part
#   Omega.mu - weight matrix for the mean part (NULL if no meanstructure)
# NOTE(review): the exact definition of Omega/Omega.mu depends on the
# estimator and is established by the caller -- not visible here
derivative.F.LISREL <- function(MLIST=NULL, Omega=NULL, Omega.mu=NULL) {
LAMBDA <- MLIST$lambda
PSI <- MLIST$psi
BETA <- MLIST$beta
ALPHA <- MLIST$alpha
# beta? precompute LAMBDA (I-B)^-1
if(is.null(BETA)) {
LAMBDA..IB.inv <- LAMBDA
} else {
IB.inv <- .internal_get_IB.inv(MLIST = MLIST)
LAMBDA..IB.inv <- LAMBDA %*% IB.inv
}
# meanstructure?
meanstructure <- FALSE; if(!is.null(Omega.mu)) meanstructure <- TRUE
# group weight?
group.w.free <- FALSE; if(!is.null(MLIST$gw)) group.w.free <- TRUE
# pre-compute some values shared by the LAMBDA and BETA derivatives
tLAMBDA..IB.inv <- t(LAMBDA..IB.inv)
if(!is.null(BETA)) {
Omega..LAMBDA..IB.inv..PSI..tIB.inv <-
( Omega %*% LAMBDA..IB.inv %*% PSI %*% t(IB.inv) )
} else {
Omega..LAMBDA <- Omega %*% LAMBDA
}
# 1. LAMBDA
if(!is.null(BETA)) {
if(meanstructure) {
LAMBDA.deriv <- -1.0 * ( Omega.mu %*% t(ALPHA) %*% t(IB.inv) +
Omega..LAMBDA..IB.inv..PSI..tIB.inv )
} else {
LAMBDA.deriv <- -1.0 * Omega..LAMBDA..IB.inv..PSI..tIB.inv
}
} else {
# no BETA
if(meanstructure) {
LAMBDA.deriv <- -1.0 * ( Omega.mu %*% t(ALPHA) +
Omega..LAMBDA %*% PSI )
} else {
LAMBDA.deriv <- -1.0 * (Omega..LAMBDA %*% PSI)
}
}
# 2. BETA (only defined when a beta matrix is present)
if(!is.null(BETA)) {
if(meanstructure) {
BETA.deriv <- -1.0*(( t(IB.inv) %*%
(t(LAMBDA) %*% Omega.mu %*% t(ALPHA)) %*%
t(IB.inv)) +
(tLAMBDA..IB.inv %*%
Omega..LAMBDA..IB.inv..PSI..tIB.inv))
} else {
BETA.deriv <- -1.0 * ( tLAMBDA..IB.inv %*%
Omega..LAMBDA..IB.inv..PSI..tIB.inv )
}
} else {
BETA.deriv <- NULL
}
# 3. PSI (symmetric: the diagonal contribution is halved)
PSI.deriv <- -1.0 * ( tLAMBDA..IB.inv %*% Omega %*% LAMBDA..IB.inv )
diag(PSI.deriv) <- 0.5 * diag(PSI.deriv)
# 4. THETA (symmetric: same diagonal correction as PSI)
THETA.deriv <- -1.0 * Omega
diag(THETA.deriv) <- 0.5 * diag(THETA.deriv)
if(meanstructure) {
# 5. NU
NU.deriv <- -1.0 * Omega.mu
# 6. ALPHA
ALPHA.deriv <- -1.0 * t( t(Omega.mu) %*% LAMBDA..IB.inv )
} else {
NU.deriv <- NULL
ALPHA.deriv <- NULL
}
if(group.w.free) {
GROUP.W.deriv <- 0.0
} else {
GROUP.W.deriv <- NULL
}
# one gradient matrix per model matrix (NULL when not applicable)
list(lambda = LAMBDA.deriv,
beta = BETA.deriv,
theta = THETA.deriv,
psi = PSI.deriv,
nu = NU.deriv,
alpha = ALPHA.deriv,
gw = GROUP.W.deriv)
}
# dSigma/dx -- per model matrix
# returns the Jacobian of vech(Sigma) -- the lower-triangular part of the
# model-implied covariance matrix -- with respect to the elements of model
# matrix 'm', restricted to columns 'idx'
# note:
# we avoid using the duplication and elimination matrices
# for now (perhaps until we'll use the Matrix package)
derivative.sigma.LISREL <- function(m="lambda",
# all model matrix elements, or only a few?
# NOTE: for symmetric matrices,
# we assume that they have full size
# (nvar*nvar) (but already correct for
# symmetry)
idx=seq_len(length(MLIST[[m]])),
MLIST=NULL,
delta = TRUE) {
LAMBDA <- MLIST$lambda; nvar <- nrow(LAMBDA); nfac <- ncol(LAMBDA)
PSI <- MLIST$psi
# only lower.tri part of sigma (not same order as elimination matrix?)
v.idx <- lav_matrix_vech_idx( nvar ); pstar <- nvar*(nvar+1)/2
# shortcut for gamma, nu, alpha and tau: these do not enter Sigma,
# so the derivative is a zero matrix
if(m == "nu" || m == "alpha" || m == "tau" || m == "gamma" || m == "gw") {
return( matrix(0.0, nrow=pstar, ncol=length(idx)) )
}
# Delta?
delta.flag <- FALSE
if(delta && !is.null(MLIST$delta)) {
DELTA <- MLIST$delta
delta.flag <- TRUE
} else if(m == "delta") { # modindices?
return( matrix(0.0, nrow=pstar, ncol=length(idx)) )
}
# beta? use a precomputed (I - B)^-1 when available
if(!is.null(MLIST$ibeta.inv)) {
IB.inv <- MLIST$ibeta.inv
} else {
IB.inv <- .internal_get_IB.inv(MLIST = MLIST)
}
# pre-compute terms shared by several branches
# IK = I + K (commutation matrix): accounts for Sigma's symmetry
if(m == "lambda" || m == "beta" || m == "delta")
IK <- diag(nvar*nvar) + lav_matrix_commutation(nvar, nvar)
if(m == "lambda" || m == "beta") {
IB.inv..PSI..tIB.inv..tLAMBDA <-
IB.inv %*% PSI %*% t(IB.inv) %*% t(LAMBDA)
}
if(m == "beta" || m == "psi") {
LAMBDA..IB.inv <- LAMBDA %*% IB.inv
}
# here we go:
if(m == "lambda") {
DX <- IK %*% t(IB.inv..PSI..tIB.inv..tLAMBDA %x% diag(nvar))
if(delta.flag)
DX <- DX * as.vector(DELTA %x% DELTA)
} else if(m == "beta") {
DX <- IK %*% ( t(IB.inv..PSI..tIB.inv..tLAMBDA) %x% LAMBDA..IB.inv )
# zero out the diagonal-of-BETA columns (diagonal is fixed to zero);
# this is not really needed (because we select idx=m.el.idx)
DX[,lav_matrix_diag_idx(nfac)] <- 0.0
if(delta.flag)
DX <- DX * as.vector(DELTA %x% DELTA)
} else if(m == "psi") {
DX <- (LAMBDA..IB.inv %x% LAMBDA..IB.inv)
# symmetry correction, but keeping all duplicated elements
# since we depend on idx=m.el.idx
# otherwise, we could simply postmultiply with the duplicationMatrix
# we sum up lower.tri + upper.tri (but not the diagonal elements!)
#imatrix <- matrix(1:nfac^2,nfac,nfac)
#lower.idx <- imatrix[lower.tri(imatrix, diag=FALSE)]
#upper.idx <- imatrix[upper.tri(imatrix, diag=FALSE)]
lower.idx <- lav_matrix_vech_idx(nfac, diagonal = FALSE)
upper.idx <- lav_matrix_vechru_idx(nfac, diagonal = FALSE)
# NOTE YR: upper.idx (see 3 lines up) is wrong in MH patch!
# fixed again 13/06/2012 after bug report of Mijke Rhemtulla.
offdiagSum <- DX[,lower.idx] + DX[,upper.idx]
DX[,c(lower.idx, upper.idx)] <- cbind(offdiagSum, offdiagSum)
if(delta.flag)
DX <- DX * as.vector(DELTA %x% DELTA)
} else if(m == "theta") {
DX <- diag(nvar*nvar) # very sparse...
# symmetry correction not needed, since all off-diagonal elements
# are zero?
if(delta.flag)
DX <- DX * as.vector(DELTA %x% DELTA)
} else if(m == "delta") {
# product rule: Delta enters Sigma on both sides (D Sigma D)
Omega <- computeSigmaHat.LISREL(MLIST, delta=FALSE)
DD <- diag(DELTA[,1], nvar, nvar)
DD.Omega <- (DD %*% Omega)
A <- DD.Omega %x% diag(nvar); B <- diag(nvar) %x% DD.Omega
DX <- A[,lav_matrix_diag_idx(nvar),drop=FALSE] +
B[,lav_matrix_diag_idx(nvar),drop=FALSE]
} else {
stop("wrong model matrix names: ", m, "\n")
}
# keep only the vech rows and the requested columns
DX <- DX[v.idx, idx, drop=FALSE]
DX
}
# dMu/dx -- per model matrix
# Jacobian of the model-implied mean vector Mu with respect to the
# elements of model matrix 'm', restricted to columns 'idx'
derivative.mu.LISREL <- function(m="alpha",
                                 # all model matrix elements, or only a few?
                                 idx=seq_len(length(MLIST[[m]])),
                                 MLIST=NULL) {
    LAMBDA <- MLIST$lambda
    nvar <- nrow(LAMBDA); nfac <- ncol(LAMBDA)
    # matrices that do not appear in the mean structure: zero derivative
    if(m %in% c("gamma", "psi", "theta", "tau", "delta", "gw")) {
        return( matrix(0.0, nrow=nvar, ncol=length(idx)) )
    }
    # ALPHA defaults to a zero vector when absent
    if(is.null(MLIST$alpha)) {
        ALPHA <- matrix(0, nfac, 1L)
    } else {
        ALPHA <- MLIST$alpha
    }
    # (I - B)^-1, re-using a precomputed version when available
    if(is.null(MLIST$ibeta.inv)) {
        IB.inv <- .internal_get_IB.inv(MLIST = MLIST)
    } else {
        IB.inv <- MLIST$ibeta.inv
    }
    DX <- switch(m,
        nu     = diag(nvar),
        lambda = t(IB.inv %*% ALPHA) %x% diag(nvar),
        beta   = {
            tmp <- t(IB.inv %*% ALPHA) %x% (LAMBDA %*% IB.inv)
            # diagonal of BETA is fixed to zero; not really needed
            # (because we select idx=m.el.idx)
            tmp[, lav_matrix_diag_idx(nfac)] <- 0.0
            tmp
        },
        alpha  = LAMBDA %*% IB.inv,
        stop("wrong model matrix names: ", m, "\n")
    )
    DX[, idx, drop=FALSE]
}
# dTh/dx -- per model matrix
# Jacobian of the threshold vector with respect to the elements of model
# matrix 'm'; th.idx maps each threshold row to its observed variable
# (entries equal to 0 mark the slot of a numeric variable -- no threshold)
derivative.th.LISREL <- function(m="tau",
# all model matrix elements, or only a few?
idx=seq_len(length(MLIST[[m]])),
th.idx=NULL,
MLIST=NULL,
delta = TRUE) {
LAMBDA <- MLIST$lambda; nvar <- nrow(LAMBDA); nfac <- ncol(LAMBDA)
TAU <- MLIST$tau; nth <- nrow(TAU)
# missing alpha: default to a zero mean vector
if(is.null(MLIST$alpha)) {
ALPHA <- matrix(0, nfac, 1L)
} else {
ALPHA <- MLIST$alpha
}
# missing nu: default to zero intercepts
if(is.null(MLIST$nu)) {
NU <- matrix(0, nvar, 1L)
} else {
NU <- MLIST$nu
}
# Delta?
delta.flag <- FALSE
if(delta && !is.null(MLIST$delta)) {
DELTA <- MLIST$delta
delta.flag <- TRUE
}
# K_nu expands a per-variable quantity to one row per threshold:
# variable i is repeated nlev[i] times (at least once)
if(is.null(th.idx)) {
th.idx <- seq_len(nth)
nlev <- rep(1L, nvar)
K_nu <- diag(nvar)
} else {
nlev <- tabulate(th.idx, nbins=nvar); nlev[nlev == 0L] <- 1L
K_nu <- matrix(0, sum(nlev), nvar)
K_nu[ cbind(seq_len(sum(nlev)), rep(seq_len(nvar), times=nlev)) ] <- 1.0
}
# shortcut for matrices that do not enter the thresholds: zero derivative
if(m == "gamma" || m == "psi" || m == "theta" || m == "gw") {
return( matrix(0.0, nrow=length(th.idx), ncol=length(idx) ) )
}
# beta? use a precomputed (I - B)^-1 when available
if(!is.null(MLIST$ibeta.inv)) {
IB.inv <- MLIST$ibeta.inv
} else {
IB.inv <- .internal_get_IB.inv(MLIST = MLIST)
}
if(m == "tau") {
# only the rows with an actual threshold (th.idx > 0) respond to TAU
DX <- matrix(0, nrow=length(th.idx), ncol=nth)
DX[ th.idx > 0L, ] <- diag(nth)
if(delta.flag)
DX <- DX * as.vector(K_nu %*% DELTA)
} else if(m == "nu") {
DX <- (-1) * K_nu
if(delta.flag)
DX <- DX * as.vector(K_nu %*% DELTA)
} else if(m == "lambda") {
DX <- (-1) * t(IB.inv %*% ALPHA) %x% diag(nvar)
DX <- K_nu %*% DX
if(delta.flag)
DX <- DX * as.vector(K_nu %*% DELTA)
} else if(m == "beta") {
DX <- (-1) * t(IB.inv %*% ALPHA) %x% (LAMBDA %*% IB.inv)
# diagonal of BETA is fixed to zero;
# this is not really needed (because we select idx=m.el.idx)
DX[,lav_matrix_diag_idx(nfac)] <- 0.0
DX <- K_nu %*% DX
if(delta.flag)
DX <- DX * as.vector(K_nu %*% DELTA)
} else if(m == "alpha") {
DX <- (-1) * LAMBDA %*% IB.inv
DX <- K_nu %*% DX
if(delta.flag)
DX <- DX * as.vector(K_nu %*% DELTA)
} else if(m == "delta") {
# thresholds scale with delta: d th / d delta = tau - (nu + mu)
DX1 <- matrix(0, nrow=length(th.idx), ncol=1)
DX1[ th.idx > 0L, ] <- TAU
DX2 <- NU + LAMBDA %*% IB.inv %*% ALPHA
DX2 <- K_nu %*% DX2
DX <- K_nu * as.vector(DX1 - DX2)
} else {
stop("wrong model matrix names: ", m, "\n")
}
DX <- DX[, idx, drop=FALSE]
DX
}
# dPi/dx -- per model matrix
# Jacobian of Pi (the slopes of the observed variables on the exogenous
# covariates) with respect to the elements of model matrix 'm'
derivative.pi.LISREL <- function(m="lambda",
                                 # all model matrix elements, or only a few?
                                 idx=seq_len(length(MLIST[[m]])),
                                 MLIST=NULL) {
    LAMBDA <- MLIST$lambda
    nvar <- nrow(LAMBDA); nfac <- ncol(LAMBDA)
    GAMMA <- MLIST$gamma; nexo <- ncol(GAMMA)
    # scaling factors (delta), if any
    have.delta <- !is.null(MLIST$delta)
    if(have.delta) {
        scale.vec <- MLIST$delta[,1L]
    }
    # matrices that do not enter Pi: zero derivative
    if(m %in% c("tau", "nu", "alpha", "psi", "theta", "gw")) {
        return( matrix(0.0, nrow=nvar*nexo, ncol=length(idx)) )
    }
    # (I - B)^-1, re-using a precomputed version when available
    if(is.null(MLIST$ibeta.inv)) {
        IB.inv <- .internal_get_IB.inv(MLIST = MLIST)
    } else {
        IB.inv <- MLIST$ibeta.inv
    }
    if(m == "lambda") {
        DX <- t(IB.inv %*% GAMMA) %x% diag(nvar)
        if(have.delta) DX <- DX * scale.vec
    } else if(m == "beta") {
        DX <- t(IB.inv %*% GAMMA) %x% (LAMBDA %*% IB.inv)
        # diagonal of BETA is fixed to zero; not really needed
        # (because we select idx=m.el.idx)
        DX[, lav_matrix_diag_idx(nfac)] <- 0.0
        if(have.delta) DX <- DX * scale.vec
    } else if(m == "gamma") {
        DX <- diag(nexo) %x% (LAMBDA %*% IB.inv)
        if(have.delta) DX <- DX * scale.vec
    } else if(m == "delta") {
        PRE <- rep(1, nexo) %x% diag(nvar)
        DX <- PRE * as.vector(LAMBDA %*% IB.inv %*% GAMMA)
    } else {
        stop("wrong model matrix names: ", m, "\n")
    }
    DX[, idx, drop=FALSE]
}
# dGW/dx -- per model matrix
# derivative of the (scalar) group weight: 1 for the gw matrix itself,
# zero for every other model matrix
derivative.gw.LISREL <- function(m="gw",
                                 # all model matrix elements, or only a few?
                                 idx=seq_len(length(MLIST[[m]])),
                                 MLIST=NULL) {
    # other model matrices do not enter gw: zero derivative
    if(m != "gw") {
        return( matrix(0.0, nrow=1L, ncol=length(idx)) )
    }
    DX <- matrix(1.0, 1, 1)
    DX[, idx, drop=FALSE]
}
# dlambda/dx -- per model matrix
# Jacobian of vec(LAMBDA): the identity for m == "lambda", zero otherwise
derivative.lambda.LISREL <- function(m="lambda",
                                     # all model matrix elements, or only a few?
                                     idx=seq_len(length(MLIST[[m]])),
                                     MLIST=NULL) {
    nel <- length(MLIST$lambda)
    # other model matrices do not enter LAMBDA: zero derivative
    if(m != "lambda") {
        return( matrix(0.0, nrow=nel, ncol=length(idx)) )
    }
    # d vec(LAMBDA) / d vec(LAMBDA) = I, keep requested columns only
    DX <- diag(1, nrow=nel, ncol=nel)
    DX[, idx, drop=FALSE]
}
# dpsi/dx -- per model matrix - FIXME!!!!!
# Jacobian of vech(PSI) (lower-triangular part only) with respect to the
# elements of model matrix 'm'
derivative.psi.LISREL <- function(m="psi",
                                  # all model matrix elements, or only a few?
                                  idx=seq_len(length(MLIST[[m]])),
                                  MLIST=NULL) {
    nel <- length(MLIST$psi)
    # rows: only the lower-triangular (vech) part of PSI
    v.idx <- lav_matrix_vech_idx( nrow(MLIST$psi) )
    # other model matrices do not enter PSI: zero derivative
    if(m != "psi") {
        zero <- matrix(0.0, nrow=nel, ncol=length(idx))
        return(zero[v.idx, , drop=FALSE])
    }
    DX <- diag(1, nrow=nel, ncol=nel)
    DX[v.idx, idx, drop=FALSE]
}
# dtheta/dx -- per model matrix
# Jacobian of vech(THETA) (lower-triangular part only) with respect to the
# elements of model matrix 'm'
derivative.theta.LISREL <- function(m="theta",
                                    # all model matrix elements, or only a few?
                                    idx=seq_len(length(MLIST[[m]])),
                                    MLIST=NULL) {
    nel <- length(MLIST$theta)
    # rows: only the lower-triangular (vech) part of THETA
    v.idx <- lav_matrix_vech_idx( nrow(MLIST$theta) )
    # other model matrices do not enter THETA: zero derivative
    if(m != "theta") {
        zero <- matrix(0.0, nrow=nel, ncol=length(idx))
        return(zero[v.idx, , drop=FALSE])
    }
    DX <- diag(1, nrow=nel, ncol=nel)
    DX[v.idx, idx, drop=FALSE]
}
# dbeta/dx -- per model matrix
# Jacobian of vec(BETA): the identity for m == "beta", zero otherwise
derivative.beta.LISREL <- function(m="beta",
                                   # all model matrix elements, or only a few?
                                   idx=seq_len(length(MLIST[[m]])),
                                   MLIST=NULL) {
    nel <- length(MLIST$beta)
    # other model matrices do not enter BETA: zero derivative
    if(m != "beta") {
        return( matrix(0.0, nrow=nel, ncol=length(idx)) )
    }
    DX <- diag(1, nrow=nel, ncol=nel)
    DX[, idx, drop=FALSE]
}
# dgamma/dx -- per model matrix
# Jacobian of vec(GAMMA): the identity for m == "gamma", zero otherwise
derivative.gamma.LISREL <- function(m="gamma",
                                    # all model matrix elements, or only a few?
                                    idx=seq_len(length(MLIST[[m]])),
                                    MLIST=NULL) {
    nel <- length(MLIST$gamma)
    # other model matrices do not enter GAMMA: zero derivative
    if(m != "gamma") {
        return( matrix(0.0, nrow=nel, ncol=length(idx)) )
    }
    DX <- diag(1, nrow=nel, ncol=nel)
    DX[, idx, drop=FALSE]
}
# dnu/dx -- per model matrix
# Jacobian of vec(NU): the identity for m == "nu", zero otherwise
derivative.nu.LISREL <- function(m="nu",
                                 # all model matrix elements, or only a few?
                                 idx=seq_len(length(MLIST[[m]])),
                                 MLIST=NULL) {
    nel <- length(MLIST$nu)
    # other model matrices do not enter NU: zero derivative
    if(m != "nu") {
        return( matrix(0.0, nrow=nel, ncol=length(idx)) )
    }
    DX <- diag(1, nrow=nel, ncol=nel)
    DX[, idx, drop=FALSE]
}
# dtau/dx -- per model matrix
# Jacobian of vec(TAU): the identity for m == "tau", zero otherwise
derivative.tau.LISREL <- function(m="tau",
                                  # all model matrix elements, or only a few?
                                  idx=seq_len(length(MLIST[[m]])),
                                  MLIST=NULL) {
    nel <- length(MLIST$tau)
    # other model matrices do not enter TAU: zero derivative
    if(m != "tau") {
        return( matrix(0.0, nrow=nel, ncol=length(idx)) )
    }
    DX <- diag(1, nrow=nel, ncol=nel)
    DX[, idx, drop=FALSE]
}
# dalpha/dx -- per model matrix
# Jacobian of vec(ALPHA): the identity for m == "alpha", zero otherwise
derivative.alpha.LISREL <- function(m="alpha",
                                    # all model matrix elements, or only a few?
                                    idx=seq_len(length(MLIST[[m]])),
                                    MLIST=NULL) {
    nel <- length(MLIST$alpha)
    # other model matrices do not enter ALPHA: zero derivative
    if(m != "alpha") {
        return( matrix(0.0, nrow=nel, ncol=length(idx)) )
    }
    DX <- diag(1, nrow=nel, ncol=nel)
    DX[, idx, drop=FALSE]
}
# MLIST = NULL; meanstructure=TRUE; th=TRUE; delta=TRUE; pi=TRUE; gw=FALSE
# lav_matrix_vech_idx <- lavaan:::lav_matrix_vech_idx; lav_matrix_vechru_idx <- lavaan:::lav_matrix_vechru_idx
# vec <- lavaan:::vec; lav_func_jacobian_complex <- lavaan:::lav_func_jacobian_complex
# computeSigmaHat.LISREL <- lavaan:::computeSigmaHat.LISREL
# setDeltaElements.LISREL <- lavaan:::setDeltaElements.LISREL
TESTING_derivatives.LISREL <- function(MLIST = NULL,
nvar = NULL, nfac = NULL, nexo = NULL,
th.idx = NULL, num.idx = NULL,
meanstructure = TRUE,
th = TRUE, delta = TRUE, pi = TRUE,
gw = FALSE, theta = FALSE,
debug = FALSE) {
if(is.null(MLIST)) {
# create artificial matrices, compare 'numerical' vs 'analytical'
# derivatives
#nvar <- 12; nfac <- 3; nexo <- 4 # this combination is special?
if(is.null(nvar)) {
nvar <- 20
}
if(is.null(nfac)) {
nfac <- 6
}
if(is.null(nexo)) {
nexo <- 5
}
if(is.null(num.idx)) {
num.idx <- sort(sample(seq_len(nvar), ceiling(nvar/2)))
}
if(is.null(th.idx)) {
th.idx <- integer(0L)
for(i in seq_len(nvar)) {
if(i %in% num.idx) {
th.idx <- c(th.idx, 0)
} else {
th.idx <- c(th.idx, rep(i, sample(c(1,1,2,6), 1L)))
}
}
}
nth <- sum(th.idx > 0L)
MLIST <- list()
MLIST$lambda <- matrix(0,nvar,nfac)
MLIST$beta <- matrix(0,nfac,nfac)
MLIST$theta <- matrix(0,nvar,nvar)
MLIST$psi <- matrix(0,nfac,nfac)
if(meanstructure) {
MLIST$alpha <- matrix(0,nfac,1L)
MLIST$nu <- matrix(0,nvar,1L)
}
if(th) MLIST$tau <- matrix(0,nth,1L)
if(delta) MLIST$delta <- matrix(0,nvar,1L)
MLIST$gamma <- matrix(0,nfac,nexo)
if(gw) MLIST$gw <- matrix(0, 1L, 1L)
# feed random numbers
MLIST <- lapply(MLIST, function(x) {x[,] <- rnorm(length(x)); x})
# fix
diag(MLIST$beta) <- 0.0
diag(MLIST$theta) <- diag(MLIST$theta)*diag(MLIST$theta) * 10
diag(MLIST$psi) <- diag(MLIST$psi)*diag(MLIST$psi) * 10
MLIST$psi[ lav_matrix_vechru_idx(nfac) ] <-
MLIST$psi[ lav_matrix_vech_idx(nfac) ]
MLIST$theta[ lav_matrix_vechru_idx(nvar) ] <-
MLIST$theta[ lav_matrix_vech_idx(nvar) ]
if(delta) MLIST$delta[,] <- abs(MLIST$delta)*10
} else {
nvar <- nrow(MLIST$lambda)
}
compute.sigma <- function(x, mm="lambda", MLIST=NULL) {
mlist <- MLIST
if(mm %in% c("psi", "theta")) {
mlist[[mm]] <- lav_matrix_vech_reverse(x)
} else {
mlist[[mm]][,] <- x
}
if(theta) {
mlist <- setDeltaElements.LISREL(MLIST = mlist, num.idx = num.idx)
}
lav_matrix_vech(computeSigmaHat.LISREL(mlist))
}
compute.mu <- function(x, mm="lambda", MLIST=NULL) {
mlist <- MLIST
if(mm %in% c("psi", "theta")) {
mlist[[mm]] <- lav_matrix_vech_reverse(x)
} else {
mlist[[mm]][,] <- x
}
if(theta) {
mlist <- setDeltaElements.LISREL(MLIST = mlist, num.idx = num.idx)
}
computeMuHat.LISREL(mlist)
}
compute.th2 <- function(x, mm="tau", MLIST=NULL, th.idx) {
mlist <- MLIST
if(mm %in% c("psi", "theta")) {
mlist[[mm]] <- lav_matrix_vech_reverse(x)
} else {
mlist[[mm]][,] <- x
}
if(theta) {
mlist <- setDeltaElements.LISREL(MLIST = mlist, num.idx = num.idx)
}
computeTH.LISREL(mlist, th.idx=th.idx)
}
compute.pi <- function(x, mm="lambda", MLIST=NULL) {
mlist <- MLIST
if(mm %in% c("psi", "theta")) {
mlist[[mm]] <- lav_matrix_vech_reverse(x)
} else {
mlist[[mm]][,] <- x
}
if(theta) {
mlist <- setDeltaElements.LISREL(MLIST = mlist, num.idx = num.idx)
}
computePI.LISREL(mlist)
}
compute.gw <- function(x, mm="gw", MLIST=NULL) {
mlist <- MLIST
if(mm %in% c("psi", "theta")) {
mlist[[mm]] <- lav_matrix_vech_reverse(x)
} else {
mlist[[mm]][,] <- x
}
if(theta) {
mlist <- setDeltaElements.LISREL(MLIST = mlist, num.idx = num.idx)
}
mlist$gw[1,1]
}
# if theta, set MLIST$delta
if(theta) {
MLIST <- setDeltaElements.LISREL(MLIST = MLIST, num.idx = num.idx)
}
for(mm in names(MLIST)) {
if(mm %in% c("psi", "theta")) {
x <- lav_matrix_vech(MLIST[[mm]])
} else {
x <- lav_matrix_vec(MLIST[[mm]])
}
if(mm == "delta" && theta) next
if(debug) {
cat("### mm = ", mm, "\n")
}
# 1. sigma
DX1 <- lav_func_jacobian_complex(func=compute.sigma, x=x, mm=mm, MLIST=MLIST)
DX2 <- derivative.sigma.LISREL(m=mm, idx=seq_len(length(MLIST[[mm]])),
MLIST=MLIST, delta = !theta)
if(mm %in% c("psi","theta")) {
# remove duplicated columns of symmetric matrices
idx <- lav_matrix_vechru_idx(sqrt(ncol(DX2)), diagonal=FALSE)
if(length(idx) > 0L) DX2 <- DX2[,-idx]
}
if(theta) {
sigma.hat <- computeSigmaHat.LISREL(MLIST=MLIST, delta=FALSE)
R <- lav_deriv_cov2cor(sigma.hat, num.idx = num.idx)
DX3 <- DX2
DX2 <- R %*% DX2
}
if(debug) {
cat("[SIGMA] mm = ", sprintf("%-8s:", mm), "DX1 (numerical):\n");
print(zapsmall(DX1)); cat("\n")
cat("[SIGMA] mm = ", sprintf("%-8s:", mm), "DX2 (analytical):\n");
print(DX2); cat("\n")
cat("[SIGMA] mm = ", sprintf("%-8s:", mm), "DX3 (analytical):\n");
print(DX3); cat("\n")
}
cat("[SIGMA] mm = ", sprintf("%-8s:", mm), "sum delta = ",
sprintf("%12.9f", sum(DX1-DX2)), " max delta = ",
sprintf("%12.9f", max(DX1-DX2)), "\n")
# 2. mu
DX1 <- lav_func_jacobian_complex(func=compute.mu, x=x, mm=mm, MLIST=MLIST)
DX2 <- derivative.mu.LISREL(m=mm, idx=seq_len(length(MLIST[[mm]])),
MLIST=MLIST)
if(mm %in% c("psi","theta")) {
# remove duplicated columns of symmetric matrices
idx <- lav_matrix_vechru_idx(sqrt(ncol(DX2)), diagonal = FALSE)
if(length(idx) > 0L) DX2 <- DX2[,-idx]
}
cat("[MU ] mm = ", sprintf("%-8s:", mm), "sum delta = ",
sprintf("%12.9f", sum(DX1-DX2)), " max delta = ",
sprintf("%12.9f", max(DX1-DX2)), "\n")
if(debug) {
cat("[MU ] mm = ", sprintf("%-8s:", mm), "DX1 (numerical):\n");
print(zapsmall(DX1)); cat("\n")
cat("[MU ] mm = ", sprintf("%-8s:", mm), "DX2 (analytical):\n");
print(DX2); cat("\n")
}
# 3. th
if(th) {
DX1 <- lav_func_jacobian_complex(func=compute.th2, x=x, mm=mm, MLIST=MLIST,
th.idx=th.idx)
DX2 <- derivative.th.LISREL(m=mm, idx=seq_len(length(MLIST[[mm]])),
MLIST=MLIST, th.idx=th.idx,
delta=TRUE)
if(theta) {
# 1. compute dDelta.dx
dxSigma <-
derivative.sigma.LISREL(m=mm, idx=seq_len(length(MLIST[[mm]])),
MLIST=MLIST, delta = !theta)
var.idx <- which(!lav_matrix_vech_idx(nvar) %in%
lav_matrix_vech_idx(nvar, diagonal = FALSE))
sigma.hat <- computeSigmaHat.LISREL(MLIST=MLIST, delta=FALSE)
dsigma <- diag(sigma.hat)
# dy/ddsigma = -0.5/(ddsigma*sqrt(ddsigma))
dDelta.dx <- dxSigma[var.idx,] * -0.5 / (dsigma*sqrt(dsigma))
# 2. compute dth.dDelta
dth.dDelta <-
derivative.th.LISREL(m="delta",
idx=seq_len(length(MLIST[["delta"]])),
MLIST=MLIST, th.idx=th.idx)
# 3. add dth.dDelta %*% dDelta.dx
no.num.idx <- which(th.idx > 0)
DX2[no.num.idx,] <- DX2[no.num.idx,,drop=FALSE] +
(dth.dDelta %*% dDelta.dx)[no.num.idx,,drop=FALSE]
#DX2 <- DX2 + dth.dDelta %*% dDelta.dx
}
if(mm %in% c("psi","theta")) {
# remove duplicated columns of symmetric matrices
idx <- lav_matrix_vechru_idx(sqrt(ncol(DX2)), diagonal = FALSE)
if(length(idx) > 0L) DX2 <- DX2[,-idx]
}
cat("[TH ] mm = ", sprintf("%-8s:", mm), "sum delta = ",
sprintf("%12.9f", sum(DX1-DX2)), " max delta = ",
sprintf("%12.9f", max(DX1-DX2)), "\n")
if(debug) {
cat("[TH ] mm = ",sprintf("%-8s:", mm),"DX1 (numerical):\n")
print(zapsmall(DX1)); cat("\n")
cat("[TH ] mm = ",sprintf("%-8s:", mm),"DX2 (analytical):\n")
print(DX2); cat("\n")
}
}
# 4. pi
if(pi) {
DX1 <- lav_func_jacobian_complex(func=compute.pi, x=x, mm=mm, MLIST=MLIST)
DX2 <- derivative.pi.LISREL(m=mm, idx=seq_len(length(MLIST[[mm]])),
MLIST=MLIST)
if(mm %in% c("psi","theta")) {
# remove duplicated columns of symmetric matrices
idx <- lav_matrix_vechru_idx(sqrt(ncol(DX2)), diagonal = FALSE)
if(length(idx) > 0L) DX2 <- DX2[,-idx]
}
if(theta) {
# 1. compute dDelta.dx
dxSigma <-
derivative.sigma.LISREL(m=mm, idx=seq_len(length(MLIST[[mm]])),
MLIST=MLIST, delta = !theta)
if(mm %in% c("psi","theta")) {
# remove duplicated columns of symmetric matrices
idx <- lav_matrix_vechru_idx(sqrt(ncol(dxSigma)), diagonal = FALSE)
if(length(idx) > 0L) dxSigma <- dxSigma[,-idx]
}
var.idx <- which(!lav_matrix_vech_idx(nvar) %in%
lav_matrix_vech_idx(nvar, diagonal = FALSE))
sigma.hat <- computeSigmaHat.LISREL(MLIST=MLIST, delta=FALSE)
dsigma <- diag(sigma.hat)
# dy/ddsigma = -0.5/(ddsigma*sqrt(ddsigma))
dDelta.dx <- dxSigma[var.idx,] * -0.5 / (dsigma*sqrt(dsigma))
# 2. compute dpi.dDelta
dpi.dDelta <-
derivative.pi.LISREL(m="delta",
idx=seq_len(length(MLIST[["delta"]])),
MLIST=MLIST)
# 3. add dpi.dDelta %*% dDelta.dx
no.num.idx <- which(! seq.int(1L, nvar) %in% num.idx )
no.num.idx <- rep(seq.int(0,nexo-1) * nvar,
each=length(no.num.idx)) + no.num.idx
DX2[no.num.idx,] <- DX2[no.num.idx,,drop=FALSE] +
(dpi.dDelta %*% dDelta.dx)[no.num.idx,,drop=FALSE]
}
cat("[PI ] mm = ", sprintf("%-8s:", mm), "sum delta = ",
sprintf("%12.9f", sum(DX1-DX2)), " max delta = ",
sprintf("%12.9f", max(DX1-DX2)), "\n")
if(debug) {
cat("[PI ] mm = ",sprintf("%-8s:", mm),"DX1 (numerical):\n")
print(zapsmall(DX1)); cat("\n")
cat("[PI ] mm = ",sprintf("%-8s:", mm),"DX2 (analytical):\n")
print(DX2); cat("\n")
}
}
# 5. gw
if(gw) {
DX1 <- lav_func_jacobian_complex(func=compute.gw, x=x, mm=mm, MLIST=MLIST)
DX2 <- derivative.gw.LISREL(m=mm, idx=seq_len(length(MLIST[[mm]])),
MLIST=MLIST)
if(mm %in% c("psi","theta")) {
# remove duplicated columns of symmetric matrices
idx <- lav_matrix_vechru_idx(sqrt(ncol(DX2)), diagonal = FALSE)
if(length(idx) > 0L) DX2 <- DX2[,-idx]
}
cat("[GW ] mm = ", sprintf("%-8s:", mm), "sum delta = ",
sprintf("%12.9f", sum(DX1-DX2)), " max delta = ",
sprintf("%12.9f", max(DX1-DX2)), "\n")
if(debug) {
cat("[GW ] mm = ",sprintf("%-8s:", mm),"DX1 (numerical):\n")
print(DX1); cat("\n\n")
cat("[GW ] mm = ",sprintf("%-8s:", mm),"DX2 (analytical):\n")
print(DX2); cat("\n\n")
}
}
}
MLIST$th.idx <- th.idx
MLIST$num.idx <- num.idx
MLIST
}
| /lavaan/R/lav_representation_lisrel.R | no_license | ingted/R-Examples | R | false | false | 83,196 | r | # and matrix-representation specific functions:
# - computeSigmaHat
# - computeMuHat
# - derivative.F
# initial version: YR 2011-01-21: LISREL stuff
# updates: YR 2011-12-01: group specific extraction
# YR 2012-05-17: thresholds
# Build the LISREL 'representation' of a lavaan parameter table.
#
# For every row of `target` (defaults to `partable`), determine in which
# LISREL model matrix (lambda, beta, theta, psi, nu, alpha, tau, delta,
# gamma, gw) the parameter lives, and at which (row, col) position.
#
# Observed variables that appear on the lhs/rhs of a regression (or in a
# covariance involving such a variable) but are not latent are promoted
# to 'phantom/dummy' latent variables; their names are returned in the
# "ov.dummy.names.nox"/"ov.dummy.names.x" attributes (one entry per group).
#
# Returns a list with elements mat/row/col, parallel to the rows of
# `target`. If extra = TRUE, also attaches per-group attributes describing
# the model matrices needed (names, number, dimensions, dimnames,
# symmetry) so they can be allocated later.
representation.LISREL <- function(partable=NULL, target=NULL,
                                  extra=FALSE, remove.nonexisting=TRUE) {
    # prepare target list
    if(is.null(target)) target <- partable
    # prepare output: one (mat, row, col) triplet per target row
    N <- length(target$lhs)
    tmp.mat <- character(N); tmp.row <- integer(N); tmp.col <- integer(N)
    # global settings
    meanstructure <- any(partable$op == "~1")
    categorical <- any(partable$op == "|")
    # NOTE(review): group.w.free is computed but not used below -- confirm
    group.w.free <- any(partable$lhs == "group" & partable$op == "%")
    # a GAMMA matrix is only used in the categorical case
    gamma <- categorical
    # number of groups
    ngroups <- max(partable$group)
    ov.dummy.names.nox <- vector("list", ngroups)
    ov.dummy.names.x <- vector("list", ngroups)
    if(extra) {
        REP.mmNames <- vector("list", ngroups)
        REP.mmNumber <- vector("list", ngroups)
        REP.mmRows <- vector("list", ngroups)
        REP.mmCols <- vector("list", ngroups)
        REP.mmDimNames <- vector("list", ngroups)
        REP.mmSymmetric <- vector("list", ngroups)
    }
    for(g in 1:ngroups) {
        # info from user model per group
        if(gamma) {
            ov.names <- vnames(partable, "ov.nox", group=g)
        } else {
            ov.names <- vnames(partable, "ov", group=g)
        }
        nvar <- length(ov.names)
        lv.names <- vnames(partable, "lv", group=g); nfac <- length(lv.names)
        ov.th <- vnames(partable, "th", group=g); nth <- length(ov.th)
        ov.names.x <- vnames(partable, "ov.x",group=g); nexo <- length(ov.names.x)
        ov.names.nox <- vnames(partable, "ov.nox",group=g)
        # in this representation, we need to create 'phantom/dummy' latent
        # variables for all `x' and `y' variables not in lv.names
        # (only y if categorical)
        # regression dummys
        if(categorical) {
            tmp.names <-
                unique( partable$lhs[(partable$op == "~" |
                                      partable$op == "<~") &
                                     partable$group == g] )
        } else {
            tmp.names <-
                unique( c(partable$lhs[(partable$op == "~" |
                                        partable$op == "<~") &
                                       partable$group == g],
                          partable$rhs[(partable$op == "~" |
                                        partable$op == "<~") &
                                       partable$group == g]) )
        }
        dummy.names1 <- tmp.names[ !tmp.names %in% lv.names ]
        # covariances involving dummys
        dummy.cov.idx <- which(partable$op == "~~" & partable$group == g &
                               (partable$lhs %in% dummy.names1 |
                                partable$rhs %in% dummy.names1))
        dummy.names2 <- unique( c(partable$lhs[dummy.cov.idx],
                                  partable$rhs[dummy.cov.idx]) )
        # collect all dummy variables
        dummy.names <- unique(c(dummy.names1, dummy.names2))
        if(length(dummy.names)) {
            # make sure order is the same as ov.names
            ov.dummy.names.nox[[g]] <-
                ov.names.nox[ ov.names.nox %in% dummy.names ]
            ov.dummy.names.x[[g]] <-
                ov.names.x[ ov.names.x %in% dummy.names ]
            # combine them, make sure order is identical to ov.names
            tmp <- ov.names[ ov.names %in% dummy.names ]
            # extend lv.names
            lv.names <- c(lv.names, tmp)
            nfac <- length(lv.names)
            # add 'dummy' =~ entries
            # NOTE(review): dummy.mat is assigned but never used -- confirm
            dummy.mat <- rep("lambda", length(dummy.names))
        } else {
            ov.dummy.names.nox[[g]] <- character(0)
            ov.dummy.names.x[[g]] <- character(0)
        }
        # 1a. "=~" regular indicators
        idx <- which(target$group == g &
                     target$op == "=~" & !(target$rhs %in% lv.names))
        tmp.mat[idx] <- "lambda"
        tmp.row[idx] <- match(target$rhs[idx], ov.names)
        tmp.col[idx] <- match(target$lhs[idx], lv.names)
        # 1b. "=~" regular higher-order lv indicators
        idx <- which(target$group == g &
                     target$op == "=~" & !(target$rhs %in% ov.names))
        tmp.mat[idx] <- "beta"
        tmp.row[idx] <- match(target$rhs[idx], lv.names)
        tmp.col[idx] <- match(target$lhs[idx], lv.names)
        # 1c. "=~" indicators that are both in ov and lv
        idx <- which(target$group == g &
                     target$op == "=~" & target$rhs %in% ov.names
                     & target$rhs %in% lv.names)
        tmp.mat[idx] <- "beta"
        tmp.row[idx] <- match(target$rhs[idx], lv.names)
        tmp.col[idx] <- match(target$lhs[idx], lv.names)
        # 2. "~" regressions
        if(categorical) {
            # gamma: regressions on observed exogenous covariates
            idx <- which(target$rhs %in% ov.names.x &
                         target$group == g & (target$op == "~" |
                                              target$op == "<~") )
            tmp.mat[idx] <- "gamma"
            tmp.row[idx] <- match(target$lhs[idx], lv.names)
            tmp.col[idx] <- match(target$rhs[idx], ov.names.x)
            # beta: regressions among (possibly dummy) latent variables
            idx <- which(!target$rhs %in% ov.names.x &
                         target$group == g & (target$op == "~" |
                                              target$op == "<~") )
            tmp.mat[idx] <- "beta"
            tmp.row[idx] <- match(target$lhs[idx], lv.names)
            tmp.col[idx] <- match(target$rhs[idx], lv.names)
        } else {
            idx <- which(target$group == g & (target$op == "~" |
                                              target$op == "<~") )
            tmp.mat[idx] <- "beta"
            tmp.row[idx] <- match(target$lhs[idx], lv.names)
            tmp.col[idx] <- match(target$rhs[idx], lv.names)
        }
        # 3a. "~~" ov
        idx <- which(target$group == g &
                     target$op == "~~" & !(target$lhs %in% lv.names))
        tmp.mat[idx] <- "theta"
        tmp.row[idx] <- match(target$lhs[idx], ov.names)
        tmp.col[idx] <- match(target$rhs[idx], ov.names)
        # 3b. "~~" lv
        idx <- which(target$group == g &
                     target$op == "~~" & target$rhs %in% lv.names)
        tmp.mat[idx] <- "psi"
        tmp.row[idx] <- match(target$lhs[idx], lv.names)
        tmp.col[idx] <- match(target$rhs[idx], lv.names)
        # 4a. "~1" ov
        idx <- which(target$group == g &
                     target$op == "~1" & !(target$lhs %in% lv.names))
        tmp.mat[idx] <- "nu"
        tmp.row[idx] <- match(target$lhs[idx], ov.names)
        tmp.col[idx] <- 1L
        # 4b. "~1" lv
        idx <- which(target$group == g &
                     target$op == "~1" & target$lhs %in% lv.names)
        tmp.mat[idx] <- "alpha"
        tmp.row[idx] <- match(target$lhs[idx], lv.names)
        tmp.col[idx] <- 1L
        # 5. "|" th -- match "lhs|rhs" labels against the threshold names
        LABEL <- paste(target$lhs, target$op, target$rhs, sep="")
        idx <- which(target$group == g &
                     target$op == "|" & LABEL %in% ov.th)
        TH <- paste(target$lhs[idx], "|", target$rhs[idx], sep="")
        tmp.mat[idx] <- "tau"
        tmp.row[idx] <- match(TH, ov.th)
        tmp.col[idx] <- 1L
        # 6. "~*~" scales
        idx <- which(target$group == g &
                     target$op == "~*~")
        tmp.mat[idx] <- "delta"
        tmp.row[idx] <- match(target$lhs[idx], ov.names)
        tmp.col[idx] <- 1L
        # new 0.5-12: catch lower-elements in theta/psi
        # (only the upper triangle is stored; swap row/col when needed)
        idx.lower <- which(tmp.mat %in% c("theta","psi") & tmp.row > tmp.col)
        if(length(idx.lower) > 0L) {
            tmp <- tmp.row[idx.lower]
            tmp.row[idx.lower] <- tmp.col[idx.lower]
            tmp.col[idx.lower] <- tmp
        }
        # new 0.5-16: group weights
        idx <- which(target$group == g & target$lhs == "group" &
                     target$op == "%")
        tmp.mat[idx] <- "gw"
        tmp.row[idx] <- 1L
        tmp.col[idx] <- 1L
        if(extra) {
            # mRows: number of rows of each candidate model matrix
            mmRows <- list(tau = nth,
                           delta = nvar,
                           nu = nvar,
                           lambda = nvar,
                           theta = nvar,
                           alpha = nfac,
                           beta = nfac,
                           gamma = nfac,
                           gw = 1L,
                           psi = nfac)
            # mCols: number of columns of each candidate model matrix
            mmCols <- list(tau = 1L,
                           delta = 1L,
                           nu = 1L,
                           lambda = nfac,
                           theta = nvar,
                           alpha = 1L,
                           beta = nfac,
                           gamma = nexo,
                           gw = 1L,
                           psi = nfac)
            # dimNames for LISREL model matrices
            mmDimNames <- list(tau = list( ov.th, "threshold"),
                               delta = list( ov.names, "scales"),
                               nu = list( ov.names, "intercept"),
                               lambda = list( ov.names, lv.names),
                               theta = list( ov.names, ov.names),
                               alpha = list( lv.names, "intercept"),
                               beta = list( lv.names, lv.names),
                               gamma = list( lv.names, ov.names.x),
                               gw = list( "group", "weight"),
                               psi = list( lv.names, lv.names))
            # isSymmetric
            mmSymmetric <- list(tau = FALSE,
                                delta = FALSE,
                                nu = FALSE,
                                lambda = FALSE,
                                theta = TRUE,
                                alpha = FALSE,
                                beta = FALSE,
                                gamma = FALSE,
                                gw = FALSE,
                                psi = TRUE)
            # which mm's do we need? (always include lambda, theta and psi)
            mmNames <- c("lambda", "theta", "psi")
            if("beta" %in% tmp.mat) mmNames <- c(mmNames, "beta")
            if(meanstructure) mmNames <- c(mmNames, "nu", "alpha")
            if("tau" %in% tmp.mat) mmNames <- c(mmNames, "tau")
            if("delta" %in% tmp.mat) mmNames <- c(mmNames, "delta")
            if("gamma" %in% tmp.mat) mmNames <- c(mmNames, "gamma")
            if("gw" %in% tmp.mat) mmNames <- c(mmNames, "gw")
            REP.mmNames[[g]] <- mmNames
            REP.mmNumber[[g]] <- length(mmNames)
            REP.mmRows[[g]] <- unlist(mmRows[ mmNames ])
            REP.mmCols[[g]] <- unlist(mmCols[ mmNames ])
            REP.mmDimNames[[g]] <- mmDimNames[ mmNames ]
            REP.mmSymmetric[[g]] <- unlist(mmSymmetric[ mmNames ])
        } # extra
    } # ngroups
    REP <- list(mat = tmp.mat,
                row = tmp.row,
                col = tmp.col)
    # remove non-existing (NAs)?
    # here we remove `non-existing' parameters; this depends on the matrix
    # representation (eg in LISREL rep, there is no ~~ between lv and ov)
    #if(remove.nonexisting) {
    #    idx <- which( nchar(REP$mat) > 0L &
    #                  !is.na(REP$row) & REP$row > 0L &
    #                  !is.na(REP$col) & REP$col > 0L )
    #    # but keep ==, :=, etc.
    #    idx <- c(idx, which(partable$op %in% c("==", ":=", "<", ">")))
    #    REP$mat <- REP$mat[idx]
    #    REP$row <- REP$row[idx]
    #    REP$col <- REP$col[idx]
    #
    # always add 'ov.dummy.*.names' attributes
    attr(REP, "ov.dummy.names.nox") <- ov.dummy.names.nox
    attr(REP, "ov.dummy.names.x") <- ov.dummy.names.x
    if(extra) {
        attr(REP, "mmNames") <- REP.mmNames
        attr(REP, "mmNumber") <- REP.mmNumber
        attr(REP, "mmRows") <- REP.mmRows
        attr(REP, "mmCols") <- REP.mmCols
        attr(REP, "mmDimNames") <- REP.mmDimNames
        attr(REP, "mmSymmetric") <- REP.mmSymmetric
    }
    REP
}
# ETA:
# 1) EETA
# 2) EETAx
# 3) VETA
# 4) VETAx
# 1) EETA
# compute E(ETA): expected value of latent variables (marginal over x)
# - if no eXo (and GAMMA):
# E(ETA) = (I-B)^-1 ALPHA
# - if eXo and GAMMA:
# E(ETA) = (I-B)^-1 ALPHA + (I-B)^-1 GAMMA mean.x
# E(ETA): expected value of the latent variables, marginal over x.
# - no eXo/GAMMA:  E(ETA) = (I-B)^-1 ALPHA
# - eXo and GAMMA: E(ETA) = (I-B)^-1 ALPHA + (I-B)^-1 GAMMA mean.x
# ALPHA is reconstructed from the sample means when absent (no 'fixing').
computeEETA.LISREL <- function(MLIST=NULL, mean.x=NULL,
                               sample.mean=NULL,
                               ov.y.dummy.ov.idx=NULL,
                               ov.x.dummy.ov.idx=NULL,
                               ov.y.dummy.lv.idx=NULL,
                               ov.x.dummy.lv.idx=NULL) {
    # reconstruct ALPHA if it is absent from MLIST
    ALPHA <- .internal_get_ALPHA(MLIST = MLIST, sample.mean = sample.mean,
                                 ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                                 ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                                 ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                                 ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    if(!is.null(MLIST$beta)) {
        # structural part present: premultiply by (I-B)^-1
        IB.inv <- .internal_get_IB.inv(MLIST = MLIST)
        if(!is.null(MLIST$gamma)) {
            out <- IB.inv %*% ALPHA + IB.inv %*% MLIST$gamma %*% mean.x
        } else {
            out <- IB.inv %*% ALPHA
        }
    } else {
        if(!is.null(MLIST$gamma)) {
            out <- ALPHA + MLIST$gamma %*% mean.x
        } else {
            out <- ALPHA
        }
    }
    as.vector(out)
}
# 2) EETAx
# compute E(ETA|x_i): conditional expected value of latent variable,
# given specific value of x_i
# - if no eXo (and GAMMA):
# E(ETA) = (I-B)^-1 ALPHA
# we return a matrix of size [nobs x nfac] replicating E(ETA)
# - if eXo and GAMMA:
# E(ETA|x_i) = (I-B)^-1 ALPHA + (I-B)^-1 GAMMA x_i
# we return a matrix of size [nobs x nfac]
#
# E(ETA|x_i): conditional expected value of the latent variables given
# specific covariate values x_i; returns a [N x nfac] matrix.
# - no eXo/GAMMA: every row equals (I-B)^-1 ALPHA
# - eXo + GAMMA:  row i is (I-B)^-1 ALPHA + (I-B)^-1 GAMMA x_i
computeEETAx.LISREL <- function(MLIST=NULL, eXo=NULL, N=nrow(eXo),
                                sample.mean=NULL,
                                ov.y.dummy.ov.idx=NULL,
                                ov.x.dummy.ov.idx=NULL,
                                ov.y.dummy.lv.idx=NULL,
                                ov.x.dummy.lv.idx=NULL) {
    nfac <- ncol(MLIST$lambda)
    # when exogenous covariates are supplied, one row per observation
    if(!is.null(eXo)) {
        N <- nrow(eXo)
    }
    # reconstruct ALPHA if it is absent from MLIST (no 'fixing')
    ALPHA <- .internal_get_ALPHA(MLIST = MLIST, sample.mean = sample.mean,
                                 ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                                 ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                                 ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                                 ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    # replicate ALPHA over the N rows
    RES <- matrix(ALPHA, N, nfac, byrow=TRUE)
    # dummy 'x' latent variables carry the observed exogenous values
    if(length(ov.x.dummy.lv.idx) > 0L) {
        RES[,ov.x.dummy.lv.idx] <- eXo
    }
    # structural part?
    if(!is.null(MLIST$beta)) {
        IB.inv <- .internal_get_IB.inv(MLIST = MLIST)
        RES <- RES %*% t(IB.inv)
    }
    # regression on the exogenous covariates?
    if(!is.null(MLIST$gamma)) {
        if(!is.null(MLIST$beta)) {
            RES <- RES + eXo %*% t(IB.inv %*% MLIST$gamma)
        } else {
            RES <- RES + eXo %*% t(MLIST$gamma)
        }
    }
    RES
}
# 3) VETA
# compute V(ETA): variances/covariances of latent variables
# - if no eXo (and GAMMA)
# V(ETA) = (I-B)^-1 PSI (I-B)^-T
# - if eXo and GAMMA: (cfr lisrel submodel 3a with ksi=x)
# V(ETA) = (I-B)^-1 [ GAMMA cov.x t(GAMMA) + PSI] (I-B)^-T
# V(ETA): model-implied variances/covariances of the latent variables,
# marginal over any exogenous covariates.
# - without GAMMA: V(ETA) = (I-B)^-1 PSI (I-B)^-T
# - with GAMMA (x treated as LISREL 'ksi' with covariance cov.x, cfr
#   lisrel submodel 3a): V(ETA) = (I-B)^-1 [GAMMA cov.x t(GAMMA) + PSI] (I-B)^-T
computeVETA.LISREL <- function(MLIST=NULL, cov.x=NULL) {
    PSI <- MLIST$psi
    GAMMA <- MLIST$gamma
    if(!is.null(GAMMA)) {
        # cov.x plays the role of PHI in the LISREL model
        stopifnot(!is.null(cov.x))
        PSI <- tcrossprod(GAMMA %*% cov.x, GAMMA) + PSI
    }
    # no structural part: V(ETA) is just the (augmented) PSI
    if(is.null(MLIST$beta)) {
        VETA <- PSI
    } else {
        IB.inv <- .internal_get_IB.inv(MLIST = MLIST)
        VETA <- tcrossprod(IB.inv %*% PSI, IB.inv)
    }
    VETA
}
# 4) VETAx
# compute V(ETA|x_i): variances/covariances of latent variables
# V(ETA) = (I-B)^-1 PSI (I-B)^-T + remove dummies
# V(ETA|x): (co)variances of the latent variables conditional on the
# exogenous covariates: (I-B)^-1 PSI (I-B)^-T; the rows/cols of any
# dummy latent variables can be removed via lv.dummy.idx.
computeVETAx.LISREL <- function(MLIST=NULL, lv.dummy.idx=NULL) {
    # without a structural part, V(ETA|x) is simply PSI
    if(is.null(MLIST$beta)) {
        out <- MLIST$psi
    } else {
        IB.inv <- .internal_get_IB.inv(MLIST = MLIST)
        out <- tcrossprod(IB.inv %*% MLIST$psi, IB.inv)
    }
    # drop the dummy latent variables, if requested
    if(!is.null(lv.dummy.idx)) {
        out <- out[-lv.dummy.idx, -lv.dummy.idx, drop=FALSE]
    }
    out
}
# Y
# 1) EY
# 2) EYx
# 3) EYetax
# 4) VY
# 5) VYx
# 6) VYetax
# 1) EY
# compute E(Y): expected value of observed
# E(Y) = NU + LAMBDA %*% E(eta)
# = NU + LAMBDA %*% (IB.inv %*% ALPHA) # no exo, no GAMMA
# = NU + LAMBDA %*% (IB.inv %*% ALPHA + IB.inv %*% GAMMA %*% mean.x) # eXo
# if DELTA -> E(Y) = delta * E(Y)
#
# this is similar to computeMuHat but:
# - we ALWAYS compute NU+ALPHA, even if meanstructure=FALSE
# - never used if GAMMA, since we then have categorical variables, and the
# 'part 1' structure contains the (thresholds +) intercepts, not
# the means
# E(Y): unconditional model-implied means of the observed variables:
#     E(Y) = NU + LAMBDA %*% E(ETA)
# scaled by DELTA when present. Unlike computeMuHat, NU and ALPHA are
# always reconstructed, even if meanstructure=FALSE; never used in the
# (categorical) GAMMA case, where 'part 1' holds thresholds/intercepts,
# not means.
computeEY.LISREL <- function(MLIST=NULL, mean.x = NULL, sample.mean = NULL,
                             ov.y.dummy.ov.idx=NULL,
                             ov.x.dummy.ov.idx=NULL,
                             ov.y.dummy.lv.idx=NULL,
                             ov.x.dummy.lv.idx=NULL) {
    # intercepts (reconstructed when absent, no 'fixing')
    NU <- .internal_get_NU(MLIST = MLIST, sample.mean = sample.mean,
                           ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                           ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                           ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                           ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    # latent means
    EETA <- computeEETA.LISREL(MLIST = MLIST, sample.mean = sample.mean,
                               mean.x = mean.x,
                               ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                               ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                               ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                               ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    # observed means
    out <- as.vector(NU) + as.vector(MLIST$lambda %*% EETA)
    # scaling parameters (categorical case)
    if(!is.null(MLIST$delta)) {
        out <- out * as.vector(MLIST$delta)
    }
    out
}
# 2) EYx
# compute E(Y|x_i): expected value of observed, conditional on x_i
# E(Y|x_i) = NU + LAMBDA %*% E(eta|x_i)
# - if no eXo (and GAMMA):
# E(ETA|x_i) = (I-B)^-1 ALPHA
# we return a matrix of size [nobs x nfac] replicating E(ETA)
# - if eXo and GAMMA:
# E(ETA|x_i) = (I-B)^-1 ALPHA + (I-B)^-1 GAMMA x_i
# we return a matrix of size [nobs x nfac]
#
# - we ALWAYS compute NU+ALPHA, even if meanstructure=FALSE
# - never used if GAMMA, since we then have categorical variables, and the
# 'part 1' structure contains the (thresholds +) intercepts, not
# the means
# E(Y|x_i): conditional expected values of the observed variables, one
# row per observation: E(Y|x_i) = NU + LAMBDA %*% E(ETA|x_i),
# DELTA-scaled when present. NU and ALPHA are always reconstructed,
# even if meanstructure=FALSE.
computeEYx.LISREL <- function(MLIST = NULL,
                              eXo = NULL,
                              N = nrow(eXo),
                              sample.mean = NULL,
                              ov.y.dummy.ov.idx = NULL,
                              ov.x.dummy.ov.idx = NULL,
                              ov.y.dummy.lv.idx = NULL,
                              ov.x.dummy.lv.idx = NULL) {
    # intercepts (reconstructed when absent, no 'fixing')
    NU <- .internal_get_NU(MLIST = MLIST,
                           sample.mean = sample.mean,
                           ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                           ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                           ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                           ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    # conditional latent means, one row per observation
    EETAx <- computeEETAx.LISREL(MLIST = MLIST,
                                 eXo = eXo,
                                 N = N,
                                 sample.mean = sample.mean,
                                 ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                                 ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                                 ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                                 ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    # project to the observed level and add the intercepts
    out <- sweep(tcrossprod(EETAx, MLIST$lambda), 2L, STATS = NU, FUN = "+")
    # scaling parameters (categorical case)
    if(!is.null(MLIST$delta)) {
        out <- sweep(out, 2L, STATS = MLIST$delta, FUN = "*")
    }
    out
}
# 3) EYetax
# compute E(Y|eta_i,x_i): conditional expected value of observed variable
# given specific value of eta_i AND x_i
#
# E(y*_i|eta_i, x_i) = NU + LAMBDA eta_i + KAPPA x_i
#
# where eta_i = predict(fit) = factor scores OR specific values for eta_i
# (as in GH integration)
#
# if nexo = 0, and eta_i is single row, YHAT is the same for each observation
# in this case, we return a single row, unless Nobs > 1L, in which case
# we return Nobs identical rows
#
# NOTE: we assume that any effect of x_i on eta_i has already been taken
# care off
# categorical version
# E(Y|eta_i, x_i): conditional expected value of the observed variables
# given specific values eta_i (e.g. factor scores, or quadrature points
# as in GH integration) AND x_i (categorical version):
#     E(y*_i|eta_i, x_i) = NU + LAMBDA eta_i + KAPPA x_i
# A single-row ETA is recycled to N rows. Any effect of x_i on eta_i is
# assumed to have been taken care of by the caller already.
computeEYetax.LISREL <- function(MLIST = NULL,
                                 eXo = NULL,
                                 ETA = NULL,
                                 N = nrow(eXo),
                                 sample.mean = NULL,
                                 ov.y.dummy.ov.idx = NULL,
                                 ov.x.dummy.ov.idx = NULL,
                                 ov.y.dummy.lv.idx = NULL,
                                 ov.x.dummy.lv.idx = NULL) {
    LAMBDA <- MLIST$lambda
    BETA <- MLIST$beta
    # determine the number of rows of the result
    if(!is.null(eXo)) {
        N <- nrow(eXo)
    } else if(!is.null(N)) {
        # nothing to do
    } else {
        N <- 1L
    }
    # create ETA matrix (recycle a single row N times)
    if(nrow(ETA) == 1L) {
        ETA <- matrix(ETA, N, ncol(ETA), byrow=TRUE)
    }
    # always augment ETA with 'dummy values' (0 for ov.y, eXo for ov.x)
    #ndummy <- length(c(ov.y.dummy.lv.idx, ov.x.dummy.lv.idx))
    #if(ndummy > 0L) {
    #    ETA2 <- cbind(ETA, matrix(0, N, ndummy))
    #} else {
        ETA2 <- ETA
    #}
    # only if we have dummy ov.y, we need to compute the 'yhat' values
    # beforehand (one pass through the structural part BETA/ALPHA)
    if(length(ov.y.dummy.lv.idx) > 0L) {
        # insert eXo values
        if(length(ov.x.dummy.lv.idx) > 0L) {
            ETA2[,ov.x.dummy.lv.idx] <- eXo
        }
        # zero ov.y values
        if(length(ov.y.dummy.lv.idx) > 0L) {
            ETA2[,ov.y.dummy.lv.idx] <- 0
        }
        # ALPHA? (reconstruct, but no 'fix')
        ALPHA <- .internal_get_ALPHA(MLIST = MLIST, sample.mean = sample.mean,
                                     ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                                     ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                                     ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                                     ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
        # BETA? -- apply the structural regressions + intercepts
        if(!is.null(BETA)) {
            ETA2 <- sweep(tcrossprod(ETA2, BETA), 2L, STATS = ALPHA, FUN = "+")
        } else {
            ETA2 <- sweep(ETA2, 2L, STATS = ALPHA, FUN = "+")
        }
        # put back eXo values (overwritten by the sweep above)
        if(length(ov.x.dummy.lv.idx) > 0L) {
            ETA2[,ov.x.dummy.lv.idx] <- eXo
        }
        # put back ETA values for the 'real' latent variables
        # (dummy lvs occupy the trailing columns, so the regular lvs are
        #  the columns before min(dummy.idx))
        dummy.idx <- c(ov.x.dummy.lv.idx, ov.y.dummy.lv.idx)
        if(length(dummy.idx) > 0L) {
            lv.regular.idx <- seq_len( min(dummy.idx) - 1L )
            ETA2[, lv.regular.idx] <- ETA[,lv.regular.idx, drop = FALSE]
        }
    }
    # get NU, but do not 'fix'
    NU <- .internal_get_NU(MLIST = MLIST,
                           sample.mean = sample.mean,
                           ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                           ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                           ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                           ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    # EYetax = NU + ETA2 %*% t(LAMBDA)
    EYetax <- sweep(tcrossprod(ETA2, LAMBDA), 2L, STATS = NU, FUN = "+")
    # if delta, scale
    if(!is.null(MLIST$delta)) {
        EYetax <- sweep(EYetax, 2L, STATS = MLIST$delta, FUN = "*")
    }
    EYetax
}
# unconditional version
# E(Y|eta_i), unconditional version: the dummy ov.y entries of ETA are
# first imputed from the structural part (their BETA rows plus the
# ALPHA intercepts), after which ETA is projected from the latent to
# the observed level: NU + ETA %*% t(LAMBDA), DELTA-scaled if needed.
computeEYetax2.LISREL <- function(MLIST = NULL,
                                  ETA = NULL,
                                  sample.mean = NULL,
                                  ov.y.dummy.ov.idx = NULL,
                                  ov.x.dummy.ov.idx = NULL,
                                  ov.y.dummy.lv.idx = NULL,
                                  ov.x.dummy.lv.idx = NULL) {
    LAMBDA <- MLIST$lambda
    BETA <- MLIST$beta
    # only if we have dummy ov.y, we need to compute the 'yhat' values
    # beforehand, and impute them in ETA[,ov.y]
    if(length(ov.y.dummy.lv.idx) > 0L) {
        # ALPHA? (reconstruct, but no 'fix')
        ALPHA <- .internal_get_ALPHA(MLIST = MLIST, sample.mean = sample.mean,
                                     ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                                     ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                                     ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                                     ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
        # keep all, but ov.y values
        OV.NOY <- ETA[,-ov.y.dummy.lv.idx, drop = FALSE]
        # ov.y rows, non-ov.y cols
        BETAY <- BETA[ov.y.dummy.lv.idx,-ov.y.dummy.lv.idx, drop = FALSE]
        # ov.y intercepts
        ALPHAY <- ALPHA[ov.y.dummy.lv.idx,, drop=FALSE]
        # impute ov.y values in ETA
        ETA[,ov.y.dummy.lv.idx] <-
            sweep(tcrossprod(OV.NOY, BETAY), 2L, STATS = ALPHAY, FUN = "+")
    }
    # get NU, but do not 'fix'
    NU <- .internal_get_NU(MLIST = MLIST,
                           sample.mean = sample.mean,
                           ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                           ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                           ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                           ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    # EYetax = NU + ETA %*% t(LAMBDA)
    EYetax <- sweep(tcrossprod(ETA, LAMBDA), 2L, STATS = NU, FUN = "+")
    # if delta, scale
    if(!is.null(MLIST$delta)) {
        EYetax <- sweep(EYetax, 2L, STATS = MLIST$delta, FUN = "*")
    }
    EYetax
}
# unconditional version
# E(Y|eta_i), unconditional version 3: centers the 'regular' latent
# variables around their model-implied means E(ETA), projects them to
# the observed level via the reduced-form loadings LAMBDA (I-B)^-1
# (with the structural columns of BETA zeroed out), and adds the
# model-implied observed means E(Y).
computeEYetax3.LISREL <- function(MLIST = NULL,
                                  ETA = NULL,
                                  sample.mean = NULL,
                                  mean.x = NULL,
                                  ov.y.dummy.ov.idx = NULL,
                                  ov.x.dummy.ov.idx = NULL,
                                  ov.y.dummy.lv.idx = NULL,
                                  ov.x.dummy.lv.idx = NULL) {
    LAMBDA <- MLIST$lambda
    # special case: empty lambda -- just replicate the sample means
    if(ncol(LAMBDA) == 0L) {
        return( matrix(sample.mean,
                       nrow(ETA), length(sample.mean), byrow=TRUE) )
    }
    # lv idx: dummy lvs occupy the trailing columns, so the regular lvs
    # are the columns before min(dummy.idx)
    dummy.idx <- c(ov.y.dummy.lv.idx, ov.x.dummy.lv.idx)
    if(length(dummy.idx) > 0L) {
        nondummy.idx <- seq_len( min(dummy.idx) - 1L )
    } else {
        nondummy.idx <- seq_len( ncol(MLIST$lambda) )
    }
    # beta?
    if(is.null(MLIST$beta) || length(ov.y.dummy.lv.idx) == 0L ||
       length(nondummy.idx) == 0L) {
        LAMBDA..IB.inv <- LAMBDA
    } else {
        # only keep those columns of BETA that correspond to the
        # `regular' latent variables
        # (ie. ignore the structural part altogether)
        MLIST2 <- MLIST
        MLIST2$beta[,dummy.idx] <- 0
        IB.inv <- .internal_get_IB.inv(MLIST = MLIST2)
        LAMBDA..IB.inv <- LAMBDA %*% IB.inv
    }
    # compute model-implied means
    EY <- computeEY.LISREL(MLIST = MLIST, mean.x = mean.x,
                           sample.mean = sample.mean,
                           ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                           ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                           ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                           ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    EETA <- computeEETA.LISREL(MLIST = MLIST, sample.mean = sample.mean,
                               mean.x = mean.x,
                               ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                               ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                               ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                               ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    # center regular lv only
    ETA[,nondummy.idx] <- sweep(ETA[,nondummy.idx,drop = FALSE], 2L,
                                STATS = EETA[nondummy.idx], FUN = "-")
    # project from lv to ov, if we have any lv
    if(length(nondummy.idx) > 0) {
        EYetax <- sweep(tcrossprod(ETA[,nondummy.idx,drop=FALSE],
                                   LAMBDA..IB.inv[,nondummy.idx,drop=FALSE]),
                        2L, STATS = EY, FUN = "+")
    } else {
        EYetax <- ETA
    }
    # put back eXo variables
    if(length(ov.x.dummy.lv.idx) > 0L) {
        EYetax[,ov.x.dummy.ov.idx] <- ETA[,ov.x.dummy.lv.idx, drop = FALSE]
    }
    # if delta, scale
    if(!is.null(MLIST$delta)) {
        EYetax <- sweep(EYetax, 2L, STATS = MLIST$delta, FUN = "*")
    }
    EYetax
}
# 4) VY
# compute the *un*conditional variance of y: V(Y) or V(Y*)
# 'unconditional' model-implied variances
# - same as diag(Sigma.hat) if all Y are continuous
# - 1.0 (or delta^2) if categorical
# - if also Gamma, cov.x is used (only if categorical)
# only in THIS case, VY is different from diag(VYx)
#
# V(Y) = LAMBDA V(ETA) t(LAMBDA) + THETA
# V(Y): unconditional model-implied *variances* of the observed
# variables: diag( LAMBDA V(ETA) t(LAMBDA) + THETA ). With a GAMMA
# matrix, cov.x enters via computeVETA (categorical case), in which
# case VY differs from diag(VYx).
computeVY.LISREL <- function(MLIST=NULL, cov.x=NULL) {
    VETA <- computeVETA.LISREL(MLIST = MLIST, cov.x = cov.x)
    COV.Y <- tcrossprod(MLIST$lambda %*% VETA, MLIST$lambda) + MLIST$theta
    # variances only
    diag(COV.Y)
}
# 5) VYx
# compute V(Y*|x_i) == model-implied covariance matrix
# this equals V(Y*) if no (explicit) eXo no GAMMA
# V(Y*|x): the model-implied covariance matrix of the (latent-response)
# observed variables; equals V(Y*) when there is no explicit eXo/GAMMA.
# Sigma = LAMBDA (I-B)^-1 PSI (I-B)^-T t(LAMBDA) + THETA, optionally
# pre/post-multiplied by diag(DELTA).
computeVYx.LISREL <- computeSigmaHat.LISREL <- function(MLIST = NULL,
                                                        delta = TRUE) {
    LAMBDA <- MLIST$lambda
    nvar <- nrow(LAMBDA)
    # 'reduced-form' loadings: LAMBDA (I-B)^-1
    if(is.null(MLIST$beta)) {
        LB <- LAMBDA
    } else {
        LB <- LAMBDA %*% .internal_get_IB.inv(MLIST = MLIST)
    }
    Sigma <- tcrossprod(LB %*% MLIST$psi, LB) + MLIST$theta
    # scale by DELTA (categorical case), unless disabled
    if(delta && !is.null(MLIST$delta)) {
        DD <- diag(MLIST$delta[,1L], nrow=nvar, ncol=nvar)
        Sigma <- DD %*% Sigma %*% DD
    }
    Sigma
}
# 6) VYetax
# V(Y | eta_i, x_i) = THETA
# V(Y|eta_i, x_i): the residual (co)variances THETA, scaled by
# diag(DELTA) on both sides when requested.
computeVYetax.LISREL <- function(MLIST = NULL, delta = TRUE) {
    out <- MLIST$theta
    if(delta && !is.null(MLIST$delta)) {
        nvar <- nrow(out)
        DD <- diag(MLIST$delta[,1L], nrow=nvar, ncol=nvar)
        out <- DD %*% out %*% DD
    }
    out
}
### compute model-implied sample statistics
#
# 1) MuHat (similar to EY, but continuous only)
# 2) TH
# 3) PI
# 4) SigmaHat == VYx
# compute MuHat for a single group -- only for the continuous case (no eXo)
#
# this is a special case of E(Y) where
# - we have no (explicit) eXogenous variables
# - only continuous
# Mu-hat for a single group: NU + LAMBDA (I-B)^-1 ALPHA.
# Special case of E(Y) for the continuous-only model without explicit
# exogenous variables; returns a zero column vector when there is no
# meanstructure (NU or ALPHA absent).
computeMuHat.LISREL <- function(MLIST=NULL) {
    LAMBDA <- MLIST$lambda
    # shortcut: no meanstructure
    if(is.null(MLIST$alpha) || is.null(MLIST$nu)) {
        return(matrix(0, nrow(LAMBDA), 1L))
    }
    # 'reduced-form' loadings
    if(is.null(MLIST$beta)) {
        LB <- LAMBDA
    } else {
        LB <- LAMBDA %*% .internal_get_IB.inv(MLIST = MLIST)
    }
    MLIST$nu + LB %*% MLIST$alpha
}
# compute TH for a single group
# Model-implied thresholds TH for a single group.
# th.idx maps each entry of the (expanded) threshold vector to its
# observed variable; entries with th.idx == 0 are placeholders for
# numeric variables and contribute 0. The thresholds are shifted by the
# reduced-form intercepts pi0 = NU + LAMBDA (I-B)^-1 ALPHA and, if
# present, scaled by the DELTA parameters.
computeTH.LISREL <- function(MLIST=NULL, th.idx=NULL) {
    LAMBDA <- MLIST$lambda; nvar <- nrow(LAMBDA); nfac <- ncol(LAMBDA)
    BETA <- MLIST$beta
    TAU <- MLIST$tau; nth <- nrow(TAU)
    # missing alpha: treat latent means as zero
    if(is.null(MLIST$alpha)) {
        ALPHA <- matrix(0, nfac, 1L)
    } else {
        ALPHA <- MLIST$alpha
    }
    # missing nu: treat intercepts as zero
    if(is.null(MLIST$nu)) {
        NU <- matrix(0, nvar, 1L)
    } else {
        NU <- MLIST$nu
    }
    # K_nu expands the nvar intercepts to the length of the threshold
    # vector (each variable's intercept repeated once per threshold)
    if(is.null(th.idx)) {
        th.idx <- seq_len(nth)
        nlev <- rep(1L, nvar)
        K_nu <- diag(nvar)
    } else {
        nlev <- tabulate(th.idx, nbins=nvar); nlev[nlev == 0L] <- 1L
        K_nu <- matrix(0, sum(nlev), nvar)
        K_nu[ cbind(seq_len(sum(nlev)), rep(seq_len(nvar), times=nlev)) ] <- 1.0
    }
    # shortcut: no thresholds at all
    if(is.null(TAU)) return(matrix(0, length(th.idx), 1L))
    # beta?
    if(is.null(BETA)) {
        LAMBDA..IB.inv <- LAMBDA
    } else {
        IB.inv <- .internal_get_IB.inv(MLIST = MLIST)
        LAMBDA..IB.inv <- LAMBDA %*% IB.inv
    }
    # compute pi0, the reduced-form intercepts
    pi0 <- NU + LAMBDA..IB.inv %*% ALPHA
    # interleave th's with zeros where we have numeric variables
    th <- numeric( length(th.idx) )
    th[ th.idx > 0L ] <- TAU[,1L]
    # compute TH
    TH <- th - (K_nu %*% pi0)
    # if delta, scale (each variable's delta repeated per threshold)
    if(!is.null(MLIST$delta)) {
        DELTA.diag <- MLIST$delta[,1L]
        DELTA.star.diag <- rep(DELTA.diag, times=nlev)
        TH <- TH * DELTA.star.diag
    }
    as.vector(TH)
}
# compute PI for a single group
# PI for a single group: the reduced-form regression coefficients of
# the observed variables on the exogenous covariates:
#     PI = LAMBDA (I-B)^-1 GAMMA
# DELTA-scaled (row-wise) when present; a 0-column matrix when there
# are no exogenous covariates.
computePI.LISREL <- function(MLIST=NULL) {
    LAMBDA <- MLIST$lambda
    # shortcut: no exogenous covariates
    if(is.null(MLIST$gamma)) {
        return(matrix(0, nrow(LAMBDA), 0L))
    }
    # 'reduced-form' loadings
    if(is.null(MLIST$beta)) {
        LB <- LAMBDA
    } else {
        LB <- LAMBDA %*% .internal_get_IB.inv(MLIST = MLIST)
    }
    out <- LB %*% MLIST$gamma
    # scaling parameters (categorical case)
    if(!is.null(MLIST$delta)) {
        out <- out * MLIST$delta[,1L]
    }
    out
}
# 'Fixed' LAMBDA for a single group: rows for observed variables that
# were promoted to dummy ov.y latent variables get their loadings from
# the corresponding rows of BETA; optionally the dummy lv columns are
# removed. (Uses <- for local assignment; the unused ov.dummy.idx local
# was dropped.)
computeLAMBDA.LISREL <- function(MLIST = NULL,
                                 ov.y.dummy.ov.idx = NULL,
                                 ov.x.dummy.ov.idx = NULL,
                                 ov.y.dummy.lv.idx = NULL,
                                 ov.x.dummy.lv.idx = NULL,
                                 remove.dummy.lv = FALSE) {
    lv.dummy.idx <- c(ov.y.dummy.lv.idx, ov.x.dummy.lv.idx)
    # fix LAMBDA: dummy ov.y loadings live in BETA
    LAMBDA <- MLIST$lambda
    if(length(ov.y.dummy.ov.idx) > 0L) {
        LAMBDA[ov.y.dummy.ov.idx,] <- MLIST$beta[ov.y.dummy.lv.idx,]
    }
    # remove dummy lv columns?
    if(remove.dummy.lv && length(lv.dummy.idx) > 0L) {
        LAMBDA <- LAMBDA[,-lv.dummy.idx,drop=FALSE]
    }
    LAMBDA
}
# 'Fixed' THETA for a single group: the residual (co)variances of the
# dummy observed variables are stored in PSI; copy them back into the
# corresponding THETA cells. (Uses <- for local assignment.)
computeTHETA.LISREL <- function(MLIST=NULL,
                                ov.y.dummy.ov.idx=NULL,
                                ov.x.dummy.ov.idx=NULL,
                                ov.y.dummy.lv.idx=NULL,
                                ov.x.dummy.lv.idx=NULL) {
    ov.dummy.idx <- c(ov.y.dummy.ov.idx, ov.x.dummy.ov.idx)
    lv.dummy.idx <- c(ov.y.dummy.lv.idx, ov.x.dummy.lv.idx)
    # fix THETA: dummy (co)variances live in PSI
    THETA <- MLIST$theta
    if(length(ov.dummy.idx) > 0L) {
        THETA[ov.dummy.idx, ov.dummy.idx] <-
            MLIST$psi[lv.dummy.idx, lv.dummy.idx]
    }
    THETA
}
# compute IB.inv
# (I - B)^-1 for the structural part. When MLIST has no BETA matrix,
# this reduces to the identity matrix of order nrow(PSI).
.internal_get_IB.inv <- function(MLIST = NULL) {
    n.lv <- nrow(MLIST$psi)
    # no structural part: (I - B) is the identity
    if(is.null(MLIST$beta)) {
        return(diag(n.lv))
    }
    IminB <- -MLIST$beta
    IminB[lav_matrix_diag_idx(n.lv)] <- 1
    solve(IminB)
}
# only if ALPHA=NULL but we need it anyway
# we 'reconstruct' ALPHA here (including dummy entries), no fixing
#
# without any dummy variables, this is just the zero vector
# but if we have dummy variables, we need to fill in their values
#
#
# Reconstruct ALPHA when it is absent from MLIST (no 'fixing').
# Without dummy variables this is simply a zero column vector. With
# dummy variables, their entries are recovered from the sample means:
#     sample.mean = NU + LAMBDA (I-B)^-1 ALPHA
# where the dummy sub-block of LAMBDA (I-B)^-1 is square and the NU
# elements play no role for the dummy rows, so
#     ALPHA[dummy] = solve(LB[dummy, dummy]) %*% sample.mean[dummy]
.internal_get_ALPHA <- function(MLIST = NULL, sample.mean = NULL,
                                ov.y.dummy.ov.idx = NULL,
                                ov.x.dummy.ov.idx = NULL,
                                ov.y.dummy.lv.idx = NULL,
                                ov.x.dummy.lv.idx = NULL) {
    # nothing to do if ALPHA is already there
    if(!is.null(MLIST$alpha)) {
        return(MLIST$alpha)
    }
    nfac <- ncol(MLIST$lambda)
    ALPHA <- matrix(0, nfac, 1L)
    ov.dummy.idx <- c(ov.y.dummy.ov.idx, ov.x.dummy.ov.idx)
    lv.dummy.idx <- c(ov.y.dummy.lv.idx, ov.x.dummy.lv.idx)
    if(length(ov.dummy.idx) > 0L) {
        # recover the dummy entries from the sample means
        LB <- MLIST$lambda %*% .internal_get_IB.inv(MLIST = MLIST)
        LB.dummy <- LB[ov.dummy.idx, lv.dummy.idx]
        ALPHA[lv.dummy.idx] <- solve(LB.dummy) %*% sample.mean[ov.dummy.idx]
    }
    ALPHA
}
# only if NU=NULL but we need it anyway
#
# since we have no meanstructure, we can assume NU is unrestricted
# and contains either:
# 1) the sample means (if not eXo)
# 2) the intercepts, if we have exogenous covariates
# since sample.mean = NU + LAMBDA %*% E(eta)
# we have NU = sample.mean - LAMBDA %*% E(eta)
# Reconstruct NU when it is absent from MLIST. With no meanstructure,
# NU is unrestricted and contains either:
#  1) the sample means (no exogenous 'x' dummies), or
#  2) the intercepts: since sample.mean = NU + LAMBDA %*% E(eta),
#     NU = sample.mean - LAMBDA %*% E(eta).
.internal_get_NU <- function(MLIST = NULL, sample.mean = NULL,
                             ov.y.dummy.ov.idx = NULL,
                             ov.x.dummy.ov.idx = NULL,
                             ov.y.dummy.lv.idx = NULL,
                             ov.x.dummy.lv.idx = NULL) {
    # nothing to do if NU is already there
    if(!is.null(MLIST$nu)) {
        return(MLIST$nu)
    }
    # no exogenous 'x' dummies: unrestricted means
    if(length(ov.x.dummy.ov.idx) == 0L) {
        return(sample.mean)
    }
    # exogenous covariates present: 'regress out' LAMBDA %*% E(eta)
    EETA <- computeEETA.LISREL(MLIST, mean.x=NULL,
                               sample.mean=sample.mean,
                               ov.y.dummy.ov.idx=ov.y.dummy.ov.idx,
                               ov.x.dummy.ov.idx=ov.x.dummy.ov.idx,
                               ov.y.dummy.lv.idx=ov.y.dummy.lv.idx,
                               ov.x.dummy.lv.idx=ov.x.dummy.lv.idx)
    NU <- sample.mean - MLIST$lambda %*% EETA
    # make sure we have exact zeroes for all dummies
    NU[c(ov.y.dummy.ov.idx,ov.x.dummy.ov.idx)] <- 0
    NU
}
# Reconstruct the KAPPA matrix (regressions of the observed variables
# on the exogenous covariates). The kappa coefficients live either in
# GAMMA (categorical case) or in the ov.x columns of BETA (dummy-lv
# case). `nexo` is only used as a fallback when GAMMA is absent; an
# error is raised when neither is available. (The redundant
# `nexo <- nexo` self-assignment branch was removed.)
.internal_get_KAPPA <- function(MLIST = NULL,
                                ov.y.dummy.ov.idx = NULL,
                                ov.x.dummy.ov.idx = NULL,
                                ov.y.dummy.lv.idx = NULL,
                                ov.x.dummy.lv.idx = NULL,
                                nexo = NULL) {
    nvar <- nrow(MLIST$lambda)
    # determine the number of exogenous covariates
    if(!is.null(MLIST$gamma)) {
        nexo <- ncol(MLIST$gamma)
    } else if(is.null(nexo)) {
        stop("nexo not known")
    }
    # create KAPPA, filling the dummy ov.y rows
    KAPPA <- matrix(0, nvar, nexo)
    if(!is.null(MLIST$gamma)) {
        KAPPA[ov.y.dummy.ov.idx,] <-
            MLIST$gamma[ov.y.dummy.lv.idx,,drop=FALSE]
    } else if(length(ov.x.dummy.ov.idx) > 0L) {
        KAPPA[ov.y.dummy.ov.idx,] <-
            MLIST$beta[ov.y.dummy.lv.idx,
                       ov.x.dummy.lv.idx, drop=FALSE]
    }
    KAPPA
}
# old version of computeEYetax (using 'fixing')
# Old version of computeEYetax (using 'fixing'): dummy entries of NU
# are replaced by their ALPHA counterparts, dummy lv columns of LAMBDA
# are removed (with ov.y rows taken from BETA), and the KAPPA part
# (regressions on eXo) is added explicitly. A single-row ETA is
# recycled over the eXo rows, or over Nobs rows when there is no eXo.
computeYHATetax.LISREL <- function(MLIST=NULL, eXo=NULL, ETA=NULL,
                                   sample.mean=NULL,
                                   ov.y.dummy.ov.idx=NULL,
                                   ov.x.dummy.ov.idx=NULL,
                                   ov.y.dummy.lv.idx=NULL,
                                   ov.x.dummy.lv.idx=NULL,
                                   Nobs = 1L) {
    LAMBDA <- MLIST$lambda; nvar <- nrow(LAMBDA); nfac <- ncol(LAMBDA)
    lv.dummy.idx <- c(ov.y.dummy.lv.idx, ov.x.dummy.lv.idx)
    ov.dummy.idx <- c(ov.y.dummy.ov.idx, ov.x.dummy.ov.idx)
    # exogenous variables?
    if(is.null(eXo)) {
        nexo <- 0L
    } else {
        nexo <- ncol(eXo)
        # check ETA rows: either a single row, or one row per observation
        if(!(nrow(ETA) == 1L || nrow(ETA) == nrow(eXo))) {
            stop("lavaan ERROR: !(nrow(ETA) == 1L || nrow(ETA) == nrow(eXo))")
        }
    }
    # get NU (reconstructed when absent)
    NU <- .internal_get_NU(MLIST = MLIST, sample.mean = sample.mean,
                           ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                           ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                           ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                           ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    # ALPHA? (reconstruct, but no 'fix')
    ALPHA <- .internal_get_ALPHA(MLIST = MLIST, sample.mean = sample.mean,
                                 ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                                 ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                                 ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                                 ov.x.dummy.lv.idx = ov.x.dummy.lv.idx)
    # fix NU: dummy intercepts live in ALPHA
    if(length(lv.dummy.idx) > 0L) {
        NU[ov.dummy.idx, 1L] <- ALPHA[lv.dummy.idx, 1L]
    }
    # fix LAMBDA (remove dummies) ## FIXME -- needed?
    LAMBDA <- MLIST$lambda
    if(length(lv.dummy.idx) > 0L) {
        LAMBDA <- LAMBDA[, -lv.dummy.idx, drop=FALSE]
        nfac <- ncol(LAMBDA)
        # dummy ov.y loadings on the regular lvs live in BETA
        LAMBDA[ov.y.dummy.ov.idx,] <-
            MLIST$beta[ov.y.dummy.lv.idx, seq_len(nfac), drop=FALSE]
    }
    # compute YHAT = NU + ETA %*% t(LAMBDA)
    YHAT <- sweep(ETA %*% t(LAMBDA), MARGIN=2, NU, "+")
    # Kappa + eXo?
    # note: Kappa elements are either in Gamma or in Beta
    if(nexo > 0L) {
        # create KAPPA
        KAPPA <- .internal_get_KAPPA(MLIST = MLIST,
                                     ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
                                     ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
                                     ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
                                     ov.x.dummy.lv.idx = ov.x.dummy.lv.idx,
                                     nexo = nexo)
        # expand YHAT if ETA only has 1 row
        if(nrow(YHAT) == 1L) {
            YHAT <- sweep(eXo %*% t(KAPPA), MARGIN=2, STATS=YHAT, FUN="+")
        } else {
            # add fixed part
            YHAT <- YHAT + (eXo %*% t(KAPPA))
        }
        # put back eXo
        if(length(ov.x.dummy.ov.idx) > 0L) {
            YHAT[, ov.x.dummy.ov.idx] <- eXo
        }
    } else {
        # duplicate a single row Nobs times?
        if(is.numeric(Nobs) && Nobs > 1L && nrow(YHAT) == 1L) {
            YHAT <- matrix(YHAT, Nobs, nvar, byrow=TRUE)
            # YHAT <- YHAT[ rep(1L, Nobs), ]
        }
    }
    # delta?
    # FIXME: not used here?
    #if(!is.null(DELTA)) {
    #    YHAT <- sweep(YHAT, MARGIN=2, DELTA, "*")
    #}
    YHAT
}
# deal with 'dummy' OV.X latent variables
# create additional matrices (eg GAMMA), and resize
# remove all ov.x related entries
MLIST2MLISTX <- function(MLIST=NULL,
                         ov.x.dummy.ov.idx = NULL,
                         ov.x.dummy.lv.idx = NULL) {
    # MLIST:             list of LISREL model matrices (lambda, theta, psi,
    #                    beta, nu, alpha, ...)
    # ov.x.dummy.ov.idx: observed-variable indices of the exogenous dummies
    # ov.x.dummy.lv.idx: matching latent-variable indices
    # returns MLISTX: MLIST with all ov.x dummy rows/columns removed and a
    # GAMMA matrix created from the corresponding BETA columns
    lv.idx <- ov.x.dummy.lv.idx
    ov.idx <- ov.x.dummy.ov.idx

    # nothing to do when there are no ov.x dummy latent variables
    if(length(lv.idx) == 0L) return(MLIST)

    # copy
    MLISTX <- MLIST

    # fix LAMBDA:
    # - remove all ov.x related columns/rows
    MLISTX$lambda <- MLIST$lambda[-ov.idx, -lv.idx, drop=FALSE]

    # fix THETA:
    # - remove ov.x related columns/rows
    MLISTX$theta <- MLIST$theta[-ov.idx, -ov.idx, drop=FALSE]

    # fix PSI:
    # - remove ov.x related columns/rows
    MLISTX$psi <- MLIST$psi[-lv.idx, -lv.idx, drop=FALSE]

    # create GAMMA from the dummy columns of BETA
    # (the early return above guarantees at least one dummy here, so the
    #  original 'length > 0' guard was redundant;
    #  NOTE(review): assumes MLIST$beta is non-NULL whenever dummies exist)
    MLISTX$gamma <- MLIST$beta[-lv.idx, lv.idx, drop=FALSE]

    # fix BETA (remove if empty)
    if(!is.null(MLIST$beta)) {
        MLISTX$beta <- MLIST$beta[-lv.idx, -lv.idx, drop=FALSE]
        if(ncol(MLISTX$beta) == 0L) MLISTX$beta <- NULL
    }

    # fix NU
    if(!is.null(MLIST$nu)) {
        MLISTX$nu <- MLIST$nu[-ov.idx, 1L, drop=FALSE]
    }

    # fix ALPHA
    if(!is.null(MLIST$alpha)) {
        MLISTX$alpha <- MLIST$alpha[-lv.idx, 1L, drop=FALSE]
    }

    MLISTX
}
# create MLIST from MLISTX
MLISTX2MLIST <- function(MLISTX=NULL,
                         ov.x.dummy.ov.idx = NULL,
                         ov.x.dummy.lv.idx = NULL,
                         mean.x=NULL,
                         cov.x=NULL) {
    # Re-insert the exogenous 'dummy' latent variables into the model
    # matrices; mean.x/cov.x provide their means and (co)variances.
    lv.idx <- ov.x.dummy.lv.idx; ndum <- length(lv.idx)
    ov.idx <- ov.x.dummy.ov.idx

    # no dummies: nothing to rebuild
    if(length(lv.idx) == 0L) return(MLISTX)
    stopifnot(!is.null(cov.x), !is.null(mean.x))

    nvar <- nrow(MLISTX$lambda); nfac <- ncol(MLISTX$lambda)

    # local helper: append k zero columns and k zero rows to a matrix
    zero.pad <- function(M, k) {
        rbind(cbind(M, matrix(0, nrow(M), k)),
              matrix(0, k, ncol(M) + k))
    }

    # copy, then resize every matrix to make room for the dummies
    MLIST <- MLISTX
    MLIST$lambda <- zero.pad(MLISTX$lambda, ndum)
    MLIST$psi    <- zero.pad(MLISTX$psi,    ndum)
    MLIST$theta  <- zero.pad(MLISTX$theta,  ndum)
    if(!is.null(MLISTX$beta)) {
        MLIST$beta <- zero.pad(MLISTX$beta, ndum)
    }
    if(!is.null(MLISTX$alpha)) {
        MLIST$alpha <- rbind(MLISTX$alpha, matrix(0, ndum, 1))
    }
    if(!is.null(MLISTX$nu)) {
        MLIST$nu <- rbind(MLISTX$nu, matrix(0, ndum, 1))
    }

    # each dummy latent variable loads 1.0 on its observed counterpart
    MLIST$lambda[ cbind(ov.idx, lv.idx) ] <- 1
    # the dummies' (co)variances come from cov.x
    MLIST$psi[lv.idx, lv.idx] <- cov.x
    # move (ov.x.dummy elements of) GAMMA to BETA
    MLIST$beta[seq_len(nfac), ov.x.dummy.lv.idx] <- MLISTX$gamma
    MLIST$gamma <- NULL
    # the dummies' means come from mean.x
    if(!is.null(MLIST$alpha)) {
        MLIST$alpha[lv.idx] <- mean.x
    }

    MLIST
}
# if DELTA parameterization, compute residual elements (in theta, or psi)
# of observed categorical variables, as a function of other model parameters
setResidualElements.LISREL <- function(MLIST=NULL,
                                       num.idx=NULL,
                                       ov.y.dummy.ov.idx=NULL,
                                       ov.y.dummy.lv.idx=NULL) {
    # num.idx:           indices of the numeric (non-categorical) observed
    #                    variables; their residual variances are free
    #                    parameters and must not be overwritten here
    # ov.y.dummy.*.idx:  paired ov/lv indices of 'dummy' latent variables
    #                    standing in for observed y variables

    # remove num.idx from ov.y.dummy.* so that numeric dummies keep their
    # own residual variance; only categorical ones are recomputed
    if(length(num.idx) > 0L && length(ov.y.dummy.ov.idx) > 0L) {
        n.idx <- which(ov.y.dummy.ov.idx %in% num.idx)
        if(length(n.idx) > 0L) {
            ov.y.dummy.ov.idx <- ov.y.dummy.ov.idx[-n.idx]
            ov.y.dummy.lv.idx <- ov.y.dummy.lv.idx[-n.idx]
        }
    }

    # force non-numeric theta elements to be zero before computing the
    # model-implied variances
    if(length(num.idx) > 0L) {
        diag(MLIST$theta)[-num.idx] <- 0.0
    } else {
        diag(MLIST$theta) <- 0.0
    }
    if(length(ov.y.dummy.ov.idx) > 0L) {
        MLIST$psi[ cbind(ov.y.dummy.lv.idx, ov.y.dummy.lv.idx) ] <- 0.0
    }

    # special case: PSI=0, and lambda=I (eg ex3.12)
    if(ncol(MLIST$psi) > 0L &&
       sum(diag(MLIST$psi)) == 0.0 && all(diag(MLIST$lambda) == 1)) {
        ### FIXME: more elegant/general solution??
        # temporarily set the psi diagonal to 1 and subtract it again below
        diag(MLIST$psi) <- 1
        Sigma.hat <- computeSigmaHat.LISREL(MLIST = MLIST, delta=FALSE)
        diag.Sigma <- diag(Sigma.hat) - 1.0
    } else if(ncol(MLIST$psi) == 0L) {
        # no latent variables: implied common-part variance is zero
        diag.Sigma <- rep(0, ncol(MLIST$theta))
    } else {
        Sigma.hat <- computeSigmaHat.LISREL(MLIST = MLIST, delta=FALSE)
        diag.Sigma <- diag(Sigma.hat)
    }

    # no delta matrix means all scaling factors are 1
    if(is.null(MLIST$delta)) {
        delta <- rep(1, length(diag.Sigma))
    } else {
        delta <- MLIST$delta
    }

    # theta = DELTA^(-1/2) - diag( LAMBDA (I-B)^-1 PSI (I-B)^-T t(LAMBDA) )
    RESIDUAL <- as.vector(1/(delta*delta) - diag.Sigma)
    if(length(num.idx) > 0L) {
        diag(MLIST$theta)[-num.idx] <- RESIDUAL[-num.idx]
    } else {
        diag(MLIST$theta) <- RESIDUAL
    }

    # move ov.y.dummy 'RESIDUAL' elements from THETA to PSI
    if(length(ov.y.dummy.ov.idx) > 0L) {
        MLIST$psi[cbind(ov.y.dummy.lv.idx, ov.y.dummy.lv.idx)] <-
            MLIST$theta[cbind(ov.y.dummy.ov.idx, ov.y.dummy.ov.idx)]
        MLIST$theta[cbind(ov.y.dummy.ov.idx, ov.y.dummy.ov.idx)] <- 0.0
    }

    MLIST
}
# if THETA parameterization, compute delta elements
# of observed categorical variables, as a function of other model parameters
setDeltaElements.LISREL <- function(MLIST=NULL, num.idx=NULL) {
    # model-implied variances of the observed variables (no delta scaling)
    sigma.diag <- diag(computeSigmaHat.LISREL(MLIST = MLIST, delta = FALSE))
    # negative implied variances cannot produce a real-valued delta -> NA
    sigma.diag[sigma.diag < 0] <- as.numeric(NA)
    # (1/delta^2) = diag( LAMBDA (I-B)^-1 PSI (I-B)^-T t(LAMBDA) ) + THETA
    MLIST$delta[, 1L] <- sqrt(1/sigma.diag)
    # numeric (non-categorical) variables keep delta == 1
    if(length(num.idx) > 0L) {
        MLIST$delta[num.idx] <- 1.0
    }
    MLIST
}
# compute Sigma/ETA: variances/covariances of BOTH observed and latent variables
computeCOV.LISREL <- function(MLIST=NULL, cov.x=NULL, delta=TRUE) {
    # returns the (nvar+nlat) x (nvar+nlat) joint covariance matrix;
    # observed variables occupy the first nvar rows/columns, the latent
    # variables the remaining nlat
    LAMBDA <- MLIST$lambda; nvar <- nrow(LAMBDA)
    PSI <- MLIST$psi; nlat <- nrow(PSI)
    THETA <- MLIST$theta
    BETA <- MLIST$beta

    # 'extend' matrices: stack an identity under LAMBDA so the latent
    # variables appear as additional 'indicators' of themselves; they get
    # no measurement error (zero block appended to THETA)
    LAMBDA2 <- rbind(LAMBDA, diag(nlat))
    THETA2 <- bdiag(THETA, matrix(0,nlat,nlat))

    # beta? premultiply by (I-B)^-1 when a structural part is present
    if(is.null(BETA)) {
        LAMBDA..IB.inv <- LAMBDA2
    } else {
        IB.inv <- .internal_get_IB.inv(MLIST = MLIST)
        LAMBDA..IB.inv <- LAMBDA2 %*% IB.inv
    }

    # compute augment COV matrix
    COV <- tcrossprod(LAMBDA..IB.inv %*% PSI, LAMBDA..IB.inv) + THETA2

    # if delta, scale the observed-variable block only
    if(delta && !is.null(MLIST$delta)) {
        DELTA <- diag(MLIST$delta[,1L], nrow=nvar, ncol=nvar)
        COV[seq_len(nvar),seq_len(nvar)] <-
            DELTA %*% COV[seq_len(nvar),seq_len(nvar)] %*% DELTA
    }

    # if GAMMA, add the exogenous contribution to the latent block
    GAMMA <- MLIST$gamma
    if(!is.null(GAMMA)) {
        stopifnot(!is.null(cov.x))
        if(is.null(BETA)) {
            SX <- tcrossprod(GAMMA %*% cov.x, GAMMA)
        } else {
            # IB.inv was computed above (BETA is non-NULL on this path)
            IB.inv..GAMMA <- IB.inv %*% GAMMA
            SX <- tcrossprod(IB.inv..GAMMA %*% cov.x, IB.inv..GAMMA)
        }
        COV[(nvar+1):(nvar+nlat),(nvar+1):(nvar+nlat)] <-
            COV[(nvar+1):(nvar+nlat),(nvar+1):(nvar+nlat)] + SX
    }

    COV
}
# derivative of the objective function
derivative.F.LISREL <- function(MLIST=NULL, Omega=NULL, Omega.mu=NULL) {
    # Omega:    weight matrix for the covariance part of the objective
    # Omega.mu: weight for the mean part (NULL when there is no
    #           meanstructure)
    # returns a list of gradient matrices, one entry per model matrix
    LAMBDA <- MLIST$lambda
    PSI <- MLIST$psi
    BETA <- MLIST$beta
    ALPHA <- MLIST$alpha

    # beta?
    if(is.null(BETA)) {
        LAMBDA..IB.inv <- LAMBDA
    } else {
        IB.inv <- .internal_get_IB.inv(MLIST = MLIST)
        LAMBDA..IB.inv <- LAMBDA %*% IB.inv
    }

    # meanstructure?
    meanstructure <- FALSE; if(!is.null(Omega.mu)) meanstructure <- TRUE
    # group weight?
    group.w.free <- FALSE; if(!is.null(MLIST$gw)) group.w.free <- TRUE

    # pre-compute products shared by the LAMBDA and BETA gradients
    tLAMBDA..IB.inv <- t(LAMBDA..IB.inv)
    if(!is.null(BETA)) {
        Omega..LAMBDA..IB.inv..PSI..tIB.inv <-
            ( Omega %*% LAMBDA..IB.inv %*% PSI %*% t(IB.inv) )
    } else {
        Omega..LAMBDA <- Omega %*% LAMBDA
    }

    # 1. LAMBDA
    if(!is.null(BETA)) {
        if(meanstructure) {
            LAMBDA.deriv <- -1.0 * ( Omega.mu %*% t(ALPHA) %*% t(IB.inv) +
                                     Omega..LAMBDA..IB.inv..PSI..tIB.inv )
        } else {
            LAMBDA.deriv <- -1.0 * Omega..LAMBDA..IB.inv..PSI..tIB.inv
        }
    } else {
        # no BETA
        if(meanstructure) {
            LAMBDA.deriv <- -1.0 * ( Omega.mu %*% t(ALPHA) +
                                     Omega..LAMBDA %*% PSI )
        } else {
            LAMBDA.deriv <- -1.0 * (Omega..LAMBDA %*% PSI)
        }
    }

    # 2. BETA
    if(!is.null(BETA)) {
        if(meanstructure) {
            BETA.deriv <- -1.0*(( t(IB.inv) %*%
                                  (t(LAMBDA) %*% Omega.mu %*% t(ALPHA)) %*%
                                  t(IB.inv)) +
                                (tLAMBDA..IB.inv %*%
                                 Omega..LAMBDA..IB.inv..PSI..tIB.inv))
        } else {
            BETA.deriv <- -1.0 * ( tLAMBDA..IB.inv %*%
                                   Omega..LAMBDA..IB.inv..PSI..tIB.inv )
        }
    } else {
        BETA.deriv <- NULL
    }

    # 3. PSI (symmetric: halve the diagonal to avoid double counting)
    PSI.deriv <- -1.0 * ( tLAMBDA..IB.inv %*% Omega %*% LAMBDA..IB.inv )
    diag(PSI.deriv) <- 0.5 * diag(PSI.deriv)

    # 4. THETA (symmetric: halve the diagonal to avoid double counting)
    THETA.deriv <- -1.0 * Omega
    diag(THETA.deriv) <- 0.5 * diag(THETA.deriv)

    if(meanstructure) {
        # 5. NU
        NU.deriv <- -1.0 * Omega.mu
        # 6. ALPHA
        ALPHA.deriv <- -1.0 * t( t(Omega.mu) %*% LAMBDA..IB.inv )
    } else {
        NU.deriv <- NULL
        ALPHA.deriv <- NULL
    }

    if(group.w.free) {
        GROUP.W.deriv <- 0.0
    } else {
        GROUP.W.deriv <- NULL
    }

    list(lambda = LAMBDA.deriv,
         beta = BETA.deriv,
         theta = THETA.deriv,
         psi = PSI.deriv,
         nu = NU.deriv,
         alpha = ALPHA.deriv,
         gw = GROUP.W.deriv)
}
# dSigma/dx -- per model matrix
# note:
# we avoid using the duplication and elimination matrices
# for now (perhaps until we'll use the Matrix package)
derivative.sigma.LISREL <- function(m="lambda",
                                    # all model matrix elements, or only a few?
                                    # NOTE: for symmetric matrices,
                                    # we assume that they have full size
                                    # (nvar*nvar) (but already correct for
                                    # symmetry)
                                    idx=seq_len(length(MLIST[[m]])),
                                    MLIST=NULL,
                                    delta = TRUE) {
    # returns d vech(Sigma) / d vec(model matrix m), columns restricted
    # to 'idx'
    LAMBDA <- MLIST$lambda; nvar <- nrow(LAMBDA); nfac <- ncol(LAMBDA)
    PSI <- MLIST$psi

    # only lower.tri part of sigma (not same order as elimination matrix?)
    v.idx <- lav_matrix_vech_idx( nvar ); pstar <- nvar*(nvar+1)/2

    # shortcut for gamma, nu, alpha and tau: they do not enter Sigma
    if(m == "nu" || m == "alpha" || m == "tau" || m == "gamma" || m == "gw") {
        return( matrix(0.0, nrow=pstar, ncol=length(idx)) )
    }

    # Delta?
    delta.flag <- FALSE
    if(delta && !is.null(MLIST$delta)) {
        DELTA <- MLIST$delta
        delta.flag <- TRUE
    } else if(m == "delta") { # modindices?
        return( matrix(0.0, nrow=pstar, ncol=length(idx)) )
    }

    # beta? reuse a precomputed (I-B)^-1 when available
    if(!is.null(MLIST$ibeta.inv)) {
        IB.inv <- MLIST$ibeta.inv
    } else {
        IB.inv <- .internal_get_IB.inv(MLIST = MLIST)
    }

    # pre-compute terms shared by several branches
    if(m == "lambda" || m == "beta" || m == "delta")
        IK <- diag(nvar*nvar) + lav_matrix_commutation(nvar, nvar)
    if(m == "lambda" || m == "beta") {
        IB.inv..PSI..tIB.inv..tLAMBDA <-
            IB.inv %*% PSI %*% t(IB.inv) %*% t(LAMBDA)
    }
    if(m == "beta" || m == "psi") {
        LAMBDA..IB.inv <- LAMBDA %*% IB.inv
    }

    # here we go:
    if(m == "lambda") {
        DX <- IK %*% t(IB.inv..PSI..tIB.inv..tLAMBDA %x% diag(nvar))
        if(delta.flag)
            DX <- DX * as.vector(DELTA %x% DELTA)
    } else if(m == "beta") {
        DX <- IK %*% ( t(IB.inv..PSI..tIB.inv..tLAMBDA) %x% LAMBDA..IB.inv )
        # this is not really needed (because we select idx=m.el.idx)
        DX[,lav_matrix_diag_idx(nfac)] <- 0.0
        if(delta.flag)
            DX <- DX * as.vector(DELTA %x% DELTA)
    } else if(m == "psi") {
        DX <- (LAMBDA..IB.inv %x% LAMBDA..IB.inv)
        # symmetry correction, but keeping all duplicated elements
        # since we depend on idx=m.el.idx
        # otherwise, we could simply postmultiply with the duplicationMatrix
        # we sum up lower.tri + upper.tri (but not the diagonal elements!)
        #imatrix <- matrix(1:nfac^2,nfac,nfac)
        #lower.idx <- imatrix[lower.tri(imatrix, diag=FALSE)]
        #upper.idx <- imatrix[upper.tri(imatrix, diag=FALSE)]
        lower.idx <- lav_matrix_vech_idx(nfac, diagonal = FALSE)
        upper.idx <- lav_matrix_vechru_idx(nfac, diagonal = FALSE)
        # NOTE YR: upper.idx (see 3 lines up) is wrong in MH patch!
        # fixed again 13/06/2012 after bug report of Mijke Rhemtulla.
        offdiagSum <- DX[,lower.idx] + DX[,upper.idx]
        DX[,c(lower.idx, upper.idx)] <- cbind(offdiagSum, offdiagSum)
        if(delta.flag)
            DX <- DX * as.vector(DELTA %x% DELTA)
    } else if(m == "theta") {
        DX <- diag(nvar*nvar) # very sparse...
        # symmetry correction not needed, since all off-diagonal elements
        # are zero?
        if(delta.flag)
            DX <- DX * as.vector(DELTA %x% DELTA)
    } else if(m == "delta") {
        Omega <- computeSigmaHat.LISREL(MLIST, delta=FALSE)
        DD <- diag(DELTA[,1], nvar, nvar)
        DD.Omega <- (DD %*% Omega)
        A <- DD.Omega %x% diag(nvar); B <- diag(nvar) %x% DD.Omega
        DX <- A[,lav_matrix_diag_idx(nvar),drop=FALSE] +
              B[,lav_matrix_diag_idx(nvar),drop=FALSE]
    } else {
        stop("wrong model matrix names: ", m, "\n")
    }

    # keep only the unique (vech) rows and the requested columns
    DX <- DX[v.idx, idx, drop=FALSE]
    DX
}
# dMu/dx -- per model matrix
derivative.mu.LISREL <- function(m="alpha",
                                 # all model matrix elements, or only a few?
                                 idx=seq_len(length(MLIST[[m]])),
                                 MLIST=NULL) {
    # derivative of Mu = NU + LAMBDA (I-B)^-1 ALPHA w.r.t. model matrix 'm'
    LAMBDA <- MLIST$lambda
    nvar <- nrow(LAMBDA); nfac <- ncol(LAMBDA)

    # matrices that do not enter the mean structure: zero derivative
    if(m %in% c("gamma", "psi", "theta", "tau", "delta", "gw")) {
        return( matrix(0.0, nrow=nvar, ncol=length(idx)) )
    }

    # a missing ALPHA acts as a zero vector
    ALPHA <- if(is.null(MLIST$alpha)) matrix(0, nfac, 1L) else MLIST$alpha

    # reuse a precomputed (I-B)^-1 when available
    IB.inv <- if(!is.null(MLIST$ibeta.inv)) {
        MLIST$ibeta.inv
    } else {
        .internal_get_IB.inv(MLIST = MLIST)
    }

    DX <- switch(m,
        nu     = diag(nvar),
        lambda = t(IB.inv %*% ALPHA) %x% diag(nvar),
        beta   = {
            tmp <- t(IB.inv %*% ALPHA) %x% (LAMBDA %*% IB.inv)
            # not strictly needed (idx=m.el.idx excludes the diagonal)
            tmp[, lav_matrix_diag_idx(nfac)] <- 0.0
            tmp
        },
        alpha  = LAMBDA %*% IB.inv,
        stop("wrong model matrix names: ", m, "\n")
    )

    DX[, idx, drop=FALSE]
}
# dTh/dx -- per model matrix
derivative.th.LISREL <- function(m="tau",
                                 # all model matrix elements, or only a few?
                                 idx=seq_len(length(MLIST[[m]])),
                                 th.idx=NULL,
                                 MLIST=NULL,
                                 delta = TRUE) {
    # derivative of the thresholds w.r.t. the elements of model matrix 'm'
    # th.idx: maps each threshold row to its observed variable; an entry of
    #         0 marks a numeric variable (its mean takes the threshold slot)
    LAMBDA <- MLIST$lambda; nvar <- nrow(LAMBDA); nfac <- ncol(LAMBDA)
    TAU <- MLIST$tau; nth <- nrow(TAU)

    # missing alpha: treat as a zero vector
    if(is.null(MLIST$alpha)) {
        ALPHA <- matrix(0, nfac, 1L)
    } else {
        ALPHA <- MLIST$alpha
    }
    # missing nu: treat as a zero vector
    if(is.null(MLIST$nu)) {
        NU <- matrix(0, nvar, 1L)
    } else {
        NU <- MLIST$nu
    }

    # Delta?
    delta.flag <- FALSE
    if(delta && !is.null(MLIST$delta)) {
        DELTA <- MLIST$delta
        delta.flag <- TRUE
    }

    # K_nu expands per-variable quantities to one row per threshold
    if(is.null(th.idx)) {
        th.idx <- seq_len(nth)
        nlev <- rep(1L, nvar)
        K_nu <- diag(nvar)
    } else {
        nlev <- tabulate(th.idx, nbins=nvar); nlev[nlev == 0L] <- 1L
        K_nu <- matrix(0, sum(nlev), nvar)
        K_nu[ cbind(seq_len(sum(nlev)), rep(seq_len(nvar), times=nlev)) ] <- 1.0
    }

    # shortcut for matrices that do not enter the thresholds
    if(m == "gamma" || m == "psi" || m == "theta" || m == "gw") {
        return( matrix(0.0, nrow=length(th.idx), ncol=length(idx) ) )
    }

    # beta? reuse a precomputed (I-B)^-1 when available
    if(!is.null(MLIST$ibeta.inv)) {
        IB.inv <- MLIST$ibeta.inv
    } else {
        IB.inv <- .internal_get_IB.inv(MLIST = MLIST)
    }

    if(m == "tau") {
        DX <- matrix(0, nrow=length(th.idx), ncol=nth)
        DX[ th.idx > 0L, ] <- diag(nth)
        if(delta.flag)
            DX <- DX * as.vector(K_nu %*% DELTA)
    } else if(m == "nu") {
        DX <- (-1) * K_nu
        if(delta.flag)
            DX <- DX * as.vector(K_nu %*% DELTA)
    } else if(m == "lambda") {
        DX <- (-1) * t(IB.inv %*% ALPHA) %x% diag(nvar)
        DX <- K_nu %*% DX
        if(delta.flag)
            DX <- DX * as.vector(K_nu %*% DELTA)
    } else if(m == "beta") {
        DX <- (-1) * t(IB.inv %*% ALPHA) %x% (LAMBDA %*% IB.inv)
        # this is not really needed (because we select idx=m.el.idx)
        DX[,lav_matrix_diag_idx(nfac)] <- 0.0
        DX <- K_nu %*% DX
        if(delta.flag)
            DX <- DX * as.vector(K_nu %*% DELTA)
    } else if(m == "alpha") {
        DX <- (-1) * LAMBDA %*% IB.inv
        DX <- K_nu %*% DX
        if(delta.flag)
            DX <- DX * as.vector(K_nu %*% DELTA)
    } else if(m == "delta") {
        DX1 <- matrix(0, nrow=length(th.idx), ncol=1)
        DX1[ th.idx > 0L, ] <- TAU
        DX2 <- NU + LAMBDA %*% IB.inv %*% ALPHA
        DX2 <- K_nu %*% DX2
        DX <- K_nu * as.vector(DX1 - DX2)
    } else {
        stop("wrong model matrix names: ", m, "\n")
    }

    DX <- DX[, idx, drop=FALSE]
    DX
}
# dPi/dx -- per model matrix
derivative.pi.LISREL <- function(m="lambda",
                                 # all model matrix elements, or only a few?
                                 idx=seq_len(length(MLIST[[m]])),
                                 MLIST=NULL) {
    # derivative of PI (= scaled LAMBDA (I-B)^-1 GAMMA, the reduced-form
    # slopes for the exogenous covariates) w.r.t. model matrix 'm'
    LAMBDA <- MLIST$lambda; nvar <- nrow(LAMBDA); nfac <- ncol(LAMBDA)
    GAMMA <- MLIST$gamma; nexo <- ncol(GAMMA)

    # Delta?
    delta.flag <- FALSE
    if(!is.null(MLIST$delta)) {
        DELTA.diag <- MLIST$delta[,1L]
        delta.flag <- TRUE
    }

    # shortcut for matrices that do not enter PI
    if(m == "tau" || m == "nu" || m == "alpha" || m == "psi" ||
       m == "theta" || m == "gw") {
        return( matrix(0.0, nrow=nvar*nexo, ncol=length(idx) ) )
    }

    # beta? reuse a precomputed (I-B)^-1 when available
    if(!is.null(MLIST$ibeta.inv)) {
        IB.inv <- MLIST$ibeta.inv
    } else {
        IB.inv <- .internal_get_IB.inv(MLIST = MLIST)
    }

    if(m == "lambda") {
        DX <- t(IB.inv %*% GAMMA) %x% diag(nvar)
        if(delta.flag)
            DX <- DX * DELTA.diag
    } else if(m == "beta") {
        DX <- t(IB.inv %*% GAMMA) %x% (LAMBDA %*% IB.inv)
        # this is not really needed (because we select idx=m.el.idx)
        DX[,lav_matrix_diag_idx(nfac)] <- 0.0
        if(delta.flag)
            DX <- DX * DELTA.diag
    } else if(m == "gamma") {
        DX <- diag(nexo) %x% (LAMBDA %*% IB.inv)
        if(delta.flag)
            DX <- DX * DELTA.diag
    } else if(m == "delta") {
        PRE <- rep(1, nexo) %x% diag(nvar)
        DX <- PRE * as.vector(LAMBDA %*% IB.inv %*% GAMMA)
    } else {
        stop("wrong model matrix names: ", m, "\n")
    }

    DX <- DX[, idx, drop=FALSE]
    DX
}
# dGW/dx -- per model matrix
derivative.gw.LISREL <- function(m="gw",
                                 # all model matrix elements, or only a few?
                                 idx=seq_len(length(MLIST[[m]])),
                                 MLIST=NULL) {
    # the group weight is a single free scalar: derivative is 1 w.r.t.
    # itself, 0 w.r.t. every other model matrix
    if(m == "gw") {
        DX <- matrix(1.0, 1, 1)[, idx, drop=FALSE]
    } else {
        DX <- matrix(0.0, nrow=1L, ncol=length(idx))
    }
    DX
}
# dlambda/dx -- per model matrix
derivative.lambda.LISREL <- function(m="lambda",
                                     # all model matrix elements, or only a few?
                                     idx=seq_len(length(MLIST[[m]])),
                                     MLIST=NULL) {
    # d vec(LAMBDA)/dx: identity block for LAMBDA itself, zero otherwise
    n.el <- length(MLIST$lambda)
    if(m == "lambda") {
        DX <- diag(1, nrow=n.el, ncol=n.el)[, idx, drop=FALSE]
    } else {
        DX <- matrix(0.0, nrow=n.el, ncol=length(idx))
    }
    DX
}
# dpsi/dx -- per model matrix - FIXME!!!!!
derivative.psi.LISREL <- function(m="psi",
                                  # all model matrix elements, or only a few?
                                  idx=seq_len(length(MLIST[[m]])),
                                  MLIST=NULL) {
    # d vech(PSI)/dx: rows are restricted to the lower-triangular (vech)
    # elements of PSI; identity block for PSI itself, zero otherwise
    n.el <- length(MLIST$psi)
    v.idx <- lav_matrix_vech_idx( nrow(MLIST$psi) )
    if(m == "psi") {
        DX <- diag(1, nrow=n.el, ncol=n.el)[v.idx, idx, drop=FALSE]
    } else {
        DX <- matrix(0.0, nrow=n.el, ncol=length(idx))[v.idx, , drop=FALSE]
    }
    DX
}
# dtheta/dx -- per model matrix
derivative.theta.LISREL <- function(m="theta",
                                    # all model matrix elements, or only a few?
                                    idx=seq_len(length(MLIST[[m]])),
                                    MLIST=NULL) {
    # d vech(THETA)/dx: rows are restricted to the lower-triangular (vech)
    # elements of THETA; identity block for THETA itself, zero otherwise
    n.el <- length(MLIST$theta)
    v.idx <- lav_matrix_vech_idx( nrow(MLIST$theta) )
    if(m == "theta") {
        DX <- diag(1, nrow=n.el, ncol=n.el)[v.idx, idx, drop=FALSE]
    } else {
        DX <- matrix(0.0, nrow=n.el, ncol=length(idx))[v.idx, , drop=FALSE]
    }
    DX
}
# dbeta/dx -- per model matrix
derivative.beta.LISREL <- function(m="beta",
                                   # all model matrix elements, or only a few?
                                   idx=seq_len(length(MLIST[[m]])),
                                   MLIST=NULL) {
    # d vec(BETA)/dx: identity block for BETA itself, zero otherwise
    n.el <- length(MLIST$beta)
    if(m == "beta") {
        DX <- diag(1, nrow=n.el, ncol=n.el)[, idx, drop=FALSE]
    } else {
        DX <- matrix(0.0, nrow=n.el, ncol=length(idx))
    }
    DX
}
# dgamma/dx -- per model matrix
derivative.gamma.LISREL <- function(m="gamma",
                                    # all model matrix elements, or only a few?
                                    idx=seq_len(length(MLIST[[m]])),
                                    MLIST=NULL) {
    # d vec(GAMMA)/dx: identity block for GAMMA itself, zero otherwise
    n.el <- length(MLIST$gamma)
    if(m == "gamma") {
        DX <- diag(1, nrow=n.el, ncol=n.el)[, idx, drop=FALSE]
    } else {
        DX <- matrix(0.0, nrow=n.el, ncol=length(idx))
    }
    DX
}
# dnu/dx -- per model matrix
derivative.nu.LISREL <- function(m="nu",
                                 # all model matrix elements, or only a few?
                                 idx=seq_len(length(MLIST[[m]])),
                                 MLIST=NULL) {
    # d vec(NU)/dx: identity block for NU itself, zero otherwise
    n.el <- length(MLIST$nu)
    if(m == "nu") {
        DX <- diag(1, nrow=n.el, ncol=n.el)[, idx, drop=FALSE]
    } else {
        DX <- matrix(0.0, nrow=n.el, ncol=length(idx))
    }
    DX
}
# dtau/dx -- per model matrix
derivative.tau.LISREL <- function(m="tau",
                                  # all model matrix elements, or only a few?
                                  idx=seq_len(length(MLIST[[m]])),
                                  MLIST=NULL) {
    # d vec(TAU)/dx: identity block for TAU itself, zero otherwise
    n.el <- length(MLIST$tau)
    if(m == "tau") {
        DX <- diag(1, nrow=n.el, ncol=n.el)[, idx, drop=FALSE]
    } else {
        DX <- matrix(0.0, nrow=n.el, ncol=length(idx))
    }
    DX
}
# dalpha/dx -- per model matrix
derivative.alpha.LISREL <- function(m="alpha",
                                    # all model matrix elements, or only a few?
                                    idx=seq_len(length(MLIST[[m]])),
                                    MLIST=NULL) {
    # d vec(ALPHA)/dx: identity block for ALPHA itself, zero otherwise
    n.el <- length(MLIST$alpha)
    if(m == "alpha") {
        DX <- diag(1, nrow=n.el, ncol=n.el)[, idx, drop=FALSE]
    } else {
        DX <- matrix(0.0, nrow=n.el, ncol=length(idx))
    }
    DX
}
# MLIST = NULL; meanstructure=TRUE; th=TRUE; delta=TRUE; pi=TRUE; gw=FALSE
# lav_matrix_vech_idx <- lavaan:::lav_matrix_vech_idx; lav_matrix_vechru_idx <- lavaan:::lav_matrix_vechru_idx
# vec <- lavaan:::vec; lav_func_jacobian_complex <- lavaan:::lav_func_jacobian_complex
# computeSigmaHat.LISREL <- lavaan:::computeSigmaHat.LISREL
# setDeltaElements.LISREL <- lavaan:::setDeltaElements.LISREL
# Interactive sanity check: compare the analytical derivatives above against
# numerical (complex-step) derivatives for randomly generated model matrices.
# Prints a 'sum delta'/'max delta' line per (target, model matrix) pair;
# deltas should be numerically zero when the analytical formulas are correct.
TESTING_derivatives.LISREL <- function(MLIST = NULL,
                                       nvar = NULL, nfac = NULL, nexo = NULL,
                                       th.idx = NULL, num.idx = NULL,
                                       meanstructure = TRUE,
                                       th = TRUE, delta = TRUE, pi = TRUE,
                                       gw = FALSE, theta = FALSE,
                                       debug = FALSE) {
    if(is.null(MLIST)) {
        # create artificial matrices, compare 'numerical' vs 'analytical'
        # derivatives
        #nvar <- 12; nfac <- 3; nexo <- 4 # this combination is special?
        if(is.null(nvar)) {
            nvar <- 20
        }
        if(is.null(nfac)) {
            nfac <- 6
        }
        if(is.null(nexo)) {
            nexo <- 5
        }
        # roughly half of the observed variables are treated as numeric
        if(is.null(num.idx)) {
            num.idx <- sort(sample(seq_len(nvar), ceiling(nvar/2)))
        }
        # categorical variables get a random number of thresholds;
        # numeric variables contribute a single 0 entry
        if(is.null(th.idx)) {
            th.idx <- integer(0L)
            for(i in seq_len(nvar)) {
                if(i %in% num.idx) {
                    th.idx <- c(th.idx, 0)
                } else {
                    th.idx <- c(th.idx, rep(i, sample(c(1,1,2,6), 1L)))
                }
            }
        }
        nth <- sum(th.idx > 0L)
        MLIST <- list()
        MLIST$lambda <- matrix(0,nvar,nfac)
        MLIST$beta <- matrix(0,nfac,nfac)
        MLIST$theta <- matrix(0,nvar,nvar)
        MLIST$psi <- matrix(0,nfac,nfac)
        if(meanstructure) {
            MLIST$alpha <- matrix(0,nfac,1L)
            MLIST$nu <- matrix(0,nvar,1L)
        }
        if(th) MLIST$tau <- matrix(0,nth,1L)
        if(delta) MLIST$delta <- matrix(0,nvar,1L)
        MLIST$gamma <- matrix(0,nfac,nexo)
        if(gw) MLIST$gw <- matrix(0, 1L, 1L)
        # feed random numbers
        MLIST <- lapply(MLIST, function(x) {x[,] <- rnorm(length(x)); x})
        # fix: zero beta diagonal, positive variances, symmetric psi/theta
        diag(MLIST$beta) <- 0.0
        diag(MLIST$theta) <- diag(MLIST$theta)*diag(MLIST$theta) * 10
        diag(MLIST$psi) <- diag(MLIST$psi)*diag(MLIST$psi) * 10
        MLIST$psi[ lav_matrix_vechru_idx(nfac) ] <-
            MLIST$psi[ lav_matrix_vech_idx(nfac) ]
        MLIST$theta[ lav_matrix_vechru_idx(nvar) ] <-
            MLIST$theta[ lav_matrix_vech_idx(nvar) ]
        if(delta) MLIST$delta[,] <- abs(MLIST$delta)*10
    } else {
        nvar <- nrow(MLIST$lambda)
    }

    # local wrappers: plug a candidate x into model matrix 'mm' and
    # recompute the quantity of interest (used for numerical derivatives);
    # symmetric matrices (psi, theta) are passed in vech form
    compute.sigma <- function(x, mm="lambda", MLIST=NULL) {
        mlist <- MLIST
        if(mm %in% c("psi", "theta")) {
            mlist[[mm]] <- lav_matrix_vech_reverse(x)
        } else {
            mlist[[mm]][,] <- x
        }
        if(theta) {
            mlist <- setDeltaElements.LISREL(MLIST = mlist, num.idx = num.idx)
        }
        lav_matrix_vech(computeSigmaHat.LISREL(mlist))
    }
    compute.mu <- function(x, mm="lambda", MLIST=NULL) {
        mlist <- MLIST
        if(mm %in% c("psi", "theta")) {
            mlist[[mm]] <- lav_matrix_vech_reverse(x)
        } else {
            mlist[[mm]][,] <- x
        }
        if(theta) {
            mlist <- setDeltaElements.LISREL(MLIST = mlist, num.idx = num.idx)
        }
        computeMuHat.LISREL(mlist)
    }
    compute.th2 <- function(x, mm="tau", MLIST=NULL, th.idx) {
        mlist <- MLIST
        if(mm %in% c("psi", "theta")) {
            mlist[[mm]] <- lav_matrix_vech_reverse(x)
        } else {
            mlist[[mm]][,] <- x
        }
        if(theta) {
            mlist <- setDeltaElements.LISREL(MLIST = mlist, num.idx = num.idx)
        }
        computeTH.LISREL(mlist, th.idx=th.idx)
    }
    compute.pi <- function(x, mm="lambda", MLIST=NULL) {
        mlist <- MLIST
        if(mm %in% c("psi", "theta")) {
            mlist[[mm]] <- lav_matrix_vech_reverse(x)
        } else {
            mlist[[mm]][,] <- x
        }
        if(theta) {
            mlist <- setDeltaElements.LISREL(MLIST = mlist, num.idx = num.idx)
        }
        computePI.LISREL(mlist)
    }
    compute.gw <- function(x, mm="gw", MLIST=NULL) {
        mlist <- MLIST
        if(mm %in% c("psi", "theta")) {
            mlist[[mm]] <- lav_matrix_vech_reverse(x)
        } else {
            mlist[[mm]][,] <- x
        }
        if(theta) {
            mlist <- setDeltaElements.LISREL(MLIST = mlist, num.idx = num.idx)
        }
        mlist$gw[1,1]
    }

    # if theta, set MLIST$delta
    if(theta) {
        MLIST <- setDeltaElements.LISREL(MLIST = MLIST, num.idx = num.idx)
    }

    for(mm in names(MLIST)) {
        if(mm %in% c("psi", "theta")) {
            x <- lav_matrix_vech(MLIST[[mm]])
        } else {
            x <- lav_matrix_vec(MLIST[[mm]])
        }
        # under the theta parameterization, delta is a derived quantity
        if(mm == "delta" && theta) next
        if(debug) {
            cat("### mm = ", mm, "\n")
        }

        # 1. sigma
        DX1 <- lav_func_jacobian_complex(func=compute.sigma, x=x, mm=mm, MLIST=MLIST)
        DX2 <- derivative.sigma.LISREL(m=mm, idx=seq_len(length(MLIST[[mm]])),
                                       MLIST=MLIST, delta = !theta)
        if(mm %in% c("psi","theta")) {
            # remove duplicated columns of symmetric matrices
            idx <- lav_matrix_vechru_idx(sqrt(ncol(DX2)), diagonal=FALSE)
            if(length(idx) > 0L) DX2 <- DX2[,-idx]
        }
        if(theta) {
            sigma.hat <- computeSigmaHat.LISREL(MLIST=MLIST, delta=FALSE)
            R <- lav_deriv_cov2cor(sigma.hat, num.idx = num.idx)
            DX3 <- DX2
            DX2 <- R %*% DX2
        }
        if(debug) {
            # NOTE(review): DX3 only exists when theta=TRUE; debug printing
            # with theta=FALSE would fail here -- confirm intended usage
            cat("[SIGMA] mm = ", sprintf("%-8s:", mm), "DX1 (numerical):\n");
            print(zapsmall(DX1)); cat("\n")
            cat("[SIGMA] mm = ", sprintf("%-8s:", mm), "DX2 (analytical):\n");
            print(DX2); cat("\n")
            cat("[SIGMA] mm = ", sprintf("%-8s:", mm), "DX3 (analytical):\n");
            print(DX3); cat("\n")
        }
        cat("[SIGMA] mm = ", sprintf("%-8s:", mm), "sum delta = ",
            sprintf("%12.9f", sum(DX1-DX2)), " max delta = ",
            sprintf("%12.9f", max(DX1-DX2)), "\n")

        # 2. mu
        DX1 <- lav_func_jacobian_complex(func=compute.mu, x=x, mm=mm, MLIST=MLIST)
        DX2 <- derivative.mu.LISREL(m=mm, idx=seq_len(length(MLIST[[mm]])),
                                    MLIST=MLIST)
        if(mm %in% c("psi","theta")) {
            # remove duplicated columns of symmetric matrices
            idx <- lav_matrix_vechru_idx(sqrt(ncol(DX2)), diagonal = FALSE)
            if(length(idx) > 0L) DX2 <- DX2[,-idx]
        }
        cat("[MU   ] mm = ", sprintf("%-8s:", mm), "sum delta = ",
            sprintf("%12.9f", sum(DX1-DX2)), " max delta = ",
            sprintf("%12.9f", max(DX1-DX2)), "\n")
        if(debug) {
            cat("[MU   ] mm = ", sprintf("%-8s:", mm), "DX1 (numerical):\n");
            print(zapsmall(DX1)); cat("\n")
            cat("[MU   ] mm = ", sprintf("%-8s:", mm), "DX2 (analytical):\n");
            print(DX2); cat("\n")
        }

        # 3. th
        if(th) {
            DX1 <- lav_func_jacobian_complex(func=compute.th2, x=x, mm=mm, MLIST=MLIST,
                                             th.idx=th.idx)
            DX2 <- derivative.th.LISREL(m=mm, idx=seq_len(length(MLIST[[mm]])),
                                        MLIST=MLIST, th.idx=th.idx,
                                        delta=TRUE)
            if(theta) {
                # under the theta parameterization, delta depends on the
                # other matrices, so add the chain-rule term through delta
                # 1. compute dDelta.dx
                dxSigma <-
                    derivative.sigma.LISREL(m=mm, idx=seq_len(length(MLIST[[mm]])),
                                            MLIST=MLIST, delta = !theta)
                var.idx <- which(!lav_matrix_vech_idx(nvar) %in%
                                 lav_matrix_vech_idx(nvar, diagonal = FALSE))
                sigma.hat <- computeSigmaHat.LISREL(MLIST=MLIST, delta=FALSE)
                dsigma <- diag(sigma.hat)
                # dy/ddsigma = -0.5/(ddsigma*sqrt(ddsigma))
                dDelta.dx <- dxSigma[var.idx,] * -0.5 / (dsigma*sqrt(dsigma))
                # 2. compute dth.dDelta
                dth.dDelta <-
                    derivative.th.LISREL(m="delta",
                                         idx=seq_len(length(MLIST[["delta"]])),
                                         MLIST=MLIST, th.idx=th.idx)
                # 3. add dth.dDelta %*% dDelta.dx
                no.num.idx <- which(th.idx > 0)
                DX2[no.num.idx,] <- DX2[no.num.idx,,drop=FALSE] +
                    (dth.dDelta %*% dDelta.dx)[no.num.idx,,drop=FALSE]
                #DX2 <- DX2 + dth.dDelta %*% dDelta.dx
            }
            if(mm %in% c("psi","theta")) {
                # remove duplicated columns of symmetric matrices
                idx <- lav_matrix_vechru_idx(sqrt(ncol(DX2)), diagonal = FALSE)
                if(length(idx) > 0L) DX2 <- DX2[,-idx]
            }
            cat("[TH   ] mm = ", sprintf("%-8s:", mm), "sum delta = ",
                sprintf("%12.9f", sum(DX1-DX2)), " max delta = ",
                sprintf("%12.9f", max(DX1-DX2)), "\n")
            if(debug) {
                cat("[TH   ] mm = ",sprintf("%-8s:", mm),"DX1 (numerical):\n")
                print(zapsmall(DX1)); cat("\n")
                cat("[TH   ] mm = ",sprintf("%-8s:", mm),"DX2 (analytical):\n")
                print(DX2); cat("\n")
            }
        }

        # 4. pi
        if(pi) {
            DX1 <- lav_func_jacobian_complex(func=compute.pi, x=x, mm=mm, MLIST=MLIST)
            DX2 <- derivative.pi.LISREL(m=mm, idx=seq_len(length(MLIST[[mm]])),
                                        MLIST=MLIST)
            if(mm %in% c("psi","theta")) {
                # remove duplicated columns of symmetric matrices
                idx <- lav_matrix_vechru_idx(sqrt(ncol(DX2)), diagonal = FALSE)
                if(length(idx) > 0L) DX2 <- DX2[,-idx]
            }
            if(theta) {
                # chain-rule correction through delta (see the th section)
                # 1. compute dDelta.dx
                dxSigma <-
                    derivative.sigma.LISREL(m=mm, idx=seq_len(length(MLIST[[mm]])),
                                            MLIST=MLIST, delta = !theta)
                if(mm %in% c("psi","theta")) {
                    # remove duplicated columns of symmetric matrices
                    idx <- lav_matrix_vechru_idx(sqrt(ncol(dxSigma)), diagonal = FALSE)
                    if(length(idx) > 0L) dxSigma <- dxSigma[,-idx]
                }
                var.idx <- which(!lav_matrix_vech_idx(nvar) %in%
                                 lav_matrix_vech_idx(nvar, diagonal = FALSE))
                sigma.hat <- computeSigmaHat.LISREL(MLIST=MLIST, delta=FALSE)
                dsigma <- diag(sigma.hat)
                # dy/ddsigma = -0.5/(ddsigma*sqrt(ddsigma))
                dDelta.dx <- dxSigma[var.idx,] * -0.5 / (dsigma*sqrt(dsigma))
                # 2. compute dpi.dDelta
                dpi.dDelta <-
                    derivative.pi.LISREL(m="delta",
                                         idx=seq_len(length(MLIST[["delta"]])),
                                         MLIST=MLIST)
                # 3. add dpi.dDelta %*% dDelta.dx
                no.num.idx <- which(! seq.int(1L, nvar) %in% num.idx )
                no.num.idx <- rep(seq.int(0,nexo-1) * nvar,
                                  each=length(no.num.idx)) + no.num.idx
                DX2[no.num.idx,] <- DX2[no.num.idx,,drop=FALSE] +
                    (dpi.dDelta %*% dDelta.dx)[no.num.idx,,drop=FALSE]
            }
            cat("[PI   ] mm = ", sprintf("%-8s:", mm), "sum delta = ",
                sprintf("%12.9f", sum(DX1-DX2)), " max delta = ",
                sprintf("%12.9f", max(DX1-DX2)), "\n")
            if(debug) {
                cat("[PI   ] mm = ",sprintf("%-8s:", mm),"DX1 (numerical):\n")
                print(zapsmall(DX1)); cat("\n")
                cat("[PI   ] mm = ",sprintf("%-8s:", mm),"DX2 (analytical):\n")
                print(DX2); cat("\n")
            }
        }

        # 5. gw
        if(gw) {
            DX1 <- lav_func_jacobian_complex(func=compute.gw, x=x, mm=mm, MLIST=MLIST)
            DX2 <- derivative.gw.LISREL(m=mm, idx=seq_len(length(MLIST[[mm]])),
                                        MLIST=MLIST)
            if(mm %in% c("psi","theta")) {
                # remove duplicated columns of symmetric matrices
                idx <- lav_matrix_vechru_idx(sqrt(ncol(DX2)), diagonal = FALSE)
                if(length(idx) > 0L) DX2 <- DX2[,-idx]
            }
            cat("[GW   ] mm = ", sprintf("%-8s:", mm), "sum delta = ",
                sprintf("%12.9f", sum(DX1-DX2)), " max delta = ",
                sprintf("%12.9f", max(DX1-DX2)), "\n")
            if(debug) {
                cat("[GW   ] mm = ",sprintf("%-8s:", mm),"DX1 (numerical):\n")
                print(DX1); cat("\n\n")
                cat("[GW   ] mm = ",sprintf("%-8s:", mm),"DX2 (analytical):\n")
                print(DX2); cat("\n\n")
            }
        }
    }

    # return the (possibly generated) matrices, plus the index vectors used
    MLIST$th.idx <- th.idx
    MLIST$num.idx <- num.idx
    MLIST
}
|
read.scale <- function(dataset, report=message) {
    ## Read the scale-bar information from <dataset>/scale.csv, if present.
    ##
    ## @param dataset Path to the directory that may contain \code{scale.csv}
    ## @param report  Function used to report progress (default \code{message})
    ## @return Named vector with elements \code{Scale} and \code{Units};
    ##   both are NA (with a warning) when no scale file exists.
    scale <- c(Scale=NA, Units=NA)
    scfile <- file.path(dataset, "scale.csv")
    if (file.exists(scfile)) {
        report("Reading scale file")
        sc <- read.csv(scfile)
        valid.colnames <- c("Scale", "Units")
        ## Reject files with unexpected columns so mis-formatted files are
        ## caught early rather than silently mis-read
        if (!all(colnames(sc) %in% valid.colnames)) {
            stop(paste("Unknown column names",
                       paste0("\"", setdiff(colnames(sc), valid.colnames), "\"",
                              collapse=", "),
                       "in", scfile, ". Valid column names:",
                       paste(valid.colnames, collapse=", ")))
        }
        ## NOTE(review): as.matrix() coerces a mixed numeric/character data
        ## frame to character, so the is.numeric() check below can fail
        ## whenever a Units column is present -- TODO confirm intended
        ## behaviour
        scale <- as.matrix(sc)[1,]
        ## Scalar condition: use short-circuit || rather than elementwise |
        if (!("Scale" %in% names(scale)) || !is.numeric(scale["Scale"])) {
            stop("Scale file has not been read correctly. Check it is in the correct format.")
        }
        if (!("Units" %in% names(scale))) {
            scale["Units"] <- NA
        }
    } else {
        warning("Scale file \"scale.csv\" does not exist. Scale bar will not be set.")
    }
    return(scale)
}
read.image <- function(dataset, report=message) {
    ## Load <dataset>/image.png as a raster image, or return NULL when the
    ## file does not exist.
    imfile <- file.path(dataset, "image.png")
    if (!file.exists(imfile)) {
        return(NULL)
    }
    report("Reading image")
    grDevices::as.raster(png::readPNG(imfile))
}
## Copied from demo("colors")
## @title Comparing Colors
## @param col     Vector of colour names to display
## @param nrow    Number of grid rows
## @param ncol    Number of grid columns (default: enough for all colours)
## @param txt.col Colour used for the text labels
## @return the grid layout, invisibly
## @author Marius Hofert, originally
plotCol <- function(col, nrow=1, ncol=ceiling(length(col) / nrow),
                    txt.col="black") {
    stopifnot(nrow >= 1, ncol >= 1)
    if(length(col) > nrow*ncol)
        warning("some colors will not be shown")
    grid::grid.newpage()
    gl <- grid::grid.layout(nrow, ncol)
    grid::pushViewport(grid::viewport(layout=gl))
    ## Fill the grid row by row, one colour swatch + label per cell
    cell <- 1
    for(row in seq_len(nrow)) {
        for(column in seq_len(ncol)) {
            grid::pushViewport(grid::viewport(layout.pos.row=row,
                                              layout.pos.col=column))
            grid::grid.rect(gp=grid::gpar(fill=col[cell]))
            grid::grid.text(col[cell], gp=grid::gpar(col=txt.col))
            grid::upViewport()
            cell <- cell + 1
        }
    }
    grid::upViewport()
    invisible(gl)
}
check.colour <- function(col) {
    ## TRUE when `col` is a valid R colour name; otherwise display a
    ## palette of candidate names (via plotCol) and return FALSE.
    if (col %in% grDevices::colours()) {
        return(TRUE)
    }
    plotCol(grep("([0-9]|medium|light|dark)", grDevices::colors(),
                 invert=TRUE, value=TRUE), nrow=20)
    FALSE
}
##' Read data points from a file \code{datapoints.csv} in the directory
##' \code{dataset}. The CSV should contain two columns for every
##' dataset. Each pair of columns must contain a unique name in the
##' first cell of the first row and a valid colour in the second
##' cell of the first row. In the remaining rows, the X coordinates of
##' data points should be in the first column and the Y coordinates
##' should be in the second column.
##'
##' @title Read data points in CSV format
##' @param dataset Path to directory containing \code{datapoints.csv}
##' @return List containing
##' \item{\code{Ds}}{List of sets of datapoints. Each set comprises a 2-column matrix and each set is named.}
##' \item{\code{cols}}{List of colours for each dataset. There is one element that corresponds to each element of \code{Ds} and which bears the same name.}
##' @author David Sterratt
##' Read data points from \file{datapoints.csv} in the directory
##' \code{dataset}. Columns are consumed in pairs: the first header cell of
##' each pair names the dataset and the second holds its colour; the
##' remaining rows hold X (first column) and Y (second column) coordinates.
##'
##' @param dataset Path to directory containing \file{datapoints.csv}
##' @return List with elements \code{Ds} (named list of 2-column X/Y
##'   matrices) and \code{cols} (named character vector of colours parallel
##'   to \code{Ds}); both are empty when the CSV is absent.
read.datapoints <- function(dataset) {
  datfile <- file.path(dataset, "datapoints.csv")
  Ds <- list()
  cols <- c()
  if (file.exists(datfile)) {
    message("Reading datapoints")
    ## stringsAsFactors=FALSE keeps the header/colour cells as strings
    dat <- read.csv(datfile, stringsAsFactors=FALSE)
    ## Peel off two columns at a time until fewer than two remain
    while (ncol(dat) >= 2) {
      pair <- dat[, 1:2]
      dat <- dat[, -(1:2)]
      labels <- colnames(pair)
      ## Coerce to numeric; non-numeric cells become NA (warnings muted)
      suppressWarnings({pair <- sapply(pair, as.numeric, USE.NAMES=FALSE)})
      ## matrix() guards the single-row case, where sapply returns a vector
      pair <- matrix(pair, ncol=2)
      ## Drop NA rows (e.g. blank cells) and the na.action bookkeeping
      pair <- na.omit(pair)
      attr(pair, "na.action") <- NULL
      colnames(pair) <- c("X", "Y")
      ## Append under the dataset's name
      entry <- list(pair)
      names(entry) <- labels[1]
      Ds <- c(Ds, entry)
      col <- labels[2]
      if (!check.colour(col)) {
        stop("Invalid colour \"", col, "\" in datapoints.csv - see window for valid colour names")
      }
      names(col) <- labels[1]
      cols <- c(cols, col)
    }
  }
  return(list(Ds=Ds, cols=cols))
}
##' Read data counts from a file \file{datacounts.csv} in the
##' directory \code{dataset}. The CSV file should contain three columns
##' for every dataset. Each triple of columns must contain a unique name
##' in the first cell of the first row and a valid colour in the
##' second cell of the first row. In the remaining rows, the X
##' coordinates of data counts should be in the first column, the Y
##' coordinates in the second column, and the counts in the third column.
##'
##' @title Read data counts in CSV format
##' @param dataset Path to directory containing \code{datacounts.csv}
##' @return List containing
##' \item{\code{Gs}}{List of sets of data counts. Each set comprises a 3-column matrix and each set is named.}
##' \item{\code{cols}}{List of colours for each dataset. There is one element that corresponds to each element of \code{Gs} and which bears the same name.}
##' @author David Sterratt
##' Read data counts from \file{datacounts.csv} in the directory
##' \code{dataset}. Columns are consumed three at a time: the first header
##' cell of each triple names the dataset and the second holds its colour;
##' the remaining rows hold X coordinates, Y coordinates and counts.
##'
##' @param dataset Path to directory containing \file{datacounts.csv}
##' @return List with elements \code{Gs} (named list of 3-column matrices
##'   with columns X, Y, C) and \code{cols} (named character vector of
##'   colours parallel to \code{Gs}); both are empty when the CSV is absent.
read.datacounts <- function(dataset) {
  datfile <- file.path(dataset, "datacounts.csv")
  Gs <- list()
  cols <- c()
  if (file.exists(datfile)) {
    message("Reading datacounts")
    ## Read file. stringsAsFactors=FALSE prevents conversion to factors
    dat <- read.csv(file.path(datfile), stringsAsFactors=FALSE)
    ## Go through triples of columns
    while(ncol(dat) >= 3) {
      ## Extract first three columns
      d <- dat[,1:3]
      dat <- dat[,-(1:3)]
      names <- colnames(d)
      ## Convert strings to numeric. Suppress warnings as sapply
      ## complains about coercion to NA
      suppressWarnings({d <- sapply(d, as.numeric, USE.NAMES=FALSE)})
      ## Force conversion to matrix, necessary when the data has only
      ## one row
      d <- matrix(d, ncol=3)
      colnames(d) <- c("X", "Y", "C")
      ## Any strings (e.g. empty ones) that don't convert will be
      ## converted to NA. Get rid of these.
      d <- na.omit(d)
      attr(d, "na.action") <- NULL
      ## Add to lists with appropriate names
      G <- list(d)
      names(G) <- names[1]
      Gs <- c(Gs, G)
      ## BUG FIX: the colour was previously wrapped in list(), which made
      ## `cols` a list instead of a named character vector, inconsistent
      ## with read.datapoints() and with the documented return value.
      col <- names[2]
      if (!(check.colour(col))) {
        stop("Invalid colour \"", col, "\" in datacounts.csv - see window for valid colour names")
      }
      names(col) <- names[1]
      cols <- c(cols, col)
    }
  }
  return(list(Gs=Gs, cols=cols))
}
| /pkg/retistruct/R/format-common.R | no_license | davidcsterratt/retistruct | R | false | false | 6,909 | r | read.scale <- function(dataset, report=message) {
## Read the scale-bar definition from scale.csv in `dataset`.
## Returns a named vector with elements "Scale" and "Units"; both are NA
## when no scale file exists. `report` is the progress-reporting function.
## If there is a scale file, read it
scale <- c(Scale=NA, Units=NA)
scfile <- file.path(dataset, "scale.csv")
if (file.exists(scfile)) {
report("Reading scale file")
sc <- read.csv(scfile)
## Only "Scale" and "Units" are legal column headers
valid.colnames <- c("Scale", "Units")
if (!all(colnames(sc) %in% valid.colnames)) {
stop(paste("Unknown column names",
paste0("\"", setdiff(colnames(sc), valid.colnames), "\"",
collapse=", "),
"in", scfile, ". Valid column names:",
paste(valid.colnames, collapse=", ")))
}
## Keep only the first data row, as a named vector
scale <- as.matrix(sc)[1,]
## NOTE(review): scalar condition built with `|`; `||` is conventional in
## an if(). Also, as.matrix() on a data frame with a character "Units"
## column yields a character matrix, so is.numeric(scale["Scale"]) can be
## FALSE even for well-formed files - confirm intended behaviour.
if (!("Scale" %in% names(scale)) | !is.numeric(scale["Scale"])) {
stop("Scale file has not been read correctly. Check it is in the correct format.")
}
## Tolerate files that supply only "Scale"
if (!("Units" %in% names(scale))) {
scale["Units"] <- NA
}
} else {
warning("Scale file \"scale.csv\" does not exist. Scale bar will not be set.")
}
return(scale)
}
read.image <- function(dataset, report=message) {
## Read the low-power image "image.png" from `dataset`, if present.
## Returns a raster object, or NULL when the file does not exist.
## `report` is the progress-reporting function (default: message).
im <- NULL
imfile <- file.path(dataset, "image.png")
if (file.exists(imfile)) {
report("Reading image")
im <- grDevices::as.raster(png::readPNG(imfile))
}
return(im)
}
## Copied from demo("colors")
## @title Comparing Colors
## @param col
## @param nrow
## @param ncol
## @param txt.col
## @return the grid layout, invisibly
## @author Marius Hofert, originally
plotCol <- function(col, nrow=1, ncol=ceiling(length(col) / nrow),
txt.col="black") {
## Draw each colour in `col` as a labelled swatch in an nrow x ncol grid
## of viewports; returns the grid layout, invisibly.
## (Adapted from demo("colors"); originally by Marius Hofert.)
stopifnot(nrow >= 1, ncol >= 1)
if(length(col) > nrow*ncol)
warning("some colors will not be shown")
grid::grid.newpage()
gl <- grid::grid.layout(nrow, ncol)
grid::pushViewport(grid::viewport(layout=gl))
## Fill cells row by row; indices past length(col) give NA (blank swatch)
ic <- 1
for(i in 1:nrow) {
for(j in 1:ncol) {
grid::pushViewport(grid::viewport(layout.pos.row=i, layout.pos.col=j))
grid::grid.rect(gp= grid::gpar(fill=col[ic]))
grid::grid.text(col[ic], gp=grid::gpar(col=txt.col))
grid::upViewport()
ic <- ic+1
}
}
grid::upViewport()
invisible(gl)
}
check.colour <- function(col) {
## Return TRUE if `col` is a recognised R colour name; otherwise display a
## chart of the basic colour names (numbered/medium/light/dark variants
## omitted) as a picker aid and return FALSE.
if (!(col %in% grDevices::colours())) {
plotCol(grep("([0-9]|medium|light|dark)", grDevices::colors(), invert=TRUE, value=TRUE), nrow=20)
return(FALSE)
}
return(TRUE)
}
##' Read data points from a file \code{datapoints.csv} in the directory
##' \code{dataset}. The CSV should contain two columns for every
##' dataset. Each pair of columns must contain a unique name in the
##' first cell of the first row and a valid colour in the second
##' cell of the first row. In the remaining rows, the X coordinates of
##' data points should be in the first column and the Y coordinates
##' should be in the second column.
##'
##' @title Read data points in CSV format
##' @param dataset Path to directory containing \code{datapoints.csv}
##' @return List containing
##' \item{\code{Ds}}{List of sets of datapoints. Each set comprises a 2-column matrix and each set is named.}
##' \item{\code{cols}}{List of colours for each dataset. There is one element that corresponds to each element of \code{Ds} and which bears the same name.}
##' @author David Sterratt
read.datapoints <- function(dataset) {
## Returns list(Ds=<named list of 2-column X/Y matrices>,
## cols=<named character vector of colours>); both empty if no CSV.
datfile <- file.path(dataset, "datapoints.csv")
Ds <- list()
cols <- c()
if (file.exists(datfile)) {
message("Reading datapoints")
## Read file. stringsAsFactors=FALSE prevents conversion to factors
dat <- read.csv(file.path(datfile), stringsAsFactors=FALSE)
## Go through pairs of columns
while(ncol(dat) >= 2) {
## Extract first two columns
d <- dat[,1:2]
dat <- dat[,-(1:2)]
names <- colnames(d)
## Convert strings to numeric. Suppress warnings as sapply
## complains about coercion to NA
suppressWarnings({d <- sapply(d, as.numeric, USE.NAMES=FALSE)})
## Force conversion to matrix, necessary when the data has only
## one row
d <- matrix(d, ncol=2)
## Any strings (e.g. empty ones) that don't convert will be
## converted to NA. Get rid of these.
d <- na.omit(d)
attr(d, "na.action") <- NULL
colnames(d) <- c("X", "Y")
## Add to lists with appropriate names
D <- list(d)
names(D) <- names[1]
Ds <- c(Ds, D)
## Second header cell carries the dataset's display colour
col <- names[2]
if (!(check.colour(col))) {
stop("Invalid colour \"", col, "\" in datapoints.csv - see window for valid colour names")
}
names(col) <- names[1]
cols <- c(cols, col)
}
}
return(list(Ds=Ds, cols=cols))
}
##' Read data counts from a file \file{datacounts.csv} in the
##' directory \code{dataset}. The CSV file should contain three columns
##' for every dataset. Each triple of columns must contain a unique name
##' in the first cell of the first row and a valid colour in the
##' second cell of the first row. In the remaining rows, the X
##' coordinates of data counts should be in the first column, the Y
##' coordinates in the second column, and the counts in the third column.
##'
##' @title Read data counts in CSV format
##' @param dataset Path to directory containing \code{datacounts.csv}
##' @return List containing
##' \item{\code{Gs}}{List of sets of data counts. Each set comprises a 3-column matrix and each set is named.}
##' \item{\code{cols}}{List of colours for each dataset. There is one element that corresponds to each element of \code{Gs} and which bears the same name.}
##' @author David Sterratt
##' Read data counts from \file{datacounts.csv} in the directory
##' \code{dataset}. Columns are consumed three at a time: the first header
##' cell of each triple names the dataset and the second holds its colour;
##' the remaining rows hold X coordinates, Y coordinates and counts.
##'
##' @param dataset Path to directory containing \file{datacounts.csv}
##' @return List with elements \code{Gs} (named list of 3-column matrices
##'   with columns X, Y, C) and \code{cols} (named character vector of
##'   colours parallel to \code{Gs}); both are empty when the CSV is absent.
read.datacounts <- function(dataset) {
  datfile <- file.path(dataset, "datacounts.csv")
  Gs <- list()
  cols <- c()
  if (file.exists(datfile)) {
    message("Reading datacounts")
    ## Read file. stringsAsFactors=FALSE prevents conversion to factors
    dat <- read.csv(file.path(datfile), stringsAsFactors=FALSE)
    ## Go through triples of columns
    while(ncol(dat) >= 3) {
      ## Extract first three columns
      d <- dat[,1:3]
      dat <- dat[,-(1:3)]
      names <- colnames(d)
      ## Convert strings to numeric. Suppress warnings as sapply
      ## complains about coercion to NA
      suppressWarnings({d <- sapply(d, as.numeric, USE.NAMES=FALSE)})
      ## Force conversion to matrix, necessary when the data has only
      ## one row
      d <- matrix(d, ncol=3)
      colnames(d) <- c("X", "Y", "C")
      ## Any strings (e.g. empty ones) that don't convert will be
      ## converted to NA. Get rid of these.
      d <- na.omit(d)
      attr(d, "na.action") <- NULL
      ## Add to lists with appropriate names
      G <- list(d)
      names(G) <- names[1]
      Gs <- c(Gs, G)
      ## BUG FIX: the colour was previously wrapped in list(), which made
      ## `cols` a list instead of a named character vector, inconsistent
      ## with read.datapoints() and with the documented return value.
      col <- names[2]
      if (!(check.colour(col))) {
        stop("Invalid colour \"", col, "\" in datacounts.csv - see window for valid colour names")
      }
      names(col) <- names[1]
      cols <- c(cols, col)
    }
  }
  return(list(Gs=Gs, cols=cols))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compare-solutions.R
\name{compare_solutions}
\alias{compare_solutions}
\title{Compare latent profile models}
\usage{
compare_solutions(x, statistics = "BIC")
}
\arguments{
\item{x}{An object of class 'tidyLPA'.}
\item{statistics}{Character vector. Which statistics to examine for
determining the optimal model. Defaults to 'BIC'.}
}
\value{
An object of class 'bestLPA' and 'list', containing a tibble of fits
'fits', a named vector 'best', indicating which model fit best according to
each fit index, a numeric vector 'AHP' indicating the best model according to
the \code{\link{AHP}}, an object 'plot' of class 'ggplot', and a numeric
vector 'statistics' corresponding to argument of the same name.
}
\description{
Takes an object of class 'tidyLPA', containing multiple latent profile models
with different number of classes or model specifications, and helps select
the optimal number of classes and model specification.
}
\examples{
results <- iris \%>\%
subset(select = c("Sepal.Length", "Sepal.Width",
"Petal.Length", "Petal.Width")) \%>\%
estimate_profiles(1:3) \%>\%
compare_solutions()
}
\author{
Caspar J. van Lissa
}
| /man/compare_solutions.Rd | permissive | needystatistician/tidyLPA | R | false | true | 1,218 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compare-solutions.R
\name{compare_solutions}
\alias{compare_solutions}
\title{Compare latent profile models}
\usage{
compare_solutions(x, statistics = "BIC")
}
\arguments{
\item{x}{An object of class 'tidyLPA'.}
\item{statistics}{Character vector. Which statistics to examine for
determining the optimal model. Defaults to 'BIC'.}
}
\value{
An object of class 'bestLPA' and 'list', containing a tibble of fits
'fits', a named vector 'best', indicating which model fit best according to
each fit index, a numeric vector 'AHP' indicating the best model according to
the \code{\link{AHP}}, an object 'plot' of class 'ggplot', and a numeric
vector 'statistics' corresponding to argument of the same name.
}
\description{
Takes an object of class 'tidyLPA', containing multiple latent profile models
with different number of classes or model specifications, and helps select
the optimal number of classes and model specification.
}
\examples{
results <- iris \%>\%
subset(select = c("Sepal.Length", "Sepal.Width",
"Petal.Length", "Petal.Width")) \%>\%
estimate_profiles(1:3) \%>\%
compare_solutions()
}
\author{
Caspar J. van Lissa
}
|
#########################################
### DMA-PP.R ###
### ###
### Maria Ranci 02/09/2009 ###
### ###
# 26/11/09 MR e CR. Passaggio Flag_automatica 'G' -> 'P' e 'W'->'S'
# 2020-01-27 MR&MS dockerizzazione
#########################################
#
library(DBI)
library(RMySQL)
#==============================================================================
# LEGGI ARGOMENTI RIGA DI COMANDO
#
# Riga di comando:
# R --vanilla inizio fine < DMA-PP.R
# inizio > marca temporale del primo record da elaborare
# formato "2009-02-16 00:00:00"
# fine > marca temporale dell'ultimo record da elaborare
# formato "2009-02-16 00:00:00"
#..............................................................................
## Command-line interface: the script is run as
##   R --vanilla inizio1 inizio2 fine1 fine2 < DMA-PP.R
## so positions 3-6 of commandArgs() hold the date and time parts of the
## processing window (see the usage comment above).
arguments <- commandArgs()
arguments
inizio1 <- arguments[3] #"2009-02-16"
inizio2 <- arguments[4] #"00:00:00"
fine1 <- arguments[5] #"2009-03-04"
fine2 <- arguments[6] #"01:00:00"
## Recombine date and time into the full timestamps used in SQL predicates
inizio<-paste(inizio1,inizio2,sep=" ")
fine<-paste(fine1,fine2,sep=" ")
## Per-run log file, named after the processing window
file_log <- paste('DMA-PP_',inizio1,'_',fine1,'_rem2.log',sep='')
tipologia<-"Pluviometri"
#
## Years spanned by the window: one annual table M_<tipologia>_<year> is
## updated for each of these years
anno_inizio<-as.numeric(substr(inizio1,0,4))
anno_fine<-as.numeric(substr(fine1,0,4))
anni<-anno_inizio:anno_fine
#___________________________________________________
# SUBROUTINES
#___________________________________________________
############## GESTIONE DEGLI ERRORI
## Error handler installed via options(error=...): appends a marker line and
## the actual R error message to the run log.
## FIX: the handler previously wrote only the marker ("Messaggio d'Errore
## prodotto:") with nothing after it; since show.error.messages=FALSE
## suppresses the message everywhere else, failures were untraceable.
## geterrmessage() now records the message in the log.
neverstop<-function(){
cat("EE..ERRORE durante l'esecuzione dello script!! Messaggio d'Errore prodotto:\n",file=file_log,append=T)
cat(geterrmessage(),"\n",file=file_log,append=T)
}
options(show.error.messages=FALSE,error=neverstop)
#==============================================================================
# BODY - BODY - BODY - BODY - BODY - BODY - BODY - BODY - BODY - BODY -BODY
#==============================================================================
## Write the run header to the log: the first cat() (no append) creates or
## truncates the file; all subsequent writes append.
cat ( "ESECUZIONE DMA-PP ", date()," \n\n" , file = file_log)
cat ( " tipologia > Pluviometri" , file = file_log,append=T)
cat ( "\n" , file = file_log,append=T)
cat ( " argomenti riga di comando:\n" , file = file_log,append=T)
cat ( paste(" inizio > ",inizio,"\n") , file = file_log,append=T)
cat ( paste(" fine > ",fine,"\n") , file = file_log,append=T)
cat ( "----------------------------\n" , file = file_log,append=T)
#___________________________________________________
# COLLEGAMENTO AL DB
#___________________________________________________
cat("collegamento al DB\n",file=file_log,append=T)
#definisco driver
drv<-dbDriver("MySQL")
#apro connessione con il db
## Connection parameters come from environment variables (set by the Docker
## deployment). NOTE(review): the try() result is never inspected here, so a
## failed connection only surfaces when the first query fails - confirm.
conn<-try(dbConnect(drv, user=as.character(Sys.getenv("MYSQL_USR")), password=as.character(Sys.getenv("MYSQL_PWD")), dbname=as.character(Sys.getenv("MYSQL_DBNAME")), host=as.character(Sys.getenv("MYSQL_HOST")),port=as.numeric(Sys.getenv("MYSQL_PORT"))))
#___________________________________________________
# ciclo sulle tipologie di sensori
#___________________________________________________
#
## Table names derived from the sensor type: M_Pluviometri holds recent
## data, M_PluviometriDQC holds the quality-control test results.
nome_tavola_recente <- paste("M_", tipologia , sep="")
nome_tavola_DQC <- paste("M_", tipologia, "DQC" , sep="")
#___________________________________________________
# estraggo info da tavola DQC
#___________________________________________________
# estraggo dalla tabella DQC le coppie sensore-istante segnalate su cui poi ciclare per l'assegnazione della flag di validita'
#query_coppie <- paste ("select distinct IDsensore, Data_e_ora from ",nome_tavola_DQC," where Data_e_ora>'",inizio,"' and Data_e_ora<'",fine ,"'", sep="")
## Distinct (sensor, timestamp) pairs that FAILED test P1a inside the
## processing window; note both endpoints are excluded (strict > and <).
query_coppie <- paste ("select distinct IDsensore, Data_e_ora from ",nome_tavola_DQC," where Data_e_ora>'",inizio,"' and Data_e_ora<'",fine ,"' and Test='P1a' and Result='F'", sep="")
q_coppie <- try( dbGetQuery(conn,query_coppie), silent=TRUE )
## Any database failure aborts the whole script with exit status 1
if (inherits( q_coppie, "try-error")) {
quit(status=1)
}
# print(q_coppie)
coppia <- 1
## Main loop over the flagged (sensor, timestamp) pairs: re-read every DQC
## test result for the pair, derive the automatic validity flag, propagate
## it to the annual tables and the recent table, and - only when the flag
## actually changed - stage the row in DQCinDBUNICO_dati. Any query failure
## aborts the script with exit status 1.
while(coppia < length(q_coppie$IDsensore) + 1){
flag ='P'
auxP1aF=NULL
# auxT2aS=NULL
# print("------------------------")
# print(coppia)
#___________________________________________________
# estraggo esito test relativo alla coppia
#___________________________________________________
query_esito <- paste ("select Test, Result from ", nome_tavola_DQC ," where IDsensore=",q_coppie$IDsensore[coppia], " and Data_e_ora='",q_coppie$Data_e_ora[coppia],"'" , sep="")
q_esito <- try( dbGetQuery(conn,query_esito), silent=TRUE )
if (inherits( q_esito, "try-error")) {
quit(status=1)
}
cat ( paste(" elaborazione sensore/data > ",q_coppie$IDsensore[coppia],q_coppie$Data_e_ora[coppia],"\n"),
file = file_log,append=T)
#___________________________________________________
# assegno flag di validita'
#___________________________________________________
# q_esito e' un vettore colonna delle dimensione del numero di test
# P1a, S1a, T1a, T2a che ritornano F o S per la coppia univoca (IDsens,Data) in esame
# print(q_esito)
#..............................................
# l'operazione:
# (q_esito$Test %in% 'P1a') & (q_esito$Result %in% 'F')
# restituisce un vettore colonna delle dimensioni di q_esito e di tipo LOGICO
# con TRUE nella posizione i-esima se Test='P1a' e Result='F'
# e FALSE altrimenti
# l'operazione:
# any( (q_esito$Test %in% 'P1a') & (q_esito$Result %in% 'F') )
# restituisce un solo valore LOGICO:
# TRUE se esiste almeno un record del vettore TRUE
#..............................................
auxP1aF <- any( (q_esito$Test %in% 'P1a') & (q_esito$Result %in% 'F') )
# auxT2aS <- any( (q_esito$Test %in% 'T2a') & (q_esito$Result %in% 'S') )
# DMA-PP-1
# if( (auxP1aF == FALSE) &
# (auxT2aS == FALSE) ) flag='P'
# DMA-PP-2
# if( (auxP1aF == FALSE) &
# (auxT2aS == TRUE ) ) flag='P'
# DMA-PP-3
## Decision rule: a P1a failure marks the datum 'F' (failed); otherwise it
## keeps the default 'P' (passed) set at the top of the loop.
if( (auxP1aF == TRUE) ) flag='F'
#
cat( paste(" Risultati: P1a F? ",auxP1aF,'allora risultato finale =',flag,"\n"),
file=file_log, append=T)
#___________________________________________________
# prima di scrivere nelle tabelle dati l'esito dei test
# verifico il valore della Flag_automatica dell'ultimo update.
# solo se cambia la scrivo anche in DQCinDBUNICO per passare
# l'informazione al REM, altrimenti no
#___________________________________________________
query_select_flag <- paste ("select Flag_automatica from ", nome_tavola_recente ," where IDsensore=",q_coppie$IDsensore[coppia], " and Data_e_ora='",q_coppie$Data_e_ora[coppia],"'" , sep="")
q_select_flag <- try( dbGetQuery(conn,query_select_flag ), silent=TRUE )
if (inherits( q_select_flag , "try-error")) {
quit(status=1)
}
## NOTE(review): if no matching row exists, flag_precedente is a
## zero-length vector and the later if(flag_precedente!=flag) errors out -
## confirm that the recent table always holds the flagged rows.
flag_precedente<-q_select_flag$Flag_automatica
#___________________________________________________
# update flag nelle tavole dei dati annuale
# la prima query assegna la flag automatica
# la query bis allinea le Flag_manuale_DBunico in caso di F
# la query tris allinea le Flag_manuale_DBunico in caso di P
#___________________________________________________
for (anno in anni) {
nome_tavola_annuale <- paste("M_", tipologia, "_", anno , sep="")
query_update_annuale <- paste ("update ", nome_tavola_annuale ," set Flag_automatica='",flag, "', Autore='DMA-PP.R',Data='",Sys.time(),"' where IDsensore=",q_coppie$IDsensore[coppia], " and Data_e_ora='",q_coppie$Data_e_ora[coppia],"'" , sep="")
q_update_annuale <- try( dbGetQuery(conn,query_update_annuale), silent=TRUE )
if (inherits( q_update_annuale, "try-error")) {
quit(status=1)
}
query_update_annuale_bis <- paste ("update ", nome_tavola_annuale ," set Flag_manuale_DBunico=5, Autore='DMA-PP.R',Data='",Sys.time(),"' where IDsensore=",q_coppie$IDsensore[coppia], " and Data_e_ora='",q_coppie$Data_e_ora[coppia],"' and Flag_automatica='F' and Flag_manuale_DBunico in (-1,0,1,2)" , sep="")
q_update_annuale_bis <- try( dbGetQuery(conn,query_update_annuale_bis), silent=TRUE )
if (inherits( q_update_annuale_bis, "try-error")) {
quit(status=1)
}
query_update_annuale_tris <- paste ("update ", nome_tavola_annuale ," set Flag_manuale_DBunico=-1, Autore='DMA-PP.R',Data='",Sys.time(),"' where IDsensore=",q_coppie$IDsensore[coppia], " and Data_e_ora='",q_coppie$Data_e_ora[coppia],"' and Flag_automatica='P' and Flag_manuale_DBunico in (0,5)" , sep="")
q_update_annuale_tris <- try( dbGetQuery(conn,query_update_annuale_tris), silent=TRUE )
if (inherits( q_update_annuale_tris, "try-error")) {
quit(status=1)
}
}
#___________________________________________________
# update flag nelle tavole dei dati recenti
# la prima query assegna la flag automatica
# la query bis allinea le Flag_manuale_DBunico in caso di F
# la query tris allinea le Flag_manuale_DBunico in caso di P
#___________________________________________________
query_update_recente <- paste ("update ", nome_tavola_recente ," set Flag_automatica='",flag, "', Autore='DMA-PP.R',Data='",Sys.time(),"' where IDsensore=",q_coppie$IDsensore[coppia], " and Data_e_ora='",q_coppie$Data_e_ora[coppia],"'" , sep="")
q_update_recente <- try( dbGetQuery(conn,query_update_recente), silent=TRUE )
if (inherits( q_update_recente, "try-error")) {
quit(status=1)
}
query_update_recente_bis <- paste ("update ", nome_tavola_recente ," set Flag_manuale_DBunico=5, Autore='DMA-PP.R',Data='",Sys.time(),"' where IDsensore=",q_coppie$IDsensore[coppia], " and Data_e_ora='",q_coppie$Data_e_ora[coppia],"' and Flag_automatica='F' and Flag_manuale_DBunico in (-1,0,1,2)" , sep="")
q_update_recente_bis <- try( dbGetQuery(conn,query_update_recente_bis), silent=TRUE )
if (inherits( q_update_recente_bis, "try-error")) {
quit(status=1)
}
query_update_recente_tris <- paste ("update ", nome_tavola_recente ," set Flag_manuale_DBunico=-1, Autore='DMA-PP.R',Data='",Sys.time(),"' where IDsensore=",q_coppie$IDsensore[coppia], " and Data_e_ora='",q_coppie$Data_e_ora[coppia],"' and Flag_automatica='P' and Flag_manuale_DBunico in (0,5)" , sep="")
q_update_recente_tris <- try( dbGetQuery(conn,query_update_recente_tris), silent=TRUE )
if (inherits( q_update_recente_tris, "try-error")) {
quit(status=1)
}
#___________________________________________________
# update flag nella tavola DQCinDBUNICO
#___________________________________________________
## Stage the row for the downstream REM system only when the flag changed
if(flag_precedente!=flag){
query_update_DQCinDBUNICO <- paste ("REPLACE INTO DQCinDBUNICO_dati SELECT * from ", nome_tavola_recente," where IDsensore=",q_coppie$IDsensore[coppia], " and Data_e_ora='",q_coppie$Data_e_ora[coppia],"'" , sep="")
q_update_DQCinDBUNICO <- try( dbGetQuery(conn,query_update_DQCinDBUNICO), silent=TRUE )
if (inherits( q_update_DQCinDBUNICO, "try-error")) {
quit(status=1)
}
}
coppia <- coppia + 1
} # fine ciclo sulle copie
#___________________________________________________
# DISCONNESSIONE DAL DB
#___________________________________________________
## Tidy up: close the connection, drop the handle, unload the driver, then
## record the successful run in the log.
# chiudo db
cat ( "chiudo DB \n" , file = file_log , append = TRUE )
dbDisconnect(conn)
rm(conn)
dbUnloadDriver(drv)
cat ( "PROGRAMMA ESEGUITO CON SUCCESSO alle ", date()," \n" , file = file_log , append = TRUE )
| /DMA-PP.R | no_license | ARPASMR/adqc | R | false | false | 11,121 | r | #########################################
### DMA-PP.R ###
### ###
### Maria Ranci 02/09/2009 ###
### ###
# 26/11/09 MR e CR. Passaggio Flag_automatica 'G' -> 'P' e 'W'->'S'
# 2020-01-27 MR&MS dockerizzazione
#########################################
#
library(DBI)
library(RMySQL)
#==============================================================================
# LEGGI ARGOMENTI RIGA DI COMANDO
#
# Riga di comando:
# R --vanilla inizio fine < DMA-PP.R
# inizio > marca temporale del primo record da elaborare
# formato "2009-02-16 00:00:00"
# fine > marca temporale dell'ultimo record da elaborare
# formato "2009-02-16 00:00:00"
#..............................................................................
arguments <- commandArgs()
arguments
inizio1 <- arguments[3] #"2009-02-16"
inizio2 <- arguments[4] #"00:00:00"
fine1 <- arguments[5] #"2009-03-04"
fine2 <- arguments[6] #"01:00:00"
inizio<-paste(inizio1,inizio2,sep=" ")
fine<-paste(fine1,fine2,sep=" ")
file_log <- paste('DMA-PP_',inizio1,'_',fine1,'_rem2.log',sep='')
tipologia<-"Pluviometri"
#
anno_inizio<-as.numeric(substr(inizio1,0,4))
anno_fine<-as.numeric(substr(fine1,0,4))
anni<-anno_inizio:anno_fine
#___________________________________________________
# SUBROUTINES
#___________________________________________________
############## GESTIONE DEGLI ERRORI
## Error handler installed via options(error=...): appends a marker line and
## the actual R error message to the run log.
## FIX: the handler previously wrote only the marker ("Messaggio d'Errore
## prodotto:") with nothing after it; since show.error.messages=FALSE
## suppresses the message everywhere else, failures were untraceable.
## geterrmessage() now records the message in the log.
neverstop<-function(){
cat("EE..ERRORE durante l'esecuzione dello script!! Messaggio d'Errore prodotto:\n",file=file_log,append=T)
cat(geterrmessage(),"\n",file=file_log,append=T)
}
options(show.error.messages=FALSE,error=neverstop)
#==============================================================================
# BODY - BODY - BODY - BODY - BODY - BODY - BODY - BODY - BODY - BODY -BODY
#==============================================================================
cat ( "ESECUZIONE DMA-PP ", date()," \n\n" , file = file_log)
cat ( " tipologia > Pluviometri" , file = file_log,append=T)
cat ( "\n" , file = file_log,append=T)
cat ( " argomenti riga di comando:\n" , file = file_log,append=T)
cat ( paste(" inizio > ",inizio,"\n") , file = file_log,append=T)
cat ( paste(" fine > ",fine,"\n") , file = file_log,append=T)
cat ( "----------------------------\n" , file = file_log,append=T)
#___________________________________________________
# COLLEGAMENTO AL DB
#___________________________________________________
cat("collegamento al DB\n",file=file_log,append=T)
#definisco driver
drv<-dbDriver("MySQL")
#apro connessione con il db
conn<-try(dbConnect(drv, user=as.character(Sys.getenv("MYSQL_USR")), password=as.character(Sys.getenv("MYSQL_PWD")), dbname=as.character(Sys.getenv("MYSQL_DBNAME")), host=as.character(Sys.getenv("MYSQL_HOST")),port=as.numeric(Sys.getenv("MYSQL_PORT"))))
#___________________________________________________
# ciclo sulle tipologie di sensori
#___________________________________________________
#
nome_tavola_recente <- paste("M_", tipologia , sep="")
nome_tavola_DQC <- paste("M_", tipologia, "DQC" , sep="")
#___________________________________________________
# estraggo info da tavola DQC
#___________________________________________________
# estraggo dalla tabella DQC le coppie sensore-istante segnalate su cui poi ciclare per l'assegnazione della flag di validita'
#query_coppie <- paste ("select distinct IDsensore, Data_e_ora from ",nome_tavola_DQC," where Data_e_ora>'",inizio,"' and Data_e_ora<'",fine ,"'", sep="")
query_coppie <- paste ("select distinct IDsensore, Data_e_ora from ",nome_tavola_DQC," where Data_e_ora>'",inizio,"' and Data_e_ora<'",fine ,"' and Test='P1a' and Result='F'", sep="")
q_coppie <- try( dbGetQuery(conn,query_coppie), silent=TRUE )
if (inherits( q_coppie, "try-error")) {
quit(status=1)
}
# print(q_coppie)
coppia <- 1
while(coppia < length(q_coppie$IDsensore) + 1){
flag ='P'
auxP1aF=NULL
# auxT2aS=NULL
# print("------------------------")
# print(coppia)
#___________________________________________________
# estraggo esito test relativo alla coppia
#___________________________________________________
query_esito <- paste ("select Test, Result from ", nome_tavola_DQC ," where IDsensore=",q_coppie$IDsensore[coppia], " and Data_e_ora='",q_coppie$Data_e_ora[coppia],"'" , sep="")
q_esito <- try( dbGetQuery(conn,query_esito), silent=TRUE )
if (inherits( q_esito, "try-error")) {
quit(status=1)
}
cat ( paste(" elaborazione sensore/data > ",q_coppie$IDsensore[coppia],q_coppie$Data_e_ora[coppia],"\n"),
file = file_log,append=T)
#___________________________________________________
# assegno flag di validita'
#___________________________________________________
# q_esito e' un vettore colonna delle dimensione del numero di test
# P1a, S1a, T1a, T2a che ritornano F o S per la coppia univoca (IDsens,Data) in esame
# print(q_esito)
#..............................................
# l'operazione:
# (q_esito$Test %in% 'P1a') & (q_esito$Result %in% 'F')
# restituisce un vettore colonna delle dimensioni di q_esito e di tipo LOGICO
# con TRUE nella posizione i-esima se Test='P1a' e Result='F'
# e FALSE altrimenti
# l'operazione:
# any( (q_esito$Test %in% 'P1a') & (q_esito$Result %in% 'F') )
# restituisce un solo valore LOGICO:
# TRUE se esiste almeno un record del vettore TRUE
#..............................................
auxP1aF <- any( (q_esito$Test %in% 'P1a') & (q_esito$Result %in% 'F') )
# auxT2aS <- any( (q_esito$Test %in% 'T2a') & (q_esito$Result %in% 'S') )
# DMA-PP-1
# if( (auxP1aF == FALSE) &
# (auxT2aS == FALSE) ) flag='P'
# DMA-PP-2
# if( (auxP1aF == FALSE) &
# (auxT2aS == TRUE ) ) flag='P'
# DMA-PP-3
if( (auxP1aF == TRUE) ) flag='F'
#
cat( paste(" Risultati: P1a F? ",auxP1aF,'allora risultato finale =',flag,"\n"),
file=file_log, append=T)
#___________________________________________________
# prima di scrivere nelle tabelle dati l'esito dei test
# verifico il valore della flag_automantica dell'ultimo update.
# solo se cambia la scrivo anche in DQCinDBUNICO per passare
# l'informazione al REM, altrimenti no
#___________________________________________________
query_select_flag <- paste ("select Flag_automatica from ", nome_tavola_recente ," where IDsensore=",q_coppie$IDsensore[coppia], " and Data_e_ora='",q_coppie$Data_e_ora[coppia],"'" , sep="")
q_select_flag <- try( dbGetQuery(conn,query_select_flag ), silent=TRUE )
if (inherits( q_select_flag , "try-error")) {
quit(status=1)
}
flag_precedente<-q_select_flag$Flag_automatica
#___________________________________________________
# update flag nelle tavole dei dati annuale
# la prima query assegna la flag automatica
# la query bis allinea le Flag_manuale_DBunico in caso di F
# la query tris allinea le Flag_manuale_DBunico in caso di P
#___________________________________________________
for (anno in anni) {
nome_tavola_annuale <- paste("M_", tipologia, "_", anno , sep="")
query_update_annuale <- paste ("update ", nome_tavola_annuale ," set Flag_automatica='",flag, "', Autore='DMA-PP.R',Data='",Sys.time(),"' where IDsensore=",q_coppie$IDsensore[coppia], " and Data_e_ora='",q_coppie$Data_e_ora[coppia],"'" , sep="")
q_update_annuale <- try( dbGetQuery(conn,query_update_annuale), silent=TRUE )
if (inherits( q_update_annuale, "try-error")) {
quit(status=1)
}
query_update_annuale_bis <- paste ("update ", nome_tavola_annuale ," set Flag_manuale_DBunico=5, Autore='DMA-PP.R',Data='",Sys.time(),"' where IDsensore=",q_coppie$IDsensore[coppia], " and Data_e_ora='",q_coppie$Data_e_ora[coppia],"' and Flag_automatica='F' and Flag_manuale_DBunico in (-1,0,1,2)" , sep="")
q_update_annuale_bis <- try( dbGetQuery(conn,query_update_annuale_bis), silent=TRUE )
if (inherits( q_update_annuale_bis, "try-error")) {
quit(status=1)
}
query_update_annuale_tris <- paste ("update ", nome_tavola_annuale ," set Flag_manuale_DBunico=-1, Autore='DMA-PP.R',Data='",Sys.time(),"' where IDsensore=",q_coppie$IDsensore[coppia], " and Data_e_ora='",q_coppie$Data_e_ora[coppia],"' and Flag_automatica='P' and Flag_manuale_DBunico in (0,5)" , sep="")
q_update_annuale_tris <- try( dbGetQuery(conn,query_update_annuale_tris), silent=TRUE )
if (inherits( q_update_annuale_tris, "try-error")) {
quit(status=1)
}
}
#___________________________________________________
# update flag nelle tavole dei dati recenti
# la prima query assegna la flag automatica
# la query bis allinea le Flag_manuale_DBunico in caso di F
# la query tris allinea le Flag_manuale_DBunico in caso di P
#___________________________________________________
query_update_recente <- paste ("update ", nome_tavola_recente ," set Flag_automatica='",flag, "', Autore='DMA-PP.R',Data='",Sys.time(),"' where IDsensore=",q_coppie$IDsensore[coppia], " and Data_e_ora='",q_coppie$Data_e_ora[coppia],"'" , sep="")
q_update_recente <- try( dbGetQuery(conn,query_update_recente), silent=TRUE )
if (inherits( q_update_recente, "try-error")) {
quit(status=1)
}
query_update_recente_bis <- paste ("update ", nome_tavola_recente ," set Flag_manuale_DBunico=5, Autore='DMA-PP.R',Data='",Sys.time(),"' where IDsensore=",q_coppie$IDsensore[coppia], " and Data_e_ora='",q_coppie$Data_e_ora[coppia],"' and Flag_automatica='F' and Flag_manuale_DBunico in (-1,0,1,2)" , sep="")
q_update_recente_bis <- try( dbGetQuery(conn,query_update_recente_bis), silent=TRUE )
if (inherits( q_update_recente_bis, "try-error")) {
quit(status=1)
}
query_update_recente_tris <- paste ("update ", nome_tavola_recente ," set Flag_manuale_DBunico=-1, Autore='DMA-PP.R',Data='",Sys.time(),"' where IDsensore=",q_coppie$IDsensore[coppia], " and Data_e_ora='",q_coppie$Data_e_ora[coppia],"' and Flag_automatica='P' and Flag_manuale_DBunico in (0,5)" , sep="")
q_update_recente_tris <- try( dbGetQuery(conn,query_update_recente_tris), silent=TRUE )
if (inherits( q_update_recente_tris, "try-error")) {
quit(status=1)
}
#___________________________________________________
# update flag nella tavola DQCinDBUNICO
#___________________________________________________
if(flag_precedente!=flag){
query_update_DQCinDBUNICO <- paste ("REPLACE INTO DQCinDBUNICO_dati SELECT * from ", nome_tavola_recente," where IDsensore=",q_coppie$IDsensore[coppia], " and Data_e_ora='",q_coppie$Data_e_ora[coppia],"'" , sep="")
q_update_DQCinDBUNICO <- try( dbGetQuery(conn,query_update_DQCinDBUNICO), silent=TRUE )
if (inherits( q_update_DQCinDBUNICO, "try-error")) {
quit(status=1)
}
}
coppia <- coppia + 1
} # fine ciclo sulle copie
#___________________________________________________
# DISCONNECT FROM THE DB
#___________________________________________________
# Close the connection, drop the handle, and unload the driver.
# The log messages written to file_log are intentionally kept in Italian.
cat ( "chiudo DB \n" , file = file_log , append = TRUE )
dbDisconnect(conn)
rm(conn)
dbUnloadDriver(drv)
cat ( "PROGRAMMA ESEGUITO CON SUCCESSO alle ", date()," \n" , file = file_log , append = TRUE )
|
#####################################################################
### Access the Impala shell OpenSky Network
### - The functions are refered from R package "osn".
#####################################################################
### Convert "YYYY-MM-DD hh:mm:ss" to a UNIX timestamp floored to midnight.
### Example: "2021-02-05 09:00:00" -> seconds since the epoch at 00:00:00
### of that day (lubridate::as_datetime parses as UTC by default).
date2unixtime <- function(YYYYMMDD_hhmmss) {
  secs <- as.integer(lubridate::as_datetime(YYYYMMDD_hhmmss))
  # %/% floors, so one division is equivalent to the original
  # floor-to-hour step followed by floor-to-day.
  (secs %/% 86400L) * 86400L
}
### Convert a UNIX timestamp to a "YYYY-MM-DD hh:mm:ss" POSIXct value.
### Fix: pin tz = "UTC" so this round-trips with date2unixtime(), which
### parses input as UTC (lubridate::as_datetime default). Previously the
### host's local timezone was used, shifting results on non-UTC machines.
unixtime2date <- function(unixtime) {
  return( as.POSIXct(unixtime, origin = "1970-01-01", tz = "UTC") )
}
### Run an SQL query on the Impala shell over SSH and return the
### result parsed into a tibble. Stops with an error when the query
### yields no rows.
impala_query <- function(session, query) {
  stopifnot(class(session) == "ssh_session")
  # Execute remotely and capture stdout as a single string.
  res <- ssh::ssh_exec_internal(
    session,
    stringr::str_glue("-q {query}", query = query)
  )
  raw_out <- rawToChar(res$stdout)
  # Keep a copy of the raw shell output when TRACE logging is enabled.
  if (logger::log_threshold() == logger::TRACE) {
    readr::write_lines(raw_out, "query_output.txt")
  }
  rows <- parse_impala_query_output(raw_out)
  if (length(rows) == 0) {
    stop("There is no available data!")
  }
  readr::read_delim(
    I(rows),
    delim = "|",
    na = c("", "NULL"),
    trim_ws = TRUE
  )
}
#' Create an ssh session to OpenSky Network's Impala shell.
#'
#' @param usr user account
#' @param passwd password, or a function that prompts for one
#' @param host host to connect to
#' @param port port to connect to
#' @param verbose passed on to [ssh::ssh_connect()]
#'
#' @return an SSH session
#' @export
#'
#' @examples
#' \dontrun{
#' # connect directly to OSN
#' session <- osn_connect("cucu", verbose = 2)
#'
#' # connect via SSH port forwarding
#' session <- osn_connect(
#'   usr = Sys.getenv("OSN_USER"),
#'   passwd = Sys.getenv("OSN_PASSWORD"),
#'   port = 6666,
#'   host = "localhost"
#' )
#' }
osn_connect <- function(usr, passwd = askpass::askpass,
                        host = "data.opensky-network.org", port = 2230,
                        verbose = FALSE) {
  # ssh::ssh_connect() takes a single "user@host:port" string
  ssh::ssh_connect(paste0(usr, "@", host, ":", port),
                   passwd = passwd, verbose = verbose)
}
#' Disconnect from OpenSky Network's Impala shell.
#'
#' @param session the SSH session to close, as created by `osn_connect()`
#'
#' @return the value returned by [ssh::ssh_disconnect()]; called for its
#'   side effect of closing the connection
#' @export
#'
#' @examples
#' \dontrun{
#' session <- osn_connect("cucu", verbose = 2)
#' osn_disconnect(session)
#' }
osn_disconnect <- function(session) {
ssh::ssh_disconnect(session)
}
### Utility: turn raw Impala shell output into clean "|"-delimited lines
### ready for readr::read_delim().
parse_impala_query_output <- function(lines) {
  out <- purrr::flatten_chr(stringi::stri_split_lines(lines))
  # drop empty lines
  out <- stringr::str_subset(out, pattern = "^$", negate = TRUE)
  # drop the "+---" table-border lines
  out <- stringr::str_subset(out, pattern = "^\\+-", negate = TRUE)
  # squeeze out all runs of blanks
  out <- stringr::str_replace_all(out, pattern = "[ ][ ]*", "")
  # strip the leading and trailing '|'
  out <- stringr::str_replace_all(out, "^[|](.+)[|]$", "\\1")
  # de-duplicate: the column-name header is repeated on every page
  unique(out)
}
| /manifold_clust/Rcode_others/OpenSkyNetwork/connect_impala_shell.R | permissive | statKim/FDA-Lab | R | false | false | 3,562 | r | #####################################################################
### Access the Impala shell OpenSky Network
### - The functions are refered from R package "osn".
#####################################################################
### Convert "YYYY-MM-DD hh:mm:ss" to a UNIX timestamp floored to midnight.
### Example: "2021-02-05 09:00:00" -> seconds since the epoch at 00:00:00
### of that day (lubridate::as_datetime parses as UTC by default).
date2unixtime <- function(YYYYMMDD_hhmmss) {
  secs <- as.integer(lubridate::as_datetime(YYYYMMDD_hhmmss))
  # %/% floors, so one division is equivalent to the original
  # floor-to-hour step followed by floor-to-day.
  (secs %/% 86400L) * 86400L
}
### Convert a UNIX timestamp to a "YYYY-MM-DD hh:mm:ss" POSIXct value.
### Fix: pin tz = "UTC" so this round-trips with date2unixtime(), which
### parses input as UTC (lubridate::as_datetime default). Previously the
### host's local timezone was used, shifting results on non-UTC machines.
unixtime2date <- function(unixtime) {
  return( as.POSIXct(unixtime, origin = "1970-01-01", tz = "UTC") )
}
### Run an SQL query on the Impala shell over SSH and return the
### result parsed into a tibble. Stops with an error when the query
### yields no rows.
impala_query <- function(session, query) {
  stopifnot(class(session) == "ssh_session")
  # Execute remotely and capture stdout as a single string.
  res <- ssh::ssh_exec_internal(
    session,
    stringr::str_glue("-q {query}", query = query)
  )
  raw_out <- rawToChar(res$stdout)
  # Keep a copy of the raw shell output when TRACE logging is enabled.
  if (logger::log_threshold() == logger::TRACE) {
    readr::write_lines(raw_out, "query_output.txt")
  }
  rows <- parse_impala_query_output(raw_out)
  if (length(rows) == 0) {
    stop("There is no available data!")
  }
  readr::read_delim(
    I(rows),
    delim = "|",
    na = c("", "NULL"),
    trim_ws = TRUE
  )
}
#' Create an ssh session to OpenSky Network's Impala shell.
#'
#' @param usr user account
#' @param passwd password, or a function that prompts for one
#' @param host host to connect to
#' @param port port to connect to
#' @param verbose passed on to [ssh::ssh_connect()]
#'
#' @return an SSH session
#' @export
#'
#' @examples
#' \dontrun{
#' # connect directly to OSN
#' session <- osn_connect("cucu", verbose = 2)
#'
#' # connect via SSH port forwarding
#' session <- osn_connect(
#'   usr = Sys.getenv("OSN_USER"),
#'   passwd = Sys.getenv("OSN_PASSWORD"),
#'   port = 6666,
#'   host = "localhost"
#' )
#' }
osn_connect <- function(usr, passwd = askpass::askpass,
                        host = "data.opensky-network.org", port = 2230,
                        verbose = FALSE) {
  # ssh::ssh_connect() takes a single "user@host:port" string
  ssh::ssh_connect(paste0(usr, "@", host, ":", port),
                   passwd = passwd, verbose = verbose)
}
#' Disconnect from OpenSky Network's Impala shell.
#'
#' @param session the SSH session to close, as created by `osn_connect()`
#'
#' @return the value returned by [ssh::ssh_disconnect()]; called for its
#'   side effect of closing the connection
#' @export
#'
#' @examples
#' \dontrun{
#' session <- osn_connect("cucu", verbose = 2)
#' osn_disconnect(session)
#' }
osn_disconnect <- function(session) {
ssh::ssh_disconnect(session)
}
### Utility: turn raw Impala shell output into clean "|"-delimited lines
### ready for readr::read_delim().
parse_impala_query_output <- function(lines) {
  out <- purrr::flatten_chr(stringi::stri_split_lines(lines))
  # drop empty lines
  out <- stringr::str_subset(out, pattern = "^$", negate = TRUE)
  # drop the "+---" table-border lines
  out <- stringr::str_subset(out, pattern = "^\\+-", negate = TRUE)
  # squeeze out all runs of blanks
  out <- stringr::str_replace_all(out, pattern = "[ ][ ]*", "")
  # strip the leading and trailing '|'
  out <- stringr::str_replace_all(out, "^[|](.+)[|]$", "\\1")
  # de-duplicate: the column-name header is repeated on every page
  unique(out)
}
|
library(shiny)
library(shinythemes)
# Define UI
# UI definition. Review fixes:
#  * "\(1β\alpha\)" was mojibake of "\(1-\alpha\)" in the CI bullet text
#  * "wil be" -> "will be" in the submission paragraph
#  * trailing commas that left empty arguments in sidebarPanel/tags$ol/
#    tags$ul/mainPanel calls removed
ui <- fluidPage(theme = shinytheme("flatly"),
  withMathJax(), # to be able to use LaTeX expressions within the text
  navbarPage(
    "Final Project - Group 11",
    # Tab 1: assignment description with project metadata in a sidebar
    tabPanel("The Assignment",
      sidebarPanel(style="text-align: center;",
        tags$h2("Project Information"),
        tags$p(),
        tags$br(),
        tags$h3("Objective"),
        tags$h5("To develop an R package implementing linear regression"),
        tags$p(),
        tags$br(),
        tags$h3("Contributors"),
        tags$h5(a(href="https://github.com/gabiitokazu", "Ana Gabriela Itokazu")),
        tags$h5(a(href="https://github.com/EyoelBerhane", "Eyoel Berhane")),
        tags$h5(a(href="https://github.com/Johnstaph", "John Musah")),
        tags$p(),
        tags$br(),
        tags$h3("Sources"),
        a(href="https://github.com/AU-R-Programming/FinalProject-11", "Package"),
        tags$br(),
        a(href="https://github.com/AU-R-Programming/FinalProject-11/tree/main/shiny", "Shiny App"),
        tags$br(),
        a(href="https://github.com/AU-R-Programming/FinalProject-11", "RMarkdown"),
        tags$br(),
        a(href="https://github.com/AU-R-Programming/FinalProject-11", "GitHub Repository"),
        tags$p(),
        tags$br(),
        tags$h3("Class"),
        tags$h5("STAT 6210"),
        tags$h5("R Programming for Data Science"),
        tags$h5(a(href="https://github.com/robertomolinari", "Prof. Dr. Roberto Molinari")),
        tags$h5("Auburn University - Fall 2020")
      ), # sidebarPanel
      mainPanel(style="text-align: justify;",
        h1("The Assignment"),
        em("This package was built as part of the requirements for the 'R Programming for Data Science' course, by Prof. Dr. Roberto Molinari. The assignment was lined up as follows:"),
        br(),
        br(),
        p("The final project will be evaluated on 100 points and the goal is to develop an R package implementing linear regression as highlighted in",
          a(href="https://smac-group.github.io/ds/section-functions.html#section-example-continued-least-squares-function", "Section 6.4 of the book"),
          "."),
        p("The package must contain the basic functions to perform linear regression (", em("e.g."), "estimate the coefficient vector \\(\\beta\\)) and obtain different statistics from the procedure. Using the notation from the book and without using any of the linear regression functions already available in R (", em("i.e."), "all outputs must be produced using formulas provided in the book and in this document), the basic outputs from the procedure must be the following:"),
        tags$ul(
          tags$li("Confidence intervals: the user must be able to choose the significance level \\(\\alpha\\) to obtain for the \\(1-\\alpha\\) confidence intervals for \\(\\beta\\) and whether to use the asymptotic or bootstrap approach for this."),
          tags$li("Plots (with ", em("e.g."), "ggplot2) including:",
            tags$ol(
              tags$li("Residuals vs fitted-value."),
              tags$li("qq-plot of residuals."),
              tags$li("Histogram (or density) of residuals.")
            )
          ),
          tags$li("Mean Square Prediction Error (MSPE) computed in matrix form."),
          tags$li("F-test: compute the statistic in matrix form and output the corresponding p-value."),
          tags$li("Help documentation for all functions (for example using the", em("roxygen2"), "package)")
        ),
        br(),
        hr(),
        p("The package will be made available for download on a GitHub repository in the",
          a(href="https://github.com/AU-R-Programming", "AU-R-Programming organization"),
          "and the submission will be an html file on Canvas. The html file will be a so-called vignette which indicates the name of the GitHub repository (and package) where you explain and give examples of how to use the package functions for all the desired outputs using one of the datasets on the Canvas course page."),
        hr(),
        br()
      ) # mainPanel
    ), # tabPanel, The Assignment
    # Tab 2: placeholder; scaffold kept commented out for future work
    tabPanel("The Package",
      "Page under construction...."
      # sidebarPanel(
      #   tags$h3("Input:"),
      #   textInput("txt1", "First Name:", ""),
      #   textInput("txt2", "Last Name:", ""),
      # ), # sidebarPanel
      # mainPanel(
      #   h1("Header 1"),
      #   h4("Output"),
      #   verbatimTextOutput("txtout"),
      # ) # mainPanel
    ), # tabPanel, The Package
    tabPanel("The Theory Behind It",
      mainPanel(style="text-align: center;"
      ) #mainPanel, The Theory
    ), # tabPanel, The Theory
    tabPanel("How to use it",
      "Page under construction...."
    ) # tabPanel, Examples
    # tabPanel("Try It Yourself!",
    #   mainPanel(style="text-align: justify;",
    #     p("You want to try it yourself to see if we really did something? Sure! Just follow the link below to our page:"),
    #     a(href="www.rstudio.com", "Click here!"),
    #   ) # mainPanel
    # ) # tabPanel, Try It Yourself
  ) # navbarPage
) #fluidPage
# Define server function
# Intentionally empty: every tab is static markup, so no reactive
# logic is needed yet.
server <- function(input, output) {
} # server
# Run the application
shinyApp(ui = ui, server = server)
| /shiny/shiny_g11/app.R | no_license | gabiitokazu/desperation | R | false | false | 8,032 | r |
library(shiny)
library(shinythemes)
# Define UI
# UI definition. Review fixes:
#  * "\(1β\alpha\)" was mojibake of "\(1-\alpha\)" in the CI bullet text
#  * "wil be" -> "will be" in the submission paragraph
#  * trailing commas that left empty arguments in sidebarPanel/tags$ol/
#    tags$ul/mainPanel calls removed
ui <- fluidPage(theme = shinytheme("flatly"),
  withMathJax(), # to be able to use LaTeX expressions within the text
  navbarPage(
    "Final Project - Group 11",
    # Tab 1: assignment description with project metadata in a sidebar
    tabPanel("The Assignment",
      sidebarPanel(style="text-align: center;",
        tags$h2("Project Information"),
        tags$p(),
        tags$br(),
        tags$h3("Objective"),
        tags$h5("To develop an R package implementing linear regression"),
        tags$p(),
        tags$br(),
        tags$h3("Contributors"),
        tags$h5(a(href="https://github.com/gabiitokazu", "Ana Gabriela Itokazu")),
        tags$h5(a(href="https://github.com/EyoelBerhane", "Eyoel Berhane")),
        tags$h5(a(href="https://github.com/Johnstaph", "John Musah")),
        tags$p(),
        tags$br(),
        tags$h3("Sources"),
        a(href="https://github.com/AU-R-Programming/FinalProject-11", "Package"),
        tags$br(),
        a(href="https://github.com/AU-R-Programming/FinalProject-11/tree/main/shiny", "Shiny App"),
        tags$br(),
        a(href="https://github.com/AU-R-Programming/FinalProject-11", "RMarkdown"),
        tags$br(),
        a(href="https://github.com/AU-R-Programming/FinalProject-11", "GitHub Repository"),
        tags$p(),
        tags$br(),
        tags$h3("Class"),
        tags$h5("STAT 6210"),
        tags$h5("R Programming for Data Science"),
        tags$h5(a(href="https://github.com/robertomolinari", "Prof. Dr. Roberto Molinari")),
        tags$h5("Auburn University - Fall 2020")
      ), # sidebarPanel
      mainPanel(style="text-align: justify;",
        h1("The Assignment"),
        em("This package was built as part of the requirements for the 'R Programming for Data Science' course, by Prof. Dr. Roberto Molinari. The assignment was lined up as follows:"),
        br(),
        br(),
        p("The final project will be evaluated on 100 points and the goal is to develop an R package implementing linear regression as highlighted in",
          a(href="https://smac-group.github.io/ds/section-functions.html#section-example-continued-least-squares-function", "Section 6.4 of the book"),
          "."),
        p("The package must contain the basic functions to perform linear regression (", em("e.g."), "estimate the coefficient vector \\(\\beta\\)) and obtain different statistics from the procedure. Using the notation from the book and without using any of the linear regression functions already available in R (", em("i.e."), "all outputs must be produced using formulas provided in the book and in this document), the basic outputs from the procedure must be the following:"),
        tags$ul(
          tags$li("Confidence intervals: the user must be able to choose the significance level \\(\\alpha\\) to obtain for the \\(1-\\alpha\\) confidence intervals for \\(\\beta\\) and whether to use the asymptotic or bootstrap approach for this."),
          tags$li("Plots (with ", em("e.g."), "ggplot2) including:",
            tags$ol(
              tags$li("Residuals vs fitted-value."),
              tags$li("qq-plot of residuals."),
              tags$li("Histogram (or density) of residuals.")
            )
          ),
          tags$li("Mean Square Prediction Error (MSPE) computed in matrix form."),
          tags$li("F-test: compute the statistic in matrix form and output the corresponding p-value."),
          tags$li("Help documentation for all functions (for example using the", em("roxygen2"), "package)")
        ),
        br(),
        hr(),
        p("The package will be made available for download on a GitHub repository in the",
          a(href="https://github.com/AU-R-Programming", "AU-R-Programming organization"),
          "and the submission will be an html file on Canvas. The html file will be a so-called vignette which indicates the name of the GitHub repository (and package) where you explain and give examples of how to use the package functions for all the desired outputs using one of the datasets on the Canvas course page."),
        hr(),
        br()
      ) # mainPanel
    ), # tabPanel, The Assignment
    # Tab 2: placeholder; scaffold kept commented out for future work
    tabPanel("The Package",
      "Page under construction...."
      # sidebarPanel(
      #   tags$h3("Input:"),
      #   textInput("txt1", "First Name:", ""),
      #   textInput("txt2", "Last Name:", ""),
      # ), # sidebarPanel
      # mainPanel(
      #   h1("Header 1"),
      #   h4("Output"),
      #   verbatimTextOutput("txtout"),
      # ) # mainPanel
    ), # tabPanel, The Package
    tabPanel("The Theory Behind It",
      mainPanel(style="text-align: center;"
      ) #mainPanel, The Theory
    ), # tabPanel, The Theory
    tabPanel("How to use it",
      "Page under construction...."
    ) # tabPanel, Examples
    # tabPanel("Try It Yourself!",
    #   mainPanel(style="text-align: justify;",
    #     p("You want to try it yourself to see if we really did something? Sure! Just follow the link below to our page:"),
    #     a(href="www.rstudio.com", "Click here!"),
    #   ) # mainPanel
    # ) # tabPanel, Try It Yourself
  ) # navbarPage
) #fluidPage
# Define server function
# Intentionally empty: every tab is static markup, so no reactive
# logic is needed yet.
server <- function(input, output) {
} # server
# Run the application
shinyApp(ui = ui, server = server)
|
# Exploratory Data Analysis, plot 5: how have emissions from
# motor-vehicle-related sources changed over time in Baltimore City
# (fips == "24510")?
library(ggplot2)  # fix: qplot()/theme()/labs() were used without loading ggplot2

NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
## Indices of SCC rows where any of the four descriptive columns contains "Motor"
SCCindex <- sort(union(union(union(grep("Motor",SCC$Short.Name),grep("Motor",SCC$EI.Sector)),grep("Motor", SCC$SCC.Level.Three)),grep("Motor", SCC$SCC.Level.Four)))
## The SCC codes associated with "Motor"
MotorSCC <- SCC$SCC[SCCindex]
## Emission records for motor vehicle sources ...
NEIMotor <- NEI[which(NEI$SCC %in% MotorSCC),]
## ... restricted to Baltimore City
NEIMotorBaltimore <- subset(NEIMotor,fips=="24510")
## Log-transform emissions; log(0) yields -Inf, which is replaced by NA
## so it cannot distort the plot or the fitted regression line
logEmissions <- log(NEIMotorBaltimore$Emissions)
logEmissions2 <- replace(logEmissions, is.infinite(logEmissions), NA)
NEIMotorBaltimore$logEmissions <- logEmissions2
## Point plot of log(Emissions) vs year with a linear-regression smoother
png(filename = "plot5.png")
q <- qplot(year, logEmissions, data = NEIMotorBaltimore, geom = c("point", "smooth"), method = "lm", main = "Emissions of Motor Vehicles in Baltimore City")
# fix: wrap in print() -- top-level auto-printing does not happen under
# source(), which would leave plot5.png empty
print(q + theme(axis.text.x = element_text(angle = 90, hjust = 1)) + labs(y = "log(Emissions)"))
dev.off()
| /Coursera-Exploratory Data Analysis in R/exdata-data-NEI_data/plot5.R | no_license | rachtw/course | R | false | false | 1,216 | r | NEI <- readRDS("summarySCC_PM25.rds")
# NOTE(review): this script uses qplot()/theme()/labs() but never calls
# library(ggplot2); it also assumes NEI was read earlier -- confirm both
# are provided before this point.
SCC <- readRDS("Source_Classification_Code.rds")
## Find out the index of SCC where any of the four columns contains "Motor"
SCCindex <- sort(union(union(union(grep("Motor",SCC$Short.Name),grep("Motor",SCC$EI.Sector)),grep("Motor", SCC$SCC.Level.Three)),grep("Motor", SCC$SCC.Level.Four)))
## Get the SCC associated with "Motor"
MotorSCC <- SCC$SCC[SCCindex]
## Get the data associated with motor vehicles
NEIMotor <- NEI[which(NEI$SCC %in% MotorSCC),]
## Get the data associated with motor vehicles in Baltimore City
NEIMotorBaltimore <- subset(NEIMotor,fips=="24510")
## Calculate log(Emission) and remove inf values (log(0) -> -Inf -> NA)
logEmissions = log(NEIMotorBaltimore$Emissions)
logEmissions2 = replace(logEmissions, is.infinite(logEmissions),NA)
NEIMotorBaltimore$logEmissions = logEmissions2
## Create point plots of log(Emission) vs year and a line of linear regression
png(filename = "plot5.png")
q <- qplot(year, logEmissions, data = NEIMotorBaltimore, geom = c("point", "smooth"), method = "lm", main = "Emissions of Motor Vehicles in Baltimore City")
# NOTE(review): relies on top-level auto-printing; under source() this
# line would not render into the png -- consider wrapping in print().
q + theme(axis.text.x = element_text(angle = 90, hjust = 1)) + labs(y = "log(Emissions)")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.licensemanager_operations.R
\name{update_license_configuration}
\alias{update_license_configuration}
\title{Modifies the attributes of an existing license configuration object}
\usage{
update_license_configuration(LicenseConfigurationArn,
LicenseConfigurationStatus = NULL, LicenseRules = NULL,
LicenseCount = NULL, LicenseCountHardLimit = NULL, Name = NULL,
Description = NULL)
}
\arguments{
\item{LicenseConfigurationArn}{[required] ARN for a license configuration.}
\item{LicenseConfigurationStatus}{New status of the license configuration (\code{AVAILABLE} or \code{DISABLED}).}
\item{LicenseRules}{List of flexible text strings designating license rules.}
\item{LicenseCount}{New number of licenses managed by the license configuration.}
\item{LicenseCountHardLimit}{Sets the number of available licenses as a hard limit.}
\item{Name}{New name of the license configuration.}
\item{Description}{New human-friendly description of the license configuration.}
}
\description{
Modifies the attributes of an existing license configuration object. A license configuration is an abstraction of a customer license agreement that can be consumed and enforced by License Manager. Components include specifications for the license type (Instances, cores, sockets, VCPUs), tenancy (shared or Dedicated Host), host affinity (how long a VM is associated with a host), the number of licenses purchased and used.
}
\section{Accepted Parameters}{
\preformatted{update_license_configuration(
LicenseConfigurationArn = "string",
LicenseConfigurationStatus = "AVAILABLE"|"DISABLED",
LicenseRules = list(
"string"
),
LicenseCount = 123,
LicenseCountHardLimit = TRUE|FALSE,
Name = "string",
Description = "string"
)
}
}
| /service/paws.licensemanager/man/update_license_configuration.Rd | permissive | CR-Mercado/paws | R | false | true | 1,815 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.licensemanager_operations.R
\name{update_license_configuration}
\alias{update_license_configuration}
\title{Modifies the attributes of an existing license configuration object}
\usage{
update_license_configuration(LicenseConfigurationArn,
LicenseConfigurationStatus = NULL, LicenseRules = NULL,
LicenseCount = NULL, LicenseCountHardLimit = NULL, Name = NULL,
Description = NULL)
}
\arguments{
\item{LicenseConfigurationArn}{[required] ARN for a license configuration.}
\item{LicenseConfigurationStatus}{New status of the license configuration (\code{AVAILABLE} or \code{DISABLED}).}
\item{LicenseRules}{List of flexible text strings designating license rules.}
\item{LicenseCount}{New number of licenses managed by the license configuration.}
\item{LicenseCountHardLimit}{Sets the number of available licenses as a hard limit.}
\item{Name}{New name of the license configuration.}
\item{Description}{New human-friendly description of the license configuration.}
}
\description{
Modifies the attributes of an existing license configuration object. A license configuration is an abstraction of a customer license agreement that can be consumed and enforced by License Manager. Components include specifications for the license type (Instances, cores, sockets, VCPUs), tenancy (shared or Dedicated Host), host affinity (how long a VM is associated with a host), the number of licenses purchased and used.
}
\section{Accepted Parameters}{
\preformatted{update_license_configuration(
LicenseConfigurationArn = "string",
LicenseConfigurationStatus = "AVAILABLE"|"DISABLED",
LicenseRules = list(
"string"
),
LicenseCount = 123,
LicenseCountHardLimit = TRUE|FALSE,
Name = "string",
Description = "string"
)
}
}
|
# readr and fread
# Teaching demo: the same malformed CSV is read with base read.csv(),
# readr::read_csv() and data.table::fread() to compare how each parser
# copes with a bad row. Several statements below fail on purpose.
# NOTE(review): rm(list = ls()) wipes the workspace as a side effect;
# acceptable for a classroom script, avoid in reusable code.
rm(list = ls())
x = read.csv("http://pages.stat.wisc.edu/~karlrohe/ds679/badRead.csv")
head(x)
str(x)
library(readr)
y = read_csv("http://pages.stat.wisc.edu/~karlrohe/ds679/badRead.csv")
library(data.table)
z = fread("http://pages.stat.wisc.edu/~karlrohe/ds679/badRead.csv")
# Compare the three parses: column types and NA counts differ.
str(x)
str(y)
str(z)
sum(is.na(x))
sum(is.na(y))
# what do we do?
# Read the error!!
# Does this error look google-able? What is "parsing"
# Re-read forcing both columns to character so no values are lost.
y = read.csv("http://pages.stat.wisc.edu/~karlrohe/ds679/badRead.csv", colClasses = c("character", "character"))
str(y)
sum(is.na(y)) # cool!
# mean() of a character column is NA (with a warning); coercing with
# as.numeric() reveals which entries are not parseable numbers.
mean(y[,2])
mean(as.numeric(y[,2]))
which(is.na(as.numeric(y[,2])))
bad = which(is.na(as.numeric(y[,2])))
y[bad,] # yeah, that's messy.
# NOTE(review): y[-bad,2] is still character, so the first mean() below
# is NA; only the as.numeric() version yields a number.
mean(y[-bad,2])
mean(as.numeric(y[-bad,2]))
# what about fread?
fread("http://pages.stat.wisc.edu/~karlrohe/ds679/badRead.csv")
badFile = "http://pages.stat.wisc.edu/~karlrohe/ds679/badRead.csv"
# Peek at the raw lines around the defect (readr::read_lines).
read_lines(badFile,skip = 1071,n_max = 3)
# I guess you have to go into a text editor to fix it,
# or try skipping/restarting several times... ugh. any thoughts?
# At this point, regular expressions can be particularly useful!!! And python / more text friendly languages...
# https://github.com/Rdatatable/data.table/issues/711
| /readingBadData.R | no_license | sycatkim/Data_Science_with_R | R | false | false | 1,262 | r | # readr and fread
# Teaching demo: the same malformed CSV is read with base read.csv(),
# readr::read_csv() and data.table::fread() to compare how each parser
# copes with a bad row. Several statements below fail on purpose.
# NOTE(review): rm(list = ls()) wipes the workspace as a side effect;
# acceptable for a classroom script, avoid in reusable code.
rm(list = ls())
x = read.csv("http://pages.stat.wisc.edu/~karlrohe/ds679/badRead.csv")
head(x)
str(x)
library(readr)
y = read_csv("http://pages.stat.wisc.edu/~karlrohe/ds679/badRead.csv")
library(data.table)
z = fread("http://pages.stat.wisc.edu/~karlrohe/ds679/badRead.csv")
# Compare the three parses: column types and NA counts differ.
str(x)
str(y)
str(z)
sum(is.na(x))
sum(is.na(y))
# what do we do?
# Read the error!!
# Does this error look google-able? What is "parsing"
# Re-read forcing both columns to character so no values are lost.
y = read.csv("http://pages.stat.wisc.edu/~karlrohe/ds679/badRead.csv", colClasses = c("character", "character"))
str(y)
sum(is.na(y)) # cool!
# mean() of a character column is NA (with a warning); coercing with
# as.numeric() reveals which entries are not parseable numbers.
mean(y[,2])
mean(as.numeric(y[,2]))
which(is.na(as.numeric(y[,2])))
bad = which(is.na(as.numeric(y[,2])))
y[bad,] # yeah, that's messy.
# NOTE(review): y[-bad,2] is still character, so the first mean() below
# is NA; only the as.numeric() version yields a number.
mean(y[-bad,2])
mean(as.numeric(y[-bad,2]))
# what about fread?
fread("http://pages.stat.wisc.edu/~karlrohe/ds679/badRead.csv")
badFile = "http://pages.stat.wisc.edu/~karlrohe/ds679/badRead.csv"
# Peek at the raw lines around the defect (readr::read_lines).
read_lines(badFile,skip = 1071,n_max = 3)
# I guess you have to go into a text editor to fix it,
# or try skipping/restarting several times... ugh. any thoughts?
# At this point, regular expressions can be particularly useful!!! And python / more text friendly languages...
# https://github.com/Rdatatable/data.table/issues/711
|
surfplot <-
function(metric=9, prop=0.7, rho=0.2, colour=TRUE, drop=TRUE, cross=TRUE, dat=data$surfaces) {
#--------------------------------------------------------------
#
# TITLE: surfplot()
# AUTHOR: TARMO REMMEL
# DATE: 23 January 2020
# CALLS: NA
# CALLED BY: NA
# NEEDS: MATRIX OBJECT FOR SURFACE AND X,Y COORDINATES
# TO DEFINE THE PROPORTION AND RHO VALUES TO PLOT
# REQUIRES surfaces OBJECT AS LOOKUP TABLE
# REQUIRES AN INTEGER FOR metric TO INDICATE WHICH ONE TO WORK WITH
# NOTES: prop must be > 0.12
# rho must be > 0.1
# IF colour=FALSE, THE PLOT IS DONE IN BW
# IF drop=FALSE, THE DROP LINE FROM THE POINT IS OMITTED
# NOTE(review): the default dat=data$surfaces reads a global object
# named 'data'; callers normally pass dat explicitly -- confirm.
#--------------------------------------------------------------
# SAVE GRAPHIC PARAMETERS AND RESTATE THEM ON EXIT
opar <- par(no.readonly =TRUE)
on.exit(par(opar))
surfaces <- dat
plot.new()
# One square panel for the surface alone, or three when the
# cross-section boxplots are requested.
if(cross) {
par(pty="s", mfrow=c(1,3))
} # END IF
else {
par(pty="s", mfrow=c(1,1))
} # END ELSE
# PLOT PERSPECTIVE SURFACE WITH PROPORTION AND RHO POINT INDICATED WITH DROP LINE
# The surface is the median of the chosen metric across replicates.
# NOTE(review): the point indices (prop*9)+1 and (rho*11)+1 are
# truncated to integers by R subsetting and assume prop/rho lie on the
# simulation grid; the boxplot panels below use round(rho*11) and
# round(prop*9) without the +1 -- confirm the indexing convention.
if(colour) {
surfaceobj <- apply(surfaces[metric,,,], MARGIN=c(1,2), median)
surf <- persp(seq(0.1,0.9,by=0.1), seq(0,0.2499999, by=0.2499999/10)*4, surfaceobj, ticktype="detailed", xlab="Proportion", ylab="Rho", zlab="Metric", theta=-45)
if(drop) {
from <- trans3d(x=prop, y=rho, z=surfaceobj[(prop*9)+1, (rho*11)+1], surf)
to <- trans3d(x=prop, y=rho, z=min(surfaceobj), surf)
segments(from$x, from$y, to$x, to$y, col="Red", lwd=1, lty="dotted")
} # END IF
points(trans3d(x=prop, y=rho, z=surfaceobj[(prop*9)+1, (rho*11)+1], surf), col="Red", pch=19)
} # END IF
else {
# Black-and-white variant: identical geometry, no colour arguments.
surfaceobj <- apply(surfaces[metric,,,], MARGIN=c(1,2), median)
surf <- persp(seq(0.1,0.9,by=0.1), seq(0,0.2499999, by=0.2499999/10)*4, surfaceobj, ticktype="detailed", xlab="Proportion", ylab="Rho", zlab="Metric", theta=-45)
if(drop) {
from <- trans3d(x=prop, y=rho, z=surfaceobj[(prop*9)+1, (rho*11)+1], surf)
to <- trans3d(x=prop, y=rho, z=min(surfaceobj), surf)
segments(from$x, from$y, to$x, to$y, lwd=1, lty="dotted")
} # END IF
points(trans3d(x=prop, y=rho, z=surfaceobj[(prop*9)+1, (rho*11)+1], surf), pch=19)
} # END ELSE
if(cross) {
# PLOT BOXPLOTS ACROSS THE 11 LEVELS OF SPATIAL AUTOCORRELATION (RHO)
plot(factor(round(seq(0,0.2499999, by=0.2499999/10)*4, 2)), surfaces[metric,round(rho*11),,], xlab="Spatial Autocorrelation", ylab="Metric Value")
title("Metric Versus Autocorrelation (Rho)")
# PLOT BOXPLOTS ACROSS THE 9 LEVELS OF PROPORTION
plot(factor(seq(0.1,0.9,by=0.1)), surfaces[metric,,round(prop*9),], xlab="Proportion", ylab="Metric Value")
title("Metric Versus Proportion")
} # END IF
} # END FUNCTION: surfplot
| /R/surfplot.R | no_license | cran/ShapePattern | R | false | false | 2,955 | r | surfplot <-
function(metric=9, prop=0.7, rho=0.2, colour=TRUE, drop=TRUE, cross=TRUE, dat=data$surfaces) {
#--------------------------------------------------------------
#
# TITLE: surfplot()
# AUTHOR: TARMO REMMEL
# DATE: 23 January 2020
# CALLS: NA
# CALLED BY: NA
# NEEDS: MATRIX OBJECT FOR SURFACE AND X,Y COORDINATES
# TO DEFINE THE PROPORTION AND RHO VALUES TO PLOT
# REQUIRES surfaces OBJECT AS LOOKUP TABLE
# REQUIRES AN INTEGER FOR metric TO INDICATE WHICH ONE TO WORK WITH
# NOTES: prop must be > 0.12
# rho must be > 0.1
# IF colour=FALSE, THE PLOT IS DONE IN BW
# IF drop=FALSE, THE DROP LINE FROM THE POINT IS OMITTED
# NOTE(review): the default dat=data$surfaces reads a global object
# named 'data'; callers normally pass dat explicitly -- confirm.
#--------------------------------------------------------------
# SAVE GRAPHIC PARAMETERS AND RESTATE THEM ON EXIT
opar <- par(no.readonly =TRUE)
on.exit(par(opar))
surfaces <- dat
plot.new()
# One square panel for the surface alone, or three when the
# cross-section boxplots are requested.
if(cross) {
par(pty="s", mfrow=c(1,3))
} # END IF
else {
par(pty="s", mfrow=c(1,1))
} # END ELSE
# PLOT PERSPECTIVE SURFACE WITH PROPORTION AND RHO POINT INDICATED WITH DROP LINE
# The surface is the median of the chosen metric across replicates.
# NOTE(review): the point indices (prop*9)+1 and (rho*11)+1 are
# truncated to integers by R subsetting and assume prop/rho lie on the
# simulation grid; the boxplot panels below use round(rho*11) and
# round(prop*9) without the +1 -- confirm the indexing convention.
if(colour) {
surfaceobj <- apply(surfaces[metric,,,], MARGIN=c(1,2), median)
surf <- persp(seq(0.1,0.9,by=0.1), seq(0,0.2499999, by=0.2499999/10)*4, surfaceobj, ticktype="detailed", xlab="Proportion", ylab="Rho", zlab="Metric", theta=-45)
if(drop) {
from <- trans3d(x=prop, y=rho, z=surfaceobj[(prop*9)+1, (rho*11)+1], surf)
to <- trans3d(x=prop, y=rho, z=min(surfaceobj), surf)
segments(from$x, from$y, to$x, to$y, col="Red", lwd=1, lty="dotted")
} # END IF
points(trans3d(x=prop, y=rho, z=surfaceobj[(prop*9)+1, (rho*11)+1], surf), col="Red", pch=19)
} # END IF
else {
# Black-and-white variant: identical geometry, no colour arguments.
surfaceobj <- apply(surfaces[metric,,,], MARGIN=c(1,2), median)
surf <- persp(seq(0.1,0.9,by=0.1), seq(0,0.2499999, by=0.2499999/10)*4, surfaceobj, ticktype="detailed", xlab="Proportion", ylab="Rho", zlab="Metric", theta=-45)
if(drop) {
from <- trans3d(x=prop, y=rho, z=surfaceobj[(prop*9)+1, (rho*11)+1], surf)
to <- trans3d(x=prop, y=rho, z=min(surfaceobj), surf)
segments(from$x, from$y, to$x, to$y, lwd=1, lty="dotted")
} # END IF
points(trans3d(x=prop, y=rho, z=surfaceobj[(prop*9)+1, (rho*11)+1], surf), pch=19)
} # END ELSE
if(cross) {
# PLOT BOXPLOTS ACROSS THE 11 LEVELS OF SPATIAL AUTOCORRELATION (RHO)
plot(factor(round(seq(0,0.2499999, by=0.2499999/10)*4, 2)), surfaces[metric,round(rho*11),,], xlab="Spatial Autocorrelation", ylab="Metric Value")
title("Metric Versus Autocorrelation (Rho)")
# PLOT BOXPLOTS ACROSS THE 9 LEVELS OF PROPORTION
plot(factor(seq(0.1,0.9,by=0.1)), surfaces[metric,,round(prop*9),], xlab="Proportion", ylab="Metric Value")
title("Metric Versus Proportion")
} # END IF
} # END FUNCTION: surfplot
|
# Plot the density of a Normal(mean = 75, sd = 2) over x in [70, 80],
# with a dashed vertical reference line at the mean; y-axis breaks are
# suppressed since only the curve's shape matters. Requires ggplot2.
# NOTE(review): `size` for line widths is deprecated since ggplot2 3.4
# in favour of `linewidth` -- confirm the targeted ggplot2 version.
ggplot(data = data.frame(x = c(70, 80)), aes(x)) +
  stat_function(fun = dnorm, n = 101, args = list(mean = 75, sd = 2)) + ylab("") +
  scale_y_continuous(breaks = NULL)+
  geom_vline(aes(xintercept=75),linetype="dashed", size=1)
# Exploratory snippets kept for reference:
# TT<-t.test(rnorm(10000,75,1),conf.level = .995)
#
# quantile(rnorm(10000,75,1),.05)
| /Normal_Plot.R | no_license | jcval94/Tesis | R | false | false | 329 | r |
# Duplicate of the snippet above (dataset text column): Normal(75, 2)
# density over [70, 80] with a dashed line at the mean; y axis suppressed.
# NOTE(review): `size` is deprecated for line widths in ggplot2 >= 3.4
# (use `linewidth`) -- confirm the targeted ggplot2 version.
ggplot(data = data.frame(x = c(70, 80)), aes(x)) +
  stat_function(fun = dnorm, n = 101, args = list(mean = 75, sd = 2)) + ylab("") +
  scale_y_continuous(breaks = NULL)+
  geom_vline(aes(xintercept=75),linetype="dashed", size=1)
# TT<-t.test(rnorm(10000,75,1),conf.level = .995)
#
# quantile(rnorm(10000,75,1),.05)
|
# Is x a plotly htmlwidget?
is.plotly <- function(x) {
  inherits(x, "plotly")
}
# Is f a formula (e.g., ~x)?
is.formula <- function(f) {
  inherits(f, "formula")
}
# Is tr a "hidden" trace used only to generate a colorbar?
is.colorbar <- function(tr) {
  inherits(tr, "plotly_colorbar")
}
# Have all trace attributes been evaluated (i.e., has plotly_build() run)?
is.evaled <- function(p) {
  all(vapply(p$x$attrs, function(attr) inherits(attr, "plotly_eval"), logical(1)))
}
# Does p contain at least one WebGL-rendered trace? Builds p if necessary.
is.webgl <- function(p) {
  if (!is.evaled(p)) p <- plotly_build(p)
  types <- vapply(p$x$data, function(tr) tr[["type"]] %||% "scatter", character(1))
  any(types %in% glTypes())
}
# Trace types rendered via WebGL rather than SVG.
glTypes <- function() {
  c(
    "scattergl", "scatter3d", "mesh3d", "heatmapgl", "pointcloud", "parcoords",
    "surface"
  )
}
# just like ggplot2:::is.discrete()
is.discrete <- function(x) {
  is.factor(x) || is.character(x) || is.logical(x)
}
# Empty-coalescing operator: keep x when it has length, or when it is a
# "blank" element (is_blank() is a project helper defined elsewhere);
# otherwise fall back to y.
"%||%" <- function(x, y) {
  if (length(x) > 0 || is_blank(x)) x else y
}
# kind of like %||%, but only respects user-defined defaults
# (instead of defaults provided in the build step)
"%|D|%" <- function(x, y) {
  if (!is.default(x)) x %||% y else y
}
# standard way to specify a line break
br <- function() "<br />"
# Was this value tagged as a build-time default (see default())?
is.default <- function(x) {
  inherits(x, "plotly_default")
}
# Tag a value as a build-time default so %|D|% can distinguish it from
# user-specified input.
default <- function(x) {
  structure(x, class = "plotly_default")
}
# Drop NULL elements from a list.
compact <- function(x) {
  Filter(Negate(is.null), x)
}
# NULL-safe modifyList(): treats NULL inputs as empty lists.
modify_list <- function(x, y, ...) {
  modifyList(x %||% list(), y %||% list(), ...)
}
# Convert Date/POSIXt vectors to the epoch-millisecond scale that
# plotly.js expects; any other input passes through untouched.
to_milliseconds <- function(x) {
  if (inherits(x, "POSIXt")) {
    return(as.numeric(x) * 1000)
  }
  if (inherits(x, "Date")) {
    # Dates count days since the epoch: 86400000 ms per day
    return(as.numeric(x) * 86400000)
  }
  # throw warning?
  x
}
# apply a function to x, retaining class and "special" plotly attributes
retain <- function(x, f = identity) {
  y <- structure(f(x), class = oldClass(x))
  attrs <- attributes(x)
  # TODO: do we set any other "special" attributes internally
  # (grepping "structure(" suggests no)
  attrs <- attrs[names(attrs) %in% c("defaultAlpha", "apiSrc")]
  if (length(attrs)) {
    # NOTE(review): `attributes(y) <- attrs` REPLACES all attributes,
    # discarding the class set above (and any names) whenever attrs is
    # non-empty -- confirm whether that clobbering is intentional.
    attributes(y) <- attrs
  }
  y
}
# Deparse a formula/quoted expression to a single string, dropping any
# leading "~"; returns NULL for non-language input.
deparse2 <- function(x) {
  if (is.null(x) || !is.language(x)) return(NULL)
  sub("^~", "", paste(deparse(x, 500L), collapse = ""))
}
# Generate a unique DOM-safe id (delegates to htmlwidgets' internal helper).
new_id <- function() {
  htmlwidgets:::createWidgetId()
}
# names(), but returns a vector of "" (not NULL) for unnamed input.
names2 <- function(x) {
  names(x) %||% rep("", length(x))
}
# Category values for a discrete vector: factor levels when available,
# otherwise the sorted unique values.
getLevels <- function(x) {
  if (is.factor(x)) levels(x) else sort(unique(x))
}
# Evaluate expr, returning NULL (instead of erroring) on failure.
tryNULL <- function(expr) tryCatch(expr, error = function(e) NULL)
# Don't attempt to do "tidy" data training on these trace types
is_tidy <- function(trace) {
  type <- trace[["type"]] %||% "scatter"
  !type %in% c(
    "mesh3d", "heatmap", "histogram2d",
    "histogram2dcontour", "contour", "surface"
  )
}
# is grouping relevant for this geometry? (e.g., grouping doesn't effect a scatterplot)
has_group <- function(trace) {
  inherits(trace, paste0("plotly_", c("segment", "path", "line", "polygon"))) ||
    (grepl("scatter", trace[["type"]]) && grepl("lines", trace[["mode"]]))
}
# currently implemented non-positional scales in plot_ly()
npscales <- function() {
  c("color", "symbol", "linetype", "size", "split")
}
# copied from https://github.com/plotly/plotly.js/blob/master/src/components/color/attributes.js
traceColorDefaults <- function() {
  c('#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd',
    '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf')
}
# column name for crosstalk key
# TODO: make this more unique?
crosstalk_key <- function() ".crossTalkKey"
# modifyList turns elements that are data.frames into lists
# which changes the behavior of toJSON
as_df <- function(x) {
  # NOTE(review): NULL/matrix input is returned as-is, a non-data.frame
  # list becomes an unnamed data.frame, and everything else -- including
  # an actual data.frame -- falls through and returns NULL invisibly.
  # Confirm callers rely on that NULL before "fixing" it.
  if (is.null(x) || is.matrix(x)) return(x)
  if (is.list(x) && !is.data.frame(x)) {
    setNames(as.data.frame(x), NULL)
  }
}
# arrange data if the vars exist, don't throw error if they don't
# NOTE(review): dplyr::arrange_() is deprecated; migrating to
# arrange(across(all_of(vars))) requires a newer dplyr -- confirm the
# supported dplyr versions before changing.
arrange_safe <- function(data, vars) {
  vars <- vars[vars %in% names(data)]
  if (length(vars)) dplyr::arrange_(data, .dots = vars) else data
}
# Was this plot initialized as a mapbox (plot_mapbox()) plot?
is_mapbox <- function(p) {
  identical(p$x$layout[["mapType"]], "mapbox")
}
# Was this plot initialized as a geo (plot_geo()) plot?
is_geo <- function(p) {
  identical(p$x$layout[["mapType"]], "geo")
}
# Do *all* traces in p have (one of) the given type(s)?
is_type <- function(p, type) {
  types <- vapply(p$x$data, function(tr) tr[["type"]] %||% "scatter", character(1))
  all(types %in% type)
}
# Replace an element (possibly deeply nested) of a named list.
#
# @param x a named list
# @param indicies a vector of indices; a 1D list may be used to mix
#   numeric and non-numeric indices along the path
# @param val the replacement value
# @examples
#
# re_place(list(a = 1), "a", 2)          # equivalent to `x$a <- 2`
#
# y <- list(a = list(list(b = 2)))
# re_place(y, list("a", 1, "b"), 3)      # equivalent to `y$a[[1]]$b <- 3`
re_place <- function(x, indicies = 1, val) {
  # build the x[[i1]][[i2]]...[[in]] extraction call, then evaluate the
  # assignment in this frame so the local copy of `x` is modified
  target <- call("[[", quote(x), indicies[[1]])
  for (idx in indicies[-1]) {
    target <- call("[[", target, idx)
  }
  eval(call("<-", target, val))
  x
}
# Retrieve the mapbox access token from the MAPBOX_TOKEN environment
# variable; error with setup instructions when it is unset.
mapbox_token <- function() {
  token <- Sys.getenv("MAPBOX_TOKEN", NA)
  if (!is.na(token)) {
    return(token)
  }
  stop(
    "No mapbox access token found. Obtain a token here\n",
    "https://www.mapbox.com/help/create-api-access-token/\n",
    "Once you have a token, assign it to an environment variable \n",
    "named 'MAPBOX_TOKEN', for example,\n",
    "Sys.setenv('MAPBOX_TOKEN' = 'secret token')", call. = FALSE
  )
}
# rename attrs (unevaluated arguments) from geo locations (lat/lon) to cartesian
# NOTE(review): maps lat -> x and lon -> y (not the usual lon -> x);
# presumably intentional for internal axis training -- confirm.
geo2cartesian <- function(p) {
  p$x$attrs <- lapply(p$x$attrs, function(tr) {
    tr[["x"]] <- tr[["x"]] %||% tr[["lat"]]
    tr[["y"]] <- tr[["y"]] %||% tr[["lon"]]
    tr
  })
  p
}
# Was p produced by subplot()?
is_subplot <- function(p) {
  isTRUE(p$x$subplot)
}
# Supply trace anchor (xaxis/yaxis, geo, or mapbox subplot) and axis/geo
# domain defaults so the figure handed to plotly.js is fully specified.
# Subplots are skipped because subplot() manages its own anchors/domains.
supply_defaults <- function(p) {
  # no need to supply defaults for subplots
  if (is_subplot(p)) return(p)
  # supply trace anchor defaults
  anchors <- if (is_geo(p)) c("geo" = "geo") else if (is_mapbox(p)) c("subplot" = "mapbox") else c("xaxis" = "x", "yaxis" = "y")
  p$x$data <- lapply(p$x$data, function(tr) {
    for (i in seq_along(anchors)) {
      key <- names(anchors)[[i]]
      # only set the anchor when this trace type supports it
      if (!has_attr(tr[["type"]] %||% "scatter", key)) next
      # normalize "x1"/"y1" to "x"/"y", falling back to the default anchor
      tr[[key]] <- sub("^y1$", "y", sub("^x1$", "x", tr[[key]][1])) %||% anchors[[i]]
    }
    tr
  })
  # hack to avoid https://github.com/ropensci/plotly/issues/945
  if (is_type(p, "parcoords")) p$x$layout$margin$t <- NULL
  # supply domain defaults
  geoDomain <- list(x = c(0, 1), y = c(0, 1))
  if (is_geo(p) || is_mapbox(p)) {
    # cartesian axes are irrelevant on geo/mapbox plots
    p$x$layout[grepl("^[x-y]axis", names(p$x$layout))] <- NULL
    p$x$layout[[p$x$layout$mapType]] <- modify_list(
      list(domain = geoDomain), p$x$layout[[p$x$layout$mapType]]
    )
  } else {
    # choose the axis set appropriate for the trace type(s)
    axes <- if (is_type(p, "scatterternary")) {
      c("aaxis", "baxis", "caxis")
    } else if (is_type(p, "pie") || is_type(p, "parcoords") || is_type(p, "sankey")) {
      NULL
    } else {
      c("xaxis", "yaxis")
    }
    for (axis in axes) {
      p$x$layout[[axis]] <- modify_list(
        list(domain = c(0, 1)), p$x$layout[[axis]]
      )
    }
  }
  p
}
# Populate the crosstalk highlighting machinery: global highlight options
# (pushed to the client via onRender), one selectize widget per keyed
# SharedData group, default selections, and a sensible dragmode default.
supply_highlight_attrs <- function(p) {
  # set "global" options via crosstalk variable
  p$x$highlight <- p$x$highlight %||% highlight_defaults()
  p <- htmlwidgets::onRender(
    p, sprintf(
      "function(el, x) { var ctConfig = crosstalk.var('plotlyCrosstalkOpts').set(%s); }",
      to_JSON(p$x$highlight)
    )
  )
  # defaults are now populated, allowing us to populate some other
  # attributes such as the selectize widget definition
  sets <- unlist(lapply(p$x$data, "[[", "set"))
  keys <- setNames(lapply(p$x$data, "[[", "key"), sets)
  p$x$highlight$ctGroups <- i(unique(sets))
  # TODO: throw warning if we don't detect valid keys?
  hasKeys <- FALSE
  # NOTE(review): the loop variable `i` shadows the helper function i()
  # used just above -- harmless here, but easy to trip over when editing
  for (i in p$x$highlight$ctGroups) {
    k <- unique(unlist(keys[names(keys) %in% i], use.names = FALSE))
    if (is.null(k)) next
    # NOTE(review): is.null() on a vector is a scalar FALSE, so this
    # subset is a no-op -- was k[!is.na(k)] intended?
    k <- k[!is.null(k)]
    hasKeys <- TRUE
    # include one selectize dropdown per "valid" SharedData layer
    if (isTRUE(p$x$highlight$selectize)) {
      p$x$selectize[[new_id()]] <- list(
        items = data.frame(value = k, label = k), group = i
      )
    }
    # set default values via crosstalk api
    vals <- p$x$highlight$defaultValues[p$x$highlight$defaultValues %in% k]
    if (length(vals)) {
      p <- htmlwidgets::onRender(
        p, sprintf(
          "function(el, x) { crosstalk.group('%s').var('selection').set(%s) }",
          i, jsonlite::toJSON(vals, auto_unbox = FALSE)
        )
      )
    }
  }
  # add HTML dependencies, set a sensible dragmode default, & throw messages
  if (hasKeys) {
    p$x$layout$dragmode <- p$x$layout$dragmode %|D|%
      default(switch(p$x$highlight$on %||% "", plotly_selected = "select") %||% "zoom")
    if (is.default(p$x$highlight$off)) {
      message(
        sprintf(
          "Setting the `off` event (i.e., '%s') to match the `on` event (i.e., '%s'). You can change this default via the `highlight()` function.",
          p$x$highlight$off, p$x$highlight$on
        )
      )
    }
  }
  p
}
# make sure plot attributes adhere to the plotly.js schema
# (warns, via attrs_name_check(), about any unknown attribute names on
# the layout and on each trace; returns p invisibly)
verify_attr_names <- function(p) {
  # some layout attributes (e.g., [x-y]axis can have trailing numbers)
  attrs_name_check(
    sub("[0-9]+$", "", names(p$x$layout)),
    c(names(Schema$layout$layoutAttributes), c("barmode", "bargap", "mapType")),
    "layout"
  )
  for (tr in seq_along(p$x$data)) {
    thisTrace <- p$x$data[[tr]]
    attrSpec <- Schema$traces[[thisTrace$type %||% "scatter"]]$attributes
    # make sure attribute names are valid
    attrs_name_check(
      names(thisTrace),
      # the extra names are internal bookkeeping attributes, not part of
      # the plotly.js schema
      c(names(attrSpec), "key", "set", "frame", "transforms", "_isNestedKey", "_isSimpleKey", "_isGraticule"),
      thisTrace$type
    )
  }
  invisible(p)
}
# ensure both the layout and trace attributes adhere to the plot schema
verify_attr_spec <- function(p) {
  if (!is.null(p$x$layout)) {
    layoutNames <- names(p$x$layout)
    # strip trailing axis numbers (xaxis2 -> xaxis) so schema lookup
    # works, then restore the original names afterwards
    layoutNew <- verify_attr(
      setNames(p$x$layout, sub("[0-9]+$", "", layoutNames)),
      Schema$layout$layoutAttributes
    )
    p$x$layout <- setNames(layoutNew, layoutNames)
  }
  for (tr in seq_along(p$x$data)) {
    thisTrace <- p$x$data[[tr]]
    validAttrs <- Schema$traces[[thisTrace$type %||% "scatter"]]$attributes
    p$x$data[[tr]] <- verify_attr(thisTrace, validAttrs)
    # prevent these objects from sending null keys
    p$x$data[[tr]][["xaxis"]] <- p$x$data[[tr]][["xaxis"]] %||% NULL
    p$x$data[[tr]][["yaxis"]] <- p$x$data[[tr]][["yaxis"]] %||% NULL
  }
  p
}
# Coerce one attribute container (a trace or the layout) to match the
# plotly.js schema: collapse constant vectors to scalars, box data_arrays
# so they serialize as JSON arrays, and tag "src-able" attributes for
# api_create(). One level of sub-attributes is handled the same way.
verify_attr <- function(proposed, schema) {
  for (attr in names(proposed)) {
    attrSchema <- schema[[attr]]
    # if schema is missing (i.e., this is an un-official attr), move along
    if (is.null(attrSchema)) next
    valType <- tryNULL(attrSchema[["valType"]]) %||% ""
    role <- tryNULL(attrSchema[["role"]]) %||% ""
    arrayOK <- tryNULL(attrSchema[["arrayOk"]]) %||% FALSE
    isDataArray <- identical(valType, "data_array")
    # where applicable, reduce single valued vectors to a constant
    # (while preserving attributes)
    if (!isDataArray && !arrayOK && !identical(role, "object")) {
      proposed[[attr]] <- retain(proposed[[attr]], unique)
    }
    # ensure data_arrays of length 1 are boxed up by to_JSON()
    if (isDataArray) {
      proposed[[attr]] <- i(proposed[[attr]])
    }
    # tag 'src-able' attributes (needed for api_create())
    isSrcAble <- !is.null(schema[[paste0(attr, "src")]]) && length(proposed[[attr]]) > 1
    if (isDataArray || isSrcAble) {
      proposed[[attr]] <- structure(proposed[[attr]], apiSrc = TRUE)
    }
    # do the same for "sub-attributes"
    # TODO: should this be done recursively?
    if (identical(role, "object")) {
      for (attr2 in names(proposed[[attr]])) {
        if (is.null(attrSchema[[attr2]])) next
        valType2 <- tryNULL(attrSchema[[attr2]][["valType"]]) %||% ""
        role2 <- tryNULL(attrSchema[[attr2]][["role"]]) %||% ""
        arrayOK2 <- tryNULL(attrSchema[[attr2]][["arrayOk"]]) %||% FALSE
        isDataArray2 <- identical(valType2, "data_array")
        if (!isDataArray2 && !arrayOK2 && !identical(role2, "object")) {
          proposed[[attr]][[attr2]] <- retain(proposed[[attr]][[attr2]], unique)
        }
        # ensure data_arrays of length 1 are boxed up by to_JSON()
        if (isDataArray2) {
          proposed[[attr]][[attr2]] <- i(proposed[[attr]][[attr2]])
        }
        # tag 'src-able' attributes (needed for api_create())
        isSrcAble2 <- !is.null(schema[[attr]][[paste0(attr2, "src")]]) &&
          length(proposed[[attr]][[attr2]]) > 1
        if (isDataArray2 || isSrcAble2) {
          proposed[[attr]][[attr2]] <- structure(
            proposed[[attr]][[attr2]], apiSrc = TRUE
          )
        }
      }
    }
  }
  proposed
}
# Warn (without a call prefix) about attribute names that are not in the
# schema for the given type, then return the proposed names invisibly.
attrs_name_check <- function(proposedAttrs, validAttrs, type = "scatter") {
  unknown <- setdiff(proposedAttrs, validAttrs)
  if (length(unknown) > 0) {
    warning(
      "'", type, "' objects don't have these attributes: '",
      paste(unknown, collapse = "', '"), "'\n",
      "Valid attributes include:\n'",
      paste(validAttrs, collapse = "', '"), "'\n",
      call. = FALSE
    )
  }
  invisible(proposedAttrs)
}
# make sure trace type is valid
# TODO: add an argument to verify trace properties are valid (https://github.com/ropensci/plotly/issues/540)
#
# Infers a sensible trace type when none is specified (x/y/z (+i/j/k) ->
# scatter3d/mesh3d; numeric x & y -> scatter, or scattergl for large data;
# one discrete axis -> bar; both discrete -> histogram2d; x or y alone ->
# histogram; z alone -> heatmap), then validates `type` against the schema
# and defaults scatter* traces without a mode to markers (with a message).
#
# @param trace a plotly trace (named list of attributes).
# @return the trace, with `type` (and possibly `mode`) populated.
verify_type <- function(trace) {
  if (is.null(trace$type)) {
    attrs <- names(trace)
    attrLengths <- lengths(trace)
    trace$type <- if (all(c("x", "y", "z") %in% attrs)) {
      if (all(c("i", "j", "k") %in% attrs)) "mesh3d" else "scatter3d"
    } else if (all(c("x", "y") %in% attrs)) {
      xNumeric <- !is.discrete(trace[["x"]])
      yNumeric <- !is.discrete(trace[["y"]])
      if (xNumeric && yNumeric) {
        # BUG FIX: was `any(attrLengths) > 15000`, which compares a
        # logical to 15000 and is always FALSE, so scattergl was never
        # auto-selected for large data
        if (any(attrLengths > 15000)) "scattergl" else "scatter"
      } else if (xNumeric || yNumeric) {
        "bar"
      } else "histogram2d"
    } else if ("y" %in% attrs || "x" %in% attrs) {
      "histogram"
    } else if ("z" %in% attrs) {
      "heatmap"
    } else {
      warning("No trace type specified and no positional attributes specified",
              call. = FALSE)
      "scatter"
    }
    # inform the user which type was inferred
    relay_type(trace$type)
  }
  if (!is.character(trace$type) || length(trace$type) != 1) {
    stop("The trace type must be a character vector of length 1.\n",
         call. = FALSE)
  }
  if (!trace$type %in% names(Schema$traces)) {
    stop("Trace type must be one of the following: \n",
         "'", paste(names(Schema$traces), collapse = "', '"), "'",
         call. = FALSE)
  }
  # if scatter/scatter3d/scattergl, default to a scatterplot
  if (grepl("scatter", trace$type) && is.null(trace$mode)) {
    message(
      "No ", trace$type, " mode specified:\n",
      "  Setting the mode to markers\n",
      "  Read more about this attribute -> https://plot.ly/r/reference/#scatter-mode"
    )
    trace$mode <- "markers"
  }
  trace
}
# Message which trace type was inferred (see verify_type()) and return it.
relay_type <- function(type) {
  message(
    "No trace type specified:\n",
    "  Based on info supplied, a '", type, "' trace seems appropriate.\n",
    "  Read more about this trace type -> https://plot.ly/r/reference/#", type
  )
  type
}
# Searches a list for character strings and translates R linebreaks to HTML
# linebreaks (i.e., '\n' -> '<br />'). JavaScript function definitions created
# via `htmlwidgets::JS()` are ignored
translate_linebreaks <- function(p) {
  recurse <- function(a) {
    typ <- typeof(a)
    if (typ == "list") {
      # retain the class of list elements
      # which important for many things, such as colorbars
      a[] <- lapply(a, recurse)
    } else if (typ == "character" && !inherits(a, "JS_EVAL")) {
      # preserve attributes (names, class, etc.) across the gsub()
      attrs <- attributes(a)
      a <- gsub("\n", br(), a, fixed = TRUE)
      attributes(a) <- attrs
    }
    a
  }
  p$x[] <- lapply(p$x, recurse)
  p
}
# Set orientation = "h" for bar/box traces whose numeric variable is on x
# (horizontal bars/boxes), and for histograms whose values live on y.
verify_orientation <- function(trace) {
  xNumeric <- !is.discrete(trace[["x"]]) && !is.null(trace[["x"]] %||% NULL)
  yNumeric <- !is.discrete(trace[["y"]]) && !is.null(trace[["y"]] %||% NULL)
  if (xNumeric && !yNumeric) {
    if (any(c("bar", "box") %in% trace[["type"]])) {
      trace$orientation <- "h"
    }
  }
  if (yNumeric && "histogram" %in% trace[["type"]]) {
    trace$orientation <- "h"
  }
  trace
}
# Ensure scatter* traces whose mode is missing a component implied by a
# supplied sub-object (marker/line/textfont) have that component appended
# to the mode, informing the user via message().
#
# CONSISTENCY FIX: the textfont branch previously used warning() while the
# sibling marker/line branches used message(); all three now message().
verify_mode <- function(p) {
  for (tr in seq_along(p$x$data)) {
    trace <- p$x$data[[tr]]
    if (grepl("scatter", trace$type %||% "scatter")) {
      if (!is.null(trace$marker) && !grepl("markers", trace$mode %||% "")) {
        message(
          "A marker object has been specified, but markers is not in the mode\n",
          "Adding markers to the mode..."
        )
        p$x$data[[tr]]$mode <- paste0(p$x$data[[tr]]$mode, "+markers")
      }
      if (!is.null(trace$line) && !grepl("lines", trace$mode %||% "")) {
        message(
          "A line object has been specified, but lines is not in the mode\n",
          "Adding lines to the mode..."
        )
        p$x$data[[tr]]$mode <- paste0(p$x$data[[tr]]$mode, "+lines")
      }
      if (!is.null(trace$textfont) && !grepl("text", trace$mode %||% "")) {
        message(
          "A textfont object has been specified, but text is not in the mode\n",
          "Adding text to the mode..."
        )
        p$x$data[[tr]]$mode <- paste0(p$x$data[[tr]]$mode, "+text")
      }
    }
  }
  p
}
# populate categorical axes using categoryorder="array" & categoryarray=[]
populate_categorical_axes <- function(p) {
  axes <- p$x$layout[grepl("^xaxis|^yaxis", names(p$x$layout))] %||%
    list(xaxis = NULL, yaxis = NULL)
  for (i in seq_along(axes)) {
    axis <- axes[[i]]
    axisName <- names(axes)[[i]]
    # "x" or "y"
    axisType <- substr(axisName, 0, 1)
    # ggplotly() populates these attributes...don't want to clobber that
    if (!is.null(axis$ticktext) || !is.null(axis$tickvals)) next
    # collect all the data that goes on this axis
    d <- lapply(p$x$data, "[[", axisType)
    isOnThisAxis <- function(tr) {
      is.null(tr[["geo"]]) && sub("axis", "", axisName) %in%
        (tr[[sub("[0-9]+", "", axisName)]] %||% axisType) &&
        # avoid reordering matrices (see #863)
        !is.matrix(tr[["z"]])
    }
    d <- d[vapply(p$x$data, isOnThisAxis, logical(1))]
    if (length(d) == 0) next
    isDiscrete <- vapply(d, is.discrete, logical(1))
    # mixing discrete & continuous data on one axis is unsupported
    if (0 < sum(isDiscrete) & sum(isDiscrete) < length(d)) {
      warning(
        "Can't display both discrete & non-discrete data on same axis",
        call. = FALSE
      )
      next
    }
    if (sum(isDiscrete) == 0) next
    # union of categories across traces; keep factor level order when
    # everything is a factor, otherwise sort alphabetically
    categories <- lapply(d, getLevels)
    categories <- unique(unlist(categories))
    if (any(!vapply(d, is.factor, logical(1)))) categories <- sort(categories)
    p$x$layout[[axisName]]$type <-
      p$x$layout[[axisName]]$type %||% "category"
    p$x$layout[[axisName]]$categoryorder <-
      p$x$layout[[axisName]]$categoryorder %||% "array"
    p$x$layout[[axisName]]$categoryarray <-
      p$x$layout[[axisName]]$categoryarray %||% categories
  }
  p
}
# plotly.js expects these layout components to be arrays of objects;
# wrap a bare named list (i.e., a single object) in a length-one list.
verify_arrays <- function(p) {
  for (component in c("annotations", "shapes", "images")) {
    obj <- p$x$layout[[component]]
    if (is.list(obj) && !is.null(names(obj))) {
      p$x$layout[[component]] <- list(obj)
    }
  }
  p
}
# Default layout.hovermode to "closest" when it makes sense (marker-based
# scatter traces, or hover/click-driven crosstalk highlighting); a
# user-specified hovermode is left untouched.
verify_hovermode <- function(p) {
  if (!is.null(p$x$layout$hovermode)) {
    return(p)
  }
  types <- unlist(lapply(p$x$data, function(tr) tr$type %||% "scatter"))
  modes <- unlist(lapply(p$x$data, function(tr) tr$mode %||% "lines"))
  if (any(grepl("markers", modes) & types == "scatter") ||
      any(c("plotly_hover", "plotly_click") %in% p$x$highlight$on)) {
    p$x$layout$hovermode <- "closest"
  }
  p
}
# Normalize crosstalk key attributes on each trace: collapse single-valued
# keys (tagging the trace _isSimpleKey so the client can match the whole
# trace), flag nested keys, and coerce all key values to character.
verify_key_type <- function(p) {
  keys <- lapply(p$x$data, "[[", "key")
  for (i in seq_along(keys)) {
    k <- keys[[i]]
    if (is.null(k)) next
    # does it *ever* make sense to have a missing key value?
    # (uniq() is a project-level helper defined elsewhere in the package)
    uk <- uniq(k)
    if (length(uk) == 1) {
      # i.e., the key for this trace has one value. In this case,
      # we don't have iterate through the entire key, so instead,
      # we provide a flag to inform client side logic to match the _entire_
      # trace if this one key value is a match
      p$x$data[[i]]$key <- uk[[1]]
      p$x$data[[i]]$`_isSimpleKey` <- TRUE
      p$x$data[[i]]$`_isNestedKey` <- FALSE
    }
    p$x$data[[i]]$`_isNestedKey` <- p$x$data[[i]]$`_isNestedKey` %||% !lazyeval::is_atomic(k)
    # key values should always be strings
    if (p$x$data[[i]]$`_isNestedKey`) {
      p$x$data[[i]]$key <- lapply(p$x$data[[i]]$key, function(x) I(as.character(x)))
      p$x$data[[i]]$key <- setNames(p$x$data[[i]]$key, NULL)
    } else {
      p$x$data[[i]]$key <- I(as.character(p$x$data[[i]]$key))
    }
  }
  p
}
# Convert traces to their WebGL equivalents when toWebGL() flagged the
# plot (p$x$.plotlyWebGl); warns about traces with no "gl" counterpart.
#
# @param p a built plotly object.
# @return p, with eligible trace types suffixed with "gl".
verify_webgl <- function(p) {
  # see toWebGL
  if (!isTRUE(p$x$.plotlyWebGl)) {
    return(p)
  }
  # IDIOM FIX: vapply() instead of sapply() guarantees a character vector
  # even when p$x$data is empty (sapply would return a list there)
  types <- vapply(p$x$data, function(x) x[["type"]][1] %||% "scatter", character(1))
  idx <- paste0(types, "gl") %in% names(Schema$traces)
  if (any(!idx)) {
    warning(
      "The following traces don't have a WebGL equivalent: ",
      paste(which(!idx), collapse = ", ")
    )
  }
  for (i in which(idx)) {
    p$x$data[[i]]$type <- paste0(p$x$data[[i]]$type, "gl")
  }
  p
}
# Decide whether the legend should show: hide_legend() requests removal
# (including "legend title" annotations); otherwise default to showing the
# legend only when more than one trace would appear in it.
verify_showlegend <- function(p) {
  # this attribute should be set in hide_legend()
  # it ensures that "legend titles" go away in addition to showlegend = FALSE
  if (isTRUE(p$x$.hideLegend)) {
    ann <- p$x$layout$annotations
    is_title <- vapply(ann, function(x) isTRUE(x$legendTitle), logical(1))
    p$x$layout$annotations <- ann[!is_title]
    p$x$layout$showlegend <- FALSE
  }
  show <- vapply(p$x$data, function(x) x$showlegend %||% TRUE, logical(1))
  # respect only _user-specified_ defaults
  p$x$layout$showlegend <- p$x$layout$showlegend %|D|%
    default(sum(show) > 1 || isTRUE(p$x$highlight$showInLegend))
  p
}
# Reconcile multiple guides (legend + colorbar(s)): suppress a trivial
# legend, and stack the legend/colorbars vertically so they don't overlap.
verify_guides <- function(p) {
  # since colorbars are implemented as "invisible" traces, prevent a "trivial" legend
  if (has_colorbar(p) && has_legend(p) && length(p$x$data) <= 2) {
    p$x$layout$showlegend <- default(FALSE)
  }
  isVisibleBar <- function(tr) {
    is.colorbar(tr) && isTRUE(tr$showscale %||% TRUE)
  }
  isBar <- vapply(p$x$data, isVisibleBar, logical(1))
  nGuides <- sum(isBar) + has_legend(p)
  if (nGuides > 1) {
    # place legend at bottom since its scrolly
    p$x$layout$legend <- modify_list(
      list(y = 1 - ((nGuides - 1) / nGuides), yanchor = "top"),
      p$x$layout$legend
    )
    idx <- which(isBar)
    for (i in seq_along(idx)) {
      # give each colorbar an equal fraction of the vertical space
      p <- colorbar_built(
        p, which = i, len = 1 / nGuides, y = 1 - ((i - 1) / nGuides),
        lenmode = "fraction", yanchor = "top"
      )
    }
  }
  p
}
# Do these trace type/mode combinations draw markers?
has_marker <- function(types, modes) {
  is_scatter <- grepl("scatter", types)
  ifelse(is_scatter, grepl("marker", modes), has_attr(types, "marker"))
}
# Do these trace type/mode combinations draw lines?
has_line <- function(types, modes) {
  is_scatter <- grepl("scatter", types)
  ifelse(is_scatter, grepl("line", modes), has_attr(types, "line"))
}
# Do these trace type/mode combinations draw text?
has_text <- function(types, modes) {
  is_scatter <- grepl("scatter", types)
  ifelse(is_scatter, grepl("text", modes), has_attr(types, "textfont"))
}
# Does each trace type's schema contain the given (single) attribute?
has_attr <- function(types, attr = "marker") {
  if (length(attr) != 1) stop("attr must be of length 1")
  vapply(types, function(x) attr %in% names(Schema$traces[[x]]$attributes), logical(1))
}
# Will p render a legend? (at least one trace shows in it, and the user
# hasn't turned the legend off)
has_legend <- function(p) {
  showLegend <- function(tr) {
    tr$showlegend %||% TRUE
  }
  any(vapply(p$x$data, showLegend, logical(1))) &&
    isTRUE(p$x$layout$showlegend %|D|% TRUE)
}
# Does p contain at least one visible colorbar trace?
has_colorbar <- function(p) {
  isVisibleBar <- function(tr) {
    is.colorbar(tr) && isTRUE(tr$showscale %||% TRUE)
  }
  any(vapply(p$x$data, isVisibleBar, logical(1)))
}
# is a given trace type 3d?
is3d <- function(type = NULL) {
  type <- type %||% "scatter"
  type %in% c("mesh3d", "scatter3d", "surface")
}
# Check for credentials/configuration and throw warnings where appropriate
# Returns the (possibly empty) value as a plain character string.
verify <- function(what = "username", warn = TRUE) {
  val <- grab(what)
  if (val == "" && warn) {
    # a what-specific hint first, then a generic "couldn't find" warning
    switch(what,
      username = warning("You need a plotly username. See help(signup, package = 'plotly')", call. = FALSE),
      api_key = warning("You need an api_key. See help(signup, package = 'plotly')", call. = FALSE))
    warning("Couldn't find ", what, call. = FALSE)
  }
  as.character(val)
}
# Check whether a certain credential/configuration exists.
# Looks up the plotly_<what> environment variable first, then falls back
# to the legacy ~/.plotly/.credentials and ~/.plotly/.config files.
grab <- function(what = "username") {
  who <- paste0("plotly_", what)
  val <- Sys.getenv(who, "")
  # If the environment variable doesn't exist, try reading hidden files that may
  # have been created using other languages or earlier versions of this package
  if (val == "") {
    PLOTLY_DIR <- file.path(normalizePath("~", mustWork = TRUE), ".plotly")
    CREDENTIALS_FILE <- file.path(PLOTLY_DIR, ".credentials")
    CONFIG_FILE <- file.path(PLOTLY_DIR, ".config")
    # note: try_file can be 'succesful', yet return NULL
    val2 <- try_file(CREDENTIALS_FILE, what)
    val <- if (length(nchar(val2)) == 0) try_file(CONFIG_FILE, what) else val2
    val <- val %||% ""
  }
  # return the value (possibly ""), named after the consulted env variable
  setNames(val, who)
}
# try to grab an object key from a JSON file (returns NULL on error --
# note: not an empty string, despite what callers might expect)
try_file <- function(f, what) {
  tryCatch(jsonlite::fromJSON(f)[[what]], error = function(e) NULL)
}
# preferred defaults for toJSON mapping
# (high precision, scalars unboxed, NULL/NA both serialized as JSON null)
to_JSON <- function(x, ...) {
  jsonlite::toJSON(x, digits = 50, auto_unbox = TRUE, force = TRUE,
                   null = "null", na = "null", ...)
}
# preferred defaults for fromJSON mapping (keep lists as lists; no
# data.frame/matrix simplification)
from_JSON <- function(x, ...) {
  jsonlite::fromJSON(x, simplifyDataFrame = FALSE, simplifyMatrix = FALSE, ...)
}
# Box length-one (non-NULL) values with I() so to_JSON() serializes them
# as JSON arrays rather than scalars; longer vectors pass through.
i <- function(x) {
  if (is.null(x)) {
    return(NULL)
  }
  if (length(x) == 1) I(x) else x
}
# Recursively strip 'AsIs' status from x, converting NULL to NA.
# jsonlite converts NULL to {} and NA to null (plotly prefers null to {})
# https://github.com/jeroenooms/jsonlite/issues/29
#
# Stripping 'AsIs' is necessary since ggplot_build(qplot(1:10, fill = I("red")))
# returns list elements with their 'AsIs' class, which conflicts with our
# JSON unboxing strategy.
rm_asis <- function(x) {
  if (is.null(x)) return(NA)
  if (is.data.frame(x)) return(x)
  if (is.list(x)) return(lapply(x, rm_asis))
  if (inherits(x, "AsIs")) {
    # BUG FIX: the value of `class(x) <- v` is `v` itself, so the original
    # returned the *class vector* here instead of the cleaned object;
    # also guard against assigning a zero-length class (drop it instead)
    cls <- setdiff(class(x), "AsIs")
    class(x) <- if (length(cls) > 0) cls else NULL
    return(x)
  }
  x
}
# add a class to an object only if it is new, keeping any existing classes
append_class <- function(x, y) {
  class(x) <- unique(c(class(x), y))
  x
}
# prepend a class (so it wins S3 dispatch), keeping any existing classes
prefix_class <- function(x, y) {
  class(x) <- unique(c(y, class(x)))
  x
}
# substitute class `old` with `new` (regex match via sub), keeping the rest
replace_class <- function(x, new, old) {
  cls <- sub(old, new, class(x))
  class(x) <- cls
  x
}
# drop class y (if present) from x's class vector
remove_class <- function(x, y) {
  cls <- setdiff(oldClass(x), y)
  oldClass(x) <- cls
  x
}
# Resolve the plotly server domain, honoring the plotly_domain and
# plotly_api_domain environment variables.
# TODO: what are some other common configuration options we want to support??
get_domain <- function(type = "") {
  if (identical(type, "api")) {
    # new onprem instances don't have an https://api-thiscompany.plot.ly
    # but https://thiscompany.plot.ly seems to just work in that case...
    return(Sys.getenv("plotly_api_domain", Sys.getenv("plotly_domain", "https://api.plot.ly")))
  }
  Sys.getenv("plotly_domain", "https://plot.ly")
}
# plotly's special keyword arguments in POST body
get_kwargs <- function() {
  c(
    "filename", "fileopt", "style", "traces", "layout", "frames",
    "world_readable"
  )
}
# "common" POST header fields
api_headers <- function() {
v <- as.character(packageVersion("plotly"))
httr::add_headers(
plotly_version = v,
`Plotly-Client-Platform` = paste("R", v),
`Content-Type` = "application/json",
Accept = "*/*"
)
}
api_auth <- function() {
httr::authenticate(
verify("username"),
verify("api_key")
)
}
# try to write environment variables to an .Rprofile
#
# Appends `Sys.setenv("plotly_<key>" = "<value>")` to the .Rprofile under
# `path`, creating the file first when necessary.
#
# @param key name of the plotly option (e.g. "username").
# @param value value to store.
# @param path directory containing (or to contain) the .Rprofile.
cat_profile <- function(key, value, path = "~") {
  r_profile <- file.path(normalizePath(path, mustWork = TRUE),
                         ".Rprofile")
  snippet <- sprintf('\nSys.setenv("plotly_%s" = "%s")', key, value)
  if (!file.exists(r_profile)) {
    message("Creating ", r_profile)
    # BUG FIX: the original called file(r_profile), which opens an unused
    # connection (never closed) and does NOT create the file on disk, so
    # the file.access() checks below always failed for a fresh .Rprofile
    file.create(r_profile)
  }
  if (file.access(r_profile, 2) != 0) {
    stop("R doesn't have permission to write to this file: ", path, "\n",
         "You should consider putting this in an .Rprofile ", "\n",
         "(or sourcing it when you use plotly): ", snippet)
  }
  if (file.access(r_profile, 4) != 0) {
    stop("R doesn't have permission to read this file: ", path)
  }
  message("Adding plotly_", key, " environment variable to ", r_profile)
  cat(snippet, file = r_profile, append = TRUE)
}
# check that suggested packages are installed
# (errors -- without a call prefix -- when `pkg` is missing, optionally
# naming the function that needs it)
try_library <- function(pkg, fun = NULL) {
  if (nzchar(system.file(package = pkg))) {
    return(invisible())
  }
  suffix <- if (!is.null(fun)) paste0(" for `", fun, "`") else NULL
  stop(
    "Package `", pkg, "` required", suffix, ".\n",
    "Please install and try again.",
    call. = FALSE
  )
}
# Are we running inside the RStudio IDE?
is_rstudio <- function() {
  identical(Sys.getenv("RSTUDIO", unset = NA), "1")
}
| /R/utils.R | permissive | saurfang/plotly | R | false | false | 29,043 | r | is.plotly <- function(x) {
inherits(x, "plotly")
}
is.formula <- function(f) {
inherits(f, "formula")
}
is.colorbar <- function(tr) {
inherits(tr, "plotly_colorbar")
}
is.evaled <- function(p) {
all(vapply(p$x$attrs, function(attr) inherits(attr, "plotly_eval"), logical(1)))
}
is.webgl <- function(p) {
if (!is.evaled(p)) p <- plotly_build(p)
types <- vapply(p$x$data, function(tr) tr[["type"]] %||% "scatter", character(1))
any(types %in% glTypes())
}
glTypes <- function() {
c(
"scattergl", "scatter3d", "mesh3d", "heatmapgl", "pointcloud", "parcoords",
"surface"
)
}
# just like ggplot2:::is.discrete()
is.discrete <- function(x) {
is.factor(x) || is.character(x) || is.logical(x)
}
"%||%" <- function(x, y) {
if (length(x) > 0 || is_blank(x)) x else y
}
# kind of like %||%, but only respects user-defined defaults
# (instead of defaults provided in the build step)
"%|D|%" <- function(x, y) {
if (!is.default(x)) x %||% y else y
}
# standard way to specify a line break
br <- function() "<br />"
is.default <- function(x) {
inherits(x, "plotly_default")
}
default <- function(x) {
structure(x, class = "plotly_default")
}
compact <- function(x) {
Filter(Negate(is.null), x)
}
modify_list <- function(x, y, ...) {
modifyList(x %||% list(), y %||% list(), ...)
}
# convert a vector of dates/date-times to milliseconds
to_milliseconds <- function(x) {
if (inherits(x, "Date")) return(as.numeric(x) * 86400000)
if (inherits(x, "POSIXt")) return(as.numeric(x) * 1000)
# throw warning?
x
}
# apply a function to x, retaining class and "special" plotly attributes
retain <- function(x, f = identity) {
y <- structure(f(x), class = oldClass(x))
attrs <- attributes(x)
# TODO: do we set any other "special" attributes internally
# (grepping "structure(" suggests no)
attrs <- attrs[names(attrs) %in% c("defaultAlpha", "apiSrc")]
if (length(attrs)) {
attributes(y) <- attrs
}
y
}
deparse2 <- function(x) {
if (is.null(x) || !is.language(x)) return(NULL)
sub("^~", "", paste(deparse(x, 500L), collapse = ""))
}
new_id <- function() {
htmlwidgets:::createWidgetId()
}
names2 <- function(x) {
names(x) %||% rep("", length(x))
}
getLevels <- function(x) {
if (is.factor(x)) levels(x) else sort(unique(x))
}
tryNULL <- function(expr) tryCatch(expr, error = function(e) NULL)
# Don't attempt to do "tidy" data training on these trace types
is_tidy <- function(trace) {
  untidy_types <- c(
    "mesh3d", "heatmap", "histogram2d",
    "histogram2dcontour", "contour", "surface"
  )
  type <- trace[["type"]] %||% "scatter"
  !(type %in% untidy_types)
}
# is grouping relevant for this geometry? (e.g., grouping doesn't effect a scatterplot)
has_group <- function(trace) {
  grouped_classes <- paste0("plotly_", c("segment", "path", "line", "polygon"))
  if (inherits(trace, grouped_classes)) {
    return(TRUE)
  }
  grepl("scatter", trace[["type"]]) && grepl("lines", trace[["mode"]])
}
# currently implemented non-positional scales in plot_ly()
npscales <- function() {
  scales <- c("color", "symbol", "linetype", "size", "split")
  scales
}
# copied from https://github.com/plotly/plotly.js/blob/master/src/components/color/attributes.js
traceColorDefaults <- function() {
  c(
    "#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd",
    "#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf"
  )
}
# column name used internally to carry the crosstalk key
# TODO: make this more unique?
crosstalk_key <- function() {
  ".crossTalkKey"
}
# modifyList turns elements that are data.frames into lists
# which changes the behavior of toJSON; coerce non-data.frame lists
# to an unnamed data.frame.
# NOTE: atomic vectors and existing data frames fall through and yield
# NULL (invisibly), matching the original behavior.
as_df <- function(x) {
  if (is.null(x) || is.matrix(x)) {
    return(x)
  }
  if (is.list(x) && !is.data.frame(x)) {
    return(setNames(as.data.frame(x), NULL))
  }
  invisible(NULL)
}
# arrange data by the given variables if they exist; variables not present
# in `data` are silently dropped (so no error is thrown)
arrange_safe <- function(data, vars) {
  # only keep the variables actually present in the data
  vars <- vars[vars %in% names(data)]
  if (length(vars) == 0) {
    return(data)
  }
  # dplyr::arrange_() is deprecated; use tidy-select via across()/all_of()
  dplyr::arrange(data, dplyr::across(dplyr::all_of(vars)))
}
# Is this plot's layout flagged as a mapbox map?
is_mapbox <- function(p) {
  map_type <- p$x$layout[["mapType"]]
  identical(map_type, "mapbox")
}
# Is this plot's layout flagged as a geo (map projection) plot?
is_geo <- function(p) {
  map_type <- p$x$layout[["mapType"]]
  identical(map_type, "geo")
}
# Are *all* traces in the plot of the given type(s)?
# ("scatter" is assumed when a trace has no explicit type)
is_type <- function(p, type) {
  trace_type <- function(tr) tr[["type"]] %||% "scatter"
  types <- vapply(p$x$data, trace_type, character(1))
  all(types %in% type)
}
# Replace elements of a nested list
#
# @param x a named list
# @param indicies a vector of indices.
# A 1D list may be used to specify both numeric and non-numeric indices
# @param val the value used to replace the element at the given location
# @examples
#
# x <- list(a = 1)
# # equivalent to `x$a <- 2`
# re_place(x, "a", 2)
#
# y <- list(a = list(list(b = 2)))
#
# # equivalent to `y$a[[1]]$b <- 2`
# y <- re_place(y, list("a", 1, "b"), 3)
# y
# Replace an element of a (possibly nested) list by building the subsetting
# call programmatically, so numeric and character indices can be mixed.
re_place <- function(x, indicies = 1, val) {
  # start with the call `x[[indicies[[1]]]]`
  expr <- call("[[", quote(x), indicies[[1]])
  if (length(indicies) == 1) {
    # single level: evaluate `x[[i]] <- val` in this frame, then return x
    eval(call("<-", expr, val))
    return(x)
  }
  # nest the extraction one level per remaining index,
  # e.g. x[[a]][[1]][[b]]
  for (i in seq(2, length(indicies))) {
    expr <- call("[[", expr, indicies[[i]])
  }
  # evaluate the full replacement call (e.g. `x[[a]][[1]][[b]] <- val`)
  # in this frame, mutating the local copy of x
  eval(call("<-", expr, val))
  x
}
# retrieve the mapbox token from the MAPBOX_TOKEN environment variable;
# if none is set, throw an error with setup instructions
mapbox_token <- function() {
  token <- Sys.getenv("MAPBOX_TOKEN", NA)
  if (!is.na(token)) {
    return(token)
  }
  stop(
    "No mapbox access token found. Obtain a token here\n",
    "https://www.mapbox.com/help/create-api-access-token/\n",
    "Once you have a token, assign it to an environment variable \n",
    "named 'MAPBOX_TOKEN', for example,\n",
    "Sys.setenv('MAPBOX_TOKEN' = 'secret token')", call. = FALSE
  )
}
# rename attrs (unevaluated arguments) from geo locations (lat/lon) to cartesian
# NOTE(review): this maps lat -> x and lon -> y (geographically one would
# expect lon -> x, lat -> y) -- preserved as-is; confirm intent upstream
geo2cartesian <- function(p) {
  to_cartesian <- function(tr) {
    tr[["x"]] <- tr[["x"]] %||% tr[["lat"]]
    tr[["y"]] <- tr[["y"]] %||% tr[["lon"]]
    tr
  }
  p$x$attrs <- lapply(p$x$attrs, to_cartesian)
  p
}
# Was this plot produced by subplot()?
is_subplot <- function(p) {
  flag <- p$x$subplot
  isTRUE(flag)
}
# Fill in per-trace anchor defaults (x/y axis, geo, or mapbox subplot) and
# axis-domain defaults, leaving user-specified values untouched.
supply_defaults <- function(p) {
  # no need to supply defaults for subplots
  if (is_subplot(p)) return(p)
  # supply trace anchor defaults: geo plots anchor traces via `geo`,
  # mapbox via `subplot`, cartesian via x/y axis references
  anchors <- if (is_geo(p)) c("geo" = "geo") else if (is_mapbox(p)) c("subplot" = "mapbox") else c("xaxis" = "x", "yaxis" = "y")
  p$x$data <- lapply(p$x$data, function(tr) {
    for (i in seq_along(anchors)) {
      key <- names(anchors)[[i]]
      # skip anchor keys this trace type doesn't support (per the schema)
      if (!has_attr(tr[["type"]] %||% "scatter", key)) next
      # normalize "x1"/"y1" to "x"/"y"; fall back to the default anchor
      tr[[key]] <- sub("^y1$", "y", sub("^x1$", "x", tr[[key]][1])) %||% anchors[[i]]
    }
    tr
  })
  # hack to avoid https://github.com/ropensci/plotly/issues/945
  if (is_type(p, "parcoords")) p$x$layout$margin$t <- NULL
  # supply domain defaults
  geoDomain <- list(x = c(0, 1), y = c(0, 1))
  if (is_geo(p) || is_mapbox(p)) {
    # cartesian axis objects are meaningless for geo/mapbox plots
    p$x$layout[grepl("^[x-y]axis", names(p$x$layout))] <- NULL
    p$x$layout[[p$x$layout$mapType]] <- modify_list(
      list(domain = geoDomain), p$x$layout[[p$x$layout$mapType]]
    )
  } else {
    # choose the axis objects relevant to the trace type(s):
    # ternary axes, no axes at all (pie/parcoords/sankey), or cartesian
    axes <- if (is_type(p, "scatterternary")) {
      c("aaxis", "baxis", "caxis")
    } else if (is_type(p, "pie") || is_type(p, "parcoords") || is_type(p, "sankey")) {
      NULL
    } else {
      c("xaxis", "yaxis")
    }
    for (axis in axes) {
      p$x$layout[[axis]] <- modify_list(
        list(domain = c(0, 1)), p$x$layout[[axis]]
      )
    }
  }
  p
}
# Populate crosstalk highlighting options on the plot, plus (per SharedData
# group) selectize widget definitions and default selections.
supply_highlight_attrs <- function(p) {
  # set "global" options via crosstalk variable
  p$x$highlight <- p$x$highlight %||% highlight_defaults()
  p <- htmlwidgets::onRender(
    p, sprintf(
      "function(el, x) { var ctConfig = crosstalk.var('plotlyCrosstalkOpts').set(%s); }",
      to_JSON(p$x$highlight)
    )
  )
  # defaults are now populated, allowing us to populate some other
  # attributes such as the selectize widget definition
  sets <- unlist(lapply(p$x$data, "[[", "set"))
  keys <- setNames(lapply(p$x$data, "[[", "key"), sets)
  p$x$highlight$ctGroups <- i(unique(sets))
  # TODO: throw warning if we don't detect valid keys?
  hasKeys <- FALSE
  # NOTE(review): this loop variable `i` shadows the i() helper defined in
  # this file (harmless here, since i() isn't called inside the loop body)
  for (i in p$x$highlight$ctGroups) {
    # all key values across traces belonging to this crosstalk group
    k <- unique(unlist(keys[names(keys) %in% i], use.names = FALSE))
    if (is.null(k)) next
    # NOTE(review): is.null() on a vector is a single FALSE, so this
    # subsetting is a no-op -- possibly is.na() was intended? confirm
    k <- k[!is.null(k)]
    hasKeys <- TRUE
    # include one selectize dropdown per "valid" SharedData layer
    if (isTRUE(p$x$highlight$selectize)) {
      p$x$selectize[[new_id()]] <- list(
        items = data.frame(value = k, label = k), group = i
      )
    }
    # set default values via crosstalk api
    vals <- p$x$highlight$defaultValues[p$x$highlight$defaultValues %in% k]
    if (length(vals)) {
      p <- htmlwidgets::onRender(
        p, sprintf(
          "function(el, x) { crosstalk.group('%s').var('selection').set(%s) }",
          i, jsonlite::toJSON(vals, auto_unbox = FALSE)
        )
      )
    }
  }
  # add HTML dependencies, set a sensible dragmode default, & throw messages
  if (hasKeys) {
    # default dragmode: "select" when highlighting on selection, else "zoom"
    p$x$layout$dragmode <- p$x$layout$dragmode %|D|%
      default(switch(p$x$highlight$on %||% "", plotly_selected = "select") %||% "zoom")
    if (is.default(p$x$highlight$off)) {
      message(
        sprintf(
          "Setting the `off` event (i.e., '%s') to match the `on` event (i.e., '%s'). You can change this default via the `highlight()` function.",
          p$x$highlight$off, p$x$highlight$on
        )
      )
    }
  }
  p
}
# make sure plot attributes adhere to the plotly.js schema:
# warn about any layout/trace attribute names not found in the schema
verify_attr_names <- function(p) {
  # some layout attributes (e.g., [x-y]axis) can have trailing numbers
  layout_names <- sub("[0-9]+$", "", names(p$x$layout))
  valid_layout <- c(
    names(Schema$layout$layoutAttributes),
    c("barmode", "bargap", "mapType")
  )
  attrs_name_check(layout_names, valid_layout, "layout")
  # attributes plotly (the R package) adds on top of the plotly.js schema
  extra_attrs <- c(
    "key", "set", "frame", "transforms",
    "_isNestedKey", "_isSimpleKey", "_isGraticule"
  )
  for (tr in seq_along(p$x$data)) {
    trace <- p$x$data[[tr]]
    attrSpec <- Schema$traces[[trace$type %||% "scatter"]]$attributes
    # make sure attribute names are valid for this trace type
    attrs_name_check(
      names(trace),
      c(names(attrSpec), extra_attrs),
      trace$type
    )
  }
  invisible(p)
}
# ensure both the layout and trace attributes adhere to the plot schema
verify_attr_spec <- function(p) {
  if (!is.null(p$x$layout)) {
    layoutNames <- names(p$x$layout)
    # verify against the schema with trailing axis numbers stripped
    # (xaxis2 -> xaxis), then restore the original (numbered) names
    layoutNew <- verify_attr(
      setNames(p$x$layout, sub("[0-9]+$", "", layoutNames)),
      Schema$layout$layoutAttributes
    )
    p$x$layout <- setNames(layoutNew, layoutNames)
  }
  for (tr in seq_along(p$x$data)) {
    thisTrace <- p$x$data[[tr]]
    validAttrs <- Schema$traces[[thisTrace$type %||% "scatter"]]$attributes
    p$x$data[[tr]] <- verify_attr(thisTrace, validAttrs)
    # prevent these objects from sending null keys
    # (relies on this file's %||% semantics; assigning NULL removes the
    # element when the axis reference is absent)
    p$x$data[[tr]][["xaxis"]] <- p$x$data[[tr]][["xaxis"]] %||% NULL
    p$x$data[[tr]][["yaxis"]] <- p$x$data[[tr]][["yaxis"]] %||% NULL
  }
  p
}
# Make attribute values adhere to the schema spec:
#  - reduce single-valued vectors to a constant (unless arrays are expected)
#  - box length-1 "data_array" values so to_JSON() emits an array
#  - tag 'src-able' attributes (needed for api_create())
# The same rules are applied one level down for attributes with an "object"
# role (previously duplicated inline; now shared via verify_attr_value()).
# TODO: should this be done recursively?
verify_attr <- function(proposed, schema) {
  for (attr in names(proposed)) {
    attrSchema <- schema[[attr]]
    # if schema is missing (i.e., this is an un-official attr), move along
    if (is.null(attrSchema)) next
    proposed[[attr]] <- verify_attr_value(
      proposed[[attr]], attrSchema, schema[[paste0(attr, "src")]]
    )
    # do the same for "sub-attributes" of object-valued attributes
    if (identical(tryNULL(attrSchema[["role"]]) %||% "", "object")) {
      for (attr2 in names(proposed[[attr]])) {
        if (is.null(attrSchema[[attr2]])) next
        proposed[[attr]][[attr2]] <- verify_attr_value(
          proposed[[attr]][[attr2]], attrSchema[[attr2]],
          schema[[attr]][[paste0(attr2, "src")]]
        )
      }
    }
  }
  proposed
}
# Apply the schema rules to a single attribute value.
# value   -- the proposed attribute value
# spec    -- the schema entry for this attribute
# srcSpec -- the schema entry for the companion "<attr>src" attribute (or NULL)
verify_attr_value <- function(value, spec, srcSpec) {
  valType <- tryNULL(spec[["valType"]]) %||% ""
  role <- tryNULL(spec[["role"]]) %||% ""
  arrayOK <- tryNULL(spec[["arrayOk"]]) %||% FALSE
  isDataArray <- identical(valType, "data_array")
  # where applicable, reduce single valued vectors to a constant
  # (while preserving attributes)
  if (!isDataArray && !arrayOK && !identical(role, "object")) {
    value <- retain(value, unique)
  }
  # ensure data_arrays of length 1 are boxed up by to_JSON()
  if (isDataArray) {
    value <- i(value)
  }
  # tag 'src-able' attributes (needed for api_create())
  isSrcAble <- !is.null(srcSpec) && length(value) > 1
  if (isDataArray || isSrcAble) {
    value <- structure(value, apiSrc = TRUE)
  }
  value
}
# Warn about attribute names that aren't valid for this object type;
# returns the proposed names invisibly.
attrs_name_check <- function(proposedAttrs, validAttrs, type = "scatter") {
  illegalAttrs <- setdiff(proposedAttrs, validAttrs)
  if (length(illegalAttrs) > 0) {
    warning(
      "'", type, "' objects don't have these attributes: '",
      paste(illegalAttrs, collapse = "', '"), "'\n",
      "Valid attributes include:\n'",
      paste(validAttrs, collapse = "', '"), "'\n",
      call. = FALSE
    )
  }
  invisible(proposedAttrs)
}
# make sure trace type is valid; infer a sensible type (with a message)
# from the positional attributes when none is specified
# TODO: add an argument to verify trace properties are valid (https://github.com/ropensci/plotly/issues/540)
verify_type <- function(trace) {
  if (is.null(trace$type)) {
    attrs <- names(trace)
    attrLengths <- lengths(trace)
    trace$type <- if (all(c("x", "y", "z") %in% attrs)) {
      # i/j/k vertex indices imply a mesh; otherwise a 3d scatter
      if (all(c("i", "j", "k") %in% attrs)) "mesh3d" else "scatter3d"
    } else if (all(c("x", "y") %in% attrs)) {
      xNumeric <- !is.discrete(trace[["x"]])
      yNumeric <- !is.discrete(trace[["y"]])
      if (xNumeric && yNumeric) {
        # BUGFIX: was `any(attrLengths) > 15000`, which compares a logical
        # to 15000 and is always FALSE, so scattergl was never chosen
        if (any(attrLengths > 15000)) "scattergl" else "scatter"
      } else if (xNumeric || yNumeric) {
        "bar"
      } else "histogram2d"
    } else if ("y" %in% attrs || "x" %in% attrs) {
      "histogram"
    } else if ("z" %in% attrs) {
      "heatmap"
    } else {
      warning("No trace type specified and no positional attributes specified",
              call. = FALSE)
      "scatter"
    }
    relay_type(trace$type)
  }
  if (!is.character(trace$type) || length(trace$type) != 1) {
    stop("The trace type must be a character vector of length 1.\n",
         call. = FALSE)
  }
  if (!trace$type %in% names(Schema$traces)) {
    stop("Trace type must be one of the following: \n",
         "'", paste(names(Schema$traces), collapse = "', '"), "'",
         call. = FALSE)
  }
  # if scatter/scatter3d/scattergl, default to a scatterplot
  if (grepl("scatter", trace$type) && is.null(trace$mode)) {
    message(
      "No ", trace$type, " mode specified:\n",
      " Setting the mode to markers\n",
      " Read more about this attribute -> https://plot.ly/r/reference/#scatter-mode"
    )
    trace$mode <- "markers"
  }
  trace
}
# Tell the user which trace type was inferred, then return that type.
relay_type <- function(type) {
  message(
    "No trace type specified:\n",
    " Based on info supplied, a '", type, "' trace seems appropriate.\n",
    " Read more about this trace type -> https://plot.ly/r/reference/#", type
  )
  type
}
# Searches a list for character strings and translates R linebreaks to HTML
# linebreaks (i.e., '\n' -> '<br />'). JavaScript function definitions created
# via `htmlwidgets::JS()` are ignored
translate_linebreaks <- function(p) {
  fix <- function(a) {
    kind <- typeof(a)
    if (kind == "list") {
      # `a[] <-` keeps the class/attributes of the list itself,
      # which is important for many things, such as colorbars
      a[] <- lapply(a, fix)
    } else if (kind == "character" && !inherits(a, "JS_EVAL")) {
      old_attrs <- attributes(a)
      a <- gsub("\n", br(), a, fixed = TRUE)
      attributes(a) <- old_attrs
    }
    a
  }
  p$x[] <- lapply(p$x, fix)
  p
}
# Supply a horizontal orientation for bar/box traces whose numeric values
# sit on the x axis, and for histograms whose numeric values sit on y.
verify_orientation <- function(trace) {
  numeric_attr <- function(attr) {
    !is.discrete(trace[[attr]]) && !is.null(trace[[attr]] %||% NULL)
  }
  xNumeric <- numeric_attr("x")
  yNumeric <- numeric_attr("y")
  if (xNumeric && !yNumeric && any(c("bar", "box") %in% trace[["type"]])) {
    trace$orientation <- "h"
  }
  if (yNumeric && "histogram" %in% trace[["type"]]) {
    trace$orientation <- "h"
  }
  trace
}
# Make sure the scatter trace mode matches any marker/line/textfont objects
# that were specified, informing the user when the mode is amended.
# NOTE: each check reads the *original* mode, so several suffixes can be
# appended in one pass (matching the original behavior).
verify_mode <- function(p) {
  for (tr in seq_along(p$x$data)) {
    trace <- p$x$data[[tr]]
    if (!grepl("scatter", trace$type %||% "scatter")) next
    if (!is.null(trace$marker) && !grepl("markers", trace$mode %||% "")) {
      message(
        "A marker object has been specified, but markers is not in the mode\n",
        "Adding markers to the mode..."
      )
      p$x$data[[tr]]$mode <- paste0(p$x$data[[tr]]$mode, "+markers")
    }
    if (!is.null(trace$line) && !grepl("lines", trace$mode %||% "")) {
      message(
        "A line object has been specified, but lines is not in the mode\n",
        "Adding lines to the mode..."
      )
      p$x$data[[tr]]$mode <- paste0(p$x$data[[tr]]$mode, "+lines")
    }
    # was warning(); changed to message() for consistency with the
    # marker/line branches (all three are informational amendments)
    if (!is.null(trace$textfont) && !grepl("text", trace$mode %||% "")) {
      message(
        "A textfont object has been specified, but text is not in the mode\n",
        "Adding text to the mode..."
      )
      p$x$data[[tr]]$mode <- paste0(p$x$data[[tr]]$mode, "+text")
    }
  }
  p
}
# populate categorical axes using categoryorder="array" & categoryarray=[]
populate_categorical_axes <- function(p) {
  # all x/y axis objects (fall back to empty placeholders)
  axes <- p$x$layout[grepl("^xaxis|^yaxis", names(p$x$layout))] %||%
    list(xaxis = NULL, yaxis = NULL)
  for (i in seq_along(axes)) {
    axis <- axes[[i]]
    axisName <- names(axes)[[i]]
    # "x" or "y"
    axisType <- substr(axisName, 0, 1)
    # ggplotly() populates these attributes...don't want to clobber that
    if (!is.null(axis$ticktext) || !is.null(axis$tickvals)) next
    # collect all the data that goes on this axis
    d <- lapply(p$x$data, "[[", axisType)
    # does this trace anchor on this particular (possibly numbered) axis?
    isOnThisAxis <- function(tr) {
      is.null(tr[["geo"]]) && sub("axis", "", axisName) %in%
        (tr[[sub("[0-9]+", "", axisName)]] %||% axisType) &&
        # avoid reordering matrices (see #863)
        !is.matrix(tr[["z"]])
    }
    d <- d[vapply(p$x$data, isOnThisAxis, logical(1))]
    if (length(d) == 0) next
    isDiscrete <- vapply(d, is.discrete, logical(1))
    # mixing discrete & continuous data on one axis can't be ordered sensibly
    if (0 < sum(isDiscrete) & sum(isDiscrete) < length(d)) {
      warning(
        "Can't display both discrete & non-discrete data on same axis",
        call. = FALSE
      )
      next
    }
    if (sum(isDiscrete) == 0) next
    # the union of category levels across all traces on this axis; sorted
    # unless everything is a factor (factor level order is user-specified)
    categories <- lapply(d, getLevels)
    categories <- unique(unlist(categories))
    if (any(!vapply(d, is.factor, logical(1)))) categories <- sort(categories)
    p$x$layout[[axisName]]$type <-
      p$x$layout[[axisName]]$type %||% "category"
    p$x$layout[[axisName]]$categoryorder <-
      p$x$layout[[axisName]]$categoryorder %||% "array"
    p$x$layout[[axisName]]$categoryarray <-
      p$x$layout[[axisName]]$categoryarray %||% categories
  }
  p
}
# annotations/shapes/images must be *arrays* of objects in plotly.js;
# wrap a single (named) object in a list so toJSON emits an array
verify_arrays <- function(p) {
  array_attrs <- c("annotations", "shapes", "images")
  for (attr in array_attrs) {
    obj <- p$x$layout[[attr]]
    if (is.list(obj) && !is.null(names(obj))) {
      p$x$layout[[attr]] <- list(obj)
    }
  }
  p
}
# Default the hovermode to "closest" when hovering makes sense (scatter
# traces with markers, or hover/click-based highlighting), unless the user
# already set a hovermode.
verify_hovermode <- function(p) {
  if (!is.null(p$x$layout$hovermode)) {
    return(p)
  }
  types <- unlist(lapply(p$x$data, function(tr) tr$type %||% "scatter"))
  modes <- unlist(lapply(p$x$data, function(tr) tr$mode %||% "lines"))
  hasScatterMarkers <- any(grepl("markers", modes) & types == "scatter")
  hasPointHighlight <- any(c("plotly_hover", "plotly_click") %in% p$x$highlight$on)
  if (hasScatterMarkers || hasPointHighlight) {
    p$x$layout$hovermode <- "closest"
  }
  p
}
# Normalize crosstalk key attributes on every trace: flag "simple"
# (single-valued) and "nested" (non-atomic) keys for client-side logic,
# and coerce all key values to character strings.
verify_key_type <- function(p) {
  keys <- lapply(p$x$data, "[[", "key")
  for (i in seq_along(keys)) {
    k <- keys[[i]]
    if (is.null(k)) next
    # does it *ever* make sense to have a missing key value?
    uk <- uniq(k)
    if (length(uk) == 1) {
      # i.e., the key for this trace has one value. In this case,
      # we don't have iterate through the entire key, so instead,
      # we provide a flag to inform client side logic to match the _entire_
      # trace if this one key value is a match
      p$x$data[[i]]$key <- uk[[1]]
      p$x$data[[i]]$`_isSimpleKey` <- TRUE
      p$x$data[[i]]$`_isNestedKey` <- FALSE
    }
    # a non-atomic key (e.g. a list-column of keys) is "nested"
    p$x$data[[i]]$`_isNestedKey` <- p$x$data[[i]]$`_isNestedKey` %||% !lazyeval::is_atomic(k)
    # key values should always be strings
    if (p$x$data[[i]]$`_isNestedKey`) {
      # coerce each element and drop names so toJSON emits a plain array
      p$x$data[[i]]$key <- lapply(p$x$data[[i]]$key, function(x) I(as.character(x)))
      p$x$data[[i]]$key <- setNames(p$x$data[[i]]$key, NULL)
    } else {
      p$x$data[[i]]$key <- I(as.character(p$x$data[[i]]$key))
    }
  }
  p
}
# Convert trace types to their WebGL equivalents (see toWebGL()); warns
# about traces that have no "<type>gl" counterpart in the schema.
verify_webgl <- function(p) {
  # .plotlyWebGl is set by toWebGL()
  if (!isTRUE(p$x$.plotlyWebGl)) {
    return(p)
  }
  # vapply (not sapply) so an empty trace list still yields character(0)
  types <- vapply(p$x$data, function(x) x[["type"]][1] %||% "scatter", character(1))
  idx <- paste0(types, "gl") %in% names(Schema$traces)
  if (any(!idx)) {
    warning(
      "The following traces don't have a WebGL equivalent: ",
      paste(which(!idx), collapse = ", ")
    )
  }
  for (i in which(idx)) {
    p$x$data[[i]]$type <- paste0(p$x$data[[i]]$type, "gl")
  }
  p
}
# Decide whether a legend should be shown by default (only when it would
# have more than one entry), and honor hide_legend().
verify_showlegend <- function(p) {
  # hide_legend() sets .hideLegend; also drop "legend title" annotations
  if (isTRUE(p$x$.hideLegend)) {
    ann <- p$x$layout$annotations
    isTitle <- vapply(ann, function(x) isTRUE(x$legendTitle), logical(1))
    p$x$layout$annotations <- ann[!isTitle]
    p$x$layout$showlegend <- FALSE
  }
  shown <- vapply(p$x$data, function(x) x$showlegend %||% TRUE, logical(1))
  # respect only _user-specified_ defaults
  p$x$layout$showlegend <- p$x$layout$showlegend %|D|%
    default(sum(shown) > 1 || isTRUE(p$x$highlight$showInLegend))
  p
}
# Lay out legend and colorbar "guides": when multiple guides are present,
# stack them vertically, giving each an equal fraction of height.
verify_guides <- function(p) {
  # since colorbars are implemented as "invisible" traces, prevent a "trivial" legend
  if (has_colorbar(p) && has_legend(p) && length(p$x$data) <= 2) {
    p$x$layout$showlegend <- default(FALSE)
  }
  # a colorbar trace that will actually be displayed
  isVisibleBar <- function(tr) {
    is.colorbar(tr) && isTRUE(tr$showscale %||% TRUE)
  }
  isBar <- vapply(p$x$data, isVisibleBar, logical(1))
  # total guides = visible colorbars + (0 or 1) legend
  nGuides <- sum(isBar) + has_legend(p)
  if (nGuides > 1) {
    # place legend at bottom since its scrolly
    p$x$layout$legend <- modify_list(
      list(y = 1 - ((nGuides - 1) / nGuides), yanchor = "top"),
      p$x$layout$legend
    )
    # stack each colorbar above the legend, 1/nGuides of the height each
    idx <- which(isBar)
    for (i in seq_along(idx)) {
      p <- colorbar_built(
        p, which = i, len = 1 / nGuides, y = 1 - ((i - 1) / nGuides),
        lenmode = "fraction", yanchor = "top"
      )
    }
  }
  p
}
# For each trace: does it draw point markers? (scatter traces: mode contains
# "marker"; other trace types: the schema supports a marker attribute)
has_marker <- function(types, modes) {
  scatter_like <- grepl("scatter", types)
  ifelse(scatter_like, grepl("marker", modes), has_attr(types, "marker"))
}
# For each trace: does it draw lines? (scatter traces: mode contains "line";
# other trace types: the schema supports a line attribute)
has_line <- function(types, modes) {
  scatter_like <- grepl("scatter", types)
  ifelse(scatter_like, grepl("line", modes), has_attr(types, "line"))
}
# For each trace: does it draw text? (scatter traces: mode contains "text";
# other trace types: the schema supports a textfont attribute)
has_text <- function(types, modes) {
  scatter_like <- grepl("scatter", types)
  ifelse(scatter_like, grepl("text", modes), has_attr(types, "textfont"))
}
# For each trace type: does the plotly.js schema list this attribute?
has_attr <- function(types, attr = "marker") {
  if (length(attr) != 1) stop("attr must be of length 1")
  supports <- function(ty) attr %in% names(Schema$traces[[ty]]$attributes)
  vapply(types, supports, logical(1))
}
# Will a legend be displayed? (some trace shows a legend entry AND the
# layout doesn't suppress it via a user-specified showlegend = FALSE)
has_legend <- function(p) {
  shown <- vapply(p$x$data, function(tr) tr$showlegend %||% TRUE, logical(1))
  any(shown) && isTRUE(p$x$layout$showlegend %|D|% TRUE)
}
# Will a colorbar be displayed? (some colorbar trace has showscale enabled)
has_colorbar <- function(p) {
  visible_bar <- function(tr) is.colorbar(tr) && isTRUE(tr$showscale %||% TRUE)
  any(vapply(p$x$data, visible_bar, logical(1)))
}
# is a given trace type 3d? (NULL defaults to "scatter")
is3d <- function(type = NULL) {
  three_d_types <- c("mesh3d", "scatter3d", "surface")
  (type %||% "scatter") %in% three_d_types
}
# Check for credentials/configuration and throw warnings where appropriate;
# always returns the (possibly empty) value as character.
verify <- function(what = "username", warn = TRUE) {
  val <- grab(what)
  if (val == "" && warn) {
    hint <- switch(
      what,
      username = "You need a plotly username. See help(signup, package = 'plotly')",
      api_key = "You need an api_key. See help(signup, package = 'plotly')"
    )
    if (!is.null(hint)) warning(hint, call. = FALSE)
    warning("Couldn't find ", what, call. = FALSE)
  }
  as.character(val)
}
# Look up a credential/configuration value: the plotly_<what> environment
# variable first, then the legacy ~/.plotly/.credentials and
# ~/.plotly/.config files. Returns a (possibly empty) value named
# "plotly_<what>".
grab <- function(what = "username") {
  who <- paste0("plotly_", what)
  val <- Sys.getenv(who, "")
  # If the environment variable doesn't exist, try reading hidden files that
  # may have been created using other languages or earlier versions of this
  # package
  if (val == "") {
    plotly_dir <- file.path(normalizePath("~", mustWork = TRUE), ".plotly")
    # note: try_file can be 'successful', yet return NULL
    val2 <- try_file(file.path(plotly_dir, ".credentials"), what)
    if (length(nchar(val2)) == 0) {
      val2 <- try_file(file.path(plotly_dir, ".config"), what)
    }
    val <- val2 %||% ""
  }
  setNames(val, who)
}
# try to grab an object key from a JSON file (returns NULL on error,
# and possibly NULL on success too, when the key is absent)
try_file <- function(f, what) {
  tryCatch(jsonlite::fromJSON(f)[[what]], error = function(e) NULL)
}
# preferred defaults for converting R objects to JSON
to_JSON <- function(x, ...) {
  jsonlite::toJSON(
    x, digits = 50, auto_unbox = TRUE, force = TRUE,
    null = "null", na = "null", ...
  )
}
# preferred defaults for parsing JSON into R objects
from_JSON <- function(x, ...) {
  jsonlite::fromJSON(
    x, simplifyDataFrame = FALSE, simplifyMatrix = FALSE, ...
  )
}
# Box a length-1 value as "AsIs" so to_JSON() serializes it as an array
# (not a scalar); NULL and longer vectors pass through untouched.
i <- function(x) {
  if (is.null(x)) {
    NULL
  } else if (length(x) == 1) {
    I(x)
  } else {
    x
  }
}
# Recursively strip 'AsIs' status from list elements.
# jsonlite converts NULL to {} and NA to null (plotly prefers null to {})
# https://github.com/jeroenooms/jsonlite/issues/29
# This is necessary since ggplot_build(qplot(1:10, fill = I("red")))
# returns list elements with their 'AsIs' class, which conflicts with
# our JSON unboxing strategy.
rm_asis <- function(x) {
  if (is.null(x)) return(NA)
  if (is.data.frame(x)) return(x)
  if (is.list(x)) return(lapply(x, rm_asis))
  # BUGFIX: the original `else if (...) class(x) <- setdiff(...)` returned
  # the value of the assignment (the new class vector), not the unclassed
  # object itself
  if (inherits(x, "AsIs")) {
    cls <- setdiff(oldClass(x), "AsIs")
    oldClass(x) <- if (length(cls) > 0) cls else NULL
  }
  x
}
# add a class to an object only if it is new, and keep any existing classes of
# that object (new class goes last)
append_class <- function(x, y) {
  combined <- unique(c(class(x), y))
  structure(x, class = combined)
}
# prepend a class to an object (dropping duplicates, new class first)
prefix_class <- function(x, y) {
  combined <- unique(c(y, class(x)))
  structure(x, class = combined)
}
# swap one class for another, keeping its position in the class vector
# (note: `old` is treated as a regular expression by sub())
replace_class <- function(x, new, old) {
  updated <- sub(old, new, class(x))
  class(x) <- updated
  x
}
# drop a class from an object, keeping any other classes it carries
remove_class <- function(x, y) {
  remaining <- setdiff(oldClass(x), y)
  oldClass(x) <- remaining
  x
}
# Resolve the plotly server domain from environment variables, with sensible
# fallbacks. TODO: what are some other common configuration options we want
# to support??
get_domain <- function(type = "") {
  if (type != "api") {
    return(Sys.getenv("plotly_domain", "https://plot.ly"))
  }
  # new onprem instances don't have an https://api-thiscompany.plot.ly
  # but https://thiscompany.plot.ly seems to just work in that case...
  Sys.getenv("plotly_api_domain", Sys.getenv("plotly_domain", "https://api.plot.ly"))
}
# plotly's special keyword arguments in POST body
get_kwargs <- function() {
  kwargs <- c(
    "filename", "fileopt", "style", "traces",
    "layout", "frames", "world_readable"
  )
  kwargs
}
# "common" POST header fields
api_headers <- function() {
v <- as.character(packageVersion("plotly"))
httr::add_headers(
plotly_version = v,
`Plotly-Client-Platform` = paste("R", v),
`Content-Type` = "application/json",
Accept = "*/*"
)
}
# HTTP basic auth built from the stored plotly username/api key
api_auth <- function() {
  username <- verify("username")
  key <- verify("api_key")
  httr::authenticate(username, key)
}
# try to write a plotly_<key> environment variable setting to the .Rprofile
# under `path`, creating the file if needed; errors (with a copy of the
# snippet) when the file can't be written or read
cat_profile <- function(key, value, path = "~") {
  r_profile <- file.path(normalizePath(path, mustWork = TRUE),
                         ".Rprofile")
  snippet <- sprintf('\nSys.setenv("plotly_%s" = "%s")', key, value)
  if (!file.exists(r_profile)) {
    message("Creating ", r_profile)
    # BUGFIX: the old code called file(r_profile), which opened an unused
    # connection (a leak) and never created the file, so the file.access()
    # check below failed for a fresh .Rprofile; actually create it instead
    file.create(r_profile)
  }
  if (file.access(r_profile, 2) != 0) {
    stop("R doesn't have permission to write to this file: ", path, "\n",
         "You should consider putting this in an .Rprofile ", "\n",
         "(or sourcing it when you use plotly): ", snippet)
  }
  if (file.access(r_profile, 4) != 0) {
    stop("R doesn't have permission to read this file: ", path)
  }
  message("Adding plotly_", key, " environment variable to ", r_profile)
  cat(snippet, file = r_profile, append = TRUE)
}
# check that a suggested package is installed; if not, error with
# installation advice (mentioning the calling feature when `fun` is given)
try_library <- function(pkg, fun = NULL) {
  if (nzchar(system.file(package = pkg))) {
    return(invisible())
  }
  context <- if (!is.null(fun)) paste0(" for `", fun, "`")
  stop(
    "Package `", pkg, "` required", context, ".\n",
    "Please install and try again.", call. = FALSE
  )
}
# Are we running inside the RStudio IDE? (it sets RSTUDIO=1)
is_rstudio <- function() {
  identical("1", Sys.getenv("RSTUDIO", NA))
}
|
#' Add one or more edges using a text string
#'
#' @description
#'
#' With a graph object of class `dgr_graph`, add one or more edges to the graph
#' using a text string.
#'
#' @inheritParams render_graph
#' @param edges A single-length vector with a character string specifying the
#' edges. For a directed graph, the string object should be formatted as a
#' series of node ID values as `[node_ID_1]->[node_ID_2]` separated by one
#' or more space characters. For undirected graphs, `--` should replace `->`.
#' Line breaks in the vector won't cause an error.
#' @param rel An optional vector specifying the relationship between the
#' connected nodes.
#' @param use_labels An option to use node `label` values in the `edges` string
#' to define node connections. Note that this is only possible if all nodes
#' have distinct `label` values set and none exist as an empty string.
#'
#' @return A graph object of class `dgr_graph`.
#'
#' @examples
#' # Create a graph with 4 nodes
#' graph <-
#' create_graph() %>%
#' add_node(label = "one") %>%
#' add_node(label = "two") %>%
#' add_node(label = "three") %>%
#' add_node(label = "four")
#'
#' # Add edges between nodes using
#' # a character string with node
#' # ID values
#' graph_node_id <-
#' graph %>%
#' add_edges_w_string(
#' edges = "1->2 1->3 2->4 2->3")
#'
#' # Show the graph's internal
#' # edge data frame
#' graph_node_id %>% get_edge_df()
#'
#' # Add edges between nodes using
#' # a character string with node
#' # label values and setting
#' # `use_labels = TRUE`; note that
#' # all nodes must have unique
#' # `label` values to use this
#' graph_node_label <-
#' graph %>%
#' add_edges_w_string(
#' edges =
#' "one->two one->three
#' two->four two->three",
#' use_labels = TRUE)
#'
#' # Show the graph's internal
#' # edge data frame (it's the
#' # same as before)
#' graph_node_label %>% get_edge_df()
#'
#' @family Edge creation and removal
#'
#' @export
add_edges_w_string <- function(
    graph,
    edges,
    rel = NULL,
    use_labels = FALSE
) {
  # Get the time of function start
  time_function_start <- Sys.time()
  # Get the name of the function
  fcn_name <- get_calling_fcn()
  # Validation: Graph object is valid
  if (graph_object_valid(graph) == FALSE) {
    emit_error(
      fcn_name = fcn_name,
      reasons = "The graph object is not valid")
  }
  # Validation: Graph contains nodes
  if (graph_contains_nodes(graph) == FALSE) {
    emit_error(
      fcn_name = fcn_name,
      reasons = "The graph contains no nodes, so, edges cannot be added")
  }
  # Get the value for the latest `version_id` for
  # the graph (in the `graph_log`)
  current_graph_log_version_id <-
    max(graph$graph_log$version_id)
  # Normalize whitespace in `edges`: translate linebreaks to spaces, then
  # collapse runs of whitespace and trim leading/trailing spaces
  edges_cleaned <- gsub("\n", " ", edges)
  edges_cleaned <-
    gsub("(?<=[\\s])\\s*|^\\s+|\\s+$", "",
         edges_cleaned, perl = TRUE)
  # Split by single spaces into separate edge expressions (e.g., "1->2")
  edges_split <- unlist(strsplit(edges_cleaned, " "))
  # Split each edge expression into `from` and `to` node references;
  # directed graphs use `->`, undirected graphs use `--`
  # (previously each expression was split twice via sapply(), which
  # duplicated work and has a type-unstable return on empty input)
  separator <- if (graph$directed) "->" else "--"
  edge_parts <- strsplit(edges_split, separator, fixed = TRUE)
  from <- vapply(edge_parts, `[[`, character(1), 1)
  to <- vapply(edge_parts, `[[`, character(1), 2)
  # If `use_labels` is set to TRUE, treat values in
  # the string as labels; need to map to node ID values
  if (use_labels) {
    from_to_node_id <-
      translate_to_node_id(
        graph = graph,
        from = from,
        to = to)
    from <- from_to_node_id$from
    to <- from_to_node_id$to
  }
  # Create an edge data frame (edf), with associated
  # `rel` values when supplied
  if (is.null(rel)) {
    new_edges <- create_edge_df(from = from, to = to)
  } else {
    new_edges <- create_edge_df(from = from, to = to, rel = rel)
  }
  # Get the number of edges in the graph before the addition
  edges_graph_1 <- graph %>% count_edges()
  # Add the new edges to the graph
  graph <- add_edge_df(graph, new_edges)
  # Get the updated number of edges in the graph
  edges_graph_2 <- graph %>% count_edges()
  # Get the number of edges added to the graph
  edges_added <- edges_graph_2 - edges_graph_1
  # Clear the graph's active selection
  graph <-
    suppressMessages(
      graph %>%
        clear_selection())
  # Remove extra items from the `graph_log`
  graph$graph_log <-
    graph$graph_log %>%
    dplyr::filter(version_id <= current_graph_log_version_id)
  # Record this function call in the `graph_log`
  graph$graph_log <-
    add_action_to_log(
      graph_log = graph$graph_log,
      version_id = nrow(graph$graph_log) + 1,
      function_used = fcn_name,
      time_modified = time_function_start,
      duration = graph_function_duration(time_function_start),
      nodes = nrow(graph$nodes_df),
      edges = nrow(graph$edges_df),
      d_e = edges_added)
  # Perform graph actions, if any are available
  if (nrow(graph$graph_actions) > 0) {
    graph <-
      graph %>%
      trigger_graph_actions()
  }
  # Write graph backup if the option is set
  if (graph$graph_info$write_backups) {
    save_graph_as_rds(graph = graph)
  }
  graph
}
| /R/add_edges_w_string.R | permissive | rich-iannone/DiagrammeR | R | false | false | 5,553 | r | #' Add one or more edges using a text string
#'
#' @description
#'
#' With a graph object of class `dgr_graph`, add one or more edges to the graph
#' using a text string.
#'
#' @inheritParams render_graph
#' @param edges A single-length vector with a character string specifying the
#' edges. For a directed graph, the string object should be formatted as a
#' series of node ID values as `[node_ID_1]->[node_ID_2]` separated by one
#' or more space characters. For undirected graphs, `--` should replace `->`.
#' Line breaks in the vector won't cause an error.
#' @param rel An optional vector specifying the relationship between the
#' connected nodes.
#' @param use_labels An option to use node `label` values in the `edges` string
#' to define node connections. Note that this is only possible if all nodes
#' have distinct `label` values set and none exist as an empty string.
#'
#' @return A graph object of class `dgr_graph`.
#'
#' @examples
#' # Create a graph with 4 nodes
#' graph <-
#' create_graph() %>%
#' add_node(label = "one") %>%
#' add_node(label = "two") %>%
#' add_node(label = "three") %>%
#' add_node(label = "four")
#'
#' # Add edges between nodes using
#' # a character string with node
#' # ID values
#' graph_node_id <-
#' graph %>%
#' add_edges_w_string(
#' edges = "1->2 1->3 2->4 2->3")
#'
#' # Show the graph's internal
#' # edge data frame
#' graph_node_id %>% get_edge_df()
#'
#' # Add edges between nodes using
#' # a character string with node
#' # label values and setting
#' # `use_labels = TRUE`; note that
#' # all nodes must have unique
#' # `label` values to use this
#' graph_node_label <-
#' graph %>%
#' add_edges_w_string(
#' edges =
#' "one->two one->three
#' two->four two->three",
#' use_labels = TRUE)
#'
#' # Show the graph's internal
#' # edge data frame (it's the
#' # same as before)
#' graph_node_label %>% get_edge_df()
#'
#' @family Edge creation and removal
#'
#' @export
add_edges_w_string <- function(
    graph,
    edges,
    rel = NULL,
    use_labels = FALSE
) {
  # Get the time of function start
  time_function_start <- Sys.time()
  # Get the name of the function
  fcn_name <- get_calling_fcn()
  # Validation: Graph object is valid
  if (graph_object_valid(graph) == FALSE) {
    emit_error(
      fcn_name = fcn_name,
      reasons = "The graph object is not valid")
  }
  # Validation: Graph contains nodes
  if (graph_contains_nodes(graph) == FALSE) {
    emit_error(
      fcn_name = fcn_name,
      reasons = "The graph contains no nodes, so, edges cannot be added")
  }
  # Get the value for the latest `version_id` for
  # the graph (in the `graph_log`)
  current_graph_log_version_id <-
    max(graph$graph_log$version_id)
  # Normalize whitespace in `edges`: translate linebreaks to spaces, then
  # collapse runs of whitespace and trim leading/trailing spaces
  edges_cleaned <- gsub("\n", " ", edges)
  edges_cleaned <-
    gsub("(?<=[\\s])\\s*|^\\s+|\\s+$", "",
         edges_cleaned, perl = TRUE)
  # Split by single spaces into separate edge expressions (e.g., "1->2")
  edges_split <- unlist(strsplit(edges_cleaned, " "))
  # Split each edge expression into `from` and `to` node references;
  # directed graphs use `->`, undirected graphs use `--`
  # (previously each expression was split twice via sapply(), which
  # duplicated work and has a type-unstable return on empty input)
  separator <- if (graph$directed) "->" else "--"
  edge_parts <- strsplit(edges_split, separator, fixed = TRUE)
  from <- vapply(edge_parts, `[[`, character(1), 1)
  to <- vapply(edge_parts, `[[`, character(1), 2)
  # If `use_labels` is set to TRUE, treat values in
  # the string as labels; need to map to node ID values
  if (use_labels) {
    from_to_node_id <-
      translate_to_node_id(
        graph = graph,
        from = from,
        to = to)
    from <- from_to_node_id$from
    to <- from_to_node_id$to
  }
  # Create an edge data frame (edf), with associated
  # `rel` values when supplied
  if (is.null(rel)) {
    new_edges <- create_edge_df(from = from, to = to)
  } else {
    new_edges <- create_edge_df(from = from, to = to, rel = rel)
  }
  # Get the number of edges in the graph before the addition
  edges_graph_1 <- graph %>% count_edges()
  # Add the new edges to the graph
  graph <- add_edge_df(graph, new_edges)
  # Get the updated number of edges in the graph
  edges_graph_2 <- graph %>% count_edges()
  # Get the number of edges added to the graph
  edges_added <- edges_graph_2 - edges_graph_1
  # Clear the graph's active selection
  graph <-
    suppressMessages(
      graph %>%
        clear_selection())
  # Remove extra items from the `graph_log`
  graph$graph_log <-
    graph$graph_log %>%
    dplyr::filter(version_id <= current_graph_log_version_id)
  # Record this function call in the `graph_log`
  graph$graph_log <-
    add_action_to_log(
      graph_log = graph$graph_log,
      version_id = nrow(graph$graph_log) + 1,
      function_used = fcn_name,
      time_modified = time_function_start,
      duration = graph_function_duration(time_function_start),
      nodes = nrow(graph$nodes_df),
      edges = nrow(graph$edges_df),
      d_e = edges_added)
  # Perform graph actions, if any are available
  if (nrow(graph$graph_actions) > 0) {
    graph <-
      graph %>%
      trigger_graph_actions()
  }
  # Write graph backup if the option is set
  if (graph$graph_info$write_backups) {
    save_graph_as_rds(graph = graph)
  }
  graph
}
|
#setwd("~/Projects/DFS")
#load("cleaned_2016_results.Rdata")
analyzePros <- function(username) {
  # Summarize a DraftKings user's 2016 "Milly Maker" results week by week.
  #
  # For each of the 17 NFL weeks this looks up the globally loaded
  # objects `contest_1M_results_wk<week>` (contest standings) and
  # `payout_wk<week>` (payout structure) and computes, for `username`:
  # the number of lineups entered, the best score, the best finishing
  # place, and the week's profit/loss (payouts minus entry fees).
  # Weeks with no loaded data, or in which the user entered no
  # lineups, are reported as NA.
  #
  # Returns a 17-row data frame with columns Week, NumberLineups,
  # MaxScores, BestPlace and PnL; also plots PnL by week (base
  # graphics) as a side effect.

  # Entry fee per week -- the contest buy-in changed over the season
  buyIn <- c(3, rep(20, 8), 44, rep(27, 5), 50, 20)

  # Week groupings by buy-in; currently unused, kept as documentation
  # of the hard-coded schedule above
  wks.20 <- c(2:9, 17)
  wks.27 <- c(11:15)

  returnDataFrame <- as.data.frame(matrix(0, 17, 5))
  names(returnDataFrame) <- c("Week", "NumberLineups", "MaxScores", "BestPlace", "PnL")
  returnDataFrame$Week <- 1:17

  for (week in 1:17) {
    results_name <- paste0("contest_1M_results_wk", week)

    # Skip weeks for which the contest results were never loaded
    if (!exists(results_name)) {
      returnDataFrame[week, 2:5] <- NA
      next
    }

    # Look the weekly objects up by name; get() replaces the previous
    # eval(parse(text = ...)) pattern
    payout.data <- get(paste0("payout_wk", week))
    temp.results <- get(results_name)

    # All of this user's lineups for the week
    temp.user.results <- temp.results[temp.results$User.Name == username, ]
    n_lineups <- nrow(temp.user.results)
    returnDataFrame$NumberLineups[week] <- n_lineups

    if (n_lineups == 0) {
      # The user did not enter this week
      returnDataFrame[week, 3:5] <- NA
    } else {
      returnDataFrame$MaxScores[week] <- max(temp.user.results$Points)
      returnDataFrame$BestPlace[week] <- min(temp.user.results$Rank)

      # PnL starts at the total entry cost (negative), then adds the
      # payout for each lineup whose rank falls in a paying place band
      temp_PnL <- -(buyIn[week] * n_lineups)
      for (lineup in seq_len(n_lineups)) {
        for (j in seq_len(nrow(payout.data))) {
          if (temp.user.results$Rank[lineup] >= payout.data$Place_lo[j] &&
              temp.user.results$Rank[lineup] <= payout.data$Place_hi[j]) {
            temp_PnL <- temp_PnL + payout.data$Payout[j]
            break
          }
        }
      }
      returnDataFrame$PnL[week] <- temp_PnL
    }
  }

  # Quick base-graphics view of weekly PnL with a break-even line
  plot(returnDataFrame$Week, returnDataFrame$PnL, main = username)
  lines(returnDataFrame$Week, returnDataFrame$PnL)
  abline(h = 0, col = "red")

  returnDataFrame
}
# ---- Graph Player Results ---- #
graphPlayersResult <- function(username) {
  # Plot one user's weekly and cumulative 2016 Milly Maker PnL.
  # Weekly figures come from analyzePros(); weeks with no entries
  # (NA PnL) are treated as zero profit in the cumulative line.
  weekly <- analyzePros(username)

  pnl_filled <- weekly$PnL
  pnl_filled[is.na(pnl_filled)] <- 0
  running_total <- cumsum(pnl_filled)

  ggplot(weekly, aes(y = PnL, x = Week)) +
    geom_point(aes(color = PnL > 0), size = 1.7) +
    geom_abline() +
    geom_line(aes(x = Week, y = running_total)) +
    scale_color_manual(values = c("#cc0000", "#00CC00")) +
    labs(title = paste0(username, "'s 2016 Milly Maker Results")) +
    geom_text_repel(aes(label = PnL), size = 3) +
    annotate("text", x = 18, y = running_total[17], label = running_total[17])
}
# ---- Example: plot a single player's results ----
# NOTE(review): `=` is used for assignment here while the rest of the
# file uses `<-`.
username = "youdacao"
graphPlayersResult(username)
### Graph Multiple players at the same time.
# Usernames of ten high-profile DFS players to compare
winner1 <- "SaahilSud"
winner2 <- "youdacao"
winner3 <- "CONDIA"
winner4 <- "aejones"
winner5 <- "CSURAM88"
winner6 <- "ehafner"
winner7 <- "BrandonAdams"
winner8 <- "Bales"
winner9 <- "00oreo00"
winner10 <- "ThatStunna"
# One row per NFL week (1-17); column 1 holds the week number and
# columns 2-11 hold each player's cumulative PnL
temp <- as.data.frame(matrix(0,17,11))
names(temp) <- c("Week", winner1, winner2, winner3, winner4, winner5, winner6, winner7, winner8, winner9, winner10)
temp$Week <- 1:17
# Fill each player's column with cumulative PnL; NA weeks (no entries)
# are treated as zero profit before cumulating
for (i in 1:10) {
  df <- analyzePros(names(temp)[i+1])
  cumsumCalc <- df$PnL
  cumsumCalc[is.na(cumsumCalc)]<-0
  temp[,i+1] <- cumsum(cumsumCalc)
}
# Overlay one cumulative-PnL line per player.
# NOTE(review): winner10 ("ThatStunna") is computed above but never
# plotted, and the "Set1" brewer palette supplies at most 9 colors --
# confirm whether the omission is intentional.
ggplot(temp) +
  geom_line(aes(y = SaahilSud, x = Week, color = "SaahilSud")) +
  geom_line(aes(y = CONDIA, x = Week, color = "CONDIA")) +
  geom_line(aes(y = aejones, x = Week, color = "aejones")) +
  geom_line(aes(y = CSURAM88, x = Week, color = "CSURAM88")) +
  geom_line(aes(y = ehafner, x = Week, color = "ehafner")) +
  geom_line(aes(y = BrandonAdams, x = Week, color = "BrandonAdams")) +
  geom_line(aes(y = youdacao, x = Week, color = "youdacao")) +
  geom_line(aes(y = Bales, x = Week, color = "Bales")) +
  geom_line(aes(y = `00oreo00`, x = Week, color = "00oreo00")) +
  scale_colour_brewer(palette = "Set1") +
  geom_abline() +
  labs(title=paste0("Pros 2016 Milly Maker Results")) +
ylab("Profit") | /NFL/resultsAnalysis/analyze_pros/analyzePros_function.R | no_license | alandu20/dailyfantasy | R | false | false | 5,008 | r | #setwd("~/Projects/DFS")
#load("cleaned_2016_results.Rdata")
analyzePros <- function(username) {
  # Summarize a DraftKings user's 2016 "Milly Maker" results week by week.
  #
  # For each of the 17 NFL weeks this looks up the globally loaded
  # objects `contest_1M_results_wk<week>` (contest standings) and
  # `payout_wk<week>` (payout structure) and computes, for `username`:
  # the number of lineups entered, the best score, the best finishing
  # place, and the week's profit/loss (payouts minus entry fees).
  # Weeks with no loaded data, or in which the user entered no
  # lineups, are reported as NA.
  #
  # Returns a 17-row data frame with columns Week, NumberLineups,
  # MaxScores, BestPlace and PnL; also plots PnL by week (base
  # graphics) as a side effect.

  # Entry fee per week -- the contest buy-in changed over the season
  buyIn <- c(3, rep(20, 8), 44, rep(27, 5), 50, 20)

  # Week groupings by buy-in; currently unused, kept as documentation
  # of the hard-coded schedule above
  wks.20 <- c(2:9, 17)
  wks.27 <- c(11:15)

  returnDataFrame <- as.data.frame(matrix(0, 17, 5))
  names(returnDataFrame) <- c("Week", "NumberLineups", "MaxScores", "BestPlace", "PnL")
  returnDataFrame$Week <- 1:17

  for (week in 1:17) {
    results_name <- paste0("contest_1M_results_wk", week)

    # Skip weeks for which the contest results were never loaded
    if (!exists(results_name)) {
      returnDataFrame[week, 2:5] <- NA
      next
    }

    # Look the weekly objects up by name; get() replaces the previous
    # eval(parse(text = ...)) pattern
    payout.data <- get(paste0("payout_wk", week))
    temp.results <- get(results_name)

    # All of this user's lineups for the week
    temp.user.results <- temp.results[temp.results$User.Name == username, ]
    n_lineups <- nrow(temp.user.results)
    returnDataFrame$NumberLineups[week] <- n_lineups

    if (n_lineups == 0) {
      # The user did not enter this week
      returnDataFrame[week, 3:5] <- NA
    } else {
      returnDataFrame$MaxScores[week] <- max(temp.user.results$Points)
      returnDataFrame$BestPlace[week] <- min(temp.user.results$Rank)

      # PnL starts at the total entry cost (negative), then adds the
      # payout for each lineup whose rank falls in a paying place band
      temp_PnL <- -(buyIn[week] * n_lineups)
      for (lineup in seq_len(n_lineups)) {
        for (j in seq_len(nrow(payout.data))) {
          if (temp.user.results$Rank[lineup] >= payout.data$Place_lo[j] &&
              temp.user.results$Rank[lineup] <= payout.data$Place_hi[j]) {
            temp_PnL <- temp_PnL + payout.data$Payout[j]
            break
          }
        }
      }
      returnDataFrame$PnL[week] <- temp_PnL
    }
  }

  # Quick base-graphics view of weekly PnL with a break-even line
  plot(returnDataFrame$Week, returnDataFrame$PnL, main = username)
  lines(returnDataFrame$Week, returnDataFrame$PnL)
  abline(h = 0, col = "red")

  returnDataFrame
}
# ---- Graph Player Results ---- #
graphPlayersResult <- function(username) {
  # Plot one user's weekly and cumulative 2016 Milly Maker PnL.
  # Weekly figures come from analyzePros(); weeks with no entries
  # (NA PnL) are treated as zero profit in the cumulative line.
  weekly <- analyzePros(username)

  pnl_filled <- weekly$PnL
  pnl_filled[is.na(pnl_filled)] <- 0
  running_total <- cumsum(pnl_filled)

  ggplot(weekly, aes(y = PnL, x = Week)) +
    geom_point(aes(color = PnL > 0), size = 1.7) +
    geom_abline() +
    geom_line(aes(x = Week, y = running_total)) +
    scale_color_manual(values = c("#cc0000", "#00CC00")) +
    labs(title = paste0(username, "'s 2016 Milly Maker Results")) +
    geom_text_repel(aes(label = PnL), size = 3) +
    annotate("text", x = 18, y = running_total[17], label = running_total[17])
}
# ---- Example: plot a single player's results ----
# NOTE(review): `=` is used for assignment here while the rest of the
# file uses `<-`.
username = "youdacao"
graphPlayersResult(username)
### Graph Multiple players at the same time.
# Usernames of ten high-profile DFS players to compare
winner1 <- "SaahilSud"
winner2 <- "youdacao"
winner3 <- "CONDIA"
winner4 <- "aejones"
winner5 <- "CSURAM88"
winner6 <- "ehafner"
winner7 <- "BrandonAdams"
winner8 <- "Bales"
winner9 <- "00oreo00"
winner10 <- "ThatStunna"
# One row per NFL week (1-17); column 1 holds the week number and
# columns 2-11 hold each player's cumulative PnL
temp <- as.data.frame(matrix(0,17,11))
names(temp) <- c("Week", winner1, winner2, winner3, winner4, winner5, winner6, winner7, winner8, winner9, winner10)
temp$Week <- 1:17
# Fill each player's column with cumulative PnL; NA weeks (no entries)
# are treated as zero profit before cumulating
for (i in 1:10) {
  df <- analyzePros(names(temp)[i+1])
  cumsumCalc <- df$PnL
  cumsumCalc[is.na(cumsumCalc)]<-0
  temp[,i+1] <- cumsum(cumsumCalc)
}
# Overlay one cumulative-PnL line per player.
# NOTE(review): winner10 ("ThatStunna") is computed above but never
# plotted, and the "Set1" brewer palette supplies at most 9 colors --
# confirm whether the omission is intentional.
ggplot(temp) +
  geom_line(aes(y = SaahilSud, x = Week, color = "SaahilSud")) +
  geom_line(aes(y = CONDIA, x = Week, color = "CONDIA")) +
  geom_line(aes(y = aejones, x = Week, color = "aejones")) +
  geom_line(aes(y = CSURAM88, x = Week, color = "CSURAM88")) +
  geom_line(aes(y = ehafner, x = Week, color = "ehafner")) +
  geom_line(aes(y = BrandonAdams, x = Week, color = "BrandonAdams")) +
  geom_line(aes(y = youdacao, x = Week, color = "youdacao")) +
  geom_line(aes(y = Bales, x = Week, color = "Bales")) +
  geom_line(aes(y = `00oreo00`, x = Week, color = "00oreo00")) +
  scale_colour_brewer(palette = "Set1") +
  geom_abline() +
  labs(title=paste0("Pros 2016 Milly Maker Results")) +
ylab("Profit") |
# λ°μ΄ν° λΆμκ° _ james \
# \
# μ€ν¬λ¦½νΈ μ€ν(Run a script) \
## : Windows : 'Ctrl + Enter' \
## : MAC : 'Command + Enter'\
#---------------------------------
##0 μμ
# plot ν¨μλ‘ μκΉ μ μ°κΈ°
plot(0,0, pch=16, cex=10, col='black')
plot(0,0, pch=16, cex=10, col='pink')
plot(0,0, pch=16, cex=10, col='dodgerblue')
## In general, the color can be changed with the "col=" option
## See the link below for the available color names
## http://www.stat.columbia.edu/~tzheng/files/Rcolor.pdf
# rgb( ) ν¨μμ "#RRGGBB" HEXμ½λ νμ©
rgb( 0/255, 70/255, 42/255)
## Ewha Green
plot(0,0, pch=16, cex=10, col='#00462A')
# RColorBrewer ν¨ν€μ§μ νμ©
install.packages('RColorBrewer')
library(RColorBrewer)
## http://colorbrewer2.org/
# ν¨ν€μ§ λ΄ λͺ¨λ μμμ‘°ν© νμΈ
display.brewer.all()
## μμμ‘°ν© μ΄λ¦ νμΈ
brewer.pal(9, 'Set1')
brewer.pal(9, 'Blues')
brewer.pal(9, 'YlGnBu')
brewer.pal(9, 'Spectral')
##1 ggplot2 ν¨ν€μ§λ₯Ό νμ©ν μκ°ν
# ggplot2 ν¨ν€μ§ μ€μΉ, λΆλ¬μ€κΈ°
install.packages('ggplot2')
library(ggplot2)
# λ°μ΄ν° μμ½/μ²λ¦¬λ₯Ό μν ν¨ν€μ§λ λΆλ¬μ€κΈ°
library(dplyr)
library(tidyr)
install.packages("gapminder")
library(gapminder)
data(gapminder)
data1 <- gapminder[gapminder$year=="2007",]
####################
## 1. 그릴 λΆλΆμ λμμ§λ₯Ό κ·Έλ €λ³Έλ€. (aes(x = , y=))
ggplot(data1) +
aes(x = gdpPercap) + #xμΆ μ§μ
aes(y = lifeExp) #yμΆ μ§μ
# μ΄λ κ² νλ²μ 그릴 μ μλ€.
# ggplot(data1,aes(x=gdpPercap,y=lifeExp))
####################
## 2. κ·Έλ¦Όμ μ ννλ€. +geom_point
ggplot(data1) +
aes(x = gdpPercap) + #xμΆ μ§μ
aes(y = lifeExp) + #yμΆ μ§μ
geom_point() #λνλΌ κ·Έλ¦Ό
####################
## 3. κ·Έλ¦Όμ κΎΈλ©°μ€λ€
## 3-1 μμ μ§μ νλ€ aes(color = )
ggplot(data1) +
aes(x = gdpPercap) + #xμΆ μ§μ
aes(y = lifeExp) + #yμΆ μ§μ
geom_point() + #λνλΌ κ·Έλ¦Ό
aes(color = continent) #μ μ§μ
#κ°μ νν
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp, color=continent)) + geom_point()
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp)) + geom_point(aes(color=continent))
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp)) + geom_point(color = "red")
## λΆκ°λ₯
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp, color = "red")) + geom_point()
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp, fill = continent)) + geom_point()
####################
## 3-2 λͺ¨μ μ§μ νλ€ aes(shape = )
ggplot(data1) +
aes(x = gdpPercap) + #xμΆ μ§μ
aes(y = lifeExp) + #yμΆ μ§μ
geom_point() + #λνλΌ κ·Έλ¦Ό
aes(color = continent) + #μ μ§μ
aes(shape = continent) #λͺ¨μ μ§μ
# κ°μνν
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp, color = continent, shape = continent)) + geom_point()
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp, color = continent)) + geom_point(aes(shape = continent))
# νΉμ λͺ¨μ μ§μ
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp, color = continent)) + geom_point(shape = 3)
####################
## 3-3 ν¬κΈ° μ§μ νλ€ aes(size = )
ggplot(data1) +
aes(x = gdpPercap) + #xμΆ μ§μ
aes(y = lifeExp) + #yμΆ μ§μ
geom_point() + #λνλΌ κ·Έλ¦Ό
aes(color = continent) + #μ μ§μ
aes(shape = continent) + #λͺ¨μ μ§μ
aes(size = pop) #ν¬κΈ° μ§μ
# κ°μνν
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp, color = continent, shape = continent, size = pop)) + geom_point()
# νΉμ ν¬κΈ° μ§μ
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp, color = continent, shape = continent)) + geom_point(size = 3)
####################
## 3-4 Set the transparency with aes(alpha = )
ggplot(data1) +
aes(x = gdpPercap) + #xμΆ μ§μ
aes(y = lifeExp) + #yμΆ μ§μ
geom_point() + #λνλΌ κ·Έλ¦Ό
aes(color = continent) + #μ μ§μ
aes(shape = continent) + #λͺ¨μ μ§μ
aes(size = pop) + #ν¬κΈ° μ§μ
  aes(alpha = lifeExp) # transparency
# κ°μνν
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp, color = continent, shape = continent, size = pop, alpha = lifeExp)) + geom_point()
# νΉμ ν¬κΈ° μ§μ
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp, color = continent, shape = continent, size = pop)) + geom_point(alpha = 0.5)
###########################################
############### μ°μ΅ν΄λ³΄κΈ° ###############
###########################################
head(insurance)
#1. bmiμ λ°λΌμ chargesκ° μ΄λ»κ² λ³νλμ§ μ κ·Έλνλ₯Ό 그리μμ€
## regionμ μμΌλ‘ μ§μ
## sexλ₯Ό λͺ¨μμΌλ‘ μ§μ
## transparency of 0.7
#2. ageμ λ°λΌμ chargesκ° μ΄λ»κ² λ³νλμ§ μ κ·Έλνλ₯Ό 그리μμ€
## bmi μμΌλ‘ μ§μ
## smokerλ₯Ό λͺ¨μμΌλ‘ μ§μ
###########################################
## λ§λκ·Έλν
######################### λ§λκ·Έλν
#1. λνμ§ κ·Έλ¦¬κΈ°
ggplot(data1) +
aes(x = continent) # xμΆ μ§μ
#2. 그림 그리기
ggplot(data1) +
aes(x = continent) + # xμΆ μ§μ
geom_bar() # λ§λκ·Έλν 그리기
#3. κΎΈλ―ΈκΈ°
ggplot(data1) +
aes(x = continent) + # xμΆ μ§μ
geom_bar() + # λ§λκ·Έλν 그리기
aes(fill = continent) # μ 체μ
ggplot(data1) +
aes(x = continent) + # xμΆ μ§μ
geom_bar() + # λ§λκ·Έλν 그리기
aes(fill = continent) + # μ 체μ
scale_fill_brewer(palette='Set1') #νλ νΈ μ¬μ©νκΈ°
#######
# μ£Όμ!
# λ§λκ·Έλνλ colorμ΄ μλ fillλ‘ μ¬μ©!
ggplot(data1) +
aes(x = continent) + # xμΆ μ§μ
geom_bar() + # λ§λκ·Έλν 그리기
aes(color = continent) # κ°λ³μ
#######
##### xμ yλ₯Ό λͺ¨λ μ§μ ν΄μ£Όκ³ μΆμΌλ©΄? stat = "identity"
ggplot(data1) +
aes(x = continent) + # xμΆ μ§μ
aes(y = lifeExp) + # yμΆ μ§μ
geom_bar(stat = "identity") + # λ§λκ·Έλν x,yμΆ
aes(fill = continent) # μ 체μ
# μ£Όμ
ggplot(data1) +
aes(x = continent) + # xμΆ μ§μ
aes(y = lifeExp) + # yμΆ μ§μ
geom_bar(stat = "identity") + # λ§λκ·Έλν x,yμΆ
aes(color = continent) # μ 체μ
##### λ°μ΄ν° μ μ²λ¦¬μ λ§λ μ°¨νΈ κ·Έλ¦¬κΈ°
# continent λ§λ€ νκ· μ κ·Έλ¦¬κ³ μΆμΌλ©΄??
data1 %>%
group_by(continent) %>%
summarise(mean = mean(lifeExp))
data1 %>%
group_by(continent) %>%
dplyr::summarise(mean = mean(lifeExp)) %>%
ggplot() +
aes(x = continent) +
aes(y = mean) +
geom_bar(stat = "identity") +
aes(fill = continent) +
aes(alpha = 0.7)
# λλ μ 그리λ λ°©λ²!
gapminder %>%
filter(year %in% c(2002,2007)) %>%
group_by(continent,year) %>%
dplyr::summarise(mean = mean(lifeExp)) %>%
ggplot() +
aes(x = continent) +
aes(y = mean) +
geom_bar(stat = "identity") +
aes(color = continent) +
aes(fill = continent) +
facet_grid(~year) # νΉμ λ³μλ‘ κ΅¬λΆν΄μ κ·Έλ¦¬κ³ μΆλ€λ©΄?
###########################################
############### μ°μ΅ν΄λ³΄κΈ° ###############
###########################################
head(insurance)
#1. insurance λ°μ΄ν°μμ regionλ³ μ€μκ°μ ꡬνν λ§λκ·Έλνλ₯Ό 그리μκ³
## regionμ μμΌλ‘ μ§μ
## transparency of 0.7
#2. insurance λ°μ΄ν°μμ sex, smokerλ³ μ€μκ°μ ꡬνν λ§λκ·Έλνλ₯Ό 그리μκ³
## xμΆμ smokerμ΄λ©° sexλ₯Ό μμΌλ‘ ꡬλΆ
## regionμ μμΌλ‘ μ§μ
## transparency of 0.7
######################### λ°μ€ κ·Έλν geom_boxplot()
gapminder %>%
ggplot(aes(x=continent, y= lifeExp)) +
geom_boxplot()
gapminder %>%
ggplot(aes(x=continent, y= lifeExp, fill= continent)) +
geom_boxplot()
gapminder %>%
ggplot(aes(x=continent, y= lifeExp, fill= continent)) +
geom_boxplot(alpha = 0.5)
# μ£Όμ! μμ½μ ν λ°μ΄ν°λ₯Ό μ¬μ©νμ§ μλλ€!
gapminder %>%
group_by(continent) %>%
dplyr::summarise(mean = mean(lifeExp)) %>%
ggplot(aes(x=continent, y= mean, fill= continent)) + geom_boxplot()
######################### νμ€ν κ·Έλ¨ geom_boxplot()
gapminder %>%
ggplot(aes(x=lifeExp)) +
geom_histogram()
gapminder %>%
ggplot(aes(x=lifeExp)) +
geom_histogram() +
facet_grid(~continent)
######################### μ κ·Έλν
gapminder %>%
group_by(year) %>%
summarise(sum = sum(lifeExp))
gapminder %>%
group_by(year) %>%
dplyr::summarise(sum = sum(lifeExp)) %>%
ggplot(aes(x=year,y=sum)) + geom_line()
# μ¬λ¬ κ·Έλ£Ήμ κ·Έλ¦¬κ³ μΆμ κ²½μ°
gapminder %>%
group_by(year,continent) %>%
summarise(mean = mean(lifeExp))
gapminder %>%
group_by(year,continent) %>%
dplyr::summarise(mean = mean(lifeExp)) %>%
ggplot(aes(x=year, y=mean , group=continent ,color= continent)) + geom_line()
###########################################
############### μ°μ΅ν΄λ³΄κΈ° ###############
###########################################
#1 insuranceλ°μ΄ν°μμ childrenμ΄ 0λ³΄λ€ ν¬λ©΄ 1, 0μ΄λ©΄ 0μΈ λ³μ ch_dataλ₯Ό λ§λμμ€
#2 insuranceλ°μ΄ν°λ₯Ό νμ©ν΄μ λ§λκ·Έλνλ₯Ό 그리μμ€
## xμΆμ region yμΆμ chargesμ΄λ©° ch_dataλ₯Ό μμΌλ‘ ꡬλΆ
#3 insuranceλ°μ΄ν°λ₯Ό νμ©ν΄μ λ§λκ·Έλνλ₯Ό 그리μμ€
## xμΆμ charges ch_dataλ₯Ό μμΌλ‘ ꡬλΆ
## regionλ§λ€ 4κ°μ κ·Έλνλ₯Ό 그리μμ€
#4 insuranceλ°μ΄ν°λ₯Ό νμ©ν΄μ λ§λκ·Έλνλ₯Ό 그리μμ€
## xμΆμ region yμΆμ chargesμ΄λ©° ch_dataλ₯Ό μμΌλ‘ ꡬλΆ
## (λμ λ§λκ·Έλνμ ch_dataλ³ λΉκ΅ λ§λκ·Έλν)
### ggplot μΆκ°
HR <- read.csv("HR_comma_sep.csv")
HR$left = as.factor(HR$left)
HR$salary = factor(HR$salary,levels = c("low","medium","high"))
# satisfaction_level : job satisfaction
# last_evaluation : score on the last evaluation
# number_project : number of projects in progress
# average_monthly_hours : average monthly working hours
# time_spend_company : years of service
# work_accident : workplace-accident flag (0: none, 1: had one; nominal)
# left : attrition flag (0: stayed, 1: left; nominal)
# promotion_last_5years: promoted within the last 5 years (0: no, 1: yes; nominal)
# sales : department
# salary : salary level
#####################
### Change themes ###
#####################
library(ggthemes)
# Classic Theme
ggplot(HR,aes(x=salary)) +
geom_bar(aes(fill=salary)) +
theme_classic()
# BW Theme
ggplot(HR,aes(x=salary)) +
geom_bar(aes(fill=salary)) +
theme_bw()
Graph = ggplot(HR,aes(x=salary)) +
geom_bar(aes(fill=salary))
## Various theme changes via the ggthemes package
Graph + theme_bw() + ggtitle("Theme_bw")
Graph + theme_classic() + ggtitle("Theme_classic")
Graph + theme_dark() + ggtitle("Theme_dark")
Graph + theme_light() + ggtitle("Theme_light")
Graph + theme_linedraw() + ggtitle("Theme_linedraw")
Graph + theme_minimal() + ggtitle("Theme_minimal")
Graph + theme_test() + ggtitle("Theme_test")
Graph + theme_void() + ggtitle("Theme_vold")
#####################
### λ²λ‘μ λͺ© μμ ###
#####################
ggplot(HR,aes(x=salary)) +
geom_bar(aes(fill=salary)) +
theme_bw() +
labs(fill = "λ²λ‘ μ λͺ© μμ (fill)")
# Legend placement settings
Graph + theme(legend.position = "top")
Graph + theme(legend.position = "bottom")
Graph + theme(legend.position = c(0.9,0.7))
Graph + theme(legend.position = 'none')
#####################
### μΆ λ³κ²½ ###
#####################
# μ΄μ°ν - deiscreate()
# μ°μν - continuous()
ggplot(HR,aes(x = salary)) +
geom_bar(aes(fill = salary)) +
theme_bw() +
scale_x_discrete(labels = c("ν","μ€","μ")) +
scale_y_continuous(breaks = seq(0,8000,by = 1000))
ggplot(HR,aes(x = salary)) +
geom_bar(aes(fill = salary)) +
theme_bw() +
scale_x_discrete(labels = c("ν","μ€","μ")) +
scale_y_continuous(breaks = seq(0,8000,by = 1000)) +
scale_fill_discrete(labels = c("ν","μ€","μ"))
ggplot(HR,aes(x = salary)) +
geom_bar(aes(fill = salary)) +
theme_bw() +
ylim(0,5000)
ggplot(HR,aes(x = salary)) +
geom_bar(aes(fill = salary)) +
theme_bw() +
ylim(0,13000)
#####################
### μ λ³κ²½ ###
#####################
ggplot(HR,aes(x = salary)) +
geom_bar(aes(fill = salary)) +
theme_bw() +
scale_fill_manual(values = c('red','royalblue','tan'))
ggplot(HR,aes(x = salary)) +
geom_bar(aes(fill = salary), alpha = 0.4) +
theme_bw() +
scale_fill_manual(values = c('red','royalblue','tan'))
#####################
### κΈμν¬κΈ°,κ°λ μμ ###
#####################
# coord_flip() : λμΉ κ·Έλν
# theme_bw : κΈμ체 μμ
ggplot(HR,aes(x = salary)) +
geom_bar(aes(fill = salary), alpha = 0.4) +
theme_bw() +
scale_fill_manual(values = c('red','royalblue','tan')) +
coord_flip()
ggplot(HR,aes(x = salary)) +
geom_bar(aes(fill = salary)) +
theme_bw() +
scale_fill_manual(values = c('red','royalblue','tan')) +
coord_flip() +
theme(legend.position = 'none',
axis.text.x = element_text(size = 15,angle = 90),
axis.text.y = element_text(size = 15),
legend.text = element_text(size = 15))
# Commands that draw horizontal, vertical, and diagonal lines on a plot
ggplot(NULL) +
geom_vline(xintercept = 10,
col = 'royalblue', size = 2) +
geom_hline(yintercept = 10, linetype = 'dashed',
col = 'royalblue', size = 2) +
geom_abline(intercept = 0, slope = 1, col = 'red',
size = 2) +
theme_bw()
#### μΆκ° μ μ©ν κ·Έλν
###################μ΄μ§λ(heatmap)
# λ°μ΄ν° μμ½
agg2 = insurance %>%
mutate(bmi_grp = cut(bmi,
breaks=c(0,30,35,40,100),
labels=c('G1','G2','G3','G4'))) %>%
group_by(region, bmi_grp) %>%
summarise(Q90 = quantile(charges, 0.9))
quantile(iris$Sepal.Width,0.5) #μ€μμ
quantile(iris$Sepal.Width,0.7) #70%
## quantile( , q) : q*100% κ° κ³μ°
agg2 %>%
ggplot(aes(x=region, y=bmi_grp, fill=Q90)) +
geom_tile()
# μμ μ§μ
agg2 %>%
ggplot(aes(x=region, y=bmi_grp, fill=Q90)) +
geom_tile() +
scale_fill_gradient(low='white', high='#FF6600')
agg2 %>%
ggplot(aes(x=region, y=bmi_grp, fill=Q90)) +
geom_tile() +
scale_fill_distiller(palette='YlGnBu')
###########################################
############### μ°μ΅ν΄λ³΄κΈ° ###############
###########################################
# (μ€μ΅) NHISμμ AGE_GROUP, DSBJT_CDλ³ EDEC_TRAMT νκ· κ³μ° ν μ μ₯
# μ μ₯λ λ°μ΄ν°λ‘ μ΄μ§λ μκ°ν
###########################################
# tidyr + dplyr + ggplotμ νλ²μ
# λ°μ΄ν° λΆλ¬μ€κΈ°
## μλ³νΈκ° 150μΈ μμΈμ λ°μ΄ν°
library(openxlsx)
subway_2017 = read.xlsx('subway_1701_1709.xlsx')
names(subway_2017)[6:25] <- paste0('H', substr(names(subway_2017)[6:25], 1, 2))
head(subway_2017)
# gather( ) ν¨μλ₯Ό νμ©νμ¬ H05λΆν° H24κΉμ§ λ³μλ₯Ό λͺ¨μ
# 'μκ°λ'μ 'μΉκ°μ'μΌλ‘ ꡬλΆνλ λ°μ΄ν° subway2 λ§λ€κΈ°
subway2 = gather(subway_2017, μκ°λ, μΉκ°μ, H05:H24)
## μμμ λ§λ subway2 λ°μ΄ν°μ dplyr ν¨ν€μ§λ₯Ό νμ©νμ¬
# Compute the total passenger count by station name and time slot
# (sorted in descending order of the total)
subway2 %>%
group_by(μλͺ
, μκ°λ) %>%
summarise(SUM = sum(μΉκ°μ)) %>%
arrange(desc(SUM))
### μ΄λ¬ν tidyrμ ν΅ν΄μ λ°μ΄ν°λ₯Ό μκ°ννκΈ°
### μκ°λλ³λ‘ μΉκ° ν©κ³ λ§λμ°¨νΈλ‘ κ·Έλ €λ³΄κΈ°!
# options("scipen" = 100)
| /D_r λ°μ΄ν°κ³Όν μλ£λͺ¨μ/drive-download-20201230T075655Z-001/6day/λλ
Έ_6day.R | no_license | mwithgod3952/saving-passing-file | R | false | false | 15,850 | r | # λ°μ΄ν° λΆμκ° _ james \
# \
# μ€ν¬λ¦½νΈ μ€ν(Run a script) \
## : Windows : 'Ctrl + Enter' \
## : MAC : 'Command + Enter'\
#---------------------------------
##0 μμ
# plot ν¨μλ‘ μκΉ μ μ°κΈ°
plot(0,0, pch=16, cex=10, col='black')
plot(0,0, pch=16, cex=10, col='pink')
plot(0,0, pch=16, cex=10, col='dodgerblue')
## μΌλ°μ μΌλ‘ "col=" μ΅μ
μΌλ‘ μμ λ³κ²½ κ°λ₯
## μμμ΄λ¦μ μλ μ°Έκ³
## http://www.stat.columbia.edu/~tzheng/files/Rcolor.pdf
# rgb( ) ν¨μμ "#RRGGBB" HEXμ½λ νμ©
rgb( 0/255, 70/255, 42/255)
## Ewha Green
plot(0,0, pch=16, cex=10, col='#00462A')
# RColorBrewer ν¨ν€μ§μ νμ©
install.packages('RColorBrewer')
library(RColorBrewer)
## http://colorbrewer2.org/
# ν¨ν€μ§ λ΄ λͺ¨λ μμμ‘°ν© νμΈ
display.brewer.all()
## μμμ‘°ν© μ΄λ¦ νμΈ
brewer.pal(9, 'Set1')
brewer.pal(9, 'Blues')
brewer.pal(9, 'YlGnBu')
brewer.pal(9, 'Spectral')
##1 ggplot2 ν¨ν€μ§λ₯Ό νμ©ν μκ°ν
# ggplot2 ν¨ν€μ§ μ€μΉ, λΆλ¬μ€κΈ°
install.packages('ggplot2')
library(ggplot2)
# λ°μ΄ν° μμ½/μ²λ¦¬λ₯Ό μν ν¨ν€μ§λ λΆλ¬μ€κΈ°
library(dplyr)
library(tidyr)
install.packages("gapminder")
library(gapminder)
data(gapminder)
data1 <- gapminder[gapminder$year=="2007",]
####################
## 1. 그릴 λΆλΆμ λμμ§λ₯Ό κ·Έλ €λ³Έλ€. (aes(x = , y=))
ggplot(data1) +
aes(x = gdpPercap) + #xμΆ μ§μ
aes(y = lifeExp) #yμΆ μ§μ
# μ΄λ κ² νλ²μ 그릴 μ μλ€.
# ggplot(data1,aes(x=gdpPercap,y=lifeExp))
####################
## 2. κ·Έλ¦Όμ μ ννλ€. +geom_point
ggplot(data1) +
aes(x = gdpPercap) + #xμΆ μ§μ
aes(y = lifeExp) + #yμΆ μ§μ
geom_point() #λνλΌ κ·Έλ¦Ό
####################
## 3. κ·Έλ¦Όμ κΎΈλ©°μ€λ€
## 3-1 μμ μ§μ νλ€ aes(color = )
ggplot(data1) +
aes(x = gdpPercap) + #xμΆ μ§μ
aes(y = lifeExp) + #yμΆ μ§μ
geom_point() + #λνλΌ κ·Έλ¦Ό
aes(color = continent) #μ μ§μ
#κ°μ νν
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp, color=continent)) + geom_point()
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp)) + geom_point(aes(color=continent))
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp)) + geom_point(color = "red")
## λΆκ°λ₯
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp, color = "red")) + geom_point()
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp, fill = continent)) + geom_point()
####################
## 3-2 λͺ¨μ μ§μ νλ€ aes(shape = )
ggplot(data1) +
aes(x = gdpPercap) + #xμΆ μ§μ
aes(y = lifeExp) + #yμΆ μ§μ
geom_point() + #λνλΌ κ·Έλ¦Ό
aes(color = continent) + #μ μ§μ
aes(shape = continent) #λͺ¨μ μ§μ
# κ°μνν
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp, color = continent, shape = continent)) + geom_point()
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp, color = continent)) + geom_point(aes(shape = continent))
# νΉμ λͺ¨μ μ§μ
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp, color = continent)) + geom_point(shape = 3)
####################
## 3-3 ν¬κΈ° μ§μ νλ€ aes(size = )
ggplot(data1) +
aes(x = gdpPercap) + #xμΆ μ§μ
aes(y = lifeExp) + #yμΆ μ§μ
geom_point() + #λνλΌ κ·Έλ¦Ό
aes(color = continent) + #μ μ§μ
aes(shape = continent) + #λͺ¨μ μ§μ
aes(size = pop) #ν¬κΈ° μ§μ
# κ°μνν
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp, color = continent, shape = continent, size = pop)) + geom_point()
# νΉμ ν¬κΈ° μ§μ
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp, color = continent, shape = continent)) + geom_point(size = 3)
####################
## 3-4 ν¬λͺ
λλ₯Ό μ§μ νλ€ aes(alpha = )
ggplot(data1) +
aes(x = gdpPercap) + #xμΆ μ§μ
aes(y = lifeExp) + #yμΆ μ§μ
geom_point() + #λνλΌ κ·Έλ¦Ό
aes(color = continent) + #μ μ§μ
aes(shape = continent) + #λͺ¨μ μ§μ
aes(size = pop) + #ν¬κΈ° μ§μ
aes(alpha = lifeExp) #ν¬λͺ
λ
# κ°μνν
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp, color = continent, shape = continent, size = pop, alpha = lifeExp)) + geom_point()
# νΉμ ν¬κΈ° μ§μ
data1 %>% ggplot(aes(x=gdpPercap, y=lifeExp, color = continent, shape = continent, size = pop)) + geom_point(alpha = 0.5)
###########################################
############### μ°μ΅ν΄λ³΄κΈ° ###############
###########################################
head(insurance)
#1. bmiμ λ°λΌμ chargesκ° μ΄λ»κ² λ³νλμ§ μ κ·Έλνλ₯Ό 그리μμ€
## regionμ μμΌλ‘ μ§μ
## sexλ₯Ό λͺ¨μμΌλ‘ μ§μ
## ν¬λͺ
λλ 0.7
#2. ageμ λ°λΌμ chargesκ° μ΄λ»κ² λ³νλμ§ μ κ·Έλνλ₯Ό 그리μμ€
## bmi μμΌλ‘ μ§μ
## smokerλ₯Ό λͺ¨μμΌλ‘ μ§μ
###########################################
## λ§λκ·Έλν
######################### λ§λκ·Έλν
#1. λνμ§ κ·Έλ¦¬κΈ°
ggplot(data1) +
aes(x = continent) # xμΆ μ§μ
#2. 그림 그리기
ggplot(data1) +
aes(x = continent) + # xμΆ μ§μ
geom_bar() # λ§λκ·Έλν 그리기
#3. κΎΈλ―ΈκΈ°
ggplot(data1) +
aes(x = continent) + # xμΆ μ§μ
geom_bar() + # λ§λκ·Έλν 그리기
aes(fill = continent) # μ 체μ
ggplot(data1) +
aes(x = continent) + # xμΆ μ§μ
geom_bar() + # λ§λκ·Έλν 그리기
aes(fill = continent) + # μ 체μ
scale_fill_brewer(palette='Set1') #νλ νΈ μ¬μ©νκΈ°
#######
# μ£Όμ!
# λ§λκ·Έλνλ colorμ΄ μλ fillλ‘ μ¬μ©!
ggplot(data1) +
aes(x = continent) + # xμΆ μ§μ
geom_bar() + # λ§λκ·Έλν 그리기
aes(color = continent) # κ°λ³μ
#######
##### xμ yλ₯Ό λͺ¨λ μ§μ ν΄μ£Όκ³ μΆμΌλ©΄? stat = "identity"
ggplot(data1) +
aes(x = continent) + # xμΆ μ§μ
aes(y = lifeExp) + # yμΆ μ§μ
geom_bar(stat = "identity") + # λ§λκ·Έλν x,yμΆ
aes(fill = continent) # μ 체μ
# μ£Όμ
ggplot(data1) +
aes(x = continent) + # xμΆ μ§μ
aes(y = lifeExp) + # yμΆ μ§μ
geom_bar(stat = "identity") + # λ§λκ·Έλν x,yμΆ
aes(color = continent) # μ 체μ
##### λ°μ΄ν° μ μ²λ¦¬μ λ§λ μ°¨νΈ κ·Έλ¦¬κΈ°
# continent λ§λ€ νκ· μ κ·Έλ¦¬κ³ μΆμΌλ©΄??
data1 %>%
group_by(continent) %>%
summarise(mean = mean(lifeExp))
data1 %>%
group_by(continent) %>%
dplyr::summarise(mean = mean(lifeExp)) %>%
ggplot() +
aes(x = continent) +
aes(y = mean) +
geom_bar(stat = "identity") +
aes(fill = continent) +
aes(alpha = 0.7)
# λλ μ 그리λ λ°©λ²!
gapminder %>%
filter(year %in% c(2002,2007)) %>%
group_by(continent,year) %>%
dplyr::summarise(mean = mean(lifeExp)) %>%
ggplot() +
aes(x = continent) +
aes(y = mean) +
geom_bar(stat = "identity") +
aes(color = continent) +
aes(fill = continent) +
facet_grid(~year) # νΉμ λ³μλ‘ κ΅¬λΆν΄μ κ·Έλ¦¬κ³ μΆλ€λ©΄?
###########################################
############### μ°μ΅ν΄λ³΄κΈ° ###############
###########################################
head(insurance)
#1. insurance λ°μ΄ν°μμ regionλ³ μ€μκ°μ ꡬνν λ§λκ·Έλνλ₯Ό 그리μκ³
## regionμ μμΌλ‘ μ§μ
## ν¬λͺ
λλ 0.7
#2. insurance λ°μ΄ν°μμ sex, smokerλ³ μ€μκ°μ ꡬνν λ§λκ·Έλνλ₯Ό 그리μκ³
## xμΆμ smokerμ΄λ©° sexλ₯Ό μμΌλ‘ ꡬλΆ
## regionμ μμΌλ‘ μ§μ
## ν¬λͺ
λλ 0.7
######################### Box plot: geom_boxplot()
gapminder %>%
ggplot(aes(x=continent, y= lifeExp)) +
geom_boxplot()
# colour the boxes by continent
gapminder %>%
ggplot(aes(x=continent, y= lifeExp, fill= continent)) +
geom_boxplot()
# add transparency
gapminder %>%
ggplot(aes(x=continent, y= lifeExp, fill= continent)) +
geom_boxplot(alpha = 0.5)
# Caution! A box plot needs the raw data, not pre-summarised data!
# (below, each group is collapsed to a single mean, so no real box can be drawn)
gapminder %>%
group_by(continent) %>%
dplyr::summarise(mean = mean(lifeExp)) %>%
ggplot(aes(x=continent, y= mean, fill= continent)) + geom_boxplot()
######################### Histogram: geom_histogram()
gapminder %>%
ggplot(aes(x=lifeExp)) +
geom_histogram()
# one panel per continent
gapminder %>%
ggplot(aes(x=lifeExp)) +
geom_histogram() +
facet_grid(~continent)
######################### Line chart
gapminder %>%
group_by(year) %>%
summarise(sum = sum(lifeExp))
gapminder %>%
group_by(year) %>%
dplyr::summarise(sum = sum(lifeExp)) %>%
ggplot(aes(x=year,y=sum)) + geom_line()
# drawing several groups as separate lines
gapminder %>%
group_by(year,continent) %>%
summarise(mean = mean(lifeExp))
gapminder %>%
group_by(year,continent) %>%
dplyr::summarise(mean = mean(lifeExp)) %>%
ggplot(aes(x=year, y=mean , group=continent ,color= continent)) + geom_line()
###########################################
############### Exercises #################
###########################################
#1 From the insurance data, create a variable ch_data that is 1 if children > 0 and 0 if children == 0
#2 Using the insurance data, draw a bar chart
## x-axis = region, y-axis = charges; distinguish ch_data by colour
#3 Using the insurance data, draw a bar chart
## x-axis = charges; distinguish ch_data by colour
## draw one panel per region (4 panels)
#4 Using the insurance data, draw a bar chart
## x-axis = region, y-axis = charges; distinguish ch_data by colour
## (stacked bar chart vs side-by-side bars per ch_data)
### more ggplot
HR <- read.csv("HR_comma_sep.csv")
HR$left = as.factor(HR$left)
HR$salary = factor(HR$salary,levels = c("low","medium","high"))
# satisfaction_level : job satisfaction
# last_evaluation : last evaluation score
# number_project : number of projects in progress
# average_monthly_hours : average monthly working hours
# time_spend_company : years at the company
# work_accident : had a work accident (0: no, 1: yes, nominal)
# left : attrition (0: stayed, 1: left, nominal)
# promotion_last_5years: promoted in the last 5 years (0: no, 1: yes, nominal)
# sales : department
# salary : salary level
#####################
### Changing themes ###
#####################
library(ggthemes)
# Classic Theme
ggplot(HR,aes(x=salary)) +
geom_bar(aes(fill=salary)) +
theme_classic()
# BW Theme
ggplot(HR,aes(x=salary)) +
geom_bar(aes(fill=salary)) +
theme_bw()
Graph = ggplot(HR,aes(x=salary)) +
geom_bar(aes(fill=salary))
## trying out the various themes provided by the packages
Graph + theme_bw() + ggtitle("Theme_bw")
Graph + theme_classic() + ggtitle("Theme_classic")
Graph + theme_dark() + ggtitle("Theme_dark")
Graph + theme_light() + ggtitle("Theme_light")
Graph + theme_linedraw() + ggtitle("Theme_linedraw")
Graph + theme_minimal() + ggtitle("Theme_minimal")
Graph + theme_test() + ggtitle("Theme_test")
Graph + theme_void() + ggtitle("Theme_vold")
#####################
### Editing the legend title ###
#####################
ggplot(HR,aes(x=salary)) +
geom_bar(aes(fill=salary)) +
theme_bw() +
labs(fill = "λ²λ‘ μ λͺ© μμ (fill)")
# legend position settings
Graph + theme(legend.position = "top")
Graph + theme(legend.position = "bottom")
Graph + theme(legend.position = c(0.9,0.7))
Graph + theme(legend.position = 'none')
#####################
### Changing the axes ###
#####################
# discrete scale   - scale_*_discrete()
# continuous scale - scale_*_continuous()
ggplot(HR,aes(x = salary)) +
geom_bar(aes(fill = salary)) +
theme_bw() +
scale_x_discrete(labels = c("ν","μ€","μ")) +
scale_y_continuous(breaks = seq(0,8000,by = 1000))
# also relabel the legend keys to match the axis labels
ggplot(HR,aes(x = salary)) +
geom_bar(aes(fill = salary)) +
theme_bw() +
scale_x_discrete(labels = c("ν","μ€","μ")) +
scale_y_continuous(breaks = seq(0,8000,by = 1000)) +
scale_fill_discrete(labels = c("ν","μ€","μ"))
# restricting the y range (bars taller than the limit are dropped with a warning)
ggplot(HR,aes(x = salary)) +
geom_bar(aes(fill = salary)) +
theme_bw() +
ylim(0,5000)
ggplot(HR,aes(x = salary)) +
geom_bar(aes(fill = salary)) +
theme_bw() +
ylim(0,13000)
#####################
### Changing colours ###
#####################
ggplot(HR,aes(x = salary)) +
geom_bar(aes(fill = salary)) +
theme_bw() +
scale_fill_manual(values = c('red','royalblue','tan'))
ggplot(HR,aes(x = salary)) +
geom_bar(aes(fill = salary), alpha = 0.4) +
theme_bw() +
scale_fill_manual(values = c('red','royalblue','tan'))
#####################
### Font size and angle ###
#####################
# coord_flip() : horizontal (flipped) chart
# theme()      : text appearance settings
ggplot(HR,aes(x = salary)) +
geom_bar(aes(fill = salary), alpha = 0.4) +
theme_bw() +
scale_fill_manual(values = c('red','royalblue','tan')) +
coord_flip()
ggplot(HR,aes(x = salary)) +
geom_bar(aes(fill = salary)) +
theme_bw() +
scale_fill_manual(values = c('red','royalblue','tan')) +
coord_flip() +
theme(legend.position = 'none',
axis.text.x = element_text(size = 15,angle = 90),
axis.text.y = element_text(size = 15),
legend.text = element_text(size = 15))
# commands that draw horizontal, vertical and diagonal reference lines
ggplot(NULL) +
geom_vline(xintercept = 10,
col = 'royalblue', size = 2) +
geom_hline(yintercept = 10, linetype = 'dashed',
col = 'royalblue', size = 2) +
geom_abline(intercept = 0, slope = 1, col = 'red',
size = 2) +
theme_bw()
#### more useful charts
################### Heat map
# summarise the data: 90th percentile of charges per region x BMI group
agg2 = insurance %>%
mutate(bmi_grp = cut(bmi,
breaks=c(0,30,35,40,100),
labels=c('G1','G2','G3','G4'))) %>%
group_by(region, bmi_grp) %>%
summarise(Q90 = quantile(charges, 0.9))
quantile(iris$Sepal.Width,0.5) # median
quantile(iris$Sepal.Width,0.7) # 70th percentile
## quantile(x, q) : computes the q*100% quantile
agg2 %>%
ggplot(aes(x=region, y=bmi_grp, fill=Q90)) +
geom_tile()
# choosing the colours
agg2 %>%
ggplot(aes(x=region, y=bmi_grp, fill=Q90)) +
geom_tile() +
scale_fill_gradient(low='white', high='#FF6600')
agg2 %>%
ggplot(aes(x=region, y=bmi_grp, fill=Q90)) +
geom_tile() +
scale_fill_distiller(palette='YlGnBu')
###########################################
############### Exercises #################
###########################################
# (Practice) In NHIS, compute the mean EDEC_TRAMT by AGE_GROUP and DSBJT_CD, then store it
# Visualise the stored data as a heat map
###########################################
# tidyr + dplyr + ggplot all at once
# loading the data
## subway data for station number 150 (Seoul Station)
library(openxlsx)
subway_2017 = read.xlsx('subway_1701_1709.xlsx')
names(subway_2017)[6:25] <- paste0('H', substr(names(subway_2017)[6:25], 1, 2))
head(subway_2017)
# use gather( ) to collect the H05..H24 columns into long format:
# a "time slot" key column and a "passenger count" value column, saved as subway2
# NOTE(review): the Korean column names below are mojibake'd in this copy of
# the file (some split across lines) -- they are kept byte-identical.
subway2 = gather(subway_2017, μκ°λ, μΉκ°μ, H05:H24)
## using the dplyr package on the subway2 data created above:
# total passenger count per station name / time slot,
# sorted by the total in descending order
subway2 %>%
group_by(μλͺ
, μκ°λ) %>%
summarise(SUM = sum(μΉκ°μ)) %>%
arrange(desc(SUM))
### visualise the data tidied with tidyr
### draw the passenger totals per time slot as a bar chart!
# options("scipen" = 100)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.emfrail.R
\name{predict.emfrail}
\alias{predict.emfrail}
\title{Predicted hazard and survival curves from an \code{emfrail} object}
\usage{
\method{predict}{emfrail}(object, newdata = NULL, lp = NULL,
strata = NULL, quantity = c("cumhaz", "survival"),
type = c("conditional", "marginal"), conf_int = c("regular", "adjusted"),
individual = FALSE, ...)
}
\arguments{
\item{object}{An \code{emfrail} fit object}
\item{newdata}{A data frame with the same variable names as those that appear in the \code{emfrail} formula, used to calculate the \code{lp} (optional).}
\item{lp}{A vector of linear predictor values at which to calculate the curves. Default is 0 (baseline).}
\item{strata}{The name of the strata (if applicable) for which the prediction should be made.}
\item{quantity}{Can be \code{"cumhaz"} and/or \code{"survival"}. The quantity to be calculated for the values of \code{lp}.}
\item{type}{Can be \code{"conditional"} and/or \code{"marginal"}. The type of the quantity to be calculated.}
\item{conf_int}{Can be \code{"regular"} and/or \code{"adjusted"}. The type of confidence interval to be calculated.}
\item{individual}{Logical. Are the observations in \code{newdata} from the same individual? See details.}
\item{...}{Ignored}
}
\value{
The return value is a single data frame (if \code{lp} has length 1,
\code{newdata} has 1 row or \code{individual == TRUE}) or a list of data frames corresponding to each value of
\code{lp} or each row of \code{newdata} otherwise.
The names of the columns in the returned data frames are as follows: \code{time} represents the unique event time points
from the data set, \code{lp} is the value of the linear predictor (as specified in the input or as calculated from the lines of \code{newdata}).
By default, for each \code{lp} a data frame will contain the following columns: \code{cumhaz}, \code{survival},
\code{cumhaz_m}, \code{survival_m} for the cumulative hazard and survival, conditional and marginal, with corresponding confidence
bands. The naming of the columns is explained more in the Details section.
}
\description{
Predicted hazard and survival curves from an \code{emfrail} object
}
\details{
The function calculates predicted cumulative hazard and survival curves for given covariate
or linear predictor values; for the first, \code{newdata} must be specified and for the latter
\code{lp} must be specified. Each row of \code{newdata} or element of \code{lp} is considered to be
a different subject, and the desired predictions are produced for each of them separately.
In \code{newdata} two columns may be specified with the names \code{tstart} and \code{tstop}.
In this case, each subject is assumed to be at risk only during the times specified by these two values.
If the two are not specified, the predicted curves are produced for a subject that is at risk for the
whole follow-up time.
A slightly different behaviour is observed if \code{individual == TRUE}. In this case, all the rows of
\code{newdata} are assumed to come from the same individual, and \code{tstart} and \code{tstop} must
be specified, and must not overlap. This may be used for describing subjects that
are not at risk during certain periods or subjects with time-dependent covariate values.
The two "quantities" that can be returned are
named \code{cumhaz} and \code{survival}. If we denote each quantity with \code{q}, then the columns with the marginal estimates
are named \code{q_m}. The confidence intervals contain the name of the quantity (conditional or marginal) followed by \code{_l} or \code{_r} for
the lower and upper bound. The bounds calculated with the adjusted standard errors have the name of the regular bounds followed by
\code{_a}. For example, the adjusted lower bound for the marginal survival is in the column named \code{survival_m_l_a}.
The \code{emfrail} only gives the Breslow estimates of the baseline hazard \eqn{\lambda_0(t)} at the
event time points, conditional on the frailty. Let \eqn{\lambda(t)} be the baseline hazard for a linear predictor of interest.
The estimated conditional cumulative hazard is then
\eqn{\Lambda(t) = \sum_{s= 0}^t \lambda(s)}. The variance of \eqn{\Lambda(t)} can be calculated from the (maybe adjusted)
variance-covariance matrix.
The conditional survival is obtained by the usual expression \eqn{S(t) = \exp(-\Lambda(t))}. The marginal survival
is given by
\deqn{\bar S(t) = E \left[\exp(-\Lambda(t)) \right] = \mathcal{L}(\Lambda(t)),}
i.e. the Laplace transform of the frailty distribution calculated in \eqn{\Lambda(t)}.
The marginal hazard is obtained as \deqn{\bar \Lambda(t) = - \log \bar S(t).}
The only standard errors that are available from \code{emfrail} are those for \eqn{\lambda_0(t)}. From this,
standard errors of \eqn{\log \Lambda(t)} may be calculated. On this scale, the symmetric confidence intervals are built, and then
moved to the desired scale.
}
\note{
The linear predictor is taken as fixed, so the variability in the estimation of the regression coefficient is not taken into account.
Does not support left truncation (at the moment). That is because, if \code{individual == TRUE} and \code{tstart} and \code{tstop} are
specified, for the marginal estimates the distribution of the frailty is used to calculate the integral, and not
the distribution of the frailty given the truncation.
For performance reasons, consider running with \code{conf_int = NULL}; the reason is that the \code{deltamethod} function that is used
to calculate the confidence intervals easily becomes slow when there is a large number of time points
for the cumulative hazard.
}
\examples{
kidney$sex <- ifelse(kidney$sex == 1, "male", "female")
m1 <- emfrail(formula = Surv(time, status) ~ sex + age + cluster(id),
data = kidney)
# get all the possible prediction for the value 0 of the linear predictor
predict(m1, lp = 0)
# get the cumulative hazards for two different values of the linear predictor
predict(m1, lp = c(0, 1), quantity = "cumhaz", conf_int = NULL)
# get the cumulative hazards for a female and for a male, both aged 30
newdata1 <- data.frame(sex = c("female", "male"),
age = c(30, 30))
predict(m1, newdata = newdata1, quantity = "cumhaz", conf_int = NULL)
# get the cumulative hazards for an individual that changes
# sex from female to male at time 40.
newdata2 <- data.frame(sex = c("female", "male"),
age = c(30, 30),
tstart = c(0, 40),
tstop = c(40, Inf))
predict(m1, newdata = newdata2,
individual = TRUE,
quantity = "cumhaz", conf_int = NULL)
}
\seealso{
\code{\link{plot.emfrail}}, \code{\link{autoplot.emfrail}}
}
| /man/predict.emfrail.Rd | no_license | AMeddis/frailtyEM | R | false | true | 6,787 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.emfrail.R
\name{predict.emfrail}
\alias{predict.emfrail}
\title{Predicted hazard and survival curves from an \code{emfrail} object}
\usage{
\method{predict}{emfrail}(object, newdata = NULL, lp = NULL,
strata = NULL, quantity = c("cumhaz", "survival"),
type = c("conditional", "marginal"), conf_int = c("regular", "adjusted"),
individual = FALSE, ...)
}
\arguments{
\item{object}{An \code{emfrail} fit object}
\item{newdata}{A data frame with the same variable names as those that appear in the \code{emfrail} formula, used to calculate the \code{lp} (optional).}
\item{lp}{A vector of linear predictor values at which to calculate the curves. Default is 0 (baseline).}
\item{strata}{The name of the strata (if applicable) for which the prediction should be made.}
\item{quantity}{Can be \code{"cumhaz"} and/or \code{"survival"}. The quantity to be calculated for the values of \code{lp}.}
\item{type}{Can be \code{"conditional"} and/or \code{"marginal"}. The type of the quantity to be calculated.}
\item{conf_int}{Can be \code{"regular"} and/or \code{"adjusted"}. The type of confidence interval to be calculated.}
\item{individual}{Logical. Are the observations in \code{newdata} from the same individual? See details.}
\item{...}{Ignored}
}
\value{
The return value is a single data frame (if \code{lp} has length 1,
\code{newdata} has 1 row or \code{individual == TRUE}) or a list of data frames corresponding to each value of
\code{lp} or each row of \code{newdata} otherwise.
The names of the columns in the returned data frames are as follows: \code{time} represents the unique event time points
from the data set, \code{lp} is the value of the linear predictor (as specified in the input or as calculated from the lines of \code{newdata}).
By default, for each \code{lp} a data frame will contain the following columns: \code{cumhaz}, \code{survival},
\code{cumhaz_m}, \code{survival_m} for the cumulative hazard and survival, conditional and marginal, with corresponding confidence
bands. The naming of the columns is explained more in the Details section.
}
\description{
Predicted hazard and survival curves from an \code{emfrail} object
}
\details{
The function calculates predicted cumulative hazard and survival curves for given covariate
or linear predictor values; for the first, \code{newdata} must be specified and for the latter
\code{lp} must be specified. Each row of \code{newdata} or element of \code{lp} is considered to be
a different subject, and the desired predictions are produced for each of them separately.
In \code{newdata} two columns may be specified with the names \code{tstart} and \code{tstop}.
In this case, each subject is assumed to be at risk only during the times specified by these two values.
If the two are not specified, the predicted curves are produced for a subject that is at risk for the
whole follow-up time.
A slightly different behaviour is observed if \code{individual == TRUE}. In this case, all the rows of
\code{newdata} are assumed to come from the same individual, and \code{tstart} and \code{tstop} must
be specified, and must not overlap. This may be used for describing subjects that
are not at risk during certain periods or subjects with time-dependent covariate values.
The two "quantities" that can be returned are
named \code{cumhaz} and \code{survival}. If we denote each quantity with \code{q}, then the columns with the marginal estimates
are named \code{q_m}. The confidence intervals contain the name of the quantity (conditional or marginal) followed by \code{_l} or \code{_r} for
the lower and upper bound. The bounds calculated with the adjusted standard errors have the name of the regular bounds followed by
\code{_a}. For example, the adjusted lower bound for the marginal survival is in the column named \code{survival_m_l_a}.
The \code{emfrail} only gives the Breslow estimates of the baseline hazard \eqn{\lambda_0(t)} at the
event time points, conditional on the frailty. Let \eqn{\lambda(t)} be the baseline hazard for a linear predictor of interest.
The estimated conditional cumulative hazard is then
\eqn{\Lambda(t) = \sum_{s= 0}^t \lambda(s)}. The variance of \eqn{\Lambda(t)} can be calculated from the (maybe adjusted)
variance-covariance matrix.
The conditional survival is obtained by the usual expression \eqn{S(t) = \exp(-\Lambda(t))}. The marginal survival
is given by
\deqn{\bar S(t) = E \left[\exp(-\Lambda(t)) \right] = \mathcal{L}(\Lambda(t)),}
i.e. the Laplace transform of the frailty distribution calculated in \eqn{\Lambda(t)}.
The marginal hazard is obtained as \deqn{\bar \Lambda(t) = - \log \bar S(t).}
The only standard errors that are available from \code{emfrail} are those for \eqn{\lambda_0(t)}. From this,
standard errors of \eqn{\log \Lambda(t)} may be calculated. On this scale, the symmetric confidence intervals are built, and then
moved to the desired scale.
}
\note{
The linear predictor is taken as fixed, so the variability in the estimation of the regression coefficient is not taken into account.
Does not support left truncation (at the moment). That is because, if \code{individual == TRUE} and \code{tstart} and \code{tstop} are
specified, for the marginal estimates the distribution of the frailty is used to calculate the integral, and not
the distribution of the frailty given the truncation.
For performance reasons, consider running with \code{conf_int = NULL}; the reason is that the \code{deltamethod} function that is used
to calculate the confidence intervals easily becomes slow when there is a large number of time points
for the cumulative hazard.
}
\examples{
kidney$sex <- ifelse(kidney$sex == 1, "male", "female")
m1 <- emfrail(formula = Surv(time, status) ~ sex + age + cluster(id),
data = kidney)
# get all the possible prediction for the value 0 of the linear predictor
predict(m1, lp = 0)
# get the cumulative hazards for two different values of the linear predictor
predict(m1, lp = c(0, 1), quantity = "cumhaz", conf_int = NULL)
# get the cumulative hazards for a female and for a male, both aged 30
newdata1 <- data.frame(sex = c("female", "male"),
age = c(30, 30))
predict(m1, newdata = newdata1, quantity = "cumhaz", conf_int = NULL)
# get the cumulative hazards for an individual that changes
# sex from female to male at time 40.
newdata2 <- data.frame(sex = c("female", "male"),
age = c(30, 30),
tstart = c(0, 40),
tstop = c(40, Inf))
predict(m1, newdata = newdata2,
individual = TRUE,
quantity = "cumhaz", conf_int = NULL)
}
\seealso{
\code{\link{plot.emfrail}}, \code{\link{autoplot.emfrail}}
}
|
#===============================================================================
# 2020-07-31 -- TidyTuesday
# Gentoo penguins
# Ilya Kashnitsky, ilya.kashnitsky@gmail.com
#===============================================================================
# load required packages
library(tidyverse)
library(palmerpenguins)
# data on human birth weight
# http://data.un.org/Data.aspx?q=birth+weight&d=POP&f=tableCode%3a60
# (read_csv reads the zipped UN export directly)
hum <- read_csv("2020-31-gentoo/UNdata_Export_20200731_154711593.zip") %>%
janitor::clean_names()
# quick check: total counts inside vs outside the 3000--3999 g range
hum %>%
filter(!birth_weight=="Total") %>%
mutate(bw_3_4 = birth_weight %in% c("3000 - 3499", "3500 - 3999")) %>%
group_by(bw_3_4) %>%
summarise(prop = value %>% sum) %>%
pull(prop)
# pooled distribution of human birth weight, re-binned into coarser groups
hum %>%
filter(!birth_weight=="Total") %>%
separate(birth_weight, into = c("lower", "upper"), sep = " - ") %>%
drop_na() %>%
# est = midpoint of each original weight interval
mutate(est = (as.numeric(lower)+as.numeric(upper))/2) %>%
group_by(est) %>%
summarise(prop = value %>% sum) %>%
# collapse the nine interval midpoints into six display groups;
# NOTE(review): this positional mapping assumes the rows come out sorted
# ascending by `est` (group_by/summarise sorts the key) -- confirm if the
# UN export ever changes its bins.
mutate(
weight_group = c(
"1,500 and less", "1,500 and less",
"1,500β2,000", "1,500β2,000",
"2,000β2,500", "2,500β3,000",
"3,000β3,500", "3,500 and more",
"3,500 and more"
)
) %>%
group_by(weight_group) %>%
summarise(prop = prop %>% sum) %>%
ungroup() %>%
mutate(prop = prop %>% prop.table()) %>%
ggplot(aes(prop, weight_group))+
geom_col(color = NA, fill = 5)+
hrbrthemes::scale_x_percent()+
ggdark::dark_theme_minimal(base_size = 14)+
labs(
title = "Weight of human newborns",
subtitle = "United Nations data, pooled across countries and years",
caption = "@ikashnitsky",
y = "Weight, grams",
x = NULL
)+
theme(text = element_text(family = "mono"),
plot.title = element_text(size = 30, face = 2))
ggsave("2020-31-gentoo/human-newborns.png", width = 9, height = 5 )
# penguins ----------------------------------------------------------------
# proportion of each species falling in three body-mass bins, spread wide
peng <- penguins %>%
mutate(weight_group = body_mass_g %>% cut(c(0, 3e3, 4e3, Inf))) %>%
group_by(species, weight_group) %>%
summarise(n = n()) %>%
drop_na() %>%
group_by(species) %>%
mutate(prop = prop.table(n)) %>%
pivot_wider(names_from = weight_group, values_from = prop)
| /2020-31-gentoo/code-gentoo.R | permissive | ikashnitsky/tidy-tuesday | R | false | false | 2,363 | r | #===============================================================================
# 2020-07-31 -- TidyTuesday
# Gentoo penguins
# Ilya Kashnitsky, ilya.kashnitsky@gmail.com
#===============================================================================
# load required packages
library(tidyverse)
library(palmerpenguins)
# data on human birth weight
# http://data.un.org/Data.aspx?q=birth+weight&d=POP&f=tableCode%3a60
hum <- read_csv("2020-31-gentoo/UNdata_Export_20200731_154711593.zip") %>%
janitor::clean_names()
hum %>%
filter(!birth_weight=="Total") %>%
mutate(bw_3_4 = birth_weight %in% c("3000 - 3499", "3500 - 3999")) %>%
group_by(bw_3_4) %>%
summarise(prop = value %>% sum) %>%
pull(prop)
hum %>%
filter(!birth_weight=="Total") %>%
separate(birth_weight, into = c("lower", "upper"), sep = " - ") %>%
drop_na() %>%
mutate(est = (as.numeric(lower)+as.numeric(upper))/2) %>%
group_by(est) %>%
summarise(prop = value %>% sum) %>%
mutate(
weight_group = c(
"1,500 and less", "1,500 and less",
"1,500β2,000", "1,500β2,000",
"2,000β2,500", "2,500β3,000",
"3,000β3,500", "3,500 and more",
"3,500 and more"
)
) %>%
group_by(weight_group) %>%
summarise(prop = prop %>% sum) %>%
ungroup() %>%
mutate(prop = prop %>% prop.table()) %>%
ggplot(aes(prop, weight_group))+
geom_col(color = NA, fill = 5)+
hrbrthemes::scale_x_percent()+
ggdark::dark_theme_minimal(base_size = 14)+
labs(
title = "Weight of human newborns",
subtitle = "United Nations data, pooled across countries and years",
caption = "@ikashnitsky",
y = "Weight, grams",
x = NULL
)+
theme(text = element_text(family = "mono"),
plot.title = element_text(size = 30, face = 2))
ggsave("2020-31-gentoo/human-newborns.png", width = 9, height = 5 )
# penguins ----------------------------------------------------------------
peng <- penguins %>%
mutate(weight_group = body_mass_g %>% cut(c(0, 3e3, 4e3, Inf))) %>%
group_by(species, weight_group) %>%
summarise(n = n()) %>%
drop_na() %>%
group_by(species) %>%
mutate(prop = prop.table(n)) %>%
pivot_wider(names_from = weight_group, values_from = prop)
|
####################
# Set the data path
# NOTE(review): a hard-coded absolute path plus setwd() makes this script
# non-portable -- consider a relative path or a project-root helper.
####################
getwd()
path="/Users/Chidam/Downloads/Practice Fusion"
setwd(path)
getwd()
####################
# Generic function to load the datasets
#
# filename: path to a CSV file.
# Returns the file contents as a data.frame, keeping string columns as
# character (stringsAsFactors = FALSE). Prints the filename as a simple
# progress message, matching the original behaviour.
####################
funcLoad<-function(filename){
  # Fail fast with a clear message instead of read.csv's cryptic connection error
  if (!file.exists(filename)) {
    stop("File not found: ", filename, call. = FALSE)
  }
  print(filename)
  read.csv(filename, stringsAsFactors = FALSE)
}
####################
# Load the datasets and coerce categorical columns to factors
# Allergy   - factors: AllergyType, ReactionName, SeverityName
# Condition
# Diagnosis
####################
#createDF<-function(){
Allergy=funcLoad('test_SyncAllergy.csv')
Allergy$AllergyType=as.factor(Allergy$AllergyType)
Allergy$ReactionName=as.factor(Allergy$ReactionName)
Allergy$SeverityName=as.factor(Allergy$SeverityName)
Condition=funcLoad('SyncCondition.csv')
Diagnosis=funcLoad('test_SyncDiagnosis.csv')
Diagnosis$Acute=as.factor(Diagnosis$Acute)
Immunization=funcLoad('test_SyncImmunization.csv')
LabObservation=funcLoad('test_SyncLabObservation.csv')
LabObservation$HL7Text=as.factor(LabObservation$HL7Text)
LabObservation$HL7CodingSystem=as.factor(LabObservation$HL7CodingSystem)
LabObservation$AbnormalFlags=as.factor(LabObservation$AbnormalFlags)
LabObservation$ResultStatus=as.factor(LabObservation$ResultStatus)
LabObservation$IsAbnormalValue=as.factor(LabObservation$IsAbnormalValue)
LabPanel=funcLoad('test_SyncLabPanel.csv')
LabPanel$Status=as.factor(LabPanel$Status)
LabResult=funcLoad('test_SyncLabResult.csv')
Medication=funcLoad('test_SyncMedication.csv')
Patient=funcLoad('test_SyncPatient.csv')
Patient$Gender=as.factor(Patient$Gender)
Patient$State=as.factor(Patient$State)
PatientCondition=funcLoad('test_SyncPatientCondition.csv')
PatientSmokingStatus=funcLoad('test_SyncPatientSmokingStatus.csv')
Prescription=funcLoad('test_SyncPrescription.csv')
Prescription$RefillAsNeeded=as.factor(Prescription$RefillAsNeeded)
Prescription$GenericAllowed=as.factor(Prescription$GenericAllowed)
SmokingStatus=funcLoad('SyncSmokingStatus.csv')
SmokingStatus$NISTcode=as.factor(SmokingStatus$NISTcode)
Transcript=funcLoad('test_SyncTranscript.csv')
TranscriptAllergy=funcLoad('test_SyncTranscriptAllergy.csv')
TranscriptDiagnosis=funcLoad('test_SyncTranscriptDiagnosis.csv')
TranscriptMedication=funcLoad('test_SyncTranscriptMedication.csv')
#}
#createDF()
#fix_Transcript<-function(){
library(doBy)
# ---- Per-measure imputation of missing/zero vitals in Transcript ----
# Pattern for each measure: summarise per patient with doBy::summaryBy
# (min/max/mean/median/sd), derive a per-patient fallback ("<measure>sel"),
# merge it back on PatientGuid, and overwrite the bad recorded values.
# BUG FIX: every merge() below originally passed
#   byintersect(names(x),names(y))
# as the positional `by` argument; `byintersect` is not a function, so each
# call errored at run time. The join key is already given via by.x/by.y,
# so that argument is simply dropped.
#Identifying the best value for Weight (fallback: per-patient max when median is 0)
TranscriptSummary<-summaryBy(Weight~PatientGuid,data=Transcript,FUN=list(min,max,mean,median,sd))
nrow(TranscriptSummary[which(TranscriptSummary$Weight.median==0.0),])
#1401
TranscriptSummary$Weightsel=ifelse(TranscriptSummary$Weight.median==0.0,TranscriptSummary$Weight.max,TranscriptSummary$Weight.median)
TranscriptSummary=TranscriptSummary[,c(1,7)] # keep PatientGuid + Weightsel only
Transcript_new=merge(Transcript,TranscriptSummary,by.x='PatientGuid',by.y='PatientGuid')
Transcript_new$Weight=ifelse(Transcript_new$Weight==0.0,Transcript_new$Weightsel,Transcript_new$Weight)
Transcript=Transcript_new
#Identifying the best value for Height (fallback: per-patient min when median is NA)
rm(TranscriptSummary)
Transcript$Height=as.numeric(Transcript$Height)
TranscriptSummary<-summaryBy(Height~PatientGuid,data=Transcript,FUN=list(min,max,mean,median,sd),na.rm=TRUE)
nrow(TranscriptSummary[which(TranscriptSummary$Height.median=="NULL"),])
#0
nrow(TranscriptSummary[which(is.na(TranscriptSummary$Height.median)),])
#0
nrow(TranscriptSummary[which(TranscriptSummary$Height.median=="NULL" | is.na(TranscriptSummary$Height.median)),])
#0
TranscriptSummary$Heightsel=ifelse(TranscriptSummary$Height.median=="NULL" | is.na(TranscriptSummary$Height.median) ,TranscriptSummary$Height.min,TranscriptSummary$Height.median)
TranscriptSummary=TranscriptSummary[,c(1,7)]
Transcript_new=merge(Transcript,TranscriptSummary,by.x='PatientGuid',by.y='PatientGuid')
Transcript_new$Height=ifelse(is.na(Transcript_new$Height),Transcript_new$Heightsel,Transcript_new$Height)
Transcript=Transcript_new
#Identifying the best value for BMI (fallback: per-patient min when median is NA)
rm(TranscriptSummary)
Transcript$BMI=as.numeric(Transcript$BMI)
TranscriptSummary<-summaryBy(BMI~PatientGuid,data=Transcript,FUN=list(min,max,mean,median,sd),na.rm=TRUE)
nrow(TranscriptSummary[which(TranscriptSummary$BMI.median=="NULL" | is.na(TranscriptSummary$BMI.median)),])
#0
TranscriptSummary$BMIsel=ifelse(TranscriptSummary$BMI.median=="NULL" | is.na(TranscriptSummary$BMI.median) ,TranscriptSummary$BMI.min,TranscriptSummary$BMI.median)
TranscriptSummary=TranscriptSummary[,c(1,7)]
Transcript_new=merge(Transcript,TranscriptSummary,by.x='PatientGuid',by.y='PatientGuid')
Transcript_new$BMI=ifelse(is.na(Transcript_new$BMI),Transcript_new$BMIsel,Transcript_new$BMI)
Transcript=Transcript_new
#Identifying the best value for SystolicBP (fallback: per-patient max when median is 0/NA)
rm(TranscriptSummary)
#Transcript$SystolicBP=as.numeric(Transcript$SystolicBP)
TranscriptSummary<-summaryBy(SystolicBP~PatientGuid,data=Transcript,FUN=list(min,max,mean,median,sd),na.rm=TRUE)
nrow(TranscriptSummary[which(TranscriptSummary$SystolicBP.median==0.0 | is.na(TranscriptSummary$SystolicBP.median)),])
#943
TranscriptSummary$SystolicBPsel=ifelse(TranscriptSummary$SystolicBP.median==0.0 | is.na(TranscriptSummary$SystolicBP.median) ,TranscriptSummary$SystolicBP.max,TranscriptSummary$SystolicBP.median)
TranscriptSummary=TranscriptSummary[,c(1,7)]
Transcript_new=merge(Transcript,TranscriptSummary,by.x='PatientGuid',by.y='PatientGuid')
Transcript_new$SystolicBP=ifelse(Transcript_new$SystolicBP==0.0,Transcript_new$SystolicBPsel,Transcript_new$SystolicBP)
Transcript=Transcript_new
#Identifying the best value for DiastolicBP (fallback: per-patient max when median is 0/NA)
rm(TranscriptSummary)
TranscriptSummary<-summaryBy(DiastolicBP~PatientGuid,data=Transcript,FUN=list(min,max,mean,median,sd),na.rm=TRUE)
nrow(TranscriptSummary[which(TranscriptSummary$DiastolicBP.median==0.0 | is.na(TranscriptSummary$DiastolicBP.median)),])
#943
TranscriptSummary$DiastolicBPsel=ifelse(TranscriptSummary$DiastolicBP.median==0.0 | is.na(TranscriptSummary$DiastolicBP.median) ,TranscriptSummary$DiastolicBP.max,TranscriptSummary$DiastolicBP.median)
TranscriptSummary=TranscriptSummary[,c(1,7)]
Transcript_new=merge(Transcript,TranscriptSummary,by.x='PatientGuid',by.y='PatientGuid')
Transcript_new$DiastolicBP=ifelse(Transcript_new$DiastolicBP==0.0,Transcript_new$DiastolicBPsel,Transcript_new$DiastolicBP)
Transcript=Transcript_new
#Identifying the best value for RespiratoryRate (NAs are kept: min/max are unreliable here)
rm(TranscriptSummary)
Transcript$RespiratoryRate=as.numeric(Transcript$RespiratoryRate)
TranscriptSummary<-summaryBy(RespiratoryRate~PatientGuid,data=Transcript,FUN=list(min,max,mean,median,sd),na.rm=TRUE)
# BUG FIX: the original diagnostic applied is.na() to the OR of three tests
# and compared a numeric median against the strings "Inf"/"-Inf"; rewritten
# with is.na()/is.infinite() applied to the median itself.
nrow(TranscriptSummary[which(is.na(TranscriptSummary$RespiratoryRate.median) | is.infinite(TranscriptSummary$RespiratoryRate.median)),])
#1439
#Retaining the NAs from the median of RespiratoryRate, as min and max are bad
TranscriptSummary$RespiratoryRatesel=TranscriptSummary$RespiratoryRate.median
TranscriptSummary=TranscriptSummary[,c(1,7)]
Transcript_new=merge(Transcript,TranscriptSummary,by.x='PatientGuid',by.y='PatientGuid')
Transcript_new$RespiratoryRate=ifelse(is.na(Transcript_new$RespiratoryRate),Transcript_new$RespiratoryRatesel,Transcript_new$RespiratoryRate)
Transcript=Transcript_new
#}
#Build_Dataset<-function(){
#Add new derived feature ICD9root based on ICD9Code as below in Diagnosis DF
Diagnosis$ICD9root=substr(Diagnosis$ICD9Code,1,3)
# Add a new feature ICD9class based on the ICD9root in Diagnosis DF
Diagnosis$ICD9class[Diagnosis$ICD9root>"000" & Diagnosis$ICD9root < "140"]="Infectious Parasite Diseases"
Diagnosis$ICD9class[Diagnosis$ICD9root>="140" & Diagnosis$ICD9root < "240"]="neoplasms"
Diagnosis$ICD9class[Diagnosis$ICD9root>="240" & Diagnosis$ICD9root < "280"]="endocrine, nutritional and metabolic diseases, and immunity disorders"
Diagnosis$ICD9class[Diagnosis$ICD9root>="280" & Diagnosis$ICD9root < "290"]="diseases of the blood and blood-forming organs"
Diagnosis$ICD9class[Diagnosis$ICD9root>="290" & Diagnosis$ICD9root < "320"]="mental disorders"
Diagnosis$ICD9class[Diagnosis$ICD9root>="320" & Diagnosis$ICD9root < "360"]="diseases of the nervous system"
Diagnosis$ICD9class[Diagnosis$ICD9root>="360" & Diagnosis$ICD9root < "390"]="diseases of the sense organs"
Diagnosis$ICD9class[Diagnosis$ICD9root>="390" & Diagnosis$ICD9root < "460"]="diseases of the circulatory system"
Diagnosis$ICD9class[Diagnosis$ICD9root>="460" & Diagnosis$ICD9root < "520"]="diseases of the respiratory system"
Diagnosis$ICD9class[Diagnosis$ICD9root>="520" & Diagnosis$ICD9root < "580"]="diseases of the digestive system"
Diagnosis$ICD9class[Diagnosis$ICD9root>="580" & Diagnosis$ICD9root < "630"]="diseases of the genitourinary system"
Diagnosis$ICD9class[Diagnosis$ICD9root>="630" & Diagnosis$ICD9root < "680"]="complications of pregnancy, childbirth, and the puerperium"
Diagnosis$ICD9class[Diagnosis$ICD9root>="680" & Diagnosis$ICD9root < "710"]="diseases of the skin and subcutaneous tissue"
Diagnosis$ICD9class[Diagnosis$ICD9root>="710" & Diagnosis$ICD9root < "740"]="diseases of the musculoskeletal system and connective tissue"
Diagnosis$ICD9class[Diagnosis$ICD9root>="740" & Diagnosis$ICD9root < "760"]="congenital anomalies"
Diagnosis$ICD9class[Diagnosis$ICD9root>="760" & Diagnosis$ICD9root < "780"]="certain conditions originating in the perinatal period"
Diagnosis$ICD9class[Diagnosis$ICD9root>="780" & Diagnosis$ICD9root < "800"]="symptoms, signs, and ill-defined conditions"
Diagnosis$ICD9class[Diagnosis$ICD9root>="800" & Diagnosis$ICD9root <= "999"]="injury and poisoning"
Diagnosis$ICD9class[Diagnosis$ICD9root>="E00" & Diagnosis$ICD9root < "V99"]="external causes of injury and supplemental classification"
Diagnosis$ICD9root=as.factor(Diagnosis$ICD9root)
#merge Patient & Diagnosis
# NOTE(review): both "by=" and "by.x="/"by.y=" are supplied; by.x/by.y take
# precedence in merge(), so the by=intersect(...) argument is redundant here
# and in the merges below.
Patient_Diagnosis=merge(Patient,Diagnosis,by=intersect(names(Patient), names(Diagnosis)),by.x='PatientGuid',by.y='PatientGuid')
#fix_Transcript()
#merge Patient_Diagnosis and Transcript
Patient_Diagnosis_Transcript=merge(Patient_Diagnosis,Transcript,by=intersect(names(Patient_Diagnosis), names(Transcript)),by.x='PatientGuid',by.y='PatientGuid')
#merge Patient_Diagnosis_Transcript and Allergy
# all.x=TRUE keeps patients without allergy records (left join).
Patient_Diagnosis_Transcript_Allergy=merge(Patient_Diagnosis_Transcript,Allergy,by=intersect(names(Patient_Diagnosis_Transcript), names(Allergy)),by.x='PatientGuid',by.y='PatientGuid',all.x=TRUE)
#merge Patient_Diagnosis_Transcript_Allergy and SmokingStatus
Patient_Diagnosis_Transcript_Allergy_SmokingStatus=merge(Patient_Diagnosis_Transcript_Allergy,PatientSmokingStatus,by=intersect(names(Patient_Diagnosis_Transcript_Allergy), names(PatientSmokingStatus)),by.x='PatientGuid',by.y='PatientGuid',all.x=TRUE)
#merge Patient_Diagnosis_Transcript_Allergy_SmokingStatus with Smoking status
# This last join is keyed on SmokingStatusGuid (not PatientGuid) to attach the
# smoking-status lookup labels.
Patient_Diagnosis_Transcript_Allergy_SmokingStatus_Smoking=merge(Patient_Diagnosis_Transcript_Allergy_SmokingStatus,SmokingStatus,by=intersect(names(Patient_Diagnosis_Transcript_Allergy_SmokingStatus), names(SmokingStatus)),by.x='SmokingStatusGuid',by.y='SmokingStatusGuid',all.x=TRUE)
final=Patient_Diagnosis_Transcript_Allergy_SmokingStatus_Smoking
# Derive age relative to 2010 and drop the raw birth year.
final$age=2010-as.numeric(final$YearOfBirth)
final$YearOfBirth=NULL
#}
#Build_Dataset()
# Persist the merged analysis table (write.csv includes row names by default).
write.csv(final,file='data_prep_final.csv')
| /scripts/Dataprep.R | no_license | chidamnat/Healthcare-Analytics-using-EHR-data | R | false | false | 11,867 | r | ####################
#Set the data path
####################
# NOTE(review): hard-coded absolute path and setwd() make this script
# machine-specific; consider a relative path or a configurable root.
getwd()
path="/Users/Chidam/Downloads/Practice Fusion"
setwd(path)
# Echo the new working directory to confirm the change took effect.
getwd()
####################
#Generic function to load the datasets
#Loads the datasets
#inputs filename
####################
#' Load one CSV dataset, echoing the file name as a progress indicator.
#'
#' @param filename Path to the CSV file to read.
#' @param ... Optional arguments forwarded to read.csv() (e.g. na.strings,
#'   colClasses). Generalizes the original fixed call; existing one-argument
#'   callers behave exactly as before.
#' @return A data.frame with character columns left as character
#'   (stringsAsFactors = FALSE).
funcLoad<-function(filename, ...){
  # Fail fast with a clear message rather than read.csv's lower-level error.
  if (!file.exists(filename)) {
    stop("funcLoad: file not found: ", filename, call. = FALSE)
  }
  print(filename)  # progress echo, as in the original
  read.csv(filename, stringsAsFactors = FALSE, ...)
}
####################
#Load the datasets
#Allergy - Factors (AllergyType,ReactionName,SeverityName)
#Condition
#Diagnosis
####################
# Each table is read into a global data.frame; categorical columns are then
# converted to factors explicitly (funcLoad reads with stringsAsFactors=FALSE).
#createDF<-function(){
Allergy=funcLoad('test_SyncAllergy.csv')
Allergy$AllergyType=as.factor(Allergy$AllergyType)
Allergy$ReactionName=as.factor(Allergy$ReactionName)
Allergy$SeverityName=as.factor(Allergy$SeverityName)
Condition=funcLoad('SyncCondition.csv')
Diagnosis=funcLoad('test_SyncDiagnosis.csv')
Diagnosis$Acute=as.factor(Diagnosis$Acute)
Immunization=funcLoad('test_SyncImmunization.csv')
LabObservation=funcLoad('test_SyncLabObservation.csv')
LabObservation$HL7Text=as.factor(LabObservation$HL7Text)
LabObservation$HL7CodingSystem=as.factor(LabObservation$HL7CodingSystem)
LabObservation$AbnormalFlags=as.factor(LabObservation$AbnormalFlags)
LabObservation$ResultStatus=as.factor(LabObservation$ResultStatus)
LabObservation$IsAbnormalValue=as.factor(LabObservation$IsAbnormalValue)
LabPanel=funcLoad('test_SyncLabPanel.csv')
LabPanel$Status=as.factor(LabPanel$Status)
LabResult=funcLoad('test_SyncLabResult.csv')
Medication=funcLoad('test_SyncMedication.csv')
Patient=funcLoad('test_SyncPatient.csv')
Patient$Gender=as.factor(Patient$Gender)
Patient$State=as.factor(Patient$State)
PatientCondition=funcLoad('test_SyncPatientCondition.csv')
PatientSmokingStatus=funcLoad('test_SyncPatientSmokingStatus.csv')
Prescription=funcLoad('test_SyncPrescription.csv')
Prescription$RefillAsNeeded=as.factor(Prescription$RefillAsNeeded)
Prescription$GenericAllowed=as.factor(Prescription$GenericAllowed)
SmokingStatus=funcLoad('SyncSmokingStatus.csv')
SmokingStatus$NISTcode=as.factor(SmokingStatus$NISTcode)
Transcript=funcLoad('test_SyncTranscript.csv')
TranscriptAllergy=funcLoad('test_SyncTranscriptAllergy.csv')
TranscriptDiagnosis=funcLoad('test_SyncTranscriptDiagnosis.csv')
TranscriptMedication=funcLoad('test_SyncTranscriptMedication.csv')
#}
#createDF()
#fix_Transcript<-function(){
library(doBy)
# Imputes sentinel/missing vitals in Transcript with a per-patient fallback.
# Pattern for each vital: summarise per patient (doBy::summaryBy), choose a
# replacement value ("<vital>sel"), merge it back onto Transcript on
# PatientGuid, then substitute it wherever the raw reading is missing. The
# helper "<vital>sel" columns remain on Transcript afterwards, as in the
# original script. The bare nrow(...) lines are interactive diagnostics; the
# counts observed at authoring time are kept as comments.
#Identifying the best value for weight
TranscriptSummary<-summaryBy(Weight~PatientGuid,data=Transcript,FUN=list(min,max,mean,median,sd))
nrow(TranscriptSummary[which(TranscriptSummary$Weight.median==0.0),])
#1401
# A zero median means weight was never recorded properly; fall back to the max.
TranscriptSummary$Weightsel=ifelse(TranscriptSummary$Weight.median==0.0,TranscriptSummary$Weight.max,TranscriptSummary$Weight.median)
TranscriptSummary=TranscriptSummary[,c(1,7)]
# BUGFIX: the original passed "byintersect(...)" (a typo for "by=intersect(...)")
# as the third positional argument to merge(); it never errored only because
# merge() does not force the unused "by" promise when by.x/by.y are supplied.
# The argument is redundant given by.x/by.y, so it is removed here and below.
Transcript_new=merge(Transcript,TranscriptSummary,by.x='PatientGuid',by.y='PatientGuid')
Transcript_new$Weight=ifelse(Transcript_new$Weight==0.0,Transcript_new$Weightsel,Transcript_new$Weight)
Transcript=Transcript_new
#Identifying the best value for Height
rm(TranscriptSummary)
Transcript$Height=as.numeric(Transcript$Height)
TranscriptSummary<-summaryBy(Height~PatientGuid,data=Transcript,FUN=list(min,max,mean,median,sd),na.rm=TRUE)
nrow(TranscriptSummary[which(TranscriptSummary$Height.median=="NULL"),])
#0
nrow(TranscriptSummary[which(is.na(TranscriptSummary$Height.median)),])
#0
nrow(TranscriptSummary[which(TranscriptSummary$Height.median=="NULL" | is.na(TranscriptSummary$Height.median)),])
#0
# Missing median -> fall back to the per-patient minimum height.
TranscriptSummary$Heightsel=ifelse(TranscriptSummary$Height.median=="NULL" | is.na(TranscriptSummary$Height.median) ,TranscriptSummary$Height.min,TranscriptSummary$Height.median)
TranscriptSummary=TranscriptSummary[,c(1,7)]
Transcript_new=merge(Transcript,TranscriptSummary,by.x='PatientGuid',by.y='PatientGuid')   # BUGFIX: "byintersect(" typo removed (see Weight above)
Transcript_new$Height=ifelse(is.na(Transcript_new$Height),Transcript_new$Heightsel,Transcript_new$Height)
Transcript=Transcript_new
#Identifying the best value for BMI
rm(TranscriptSummary)
Transcript$BMI=as.numeric(Transcript$BMI)
TranscriptSummary<-summaryBy(BMI~PatientGuid,data=Transcript,FUN=list(min,max,mean,median,sd),na.rm=TRUE)
nrow(TranscriptSummary[which(TranscriptSummary$BMI.median=="NULL" | is.na(TranscriptSummary$BMI.median)),])
#0
TranscriptSummary$BMIsel=ifelse(TranscriptSummary$BMI.median=="NULL" | is.na(TranscriptSummary$BMI.median) ,TranscriptSummary$BMI.min,TranscriptSummary$BMI.median)
TranscriptSummary=TranscriptSummary[,c(1,7)]
Transcript_new=merge(Transcript,TranscriptSummary,by.x='PatientGuid',by.y='PatientGuid')   # BUGFIX: "byintersect(" typo removed
Transcript_new$BMI=ifelse(is.na(Transcript_new$BMI),Transcript_new$BMIsel,Transcript_new$BMI)
Transcript=Transcript_new
#Identifying the best value for SystolicBP
rm(TranscriptSummary)
#Transcript$SystolicBP=as.numeric(Transcript$SystolicBP)
TranscriptSummary<-summaryBy(SystolicBP~PatientGuid,data=Transcript,FUN=list(min,max,mean,median,sd),na.rm=TRUE)
nrow(TranscriptSummary[which(TranscriptSummary$SystolicBP.median==0.0 | is.na(TranscriptSummary$SystolicBP.median)),])
#943
TranscriptSummary$SystolicBPsel=ifelse(TranscriptSummary$SystolicBP.median==0.0 | is.na(TranscriptSummary$SystolicBP.median) ,TranscriptSummary$SystolicBP.max,TranscriptSummary$SystolicBP.median)
TranscriptSummary=TranscriptSummary[,c(1,7)]
Transcript_new=merge(Transcript,TranscriptSummary,by.x='PatientGuid',by.y='PatientGuid')   # BUGFIX: "byintersect(" typo removed
# BUGFIX: also replace NA readings. The original tested only ==0.0, and
# ifelse(NA == 0, ...) yields NA, so NA readings were silently left in place
# even though SystolicBPsel was computed for them. Mirrors the Height/BMI
# handling above.
Transcript_new$SystolicBP=ifelse(Transcript_new$SystolicBP==0.0 | is.na(Transcript_new$SystolicBP),Transcript_new$SystolicBPsel,Transcript_new$SystolicBP)
Transcript=Transcript_new
#Identifying the best value for DiastolicBP
rm(TranscriptSummary)
TranscriptSummary<-summaryBy(DiastolicBP~PatientGuid,data=Transcript,FUN=list(min,max,mean,median,sd),na.rm=TRUE)
nrow(TranscriptSummary[which(TranscriptSummary$DiastolicBP.median==0.0 | is.na(TranscriptSummary$DiastolicBP.median)),])
#943
TranscriptSummary$DiastolicBPsel=ifelse(TranscriptSummary$DiastolicBP.median==0.0 | is.na(TranscriptSummary$DiastolicBP.median) ,TranscriptSummary$DiastolicBP.max,TranscriptSummary$DiastolicBP.median)
TranscriptSummary=TranscriptSummary[,c(1,7)]
Transcript_new=merge(Transcript,TranscriptSummary,by.x='PatientGuid',by.y='PatientGuid')   # BUGFIX: "byintersect(" typo removed
# BUGFIX: also replace NA readings (same NA-propagation issue as SystolicBP).
Transcript_new$DiastolicBP=ifelse(Transcript_new$DiastolicBP==0.0 | is.na(Transcript_new$DiastolicBP),Transcript_new$DiastolicBPsel,Transcript_new$DiastolicBP)
Transcript=Transcript_new
#Identifying the best value for RespiratoryRate
rm(TranscriptSummary)
Transcript$RespiratoryRate=as.numeric(Transcript$RespiratoryRate)
TranscriptSummary<-summaryBy(RespiratoryRate~PatientGuid,data=Transcript,FUN=list(min,max,mean,median,sd),na.rm=TRUE)
# BUGFIX: the original diagnostic count had the Inf/-Inf comparisons nested
# inside is.na() by a misplaced parenthesis; the conditions are now separate.
nrow(TranscriptSummary[which(TranscriptSummary$RespiratoryRate.median=="NULL" | is.na(TranscriptSummary$RespiratoryRate.median) | TranscriptSummary$RespiratoryRate.median=="Inf" | TranscriptSummary$RespiratoryRate.median=="-Inf"),])
#1439
#Retaining the NAs frm median of RespiratoryRate as min, max are bad
TranscriptSummary$RespiratoryRatesel=TranscriptSummary$RespiratoryRate.median
TranscriptSummary=TranscriptSummary[,c(1,7)]
Transcript_new=merge(Transcript,TranscriptSummary,by.x='PatientGuid',by.y='PatientGuid')   # BUGFIX: "byintersect(" typo removed
Transcript_new$RespiratoryRate=ifelse(is.na(Transcript_new$RespiratoryRate),Transcript_new$RespiratoryRatesel,Transcript_new$RespiratoryRate)
Transcript=Transcript_new
#}
#Build_Dataset<-function(){
#Add new derived feature ICD9root based on ICD9Code as below in Diagnosis DF
# ICD9root = first three characters of the ICD-9 code (chapter-level root).
Diagnosis$ICD9root=substr(Diagnosis$ICD9Code,1,3)
# Add a new feature ICD9class based on the ICD9root in Diagnosis DF
# Each range below corresponds to a standard ICD-9-CM chapter; the ranges are
# disjoint, so assignment order does not matter.
# NOTE(review): ICD9root is character, so these are lexicographic comparisons.
# The first range uses a strict lower bound (>"000"), and where "E"/"V" roots
# sort relative to digit roots is collation/locale dependent -- confirm the
# intended locale before relying on the final rule.
Diagnosis$ICD9class[Diagnosis$ICD9root>"000" & Diagnosis$ICD9root < "140"]="Infectious Parasite Diseases"
Diagnosis$ICD9class[Diagnosis$ICD9root>="140" & Diagnosis$ICD9root < "240"]="neoplasms"
Diagnosis$ICD9class[Diagnosis$ICD9root>="240" & Diagnosis$ICD9root < "280"]="endocrine, nutritional and metabolic diseases, and immunity disorders"
Diagnosis$ICD9class[Diagnosis$ICD9root>="280" & Diagnosis$ICD9root < "290"]="diseases of the blood and blood-forming organs"
Diagnosis$ICD9class[Diagnosis$ICD9root>="290" & Diagnosis$ICD9root < "320"]="mental disorders"
Diagnosis$ICD9class[Diagnosis$ICD9root>="320" & Diagnosis$ICD9root < "360"]="diseases of the nervous system"
Diagnosis$ICD9class[Diagnosis$ICD9root>="360" & Diagnosis$ICD9root < "390"]="diseases of the sense organs"
Diagnosis$ICD9class[Diagnosis$ICD9root>="390" & Diagnosis$ICD9root < "460"]="diseases of the circulatory system"
Diagnosis$ICD9class[Diagnosis$ICD9root>="460" & Diagnosis$ICD9root < "520"]="diseases of the respiratory system"
Diagnosis$ICD9class[Diagnosis$ICD9root>="520" & Diagnosis$ICD9root < "580"]="diseases of the digestive system"
Diagnosis$ICD9class[Diagnosis$ICD9root>="580" & Diagnosis$ICD9root < "630"]="diseases of the genitourinary system"
Diagnosis$ICD9class[Diagnosis$ICD9root>="630" & Diagnosis$ICD9root < "680"]="complications of pregnancy, childbirth, and the puerperium"
Diagnosis$ICD9class[Diagnosis$ICD9root>="680" & Diagnosis$ICD9root < "710"]="diseases of the skin and subcutaneous tissue"
Diagnosis$ICD9class[Diagnosis$ICD9root>="710" & Diagnosis$ICD9root < "740"]="diseases of the musculoskeletal system and connective tissue"
Diagnosis$ICD9class[Diagnosis$ICD9root>="740" & Diagnosis$ICD9root < "760"]="congenital anomalies"
Diagnosis$ICD9class[Diagnosis$ICD9root>="760" & Diagnosis$ICD9root < "780"]="certain conditions originating in the perinatal period"
Diagnosis$ICD9class[Diagnosis$ICD9root>="780" & Diagnosis$ICD9root < "800"]="symptoms, signs, and ill-defined conditions"
Diagnosis$ICD9class[Diagnosis$ICD9root>="800" & Diagnosis$ICD9root <= "999"]="injury and poisoning"
# "E..." (external cause) and "V..." (supplemental) roots handled together.
Diagnosis$ICD9class[Diagnosis$ICD9root>="E00" & Diagnosis$ICD9root < "V99"]="external causes of injury and supplemental classification"
# Freeze the root code as a factor once classification is complete.
Diagnosis$ICD9root=as.factor(Diagnosis$ICD9root)
#merge Patient & Diagnosis
# NOTE(review): both "by=" and "by.x="/"by.y=" are supplied; by.x/by.y take
# precedence in merge(), so the by=intersect(...) argument is redundant here
# and in the merges below.
Patient_Diagnosis=merge(Patient,Diagnosis,by=intersect(names(Patient), names(Diagnosis)),by.x='PatientGuid',by.y='PatientGuid')
#fix_Transcript()
#merge Patient_Diagnosis and Transcript
Patient_Diagnosis_Transcript=merge(Patient_Diagnosis,Transcript,by=intersect(names(Patient_Diagnosis), names(Transcript)),by.x='PatientGuid',by.y='PatientGuid')
#merge Patient_Diagnosis_Transcript and Allergy
# all.x=TRUE keeps patients without allergy records (left join).
Patient_Diagnosis_Transcript_Allergy=merge(Patient_Diagnosis_Transcript,Allergy,by=intersect(names(Patient_Diagnosis_Transcript), names(Allergy)),by.x='PatientGuid',by.y='PatientGuid',all.x=TRUE)
#merge Patient_Diagnosis_Transcript_Allergy and SmokingStatus
Patient_Diagnosis_Transcript_Allergy_SmokingStatus=merge(Patient_Diagnosis_Transcript_Allergy,PatientSmokingStatus,by=intersect(names(Patient_Diagnosis_Transcript_Allergy), names(PatientSmokingStatus)),by.x='PatientGuid',by.y='PatientGuid',all.x=TRUE)
#merge Patient_Diagnosis_Transcript_Allergy_SmokingStatus with Smoking status
# This last join is keyed on SmokingStatusGuid (not PatientGuid) to attach the
# smoking-status lookup labels.
Patient_Diagnosis_Transcript_Allergy_SmokingStatus_Smoking=merge(Patient_Diagnosis_Transcript_Allergy_SmokingStatus,SmokingStatus,by=intersect(names(Patient_Diagnosis_Transcript_Allergy_SmokingStatus), names(SmokingStatus)),by.x='SmokingStatusGuid',by.y='SmokingStatusGuid',all.x=TRUE)
final=Patient_Diagnosis_Transcript_Allergy_SmokingStatus_Smoking
# Derive age relative to 2010 and drop the raw birth year.
final$age=2010-as.numeric(final$YearOfBirth)
final$YearOfBirth=NULL
#}
#Build_Dataset()
# Persist the merged analysis table (write.csv includes row names by default).
write.csv(final,file='data_prep_final.csv')
|
#' @title norm_TSS
#'
#' @importFrom phyloseq taxa_are_rows sample_sums
#' @export
#' @description
#' Calculate the raw library sizes from a phyloseq object. If used to divide
#' counts, known as Total Sum Scaling normalization (TSS).
#'
#' @param object phyloseq object containing the counts to be normalized.
#' @param method normalization method to be used.
#' @param verbose an optional logical value. If \code{TRUE}, information about
#' the steps of the algorithm is printed. Default \code{verbose = TRUE}.
#'
#' @return A new column containing the TSS scaling factors is added to the
#' phyloseq \code{sample_data} slot.
#'
#' @seealso \code{\link{setNormalizations}} and \code{\link{runNormalizations}}
#' to fastly set and run normalizations.
#'
#' @examples
#' set.seed(1)
#' # Create a very simple phyloseq object
#' counts <- matrix(rnbinom(n = 60, size = 3, prob = 0.5), nrow = 10, ncol = 6)
#' metadata <- data.frame("Sample" = c("S1", "S2", "S3", "S4", "S5", "S6"),
#' "group" = as.factor(c("A", "A", "A", "B", "B", "B")))
#' ps <- phyloseq::phyloseq(phyloseq::otu_table(counts, taxa_are_rows = TRUE),
#' phyloseq::sample_data(metadata))
#'
#' # Calculate the scaling factors
#' ps_NF <- norm_TSS(object = ps)
#' # The phyloseq object now contains the scaling factors:
#' scaleFacts <- phyloseq::sample_data(ps_NF)[, "NF.TSS"]
#' head(scaleFacts)
#'
#' # VERY IMPORTANT: to convert scaling factors to normalization factors
#' # multiply them by the library sizes and renormalize.
#' # BUGFIX: the original example referenced an undefined object
#' # (ps_stool_16S); use the ps_NF object created above.
#' normFacts = scaleFacts * phyloseq::sample_sums(ps_NF)
#' # Renormalize: multiply to 1
#' normFacts = normFacts/exp(colMeans(log(normFacts)))
norm_TSS <- function(object, method = "TSS", verbose = TRUE)
{
    # Orient the count matrix so taxa are rows, keeping the returned object
    # consistent regardless of the input orientation.
    if (!phyloseq::taxa_are_rows(object))
        object <- t(object)
    # Scaling factor = 1 / library size. NOTE(review): a sample with a zero
    # library size yields an Inf factor here -- confirm upstream filtering.
    normFacts <- 1/phyloseq::sample_sums(object)
    # Store the factors under "NF.<method>" (e.g. "NF.TSS") in sample_data.
    NF.col <- paste("NF", method, sep = ".")
    phyloseq::sample_data(object)[,NF.col] <- normFacts
    if(verbose)
        message(NF.col, " column has been added.")
    return(object)
}# END - function: norm_TSS
| /R/norm_TSS.R | no_license | changrong1023/benchdamic | R | false | false | 2,092 | r | #' @title norm_TSS
#' @title norm_TSS
#'
#' @importFrom phyloseq taxa_are_rows sample_sums
#' @export
#' @description
#' Calculate the raw library sizes from a phyloseq object. If used to divide
#' counts, known as Total Sum Scaling normalization (TSS).
#'
#' @param object phyloseq object containing the counts to be normalized.
#' @param method normalization method to be used.
#' @param verbose an optional logical value. If \code{TRUE}, information about
#' the steps of the algorithm is printed. Default \code{verbose = TRUE}.
#'
#' @return A new column containing the TSS scaling factors is added to the
#' phyloseq \code{sample_data} slot.
#'
#' @seealso \code{\link{setNormalizations}} and \code{\link{runNormalizations}}
#' to fastly set and run normalizations.
#'
#' @examples
#' set.seed(1)
#' # Create a very simple phyloseq object
#' counts <- matrix(rnbinom(n = 60, size = 3, prob = 0.5), nrow = 10, ncol = 6)
#' metadata <- data.frame("Sample" = c("S1", "S2", "S3", "S4", "S5", "S6"),
#' "group" = as.factor(c("A", "A", "A", "B", "B", "B")))
#' ps <- phyloseq::phyloseq(phyloseq::otu_table(counts, taxa_are_rows = TRUE),
#' phyloseq::sample_data(metadata))
#'
#' # Calculate the scaling factors
#' ps_NF <- norm_TSS(object = ps)
#' # The phyloseq object now contains the scaling factors:
#' scaleFacts <- phyloseq::sample_data(ps_NF)[, "NF.TSS"]
#' head(scaleFacts)
#'
#' # VERY IMPORTANT: to convert scaling factors to normalization factors
#' # multiply them by the library sizes and renormalize.
#' # BUGFIX: the original example referenced an undefined object
#' # (ps_stool_16S); use the ps_NF object created above.
#' normFacts = scaleFacts * phyloseq::sample_sums(ps_NF)
#' # Renormalize: multiply to 1
#' normFacts = normFacts/exp(colMeans(log(normFacts)))
norm_TSS <- function(object, method = "TSS", verbose = TRUE)
{
    # Orient the count matrix so taxa are rows, keeping the returned object
    # consistent regardless of the input orientation.
    if (!phyloseq::taxa_are_rows(object))
        object <- t(object)
    # Scaling factor = 1 / library size. NOTE(review): a sample with a zero
    # library size yields an Inf factor here -- confirm upstream filtering.
    normFacts <- 1/phyloseq::sample_sums(object)
    # Store the factors under "NF.<method>" (e.g. "NF.TSS") in sample_data.
    NF.col <- paste("NF", method, sep = ".")
    phyloseq::sample_data(object)[,NF.col] <- normFacts
    if(verbose)
        message(NF.col, " column has been added.")
    return(object)
}# END - function: norm_TSS
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.